MATS ML Alignment Theory Scholars program
ID: mats · Entity type: organization · Path: /knowledge-base/organizations/mats/
Entity ID (EID): E548
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
```json
{
  "id": "mats",
  "numericId": null,
  "path": "/knowledge-base/organizations/mats/",
  "filePath": "knowledge-base/organizations/mats.mdx",
  "title": "MATS ML Alignment Theory Scholars program",
  "quality": 60,
  "readerImportance": 31.5,
  "researchImportance": 46.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "MATS is a well-documented 12-week fellowship program that has successfully trained 213 AI safety researchers with strong career outcomes (80% in alignment work) and research impact (160+ publications, 8000+ citations). The program provides comprehensive support (\\$27k per scholar) and has produced notable alumni who founded organizations like Apollo Research and joined major AI labs.",
  "description": "A 12-week fellowship program pairing aspiring AI safety researchers with expert mentors in Berkeley and London, training scholars through mentorship, seminars, and independent research projects.",
  "ratings": {
    "novelty": 3,
    "rigor": 6,
    "actionability": 7,
    "completeness": 8
  },
  "category": "organizations",
  "subcategory": "safety-orgs",
  "clusters": [
    "community",
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 2466,
    "tableCount": 2,
    "diagramCount": 0,
    "internalLinks": 29,
    "externalLinks": 3,
    "footnoteCount": 0,
    "bulletRatio": 0.37,
    "sectionCount": 25,
    "hasOverview": true,
    "structuralScore": 12
  },
  "suggestedQuality": 80,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 2466,
  "unconvertedLinks": [
    {
      "text": "matsprogram.org",
      "url": "https://matsprogram.org",
      "resourceId": "ba3a8bd9c8404d7b",
      "resourceTitle": "MATS Research Program"
    },
    {
      "text": "lesswrong.com",
      "url": "https://www.lesswrong.com/posts/8vLvpxzpc6ntfBWNo/seri-ml-alignment-theory-scholars-program-2022",
      "resourceId": "29be60f8a6a01a55",
      "resourceTitle": "SERI ML Alignment Theory Scholars Program 2022"
    },
    {
      "text": "forum.effectivealtruism.org",
      "url": "https://forum.effectivealtruism.org/posts/da8MmRPAB55Fepjjk/my-experience-applying-to-mats-6-0",
      "resourceId": "e83de9c886719c97",
      "resourceTitle": "My experience applying to MATS 6.0"
    }
  ],
  "unconvertedLinkCount": 3,
  "convertedLinkCount": 0,
  "backlinkCount": 2,
  "citationHealth": {
    "total": 72,
    "withQuotes": 59,
    "verified": 57,
    "accuracyChecked": 57,
    "accurate": 34,
    "inaccurate": 0,
    "avgScore": 0.9309683452218266
  },
  "hallucinationRisk": {
    "level": "high",
    "score": 75,
    "factors": [
      "biographical-claims",
      "no-citations"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 15,
    "similarPages": [
      {
        "id": "center-for-applied-rationality",
        "title": "Center for Applied Rationality",
        "path": "/knowledge-base/organizations/center-for-applied-rationality/",
        "similarity": 15
      },
      {
        "id": "field-building-analysis",
        "title": "AI Safety Field Building Analysis",
        "path": "/knowledge-base/responses/field-building-analysis/",
        "similarity": 15
      },
      {
        "id": "miri-era",
        "title": "The MIRI Era (2000-2015)",
        "path": "/knowledge-base/history/miri-era/",
        "similarity": 14
      },
      {
        "id": "ea-global",
        "title": "EA Global",
        "path": "/knowledge-base/organizations/ea-global/",
        "similarity": 14
      },
      {
        "id": "elicit",
        "title": "Elicit (AI Research Tool)",
        "path": "/knowledge-base/organizations/elicit/",
        "similarity": 14
      }
    ]
  },
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 10,
      "diagrams": 1,
      "internalLinks": 20,
      "externalLinks": 12,
      "footnotes": 7,
      "references": 7
    },
    "actuals": {
      "tables": 2,
      "diagrams": 0,
      "internalLinks": 29,
      "externalLinks": 3,
      "footnotes": 0,
      "references": 24,
      "quotesWithQuotes": 59,
      "quotesTotal": 72,
      "accuracyChecked": 57,
      "accuracyTotal": 72
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "amber",
      "footnotes": "red",
      "references": "green",
      "quotes": "green",
      "accuracy": "green"
    },
    "ratingsString": "N:3 R:6 A:7 C:8"
  },
  "readerRank": 437,
  "researchRank": 305,
  "recommendedScore": 157.5
}
```
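The record above is described as a build-time merge of MDX frontmatter, an Entity YAML file, and computed metrics. Below is a minimal TypeScript sketch of what that assembly step could look like; the file paths, library choices (gray-matter, js-yaml), and the computeMetrics heuristics are illustrative assumptions, not the site's actual build code. Only the output shape is taken from the record above.

```typescript
// Hypothetical sketch of the build-time merge described above.
// Paths, helper names, and heuristics are assumptions for illustration.
import { readFileSync, writeFileSync } from "node:fs";
import matter from "gray-matter"; // splits MDX frontmatter from the body
import { load } from "js-yaml";   // parses the Entity YAML sidecar

interface PageMetrics {
  wordCount: number;
  tableCount: number;
  internalLinks: number;
  // ...the real pipeline also computes bulletRatio, structuralScore, etc.
}

// Crude stand-ins for the site's real analyzers.
function computeMetrics(body: string): PageMetrics {
  return {
    wordCount: body.split(/\s+/).filter(Boolean).length,
    // one header-separator row (|---|---|) per markdown table
    tableCount: (body.match(/^\|[\s:|-]+\|$/gm) ?? []).length,
    internalLinks: (body.match(/\]\(\/knowledge-base\//g) ?? []).length,
  };
}

const mdx = readFileSync("knowledge-base/organizations/mats.mdx", "utf8");
const { data: frontmatter, content } = matter(mdx);
const entity = load(readFileSync("entities/mats.yaml", "utf8")) as object;

// Merge order assumed here: frontmatter first, entity fields override,
// computed metrics attached under "metrics" as in the record above.
const record = { ...frontmatter, ...entity, metrics: computeMetrics(content) };

writeFileSync("database.json", JSON.stringify(record, null, 2));
```

The single-file write is a simplification; database.json presumably aggregates one such record per page in the knowledge base.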
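The coverage.items statuses also look mechanically derivable from the targets and actuals in the record: every counted pair is consistent with a simple three-way threshold (tables 2 of 10 is amber, diagrams 0 of 1 is red, internalLinks 29 of 20 is green), and counting the eight green items reproduces "passing": 8 of 13. Boolean items like editHistory presumably pass or fail directly. The rule below is inferred from that data, not documented behavior.

```typescript
type Status = "green" | "amber" | "red";

// Inferred rule: meeting the target is green, a nonzero shortfall is amber,
// a zero count is red. Consistent with every target/actual pair shown above.
function coverageStatus(actual: number, target: number): Status {
  if (actual >= target) return "green";
  if (actual > 0) return "amber";
  return "red";
}

// Spot checks against the record:
// tables:        coverageStatus(2, 10)  -> "amber"
// diagrams:      coverageStatus(0, 1)   -> "red"
// internalLinks: coverageStatus(29, 20) -> "green"
// externalLinks: coverageStatus(3, 12)  -> "amber"
// footnotes:     coverageStatus(0, 7)   -> "red"
// references:    coverageStatus(24, 7)  -> "green"
```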
External Links
No external links
Backlinks (2)
| id | title | type | relationship |
|---|---|---|---|
| short-timeline-policy-implications | Short Timeline Policy Implications | analysis | — |
| safety-orgs-overview | AI Safety Organizations (Overview) | concept | — |