AI Safety Training Programs
ID: training-programs
Entity type: approach
Path: /knowledge-base/responses/training-programs/
Entity ID (EID): E468

Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time. A sketch of this merge step follows the record below.
{
  "id": "training-programs",
  "numericId": null,
  "path": "/knowledge-base/responses/training-programs/",
  "filePath": "knowledge-base/responses/training-programs.mdx",
  "title": "AI Safety Training Programs",
  "quality": 70,
  "readerImportance": 56,
  "researchImportance": 24.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive guide to AI safety training programs including MATS (78% alumni in alignment work, 100+ scholars annually), Anthropic Fellows ($2,100/week stipend, 40%+ hired full-time), LASR Labs (5 NeurIPS papers in 2024), and academic pathways. BlueDot Impact has trained 7,000+ people since 2022, with hundreds now working in AI safety. Provides concrete application criteria, timing recommendations, and structured self-study pathways with 1-5 year timeline to research contribution.",
  "description": "Fellowships, PhD programs, research mentorship, and career transition pathways for growing the AI safety research workforce, including MATS, Anthropic Fellows, SPAR, and academic programs.",
  "ratings": {
    "novelty": 3.5,
    "rigor": 5,
    "actionability": 7.5,
    "completeness": 6.5
  },
  "category": "responses",
  "subcategory": "field-building",
  "clusters": [
    "community",
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 2220,
    "tableCount": 17,
    "diagramCount": 1,
    "internalLinks": 25,
    "externalLinks": 30,
    "footnoteCount": 0,
    "bulletRatio": 0.13,
    "sectionCount": 30,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 2220,
  "unconvertedLinks": [
    {
      "text": "MATS",
      "url": "https://www.matsprogram.org/",
      "resourceId": "ba3a8bd9c8404d7b",
      "resourceTitle": "MATS Research Program"
    },
    {
      "text": "BlueDot Impact",
      "url": "https://bluedot.org/",
      "resourceId": "a2101cb75434037d",
      "resourceTitle": "BlueDot Impact"
    },
    {
      "text": "MATS (ML Alignment Theory Scholars)",
      "url": "https://www.matsprogram.org/",
      "resourceId": "ba3a8bd9c8404d7b",
      "resourceTitle": "MATS Research Program"
    },
    {
      "text": "78% of surveyed alumni now working in AI alignment",
      "url": "https://www.lesswrong.com/posts/jeBkx6agMuBCQW94C/mats-alumni-impact-analysis",
      "resourceId": "34adf176d8299b24",
      "resourceTitle": "MATS Alumni Impact Analysis"
    },
    {
      "text": "Fellows Program",
      "url": "https://alignment.anthropic.com/2024/anthropic-fellows-program/",
      "resourceId": "94c867557cf1e654",
      "resourceTitle": "Anthropic Fellows Program"
    },
    {
      "text": "BlueDot Impact has trained over 7,000 people since 2022",
      "url": "https://bluedot.org/",
      "resourceId": "a2101cb75434037d",
      "resourceTitle": "BlueDot Impact"
    },
    {
      "text": "MATS has supported 298 scholars and 75 mentors",
      "url": "https://www.matsprogram.org/",
      "resourceId": "ba3a8bd9c8404d7b",
      "resourceTitle": "MATS Research Program"
    },
    {
      "text": "78% now working in AI alignment",
      "url": "https://www.lesswrong.com/posts/jeBkx6agMuBCQW94C/mats-alumni-impact-analysis",
      "resourceId": "34adf176d8299b24",
      "resourceTitle": "MATS Alumni Impact Analysis"
    },
    {
      "text": "Nina Rimsky received an Outstanding Paper Award at ACL 2024",
      "url": "https://www.lesswrong.com/posts/jeBkx6agMuBCQW94C/mats-alumni-impact-analysis",
      "resourceId": "34adf176d8299b24",
      "resourceTitle": "MATS Alumni Impact Analysis"
    },
    {
      "text": "Anthropic Fellows Program",
      "url": "https://alignment.anthropic.com/2024/anthropic-fellows-program/",
      "resourceId": "94c867557cf1e654",
      "resourceTitle": "Anthropic Fellows Program"
    },
    {
      "text": "Over 80% published papers; 40%+ joined Anthropic full-time",
      "url": "https://alignment.anthropic.com/2025/anthropic-fellows-program-2026/",
      "resourceId": "e65e76531931acc2",
      "resourceTitle": "Anthropic Fellows Program"
    },
    {
      "text": "SPAR",
      "url": "https://sparai.org/",
      "resourceId": "f566780364336e37",
      "resourceTitle": "SPAR - Research Program for AI Risks"
    },
    {
      "text": "SPAR research has been accepted at ICML and NeurIPS, covered by TIME, and led to full-time job offers",
      "url": "https://sparai.org/",
      "resourceId": "f566780364336e37",
      "resourceTitle": "SPAR - Research Program for AI Risks"
    },
    {
      "text": "ARENA",
      "url": "https://www.arena.education/",
      "resourceId": "a1298425a282f519",
      "resourceTitle": "ARENA"
    },
    {
      "text": "BlueDot Impact",
      "url": "https://bluedot.org/",
      "resourceId": "a2101cb75434037d",
      "resourceTitle": "BlueDot Impact"
    },
    {
      "text": "matsprogram.org",
      "url": "https://www.matsprogram.org/",
      "resourceId": "ba3a8bd9c8404d7b",
      "resourceTitle": "MATS Research Program"
    },
    {
      "text": "Alumni Impact Analysis (2024)",
      "url": "https://www.lesswrong.com/posts/jeBkx6agMuBCQW94C/mats-alumni-impact-analysis",
      "resourceId": "34adf176d8299b24",
      "resourceTitle": "MATS Alumni Impact Analysis"
    },
    {
      "text": "alignment.anthropic.com",
      "url": "https://alignment.anthropic.com/2024/anthropic-fellows-program/",
      "resourceId": "94c867557cf1e654",
      "resourceTitle": "Anthropic Fellows Program"
    },
    {
      "text": "2026 cohort applications",
      "url": "https://alignment.anthropic.com/2025/anthropic-fellows-program-2026/",
      "resourceId": "e65e76531931acc2",
      "resourceTitle": "Anthropic Fellows Program"
    },
    {
      "text": "sparai.org",
      "url": "https://sparai.org/",
      "resourceId": "f566780364336e37",
      "resourceTitle": "SPAR - Research Program for AI Risks"
    },
    {
      "text": "bluedot.org",
      "url": "https://bluedot.org/",
      "resourceId": "a2101cb75434037d",
      "resourceTitle": "BlueDot Impact"
    },
    {
      "text": "arena.education",
      "url": "https://www.arena.education/",
      "resourceId": "a1298425a282f519",
      "resourceTitle": "ARENA"
    }
  ],
  "unconvertedLinkCount": 22,
  "convertedLinkCount": 0,
  "backlinkCount": 4,
  "hallucinationRisk": {
    "level": "medium",
    "score": 45,
    "factors": [
      "no-citations",
      "conceptual-content"
    ]
  },
  "entityType": "approach",
  "redundancy": {
    "maxSimilarity": 14,
    "similarPages": [
      {
        "id": "mats",
        "title": "MATS ML Alignment Theory Scholars program",
        "path": "/knowledge-base/organizations/mats/",
        "similarity": 14
      },
      {
        "id": "field-building-analysis",
        "title": "AI Safety Field Building Analysis",
        "path": "/knowledge-base/responses/field-building-analysis/",
        "similarity": 14
      },
      {
        "id": "technical-research",
        "title": "Technical AI Safety Research",
        "path": "/knowledge-base/responses/technical-research/",
        "similarity": 14
      },
      {
        "id": "govai",
        "title": "GovAI",
        "path": "/knowledge-base/organizations/govai/",
        "similarity": 13
      },
      {
        "id": "capabilities-to-safety-pipeline",
        "title": "Capabilities-to-Safety Pipeline Model",
        "path": "/knowledge-base/models/capabilities-to-safety-pipeline/",
        "similarity": 12
      }
    ]
  },
  "coverage": {
    "passing": 9,
    "total": 13,
    "targets": {
      "tables": 9,
      "diagrams": 1,
      "internalLinks": 18,
      "externalLinks": 11,
      "footnotes": 7,
      "references": 7
    },
    "actuals": {
      "tables": 17,
      "diagrams": 1,
      "internalLinks": 25,
      "externalLinks": 30,
      "footnotes": 0,
      "references": 7,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "green",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:3.5 R:5 A:7.5 C:6.5"
  },
  "readerRank": 260,
  "researchRank": 457,
  "recommendedScore": 189.7
}
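The build-time merge named above is not shown in code anywhere on this page, so the following TypeScript sketch illustrates one way a record like this could be assembled. Everything here is an assumption for illustration: the function name buildPageRecord, the choice of gray-matter and js-yaml as parsers, and the metric heuristics are hypothetical, not the site's actual implementation.

```typescript
// Hypothetical sketch of the build step: merge MDX frontmatter, entity YAML,
// and computed metrics into one page record. Not the site's real code.
import { readFileSync } from "node:fs";
import matter from "gray-matter"; // parses frontmatter into { data, content }
import { load } from "js-yaml";

type PageRecord = Record<string, unknown>;

function buildPageRecord(mdxPath: string, entityYamlPath: string): PageRecord {
  // 1. Authored fields (title, description, ratings, ...) from MDX frontmatter.
  const { data: frontmatter, content } = matter(readFileSync(mdxPath, "utf8"));

  // 2. Stable identity fields (EID, entityType, ...) from the entity YAML.
  const entity = load(readFileSync(entityYamlPath, "utf8")) as Record<string, unknown>;

  // 3. Metrics computed from the MDX body; these regexes are guesses at
  //    plausible counters, not the real ones.
  const metrics = {
    wordCount: content.split(/\s+/).filter(Boolean).length,
    internalLinks: (content.match(/\]\(\/knowledge-base\//g) ?? []).length,
    externalLinks: (content.match(/\]\(https?:\/\//g) ?? []).length,
  };

  // Later spreads win on key collisions, matching the stated merge order:
  // MDX frontmatter + Entity YAML + computed metrics.
  return { ...frontmatter, ...entity, metrics, wordCount: metrics.wordCount };
}
```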
External Links

{
  "eaForum": "https://forum.effectivealtruism.org/topics/research-training-programs"
}

Backlinks (4)
| id | title | type | relationship |
|---|---|---|---|
| field-building-analysis | AI Safety Field Building Analysis | approach | — |
| ai-for-human-reasoning-fellowship | AI for Human Reasoning Fellowship | approach | — |
| __index__/knowledge-base/responses | Safety Responses | concept | — |
| expertise-atrophy | AI-Induced Expertise Atrophy | risk | — |
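A note on the coverage block inside the record above: the green/red statuses appear to follow a simple threshold rule, where a numeric check passes when its actual count meets the target (tables at 17 against a target of 9 is green; footnotes at 0 against 7 is red). The sketch below encodes that inferred rule; it is an assumption read off the data, not documented behavior, and it ignores the non-numeric checks such as editHistory and accuracy.

```typescript
// Plausible coverage check inferred from the record's numbers; the rule
// "actual >= target => green" is an assumption, not documented behavior.
type Status = "green" | "red";

function checkCoverage(
  targets: Record<string, number>,
  actuals: Record<string, number>,
): { passing: number; total: number; items: Record<string, Status> } {
  const items: Record<string, Status> = {};
  for (const [key, target] of Object.entries(targets)) {
    items[key] = (actuals[key] ?? 0) >= target ? "green" : "red";
  }
  const passing = Object.values(items).filter((s) => s === "green").length;
  return { passing, total: Object.keys(items).length, items };
}

// Applied to this record's numeric targets: only footnotes (0 of 7) fails.
checkCoverage(
  { tables: 9, diagrams: 1, internalLinks: 18, externalLinks: 11, footnotes: 7, references: 7 },
  { tables: 17, diagrams: 1, internalLinks: 25, externalLinks: 30, footnotes: 0, references: 7 },
);
```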
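Similarly, the redundancy scores in the record (12 to 14, on what looks like a 0-100 scale) could be produced by many methods, and the record does not say which. The Jaccard token-overlap function below is purely illustrative of how such a percentage might be computed between two pages' texts; it is not the site's actual similarity measure.

```typescript
// Illustrative only: Jaccard overlap of word sets as a 0-100 similarity score.
function similarityPercent(a: string, b: string): number {
  const tokens = (s: string) =>
    new Set(s.toLowerCase().split(/\W+/).filter(Boolean));
  const ta = tokens(a);
  const tb = tokens(b);
  const shared = [...ta].filter((t) => tb.has(t)).length;
  const union = new Set([...ta, ...tb]).size;
  return union === 0 ? 0 : Math.round((100 * shared) / union);
}
```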