AI Alignment Research Agenda Comparison
research-agendas (crux)
Path: /knowledge-base/responses/research-agendas/
Entity ID (EID): E251
Page Record
database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
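Before the record itself, a rough sketch of how such a record could be assembled at build time, assuming a Node build step that uses gray-matter for the MDX frontmatter and js-yaml for the Entity YAML. The file locations, field names, and metric heuristics below are illustrative assumptions, not the site's actual pipeline.

```typescript
// Hypothetical build step: merge MDX frontmatter, Entity YAML, and computed
// metrics into one page record of the kind stored in database.json.
// Paths, field names, and metric logic are assumptions for illustration.
import fs from "node:fs";
import matter from "gray-matter"; // parses MDX frontmatter
import { load } from "js-yaml";   // parses the Entity YAML file

interface PageRecord {
  id: string;
  path: string;
  filePath: string;
  title: string;
  wordCount: number;
  internalLinks: number;
  [key: string]: unknown; // remaining frontmatter / entity fields
}

function buildPageRecord(mdxPath: string, entityYamlPath: string): PageRecord {
  const { data: frontmatter, content } = matter(fs.readFileSync(mdxPath, "utf8"));
  const entity = load(fs.readFileSync(entityYamlPath, "utf8")) as Record<string, unknown>;

  // Computed metrics (simplified): the real pipeline also counts tables,
  // diagrams, footnotes, section headings, etc.
  const wordCount = content.split(/\s+/).filter(Boolean).length;
  const internalLinks = (content.match(/\]\(\/knowledge-base\//g) ?? []).length;

  // Entity YAML fields override frontmatter where both define a key;
  // computed metrics are written last so they always reflect the content.
  return {
    ...frontmatter,
    ...entity,
    id: String(frontmatter.id ?? ""),
    path: String(frontmatter.path ?? ""),
    filePath: mdxPath,
    title: String(frontmatter.title ?? ""),
    wordCount,
    internalLinks,
  };
}

// Hypothetical usage (the entity YAML path is invented for the example):
// const record = buildPageRecord(
//   "knowledge-base/responses/research-agendas.mdx",
//   "entities/E251.yaml",
// );
```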
{
"id": "research-agendas",
"numericId": null,
"path": "/knowledge-base/responses/research-agendas/",
"filePath": "knowledge-base/responses/research-agendas.mdx",
"title": "AI Alignment Research Agenda Comparison",
"quality": 69,
"readerImportance": 57.5,
"researchImportance": 27.5,
"tacticalValue": null,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "Comprehensive comparison of major AI safety research agendas (\\$100M+ Anthropic, \\$50M+ DeepMind, \\$5-10M nonprofits) with detailed funding, team sizes, and failure mode coverage (25-65% per agenda). Estimates 40-60% probability current approaches scale to superhuman AI, with portfolio diversification critical given no single agenda covers all major risks.",
"description": "Analysis of major AI safety research agendas comparing approaches from Anthropic (\\$100M+ annual safety budget, 37-39% team growth), DeepMind (30-50 researchers), ARC, Redwood, and MIRI. Estimates 40-60% probability that current approaches scale to superhuman AI, with portfolio allocation across near-term control, medium-term oversight, and foundational theory.",
"ratings": {
"novelty": 4.2,
"rigor": 6.8,
"actionability": 7.3,
"completeness": 7.5
},
"category": "responses",
"subcategory": "alignment",
"clusters": [
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 4279,
"tableCount": 7,
"diagramCount": 1,
"internalLinks": 61,
"externalLinks": 0,
"footnoteCount": 0,
"bulletRatio": 0.06,
"sectionCount": 26,
"hasOverview": true,
"structuralScore": 12
},
"suggestedQuality": 80,
"updateFrequency": 21,
"evergreen": true,
"wordCount": 4279,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 39,
"backlinkCount": 0,
"hallucinationRisk": {
"level": "medium",
"score": 50,
"factors": [
"no-citations",
"few-external-sources",
"conceptual-content"
]
},
"entityType": "crux",
"redundancy": {
"maxSimilarity": 22,
"similarPages": [
{
"id": "scalable-oversight",
"title": "Scalable Oversight",
"path": "/knowledge-base/responses/scalable-oversight/",
"similarity": 22
},
{
"id": "technical-research",
"title": "Technical AI Safety Research",
"path": "/knowledge-base/responses/technical-research/",
"similarity": 21
},
{
"id": "reasoning",
"title": "Reasoning and Planning",
"path": "/knowledge-base/capabilities/reasoning/",
"similarity": 20
},
{
"id": "anthropic-core-views",
"title": "Anthropic Core Views",
"path": "/knowledge-base/responses/anthropic-core-views/",
"similarity": 20
},
{
"id": "accident-risks",
"title": "AI Accident Risk Cruxes",
"path": "/knowledge-base/cruxes/accident-risks/",
"similarity": 19
}
]
},
"coverage": {
"passing": 6,
"total": 13,
"targets": {
"tables": 17,
"diagrams": 2,
"internalLinks": 34,
"externalLinks": 21,
"footnotes": 13,
"references": 13
},
"actuals": {
"tables": 7,
"diagrams": 1,
"internalLinks": 61,
"externalLinks": 0,
"footnotes": 0,
"references": 24,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "amber",
"diagrams": "amber",
"internalLinks": "green",
"externalLinks": "red",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:4.2 R:6.8 A:7.3 C:7.5"
},
"readerRank": 248,
"researchRank": 435,
"recommendedScore": 188.61
}
External Links
{
"lesswrong": "https://www.lesswrong.com/tag/research-agendas",
"eaForum": "https://forum.effectivealtruism.org/topics/research-agendas-questions-and-project-lists"
}
Backlinks (0)
No backlinks
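For the coverage block in the page record above, the sketch below is one plausible reading of how the green/amber/red statuses and the passing count could be derived from targets and actuals. The thresholds are assumptions chosen to reproduce the statuses shown for the numeric targets on this page, not documented behaviour of the site.

```typescript
// Hypothetical reconstruction of the coverage roll-up: compare actuals
// against targets, classify each item, and count "passing" items.
// Thresholds (>= 100% green, >= 40% amber, else red) are assumptions.
type Status = "green" | "amber" | "red";

function classify(actual: number, target: number): Status {
  if (target <= 0) return "green"; // nothing required
  const ratio = actual / target;
  if (ratio >= 1) return "green";
  if (ratio >= 0.4) return "amber";
  return "red";
}

function coverageSummary(
  targets: Record<string, number>,
  actuals: Record<string, number>,
): { items: Record<string, Status>; passing: number; total: number } {
  const items: Record<string, Status> = {};
  for (const key of Object.keys(targets)) {
    items[key] = classify(actuals[key] ?? 0, targets[key]);
  }
  const passing = Object.values(items).filter((s) => s === "green").length;
  return { items, passing, total: Object.keys(items).length };
}

// Example with this page's numbers:
const { items, passing } = coverageSummary(
  { tables: 17, diagrams: 2, internalLinks: 34, externalLinks: 21, footnotes: 13, references: 13 },
  { tables: 7, diagrams: 1, internalLinks: 61, externalLinks: 0, footnotes: 0, references: 24 },
);
console.log(items, passing);
// items   => { tables: "amber", diagrams: "amber", internalLinks: "green",
//              externalLinks: "red", footnotes: "red", references: "green" }
// passing => 2 of the 6 numeric targets; the page's reported 6 of 13 also
// counts non-numeric checks (llmSummary, schedule, entity, overview, ...)
// that are not modelled here.
```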