Dario Amodei
ID: dario-amodei · Type: person · Path: /knowledge-base/people/dario-amodei/
Entity ID (EID): E91
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
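A minimal TypeScript sketch of the build-time assembly this caption describes, purely illustrative: the function name, parameter shapes, and merge precedence below are assumptions, not the real pipeline; only the three inputs (MDX frontmatter, Entity YAML, computed metrics) come from the description above.

```typescript
// Illustrative sketch of the build-time merge that produces one
// database.json page record. Shapes and precedence are assumptions.

interface PageRecord {
  id: string;
  title: string;
  metrics?: Record<string, number | boolean>;
  [key: string]: unknown;
}

function buildPageRecord(
  frontmatter: Record<string, unknown>, // parsed from the page's .mdx file
  entityYaml: Record<string, unknown>,  // per-entity metadata (e.g. entityType)
  metrics: Record<string, number | boolean>, // derived from the rendered page
): PageRecord {
  // Assumed precedence: entity YAML overrides frontmatter on collisions,
  // and computed metrics are nested under "metrics" rather than merged flat,
  // so they never clobber authored fields.
  return { ...frontmatter, ...entityYaml, metrics } as PageRecord;
}
```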
{
"id": "dario-amodei",
"numericId": null,
"path": "/knowledge-base/people/dario-amodei/",
"filePath": "knowledge-base/people/dario-amodei.mdx",
"title": "Dario Amodei",
"quality": 41,
"readerImportance": 31,
"researchImportance": 36,
"tacticalValue": null,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "Comprehensive biographical profile of Anthropic CEO Dario Amodei documenting his competitive safety development philosophy, 10-25% catastrophic risk estimate, 2026-2030 AGI timeline, and Constitutional AI approach. Documents technical contributions (Constitutional AI, RSP framework with ASL-1 through ASL-5 levels) and positions in key debates with pause advocates and accelerationists.",
"description": "CEO of Anthropic advocating competitive safety development philosophy with Constitutional AI, responsible scaling policies, and empirical alignment research. Estimates 10-25% catastrophic risk with AGI timeline 2026-2030.",
"ratings": {
"novelty": 2,
"rigor": 4.5,
"actionability": 2,
"completeness": 6
},
"category": "people",
"subcategory": "lab-leadership",
"clusters": [
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 2573,
"tableCount": 16,
"diagramCount": 0,
"internalLinks": 65,
"externalLinks": 0,
"footnoteCount": 0,
"bulletRatio": 0.27,
"sectionCount": 41,
"hasOverview": true,
"structuralScore": 11
},
"suggestedQuality": 73,
"updateFrequency": 7,
"evergreen": true,
"wordCount": 2573,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 25,
"backlinkCount": 64,
"hallucinationRisk": {
"level": "high",
"score": 80,
"factors": [
"biographical-claims",
"no-citations",
"few-external-sources"
]
},
"entityType": "person",
"redundancy": {
"maxSimilarity": 17,
"similarPages": [
{
"id": "anthropic-core-views",
"title": "Anthropic Core Views",
"path": "/knowledge-base/responses/anthropic-core-views/",
"similarity": 17
},
{
"id": "why-alignment-easy",
"title": "Why Alignment Might Be Easy",
"path": "/knowledge-base/debates/why-alignment-easy/",
"similarity": 15
},
{
"id": "arc",
"title": "ARC (Alignment Research Center)",
"path": "/knowledge-base/organizations/arc/",
"similarity": 15
},
{
"id": "metr",
"title": "METR",
"path": "/knowledge-base/organizations/metr/",
"similarity": 15
},
{
"id": "technical-research",
"title": "Technical AI Safety Research",
"path": "/knowledge-base/responses/technical-research/",
"similarity": 15
}
]
},
"changeHistory": [
{
"date": "2026-02-26",
"branch": "claude/claims-driven-improvements",
"title": "Auto-improve (standard): Dario Amodei",
"summary": "Improved \"Dario Amodei\" via standard pipeline (279.1s). Quality score: 81. Issues resolved: Section 'Evolution of Views and Learning' and parts of 'Over; Section 'Industry Impact and Legacy > Anthropic's Market Pos; Section 'Current Research Directions > Mechanistic Interpret.",
"duration": "279.1s",
"cost": "$5-8"
},
{
"date": "2026-02-18",
"branch": "claude/audit-webpage-errors-X4jHg",
"title": "Audit wiki pages for factual errors and hallucinations",
"summary": "Systematic audit of ~20 wiki pages for factual errors, hallucinations, and inconsistencies. Found and fixed 25+ confirmed errors across 17 pages, including wrong dates, fabricated statistics, false attributions, missing major events, broken entity references, misattributed techniques, and internal inconsistencies."
},
{
"date": "2026-02-18",
"branch": "claude/audit-webpage-errors-11sSF",
"title": "Fix factual errors found in wiki audit",
"summary": "Systematically audited ~35+ high-risk wiki pages for factual errors and hallucinations using parallel background agents plus direct reading. Fixed 13 confirmed errors across 11 files."
}
],
"coverage": {
"passing": 8,
"total": 13,
"targets": {
"tables": 10,
"diagrams": 1,
"internalLinks": 21,
"externalLinks": 13,
"footnotes": 8,
"references": 8
},
"actuals": {
"tables": 16,
"diagrams": 0,
"internalLinks": 65,
"externalLinks": 0,
"footnotes": 0,
"references": 16,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "green",
"overview": "green",
"tables": "green",
"diagrams": "red",
"internalLinks": "green",
"externalLinks": "red",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"editHistoryCount": 3,
"ratingsString": "N:2 R:4.5 A:2 C:6"
},
"readerRank": 444,
"researchRank": 371,
"recommendedScore": 119.31
}
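One plausible reading of the coverage block is a simple per-item threshold rule (actual at or above target passes). That rule is an inference, not documented, but it reproduces the statuses shown for the six numeric items (tables 16/10 green, diagrams 0/1 red, externalLinks 0/13 red, and so on), with the remaining boolean items (llmSummary, schedule, entity, editHistory, overview, references-style checks) presumably filling out the 8-of-13 passing count. A sketch under that assumption:

```typescript
// Sketch: derive per-item green/red coverage from targets vs. actuals.
// The threshold rule (actual >= target passes) is an assumption that
// happens to reproduce the record above.

type Status = "green" | "red";

function coverageStatuses(
  targets: Record<string, number>,
  actuals: Record<string, number>,
): Record<string, Status> {
  const items: Record<string, Status> = {};
  for (const [key, target] of Object.entries(targets)) {
    items[key] = (actuals[key] ?? 0) >= target ? "green" : "red";
  }
  return items;
}

// Numbers taken directly from the coverage block above.
const items = coverageStatuses(
  { tables: 10, diagrams: 1, internalLinks: 21, externalLinks: 13, footnotes: 8, references: 8 },
  { tables: 16, diagrams: 0, internalLinks: 65, externalLinks: 0, footnotes: 0, references: 16 },
);
const numericPassing = Object.values(items).filter((s) => s === "green").length; // 3 of 6
```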
External Links
{
"wikipedia": "https://en.wikipedia.org/wiki/Dario_Amodei",
"eaForum": "https://forum.effectivealtruism.org/topics/dario-amodei",
"wikidata": "https://www.wikidata.org/wiki/Q103335665"
}

Backlinks (64)
| id | title | type | relationship |
|---|---|---|---|
| agi-timeline | AGI Timeline | concept | — |
| anthropic-government-standoff | Anthropic-Pentagon Standoff (2026) | event | — |
| anthropic-stakeholders | Anthropic Stakeholders | table | — |
| long-term-benefit-trust | Long-Term Benefit Trust (Anthropic) | analysis | — |
| anthropic-ipo | Anthropic IPO | analysis | — |
| anthropic-pledge-enforcement | Anthropic Founder Pledges: Interventions to Increase Follow-Through | analysis | — |
| anthropic-pre-ipo-daf-transfers | Anthropic Pre-IPO DAF Transfers | analysis | — |
| anthropic | Anthropic | organization | leads-to |
| palisade-research | Palisade Research | organization | — |
| goodfire | Goodfire | organization | — |
| chris-olah | Chris Olah | person | — |
| jan-leike | Jan Leike | person | — |
| david-sacks | David Sacks (White House AI Czar) | person | — |
| self-improvement | Self-Improvement and Recursive Enhancement | capability | — |
| accident-risks | AI Accident Risk Cruxes | crux | — |
| case-against-xrisk | The Case AGAINST AI Existential Risk | argument | — |
| open-vs-closed | Open vs Closed Source AI | crux | — |
| pause-debate | Should We Pause AI Development? | crux | — |
| scaling-debate | Is Scaling All You Need? | crux | — |
| why-alignment-easy | Why Alignment Might Be Easy | argument | — |
| why-alignment-hard | Why Alignment Might Be Hard | argument | — |
| agi-development | AGI Development | concept | — |
| deep-learning-era | Deep Learning Revolution (2012-2020) | historical | — |
| ea-longtermist-wins-losses | EA and Longtermist Wins and Losses | concept | — |
| mainstream-era | Mainstream Era (2020-Present) | historical | — |
| dense-transformers | Dense Transformers | concept | — |
| ai-timelines | AI Timelines | concept | — |
| power-seeking-conditions | Power-Seeking Emergence Conditions Model | analysis | — |
| racing-dynamics-impact | Racing Dynamics Impact Model | analysis | — |
| scaling-laws | AI Scaling Laws | concept | — |
| warning-signs-model | Warning Signs Model | analysis | — |
| anthropic-investors | Anthropic (Funder) | analysis | — |
| anthropic-valuation | Anthropic Valuation Analysis | analysis | — |
| cais | CAIS (Center for AI Safety) | organization | — |
| fli | Future of Life Institute (FLI) | organization | — |
| ftx-collapse-ea-funding-lessons | FTX Collapse: Lessons for EA Funding Resilience | concept | — |
| lionheart-ventures | Lionheart Ventures | organization | — |
| dan-hendrycks | Dan Hendrycks | person | — |
| daniela-amodei | Daniela Amodei | person | — |
| elon-musk | Elon Musk (AI Industry) | person | — |
| geoffrey-hinton | Geoffrey Hinton | person | — |
| helen-toner | Helen Toner | person | — |
| holden-karnofsky | Holden Karnofsky | person | — |
| __index__/knowledge-base/people | People | concept | — |
| paul-christiano | Paul Christiano | person | — |
| yann-lecun | Yann LeCun | person | — |
| alignment | AI Alignment | approach | — |
| constitutional-ai | Constitutional AI | approach | — |
| coordination-mechanisms | International Coordination Mechanisms | policy | — |
| corporate | Corporate AI Safety Responses | approach | — |
| ea-biosecurity-scope | Is EA Biosecurity Work Limited to Restricting LLM Biological Use? | analysis | — |
| evaluation | AI Evaluation | approach | — |
| field-building-analysis | AI Safety Field Building Analysis | approach | — |
| governance-policy | AI Governance and Policy | crux | — |
| lab-culture | AI Lab Safety Culture | approach | — |
| mech-interp | Mechanistic Interpretability | approach | — |
| scalable-oversight | Scalable Oversight | safety-agenda | — |
| seoul-declaration | Seoul AI Safety Summit Declaration | policy | — |
| ai-welfare | AI Welfare and Digital Minds | concept | — |
| bioweapons | Bioweapons | risk | — |
| compute-concentration | Compute Concentration | risk | — |
| existential-risk | Existential Risk from AI | concept | — |
| lock-in | AI Value Lock-in | risk | — |
| optimistic | Optimistic Alignment Worldview | concept | — |
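If the table above is generated the obvious way, it is the inversion of each page's outgoing internal links: the record's convertedLinkCount counts this page's outgoing edges, while backlinkCount counts the incoming ones listed here. A minimal sketch of that inversion, with hypothetical field names:

```typescript
// Sketch: a page's backlinks as the inverse of every other page's
// outgoing internal links. The "links" field is an assumption; the
// record itself only exposes convertedLinkCount and backlinkCount.

interface PageStub {
  id: string;
  title: string;
  links: string[]; // ids of pages this page links out to
}

function backlinksFor(targetId: string, pages: PageStub[]): PageStub[] {
  return pages.filter((p) => p.id !== targetId && p.links.includes(targetId));
}

// backlinksFor("dario-amodei", allPages).length would give the
// backlinkCount of 64 reported in the record.
```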