Longterm Wiki

Dario Amodei

dario-amodei · person · Path: /knowledge-base/people/dario-amodei/
Entity ID (EID): E91
← Back to page · 64 backlinks · Quality: 41 · Updated: 2026-03-13
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "dario-amodei",
  "numericId": null,
  "path": "/knowledge-base/people/dario-amodei/",
  "filePath": "knowledge-base/people/dario-amodei.mdx",
  "title": "Dario Amodei",
  "quality": 41,
  "readerImportance": 31,
  "researchImportance": 36,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive biographical profile of Anthropic CEO Dario Amodei documenting his competitive safety development philosophy, 10-25% catastrophic risk estimate, 2026-2030 AGI timeline, and Constitutional AI approach. Documents technical contributions (Constitutional AI, RSP framework with ASL-1 through ASL-5 levels) and positions in key debates with pause advocates and accelerationists.",
  "description": "CEO of Anthropic advocating competitive safety development philosophy with Constitutional AI, responsible scaling policies, and empirical alignment research. Estimates 10-25% catastrophic risk with AGI timeline 2026-2030.",
  "ratings": {
    "novelty": 2,
    "rigor": 4.5,
    "actionability": 2,
    "completeness": 6
  },
  "category": "people",
  "subcategory": "lab-leadership",
  "clusters": [
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 2573,
    "tableCount": 16,
    "diagramCount": 0,
    "internalLinks": 65,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.27,
    "sectionCount": 41,
    "hasOverview": true,
    "structuralScore": 11
  },
  "suggestedQuality": 73,
  "updateFrequency": 7,
  "evergreen": true,
  "wordCount": 2573,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 25,
  "backlinkCount": 64,
  "hallucinationRisk": {
    "level": "high",
    "score": 80,
    "factors": [
      "biographical-claims",
      "no-citations",
      "few-external-sources"
    ]
  },
  "entityType": "person",
  "redundancy": {
    "maxSimilarity": 17,
    "similarPages": [
      {
        "id": "anthropic-core-views",
        "title": "Anthropic Core Views",
        "path": "/knowledge-base/responses/anthropic-core-views/",
        "similarity": 17
      },
      {
        "id": "why-alignment-easy",
        "title": "Why Alignment Might Be Easy",
        "path": "/knowledge-base/debates/why-alignment-easy/",
        "similarity": 15
      },
      {
        "id": "arc",
        "title": "ARC (Alignment Research Center)",
        "path": "/knowledge-base/organizations/arc/",
        "similarity": 15
      },
      {
        "id": "metr",
        "title": "METR",
        "path": "/knowledge-base/organizations/metr/",
        "similarity": 15
      },
      {
        "id": "technical-research",
        "title": "Technical AI Safety Research",
        "path": "/knowledge-base/responses/technical-research/",
        "similarity": 15
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-26",
      "branch": "claude/claims-driven-improvements",
      "title": "Auto-improve (standard): Dario Amodei",
      "summary": "Improved \"Dario Amodei\" via standard pipeline (279.1s). Quality score: 81. Issues resolved: Section 'Evolution of Views and Learning' and parts of 'Over; Section 'Industry Impact and Legacy > Anthropic's Market Pos; Section 'Current Research Directions > Mechanistic Interpret.",
      "duration": "279.1s",
      "cost": "$5-8"
    },
    {
      "date": "2026-02-18",
      "branch": "claude/audit-webpage-errors-X4jHg",
      "title": "Audit wiki pages for factual errors and hallucinations",
      "summary": "Systematic audit of ~20 wiki pages for factual errors, hallucinations, and inconsistencies. Found and fixed 25+ confirmed errors across 17 pages, including wrong dates, fabricated statistics, false attributions, missing major events, broken entity references, misattributed techniques, and internal inconsistencies."
    },
    {
      "date": "2026-02-18",
      "branch": "claude/audit-webpage-errors-11sSF",
      "title": "Fix factual errors found in wiki audit",
      "summary": "Systematically audited ~35+ high-risk wiki pages for factual errors and hallucinations using parallel background agents plus direct reading. Fixed 13 confirmed errors across 11 files."
    }
  ],
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 10,
      "diagrams": 1,
      "internalLinks": 21,
      "externalLinks": 13,
      "footnotes": 8,
      "references": 8
    },
    "actuals": {
      "tables": 16,
      "diagrams": 0,
      "internalLinks": 65,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 16,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "green",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "editHistoryCount": 3,
    "ratingsString": "N:2 R:4.5 A:2 C:6"
  },
  "readerRank": 444,
  "researchRank": 371,
  "recommendedScore": 119.31
}
External Links
{
  "wikipedia": "https://en.wikipedia.org/wiki/Dario_Amodei",
  "eaForum": "https://forum.effectivealtruism.org/topics/dario-amodei",
  "wikidata": "https://www.wikidata.org/wiki/Q103335665"
}
Backlinks (64)
id · title · type · relationship
agi-timeline · AGI Timeline · concept
anthropic-government-standoff · Anthropic-Pentagon Standoff (2026) · event
anthropic-stakeholders · Anthropic Stakeholders · table
long-term-benefit-trust · Long-Term Benefit Trust (Anthropic) · analysis
anthropic-ipo · Anthropic IPO · analysis
anthropic-pledge-enforcement · Anthropic Founder Pledges: Interventions to Increase Follow-Through · analysis
anthropic-pre-ipo-daf-transfers · Anthropic Pre-IPO DAF Transfers · analysis
anthropic · Anthropic · organization · leads-to
palisade-research · Palisade Research · organization
goodfire · Goodfire · organization
chris-olah · Chris Olah · person
jan-leike · Jan Leike · person
david-sacks · David Sacks (White House AI Czar) · person
self-improvement · Self-Improvement and Recursive Enhancement · capability
accident-risks · AI Accident Risk Cruxes · crux
case-against-xrisk · The Case AGAINST AI Existential Risk · argument
open-vs-closed · Open vs Closed Source AI · crux
pause-debate · Should We Pause AI Development? · crux
scaling-debate · Is Scaling All You Need? · crux
why-alignment-easy · Why Alignment Might Be Easy · argument
why-alignment-hard · Why Alignment Might Be Hard · argument
agi-development · AGI Development · concept
deep-learning-era · Deep Learning Revolution (2012-2020) · historical
ea-longtermist-wins-losses · EA and Longtermist Wins and Losses · concept
mainstream-era · Mainstream Era (2020-Present) · historical
dense-transformers · Dense Transformers · concept
ai-timelines · AI Timelines · concept
power-seeking-conditions · Power-Seeking Emergence Conditions Model · analysis
racing-dynamics-impact · Racing Dynamics Impact Model · analysis
scaling-laws · AI Scaling Laws · concept
warning-signs-model · Warning Signs Model · analysis
anthropic-investors · Anthropic (Funder) · analysis
anthropic-valuation · Anthropic Valuation Analysis · analysis
cais · CAIS (Center for AI Safety) · organization
fli · Future of Life Institute (FLI) · organization
ftx-collapse-ea-funding-lessons · FTX Collapse: Lessons for EA Funding Resilience · concept
lionheart-ventures · Lionheart Ventures · organization
dan-hendrycks · Dan Hendrycks · person
daniela-amodei · Daniela Amodei · person
elon-musk · Elon Musk (AI Industry) · person
geoffrey-hinton · Geoffrey Hinton · person
helen-toner · Helen Toner · person
holden-karnofsky · Holden Karnofsky · person
__index__/knowledge-base/people · People · concept
paul-christiano · Paul Christiano · person
yann-lecun · Yann LeCun · person
alignment · AI Alignment · approach
constitutional-ai · Constitutional AI · approach
coordination-mechanisms · International Coordination Mechanisms · policy
corporate · Corporate AI Safety Responses · approach
ea-biosecurity-scope · Is EA Biosecurity Work Limited to Restricting LLM Biological Use? · analysis
evaluation · AI Evaluation · approach
field-building-analysis · AI Safety Field Building Analysis · approach
governance-policy · AI Governance and Policy · crux
lab-culture · AI Lab Safety Culture · approach
mech-interp · Mechanistic Interpretability · approach
scalable-oversight · Scalable Oversight · safety-agenda
seoul-declaration · Seoul AI Safety Summit Declaration · policy
ai-welfare · AI Welfare and Digital Minds · concept
bioweapons · Bioweapons · risk
compute-concentration · Compute Concentration · risk
existential-risk · Existential Risk from AI · concept
lock-in · AI Value Lock-in · risk
optimistic · Optimistic Alignment Worldview · concept
Longterm Wiki