Longterm Wiki

Eliezer Yudkowsky

eliezer-yudkowsky · person · Path: /knowledge-base/people/eliezer-yudkowsky/
Entity ID (EID): E114
52 backlinks · Quality: 35 · Updated: 2026-03-13
Page Record: database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time (a merge sketch follows the record below)
{
  "id": "eliezer-yudkowsky",
  "numericId": null,
  "path": "/knowledge-base/people/eliezer-yudkowsky/",
  "filePath": "knowledge-base/people/eliezer-yudkowsky.mdx",
  "title": "Eliezer Yudkowsky",
  "quality": 35,
  "readerImportance": 82,
  "researchImportance": 12,
  "tacticalValue": 75,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive biographical profile of Eliezer Yudkowsky covering his foundational contributions to AI safety (CEV, early problem formulation, agent foundations) and notably pessimistic views on AI risk. Includes detailed 'Statements & Track Record' section analyzing his mixed prediction accuracy—noting early timeline errors, his position on AI generalization in the Hanson debate, and the unfalsifiability of his core doom predictions.",
  "description": "Co-founder of MIRI, early AI safety researcher and rationalist community founder",
  "ratings": {
    "novelty": 3,
    "rigor": 4,
    "actionability": 2,
    "completeness": 6.5
  },
  "category": "people",
  "subcategory": "safety-researchers",
  "clusters": [
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 3238,
    "tableCount": 3,
    "diagramCount": 0,
    "internalLinks": 30,
    "externalLinks": 5,
    "footnoteCount": 0,
    "bulletRatio": 0.25,
    "sectionCount": 29,
    "hasOverview": true,
    "structuralScore": 13
  },
  "suggestedQuality": 87,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 3238,
  "unconvertedLinks": [
    {
      "text": "en.wikipedia.org",
      "url": "https://en.wikipedia.org/wiki/Eliezer_Yudkowsky",
      "resourceId": "d8d60a1c46155a15",
      "resourceTitle": "Eliezer Yudkowsky"
    },
    {
      "text": "[PDF",
      "url": "https://intelligence.org/files/IEM.pdf",
      "resourceId": "a1186c87f23ab9ce",
      "resourceTitle": "Intelligence Explosion Microeconomics"
    }
  ],
  "unconvertedLinkCount": 2,
  "convertedLinkCount": 0,
  "backlinkCount": 52,
  "citationHealth": {
    "total": 10,
    "withQuotes": 9,
    "verified": 9,
    "accuracyChecked": 9,
    "accurate": 5,
    "inaccurate": 0,
    "avgScore": 0.8912713891930051
  },
  "hallucinationRisk": {
    "level": "high",
    "score": 80,
    "factors": [
      "biographical-claims",
      "no-citations",
      "low-quality-score"
    ]
  },
  "entityType": "person",
  "redundancy": {
    "maxSimilarity": 22,
    "similarPages": [
      {
        "id": "miri-era",
        "title": "The MIRI Era (2000-2015)",
        "path": "/knowledge-base/history/miri-era/",
        "similarity": 22
      },
      {
        "id": "deep-learning-era",
        "title": "Deep Learning Revolution (2012-2020)",
        "path": "/knowledge-base/history/deep-learning-era/",
        "similarity": 17
      },
      {
        "id": "early-warnings",
        "title": "Early Warnings (1950s-2000)",
        "path": "/knowledge-base/history/early-warnings/",
        "similarity": 17
      },
      {
        "id": "ai-timelines",
        "title": "AI Timelines",
        "path": "/knowledge-base/models/ai-timelines/",
        "similarity": 17
      },
      {
        "id": "connor-leahy",
        "title": "Connor Leahy",
        "path": "/knowledge-base/people/connor-leahy/",
        "similarity": 17
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-24",
      "branch": "feat/stale-fact-detection-581-582",
      "title": "Batch content fixes + stale-facts validator + 2 new validation rules",
      "summary": "(fill in)",
      "pr": 924,
      "model": "claude-sonnet-4-6"
    },
    {
      "date": "2026-02-23",
      "branch": "feat/batch-improve-high-risk-pages",
      "title": "Auto-improve (standard): Eliezer Yudkowsky",
      "summary": "Improved \"Eliezer Yudkowsky\" via standard pipeline (1505.9s).",
      "duration": "1505.9s",
      "cost": "$5-8"
    },
    {
      "date": "2026-02-18",
      "branch": "claude/fix-issue-240-N5irU",
      "title": "Surface tacticalValue in /wiki table and score 53 pages",
      "summary": "Added `tacticalValue` to `ExploreItem` interface, `getExploreItems()` mappings, the `/wiki` explore table (new sortable \"Tact.\" column), and the card view sort dropdown. Scored 49 new pages with tactical values (4 were already scored), bringing total to 53.",
      "model": "sonnet-4",
      "duration": "~30min"
    }
  ],
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 13,
      "diagrams": 1,
      "internalLinks": 26,
      "externalLinks": 16,
      "footnotes": 10,
      "references": 10
    },
    "actuals": {
      "tables": 3,
      "diagrams": 0,
      "internalLinks": 30,
      "externalLinks": 5,
      "footnotes": 0,
      "references": 5,
      "quotesWithQuotes": 9,
      "quotesTotal": 10,
      "accuracyChecked": 9,
      "accuracyTotal": 10
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "amber",
      "footnotes": "red",
      "references": "amber",
      "quotes": "green",
      "accuracy": "green"
    },
    "editHistoryCount": 3,
    "ratingsString": "N:3 R:4 A:2 C:6.5"
  },
  "readerRank": 73,
  "researchRank": 540,
  "recommendedScore": 132.86
}
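As noted above, the Page Record is assembled at build time by merging MDX frontmatter, the Entity YAML, and computed metrics into database.json. The TypeScript sketch below illustrates one way such a merge could work; the function name, input shapes, path convention, and merge order are assumptions for illustration, not the wiki's actual build code.

// Hypothetical sketch of assembling a Page Record like the one above.
// Field names mirror the record; buildPageRecord and the input shapes are
// assumptions, not the wiki's real build pipeline.

interface Frontmatter {
  title: string;
  description?: string;
  quality?: number;
  lastUpdated?: string;
}

interface EntityYaml {
  entityType?: string;
  category?: string;
  subcategory?: string;
}

interface ComputedMetrics {
  wordCount: number;
  internalLinks: number;
  externalLinks: number;
  backlinkCount: number;
}

type PageRecord = Frontmatter & EntityYaml & {
  id: string;
  path: string;
  filePath: string;
  metrics: ComputedMetrics;
  backlinkCount: number;
};

// Assumed merge order: frontmatter first, entity YAML second, computed
// metrics last, so build-time measurements override hand-written values.
function buildPageRecord(
  id: string,
  filePath: string,
  frontmatter: Frontmatter,
  entity: EntityYaml,
  metrics: ComputedMetrics,
): PageRecord {
  return {
    id,
    filePath,
    // Path convention inferred from the record above; assumed, not confirmed.
    path: `/knowledge-base/people/${id}/`,
    ...frontmatter,
    ...entity,
    metrics,
    backlinkCount: metrics.backlinkCount,
  };
}

// Example with values taken from the record above.
const record = buildPageRecord(
  "eliezer-yudkowsky",
  "knowledge-base/people/eliezer-yudkowsky.mdx",
  { title: "Eliezer Yudkowsky", quality: 35, lastUpdated: "2026-03-13" },
  { entityType: "person", category: "people", subcategory: "safety-researchers" },
  { wordCount: 3238, internalLinks: 30, externalLinks: 5, backlinkCount: 52 },
);
console.log(JSON.stringify(record, null, 2));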
External Links
{
  "wikipedia": "https://en.wikipedia.org/wiki/Eliezer_Yudkowsky",
  "lesswrong": "https://www.lesswrong.com/tag/eliezer-yudkowsky",
  "wikidata": "https://www.wikidata.org/wiki/Q704195",
  "grokipedia": "https://grokipedia.com/page/Eliezer_Yudkowsky"
}
Backlinks (52)
id | title | type | relationship
miri | MIRI | organization |
paul-christiano | Paul Christiano | person |
self-improvement | Self-Improvement and Recursive Enhancement | capability |
accident-risks | AI Accident Risk Cruxes | crux |
case-against-xrisk | The Case AGAINST AI Existential Risk | argument |
case-for-xrisk | The Case FOR AI Existential Risk | argument |
open-vs-closed | Open vs Closed Source AI | crux |
pause-debate | Should We Pause AI Development? | crux |
why-alignment-hard | Why Alignment Might Be Hard | argument |
__index__/knowledge-base/history | History | concept |
miri-era | The MIRI Era (2000-2015) | historical |
ai-timelines | AI Timelines | concept |
power-seeking-conditions | Power-Seeking Emergence Conditions Model | analysis |
worldview-intervention-mapping | Worldview-Intervention Mapping | analysis |
center-for-applied-rationality | Center for Applied Rationality | organization |
community-building-overview | Community Building Organizations (Overview) | concept |
fli | Future of Life Institute (FLI) | organization |
lesswrong | LessWrong | organization |
manifold | Manifold (Prediction Market) | organization |
manifund | Manifund | organization |
pause-ai | Pause AI | organization |
peter-thiel-philanthropy | Peter Thiel (Funder) | organization |
sff | Survival and Flourishing Fund (SFF) | organization |
the-sequences | The Sequences by Eliezer Yudkowsky | organization |
connor-leahy | Connor Leahy | person |
dario-amodei | Dario Amodei | person |
eliezer-yudkowsky-predictions | Eliezer Yudkowsky: Track Record | concept |
elon-musk | Elon Musk (AI Industry) | person |
evan-hubinger | Evan Hubinger | person |
geoffrey-hinton | Geoffrey Hinton | person |
holden-karnofsky | Holden Karnofsky | person |
__index__/knowledge-base/people | People | concept |
jaan-tallinn | Jaan Tallinn | person |
robin-hanson | Robin Hanson | person |
yann-lecun | Yann LeCun | person |
yoshua-bengio | Yoshua Bengio | person |
alignment | AI Alignment | approach |
constitutional-ai | Constitutional AI | approach |
corporate | Corporate AI Safety Responses | approach |
research-agendas | AI Alignment Research Agenda Comparison | crux |
stampy-aisafety-info | Stampy / AISafety.info | project |
timelines-wiki | Timelines Wiki | project |
corrigibility-failure | Corrigibility Failure | risk |
existential-risk | Existential Risk from AI | concept |
instrumental-convergence | Instrumental Convergence | risk |
lock-in | AI Value Lock-in | risk |
sharp-left-turn | Sharp Left Turn | risk |
superintelligence | Superintelligence | concept |
doomer | AI Doomer Worldview | concept |
__index__/knowledge-base/worldviews | Worldviews | concept |
optimistic | Optimistic Alignment Worldview | concept |
similar-projects | Similar Projects to LongtermWiki: Research Report | concept |