Longterm Wiki

Toby Ord

toby-ord · person · Path: /knowledge-base/people/toby-ord/
Entity ID (EID): E355
18 backlinks · Quality: 41 · Updated: 2026-03-13
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "toby-ord",
  "numericId": null,
  "path": "/knowledge-base/people/toby-ord/",
  "filePath": "knowledge-base/people/toby-ord.mdx",
  "title": "Toby Ord",
  "quality": 41,
  "readerImportance": 26,
  "researchImportance": 11.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive biographical profile of Toby Ord documenting his 10% AI extinction estimate and role founding effective altruism, with detailed tables on risk assessments, academic background, and influence metrics. While thorough on his contributions, provides limited original analysis beyond summarizing publicly available information about his work and impact.",
  "description": "Oxford philosopher and author of 'The Precipice' who provided foundational quantitative estimates for existential risks (10% for AI, 1/6 total this century) and philosophical frameworks for long-term thinking that shaped modern AI risk discourse.",
  "ratings": {
    "novelty": 2,
    "rigor": 4.5,
    "actionability": 2,
    "completeness": 6
  },
  "category": "people",
  "subcategory": "ea-figures",
  "clusters": [
    "community",
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 2452,
    "tableCount": 19,
    "diagramCount": 0,
    "internalLinks": 40,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.16,
    "sectionCount": 47,
    "hasOverview": true,
    "structuralScore": 11
  },
  "suggestedQuality": 73,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 2452,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 25,
  "backlinkCount": 18,
  "hallucinationRisk": {
    "level": "high",
    "score": 80,
    "factors": [
      "biographical-claims",
      "no-citations",
      "few-external-sources"
    ]
  },
  "entityType": "person",
  "redundancy": {
    "maxSimilarity": 15,
    "similarPages": [
      {
        "id": "holden-karnofsky",
        "title": "Holden Karnofsky",
        "path": "/knowledge-base/people/holden-karnofsky/",
        "similarity": 15
      },
      {
        "id": "ai-impacts",
        "title": "AI Impacts",
        "path": "/knowledge-base/organizations/ai-impacts/",
        "similarity": 13
      },
      {
        "id": "geoffrey-hinton",
        "title": "Geoffrey Hinton",
        "path": "/knowledge-base/people/geoffrey-hinton/",
        "similarity": 13
      },
      {
        "id": "nick-bostrom",
        "title": "Nick Bostrom",
        "path": "/knowledge-base/people/nick-bostrom/",
        "similarity": 13
      },
      {
        "id": "miri-era",
        "title": "The MIRI Era (2000-2015)",
        "path": "/knowledge-base/history/miri-era/",
        "similarity": 12
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 10,
      "diagrams": 1,
      "internalLinks": 20,
      "externalLinks": 12,
      "footnotes": 7,
      "references": 7
    },
    "actuals": {
      "tables": 19,
      "diagrams": 0,
      "internalLinks": 40,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 21,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:2 R:4.5 A:2 C:6"
  },
  "readerRank": 481,
  "researchRank": 546,
  "recommendedScore": 116.75
}
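
The header above describes this record as a build-time merge of MDX frontmatter, entity YAML, and computed metrics into database.json. The sketch below illustrates one way that merge and the numeric coverage check could look; the file locations, helper names, and metric heuristics are assumptions for illustration, not the wiki's actual build code.

```typescript
// Minimal sketch (all names and paths hypothetical) of the build-time merge:
// MDX frontmatter + entity YAML + computed metrics -> one page record.
import { readFileSync } from "node:fs";
import matter from "gray-matter";          // assumed frontmatter parser
import { parse as parseYaml } from "yaml"; // assumed YAML parser

type Color = "green" | "red";

// Per-page numeric coverage targets, taken from coverage.targets above.
const TARGETS: Record<string, number> = {
  tables: 10,
  diagrams: 1,
  internalLinks: 20,
  externalLinks: 12,
  footnotes: 7,
  references: 7,
};

// A few representative computed fields; the real pipeline presumably tracks
// more (bulletRatio, sectionCount, structuralScore, ...).
function computeMetrics(body: string): Record<string, number> {
  return {
    wordCount: body.split(/\s+/).filter(Boolean).length,
    internalLinks: (body.match(/\]\(\/knowledge-base\//g) ?? []).length,
    externalLinks: (body.match(/\]\(https?:\/\//g) ?? []).length,
    // Count markdown table separator rows (one per table).
    tableCount: (body.match(/^\|[\s:\-|]+\|$/gm) ?? []).length,
  };
}

// Each numeric target passes ("green") when the measured actual meets or
// exceeds it; presence-style items (llmSummary, overview, editHistory, ...)
// are omitted here for brevity.
function coverage(actuals: Record<string, number>) {
  const items: Record<string, Color> = {};
  for (const [key, target] of Object.entries(TARGETS)) {
    items[key] = (actuals[key] ?? 0) >= target ? "green" : "red";
  }
  const passing = Object.values(items).filter((c) => c === "green").length;
  return { passing, total: Object.keys(items).length, items };
}

function buildPageRecord(mdxPath: string, entityYamlPath: string) {
  const { data: frontmatter, content } = matter(readFileSync(mdxPath, "utf8"));
  const entity = parseYaml(readFileSync(entityYamlPath, "utf8"));
  const metrics = computeMetrics(content);
  return {
    ...entity,       // entity YAML fields, e.g. entityType, external links
    ...frontmatter,  // MDX frontmatter, e.g. title, description, ratings
    filePath: mdxPath,
    metrics,
    coverage: coverage(metrics),
  };
}

const record = buildPageRecord(
  "knowledge-base/people/toby-ord.mdx",
  "entities/toby-ord.yaml", // hypothetical location of the entity YAML
);
console.log(record.coverage);
```

Applied to the actuals in the record above, the same meet-or-exceed rule reproduces the green/red pattern for the six numeric targets (tables 19 ≥ 10, internalLinks 40 ≥ 20, and references 21 ≥ 7 pass; diagrams, externalLinks, and footnotes fail at 0). The remaining coverage items are presumably simple presence checks, which together with these yield the 7/13 passing figure shown.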
External Links
{
  "eaForum": "https://forum.effectivealtruism.org/topics/toby-ord",
  "wikidata": "https://www.wikidata.org/wiki/Q7811863",
  "grokipedia": "https://grokipedia.com/page/Toby_Ord"
}
Backlinks (18)
| id | title | type | relationship |
| --- | --- | --- | --- |
| giving-what-we-can | Giving What We Can | organization | |
| holden-karnofsky | Holden Karnofsky | person | |
| nick-bostrom | Nick Bostrom | person | |
| nick-beckstead | Nick Beckstead | person | |
| will-macaskill | Will MacAskill | person | |
| case-for-xrisk | The Case FOR AI Existential Risk | argument | |
| ea-longtermist-wins-losses | EA and Longtermist Wins and Losses | concept | |
| earning-to-give | Earning to Give: The EA Strategy and Its Limits | concept | |
| longtermism-credibility-after-ftx | Longtermism's Philosophical Credibility After FTX | concept | |
| longtermist-value-comparisons | Relative Longtermist Value Comparisons | analysis | |
| cea | Centre for Effective Altruism | organization | |
| fhi | Future of Humanity Institute (FHI) | organization | |
| __index__/knowledge-base/people | People | concept | |
| governance-policy | AI Governance and Policy | crux | |
| bioweapons | Bioweapons | risk | |
| existential-risk | Existential Risk from AI | concept | |
| irreversibility | AI-Induced Irreversibility | risk | |
| lock-in | AI Value Lock-in | risk | |