Toby Ord
toby-ord (person) · Path: /knowledge-base/people/toby-ord/
Entity ID (EID): E355
Page Record
database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
```json
{
  "id": "toby-ord",
  "numericId": null,
  "path": "/knowledge-base/people/toby-ord/",
  "filePath": "knowledge-base/people/toby-ord.mdx",
  "title": "Toby Ord",
  "quality": 41,
  "readerImportance": 26,
  "researchImportance": 11.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive biographical profile of Toby Ord documenting his 10% AI extinction estimate and role founding effective altruism, with detailed tables on risk assessments, academic background, and influence metrics. While thorough on his contributions, provides limited original analysis beyond summarizing publicly available information about his work and impact.",
  "description": "Oxford philosopher and author of 'The Precipice' who provided foundational quantitative estimates for existential risks (10% for AI, 1/6 total this century) and philosophical frameworks for long-term thinking that shaped modern AI risk discourse.",
  "ratings": {
    "novelty": 2,
    "rigor": 4.5,
    "actionability": 2,
    "completeness": 6
  },
  "category": "people",
  "subcategory": "ea-figures",
  "clusters": [
    "community",
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 2452,
    "tableCount": 19,
    "diagramCount": 0,
    "internalLinks": 40,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.16,
    "sectionCount": 47,
    "hasOverview": true,
    "structuralScore": 11
  },
  "suggestedQuality": 73,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 2452,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 25,
  "backlinkCount": 18,
  "hallucinationRisk": {
    "level": "high",
    "score": 80,
    "factors": [
      "biographical-claims",
      "no-citations",
      "few-external-sources"
    ]
  },
  "entityType": "person",
  "redundancy": {
    "maxSimilarity": 15,
    "similarPages": [
      {
        "id": "holden-karnofsky",
        "title": "Holden Karnofsky",
        "path": "/knowledge-base/people/holden-karnofsky/",
        "similarity": 15
      },
      {
        "id": "ai-impacts",
        "title": "AI Impacts",
        "path": "/knowledge-base/organizations/ai-impacts/",
        "similarity": 13
      },
      {
        "id": "geoffrey-hinton",
        "title": "Geoffrey Hinton",
        "path": "/knowledge-base/people/geoffrey-hinton/",
        "similarity": 13
      },
      {
        "id": "nick-bostrom",
        "title": "Nick Bostrom",
        "path": "/knowledge-base/people/nick-bostrom/",
        "similarity": 13
      },
      {
        "id": "miri-era",
        "title": "The MIRI Era (2000-2015)",
        "path": "/knowledge-base/history/miri-era/",
        "similarity": 12
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 10,
      "diagrams": 1,
      "internalLinks": 20,
      "externalLinks": 12,
      "footnotes": 7,
      "references": 7
    },
    "actuals": {
      "tables": 19,
      "diagrams": 0,
      "internalLinks": 40,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 21,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:2 R:4.5 A:2 C:6"
  },
  "readerRank": 481,
  "researchRank": 546,
  "recommendedScore": 116.75
}
```
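
The header above describes this record as a build-time merge of MDX frontmatter, entity YAML, and computed metrics, with coverage items graded green or red against per-type targets. The sketch below shows one way such a pipeline could be wired together; the merge order and every name in it (`buildPageRecord`, `checkCoverage`, `CoverageTargets`) are assumptions for illustration, not the site's actual build code.

```typescript
// Hypothetical sketch of the build-time merge described in the header above.
// All names are illustrative; this is not the site's real build code.

interface CoverageTargets {
  tables: number;
  diagrams: number;
  internalLinks: number;
  externalLinks: number;
  footnotes: number;
  references: number;
}

type CoverageStatus = "green" | "red";

// A coverage item is "green" when the page meets or exceeds its target.
function checkCoverage(
  targets: CoverageTargets,
  actuals: CoverageTargets
): Record<keyof CoverageTargets, CoverageStatus> {
  const items = {} as Record<keyof CoverageTargets, CoverageStatus>;
  for (const key of Object.keys(targets) as (keyof CoverageTargets)[]) {
    items[key] = actuals[key] >= targets[key] ? "green" : "red";
  }
  return items;
}

// "N:2 R:4.5 A:2 C:6" looks like a compact rendering of the ratings object.
function ratingsString(r: {
  novelty: number;
  rigor: number;
  actionability: number;
  completeness: number;
}): string {
  return `N:${r.novelty} R:${r.rigor} A:${r.actionability} C:${r.completeness}`;
}

// Merge order is assumed: frontmatter first, entity YAML next,
// computed metrics layered on top.
function buildPageRecord(
  frontmatter: Record<string, unknown>,
  entityYaml: Record<string, unknown>,
  computedMetrics: Record<string, unknown>
): Record<string, unknown> {
  return { ...frontmatter, ...entityYaml, ...computedMetrics };
}
```

Run against the targets and actuals above, `checkCoverage` yields green for tables, internalLinks, and references and red for diagrams, externalLinks, and footnotes, matching those six entries of the record's items map; the remaining items (editHistory, quotes, accuracy, and so on) evidently track other checks.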

External Links

```json
{
"eaForum": "https://forum.effectivealtruism.org/topics/toby-ord",
"wikidata": "https://www.wikidata.org/wiki/Q7811863",
"grokipedia": "https://grokipedia.com/page/Toby_Ord"
}
```

Backlinks (18)

| id | title | type | relationship |
|---|---|---|---|
| giving-what-we-can | Giving What We Can | organization | — |
| holden-karnofsky | Holden Karnofsky | person | — |
| nick-bostrom | Nick Bostrom | person | — |
| nick-beckstead | Nick Beckstead | person | — |
| will-macaskill | Will MacAskill | person | — |
| case-for-xrisk | The Case FOR AI Existential Risk | argument | — |
| ea-longtermist-wins-losses | EA and Longtermist Wins and Losses | concept | — |
| earning-to-give | Earning to Give: The EA Strategy and Its Limits | concept | — |
| longtermism-credibility-after-ftx | Longtermism's Philosophical Credibility After FTX | concept | — |
| longtermist-value-comparisons | Relative Longtermist Value Comparisons | analysis | — |
| cea | Centre for Effective Altruism | organization | — |
| fhi | Future of Humanity Institute (FHI) | organization | — |
| __index__/knowledge-base/people | People | concept | — |
| governance-policy | AI Governance and Policy | crux | — |
| bioweapons | Bioweapons | risk | — |
| existential-risk | Existential Risk from AI | concept | — |
| irreversibility | AI-Induced Irreversibility | risk | — |
| lock-in | AI Value Lock-in | risk | — |
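
The record's `backlinkCount` of 18 matches the rows in this table. A backlink list like this can be derived by inverting the internal-link graph; the sketch below assumes a `convertedLinks` field holding each page's outgoing internal links (the field name is hypothetical, loosely echoing the record's `convertedLinkCount`).

```typescript
// Hypothetical sketch: derive a page's backlinks by inverting the
// internal-link graph. `convertedLinks` is an assumed field name.

interface PageNode {
  id: string;
  convertedLinks: string[]; // ids of pages this page links out to
}

function backlinksFor(targetId: string, pages: PageNode[]): string[] {
  return pages
    .filter((page) => page.convertedLinks.includes(targetId))
    .map((page) => page.id);
}

// For this page, backlinksFor("toby-ord", allPages) would return the 18 ids
// listed in the table above, e.g. "giving-what-we-can" and "nick-bostrom".
```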