Future of Humanity Institute (FHI)
fhi · organization · Path: /knowledge-base/organizations/fhi/
Entity ID (EID): E140
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
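As a rough illustration of that merge, here is a minimal sketch in Python. The file paths, helper names, and precedence order (computed metrics overwrite entity fields, which overwrite frontmatter) are assumptions for illustration only, not the actual build pipeline.

```python
import json
import yaml  # PyYAML


def parse_frontmatter(mdx_path):
    """Extract the YAML frontmatter block between the leading '---' markers of an MDX file."""
    text = open(mdx_path, encoding="utf-8").read()
    # Assumes the file begins with a '---' delimited frontmatter block.
    _, frontmatter, _body = text.split("---", 2)
    return yaml.safe_load(frontmatter)


def build_page_record(mdx_path, entity_yaml_path, computed_metrics):
    """Merge MDX frontmatter, entity YAML, and computed metrics into one record.

    Later sources overwrite earlier ones on key collisions; the real build may
    use a different precedence order.
    """
    record = {}
    record.update(parse_frontmatter(mdx_path))        # title, description, ratings, ...
    with open(entity_yaml_path, encoding="utf-8") as f:
        record.update(yaml.safe_load(f))               # entityType, external links, ...
    record.update(computed_metrics)                    # wordCount, backlinkCount, coverage, ...
    return record


if __name__ == "__main__":
    metrics = {"wordCount": 4188, "backlinkCount": 32}  # stand-ins for the computed values
    page = build_page_record(
        "knowledge-base/organizations/fhi.mdx",          # hypothetical entity YAML path below
        "knowledge-base/entities/fhi.yaml",
        metrics,
    )
    print(json.dumps(page, indent=2))
```

The record below is the actual output of that build step for this page.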
{
"id": "fhi",
"numericId": null,
"path": "/knowledge-base/organizations/fhi/",
"filePath": "knowledge-base/organizations/fhi.mdx",
"title": "Future of Humanity Institute (FHI)",
"quality": 51,
"readerImportance": 50.5,
"researchImportance": 49.5,
"tacticalValue": null,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "The Future of Humanity Institute (2005-2024) was a pioneering Oxford research center that founded existential risk studies and AI alignment research, growing from 3 to ~50 researchers and receiving \\$10M+ in funding before closing due to administrative conflicts. FHI produced seminal works (Superintelligence, The Precipice), trained leaders now at Anthropic/DeepMind/GovAI, and advised UN/UK government, demonstrating both transformative intellectual impact and the challenges of housing speculative research in traditional academia.",
"description": "The Future of Humanity Institute was a pioneering interdisciplinary research center at Oxford University (2005-2024) that founded the fields of existential risk studies and AI alignment research. Under Nick Bostrom's direction, FHI produced seminal works including Superintelligence and The Precipice, trained a generation of researchers now leading organizations like GovAI, Anthropic, and DeepMind safety teams, and advised the UN and UK government on catastrophic risks before its closure in April 2024 due to administrative conflicts with Oxford's Faculty of Philosophy.",
"ratings": {
"novelty": 3.2,
"rigor": 5.8,
"actionability": 2.1,
"completeness": 7.3
},
"category": "organizations",
"subcategory": "safety-orgs",
"clusters": [
"community",
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 4188,
"tableCount": 32,
"diagramCount": 2,
"internalLinks": 14,
"externalLinks": 24,
"footnoteCount": 0,
"bulletRatio": 0.05,
"sectionCount": 57,
"hasOverview": true,
"structuralScore": 15
},
"suggestedQuality": 100,
"updateFrequency": 21,
"evergreen": true,
"wordCount": 4188,
"unconvertedLinks": [
{
"text": "fhi.ox.ac.uk",
"url": "https://www.fhi.ox.ac.uk/",
"resourceId": "1593095c92d34ed8",
"resourceTitle": "**Future of Humanity Institute**"
},
{
"text": "Future of Humanity Institute",
"url": "https://en.wikipedia.org/wiki/Future_of_Humanity_Institute",
"resourceId": "d04582635c8c0ce4",
"resourceTitle": "Future of Humanity Institute - Wikipedia"
},
{
"text": "Nick Bostrom",
"url": "https://nickbostrom.com/",
"resourceId": "9cf1412a293bfdbe",
"resourceTitle": "Theoretical work"
},
{
"text": "Future of Humanity Institute Website",
"url": "https://www.fhi.ox.ac.uk/",
"resourceId": "1593095c92d34ed8",
"resourceTitle": "**Future of Humanity Institute**"
},
{
"text": "Nick Bostrom's Homepage",
"url": "https://nickbostrom.com/",
"resourceId": "9cf1412a293bfdbe",
"resourceTitle": "Theoretical work"
},
{
"text": "Future of Humanity Institute - Wikipedia",
"url": "https://en.wikipedia.org/wiki/Future_of_Humanity_Institute",
"resourceId": "d04582635c8c0ce4",
"resourceTitle": "Future of Humanity Institute - Wikipedia"
},
{
"text": "Nick Bostrom - Wikipedia",
"url": "https://en.wikipedia.org/wiki/Nick_Bostrom",
"resourceId": "kb-bab966a212f1bc8b"
},
{
"text": "Superintelligence: Paths, Dangers, Strategies - Wikipedia",
"url": "https://en.wikipedia.org/wiki/Superintelligence:_Paths,_Dangers,_Strategies",
"resourceId": "0151481d5dc82963",
"resourceTitle": "Superintelligence"
},
{
"text": "Daily Nous: The End of the Future of Humanity Institute",
"url": "https://dailynous.com/2024/04/18/end-future-of-humanity-institute/",
"resourceId": "73a866cd6278fc9b",
"resourceTitle": "The End of the Future of Humanity Institute — Daily Nous (April 18, 2024)"
},
{
"text": "EA Forum: FHI Final Report Discussion",
"url": "https://forum.effectivealtruism.org/posts/uK27pds7J36asqJPt/future-of-humanity-institute-2005-2024-final-report",
"resourceId": "87c472d68e8a2845",
"resourceTitle": "Future of Humanity Institute 2005–2024: Final Report — EA Forum (April 17, 2024)"
}
],
"unconvertedLinkCount": 10,
"convertedLinkCount": 0,
"backlinkCount": 32,
"hallucinationRisk": {
"level": "high",
"score": 75,
"factors": [
"biographical-claims",
"no-citations"
]
},
"entityType": "organization",
"redundancy": {
"maxSimilarity": 15,
"similarPages": [
{
"id": "cser",
"title": "CSER (Centre for the Study of Existential Risk)",
"path": "/knowledge-base/organizations/cser/",
"similarity": 15
},
{
"id": "miri-era",
"title": "The MIRI Era (2000-2015)",
"path": "/knowledge-base/history/miri-era/",
"similarity": 14
},
{
"id": "fli",
"title": "Future of Life Institute (FLI)",
"path": "/knowledge-base/organizations/fli/",
"similarity": 14
},
{
"id": "nick-beckstead",
"title": "Nick Beckstead",
"path": "/knowledge-base/people/nick-beckstead/",
"similarity": 14
},
{
"id": "ea-longtermist-wins-losses",
"title": "EA and Longtermist Wins and Losses",
"path": "/knowledge-base/history/ea-longtermist-wins-losses/",
"similarity": 13
}
]
},
"changeHistory": [
{
"date": "2026-02-18",
"branch": "claude/audit-webpage-errors-X4jHg",
"title": "Audit wiki pages for factual errors and hallucinations",
"summary": "Systematic audit of ~20 wiki pages for factual errors, hallucinations, and inconsistencies. Found and fixed 25+ confirmed errors across 17 pages, including wrong dates, fabricated statistics, false attributions, missing major events, broken entity references, misattributed techniques, and internal inconsistencies."
}
],
"coverage": {
"passing": 8,
"total": 13,
"targets": {
"tables": 17,
"diagrams": 2,
"internalLinks": 34,
"externalLinks": 21,
"footnotes": 13,
"references": 13
},
"actuals": {
"tables": 32,
"diagrams": 2,
"internalLinks": 14,
"externalLinks": 24,
"footnotes": 0,
"references": 7,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "green",
"overview": "green",
"tables": "green",
"diagrams": "green",
"internalLinks": "amber",
"externalLinks": "green",
"footnotes": "red",
"references": "amber",
"quotes": "red",
"accuracy": "red"
},
"editHistoryCount": 1,
"ratingsString": "N:3.2 R:5.8 A:2.1 C:7.3"
},
"readerRank": 299,
"researchRank": 282,
"recommendedScore": 149.11
}
External Links
{
"wikidata": "https://www.wikidata.org/wiki/Q5510826"
}
Backlinks (32)
| id | title | type | relationship |
|---|---|---|---|
| miri-era | The MIRI Era | historical | — |
| deep-learning-era | Deep Learning Revolution (2012-2020) | historical | — |
| ea-longtermist-wins-losses | EA and Longtermist Wins and Losses | concept | — |
| epstein-ai-connections | Jeffrey Epstein's Connections to AI Researchers | concept | — |
| ftx-red-flags-pre-collapse-warning-signs-that-were-overlooked | FTX Red Flags: Pre-Collapse Warning Signs That Were Overlooked | concept | — |
| longtermism-credibility-after-ftx | Longtermism's Philosophical Credibility After FTX | concept | — |
| capability-alignment-race | Capability-Alignment Race Model | analysis | — |
| risk-activation-timeline | Risk Activation Timeline Model | analysis | — |
| safety-researcher-gap | AI Safety Talent Supply/Demand Gap Model | analysis | — |
| anthropic-ipo | Anthropic IPO | analysis | — |
| cset | CSET (Center for Security and Emerging Technology) | organization | — |
| deepmind | Google DeepMind | organization | — |
| ftx | FTX (cryptocurrency exchange) | organization | — |
| govai | GovAI | organization | — |
| ibbis | IBBIS (International Biosecurity and Biosafety Initiative for Science) | organization | — |
| safety-orgs-overview | AI Safety Organizations (Overview) | concept | — |
| secure-ai-project | Secure AI Project | organization | — |
| sentinel | Sentinel (Catastrophic Risk Foresight) | organization | — |
| connor-leahy | Connor Leahy | person | — |
| geoffrey-hinton | Geoffrey Hinton | person | — |
| issa-rice | Issa Rice | person | — |
| jan-leike | Jan Leike | person | — |
| nick-beckstead | Nick Beckstead | person | — |
| nick-bostrom | Nick Bostrom | person | — |
| nuno-sempere | Nuño Sempere | person | — |
| robin-hanson | Robin Hanson | person | — |
| toby-ord | Toby Ord | person | — |
| corrigibility | Corrigibility Research | safety-agenda | — |
| disinformation | Disinformation | risk | — |
| existential-risk | Existential Risk from AI | concept | — |
| knowledge-monopoly | AI Knowledge Monopoly | risk | — |
| superintelligence | Superintelligence | concept | — |