MIRI (Machine Intelligence Research Institute)
miri — organization
Path: /knowledge-base/organizations/miri/
E202 — Entity ID (EID)
Page Record
database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "miri",
"numericId": null,
"path": "/knowledge-base/organizations/miri/",
"filePath": "knowledge-base/organizations/miri.mdx",
"title": "MIRI (Machine Intelligence Research Institute)",
"quality": 50,
"readerImportance": 32,
"researchImportance": 44,
"tacticalValue": null,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "Comprehensive organizational history documenting MIRI's trajectory from pioneering AI safety research (2000-2020) to policy advocacy after acknowledging research failure, with detailed financial data showing $5M annual deficit and ~2 year runway. Provides well-sourced analysis of the organization's $25.6M revenue peak (2021), subsequent decline, and strategic pivot away from technical alignment work.",
"description": "A pioneering AI safety research organization that shifted from technical alignment research to policy advocacy, founded by Eliezer Yudkowsky in 2000 as the first organization to work on artificial superintelligence alignment.",
"ratings": {
"novelty": 3.5,
"rigor": 6,
"actionability": 2,
"completeness": 7
},
"category": "organizations",
"subcategory": "safety-orgs",
"clusters": [
"community",
"ai-safety"
],
"metrics": {
"wordCount": 1864,
"tableCount": 1,
"diagramCount": 0,
"internalLinks": 9,
"externalLinks": 74,
"footnoteCount": 0,
"bulletRatio": 0.23,
"sectionCount": 26,
"hasOverview": true,
"structuralScore": 13
},
"suggestedQuality": 87,
"updateFrequency": 21,
"evergreen": true,
"wordCount": 1864,
"unconvertedLinks": [
{
"text": "MIRI About",
"url": "https://intelligence.org/about/",
"resourceId": "kb-2c16d126b367df5d"
},
{
"text": "MIRI About",
"url": "https://intelligence.org/about/",
"resourceId": "kb-2c16d126b367df5d"
},
{
"text": "ProPublica",
"url": "https://projects.propublica.org/nonprofits/organizations/582565917",
"resourceId": "kb-0aac71d5b6ac8081"
},
{
"text": "Wikipedia",
"url": "https://en.wikipedia.org/wiki/Machine_Intelligence_Research_Institute",
"resourceId": "d351a4ad40241c73",
"resourceTitle": "Machine Intelligence Research Institute - Wikipedia"
},
{
"text": "MIRI About",
"url": "https://intelligence.org/about/",
"resourceId": "kb-2c16d126b367df5d"
},
{
"text": "Wikipedia",
"url": "https://en.wikipedia.org/wiki/Machine_Intelligence_Research_Institute",
"resourceId": "d351a4ad40241c73",
"resourceTitle": "Machine Intelligence Research Institute - Wikipedia"
},
{
"text": "MIRI About",
"url": "https://intelligence.org/about/",
"resourceId": "kb-2c16d126b367df5d"
},
{
"text": "MIRI 2024 Update",
"url": "https://intelligence.org/2024/01/04/miri-2024-mission-and-strategy-update/",
"resourceId": "435b669c11e07d8f",
"resourceTitle": "MIRI's 2024 assessment"
},
{
"text": "ProPublica",
"url": "https://projects.propublica.org/nonprofits/organizations/582565917",
"resourceId": "kb-0aac71d5b6ac8081"
},
{
"text": "Wikipedia",
"url": "https://en.wikipedia.org/wiki/Machine_Intelligence_Research_Institute",
"resourceId": "d351a4ad40241c73",
"resourceTitle": "Machine Intelligence Research Institute - Wikipedia"
},
{
"text": "Wikipedia",
"url": "https://en.wikipedia.org/wiki/Machine_Intelligence_Research_Institute",
"resourceId": "d351a4ad40241c73",
"resourceTitle": "Machine Intelligence Research Institute - Wikipedia"
},
{
"text": "Wikipedia",
"url": "https://en.wikipedia.org/wiki/Machine_Intelligence_Research_Institute",
"resourceId": "d351a4ad40241c73",
"resourceTitle": "Machine Intelligence Research Institute - Wikipedia"
},
{
"text": "Wikipedia",
"url": "https://en.wikipedia.org/wiki/Machine_Intelligence_Research_Institute",
"resourceId": "d351a4ad40241c73",
"resourceTitle": "Machine Intelligence Research Institute - Wikipedia"
},
{
"text": "Wikipedia",
"url": "https://en.wikipedia.org/wiki/Machine_Intelligence_Research_Institute",
"resourceId": "d351a4ad40241c73",
"resourceTitle": "Machine Intelligence Research Institute - Wikipedia"
},
{
"text": "Wikipedia",
"url": "https://en.wikipedia.org/wiki/Machine_Intelligence_Research_Institute",
"resourceId": "d351a4ad40241c73",
"resourceTitle": "Machine Intelligence Research Institute - Wikipedia"
},
{
"text": "MIRI Publications",
"url": "https://intelligence.org/all-publications/",
"resourceId": "fc77e6a5087586a3",
"resourceTitle": "MIRI Papers"
},
{
"text": "ProPublica",
"url": "https://projects.propublica.org/nonprofits/organizations/582565917",
"resourceId": "kb-0aac71d5b6ac8081"
},
{
"text": "MIRI 2024 Update",
"url": "https://intelligence.org/2024/01/04/miri-2024-mission-and-strategy-update/",
"resourceId": "435b669c11e07d8f",
"resourceTitle": "MIRI's 2024 assessment"
},
{
"text": "MIRI 2024 Update",
"url": "https://intelligence.org/2024/01/04/miri-2024-mission-and-strategy-update/",
"resourceId": "435b669c11e07d8f",
"resourceTitle": "MIRI's 2024 assessment"
},
{
"text": "MIRI About",
"url": "https://intelligence.org/about/",
"resourceId": "kb-2c16d126b367df5d"
},
{
"text": "ProPublica",
"url": "https://projects.propublica.org/nonprofits/organizations/582565917",
"resourceId": "kb-0aac71d5b6ac8081"
},
{
"text": "ProPublica",
"url": "https://projects.propublica.org/nonprofits/organizations/582565917",
"resourceId": "kb-0aac71d5b6ac8081"
},
{
"text": "ProPublica",
"url": "https://projects.propublica.org/nonprofits/organizations/582565917",
"resourceId": "kb-0aac71d5b6ac8081"
},
{
"text": "ProPublica",
"url": "https://projects.propublica.org/nonprofits/organizations/582565917",
"resourceId": "kb-0aac71d5b6ac8081"
},
{
"text": "ProPublica",
"url": "https://projects.propublica.org/nonprofits/organizations/582565917",
"resourceId": "kb-0aac71d5b6ac8081"
},
{
"text": "ProPublica",
"url": "https://projects.propublica.org/nonprofits/organizations/582565917",
"resourceId": "kb-0aac71d5b6ac8081"
},
{
"text": "ProPublica",
"url": "https://projects.propublica.org/nonprofits/organizations/582565917",
"resourceId": "kb-0aac71d5b6ac8081"
},
{
"text": "ProPublica",
"url": "https://projects.propublica.org/nonprofits/organizations/582565917",
"resourceId": "kb-0aac71d5b6ac8081"
},
{
"text": "ProPublica",
"url": "https://projects.propublica.org/nonprofits/organizations/582565917",
"resourceId": "kb-0aac71d5b6ac8081"
},
{
"text": "ProPublica",
"url": "https://projects.propublica.org/nonprofits/organizations/582565917",
"resourceId": "kb-0aac71d5b6ac8081"
},
{
"text": "ProPublica",
"url": "https://projects.propublica.org/nonprofits/organizations/582565917",
"resourceId": "kb-0aac71d5b6ac8081"
},
{
"text": "MIRI About",
"url": "https://intelligence.org/about/",
"resourceId": "kb-2c16d126b367df5d"
},
{
"text": "MIRI 2024 Update",
"url": "https://intelligence.org/2024/01/04/miri-2024-mission-and-strategy-update/",
"resourceId": "435b669c11e07d8f",
"resourceTitle": "MIRI's 2024 assessment"
},
{
"text": "MIRI About",
"url": "https://intelligence.org/about/",
"resourceId": "kb-2c16d126b367df5d"
},
{
"text": "MIRI Publications",
"url": "https://intelligence.org/all-publications/",
"resourceId": "fc77e6a5087586a3",
"resourceTitle": "MIRI Papers"
},
{
"text": "MIRI Publications",
"url": "https://intelligence.org/all-publications/",
"resourceId": "fc77e6a5087586a3",
"resourceTitle": "MIRI Papers"
},
{
"text": "MIRI Publications",
"url": "https://intelligence.org/all-publications/",
"resourceId": "fc77e6a5087586a3",
"resourceTitle": "MIRI Papers"
},
{
"text": "MIRI Publications",
"url": "https://intelligence.org/all-publications/",
"resourceId": "fc77e6a5087586a3",
"resourceTitle": "MIRI Papers"
},
{
"text": "MIRI Publications",
"url": "https://intelligence.org/all-publications/",
"resourceId": "fc77e6a5087586a3",
"resourceTitle": "MIRI Papers"
},
{
"text": "MIRI Publications",
"url": "https://intelligence.org/all-publications/",
"resourceId": "fc77e6a5087586a3",
"resourceTitle": "MIRI Papers"
},
{
"text": "MIRI Publications",
"url": "https://intelligence.org/all-publications/",
"resourceId": "fc77e6a5087586a3",
"resourceTitle": "MIRI Papers"
},
{
"text": "MIRI 2024 Update",
"url": "https://intelligence.org/2024/01/04/miri-2024-mission-and-strategy-update/",
"resourceId": "435b669c11e07d8f",
"resourceTitle": "MIRI's 2024 assessment"
},
{
"text": "MIRI About",
"url": "https://intelligence.org/about/",
"resourceId": "kb-2c16d126b367df5d"
},
{
"text": "Wikipedia",
"url": "https://en.wikipedia.org/wiki/Machine_Intelligence_Research_Institute",
"resourceId": "d351a4ad40241c73",
"resourceTitle": "Machine Intelligence Research Institute - Wikipedia"
},
{
"text": "MIRI About Page",
"url": "https://intelligence.org/about/",
"resourceId": "kb-2c16d126b367df5d"
},
{
"text": "MIRI 2024 Mission and Strategy Update",
"url": "https://intelligence.org/2024/01/04/miri-2024-mission-and-strategy-update/",
"resourceId": "435b669c11e07d8f",
"resourceTitle": "MIRI's 2024 assessment"
},
{
"text": "All MIRI Publications",
"url": "https://intelligence.org/all-publications/",
"resourceId": "fc77e6a5087586a3",
"resourceTitle": "MIRI Papers"
},
{
"text": "MIRI on ProPublica Nonprofit Explorer",
"url": "https://projects.propublica.org/nonprofits/organizations/582565917",
"resourceId": "kb-0aac71d5b6ac8081"
},
{
"text": "Machine Intelligence Research Institute - Wikipedia",
"url": "https://en.wikipedia.org/wiki/Machine_Intelligence_Research_Institute",
"resourceId": "d351a4ad40241c73",
"resourceTitle": "Machine Intelligence Research Institute - Wikipedia"
}
],
"unconvertedLinkCount": 49,
"convertedLinkCount": 0,
"backlinkCount": 104,
"hallucinationRisk": {
"level": "high",
"score": 75,
"factors": [
"biographical-claims",
"no-citations"
]
},
"entityType": "organization",
"redundancy": {
"maxSimilarity": 15,
"similarPages": [
{
"id": "eliezer-yudkowsky",
"title": "Eliezer Yudkowsky",
"path": "/knowledge-base/people/eliezer-yudkowsky/",
"similarity": 15
},
{
"id": "miri-era",
"title": "The MIRI Era (2000-2015)",
"path": "/knowledge-base/history/miri-era/",
"similarity": 14
},
{
"id": "arc",
"title": "ARC (Alignment Research Center)",
"path": "/knowledge-base/organizations/arc/",
"similarity": 14
},
{
"id": "coefficient-giving",
"title": "Coefficient Giving",
"path": "/knowledge-base/organizations/coefficient-giving/",
"similarity": 14
},
{
"id": "lesswrong",
"title": "LessWrong",
"path": "/knowledge-base/organizations/lesswrong/",
"similarity": 14
}
]
},
"changeHistory": [
{
"date": "2026-02-24",
"branch": "feat/stale-fact-detection-581-582",
"title": "Batch content fixes + stale-facts validator + 2 new validation rules",
"summary": "(fill in)",
"pr": 924,
"model": "claude-sonnet-4-6"
}
],
"coverage": {
"passing": 7,
"total": 13,
"targets": {
"tables": 7,
"diagrams": 1,
"internalLinks": 15,
"externalLinks": 9,
"footnotes": 6,
"references": 6
},
"actuals": {
"tables": 1,
"diagrams": 0,
"internalLinks": 9,
"externalLinks": 74,
"footnotes": 0,
"references": 17,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "green",
"overview": "green",
"tables": "amber",
"diagrams": "red",
"internalLinks": "amber",
"externalLinks": "green",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"editHistoryCount": 1,
"ratingsString": "N:3.5 R:6 A:2 C:7"
},
"readerRank": 433,
"researchRank": 315,
"recommendedScore": 137.63
}
External Links
{
"wikipedia": "https://en.wikipedia.org/wiki/Machine_Intelligence_Research_Institute",
"lesswrong": "https://www.lesswrong.com/tag/machine-intelligence-research-institute-miri",
"wikidata": "https://www.wikidata.org/wiki/Q2040269",
"grokipedia": "https://grokipedia.com/page/Machine_Intelligence_Research_Institute"
}
Backlinks (104)
| id | title | type | relationship |
|---|---|---|---|
| research-agendas | AI Alignment Research Agendas | crux | — |
| accident-risks | AI Accident Risk Cruxes | crux | — |
| miri-era | The MIRI Era | historical | — |
| instrumental-convergence-framework | Instrumental Convergence Framework | analysis | research |
| arc | ARC | organization | — |
| redwood-research | Redwood Research | organization | — |
| eliezer-yudkowsky | Eliezer Yudkowsky | person | — |
| instrumental-convergence | Instrumental Convergence | risk | — |
| mesa-optimization | Mesa-Optimization | risk | — |
| sharp-left-turn | Sharp Left Turn | risk | — |
| coding | Autonomous Coding | capability | — |
| large-language-models | Large Language Models | concept | — |
| situational-awareness | Situational Awareness | capability | — |
| why-alignment-hard | Why Alignment Might Be Hard | argument | — |
| deep-learning-era | Deep Learning Revolution (2012-2020) | historical | — |
| ea-epistemic-failures-in-the-ftx-era | EA Epistemic Failures in the FTX Era | concept | — |
| ea-longtermist-wins-losses | EA and Longtermist Wins and Losses | concept | — |
| early-warnings | Early Warnings (1950s-2000) | historical | — |
| epstein-ai-connections | Jeffrey Epstein's Connections to AI Researchers | concept | — |
| __index__/knowledge-base/history | History | concept | — |
| __index__/knowledge-base | Knowledge Base | concept | — |
| ai-risk-portfolio-analysis | AI Risk Portfolio Analysis | analysis | — |
| ai-talent-market-dynamics | AI Talent Market Dynamics | analysis | — |
| capability-alignment-race | Capability-Alignment Race Model | analysis | — |
| corrigibility-failure-pathways | Corrigibility Failure Pathways | analysis | — |
| deceptive-alignment-decomposition | Deceptive Alignment Decomposition Model | analysis | — |
| goal-misgeneralization-probability | Goal Misgeneralization Probability Model | analysis | — |
| longtermist-value-comparisons | Relative Longtermist Value Comparisons | analysis | — |
| mesa-optimization-analysis | Mesa-Optimization Risk Analysis | analysis | — |
| power-seeking-conditions | Power-Seeking Emergence Conditions Model | analysis | — |
| risk-interaction-matrix | Risk Interaction Matrix Model | analysis | — |
| risk-interaction-network | Risk Interaction Network | analysis | — |
| safety-research-allocation | Safety Research Allocation Model | analysis | — |
| safety-research-value | Expected Value of AI Safety Research | analysis | — |
| safety-researcher-gap | AI Safety Talent Supply/Demand Gap Model | analysis | — |
| safety-spending-at-scale | Safety Spending at Scale | analysis | — |
| scheming-likelihood-model | Scheming Likelihood Assessment | analysis | — |
| worldview-intervention-mapping | Worldview-Intervention Mapping | analysis | — |
| ai-futures-project | AI Futures Project | organization | — |
| bridgewater-aia-labs | Bridgewater AIA Labs | organization | — |
| cais | CAIS (Center for AI Safety) | organization | — |
| center-for-applied-rationality | Center for Applied Rationality | organization | — |
| chai | CHAI (Center for Human-Compatible AI) | organization | — |
| coefficient-giving | Coefficient Giving | organization | — |
| conjecture | Conjecture | organization | — |
| ea-funding-absorption-capacity | EA Funding Absorption Capacity | concept | — |
| elon-musk-philanthropy | Elon Musk (Funder) | analysis | — |
| far-ai | FAR AI | organization | — |
| fli | Future of Life Institute (FLI) | organization | — |
| frontier-model-forum | Frontier Model Forum | organization | — |
| ftx-collapse-ea-funding-lessons | FTX Collapse: Lessons for EA Funding Resilience | concept | — |
| funders-overview | Longtermist Funders (Overview) | concept | — |
| giving-pledge | Giving Pledge | organization | — |
| __index__/knowledge-base/organizations | Organizations | concept | — |
| lesswrong | LessWrong | organization | — |
| lighthaven | Lighthaven (Event Venue) | organization | — |
| macarthur-foundation | MacArthur Foundation | organization | — |
| manifest | Manifest (Forecasting Conference) | organization | — |
| mats | MATS ML Alignment Theory Scholars program | organization | — |
| palisade-research | Palisade Research | organization | — |
| peter-thiel-philanthropy | Peter Thiel (Funder) | organization | — |
| safety-orgs-overview | AI Safety Organizations (Overview) | concept | — |
| sentinel | Sentinel (Catastrophic Risk Foresight) | organization | — |
| sff | Survival and Flourishing Fund (SFF) | organization | — |
| swift-centre | Swift Centre | organization | — |
| the-sequences | The Sequences by Eliezer Yudkowsky | organization | — |
| vitalik-buterin-philanthropy | Vitalik Buterin (Funder) | organization | — |
| dario-amodei | Dario Amodei | person | — |
| eliezer-yudkowsky-predictions | Eliezer Yudkowsky: Track Record | concept | — |
| evan-hubinger | Evan Hubinger | person | — |
| geoffrey-hinton | Geoffrey Hinton | person | — |
| gwern | Gwern Branwen | person | — |
| holden-karnofsky | Holden Karnofsky | person | — |
| ilya-sutskever | Ilya Sutskever | person | — |
| __index__/knowledge-base/people | People | concept | — |
| issa-rice | Issa Rice | person | — |
| jaan-tallinn | Jaan Tallinn | person | — |
| nick-bostrom | Nick Bostrom | person | — |
| nuno-sempere | Nuño Sempere | person | — |
| paul-christiano | Paul Christiano | person | — |
| stuart-russell | Stuart Russell | person | — |
| vidur-kapur | Vidur Kapur | person | — |
| vipul-naik | Vipul Naik | person | — |
| yann-lecun-predictions | Yann LeCun: Track Record | concept | — |
| yann-lecun | Yann LeCun | person | — |
| yoshua-bengio | Yoshua Bengio | person | — |
| agent-foundations | Agent Foundations | approach | — |
| ai-control | AI Control | safety-agenda | — |
| ai-watch | AI Watch | project | — |
| alignment | AI Alignment | approach | — |
| constitutional-ai | Constitutional AI | approach | — |
| corporate | Corporate AI Safety Responses | approach | — |
| corrigibility | Corrigibility Research | safety-agenda | — |
| donations-list-website | Donations List Website | project | — |
| maim | MAIM (Mutually Assured AI Malfunction) | policy | — |
| stampy-aisafety-info | Stampy / AISafety.info | project | — |
| technical-research | Technical AI Safety Research | crux | — |
| timelines-wiki | Timelines Wiki | project | — |
| training-programs | AI Safety Training Programs | approach | — |
| corrigibility-failure | Corrigibility Failure | risk | — |
| epistemic-sycophancy | Epistemic Sycophancy | risk | — |
| existential-risk | Existential Risk from AI | concept | — |
| doomer | AI Doomer Worldview | concept | — |
| __index__/knowledge-base/worldviews | Worldviews | concept | — |