Future of Life Institute (FLI)
ID: fli
Type: organization
Path: /knowledge-base/organizations/fli/
Entity ID (EID): E528
Page Record
database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
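Before the record itself, a minimal sketch of the merge described above, assuming a Node build script using gray-matter for MDX frontmatter and js-yaml for the entity file (both package choices are assumptions; the site's actual loader is not documented here):

```ts
// Hypothetical sketch of the build-time merge: MDX frontmatter,
// Entity YAML, and computed metrics combined into one page record.
// Field names mirror the record below; the functions are assumptions.
import { readFileSync } from "node:fs";
import matter from "gray-matter"; // frontmatter parser (assumed choice)
import { load } from "js-yaml";   // YAML parser (assumed choice)

interface PageRecord {
  id: string;
  path: string;
  filePath: string;
  title: string;
  metrics: Record<string, number | boolean>;
  [key: string]: unknown;
}

function buildRecord(
  mdxPath: string,
  entityYamlPath: string,
  computedMetrics: Record<string, number | boolean>,
): PageRecord {
  const { data: frontmatter } = matter(readFileSync(mdxPath, "utf8"));
  const entity = load(readFileSync(entityYamlPath, "utf8")) as Record<string, unknown>;
  // Assumed precedence on key collisions: frontmatter < entity < computed.
  return {
    ...frontmatter,
    ...entity,
    filePath: mdxPath,
    metrics: computedMetrics,
  } as PageRecord;
}

// e.g. buildRecord("knowledge-base/organizations/fli.mdx",
//                  "entities/fli.yaml", { wordCount: 6056, tableCount: 32 });
```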
{
"id": "fli",
"numericId": null,
"path": "/knowledge-base/organizations/fli/",
"filePath": "knowledge-base/organizations/fli.mdx",
"title": "Future of Life Institute (FLI)",
"quality": 46,
"readerImportance": 76,
"researchImportance": 53.5,
"tacticalValue": 72,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-12",
"dateCreated": "2026-02-15",
"llmSummary": "Comprehensive profile of FLI documenting \\$25M+ in grants distributed (2015: \\$7M to 37 projects, 2021: \\$25M program), major public campaigns (Asilomar Principles with 5,700+ signatories, 2023 Pause Letter with 33,000+ signatories), and \\$665.8M Buterin donation (2021). Organization operates primarily through advocacy and grantmaking rather than direct research, with active EU/UN/US policy engagement.",
"description": "The Future of Life Institute is a nonprofit organization focused on reducing existential risks from advanced AI and other transformative technologies. Co-founded by Max Tegmark, Jaan Tallinn, Anthony Aguirre, Viktoriya Krakovna, and Meia Chita-Tegmark in March 2014, FLI has distributed over \\$25 million in AI safety research grants (starting with Elon Musk's \\$10M 2015 donation funding 37 projects), organized the 2015 Puerto Rico and 2017 Asilomar conferences that birthed the field of AI alignment and produced the 23 Asilomar Principles (5,700+ signatories), published the 2023 pause letter (33,000+ signatories including Yoshua Bengio and Stuart Russell), produced the viral Slaughterbots films advocating for autonomous weapons regulation, and received a \\$665.8M cryptocurrency donation from Vitalik Buterin in 2021. FLI maintains active policy engagement with the EU (advocating for foundation model regulation in the AI Act), UN (promoting autonomous weapons treaty), and US Congress.",
"ratings": {
"novelty": 2.5,
"rigor": 4,
"actionability": 2,
"completeness": 6.5
},
"category": "organizations",
"subcategory": "funders",
"clusters": [
"community",
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 6056,
"tableCount": 32,
"diagramCount": 2,
"internalLinks": 33,
"externalLinks": 52,
"footnoteCount": 0,
"bulletRatio": 0.15,
"sectionCount": 51,
"hasOverview": true,
"structuralScore": 15
},
"suggestedQuality": 100,
"updateFrequency": 45,
"evergreen": true,
"wordCount": 6056,
"unconvertedLinks": [
{
"text": "futureoflife.org",
"url": "https://futureoflife.org/",
"resourceId": "786a68a91a7d5712",
"resourceTitle": "Future of Life Institute"
},
{
"text": "Future of Life Institute",
"url": "https://futureoflife.org/",
"resourceId": "786a68a91a7d5712",
"resourceTitle": "Future of Life Institute"
},
{
"text": "\"Pause Giant AI Experiments\"",
"url": "https://futureoflife.org/open-letter/pause-giant-ai-experiments/",
"resourceId": "531f55cee64f6509",
"resourceTitle": "FLI open letter"
},
{
"text": "Metaculus",
"url": "https://www.metaculus.com/",
"resourceId": "d99a6d0fb1edc2db",
"resourceTitle": "Metaculus"
},
{
"text": "FLI Official Website",
"url": "https://futureoflife.org/",
"resourceId": "786a68a91a7d5712",
"resourceTitle": "Future of Life Institute"
},
{
"text": "Pause Giant AI Experiments: An Open Letter",
"url": "https://futureoflife.org/open-letter/pause-giant-ai-experiments/",
"resourceId": "531f55cee64f6509",
"resourceTitle": "FLI open letter"
},
{
"text": "Pause Giant AI Experiments - Wikipedia",
"url": "https://en.wikipedia.org/wiki/Pause_Giant_AI_Experiments:_An_Open_Letter",
"resourceId": "4fc41c1e8720f41f",
"resourceTitle": "Pause letter"
},
{
"text": "FLI Website",
"url": "https://futureoflife.org/",
"resourceId": "786a68a91a7d5712",
"resourceTitle": "Future of Life Institute"
},
{
"text": "Pause Giant AI Experiments Letter",
"url": "https://futureoflife.org/open-letter/pause-giant-ai-experiments/",
"resourceId": "531f55cee64f6509",
"resourceTitle": "FLI open letter"
}
],
"unconvertedLinkCount": 9,
"convertedLinkCount": 0,
"backlinkCount": 20,
"hallucinationRisk": {
"level": "high",
"score": 75,
"factors": [
"biographical-claims",
"no-citations"
]
},
"entityType": "organization",
"redundancy": {
"maxSimilarity": 16,
"similarPages": [
{
"id": "miri-era",
"title": "The MIRI Era (2000-2015)",
"path": "/knowledge-base/history/miri-era/",
"similarity": 16
},
{
"id": "ea-longtermist-wins-losses",
"title": "EA and Longtermist Wins and Losses",
"path": "/knowledge-base/history/ea-longtermist-wins-losses/",
"similarity": 15
},
{
"id": "mainstream-era",
"title": "Mainstream Era (2020-Present)",
"path": "/knowledge-base/history/mainstream-era/",
"similarity": 15
},
{
"id": "ai-futures-project",
"title": "AI Futures Project",
"path": "/knowledge-base/organizations/ai-futures-project/",
"similarity": 15
},
{
"id": "chan-zuckerberg-initiative",
"title": "Chan Zuckerberg Initiative",
"path": "/knowledge-base/organizations/chan-zuckerberg-initiative/",
"similarity": 15
}
]
},
"coverage": {
"passing": 7,
"total": 13,
"targets": {
"tables": 24,
"diagrams": 2,
"internalLinks": 48,
"externalLinks": 30,
"footnotes": 18,
"references": 18
},
"actuals": {
"tables": 32,
"diagrams": 2,
"internalLinks": 33,
"externalLinks": 52,
"footnotes": 0,
"references": 4,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "green",
"diagrams": "green",
"internalLinks": "amber",
"externalLinks": "green",
"footnotes": "red",
"references": "amber",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:2.5 R:4 A:2 C:6.5"
},
"readerRank": 117,
"researchRank": 265,
"recommendedScore": 151.73
}
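Two of the derived fields in the record above can be illustrated with a short sketch: the compact ratingsString ("N:2.5 R:4 A:2 C:6.5") and the green/amber/red coverage statuses. The thresholds below are assumptions for illustration; the record documents only the outputs, not the rules.

```ts
// Hypothetical derivation of "ratingsString" and the coverage item
// statuses seen in the record above. Thresholds are assumed, not documented.

interface Ratings {
  novelty: number;
  rigor: number;
  actionability: number;
  completeness: number;
}

function formatRatings(r: Ratings): string {
  return `N:${r.novelty} R:${r.rigor} A:${r.actionability} C:${r.completeness}`;
}

type Status = "green" | "amber" | "red";

// Assumed rule: meeting the target is green, reaching at least half of it
// is amber, anything below that is red.
function coverageStatus(actual: number, target: number): Status {
  if (actual >= target) return "green";
  if (actual >= target / 2) return "amber";
  return "red";
}

console.log(formatRatings({ novelty: 2.5, rigor: 4, actionability: 2, completeness: 6.5 }));
// -> "N:2.5 R:4 A:2 C:6.5"
console.log(coverageStatus(33, 48)); // internalLinks: 33 of 48 -> "amber"
console.log(coverageStatus(0, 18));  // footnotes: 0 of 18 -> "red"
```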
External Links
No external links
Backlinks (20)
| id | title | type | relationship |
|---|---|---|---|
| max-tegmark | Max Tegmark | person | — |
| pause-moratorium | Pause / Moratorium | policy | — |
| pause | Pause Advocacy | approach | — |
| situational-awareness | Situational Awareness | capability | — |
| solutions | AI Safety Solution Cruxes | crux | — |
| miri-era | The MIRI Era (2000-2015) | historical | — |
| capability-threshold-model | Capability Threshold Model | analysis | — |
| feedback-loops | Feedback Loop & Cascade Model | analysis | — |
| safety-research-allocation | Safety Research Allocation Model | analysis | — |
| anthropic-investors | Anthropic (Funder) | analysis | — |
| cser | CSER (Centre for the Study of Existential Risk) | organization | — |
| funders-overview | Longtermist Funders (Overview) | concept | — |
| leading-the-future | Leading the Future super PAC | organization | — |
| lionheart-ventures | Lionheart Ventures | organization | — |
| metaculus | Metaculus | organization | — |
| openai-foundation | OpenAI Foundation | organization | — |
| elon-musk | Elon Musk (AI Industry) | person | — |
| jaan-tallinn | Jaan Tallinn | person | — |
| ai-for-human-reasoning-fellowship | AI for Human Reasoning Fellowship | approach | — |
| irreversibility | AI-Induced Irreversibility | risk | — |
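Finally, the hallucinationRisk block in the record reports only an output (level "high", score 75, two factors). A purely illustrative sketch of how a factor-based scorer could produce it, with invented weights and cutoffs:

```ts
// Purely illustrative sketch of a factor-based risk scorer. The weights
// and level cutoffs are invented for this example; the record documents
// only the output, not the scoring rule.

const FACTOR_WEIGHTS: Record<string, number> = {
  "biographical-claims": 35, // assumed weight
  "no-citations": 40,        // assumed weight
};

function hallucinationRisk(factors: string[]) {
  const score = factors.reduce((sum, f) => sum + (FACTOR_WEIGHTS[f] ?? 0), 0);
  const level = score >= 70 ? "high" : score >= 40 ? "medium" : "low";
  return { level, score, factors };
}

console.log(hallucinationRisk(["biographical-claims", "no-citations"]));
// -> { level: "high", score: 75, factors: ["biographical-claims", "no-citations"] }
```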