AI Futures Project
ai-futures-project · organization · Path: /knowledge-base/organizations/ai-futures-project/
E511 — Entity ID (EID)
Page Record: database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "ai-futures-project",
"numericId": null,
"path": "/knowledge-base/organizations/ai-futures-project/",
"filePath": "knowledge-base/organizations/ai-futures-project.mdx",
"title": "AI Futures Project",
"quality": 50,
"readerImportance": 83,
"researchImportance": 68,
"tacticalValue": 68,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "AI Futures Project is a nonprofit co-founded in 2024 by Daniel Kokotajlo, Eli Lifland, and Thomas Larsen that produces detailed AI capability forecasts, most notably the AI 2027 scenario depicting rapid progress to superintelligence. The organization has revised timelines significantly over time and faces substantial criticism for aggressive assumptions and methodological limitations.",
"description": "Nonprofit research organization focused on forecasting AI timelines and scenarios, co-founded by Daniel Kokotajlo, Eli Lifland, and Thomas Larsen",
"ratings": {
"focus": 8.5,
"novelty": 2,
"rigor": 5.5,
"completeness": 7,
"concreteness": 6,
"actionability": 2
},
"category": "organizations",
"subcategory": "safety-orgs",
"clusters": [
"ai-safety",
"community"
],
"metrics": {
"wordCount": 2406,
"tableCount": 2,
"diagramCount": 0,
"internalLinks": 18,
"externalLinks": 2,
"footnoteCount": 0,
"bulletRatio": 0.1,
"sectionCount": 21,
"hasOverview": true,
"structuralScore": 12
},
"suggestedQuality": 80,
"updateFrequency": 21,
"evergreen": true,
"wordCount": 2406,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 0,
"backlinkCount": 4,
"hallucinationRisk": {
"level": "high",
"score": 75,
"factors": [
"biographical-claims",
"no-citations"
]
},
"entityType": "organization",
"redundancy": {
"maxSimilarity": 16,
"similarPages": [
{
"id": "miri-era",
"title": "The MIRI Era (2000-2015)",
"path": "/knowledge-base/history/miri-era/",
"similarity": 16
},
{
"id": "ai-timelines",
"title": "AI Timelines",
"path": "/knowledge-base/models/ai-timelines/",
"similarity": 16
},
{
"id": "anthropic-ipo",
"title": "Anthropic IPO",
"path": "/knowledge-base/organizations/anthropic-ipo/",
"similarity": 16
},
{
"id": "controlai",
"title": "ControlAI",
"path": "/knowledge-base/organizations/controlai/",
"similarity": 16
},
{
"id": "futuresearch",
"title": "FutureSearch",
"path": "/knowledge-base/organizations/futuresearch/",
"similarity": 16
}
]
},
"changeHistory": [
{
"date": "2026-02-18",
"branch": "claude/fix-issue-240-N5irU",
"title": "Surface tacticalValue in /wiki table and score 53 pages",
"summary": "Added `tacticalValue` to `ExploreItem` interface, `getExploreItems()` mappings, the `/wiki` explore table (new sortable \"Tact.\" column), and the card view sort dropdown. Scored 49 new pages with tactical values (4 were already scored), bringing total to 53.",
"model": "sonnet-4",
"duration": "~30min"
}
],
"coverage": {
"passing": 5,
"total": 13,
"targets": {
"tables": 10,
"diagrams": 1,
"internalLinks": 19,
"externalLinks": 12,
"footnotes": 7,
"references": 7
},
"actuals": {
"tables": 2,
"diagrams": 0,
"internalLinks": 18,
"externalLinks": 2,
"footnotes": 0,
"references": 0,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "green",
"overview": "green",
"tables": "amber",
"diagrams": "red",
"internalLinks": "amber",
"externalLinks": "amber",
"footnotes": "red",
"references": "red",
"quotes": "red",
"accuracy": "red"
},
"editHistoryCount": 1,
"ratingsString": "N:2 R:5.5 A:2 C:7"
},
"readerRank": 65,
"researchRank": 167,
"recommendedScore": 163.24
}
External Links
No external links
Backlinks (4)
| id | title | type | relationship |
|---|---|---|---|
| eli-lifland | Eli Lifland | person | — |
| ai-timelines | AI Timelines | concept | — |
| safety-orgs-overview | AI Safety Organizations (Overview) | concept | — |
| samotsvety | Samotsvety | organization | — |