Existential Risk from AI
ID: existential-risk
Type: concept
Path: /knowledge-base/risks/existential-risk/
Entity ID (EID): E131

Page Record
database.json — merged from MDX frontmatter, Entity YAML, and computed metrics at build time
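Conceptually, each record is produced by a build step like the following. This is a minimal sketch, assuming gray-matter for frontmatter parsing and js-yaml for the entity file; the helper names, metric heuristics, and field routing are invented for illustration and only the three inputs come from the record's own description.

```ts
// Sketch: assemble one database.json page record from its three sources.
import { readFileSync } from "node:fs";
import matter from "gray-matter"; // common frontmatter parser; an assumption here
import { load as loadYaml } from "js-yaml";

interface PageMetrics {
  wordCount: number;
  tableCount: number;
  internalLinks: number;
}

// Hypothetical metric pass over the MDX body.
function computeMetrics(body: string): PageMetrics {
  return {
    wordCount: body.split(/\s+/).filter(Boolean).length,
    // Rough: one header-separator row per pipe table.
    tableCount: (body.match(/^\|[\s|:-]+\|$/gm) ?? []).length,
    // The change history mentions EntityLink components; count them as internal links.
    internalLinks: (body.match(/<EntityLink\b/g) ?? []).length,
  };
}

function buildPageRecord(mdxPath: string, entityYamlPath: string) {
  const { data: frontmatter, content } = matter(readFileSync(mdxPath, "utf8"));
  const entity = loadYaml(readFileSync(entityYamlPath, "utf8")) as object;
  return {
    filePath: mdxPath,
    ...entity,       // e.g. entityType, numericId
    ...frontmatter,  // e.g. title, description, category
    metrics: computeMetrics(content),
  };
}
```

Later spreads win key collisions here (frontmatter over entity), which is one plausible precedence; the real pipeline may resolve conflicts differently.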
```json
{
  "id": "existential-risk",
  "numericId": null,
  "path": "/knowledge-base/risks/existential-risk/",
  "filePath": "knowledge-base/risks/existential-risk.mdx",
  "title": "Existential Risk from AI",
  "quality": 92,
  "readerImportance": 95,
  "researchImportance": 18.5,
  "tacticalValue": 45,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-17",
  "llmSummary": null,
  "description": "Hypotheses concerning risks from advanced AI systems that some researchers believe could result in human extinction or permanent global catastrophe",
  "ratings": null,
  "category": "risks",
  "subcategory": "accident",
  "clusters": [
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 1223,
    "tableCount": 1,
    "diagramCount": 0,
    "internalLinks": 36,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.12,
    "sectionCount": 11,
    "hasOverview": false,
    "structuralScore": 8
  },
  "suggestedQuality": 53,
  "updateFrequency": 180,
  "evergreen": true,
  "wordCount": 1223,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 0,
  "backlinkCount": 6,
  "hallucinationRisk": {
    "level": "medium",
    "score": 45,
    "factors": [
      "no-citations",
      "few-external-sources",
      "conceptual-content",
      "high-quality"
    ]
  },
  "entityType": "concept",
  "redundancy": {
    "maxSimilarity": 16,
    "similarPages": [
      {
        "id": "superintelligence",
        "title": "Superintelligence",
        "path": "/knowledge-base/risks/superintelligence/",
        "similarity": 16
      },
      {
        "id": "miri-era",
        "title": "The MIRI Era (2000-2015)",
        "path": "/knowledge-base/history/miri-era/",
        "similarity": 15
      },
      {
        "id": "irreversibility",
        "title": "AI-Induced Irreversibility",
        "path": "/knowledge-base/risks/irreversibility/",
        "similarity": 15
      },
      {
        "id": "doomer",
        "title": "AI Doomer Worldview",
        "path": "/knowledge-base/worldviews/doomer/",
        "similarity": 15
      },
      {
        "id": "self-improvement",
        "title": "Self-Improvement and Recursive Enhancement",
        "path": "/knowledge-base/capabilities/self-improvement/",
        "similarity": 14
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-03-10",
      "branch": "auto-update/2026-03-10",
      "title": "Auto-improve (standard): Existential Risk from AI",
      "summary": "Improved \"Existential Risk from AI\" via standard pipeline (1302.5s). Quality score: 88. Issues resolved: Footnote [^rc-2f55] cites Birhane et al. (2022) FAccT paper ; Footnote [^rc-f540] attributes 'offense-defense balance' pap; EntityLink id='E26' for 'arc-evals' and EntityLink id='E25' .",
      "duration": "1302.5s",
      "cost": "$5-8"
    },
    {
      "date": "2026-02-18",
      "branch": "claude/fix-issue-240-N5irU",
      "title": "Surface tacticalValue in /wiki table and score 53 pages",
      "summary": "Added `tacticalValue` to `ExploreItem` interface, `getExploreItems()` mappings, the `/wiki` explore table (new sortable \"Tact.\" column), and the card view sort dropdown. Scored 49 new pages with tactical values (4 were already scored), bringing total to 53.",
      "model": "sonnet-4",
      "duration": "~30min"
    },
    {
      "date": "2026-02-17",
      "branch": "claude/top-priority-update-WurDM",
      "title": "Improve top 5 foundational wiki pages",
      "summary": "Improved the 5 highest-importance, lowest-quality wiki pages using the Crux content pipeline. All were stubs (7 words) or had quality=0 and are now comprehensive articles with citations, EntityLinks, and balanced perspectives.",
      "pr": 188
    }
  ],
  "coverage": {
    "passing": 4,
    "total": 13,
    "targets": {
      "tables": 5,
      "diagrams": 0,
      "internalLinks": 10,
      "externalLinks": 6,
      "footnotes": 4,
      "references": 4
    },
    "actuals": {
      "tables": 1,
      "diagrams": 0,
      "internalLinks": 36,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 0,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "red",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "red",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "red",
      "quotes": "red",
      "accuracy": "red"
    },
    "editHistoryCount": 3
  },
  "readerRank": 2,
  "researchRank": 502,
  "recommendedScore": 252.94
}
```
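The `coverage` block compares per-metric `targets` against `actuals` and rolls each check up to a traffic-light status in `items`; `passing` counts the greens (4 of 13 here). The project's actual thresholds aren't shown in the record, but a rule like the following reproduces every numeric entry above, including `diagrams`, where a target of 0 evidently does not auto-pass:

```ts
type Status = "green" | "amber" | "red";

// Inferred rule (an assumption, not the project's documented logic):
// meet a positive target -> green; partial progress -> amber; otherwise red.
function statusFor(actual: number, target: number): Status {
  if (target > 0 && actual >= target) return "green";
  if (actual > 0 && actual < target) return "amber";
  return "red";
}

const targets = { tables: 5, diagrams: 0, internalLinks: 10, externalLinks: 6, footnotes: 4, references: 4 };
const actuals = { tables: 1, diagrams: 0, internalLinks: 36, externalLinks: 0, footnotes: 0, references: 0 };

const statuses = Object.fromEntries(
  (Object.keys(targets) as (keyof typeof targets)[]).map((k) => [k, statusFor(actuals[k], targets[k])])
);
// -> { tables: "amber", diagrams: "red", internalLinks: "green",
//      externalLinks: "red", footnotes: "red", references: "red" }

const passing = Object.values(statuses).filter((s) => s === "green").length;
```

The non-numeric items (`llmSummary`, `overview`, `schedule`, `entity`, `editHistory`, ...) look like boolean checks on the record itself (e.g. `llmSummary: null` → red, `hasOverview: false` → red) and would be folded into the same count.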
External Links

```json
{
  "lesswrong": "https://www.lesswrong.com/tag/existential-risk",
  "eaForum": "https://forum.effectivealtruism.org/topics/existential-risk",
  "stampy": "https://aisafety.info/questions/8mTg/What-is-existential-risk",
  "wikidata": "https://www.wikidata.org/wiki/Q16830153",
  "eightyK": "https://80000hours.org/articles/existential-risks/"
}
```

Backlinks (6)

| id | title | type | relationship |
|---|---|---|---|
| cais | CAIS | organization | — |
| fhi | Future of Humanity Institute | organization | — |
| early-warnings | Early Warnings (1950s-2000) | historical | — |
| miri-era | The MIRI Era (2000-2015) | historical | — |
| longtermist-value-comparisons | Relative Longtermist Value Comparisons | analysis | — |
| xai | xAI | organization | — |
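`backlinkCount` in the record and the table above presumably come from an inverse scan of the same database: any page whose converted links resolve to this page's id counts as a backlink. A hypothetical sketch follows; the `linkedIds` field is invented for illustration, since database.json's real per-page link shape isn't shown here.

```ts
interface DbPage {
  id: string;
  title: string;
  entityType: string;
  linkedIds?: string[]; // hypothetical: resolved EntityLink targets for the page
}

// Pages that link to `targetId`, i.e. the rows of the backlinks table above.
function backlinksFor(pages: DbPage[], targetId: string): DbPage[] {
  return pages.filter(
    (p) => p.id !== targetId && (p.linkedIds ?? []).includes(targetId)
  );
}

// Usage: backlinksFor(allPages, "existential-risk") would yield the six
// pages listed above (cais, fhi, early-warnings, miri-era, ...).
```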