AI Doomer Worldview
ID: doomer · Type: concept
Path: /knowledge-base/worldviews/doomer/
Entity ID (EID): E504

Page Record (database.json): merged from MDX frontmatter + Entity YAML + computed metrics at build time
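The merge step itself isn't shown on this page; as a rough illustration only, a build function along these lines could assemble the record below. The gray-matter and js-yaml dependencies, the file paths, and every field-handling detail here are assumptions, not the site's actual build code:

```ts
import { readFileSync } from "fs";
import matter from "gray-matter";
import { load } from "js-yaml";

interface PageRecord {
  id: string;
  path: string;
  title: string;
  [key: string]: unknown;
}

// Hypothetical sketch: merge MDX frontmatter, Entity YAML, and
// computed metrics into one page record, as the label above describes.
function buildPageRecord(mdxPath: string, entityYamlPath: string): PageRecord {
  // 1. MDX frontmatter supplies authored fields (title, description, ratings, ...).
  const { data: frontmatter, content } = matter(readFileSync(mdxPath, "utf8"));

  // 2. Entity YAML supplies entity-level fields (entityType, EID, ...).
  const entity = load(readFileSync(entityYamlPath, "utf8")) as Record<string, unknown>;

  // 3. Metrics are computed from the MDX body at build time
  //    (two toy examples; the real pipeline computes many more).
  const metrics = {
    wordCount: content.split(/\s+/).filter(Boolean).length,
    internalLinks: (content.match(/\]\(\/knowledge-base\//g) ?? []).length,
  };

  return { ...entity, ...frontmatter, metrics } as PageRecord;
}
```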
{
  "id": "doomer",
  "numericId": null,
  "path": "/knowledge-base/worldviews/doomer/",
  "filePath": "knowledge-base/worldviews/doomer.mdx",
  "title": "AI Doomer Worldview",
  "quality": 38,
  "readerImportance": 20.5,
  "researchImportance": 17.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive overview of the 'doomer' worldview on AI risk, characterized by 30-90% P(doom) estimates, 10-15 year AGI timelines, and belief that alignment is fundamentally hard. Documents core arguments (orthogonality thesis, instrumental convergence, one-shot problem), key proponents (Yudkowsky, MIRI), and prioritized interventions (agent foundations, pause advocacy, compute governance).",
  "description": "Short timelines, hard alignment, high risk.",
  "ratings": {
    "novelty": 2,
    "rigor": 3.5,
    "actionability": 3,
    "completeness": 6
  },
  "category": "worldviews",
  "subcategory": null,
  "clusters": [
    "ai-safety",
    "epistemics"
  ],
  "metrics": {
    "wordCount": 2185,
    "tableCount": 3,
    "diagramCount": 0,
    "internalLinks": 35,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.47,
    "sectionCount": 50,
    "hasOverview": true,
    "structuralScore": 10
  },
  "suggestedQuality": 67,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 2185,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 10,
  "backlinkCount": 3,
  "hallucinationRisk": {
    "level": "high",
    "score": 65,
    "factors": [
      "no-citations",
      "low-rigor-score",
      "low-quality-score",
      "few-external-sources",
      "conceptual-content"
    ]
  },
  "entityType": "concept",
  "redundancy": {
    "maxSimilarity": 20,
    "similarPages": [
      {
        "id": "case-for-xrisk",
        "title": "The Case FOR AI Existential Risk",
        "path": "/knowledge-base/debates/case-for-xrisk/",
        "similarity": 20
      },
      {
        "id": "governance-focused",
        "title": "Governance-Focused Worldview",
        "path": "/knowledge-base/worldviews/governance-focused/",
        "similarity": 20
      },
      {
        "id": "long-timelines",
        "title": "Long-Timelines Technical Worldview",
        "path": "/knowledge-base/worldviews/long-timelines/",
        "similarity": 20
      },
      {
        "id": "optimistic",
        "title": "Optimistic Alignment Worldview",
        "path": "/knowledge-base/worldviews/optimistic/",
        "similarity": 20
      },
      {
        "id": "why-alignment-easy",
        "title": "Why Alignment Might Be Easy",
        "path": "/knowledge-base/debates/why-alignment-easy/",
        "similarity": 17
      }
    ]
  },
  "coverage": {
    "passing": 6,
    "total": 13,
    "targets": {
      "tables": 9,
      "diagrams": 1,
      "internalLinks": 17,
      "externalLinks": 11,
      "footnotes": 7,
      "references": 7
    },
    "actuals": {
      "tables": 3,
      "diagrams": 0,
      "internalLinks": 35,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 10,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:2 R:3.5 A:3 C:6"
  },
  "readerRank": 520,
  "researchRank": 510,
  "recommendedScore": 107.95
}

External Links
No external links
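The empty external-links list feeds directly into the record's hallucination-risk factors ("no-citations", "few-external-sources"). As a purely illustrative sketch, a factor-weighted scorer like the one below would reproduce the record's score of 65; the weights are invented for this example, and the actual scoring rule is not shown on this page:

```ts
// Purely illustrative: these weights are chosen so the example reproduces
// the record's score of 65. The real weighting is not documented here.
const FACTOR_WEIGHTS: Record<string, number> = {
  "no-citations": 15,
  "low-rigor-score": 15,
  "low-quality-score": 10,
  "few-external-sources": 15,
  "conceptual-content": 10,
};

function hallucinationRisk(factors: string[]) {
  const score = factors.reduce((sum, f) => sum + (FACTOR_WEIGHTS[f] ?? 0), 0);
  // Assumed level bands: >= 60 high, >= 30 medium, else low.
  const level = score >= 60 ? "high" : score >= 30 ? "medium" : "low";
  return { level, score, factors };
}

// All five factors present => { level: "high", score: 65 }, matching the record.
console.log(hallucinationRisk(Object.keys(FACTOR_WEIGHTS)));
```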
Backlinks (3)
| id | title | type | relationship |
|---|---|---|---|
| worldview-intervention-mapping | Worldview-Intervention Mapping | analysis | — |
| agent-foundations | Agent Foundations | approach | — |
| __index__/knowledge-base/worldviews | Worldviews | concept | — |
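The coverage block in the record above maps each format target to a green/amber/red status. The thresholds aren't documented on this page, but one simple rule that reproduces every status in the record is: green at or above target, amber at or above one third of target, red below that. This is a guess consistent with the data, not the confirmed rule:

```ts
// Hypothetical reconstruction of the coverage status rule; the real
// thresholds are not shown on this page.
type Status = "green" | "amber" | "red";

function coverageStatus(actual: number, target: number): Status {
  if (actual >= target) return "green";
  if (actual >= target / 3) return "amber"; // assumed partial-credit threshold
  return "red";
}

const targets = { tables: 9, diagrams: 1, internalLinks: 17, externalLinks: 11, footnotes: 7, references: 7 };
const actuals = { tables: 3, diagrams: 0, internalLinks: 35, externalLinks: 0, footnotes: 0, references: 10 };

for (const key of Object.keys(targets) as (keyof typeof targets)[]) {
  console.log(key, coverageStatus(actuals[key], targets[key]));
}
// tables amber, diagrams red, internalLinks green,
// externalLinks red, footnotes red, references green — matching the record.
```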