The Case AGAINST AI Existential Risk
ID: case-against-xrisk
Type: argument
Path: /knowledge-base/debates/case-against-xrisk/
Entity ID (EID): E55
Page Record: database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "case-against-xrisk",
"numericId": null,
"path": "/knowledge-base/debates/case-against-xrisk/",
"filePath": "knowledge-base/debates/case-against-xrisk.mdx",
"title": "The Case AGAINST AI Existential Risk",
"quality": 58,
"readerImportance": 90,
"researchImportance": 93,
"tacticalValue": 58,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "Comprehensive synthesis of skeptical arguments against AI x-risk from prominent researchers (LeCun, Marcus, Ng, Brooks), concluding x-risk probability is <5% (likely ~2%) based on challenges to scaling continuation, alignment tractability, human control mechanisms, and methodological critiques of doom forecasts. Quantifies constraints on scaling (data exhaustion at 50-100T tokens, economic limits ~\\$10B training runs) and notes 76% of AAAI researchers doubt current approaches yield AGI.",
"description": "This analysis synthesizes the strongest skeptical arguments against AI existential risk. It presents positions from prominent researchers including Yann LeCun, Gary Marcus, and Andrew Ng, who argue that x-risk probability is under 1% due to scaling limitations, tractable alignment, and robust human control mechanisms.",
"ratings": {
"novelty": 4.2,
"rigor": 5.8,
"actionability": 3.5,
"completeness": 6.5
},
"category": "debates",
"subcategory": "formal-arguments",
"clusters": [
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 1748,
"tableCount": 9,
"diagramCount": 2,
"internalLinks": 35,
"externalLinks": 11,
"footnoteCount": 0,
"bulletRatio": 0.42,
"sectionCount": 50,
"hasOverview": false,
"structuralScore": 13
},
"suggestedQuality": 87,
"updateFrequency": 90,
"evergreen": true,
"wordCount": 1748,
"unconvertedLinks": [
{
"text": "Epoch AI estimates",
"url": "https://epochai.org/",
"resourceId": "120adc539e2fa558",
"resourceTitle": "Epoch AI"
},
{
"text": "Financial Times, 2024",
"url": "https://www.ft.com/",
"resourceId": "54ccb74b8312479b",
"resourceTitle": "FT AI Coverage"
}
],
"unconvertedLinkCount": 2,
"convertedLinkCount": 21,
"backlinkCount": 3,
"hallucinationRisk": {
"level": "medium",
"score": 45,
"factors": [
"no-citations",
"conceptual-content"
]
},
"entityType": "argument",
"redundancy": {
"maxSimilarity": 23,
"similarPages": [
{
"id": "case-for-xrisk",
"title": "The Case FOR AI Existential Risk",
"path": "/knowledge-base/debates/case-for-xrisk/",
"similarity": 23
},
{
"id": "why-alignment-easy",
"title": "Why Alignment Might Be Easy",
"path": "/knowledge-base/debates/why-alignment-easy/",
"similarity": 22
},
{
"id": "optimistic",
"title": "Optimistic Alignment Worldview",
"path": "/knowledge-base/worldviews/optimistic/",
"similarity": 20
},
{
"id": "why-alignment-hard",
"title": "Why Alignment Might Be Hard",
"path": "/knowledge-base/debates/why-alignment-hard/",
"similarity": 19
},
{
"id": "ai-timelines",
"title": "AI Timelines",
"path": "/knowledge-base/models/ai-timelines/",
"similarity": 18
}
]
},
"coverage": {
"passing": 8,
"total": 13,
"targets": {
"tables": 7,
"diagrams": 1,
"internalLinks": 14,
"externalLinks": 9,
"footnotes": 5,
"references": 5
},
"actuals": {
"tables": 9,
"diagrams": 2,
"internalLinks": 35,
"externalLinks": 11,
"footnotes": 0,
"references": 12,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "red",
"tables": "green",
"diagrams": "green",
"internalLinks": "green",
"externalLinks": "green",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:4.2 R:5.8 A:3.5 C:6.5"
},
"readerRank": 19,
"researchRank": 14,
"recommendedScore": 182.6
}
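The record above is what the build step emits into database.json. As a minimal sketch of how such a record might be read back and its coverage colours rechecked, the TypeScript below assumes database.json maps page ids to records shaped like the one shown; the field shapes and the pass/fail rule are assumptions, not the site's actual build code.

```typescript
// Minimal sketch, assuming database.json maps page ids to records shaped like the
// record above; this is NOT the site's actual build code.
import { readFileSync } from "node:fs";

type Colour = "green" | "red";

interface Coverage {
  targets: Record<string, number>;
  actuals: Record<string, number>;
  items: Record<string, Colour>;
}

interface PageRecord {
  id: string;
  title: string;
  quality: number;
  coverage: Coverage;
}

const db: Record<string, PageRecord> = JSON.parse(
  readFileSync("database.json", "utf8"),
);
const page = db["case-against-xrisk"];

// Assumption: a metric passes when its actual count meets or exceeds its target
// (e.g. tables 9/7 -> green, footnotes 0/5 -> red). Items such as quotes or
// accuracy may use different rules, so the stored colours are printed alongside.
for (const [metric, target] of Object.entries(page.coverage.targets)) {
  const actual = page.coverage.actuals[metric] ?? 0;
  const recomputed: Colour = actual >= target ? "green" : "red";
  console.log(
    `${metric}: ${actual}/${target} -> ${recomputed} (stored: ${page.coverage.items[metric] ?? "n/a"})`,
  );
}
```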
External Links

No external links
Backlinks (3)
| id | title | type | relationship |
|---|---|---|---|
| case-for-xrisk | The Case FOR AI Existential Risk | argument | — |
| __index__/knowledge-base/debates | Key Debates | concept | — |
| why-alignment-easy | Why Alignment Might Be Easy | argument | — |
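For reference, a hedged sketch of how a backlink table like the one above could be derived from the other records' internal links; the field name internalLinkTargets is hypothetical and stands in for whatever the real schema stores about converted internal links.

```typescript
// Minimal sketch, assuming each page record lists the ids it links to internally
// (the field name internalLinkTargets is hypothetical).
interface LinkingRecord {
  id: string;
  title: string;
  entityType: string;
  internalLinkTargets?: string[];
}

function backlinksFor(targetId: string, records: LinkingRecord[]) {
  return records
    .filter((r) => r.internalLinkTargets?.includes(targetId))
    .map((r) => ({ id: r.id, title: r.title, type: r.entityType }));
}

// Against this knowledge base, backlinksFor("case-against-xrisk", allRecords)
// should return the three rows listed in the backlinks table above.
```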