Epistemic Systemic Risk
epistemic-systemic-risk (risk)
Path: /knowledge-base/risks/epistemic-systemic-risk/
Entity ID (EID): E2073
Page Record: database.json, merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "epistemic-systemic-risk",
"wikiId": "E2073",
"path": "/knowledge-base/risks/epistemic-systemic-risk/",
"filePath": "knowledge-base/risks/epistemic-systemic-risk.mdx",
"title": "Epistemic Systemic Risk",
"quality": null,
"readerImportance": null,
"researchImportance": null,
"tacticalValue": null,
"contentFormat": "article",
"causalLevel": null,
"lastUpdated": "2026-03-25",
"dateCreated": "2026-03-25",
"summary": "This article synthesizes epistemic risk and systemic risk into a coherent 'epistemic systemic risk' concept, noting it remains an emerging, not-yet-formalized framework; it is intellectually serious and well-structured but offers limited actionable guidance and the AI safety connections remain speculative and underdeveloped.",
"description": "The risk that structural deficiencies in knowledge systems—flawed beliefs, information failures, or errors in collective reasoning—propagate through interconnected systems to produce cascading, potentially catastrophic outcomes.",
"ratings": null,
"category": "risks",
"subcategory": null,
"clusters": [
"ai-safety"
],
"metrics": {
"wordCount": 2475,
"tableCount": 1,
"diagramCount": 0,
"internalLinks": 7,
"externalLinks": 0,
"footnoteCount": 0,
"bulletRatio": 0.1,
"sectionCount": 21,
"hasOverview": true,
"structuralScore": 10
},
"suggestedQuality": 67,
"updateFrequency": null,
"evergreen": true,
"wordCount": 2475,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 0,
"backlinkCount": 0,
"hallucinationRisk": {
"level": "high",
"score": 70,
"factors": [
"no-citations",
"few-external-sources",
"mostly-unsourced-footnotes"
],
"integrityIssues": [
"mostly-unsourced-footnotes"
]
},
"entityType": "risk",
"redundancy": {
"maxSimilarity": 16,
"similarPages": [
{
"id": "epistemic-collapse-threshold",
"title": "Epistemic Collapse Threshold Model",
"path": "/knowledge-base/models/epistemic-collapse-threshold/",
"similarity": 16
},
{
"id": "ai-non-extremization-coordination",
"title": "AI Non-Extremization Coordination",
"path": "/knowledge-base/responses/ai-non-extremization-coordination/",
"similarity": 15
},
{
"id": "agentic-ai",
"title": "Agentic AI",
"path": "/knowledge-base/capabilities/agentic-ai/",
"similarity": 14
},
{
"id": "structural-risks",
"title": "AI Structural Risk Cruxes",
"path": "/knowledge-base/cruxes/structural-risks/",
"similarity": 14
},
{
"id": "miri-era",
"title": "The MIRI Era (2000-2015)",
"path": "/knowledge-base/history/miri-era/",
"similarity": 14
}
]
},
"coverage": {
"passing": 3,
"total": 13,
"targets": {
"tables": 10,
"diagrams": 1,
"internalLinks": 20,
"externalLinks": 12,
"footnotes": 7,
"references": 7
},
"actuals": {
"tables": 1,
"diagrams": 0,
"internalLinks": 7,
"externalLinks": 0,
"footnotes": 0,
"references": 0,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"summary": "green",
"schedule": "red",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "amber",
"diagrams": "red",
"internalLinks": "amber",
"externalLinks": "red",
"footnotes": "red",
"references": "red",
"quotes": "red",
"accuracy": "red"
}
},
"recommendedScore": 21.86
}
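The record above is described as database.json output merged at build time from MDX frontmatter, an Entity YAML file, and computed metrics. Below is a minimal TypeScript sketch of what such a merge could look like; every type name, the buildPageRecord helper, and the coverageStatus thresholds are illustrative assumptions, not the site's actual implementation.

// A minimal sketch of the build-time merge described above. All type and
// function names here are illustrative assumptions, not the site's code.

interface Frontmatter {
  id: string;
  title: string;
  summary?: string;
  description?: string;
  clusters?: string[];
  dateCreated?: string;
  lastUpdated?: string;
}

interface EntityYaml {
  wikiId: string;         // e.g. "E2073"
  entityType: string;     // e.g. "risk"
  category: string;       // e.g. "risks"
  subcategory?: string | null;
}

interface ComputedMetrics {
  wordCount: number;
  tableCount: number;
  diagramCount: number;
  internalLinks: number;
  externalLinks: number;
  footnoteCount: number;
  sectionCount: number;
  hasOverview: boolean;
  structuralScore: number;
}

interface PageRecord extends Frontmatter, EntityYaml {
  path: string;
  filePath: string;
  metrics: ComputedMetrics;
}

// Merge the three sources into one record; the derived paths follow the
// /knowledge-base/<category>/<id>/ pattern visible in the record above.
function buildPageRecord(
  frontmatter: Frontmatter,
  entity: EntityYaml,
  metrics: ComputedMetrics,
): PageRecord {
  return {
    ...frontmatter,
    ...entity,
    path: `/knowledge-base/${entity.category}/${frontmatter.id}/`,
    filePath: `knowledge-base/${entity.category}/${frontmatter.id}.mdx`,
    metrics,
  };
}

// Illustrative traffic-light rule for the coverage items; the real
// thresholds are not documented here and may differ.
type CoverageStatus = "green" | "amber" | "red";

function coverageStatus(actual: number, target: number): CoverageStatus {
  if (actual >= target) return "green";
  if (actual > 0) return "amber";
  return "red";
}

Applied to the numbers above, this rule reproduces the listed statuses (tables 1 of 10 gives amber, externalLinks 0 of 12 gives red), though the scoring that produces recommendedScore and suggestedQuality is not shown in the record.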
External Links
No external links
Backlinks (0)
No backlinks