Political Stability as an AI Safety Factor
political-stability-as-an-ai-safety-factor (analysis)
Path: /knowledge-base/models/political-stability-as-an-ai-safety-factor/
Entity ID (EID): E2067
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "political-stability-as-an-ai-safety-factor",
"wikiId": "E2067",
"path": "/knowledge-base/models/political-stability-as-an-ai-safety-factor/",
"filePath": "knowledge-base/models/political-stability-as-an-ai-safety-factor.mdx",
"title": "Political Stability as an AI Safety Factor",
"quality": null,
"readerImportance": null,
"researchImportance": null,
"tacticalValue": null,
"contentFormat": "article",
"causalLevel": null,
"lastUpdated": "2026-03-25",
"dateCreated": "2026-03-25",
"summary": "This article synthesizes the relationship between political stability and AI safety across military, governance, and public trust dimensions, identifying key risk pathways (automation bias, racing dynamics, authoritarian consolidation) and noting significant governance gaps; it is comprehensive but lacks URLs for all 16 cited sources, limiting verifiability.",
"description": "An analysis of how geopolitical and domestic political environments shape AI safety outcomes, particularly through their influence on military AI risks, international governance, and the conditions for effective oversight.",
"ratings": null,
"category": "models",
"subcategory": null,
"clusters": [
"ai-safety"
],
"metrics": {
"wordCount": 3423,
"tableCount": 1,
"diagramCount": 0,
"internalLinks": 2,
"externalLinks": 0,
"footnoteCount": 0,
"bulletRatio": 0.06,
"sectionCount": 21,
"hasOverview": true,
"structuralScore": 9
},
"suggestedQuality": 60,
"updateFrequency": null,
"evergreen": true,
"wordCount": 3423,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 0,
"backlinkCount": 0,
"hallucinationRisk": {
"level": "high",
"score": 70,
"factors": [
"no-citations",
"few-external-sources",
"mostly-unsourced-footnotes"
],
"integrityIssues": [
"mostly-unsourced-footnotes"
]
},
"entityType": "analysis",
"redundancy": {
"maxSimilarity": 19,
"similarPages": [
{
"id": "ai-enabled-political-polarization",
"title": "AI-Enabled Political Polarization",
"path": "/knowledge-base/risks/ai-enabled-political-polarization/",
"similarity": 19
},
{
"id": "failed-stalled-proposals",
"title": "Failed and Stalled AI Policy Proposals",
"path": "/knowledge-base/responses/failed-stalled-proposals/",
"similarity": 18
},
{
"id": "structural-risks",
"title": "AI Structural Risk Cruxes",
"path": "/knowledge-base/cruxes/structural-risks/",
"similarity": 17
},
{
"id": "authoritarian-tools-diffusion",
"title": "Authoritarian Tools Diffusion Model",
"path": "/knowledge-base/models/authoritarian-tools-diffusion/",
"similarity": 17
},
{
"id": "international-regimes",
"title": "International Compute Regimes",
"path": "/knowledge-base/responses/international-regimes/",
"similarity": 17
}
]
},
"coverage": {
"passing": 3,
"total": 13,
"targets": {
"tables": 14,
"diagrams": 1,
"internalLinks": 27,
"externalLinks": 17,
"footnotes": 10,
"references": 10
},
"actuals": {
"tables": 1,
"diagrams": 0,
"internalLinks": 2,
"externalLinks": 0,
"footnotes": 0,
"references": 0,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"summary": "green",
"schedule": "red",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "amber",
"diagrams": "red",
"internalLinks": "amber",
"externalLinks": "red",
"footnotes": "red",
"references": "red",
"quotes": "red",
"accuracy": "red"
}
},
"recommendedScore": 21.97
}
External Links
No external links
Backlinks (0)
No backlinks