Governance-Focused Worldview
ID: governance-focused
Entity type: concept
Entity ID (EID): E397
Path: /knowledge-base/worldviews/governance-focused/

Page Record
Source: database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time.
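As a rough illustration of that pipeline, a build step along the following lines could produce such a record. This is a minimal sketch only: `buildPageRecord`, `computeMetrics`, and the precedence rules are assumptions for illustration, not the site's actual build code.

```typescript
// Hypothetical sketch of the build-time merge described above.
// Function and field names are assumptions, not the site's real code.

interface PageRecord {
  id: string;
  title: string;
  entityType: string;
  metrics: Record<string, number | boolean>;
  [key: string]: unknown;
}

// Assumed helper: derive the computed metrics from the rendered MDX body.
function computeMetrics(body: string) {
  const words = body.split(/\s+/).filter(Boolean);
  const lines = body.split("\n");
  const bullets = lines.filter((l) => /^\s*[-*]\s/.test(l)).length;
  return {
    wordCount: words.length,                               // e.g. 3927 below
    bulletRatio: lines.length ? bullets / lines.length : 0, // e.g. 0.45 below
    hasOverview: /^#+\s*Overview\b/im.test(body),
  };
}

function buildPageRecord(
  frontmatter: Record<string, unknown>, // parsed from the .mdx file
  entityYaml: Record<string, unknown>,  // per-entity YAML (e.g. EID, entityType)
  body: string                          // rendered MDX body text
): PageRecord {
  // Assumed precedence on key conflicts: frontmatter < entity YAML < computed.
  const metrics = computeMetrics(body);
  return { ...frontmatter, ...entityYaml, metrics } as PageRecord;
}
```

The full merged record for this page: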
```json
{
  "id": "governance-focused",
  "numericId": null,
  "path": "/knowledge-base/worldviews/governance-focused/",
  "filePath": "knowledge-base/worldviews/governance-focused.mdx",
  "title": "Governance-Focused Worldview",
  "quality": 67,
  "readerImportance": 67,
  "researchImportance": 18.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "This worldview argues governance/coordination is the bottleneck for AI safety (not just technical solutions), estimating 10-30% P(doom) by 2100. Evidence includes: compute export controls reduced Huawei AI chip production 80-85%, 85% of DC AI lobbyists represent industry, US federal AI regulations doubled in 2024 (59 vs 29), and historical precedent shows technology governance can work (NPT prevented Kennedy's predicted 25-30 nuclear states, only 9 exist).",
  "description": "This worldview holds that technical AI safety solutions require policy, coordination, and institutional change to be effectively adopted, estimating 10-30% existential risk by 2100. Evidence shows 85% of AI lobbyists represent industry, labs face structural racing dynamics, and governance interventions like the EU AI Act and compute export controls can meaningfully shape outcomes.",
  "ratings": {
    "novelty": 4.2,
    "rigor": 6.8,
    "actionability": 7.3,
    "completeness": 7.5
  },
  "category": "worldviews",
  "subcategory": null,
  "clusters": [
    "epistemics",
    "governance",
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 3927,
    "tableCount": 9,
    "diagramCount": 1,
    "internalLinks": 46,
    "externalLinks": 2,
    "footnoteCount": 0,
    "bulletRatio": 0.45,
    "sectionCount": 70,
    "hasOverview": true,
    "structuralScore": 12
  },
  "suggestedQuality": 80,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 3927,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 39,
  "backlinkCount": 3,
  "hallucinationRisk": {
    "level": "medium",
    "score": 45,
    "factors": [
      "no-citations",
      "conceptual-content"
    ]
  },
  "entityType": "concept",
  "redundancy": {
    "maxSimilarity": 21,
    "similarPages": [
      {
        "id": "governance-policy",
        "title": "AI Governance and Policy",
        "path": "/knowledge-base/responses/governance-policy/",
        "similarity": 21
      },
      {
        "id": "doomer",
        "title": "AI Doomer Worldview",
        "path": "/knowledge-base/worldviews/doomer/",
        "similarity": 20
      },
      {
        "id": "optimistic",
        "title": "Optimistic Alignment Worldview",
        "path": "/knowledge-base/worldviews/optimistic/",
        "similarity": 20
      },
      {
        "id": "structural-risks",
        "title": "AI Structural Risk Cruxes",
        "path": "/knowledge-base/cruxes/structural-risks/",
        "similarity": 19
      },
      {
        "id": "us-aisi",
        "title": "US AI Safety Institute",
        "path": "/knowledge-base/organizations/us-aisi/",
        "similarity": 19
      }
    ]
  },
  "coverage": {
    "passing": 6,
    "total": 13,
    "targets": {
      "tables": 16,
      "diagrams": 2,
      "internalLinks": 31,
      "externalLinks": 20,
      "footnotes": 12,
      "references": 12
    },
    "actuals": {
      "tables": 9,
      "diagrams": 1,
      "internalLinks": 46,
      "externalLinks": 2,
      "footnotes": 0,
      "references": 34,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "amber",
      "diagrams": "amber",
      "internalLinks": "green",
      "externalLinks": "amber",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:4.2 R:6.8 A:7.3 C:7.5"
  },
  "readerRank": 182,
  "researchRank": 504,
  "recommendedScore": 189.36
}
```
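Within the record, the coverage block grades each structural item against a per-format target. For the six counted items that have explicit targets (tables, diagrams, internalLinks, externalLinks, footnotes, references), the statuses above are consistent with a simple threshold rule: green when the actual count meets its target, red when nothing is present, amber in between. This rule is inferred from the data, not confirmed; binary items like editHistory or quotes presumably follow separate checks.

```typescript
// Inferred from the targets/actuals/items triples above; the site's
// actual grading logic is not documented here, so treat this as a guess.
type Status = "green" | "amber" | "red";

function gradeItem(actual: number, target: number): Status {
  if (actual >= target) return "green"; // target met or exceeded
  if (actual === 0) return "red";       // nothing present at all
  return "amber";                       // partial progress
}

// Reproduces the counted items in the record above:
gradeItem(46, 31); // internalLinks -> "green"
gradeItem(9, 16);  // tables        -> "amber"
gradeItem(0, 12);  // footnotes     -> "red"
```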
External Links

```json
{
  "lesswrong": "https://www.lesswrong.com/tag/ai-governance",
  "eaForum": "https://forum.effectivealtruism.org/topics/ai-governance"
}
```

Backlinks (3)

| id | title | type | relationship |
|---|---|---|---|
| worldview-intervention-mapping | Worldview-Intervention Mapping | analysis | — |
| agent-foundations | Agent Foundations | approach | — |
| __index__/knowledge-base/worldviews | Worldviews | concept | — |