Regulatory Capture Risks in AI
regulatory-capture-risks-in-ai · analysis
Path: /knowledge-base/models/regulatory-capture-risks-in-ai/
Entity ID (EID): E2265
Page Record (database.json): merged from MDX frontmatter, Entity YAML, and computed metrics at build time.
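A minimal sketch of how a record like this could be assembled at build time. The helper names, package choices (gray-matter, yaml), and merge order below are illustrative assumptions, not the site's actual build code.

```typescript
import { readFileSync } from "node:fs";
import matter from "gray-matter";          // frontmatter parser (assumed choice)
import { parse as parseYaml } from "yaml"; // Entity YAML parser (assumed choice)

// Abridged shape of the merged record shown below.
interface PageRecord {
  id: string;
  wikiId?: string;
  title?: string;
  wordCount: number;
  metrics: { wordCount: number; internalLinks: number; externalLinks: number };
  [key: string]: unknown;
}

// Crude stand-ins for the computed metrics; the real build evidently counts
// tables, diagrams, footnotes, bulletRatio, structuralScore, and more.
function computeMetrics(body: string) {
  return {
    wordCount: body.split(/\s+/).filter(Boolean).length,
    internalLinks: (body.match(/\]\(\/knowledge-base\//g) ?? []).length,
    externalLinks: (body.match(/\]\(https?:\/\//g) ?? []).length,
  };
}

function buildPageRecord(mdxPath: string, entityYamlPath: string): PageRecord {
  // 1. MDX frontmatter: id, title, summary, dates, category, clusters, ...
  const { data: frontmatter, content } = matter(readFileSync(mdxPath, "utf8"));
  // 2. Entity YAML: wikiId (EID), entityType, and other entity-level fields.
  const entity = parseYaml(readFileSync(entityYamlPath, "utf8"));
  // 3. Computed metrics, merged last so they are never overwritten.
  const metrics = computeMetrics(content);
  return { ...frontmatter, ...entity, metrics, wordCount: metrics.wordCount } as PageRecord;
}
```

Fields such as quality, ratings, and recommendedScore presumably come from later pipeline stages, which would explain why several appear as null in the record below.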
{
"id": "regulatory-capture-risks-in-ai",
"wikiId": "E2265",
"path": "/knowledge-base/models/regulatory-capture-risks-in-ai/",
"filePath": "knowledge-base/models/regulatory-capture-risks-in-ai.mdx",
"title": "Regulatory Capture Risks in AI",
"quality": null,
"readerImportance": null,
"researchImportance": null,
"tacticalValue": null,
"contentFormat": "article",
"causalLevel": null,
"lastUpdated": "2026-04-12",
"dateCreated": "2026-04-12",
"summary": "A well-structured analysis of regulatory capture risks in AI governance, identifying four classic mechanisms (revolving door, lobbying, complexity moats, industry-written rules) with AI-specific evidence and historical analogues; concludes that capture poses a distinct AI safety concern by hollowing out oversight capacity, though causal evidence remains largely qualitative and contested.",
"description": "Analysis of how dominant AI companies influence regulators to protect market dominance through revolving doors, lobbying, complexity moats, and industry-written rules—covering mechanisms, evidence, historical analogues, and proposed reforms.",
"ratings": null,
"category": "models",
"subcategory": null,
"clusters": [
"ai-safety"
],
"metrics": {
"wordCount": 3237,
"tableCount": 4,
"diagramCount": 0,
"internalLinks": 14,
"externalLinks": 1,
"footnoteCount": 0,
"bulletRatio": 0.05,
"sectionCount": 25,
"hasOverview": true,
"structuralScore": 12
},
"suggestedQuality": 80,
"updateFrequency": null,
"evergreen": true,
"wordCount": 3237,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 0,
"backlinkCount": 0,
"hallucinationRisk": {
"level": "medium",
"score": 60,
"factors": [
"no-citations",
"few-external-sources"
]
},
"entityType": "analysis",
"redundancy": {
"maxSimilarity": 18,
"similarPages": [
{
"id": "ai-governance-effectiveness-analysis",
"title": "AI Governance Effectiveness Analysis",
"path": "/knowledge-base/models/ai-governance-effectiveness-analysis/",
"similarity": 18
},
{
"id": "whistleblower-dynamics",
"title": "Whistleblower Dynamics Model",
"path": "/knowledge-base/models/whistleblower-dynamics/",
"similarity": 18
},
{
"id": "us-aisi",
"title": "US AI Safety Institute (now CAISI)",
"path": "/knowledge-base/organizations/us-aisi/",
"similarity": 18
},
{
"id": "failed-stalled-proposals",
"title": "Failed and Stalled AI Policy Proposals",
"path": "/knowledge-base/responses/failed-stalled-proposals/",
"similarity": 18
},
{
"id": "governance-overview",
"title": "AI Governance & Policy (Overview)",
"path": "/knowledge-base/responses/governance-overview/",
"similarity": 18
}
]
},
"coverage": {
"passing": 3,
"total": 13,
"targets": {
"tables": 13,
"diagrams": 1,
"internalLinks": 26,
"externalLinks": 16,
"footnotes": 10,
"references": 10
},
"actuals": {
"tables": 4,
"diagrams": 0,
"internalLinks": 14,
"externalLinks": 1,
"footnotes": 0,
"references": 0,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"summary": "green",
"schedule": "red",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "amber",
"diagrams": "red",
"internalLinks": "amber",
"externalLinks": "amber",
"footnotes": "red",
"references": "red",
"quotes": "red",
"accuracy": "red"
}
},
"recommendedScore": 21.81
}
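The coverage block in the record pairs numeric targets with actuals and rolls them up into green/amber/red items plus a passing count. The rule below is a hedged reconstruction inferred from the values shown (it reproduces them exactly), not the site's actual scoring code.

```typescript
type Status = "green" | "amber" | "red";

// Numeric checks (tables, diagrams, internalLinks, externalLinks, footnotes,
// references): meeting the target is green, anything above zero is amber,
// and zero is red. Thresholds inferred from the data, not confirmed.
function coverageStatus(actual: number, target: number): Status {
  if (actual >= target) return "green";
  if (actual > 0) return "amber";
  return "red";
}

// The remaining checks (summary, schedule, entity, editHistory, overview,
// quotes, accuracy) look like simple presence checks.
function presenceStatus(present: boolean): Status {
  return present ? "green" : "red";
}

// "passing" appears to be the number of green items out of all 13 checks.
function passingCount(items: Record<string, Status>): { passing: number; total: number } {
  const statuses = Object.values(items);
  return {
    passing: statuses.filter((s) => s === "green").length,
    total: statuses.length,
  };
}

// Applied to the targets/actuals above: tables 4/13 -> amber, diagrams 0/1 -> red,
// internalLinks 14/26 -> amber, externalLinks 1/16 -> amber, footnotes 0/10 -> red,
// references 0/10 -> red; summary, entity, and overview are the three greens,
// giving passing = 3 of 13.
```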
External Links
No external links

Backlinks (0)
No backlinks