Longterm Wiki

AI Governance and Policy

governance-policy · crux · Path: /knowledge-base/responses/governance-policy/
E154 · Entity ID (EID)
← Back to page · 12 backlinks · Quality: 66 · Updated: 2026-03-12
Page Recorddatabase.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "governance-policy",
  "numericId": null,
  "path": "/knowledge-base/responses/governance-policy/",
  "filePath": "knowledge-base/responses/governance-policy.mdx",
  "title": "AI Governance and Policy",
  "quality": 66,
  "readerImportance": 65,
  "researchImportance": 29,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-12",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive analysis of AI governance mechanisms estimating 30-50% probability of meaningful regulation by 2027 and 5-25% x-risk reduction potential through coordinated international approaches. Documents EU AI Act implementation (€400M enforcement budget), RSP adoption across 60-80% of frontier labs, and current investment of $150-300M/year globally with 500-1,000 dedicated professionals.",
  "description": "Comprehensive framework covering international coordination, national regulation, and industry standards - with 30-50% chance of meaningful regulation by 2027 and potential 5-25% x-risk reduction through coordinated governance approaches. Analysis includes EU AI Act implementation, US Executive Order impacts, and RSP effectiveness data.",
  "ratings": {
    "novelty": 4.5,
    "rigor": 6.8,
    "actionability": 7.2,
    "completeness": 7.5
  },
  "category": "responses",
  "subcategory": "governance",
  "clusters": [
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 3124,
    "tableCount": 7,
    "diagramCount": 1,
    "internalLinks": 85,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.39,
    "sectionCount": 40,
    "hasOverview": true,
    "structuralScore": 11
  },
  "suggestedQuality": 73,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 3124,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 71,
  "backlinkCount": 12,
  "hallucinationRisk": {
    "level": "medium",
    "score": 50,
    "factors": [
      "no-citations",
      "few-external-sources",
      "conceptual-content"
    ]
  },
  "entityType": "crux",
  "redundancy": {
    "maxSimilarity": 21,
    "similarPages": [
      {
        "id": "governance-focused",
        "title": "Governance-Focused Worldview",
        "path": "/knowledge-base/worldviews/governance-focused/",
        "similarity": 21
      },
      {
        "id": "international-regimes",
        "title": "International Compute Regimes",
        "path": "/knowledge-base/responses/international-regimes/",
        "similarity": 20
      },
      {
        "id": "international-summits",
        "title": "International AI Safety Summits",
        "path": "/knowledge-base/responses/international-summits/",
        "similarity": 20
      },
      {
        "id": "structural-risks",
        "title": "AI Structural Risk Cruxes",
        "path": "/knowledge-base/cruxes/structural-risks/",
        "similarity": 19
      },
      {
        "id": "coordination-mechanisms",
        "title": "International Coordination Mechanisms",
        "path": "/knowledge-base/responses/coordination-mechanisms/",
        "similarity": 19
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 12,
      "diagrams": 1,
      "internalLinks": 25,
      "externalLinks": 16,
      "footnotes": 9,
      "references": 9
    },
    "actuals": {
      "tables": 7,
      "diagrams": 1,
      "internalLinks": 85,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 60,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "amber",
      "diagrams": "green",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:4.5 R:6.8 A:7.2 C:7.5"
  },
  "readerRank": 196,
  "researchRank": 424,
  "recommendedScore": 186.23
}
External Links
{
  "lesswrong": "https://www.lesswrong.com/tag/ai-governance",
  "eaForum": "https://forum.effectivealtruism.org/topics/ai-governance"
}
Backlinks (12)
id · title · type · relationship
anthropic-government-standoff · Anthropic-Pentagon Standoff (2026) · event
compute-governance · Compute Governance · policy
80000-hours · 80,000 Hours · organization
anthropic-investors · Anthropic (Funder) · analysis
coefficient-giving · Coefficient Giving · organization
ai-control · AI Control · safety-agenda
governance-overview · AI Governance & Policy (Overview) · concept
state-capacity-ai-governance · State Capacity and AI Governance · concept
deceptive-alignment · Deceptive Alignment · risk
fraud · AI-Powered Fraud · risk
lock-in · AI Value Lock-in · risk
structural-overview · Structural Risks (Overview) · concept
Longterm Wiki