Longterm Wiki

GovAI

govai · organization · Path: /knowledge-base/organizations/govai/
E153 — Entity ID (EID)
← Back to page · 27 backlinks · Quality: 43 · Updated: 2026-03-13
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "govai",
  "numericId": null,
  "path": "/knowledge-base/organizations/govai/",
  "filePath": "knowledge-base/organizations/govai.mdx",
  "title": "GovAI",
  "quality": 43,
  "readerImportance": 50.5,
  "researchImportance": 55.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "GovAI is an AI policy research organization with ~15-20 staff, funded primarily by Coefficient Giving (\\$1.8M+ in 2023-2024), that has trained 100+ governance researchers through fellowships and currently holds Vice-Chair position in EU GPAI Code drafting. Their compute governance research has influenced regulatory thresholds across US, UK, and EU, with alumni now occupying key positions in frontier labs, think tanks, and government.",
  "description": "The Centre for the Governance of AI is a leading AI policy research organization that has shaped compute governance frameworks, trained 100+ AI governance researchers, and now directly influences EU AI Act implementation through Vice-Chair roles in GPAI Code drafting.",
  "ratings": {
    "novelty": 3.5,
    "rigor": 5,
    "actionability": 4,
    "completeness": 6.5
  },
  "category": "organizations",
  "subcategory": "safety-orgs",
  "clusters": [
    "ai-safety",
    "governance",
    "community"
  ],
  "metrics": {
    "wordCount": 1688,
    "tableCount": 14,
    "diagramCount": 1,
    "internalLinks": 10,
    "externalLinks": 7,
    "footnoteCount": 0,
    "bulletRatio": 0.08,
    "sectionCount": 24,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 1688,
  "unconvertedLinks": [
    {
      "text": "GovAI Homepage",
      "url": "https://www.governance.ai/",
      "resourceId": "f35c467b353f990f",
      "resourceTitle": "GovAI"
    }
  ],
  "unconvertedLinkCount": 1,
  "convertedLinkCount": 0,
  "backlinkCount": 27,
  "hallucinationRisk": {
    "level": "high",
    "score": 75,
    "factors": [
      "biographical-claims",
      "no-citations"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 16,
    "similarPages": [
      {
        "id": "cset",
        "title": "CSET (Center for Security and Emerging Technology)",
        "path": "/knowledge-base/organizations/cset/",
        "similarity": 16
      },
      {
        "id": "safety-orgs-overview",
        "title": "AI Safety Organizations (Overview)",
        "path": "/knowledge-base/organizations/safety-orgs-overview/",
        "similarity": 13
      },
      {
        "id": "training-programs",
        "title": "AI Safety Training Programs",
        "path": "/knowledge-base/responses/training-programs/",
        "similarity": 13
      },
      {
        "id": "safety-research-allocation",
        "title": "Safety Research Allocation Model",
        "path": "/knowledge-base/models/safety-research-allocation/",
        "similarity": 12
      },
      {
        "id": "cais",
        "title": "CAIS (Center for AI Safety)",
        "path": "/knowledge-base/organizations/cais/",
        "similarity": 12
      }
    ]
  },
  "coverage": {
    "passing": 6,
    "total": 13,
    "targets": {
      "tables": 7,
      "diagrams": 1,
      "internalLinks": 14,
      "externalLinks": 8,
      "footnotes": 5,
      "references": 5
    },
    "actuals": {
      "tables": 14,
      "diagrams": 1,
      "internalLinks": 10,
      "externalLinks": 7,
      "footnotes": 0,
      "references": 1,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "amber",
      "externalLinks": "amber",
      "footnotes": "red",
      "references": "amber",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:3.5 R:5 A:4 C:6.5"
  },
  "readerRank": 300,
  "researchRank": 253,
  "recommendedScore": 132.83
}
External Links
{
  "eaForum": "https://forum.effectivealtruism.org/topics/centre-for-the-governance-of-ai"
}
Backlinks (27)
id · title · type · relationship
governance-policy · AI Governance and Policy · crux
compute-governance · Compute Governance · policy
eu-ai-act · EU AI Act · policy
racing-dynamics · AI Development Racing Dynamics · risk
accident-risks · AI Accident Risk Cruxes · crux
ai-risk-portfolio-analysis · AI Risk Portfolio Analysis · analysis
capability-alignment-race · Capability-Alignment Race Model · analysis
deceptive-alignment-decomposition · Deceptive Alignment Decomposition Model · analysis
intervention-effectiveness-matrix · Intervention Effectiveness Matrix · analysis
risk-interaction-matrix · Risk Interaction Matrix Model · analysis
cais · CAIS (Center for AI Safety) · organization
cea · Centre for Effective Altruism · organization
conjecture · Conjecture · organization
cset · CSET (Center for Security and Emerging Technology) · organization
far-ai · FAR AI · organization
fhi · Future of Humanity Institute (FHI) · organization
__index__/knowledge-base/organizations · Organizations · concept
lionheart-ventures · Lionheart Ventures · organization
longview-philanthropy · Longview Philanthropy · organization
mats · MATS ML Alignment Theory Scholars program · organization
safety-orgs-overview · AI Safety Organizations (Overview) · concept
sff · Survival and Flourishing Fund (SFF) · organization
swift-centre · Swift Centre · organization
dario-amodei · Dario Amodei · person
structured-access · Structured Access / API-Only · approach
thresholds · Compute Thresholds · policy
__index__/knowledge-base/worldviews · Worldviews · concept
Longterm Wiki