GovAI
govai · organization · Path: /knowledge-base/organizations/govai/
E153 — Entity ID (EID)
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "govai",
"numericId": null,
"path": "/knowledge-base/organizations/govai/",
"filePath": "knowledge-base/organizations/govai.mdx",
"title": "GovAI",
"quality": 43,
"readerImportance": 50.5,
"researchImportance": 55.5,
"tacticalValue": null,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "GovAI is an AI policy research organization with ~15-20 staff, funded primarily by Coefficient Giving ($1.8M+ in 2023-2024), that has trained 100+ governance researchers through fellowships and currently holds Vice-Chair position in EU GPAI Code drafting. Their compute governance research has influenced regulatory thresholds across US, UK, and EU, with alumni now occupying key positions in frontier labs, think tanks, and government.",
"description": "The Centre for the Governance of AI is a leading AI policy research organization that has shaped compute governance frameworks, trained 100+ AI governance researchers, and now directly influences EU AI Act implementation through Vice-Chair roles in GPAI Code drafting.",
"ratings": {
"novelty": 3.5,
"rigor": 5,
"actionability": 4,
"completeness": 6.5
},
"category": "organizations",
"subcategory": "safety-orgs",
"clusters": [
"ai-safety",
"governance",
"community"
],
"metrics": {
"wordCount": 1688,
"tableCount": 14,
"diagramCount": 1,
"internalLinks": 10,
"externalLinks": 7,
"footnoteCount": 0,
"bulletRatio": 0.08,
"sectionCount": 24,
"hasOverview": true,
"structuralScore": 15
},
"suggestedQuality": 100,
"updateFrequency": 21,
"evergreen": true,
"wordCount": 1688,
"unconvertedLinks": [
{
"text": "GovAI Homepage",
"url": "https://www.governance.ai/",
"resourceId": "f35c467b353f990f",
"resourceTitle": "GovAI"
}
],
"unconvertedLinkCount": 1,
"convertedLinkCount": 0,
"backlinkCount": 27,
"hallucinationRisk": {
"level": "high",
"score": 75,
"factors": [
"biographical-claims",
"no-citations"
]
},
"entityType": "organization",
"redundancy": {
"maxSimilarity": 16,
"similarPages": [
{
"id": "cset",
"title": "CSET (Center for Security and Emerging Technology)",
"path": "/knowledge-base/organizations/cset/",
"similarity": 16
},
{
"id": "safety-orgs-overview",
"title": "AI Safety Organizations (Overview)",
"path": "/knowledge-base/organizations/safety-orgs-overview/",
"similarity": 13
},
{
"id": "training-programs",
"title": "AI Safety Training Programs",
"path": "/knowledge-base/responses/training-programs/",
"similarity": 13
},
{
"id": "safety-research-allocation",
"title": "Safety Research Allocation Model",
"path": "/knowledge-base/models/safety-research-allocation/",
"similarity": 12
},
{
"id": "cais",
"title": "CAIS (Center for AI Safety)",
"path": "/knowledge-base/organizations/cais/",
"similarity": 12
}
]
},
"coverage": {
"passing": 6,
"total": 13,
"targets": {
"tables": 7,
"diagrams": 1,
"internalLinks": 14,
"externalLinks": 8,
"footnotes": 5,
"references": 5
},
"actuals": {
"tables": 14,
"diagrams": 1,
"internalLinks": 10,
"externalLinks": 7,
"footnotes": 0,
"references": 1,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "green",
"diagrams": "green",
"internalLinks": "amber",
"externalLinks": "amber",
"footnotes": "red",
"references": "amber",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:3.5 R:5 A:4 C:6.5"
},
"readerRank": 300,
"researchRank": 253,
"recommendedScore": 132.83
}
External Links
{
"eaForum": "https://forum.effectivealtruism.org/topics/centre-for-the-governance-of-ai"
}
Backlinks (27)
| id | title | type | relationship |
|---|---|---|---|
| governance-policy | AI Governance and Policy | crux | — |
| compute-governance | Compute Governance | policy | — |
| eu-ai-act | EU AI Act | policy | — |
| racing-dynamics | AI Development Racing Dynamics | risk | — |
| accident-risks | AI Accident Risk Cruxes | crux | — |
| ai-risk-portfolio-analysis | AI Risk Portfolio Analysis | analysis | — |
| capability-alignment-race | Capability-Alignment Race Model | analysis | — |
| deceptive-alignment-decomposition | Deceptive Alignment Decomposition Model | analysis | — |
| intervention-effectiveness-matrix | Intervention Effectiveness Matrix | analysis | — |
| risk-interaction-matrix | Risk Interaction Matrix Model | analysis | — |
| cais | CAIS (Center for AI Safety) | organization | — |
| cea | Centre for Effective Altruism | organization | — |
| conjecture | Conjecture | organization | — |
| cset | CSET (Center for Security and Emerging Technology) | organization | — |
| far-ai | FAR AI | organization | — |
| fhi | Future of Humanity Institute (FHI) | organization | — |
| __index__/knowledge-base/organizations | Organizations | concept | — |
| lionheart-ventures | Lionheart Ventures | organization | — |
| longview-philanthropy | Longview Philanthropy | organization | — |
| mats | MATS ML Alignment Theory Scholars program | organization | — |
| safety-orgs-overview | AI Safety Organizations (Overview) | concept | — |
| sff | Survival and Flourishing Fund (SFF) | organization | — |
| swift-centre | Swift Centre | organization | — |
| dario-amodei | Dario Amodei | person | — |
| structured-access | Structured Access / API-Only | approach | — |
| thresholds | Compute Thresholds | policy | — |
| __index__/knowledge-base/worldviews | Worldviews | concept | — |