AI Safety Institutes
ai-safety-institutes (policy)
Path: /knowledge-base/responses/ai-safety-institutes/
Entity ID (EID): E13

Page Record
database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "ai-safety-institutes",
  "numericId": null,
  "path": "/knowledge-base/responses/ai-safety-institutes/",
  "filePath": "knowledge-base/responses/ai-safety-institutes.mdx",
  "title": "AI Safety Institutes",
  "quality": 69,
  "readerImportance": 63.5,
  "researchImportance": 39.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Analysis of government AI Safety Institutes finding they've achieved rapid institutional growth (UK: 0→100+ staff in 18 months) and secured pre-deployment access to frontier models, but face critical constraints: advisory-only authority, 10-100x resource mismatch vs labs (dozens-to-hundreds staff vs thousands; \\$10M-\\$66M vs billions), and regulatory capture risks from voluntary access agreements. Effectiveness rated as uncertain due to inability to compel action despite identifying safety concerns.",
  "description": "Government-affiliated technical institutions evaluating frontier AI systems, with the UK/US institutes having secured pre-deployment access to models from major labs. Analysis finds AISIs address critical information asymmetry but face constraints including limited enforcement authority, resource mismatches (100+ staff vs. thousands at labs), and independence concerns from industry relationships.",
  "ratings": {
    "novelty": 5.5,
    "rigor": 7,
    "actionability": 6.5,
    "completeness": 7.5
  },
  "category": "responses",
  "subcategory": "institutions",
  "clusters": [
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 4178,
    "tableCount": 5,
    "diagramCount": 1,
    "internalLinks": 41,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.13,
    "sectionCount": 33,
    "hasOverview": true,
    "structuralScore": 12
  },
  "suggestedQuality": 80,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 4178,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 37,
  "backlinkCount": 39,
  "hallucinationRisk": {
    "level": "medium",
    "score": 45,
    "factors": [
      "no-citations",
      "few-external-sources",
      "high-rigor"
    ]
  },
  "entityType": "policy",
  "redundancy": {
    "maxSimilarity": 25,
    "similarPages": [
      {
        "id": "us-aisi",
        "title": "US AI Safety Institute",
        "path": "/knowledge-base/organizations/us-aisi/",
        "similarity": 25
      },
      {
        "id": "metr",
        "title": "METR",
        "path": "/knowledge-base/organizations/metr/",
        "similarity": 23
      },
      {
        "id": "international-summits",
        "title": "International AI Safety Summits",
        "path": "/knowledge-base/responses/international-summits/",
        "similarity": 23
      },
      {
        "id": "uk-aisi",
        "title": "UK AI Safety Institute",
        "path": "/knowledge-base/organizations/uk-aisi/",
        "similarity": 22
      },
      {
        "id": "responsible-scaling-policies",
        "title": "Responsible Scaling Policies",
        "path": "/knowledge-base/responses/responsible-scaling-policies/",
        "similarity": 22
      }
    ]
  },
  "coverage": {
    "passing": 6,
    "total": 13,
    "targets": {
      "tables": 17,
      "diagrams": 2,
      "internalLinks": 33,
      "externalLinks": 21,
      "footnotes": 13,
      "references": 13
    },
    "actuals": {
      "tables": 5,
      "diagrams": 1,
      "internalLinks": 41,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 26,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "amber",
      "diagrams": "amber",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:5.5 R:7 A:6.5 C:7.5"
  },
  "readerRank": 205,
  "researchRank": 348,
  "recommendedScore": 191.61
}
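As the Page Record header above notes, database.json is merged from MDX frontmatter, Entity YAML, and computed metrics at build time. A minimal sketch of what that merge step could look like; the names here (mergePageRecord, PageRecord, the three input parameters) are illustrative assumptions, not the wiki's actual build code:

```ts
// Hypothetical sketch of the build-time merge described above.
// All names and signatures are assumptions, not the real pipeline's API.
interface PageRecord {
  id: string;
  path: string;
  title: string;
  entityType?: string;
  [key: string]: unknown;
}

function mergePageRecord(
  frontmatter: Record<string, unknown>, // hand-written fields from the .mdx file
  entityYaml: Record<string, unknown>,  // entity fields, e.g. EID and entityType
  computed: Record<string, unknown>,    // word counts, link counts, coverage, ranks
): PageRecord {
  // Later spreads win on key collisions, so computed metrics override
  // hand-written values, matching "merged ... at build time" above.
  return { ...entityYaml, ...frontmatter, ...computed } as PageRecord;
}
```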
External Links

{
  "eaForum": "https://forum.effectivealtruism.org/topics/ai-safety-institutes"
}
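The coverage block in the record above pairs per-item targets with actuals and reports a green/amber/red status for each. One plausible rule that reproduces the count-based statuses shown (meeting the target is green, partial progress is amber, zero is red); the exact thresholds the build uses are an assumption:

```ts
type Status = "green" | "amber" | "red";

// Assumed thresholds, consistent with the record above:
// tables 5/17 -> amber, internalLinks 41/33 -> green, externalLinks 0/21 -> red.
function coverageStatus(actual: number, target: number): Status {
  if (actual >= target) return "green";
  if (actual > 0) return "amber";
  return "red";
}

const targets = { tables: 17, diagrams: 2, internalLinks: 33, externalLinks: 21, footnotes: 13 };
const actuals = { tables: 5, diagrams: 1, internalLinks: 41, externalLinks: 0, footnotes: 0 };

const items = Object.fromEntries(
  (Object.keys(targets) as (keyof typeof targets)[]).map((k) => [
    k,
    coverageStatus(actuals[k], targets[k]),
  ]),
);
// items: { tables: "amber", diagrams: "amber", internalLinks: "green",
//          externalLinks: "red", footnotes: "red" }
```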
Backlinks (39)

| id | title | type | relationship |
|---|---|---|---|
| japan-aisi | Japan AI Safety Institute | organization | — |
| singapore-aisi | Singapore AI Safety Institute | organization | — |
| canada-aisi | Canadian AI Safety Institute | organization | — |
| eu-ai-office | EU AI Office | organization | — |
| lab-culture | AI Lab Safety Culture | approach | — |
| bletchley-declaration | Bletchley Declaration | policy | — |
| singapore-consensus | Singapore Consensus on AI Safety Research Priorities | policy | — |
| coordination-mechanisms | International Coordination Mechanisms | policy | — |
| model-registries | Model Registries | policy | — |
| international-regimes | International Compute Regimes | policy | — |
| whistleblower-protections | AI Whistleblower Protections | policy | — |
| state-capacity-ai-governance | State Capacity and AI Governance | concept | — |
| self-improvement | Self-Improvement and Recursive Enhancement | capability | — |
| __index__/knowledge-base/history | History | concept | — |
| __index__/knowledge-base | Knowledge Base | concept | — |
| intervention-effectiveness-matrix | Intervention Effectiveness Matrix | analysis | — |
| frontier-model-forum | Frontier Model Forum | organization | — |
| metr | METR | organization | — |
| nist-ai | NIST and AI Safety | organization | — |
| uk-aisi | UK AI Safety Institute | organization | — |
| us-aisi | US AI Safety Institute | organization | — |
| coordination-tech | AI Governance Coordination Technologies | approach | — |
| dangerous-cap-evals | Dangerous Capability Evaluations | approach | — |
| evals | Evals & Red-teaming | safety-agenda | — |
| evaluation | AI Evaluation | approach | — |
| governance-policy | AI Governance and Policy | crux | — |
| __index__/knowledge-base/responses | Safety Responses | concept | — |
| international-summits | International AI Safety Summits | policy | — |
| intervention-portfolio | AI Safety Intervention Portfolio | approach | — |
| longterm-wiki | Longterm Wiki | project | — |
| model-auditing | Third-Party Model Auditing | approach | — |
| training-programs | AI Safety Training Programs | approach | — |
| bioweapons | Bioweapons | risk | — |
| corrigibility-failure | Corrigibility Failure | risk | — |
| cyberweapons | Cyberweapons | risk | — |
| instrumental-convergence | Instrumental Convergence | risk | — |
| lock-in | AI Value Lock-in | risk | — |
| about-this-wiki | About This Wiki | concept | — |
| knowledge-base | Knowledge Base Style Guide | concept | — |
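A backlinks table like the one above can be derived by inverting each page's outgoing links across the full record set. A sketch under assumed record shapes; the convertedLinks field and the backlinksFor helper are hypothetical, not the wiki's actual schema:

```ts
interface PageStub {
  id: string;
  title: string;
  entityType: string;
  convertedLinks: string[]; // ids this page links to (assumed field name)
}

// Invert outgoing links: every page that links to `targetId`
// contributes one backlink row (id, title, type), as in the table above.
function backlinksFor(targetId: string, pages: PageStub[]) {
  return pages
    .filter((p) => p.convertedLinks.includes(targetId))
    .map((p) => ({ id: p.id, title: p.title, type: p.entityType }));
}

// e.g. backlinksFor("ai-safety-institutes", allPages).length === 39
```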