UK AI Safety Institute
uk-aisi · organization
Path: /knowledge-base/organizations/uk-aisi/
Entity ID (EID): E364
Page Record: database.json, merged from MDX frontmatter + Entity YAML + computed metrics at build time (see the merge sketch after the record)
```json
{
  "id": "uk-aisi",
  "numericId": null,
  "path": "/knowledge-base/organizations/uk-aisi/",
  "filePath": "knowledge-base/organizations/uk-aisi.mdx",
  "title": "UK AI Safety Institute",
  "quality": 52,
  "readerImportance": 32,
  "researchImportance": 48,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "The UK AI Safety Institute (renamed AI Security Institute in Feb 2025) operates with ~30 technical staff and a 50M GBP annual budget, conducting frontier model evaluations using its open-source Inspect AI framework and coordinating the 10+ country International Network of AI Safety Institutes. April 2024 evaluations found frontier models capable of completing intermediate-difficulty cybersecurity tasks and demonstrating PhD-level biology knowledge, with safeguards vulnerable to basic jailbreaks.",
  "description": "The UK AI Safety Institute (renamed AI Security Institute in February 2025) is a government body with approximately 30 technical staff and an annual budget of around 50 million GBP. It conducts frontier model evaluations, develops open-source evaluation tools like Inspect AI, and coordinates the International Network of AI Safety Institutes involving 10+ countries.",
  "ratings": {
    "novelty": 2.5,
    "rigor": 5.5,
    "actionability": 4,
    "completeness": 6.5
  },
  "category": "organizations",
  "subcategory": "government",
  "clusters": [
    "ai-safety",
    "community",
    "governance"
  ],
  "metrics": {
    "wordCount": 3577,
    "tableCount": 6,
    "diagramCount": 1,
    "internalLinks": 30,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.49,
    "sectionCount": 50,
    "hasOverview": true,
    "structuralScore": 11
  },
  "suggestedQuality": 73,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 3577,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 20,
  "backlinkCount": 55,
  "hallucinationRisk": {
    "level": "high",
    "score": 80,
    "factors": [
      "biographical-claims",
      "no-citations",
      "few-external-sources"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 24,
    "similarPages": [
      {
        "id": "us-aisi",
        "title": "US AI Safety Institute",
        "path": "/knowledge-base/organizations/us-aisi/",
        "similarity": 24
      },
      {
        "id": "ai-safety-institutes",
        "title": "AI Safety Institutes",
        "path": "/knowledge-base/responses/ai-safety-institutes/",
        "similarity": 22
      },
      {
        "id": "international-summits",
        "title": "International AI Safety Summits",
        "path": "/knowledge-base/responses/international-summits/",
        "similarity": 20
      },
      {
        "id": "metr",
        "title": "METR",
        "path": "/knowledge-base/organizations/metr/",
        "similarity": 19
      },
      {
        "id": "coordination-mechanisms",
        "title": "International Coordination Mechanisms",
        "path": "/knowledge-base/responses/coordination-mechanisms/",
        "similarity": 19
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 14,
      "diagrams": 1,
      "internalLinks": 29,
      "externalLinks": 18,
      "footnotes": 11,
      "references": 11
    },
    "actuals": {
      "tables": 6,
      "diagrams": 1,
      "internalLinks": 30,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 20,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "amber",
      "diagrams": "green",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:2.5 R:5.5 A:4 C:6.5"
  },
  "readerRank": 434,
  "researchRank": 294,
  "recommendedScore": 141.86
}
```
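As the header notes, this record is assembled at build time by merging MDX frontmatter, the entity's YAML, and computed metrics into one object. A minimal sketch of that merge, assuming a last-writer-wins spread in the order the header lists; the function and parameter names here are illustrative, not the site's actual build API:

```typescript
type Fields = Record<string, unknown>;

// Hypothetical merge: frontmatter first, entity YAML layered on top,
// computed metrics last, so a computed value (e.g. "wordCount")
// overrides anything hand-written in the .mdx file.
function buildPageRecord(
  frontmatter: Fields, // parsed from the page's .mdx file
  entityYaml: Fields,  // from the entity registry (entityType, external links, ...)
  computed: Fields,    // metrics, coverage, ranks, recommendedScore
): Fields {
  return { ...frontmatter, ...entityYaml, ...computed };
}
```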
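The coverage block grades each item green, amber, or red against a target. The grading rule is not stated in the record, but a simple threshold rule reproduces all six numeric rows above (tables 6 of 14 is amber, externalLinks 0 of 18 is red, internalLinks 30 of 29 is green); the non-numeric items such as editHistory, quotes, and accuracy presumably have their own checks. A sketch under that assumption:

```typescript
type Status = "green" | "amber" | "red";

// Assumed rule, inferred from the targets/actuals/items triples in this
// record; the site's real thresholds may differ.
function coverageStatus(actual: number, target: number): Status {
  if (actual >= target) return "green"; // target met or exceeded
  if (actual === 0) return "red";       // nothing present at all
  return "amber";                       // partial progress
}

coverageStatus(6, 14);  // "amber" (tables)
coverageStatus(0, 18);  // "red"   (externalLinks)
coverageStatus(30, 29); // "green" (internalLinks)
```

Consistent with this reading, "passing": 7 counts the green items among the 13 listed.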
External Links

```json
{
  "eaForum": "https://forum.effectivealtruism.org/topics/uk-ai-safety-institute"
}
```

Backlinks (55)
| id | title | type | relationship |
|---|---|---|---|
| ai-safety-summit | AI Safety Summit (Bletchley Park) | historical | — |
| apollo-research | Apollo Research | organization | — |
| conjecture | Conjecture | organization | — |
| metr | METR | organization | — |
| arc | ARC | organization | — |
| us-aisi | US AI Safety Institute | organization | — |
| japan-aisi | Japan AI Safety Institute | organization | — |
| singapore-aisi | Singapore AI Safety Institute | organization | — |
| canada-aisi | Canadian AI Safety Institute | organization | — |
| eu-ai-office | EU AI Office | organization | — |
| eu-ai-act | EU AI Act | policy | — |
| international-summits | International AI Safety Summit Series | policy | — |
| us-executive-order | US Executive Order on Safe, Secure, and Trustworthy AI | policy | — |
| bletchley-declaration | Bletchley Declaration | policy | — |
| coding | Autonomous Coding | capability | — |
| accident-risks | AI Accident Risk Cruxes | crux | — |
| agi-development | AGI Development | concept | — |
| ea-longtermist-wins-losses | EA and Longtermist Wins and Losses | concept | — |
| corrigibility-failure-pathways | Corrigibility Failure Pathways | analysis | — |
| deceptive-alignment-decomposition | Deceptive Alignment Decomposition Model | analysis | — |
| goal-misgeneralization-probability | Goal Misgeneralization Probability Model | analysis | — |
| instrumental-convergence-framework | Instrumental Convergence Framework | analysis | — |
| intervention-timing-windows | Intervention Timing Windows | analysis | — |
| mesa-optimization-analysis | Mesa-Optimization Risk Analysis | analysis | — |
| multipolar-trap-dynamics | Multipolar Trap Dynamics Model | analysis | — |
| power-seeking-conditions | Power-Seeking Emergence Conditions Model | analysis | — |
| racing-dynamics-impact | Racing Dynamics Impact Model | analysis | — |
| risk-interaction-network | Risk Interaction Network | analysis | — |
| safety-research-allocation | Safety Research Allocation Model | analysis | — |
| scheming-likelihood-model | Scheming Likelihood Assessment | analysis | — |
| worldview-intervention-mapping | Worldview-Intervention Mapping | analysis | — |
| cais | CAIS (Center for AI Safety) | organization | — |
| far-ai | FAR AI | organization | — |
| frontier-model-forum | Frontier Model Forum | organization | — |
| government-orgs-overview | Government AI Safety Organizations (Overview) | concept | — |
| __index__/knowledge-base/organizations | Organizations | concept | — |
| openai | OpenAI | organization | — |
| redwood-research | Redwood Research | organization | — |
| geoffrey-hinton | Geoffrey Hinton | person | — |
| ai-safety-institutes | AI Safety Institutes | policy | — |
| alignment | AI Alignment | approach | — |
| coordination-mechanisms | International Coordination Mechanisms | policy | — |
| coordination-tech | AI Governance Coordination Technologies | approach | — |
| dangerous-cap-evals | Dangerous Capability Evaluations | approach | — |
| eval-saturation | Eval Saturation & The Evals Gap | approach | — |
| evaluation-awareness | Evaluation Awareness | approach | — |
| evaluation | AI Evaluation | approach | — |
| model-auditing | Third-Party Model Auditing | approach | — |
| red-teaming | Red Teaming | approach | — |
| safety-cases | AI Safety Cases | approach | — |
| scalable-eval-approaches | Scalable Eval Approaches | approach | — |
| thresholds | Compute Thresholds | policy | — |
| training-programs | AI Safety Training Programs | approach | — |
| bioweapons | Bioweapons | risk | — |
| knowledge-monopoly | AI Knowledge Monopoly | risk | — |