Longterm Wiki

UK AI Safety Institute

uk-aisi · organization · Path: /knowledge-base/organizations/uk-aisi/
E364 — Entity ID (EID)
← Back to page · 55 backlinks · Quality: 52 · Updated: 2026-03-13
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "uk-aisi",
  "numericId": null,
  "path": "/knowledge-base/organizations/uk-aisi/",
  "filePath": "knowledge-base/organizations/uk-aisi.mdx",
  "title": "UK AI Safety Institute",
  "quality": 52,
  "readerImportance": 32,
  "researchImportance": 48,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "The UK AI Safety Institute (renamed AI Security Institute in Feb 2025) operates with ~30 technical staff and 50M GBP annual budget, conducting frontier model evaluations using its open-source Inspect AI framework and coordinating the 10+ country International Network of AI Safety Institutes. April 2024 evaluations found frontier models capable of intermediate cybersecurity tasks and PhD-level biology knowledge, with safeguards vulnerable to basic jailbreaks.",
  "description": "The UK AI Safety Institute (renamed AI Security Institute in February 2025) is a government body with approximately 30+ technical staff and an annual budget of around 50 million GBP. It conducts frontier model evaluations, develops open-source evaluation tools like Inspect AI, and coordinates the International Network of AI Safety Institutes involving 10+ countries.",
  "ratings": {
    "novelty": 2.5,
    "rigor": 5.5,
    "actionability": 4,
    "completeness": 6.5
  },
  "category": "organizations",
  "subcategory": "government",
  "clusters": [
    "ai-safety",
    "community",
    "governance"
  ],
  "metrics": {
    "wordCount": 3577,
    "tableCount": 6,
    "diagramCount": 1,
    "internalLinks": 30,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.49,
    "sectionCount": 50,
    "hasOverview": true,
    "structuralScore": 11
  },
  "suggestedQuality": 73,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 3577,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 20,
  "backlinkCount": 55,
  "hallucinationRisk": {
    "level": "high",
    "score": 80,
    "factors": [
      "biographical-claims",
      "no-citations",
      "few-external-sources"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 24,
    "similarPages": [
      {
        "id": "us-aisi",
        "title": "US AI Safety Institute",
        "path": "/knowledge-base/organizations/us-aisi/",
        "similarity": 24
      },
      {
        "id": "ai-safety-institutes",
        "title": "AI Safety Institutes",
        "path": "/knowledge-base/responses/ai-safety-institutes/",
        "similarity": 22
      },
      {
        "id": "international-summits",
        "title": "International AI Safety Summits",
        "path": "/knowledge-base/responses/international-summits/",
        "similarity": 20
      },
      {
        "id": "metr",
        "title": "METR",
        "path": "/knowledge-base/organizations/metr/",
        "similarity": 19
      },
      {
        "id": "coordination-mechanisms",
        "title": "International Coordination Mechanisms",
        "path": "/knowledge-base/responses/coordination-mechanisms/",
        "similarity": 19
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 14,
      "diagrams": 1,
      "internalLinks": 29,
      "externalLinks": 18,
      "footnotes": 11,
      "references": 11
    },
    "actuals": {
      "tables": 6,
      "diagrams": 1,
      "internalLinks": 30,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 20,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "amber",
      "diagrams": "green",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:2.5 R:5.5 A:4 C:6.5"
  },
  "readerRank": 434,
  "researchRank": 294,
  "recommendedScore": 141.86
}
External Links
{
  "eaForum": "https://forum.effectivealtruism.org/topics/uk-ai-safety-institute"
}
Backlinks (55)
id · title · type · relationship
ai-safety-summitAI Safety Summit (Bletchley Park)historical
apollo-researchApollo Researchorganization
conjectureConjectureorganization
metrMETRorganization
arcARCorganization
us-aisiUS AI Safety Instituteorganization
japan-aisiJapan AI Safety Instituteorganization
singapore-aisiSingapore AI Safety Instituteorganization
canada-aisiCanadian AI Safety Instituteorganization
eu-ai-officeEU AI Officeorganization
eu-ai-actEU AI Actpolicy
international-summitsInternational AI Safety Summit Seriespolicy
us-executive-orderUS Executive Order on Safe, Secure, and Trustworthy AIpolicy
bletchley-declarationBletchley Declarationpolicy
codingAutonomous Codingcapability
accident-risksAI Accident Risk Cruxescrux
agi-developmentAGI Developmentconcept
ea-longtermist-wins-lossesEA and Longtermist Wins and Lossesconcept
corrigibility-failure-pathwaysCorrigibility Failure Pathwaysanalysis
deceptive-alignment-decompositionDeceptive Alignment Decomposition Modelanalysis
goal-misgeneralization-probabilityGoal Misgeneralization Probability Modelanalysis
instrumental-convergence-frameworkInstrumental Convergence Frameworkanalysis
intervention-timing-windowsIntervention Timing Windowsanalysis
mesa-optimization-analysisMesa-Optimization Risk Analysisanalysis
multipolar-trap-dynamicsMultipolar Trap Dynamics Modelanalysis
power-seeking-conditionsPower-Seeking Emergence Conditions Modelanalysis
racing-dynamics-impactRacing Dynamics Impact Modelanalysis
risk-interaction-networkRisk Interaction Networkanalysis
safety-research-allocationSafety Research Allocation Modelanalysis
scheming-likelihood-modelScheming Likelihood Assessmentanalysis
worldview-intervention-mappingWorldview-Intervention Mappinganalysis
caisCAIS (Center for AI Safety)organization
far-aiFAR AIorganization
frontier-model-forumFrontier Model Forumorganization
government-orgs-overviewGovernment AI Safety Organizations (Overview)concept
__index__/knowledge-base/organizationsOrganizationsconcept
openaiOpenAIorganization
redwood-researchRedwood Researchorganization
geoffrey-hintonGeoffrey Hintonperson
ai-safety-institutesAI Safety Institutespolicy
alignmentAI Alignmentapproach
coordination-mechanismsInternational Coordination Mechanismspolicy
coordination-techAI Governance Coordination Technologiesapproach
dangerous-cap-evalsDangerous Capability Evaluationsapproach
eval-saturationEval Saturation & The Evals Gapapproach
evaluation-awarenessEvaluation Awarenessapproach
evaluationAI Evaluationapproach
model-auditingThird-Party Model Auditingapproach
red-teamingRed Teamingapproach
safety-casesAI Safety Casesapproach
scalable-eval-approachesScalable Eval Approachesapproach
thresholdsCompute Thresholdspolicy
training-programsAI Safety Training Programsapproach
bioweaponsBioweaponsrisk
knowledge-monopolyAI Knowledge Monopolyrisk
Longterm Wiki