US AI Safety Institute
us-aisi · organization · Path: /knowledge-base/organizations/us-aisi/
E365 — Entity ID (EID)
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "us-aisi",
"numericId": null,
"path": "/knowledge-base/organizations/us-aisi/",
"filePath": "knowledge-base/organizations/us-aisi.mdx",
"title": "US AI Safety Institute",
"quality": 91,
"readerImportance": 32,
"researchImportance": 48,
"tacticalValue": null,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "The US AI Safety Institute (AISI), established November 2023 within NIST with $10M budget (FY2025 request $82.7M), conducted pre-deployment evaluations of frontier models through MOUs with OpenAI and Anthropic. Co-led International Network of AI Safety Institutes (11 member nations). Director Elizabeth Kelly named to TIME's 100 Most Influential in AI (2024) but departed February 2025. Renamed to CAISI June 2025 with shift to innovation/competitiveness focus following Trump administration's revocation of EO 14110 and NIST layoffs affecting 73 staff.",
"description": "US government agency for AI safety research and standard-setting under NIST, established November 2023 with $10M initial budget (FY2025 request of $82.7M) and 290+ consortium members. Conducted first joint US-UK model evaluations (Claude 3.5 Sonnet, OpenAI o1) in late 2024. Renamed to Center for AI Standards and Innovation (CAISI) in June 2025 following director departure and 73 staff layoffs.",
"ratings": {
"novelty": 4,
"rigor": 6,
"actionability": 5,
"completeness": 7.5
},
"category": "organizations",
"subcategory": "government",
"clusters": [
"ai-safety",
"governance",
"community"
],
"metrics": {
"wordCount": 4801,
"tableCount": 12,
"diagramCount": 1,
"internalLinks": 33,
"externalLinks": 18,
"footnoteCount": 0,
"bulletRatio": 0.07,
"sectionCount": 28,
"hasOverview": false,
"structuralScore": 14
},
"suggestedQuality": 93,
"updateFrequency": 21,
"evergreen": true,
"wordCount": 4801,
"unconvertedLinks": [
{
"text": "International Network of AI Safety Institutes",
"url": "https://www.nist.gov/news-events/news/2024/11/fact-sheet-us-department-commerce-us-department-state-launch-international",
"resourceId": "a65ad4f1a30f1737",
"resourceTitle": "International Network of AI Safety Institutes"
},
{
"text": "NIST announcement",
"url": "https://www.nist.gov/artificial-intelligence/artificial-intelligence-safety-institute-consortium-aisic",
"resourceId": "bfe77d043707ba19",
"resourceTitle": "AI Safety Institute Consortium (AISIC)"
},
{
"text": "5-10x higher compensation",
"url": "https://www.brookings.edu/articles/a-technical-ai-government-agency-plays-a-vital-role-in-advancing-ai-innovation-and-trustworthiness/",
"resourceId": "f7d2ebb409b056f9",
"resourceTitle": "U.S. AI Safety Institute"
},
{
"text": "$1 billion from Amazon",
"url": "https://www.nist.gov/news-events/news/2024/11/fact-sheet-us-department-commerce-us-department-state-launch-international",
"resourceId": "a65ad4f1a30f1737",
"resourceTitle": "International Network of AI Safety Institutes"
},
{
"text": "TIME's 100 Most Influential People in AI",
"url": "https://time.com/7012783/elizabeth-kelly/",
"resourceId": "0694bc71bc9daac0",
"resourceTitle": "Elizabeth Kelly"
}
],
"unconvertedLinkCount": 5,
"convertedLinkCount": 26,
"backlinkCount": 37,
"hallucinationRisk": {
"level": "high",
"score": 70,
"factors": [
"biographical-claims",
"no-citations",
"high-quality"
]
},
"entityType": "organization",
"redundancy": {
"maxSimilarity": 25,
"similarPages": [
{
"id": "ai-safety-institutes",
"title": "AI Safety Institutes",
"path": "/knowledge-base/responses/ai-safety-institutes/",
"similarity": 25
},
{
"id": "uk-aisi",
"title": "UK AI Safety Institute",
"path": "/knowledge-base/organizations/uk-aisi/",
"similarity": 24
},
{
"id": "international-summits",
"title": "International AI Safety Summits",
"path": "/knowledge-base/responses/international-summits/",
"similarity": 23
},
{
"id": "metr",
"title": "METR",
"path": "/knowledge-base/organizations/metr/",
"similarity": 22
},
{
"id": "responsible-scaling-policies",
"title": "Responsible Scaling Policies",
"path": "/knowledge-base/responses/responsible-scaling-policies/",
"similarity": 22
}
]
},
"coverage": {
"passing": 4,
"total": 13,
"targets": {
"tables": 19,
"diagrams": 2,
"internalLinks": 38,
"externalLinks": 24,
"footnotes": 14,
"references": 14
},
"actuals": {
"tables": 12,
"diagrams": 1,
"internalLinks": 33,
"externalLinks": 18,
"footnotes": 0,
"references": 18,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "red",
"tables": "amber",
"diagrams": "amber",
"internalLinks": "amber",
"externalLinks": "amber",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:4 R:6 A:5 C:7.5"
},
"readerRank": 435,
"researchRank": 295,
"recommendedScore": 219.86
}
External Links
{
"eaForum": "https://forum.effectivealtruism.org/topics/us-ai-safety-institute"
}
Backlinks (37)
| id | title | type | relationship |
|---|---|---|---|
| uk-aisi | UK AI Safety Institute | organization | — |
| japan-aisi | Japan AI Safety Institute | organization | — |
| singapore-aisi | Singapore AI Safety Institute | organization | — |
| canada-aisi | Canadian AI Safety Institute | organization | — |
| eu-ai-office | EU AI Office | organization | — |
| joe-biden | Joe Biden | person | — |
| ai-executive-order | Biden AI Executive Order | policy | — |
| bletchley-declaration | Bletchley Declaration | policy | — |
| coding | Autonomous Coding | capability | — |
| accident-risks | AI Accident Risk Cruxes | crux | — |
| agi-development | AGI Development | concept | — |
| ea-longtermist-wins-losses | EA and Longtermist Wins and Losses | concept | — |
| corrigibility-failure-pathways | Corrigibility Failure Pathways | analysis | — |
| mesa-optimization-analysis | Mesa-Optimization Risk Analysis | analysis | — |
| power-seeking-conditions | Power-Seeking Emergence Conditions Model | analysis | — |
| safety-research-allocation | Safety Research Allocation Model | analysis | — |
| safety-researcher-gap | AI Safety Talent Supply/Demand Gap Model | analysis | — |
| worldview-intervention-mapping | Worldview-Intervention Mapping | analysis | — |
| anthropic | Anthropic | organization | — |
| arc | ARC (Alignment Research Center) | organization | — |
| cais | CAIS (Center for AI Safety) | organization | — |
| frontier-model-forum | Frontier Model Forum | organization | — |
| govai | GovAI | organization | — |
| government-orgs-overview | Government AI Safety Organizations (Overview) | concept | — |
| __index__/knowledge-base/organizations | Organizations | concept | — |
| metr | METR | organization | — |
| geoffrey-hinton | Geoffrey Hinton | person | — |
| holden-karnofsky | Holden Karnofsky | person | — |
| ai-safety-institutes | AI Safety Institutes | policy | — |
| anthropic-core-views | Anthropic Core Views | safety-agenda | — |
| coordination-tech | AI Governance Coordination Technologies | approach | — |
| effectiveness-assessment | Policy Effectiveness Assessment | analysis | — |
| evaluation | AI Evaluation | approach | — |
| international-summits | International AI Safety Summits | policy | — |
| lab-culture | AI Lab Safety Culture | approach | — |
| scalable-eval-approaches | Scalable Eval Approaches | approach | — |
| us-executive-order | US Executive Order on Safe, Secure, and Trustworthy AI | policy | — |