CAIS (Center for AI Safety)
cais (organization)
Path: /knowledge-base/organizations/cais/
Entity ID (EID): E47
Page Record: database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time (a minimal sketch of this merge follows the record below)
{
"id": "cais",
"numericId": null,
"path": "/knowledge-base/organizations/cais/",
"filePath": "knowledge-base/organizations/cais.mdx",
"title": "CAIS (Center for AI Safety)",
"quality": 42,
"readerImportance": 88.5,
"researchImportance": 17.5,
"tacticalValue": 72,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "CAIS is a nonprofit research organization founded by Dan Hendrycks that has distributed compute grants to researchers, published technical AI safety papers including the representation engineering and MACHIAVELLI benchmark papers, and organized the May 2023 Statement on AI Risk signed by over 350 AI researchers and industry leaders. The organization focuses on technical safety research, field-building, and policy communication.",
"description": "Research organization focused on AI safety through technical research, field-building, and public communication, including the May 2023 Statement on AI Risk signed by prominent AI researchers and industry leaders",
"ratings": {
"novelty": 2.5,
"rigor": 4,
"actionability": 3.5,
"completeness": 5.5
},
"category": "organizations",
"subcategory": "safety-orgs",
"clusters": [
"community",
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 2916,
"tableCount": 6,
"diagramCount": 0,
"internalLinks": 58,
"externalLinks": 0,
"footnoteCount": 0,
"bulletRatio": 0.25,
"sectionCount": 27,
"hasOverview": true,
"structuralScore": 11
},
"suggestedQuality": 73,
"updateFrequency": 21,
"evergreen": true,
"wordCount": 2916,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 15,
"backlinkCount": 35,
"hallucinationRisk": {
"level": "high",
"score": 80,
"factors": [
"biographical-claims",
"no-citations",
"few-external-sources"
]
},
"entityType": "organization",
"redundancy": {
"maxSimilarity": 17,
"similarPages": [
{
"id": "ea-longtermist-wins-losses",
"title": "EA and Longtermist Wins and Losses",
"path": "/knowledge-base/history/ea-longtermist-wins-losses/",
"similarity": 17
},
{
"id": "miri-era",
"title": "The MIRI Era (2000-2015)",
"path": "/knowledge-base/history/miri-era/",
"similarity": 17
},
{
"id": "dan-hendrycks",
"title": "Dan Hendrycks",
"path": "/knowledge-base/people/dan-hendrycks/",
"similarity": 17
},
{
"id": "ai-talent-market-dynamics",
"title": "AI Talent Market Dynamics",
"path": "/knowledge-base/models/ai-talent-market-dynamics/",
"similarity": 16
},
{
"id": "arc",
"title": "ARC (Alignment Research Center)",
"path": "/knowledge-base/organizations/arc/",
"similarity": 16
}
]
},
"changeHistory": [
{
"date": "2026-02-18",
"branch": "claude/fix-issue-240-N5irU",
"title": "Surface tacticalValue in /wiki table and score 53 pages",
"summary": "Added `tacticalValue` to `ExploreItem` interface, `getExploreItems()` mappings, the `/wiki` explore table (new sortable \"Tact.\" column), and the card view sort dropdown. Scored 49 new pages with tactical values (4 were already scored), bringing total to 53.",
"model": "sonnet-4",
"duration": "~30min"
}
],
"coverage": {
"passing": 7,
"total": 13,
"targets": {
"tables": 12,
"diagrams": 1,
"internalLinks": 23,
"externalLinks": 15,
"footnotes": 9,
"references": 9
},
"actuals": {
"tables": 6,
"diagrams": 0,
"internalLinks": 58,
"externalLinks": 0,
"footnotes": 0,
"references": 14,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "green",
"overview": "green",
"tables": "amber",
"diagrams": "red",
"internalLinks": "green",
"externalLinks": "red",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"editHistoryCount": 1,
"ratingsString": "N:2.5 R:4 A:3.5 C:5.5"
},
"readerRank": 28,
"researchRank": 509,
"recommendedScore": 150.07
}
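
The record above is the concrete output of the build step described at the top of this page: MDX frontmatter, the entity YAML, and computed content metrics merged into a single database.json entry. The TypeScript sketch below shows one way such a merge could look; every interface, field default, helper name, and the merge order are assumptions for illustration, not the site's actual build code.

```ts
// Illustrative sketch only: inputs are assumed to be already-parsed objects
// (MDX frontmatter and entity YAML), plus metrics from a content-analysis pass.
// All names here are hypothetical, not the knowledge base's real build API.

interface ContentMetrics {
  wordCount: number;
  tableCount: number;
  diagramCount: number;
  internalLinks: number;
  externalLinks: number;
  footnoteCount: number;
}

interface PageRecord {
  id: string;
  path: string;
  filePath: string;
  title: string;
  category: string;
  metrics: ContentMetrics;
  [key: string]: unknown; // remaining frontmatter / entity fields pass through
}

// Assumed merge order: frontmatter first, entity YAML overrides it, and the
// computed metrics are attached last so they can never be hand-edited.
function buildPageRecord(
  frontmatter: Record<string, unknown>,
  entityYaml: Record<string, unknown>,
  metrics: ContentMetrics,
  filePath: string,
): PageRecord {
  const id = String(frontmatter.id ?? filePath.replace(/^.*\//, "").replace(/\.mdx$/, ""));
  const category = String(frontmatter.category ?? "uncategorized");
  return {
    ...frontmatter,
    ...entityYaml,
    id,
    category,
    filePath,
    path: `/knowledge-base/${category}/${id}/`,
    title: String(frontmatter.title ?? id),
    metrics,
  };
}

// Reproducing a few fields of the CAIS record shown above.
const record = buildPageRecord(
  { id: "cais", title: "CAIS (Center for AI Safety)", category: "organizations", quality: 42 },
  { entityType: "organization" },
  { wordCount: 2916, tableCount: 6, diagramCount: 0, internalLinks: 58, externalLinks: 0, footnoteCount: 0 },
  "knowledge-base/organizations/cais.mdx",
);
console.log(record.path); // -> "/knowledge-base/organizations/cais/"
```

Everything downstream of this merge in the record (quality scores, coverage, redundancy, rankings) would presumably be layered on by later build passes; the sketch only covers the three-source merge the page itself describes.
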
External Links

{
"eaForum": "https://forum.effectivealtruism.org/topics/center-for-ai-safety",
"wikidata": "https://www.wikidata.org/wiki/Q119084607"
}

Backlinks (35)

| id | title | type | relationship |
|---|---|---|---|
| dan-hendrycks | Dan Hendrycks | person | — |
| capability-unlearning | Capability Unlearning / Removal | approach | — |
| pause | Pause Advocacy | approach | — |
| maim | MAIM (Mutually Assured AI Malfunction) | policy | — |
| representation-engineering | Representation Engineering | approach | — |
| power-seeking | Power-Seeking AI | risk | — |
| ai-compute-scaling-metrics | AI Compute Scaling Metrics | analysis | — |
| ai-risk-portfolio-analysis | AI Risk Portfolio Analysis | analysis | — |
| intervention-effectiveness-matrix | Intervention Effectiveness Matrix | analysis | — |
| risk-activation-timeline | Risk Activation Timeline Model | analysis | — |
| risk-interaction-matrix | Risk Interaction Matrix Model | analysis | — |
| risk-interaction-network | Risk Interaction Network | analysis | — |
| ai-impacts | AI Impacts | organization | — |
| chai | CHAI (Center for Human-Compatible AI) | organization | — |
| deepmind | Google DeepMind | organization | — |
| elon-musk-philanthropy | Elon Musk (Funder) | analysis | — |
| funders-overview | Longtermist Funders (Overview) | concept | — |
| __index__/knowledge-base/organizations | Organizations | concept | — |
| longview-philanthropy | Longview Philanthropy | organization | — |
| mats | MATS ML Alignment Theory Scholars program | organization | — |
| safety-orgs-overview | AI Safety Organizations (Overview) | concept | — |
| secure-ai-project | Secure AI Project | organization | — |
| sff | Survival and Flourishing Fund (SFF) | organization | — |
| geoffrey-hinton | Geoffrey Hinton | person | — |
| __index__/knowledge-base/people | People | concept | — |
| jaan-tallinn | Jaan Tallinn | person | — |
| nick-beckstead | Nick Beckstead | person | — |
| stuart-russell | Stuart Russell | person | — |
| ai-forecasting | AI-Augmented Forecasting | approach | — |
| california-sb1047 | California SB 1047 | policy | — |
| corporate | Corporate AI Safety Responses | approach | — |
| eval-saturation | Eval Saturation & The Evals Gap | approach | — |
| failed-stalled-proposals | Failed and Stalled AI Policy Proposals | policy | — |
| us-state-legislation | US State AI Legislation | policy | — |
| existential-risk | Existential Risk from AI | concept | — |
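
The `coverage` block in the page record pairs per-item targets with measured actuals and rolls them up into `passing: 7` of `total: 13`. The record does not state the thresholds, but the following assumed rule reproduces every numeric status shown above; the remaining items (such as `llmSummary` or `accuracy`) appear to be boolean checks and are not modeled here.

```ts
// Hypothetical derivation of the green/amber/red coverage statuses in the
// record above. Field names mirror the record; the threshold rule is an assumption.

type CoverageStatus = "green" | "amber" | "red";

interface CoverageCounts {
  [item: string]: number;
}

// One simple rule consistent with the CAIS record: meeting the target is green,
// a non-zero shortfall is amber, and zero actuals against a positive target is red.
function coverageStatus(actual: number, target: number): CoverageStatus {
  if (actual >= target) return "green";
  if (actual > 0) return "amber";
  return "red";
}

function coverageSummary(targets: CoverageCounts, actuals: CoverageCounts) {
  const items: Record<string, CoverageStatus> = {};
  for (const key of Object.keys(targets)) {
    items[key] = coverageStatus(actuals[key] ?? 0, targets[key]);
  }
  const passing = Object.values(items).filter((s) => s === "green").length;
  return { items, passing, total: Object.keys(items).length };
}

// Reproduces the countable rows of the CAIS coverage block:
// tables 6/12 -> amber, diagrams 0/1 -> red, internalLinks 58/23 -> green,
// externalLinks 0/15 -> red, footnotes 0/9 -> red, references 14/9 -> green.
const { items, passing } = coverageSummary(
  { tables: 12, diagrams: 1, internalLinks: 23, externalLinks: 15, footnotes: 9, references: 9 },
  { tables: 6, diagrams: 0, internalLinks: 58, externalLinks: 0, footnotes: 0, references: 14 },
);
console.log(items, passing); // passing = 2 of the 6 countable items here
```

The record's overall `passing: 7` would then come from adding the boolean-style checks (llmSummary, schedule, entity, editHistory, overview) to the two green countable items, which matches the `items` map shown above.
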