Longterm Wiki

CAIS (Center for AI Safety)

cais · organization · Path: /knowledge-base/organizations/cais/
E47 · Entity ID (EID)
← Back to page · 35 backlinks · Quality: 42 · Updated: 2026-03-13
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "cais",
  "numericId": null,
  "path": "/knowledge-base/organizations/cais/",
  "filePath": "knowledge-base/organizations/cais.mdx",
  "title": "CAIS (Center for AI Safety)",
  "quality": 42,
  "readerImportance": 88.5,
  "researchImportance": 17.5,
  "tacticalValue": 72,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "CAIS is a nonprofit research organization founded by Dan Hendrycks that has distributed compute grants to researchers, published technical AI safety papers including the representation engineering and MACHIAVELLI benchmark papers, and organized the May 2023 Statement on AI Risk signed by over 350 AI researchers and industry leaders. The organization focuses on technical safety research, field-building, and policy communication.",
  "description": "Research organization focused on AI safety through technical research, field-building, and public communication, including the May 2023 Statement on AI Risk signed by prominent AI researchers and industry leaders",
  "ratings": {
    "novelty": 2.5,
    "rigor": 4,
    "actionability": 3.5,
    "completeness": 5.5
  },
  "category": "organizations",
  "subcategory": "safety-orgs",
  "clusters": [
    "community",
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 2916,
    "tableCount": 6,
    "diagramCount": 0,
    "internalLinks": 58,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.25,
    "sectionCount": 27,
    "hasOverview": true,
    "structuralScore": 11
  },
  "suggestedQuality": 73,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 2916,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 15,
  "backlinkCount": 35,
  "hallucinationRisk": {
    "level": "high",
    "score": 80,
    "factors": [
      "biographical-claims",
      "no-citations",
      "few-external-sources"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 17,
    "similarPages": [
      {
        "id": "ea-longtermist-wins-losses",
        "title": "EA and Longtermist Wins and Losses",
        "path": "/knowledge-base/history/ea-longtermist-wins-losses/",
        "similarity": 17
      },
      {
        "id": "miri-era",
        "title": "The MIRI Era (2000-2015)",
        "path": "/knowledge-base/history/miri-era/",
        "similarity": 17
      },
      {
        "id": "dan-hendrycks",
        "title": "Dan Hendrycks",
        "path": "/knowledge-base/people/dan-hendrycks/",
        "similarity": 17
      },
      {
        "id": "ai-talent-market-dynamics",
        "title": "AI Talent Market Dynamics",
        "path": "/knowledge-base/models/ai-talent-market-dynamics/",
        "similarity": 16
      },
      {
        "id": "arc",
        "title": "ARC (Alignment Research Center)",
        "path": "/knowledge-base/organizations/arc/",
        "similarity": 16
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-18",
      "branch": "claude/fix-issue-240-N5irU",
      "title": "Surface tacticalValue in /wiki table and score 53 pages",
      "summary": "Added `tacticalValue` to `ExploreItem` interface, `getExploreItems()` mappings, the `/wiki` explore table (new sortable \"Tact.\" column), and the card view sort dropdown. Scored 49 new pages with tactical values (4 were already scored), bringing total to 53.",
      "model": "sonnet-4",
      "duration": "~30min"
    }
  ],
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 12,
      "diagrams": 1,
      "internalLinks": 23,
      "externalLinks": 15,
      "footnotes": 9,
      "references": 9
    },
    "actuals": {
      "tables": 6,
      "diagrams": 0,
      "internalLinks": 58,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 14,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "editHistoryCount": 1,
    "ratingsString": "N:2.5 R:4 A:3.5 C:5.5"
  },
  "readerRank": 28,
  "researchRank": 509,
  "recommendedScore": 150.07
}
External Links
{
  "eaForum": "https://forum.effectivealtruism.org/topics/center-for-ai-safety",
  "wikidata": "https://www.wikidata.org/wiki/Q119084607"
}
Backlinks (35)
| id | title | type | relationship |
| --- | --- | --- | --- |
| dan-hendrycks | Dan Hendrycks | person | |
| capability-unlearning | Capability Unlearning / Removal | approach | |
| pause | Pause Advocacy | approach | |
| maim | MAIM (Mutually Assured AI Malfunction) | policy | |
| representation-engineering | Representation Engineering | approach | |
| power-seeking | Power-Seeking AI | risk | |
| ai-compute-scaling-metrics | AI Compute Scaling Metrics | analysis | |
| ai-risk-portfolio-analysis | AI Risk Portfolio Analysis | analysis | |
| intervention-effectiveness-matrix | Intervention Effectiveness Matrix | analysis | |
| risk-activation-timeline | Risk Activation Timeline Model | analysis | |
| risk-interaction-matrix | Risk Interaction Matrix Model | analysis | |
| risk-interaction-network | Risk Interaction Network | analysis | |
| ai-impacts | AI Impacts | organization | |
| chai | CHAI (Center for Human-Compatible AI) | organization | |
| deepmind | Google DeepMind | organization | |
| elon-musk-philanthropy | Elon Musk (Funder) | analysis | |
| funders-overview | Longtermist Funders (Overview) | concept | |
| __index__/knowledge-base/organizations | Organizations | concept | |
| longview-philanthropy | Longview Philanthropy | organization | |
| mats | MATS ML Alignment Theory Scholars program | organization | |
| safety-orgs-overview | AI Safety Organizations (Overview) | concept | |
| secure-ai-project | Secure AI Project | organization | |
| sff | Survival and Flourishing Fund (SFF) | organization | |
| geoffrey-hinton | Geoffrey Hinton | person | |
| __index__/knowledge-base/people | People | concept | |
| jaan-tallinn | Jaan Tallinn | person | |
| nick-beckstead | Nick Beckstead | person | |
| stuart-russell | Stuart Russell | person | |
| ai-forecasting | AI-Augmented Forecasting | approach | |
| california-sb1047 | California SB 1047 | policy | |
| corporate | Corporate AI Safety Responses | approach | |
| eval-saturation | Eval Saturation & The Evals Gap | approach | |
| failed-stalled-proposals | Failed and Stalled AI Policy Proposals | policy | |
| us-state-legislation | US State AI Legislation | policy | |
| existential-risk | Existential Risk from AI | concept | |
Longterm Wiki