Longterm Wiki

CHAI (Center for Human-Compatible AI)

ID: chai · Type: organization · Path: /knowledge-base/organizations/chai/
Entity ID (EID): E57
← Back to page · 30 backlinks · Quality: 37 · Updated: 2026-03-13
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "chai",
  "numericId": null,
  "path": "/knowledge-base/organizations/chai/",
  "filePath": "knowledge-base/organizations/chai.mdx",
  "title": "CHAI (Center for Human-Compatible AI)",
  "quality": 37,
  "readerImportance": 68.5,
  "researchImportance": 61,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "CHAI is UC Berkeley's AI safety research center founded by Stuart Russell in 2016, pioneering cooperative inverse reinforcement learning and human-compatible AI frameworks. The center has trained 30+ PhD students and influenced major labs (OpenAI's RLHF, Anthropic's Constitutional AI), though faces scalability challenges in preference learning approaches.",
  "description": "UC Berkeley research center founded by Stuart Russell developing cooperative AI frameworks and preference learning approaches to ensure AI systems remain beneficial and deferential to humans",
  "ratings": {
    "novelty": 2.5,
    "rigor": 4.5,
    "actionability": 2,
    "completeness": 6.5
  },
  "category": "organizations",
  "subcategory": "safety-orgs",
  "clusters": [
    "ai-safety",
    "community"
  ],
  "metrics": {
    "wordCount": 1239,
    "tableCount": 11,
    "diagramCount": 0,
    "internalLinks": 22,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.24,
    "sectionCount": 26,
    "hasOverview": true,
    "structuralScore": 11
  },
  "suggestedQuality": 73,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 1239,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 10,
  "backlinkCount": 30,
  "hallucinationRisk": {
    "level": "high",
    "score": 85,
    "factors": [
      "biographical-claims",
      "no-citations",
      "low-quality-score",
      "few-external-sources"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 15,
    "similarPages": [
      {
        "id": "cirl",
        "title": "Cooperative IRL (CIRL)",
        "path": "/knowledge-base/responses/cirl/",
        "similarity": 15
      },
      {
        "id": "holden-karnofsky",
        "title": "Holden Karnofsky",
        "path": "/knowledge-base/people/holden-karnofsky/",
        "similarity": 14
      },
      {
        "id": "dario-amodei",
        "title": "Dario Amodei",
        "path": "/knowledge-base/people/dario-amodei/",
        "similarity": 13
      },
      {
        "id": "paul-christiano",
        "title": "Paul Christiano",
        "path": "/knowledge-base/people/paul-christiano/",
        "similarity": 13
      },
      {
        "id": "cooperative-ai",
        "title": "Cooperative AI",
        "path": "/knowledge-base/responses/cooperative-ai/",
        "similarity": 13
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 5,
      "diagrams": 0,
      "internalLinks": 10,
      "externalLinks": 6,
      "footnotes": 4,
      "references": 4
    },
    "actuals": {
      "tables": 11,
      "diagrams": 0,
      "internalLinks": 22,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 9,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:2.5 R:4.5 A:2 C:6.5"
  },
  "readerRank": 173,
  "researchRank": 216,
  "recommendedScore": 129.7
}
External Links
{
  "lesswrong": "https://www.lesswrong.com/tag/center-for-human-compatible-ai-chai",
  "eaForum": "https://forum.effectivealtruism.org/topics/center-for-human-compatible-ai",
  "wikidata": "https://www.wikidata.org/wiki/Q85751153"
}
Backlinks (30)
id | title | type | relationship
stuart-russell | Stuart Russell | person |
deep-learning-era | Deep Learning Revolution (2012-2020) | historical |
ai-compute-scaling-metrics | AI Compute Scaling Metrics | analysis |
ai-talent-market-dynamics | AI Talent Market Dynamics | analysis |
corrigibility-failure-pathways | Corrigibility Failure Pathways | analysis |
goal-misgeneralization-probability | Goal Misgeneralization Probability Model | analysis |
intervention-effectiveness-matrix | Intervention Effectiveness Matrix | analysis |
mesa-optimization-analysis | Mesa-Optimization Risk Analysis | analysis |
risk-interaction-matrix | Risk Interaction Matrix Model | analysis |
risk-interaction-network | Risk Interaction Network | analysis |
safety-research-allocation | Safety Research Allocation Model | analysis |
safety-research-value | Expected Value of AI Safety Research | analysis |
safety-researcher-gap | AI Safety Talent Supply/Demand Gap Model | analysis |
worldview-intervention-mapping | Worldview-Intervention Mapping | analysis |
apollo-research | Apollo Research | organization |
cais | CAIS (Center for AI Safety) | organization |
center-for-applied-rationality | Center for Applied Rationality | organization |
__index__/knowledge-base/organizations | Organizations | concept |
lionheart-ventures | Lionheart Ventures | organization |
mats | MATS ML Alignment Theory Scholars program | organization |
safety-orgs-overview | AI Safety Organizations (Overview) | concept |
secure-ai-project | Secure AI Project | organization |
holden-karnofsky | Holden Karnofsky | person |
vipul-naik | Vipul Naik | person |
alignment | AI Alignment | approach |
cirl | Cooperative IRL (CIRL) | approach |
cooperative-ai | Cooperative AI | approach |
evaluation | AI Evaluation | approach |
red-teaming | Red Teaming | approach |
training-programs | AI Safety Training Programs | approach |
Longterm Wiki