Longterm Wiki

Paul Christiano

ID: paul-christiano · Type: person · Path: /knowledge-base/people/paul-christiano/
Entity ID (EID): E220
47 backlinks · Quality: 39 · Updated: 2026-03-13
Page Record — database.json, merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "paul-christiano",
  "numericId": null,
  "path": "/knowledge-base/people/paul-christiano/",
  "filePath": "knowledge-base/people/paul-christiano.mdx",
  "title": "Paul Christiano",
  "quality": 39,
  "readerImportance": 28,
  "researchImportance": 36,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive biography of Paul Christiano documenting his technical contributions (IDA, debate, scalable oversight), risk assessment (~10-20% P(doom), AGI 2030s-2040s), and evolution from higher optimism to current moderate concern. Documents implementation of his ideas at major labs (RLHF at OpenAI, Constitutional AI at Anthropic) with specific citation to papers and organizational impact.",
  "description": "Founder of ARC, creator of iterated amplification and AI safety via debate. Current risk assessment ~10-20% P(doom), AGI 2030s-2040s. Pioneered prosaic alignment approach focusing on scalable oversight mechanisms.",
  "ratings": {
    "novelty": 2,
    "rigor": 4.5,
    "actionability": 2,
    "completeness": 6
  },
  "category": "people",
  "subcategory": "safety-researchers",
  "clusters": [
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 1112,
    "tableCount": 12,
    "diagramCount": 0,
    "internalLinks": 46,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.1,
    "sectionCount": 25,
    "hasOverview": true,
    "structuralScore": 11
  },
  "suggestedQuality": 73,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 1112,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 18,
  "backlinkCount": 47,
  "hallucinationRisk": {
    "level": "high",
    "score": 85,
    "factors": [
      "biographical-claims",
      "no-citations",
      "low-quality-score",
      "few-external-sources"
    ]
  },
  "entityType": "person",
  "redundancy": {
    "maxSimilarity": 13,
    "similarPages": [
      {
        "id": "chai",
        "title": "CHAI (Center for Human-Compatible AI)",
        "path": "/knowledge-base/organizations/chai/",
        "similarity": 13
      },
      {
        "id": "holden-karnofsky",
        "title": "Holden Karnofsky",
        "path": "/knowledge-base/people/holden-karnofsky/",
        "similarity": 12
      },
      {
        "id": "safety-research-value",
        "title": "Expected Value of AI Safety Research",
        "path": "/knowledge-base/models/safety-research-value/",
        "similarity": 11
      },
      {
        "id": "conjecture",
        "title": "Conjecture",
        "path": "/knowledge-base/organizations/conjecture/",
        "similarity": 11
      },
      {
        "id": "scheming-likelihood-model",
        "title": "Scheming Likelihood Assessment",
        "path": "/knowledge-base/models/scheming-likelihood-model/",
        "similarity": 10
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 4,
      "diagrams": 0,
      "internalLinks": 9,
      "externalLinks": 6,
      "footnotes": 3,
      "references": 3
    },
    "actuals": {
      "tables": 12,
      "diagrams": 0,
      "internalLinks": 46,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 14,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:2 R:4.5 A:2 C:6"
  },
  "readerRank": 468,
  "researchRank": 373,
  "recommendedScore": 113.41
}
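A minimal TypeScript sketch of the build-time merge described above. The loader names (loadMdxFrontmatter, loadEntityYaml, computeMetrics) and the shallow-merge precedence are illustrative assumptions, not the wiki's actual build API.

// Hypothetical loaders; the real build pipeline's names may differ.
declare function loadMdxFrontmatter(filePath: string): Record<string, unknown>;
declare function loadEntityYaml(entityId: string): Record<string, unknown>;
declare function computeMetrics(filePath: string): Record<string, unknown>;

interface PageRecord {
  id: string;
  path: string;
  title: string;
  quality: number | null;
  [key: string]: unknown; // remaining fields as in database.json above
}

function buildPageRecord(filePath: string, entityId: string): PageRecord {
  const frontmatter = loadMdxFrontmatter(filePath); // e.g. title, description, ratings
  const entity = loadEntityYaml(entityId);          // e.g. entityType, external links
  const metrics = computeMetrics(filePath);         // e.g. wordCount, backlinkCount
  // Assumed precedence on key collisions: computed metrics override
  // entity fields, which override MDX frontmatter defaults.
  return { ...frontmatter, ...entity, ...metrics } as PageRecord;
}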
External Links
{
  "eaForum": "https://forum.effectivealtruism.org/topics/paul-christiano",
  "wikidata": "https://www.wikidata.org/wiki/Q64769299"
}
Backlinks (47)
id | title | type
long-term-benefit-trust | Long-Term Benefit Trust (Anthropic) | analysis
capability-alignment-race | Capability-Alignment Race Model | analysis
model-organisms-of-misalignment | Model Organisms of Misalignment | analysis
metr | METR | organization
arc | ARC | organization
miri | MIRI | organization
nist-ai | NIST and AI Safety | organization
eliezer-yudkowsky | Eliezer Yudkowsky | person
ajeya-cotra | Ajeya Cotra | person
jan-leike | Jan Leike | person
stuart-russell | Stuart Russell | person
accident-risks | AI Accident Risk Cruxes | crux
case-for-xrisk | The Case FOR AI Existential Risk | argument
why-alignment-easy | Why Alignment Might Be Easy | argument
why-alignment-hard | Why Alignment Might Be Hard | argument
deep-learning-era | Deep Learning Revolution (2012-2020) | historical
miri-era | The MIRI Era (2000-2015) | historical
__index__/knowledge-base | Knowledge Base | concept
ai-timelines | AI Timelines | concept
anthropic-pledge-enforcement | Anthropic Founder Pledges: Interventions to Increase Follow-Through | analysis
defense-in-depth-model | Defense in Depth Model | analysis
power-seeking-conditions | Power-Seeking Emergence Conditions Model | analysis
scheming-likelihood-model | Scheming Likelihood Assessment | analysis
anthropic-investors | Anthropic (Funder) | analysis
fli | Future of Life Institute (FLI) | organization
__index__/knowledge-base/organizations | Organizations | concept
ltff | Long-Term Future Fund (LTFF) | organization
manifund | Manifund | organization
redwood-research | Redwood Research | organization
safety-orgs-overview | AI Safety Organizations (Overview) | concept
dario-amodei | Dario Amodei | person
dustin-moskovitz | Dustin Moskovitz (AI Safety Funder) | person
eliezer-yudkowsky-predictions | Eliezer Yudkowsky: Track Record | concept
evan-hubinger | Evan Hubinger | person
gwern | Gwern Branwen | person
helen-toner | Helen Toner | person
holden-karnofsky | Holden Karnofsky | person
__index__/knowledge-base/people | People | concept
ai-control | AI Control | safety-agenda
alignment | AI Alignment | approach
research-agendas | AI Alignment Research Agenda Comparison | crux
scalable-oversight | Scalable Oversight | safety-agenda
sleeper-agent-detection | Sleeper Agent Detection | approach
existential-risk | Existential Risk from AI | concept
superintelligence | Superintelligence | concept
doomer | AI Doomer Worldview | concept
optimistic | Optimistic Alignment Worldview | concept