Longterm Wiki

Carlsmith's Six-Premise Argument

ID: carlsmith-six-premises · Type: analysis · Path: /knowledge-base/models/carlsmith-six-premises/
Entity ID (EID): E54
Backlinks: 3 · Quality: 65 · Updated: 2026-03-13
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "carlsmith-six-premises",
  "numericId": null,
  "path": "/knowledge-base/models/carlsmith-six-premises/",
  "filePath": "knowledge-base/models/carlsmith-six-premises.mdx",
  "title": "Carlsmith's Six-Premise Argument",
  "quality": 65,
  "readerImportance": 37.5,
  "researchImportance": 67.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Carlsmith's framework decomposes AI existential risk into six conditional premises (timelines, incentives, alignment difficulty, power-seeking, disempowerment scaling, catastrophe), yielding ~5% risk by 2070 (updated to >10%). Comparison with superforecasters reveals largest disagreements on P3 (alignment difficulty: 40% vs 25%) and P4 (power-seeking: 65% vs 35%), with combined estimates differing ~10-25x.",
  "description": "Joe Carlsmith's probabilistic decomposition of AI existential risk into six conditional premises. Originally estimated ~5% risk by 2070, updated to >10%. The most rigorous public framework for structured x-risk estimation.",
  "ratings": {
    "focus": 9,
    "novelty": 3.5,
    "rigor": 7.5,
    "completeness": 8.5,
    "concreteness": 8,
    "actionability": 6.5
  },
  "category": "models",
  "subcategory": "framework-models",
  "clusters": [
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 2179,
    "tableCount": 9,
    "diagramCount": 3,
    "internalLinks": 14,
    "externalLinks": 6,
    "footnoteCount": 0,
    "bulletRatio": 0.25,
    "sectionCount": 30,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 90,
  "evergreen": true,
  "wordCount": 2179,
  "unconvertedLinks": [
    {
      "text": "Carlsmith (2022)",
      "url": "https://arxiv.org/abs/2206.13353",
      "resourceId": "6e597a4dc1f6f860",
      "resourceTitle": "Is Power-Seeking AI an Existential Risk?"
    },
    {
      "text": "Superforecaster comparison (2023)",
      "url": "https://joecarlsmith.com/2023/10/18/superforecasting-the-premises-in-is-power-seeking-ai-an-existential-risk/",
      "resourceId": "8d9f2fea7c1b4e3a",
      "resourceTitle": "Superforecasting the Premises in 'Is Power-Seeking AI an Existential Risk?'"
    },
    {
      "text": "80,000 Hours problem profile",
      "url": "https://80000hours.org/problem-profiles/risks-from-power-seeking-ai/",
      "resourceId": "d9fb00b6393b6112",
      "resourceTitle": "80,000 Hours. \"Risks from Power-Seeking AI Systems\""
    },
    {
      "text": "80,000 Hours estimates ~300 people",
      "url": "https://80000hours.org/problem-profiles/risks-from-power-seeking-ai/",
      "resourceId": "d9fb00b6393b6112",
      "resourceTitle": "80,000 Hours. \"Risks from Power-Seeking AI Systems\""
    }
  ],
  "unconvertedLinkCount": 4,
  "convertedLinkCount": 5,
  "backlinkCount": 3,
  "hallucinationRisk": {
    "level": "medium",
    "score": 40,
    "factors": [
      "no-citations",
      "high-rigor"
    ]
  },
  "entityType": "analysis",
  "redundancy": {
    "maxSimilarity": 17,
    "similarPages": [
      {
        "id": "accident-risks",
        "title": "AI Accident Risk Cruxes",
        "path": "/knowledge-base/cruxes/accident-risks/",
        "similarity": 17
      },
      {
        "id": "instrumental-convergence",
        "title": "Instrumental Convergence",
        "path": "/knowledge-base/risks/instrumental-convergence/",
        "similarity": 17
      },
      {
        "id": "case-for-xrisk",
        "title": "The Case FOR AI Existential Risk",
        "path": "/knowledge-base/debates/case-for-xrisk/",
        "similarity": 16
      },
      {
        "id": "sleeper-agent-detection",
        "title": "Sleeper Agent Detection",
        "path": "/knowledge-base/responses/sleeper-agent-detection/",
        "similarity": 16
      },
      {
        "id": "power-seeking",
        "title": "Power-Seeking AI",
        "path": "/knowledge-base/risks/power-seeking/",
        "similarity": 16
      }
    ]
  },
  "coverage": {
    "passing": 6,
    "total": 13,
    "targets": {
      "tables": 9,
      "diagrams": 1,
      "internalLinks": 17,
      "externalLinks": 11,
      "footnotes": 7,
      "references": 7
    },
    "actuals": {
      "tables": 9,
      "diagrams": 3,
      "internalLinks": 14,
      "externalLinks": 6,
      "footnotes": 0,
      "references": 6,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "amber",
      "externalLinks": "amber",
      "footnotes": "red",
      "references": "amber",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:3.5 R:7.5 A:6.5 C:8.5"
  },
  "readerRank": 395,
  "researchRank": 169,
  "recommendedScore": 170.44
}
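The ~5% headline figure in llmSummary is just the product of the six conditional premise probabilities. A minimal worked sketch in TypeScript, assuming the premise values from Carlsmith's report (P1 65%, P2 80%, P3 40%, P4 65%, P5 40%, P6 95%; only the P3 and P4 figures appear in the record above, the rest are taken from the report itself):

```ts
// Carlsmith's decomposition: each premise is conditioned on the previous
// ones, so the combined risk is the straight product of the six probabilities.
const carlsmith = {
  p1Timelines: 0.65,      // APS systems feasible and incentivized to build by 2070
  p2Incentives: 0.80,     // strong incentives to deploy them
  p3Alignment: 0.40,      // much harder to build aligned than misaligned systems
  p4PowerSeeking: 0.65,   // misaligned power-seeking causes high-impact failures
  p5Disempowerment: 0.40, // failures scale to full human disempowerment
  p6Catastrophe: 0.95,    // disempowerment constitutes existential catastrophe
};

const combined = (ps: Record<string, number>): number =>
  Object.values(ps).reduce((acc, p) => acc * p, 1);

console.log(combined(carlsmith).toFixed(4)); // 0.0514 — the ~5% estimate

// Swap in only the two superforecaster medians quoted in llmSummary
// (P3: 25%, P4: 35%), leaving the other premises at Carlsmith's values.
const swapped = { ...carlsmith, p3Alignment: 0.25, p4PowerSeeking: 0.35 };
console.log(combined(swapped).toFixed(4)); // 0.0173 — roughly 3x lower
```

Swapping only P3 and P4 closes about 3x of the gap; since the combined estimates differ ~10-25x, the superforecasters evidently assigned lower probabilities to some of the remaining premises as well.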
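The green/amber/red statuses in coverage.items track how each actual compares to its target (internalLinks 14/17 is amber, footnotes 0/7 is red). The record doesn't state the thresholds; a hypothetical rule that reproduces all six target/actual pairs above is green at or above target, amber at half or more, red below half:

```ts
type Status = "green" | "amber" | "red";

// Hypothetical thresholds inferred from this one record; the wiki's real
// build-time rule may differ.
const status = (actual: number, target: number): Status =>
  actual >= target ? "green" : actual >= target / 2 ? "amber" : "red";

const targets = { tables: 9, diagrams: 1, internalLinks: 17, externalLinks: 11, footnotes: 7, references: 7 };
const actuals = { tables: 9, diagrams: 3, internalLinks: 14, externalLinks: 6, footnotes: 0, references: 6 };

for (const key of Object.keys(targets) as (keyof typeof targets)[]) {
  console.log(key, status(actuals[key], targets[key]));
}
// tables green, diagrams green, internalLinks amber,
// externalLinks amber, footnotes red, references amber — matching coverage.items
```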
External Links

No external links

Backlinks (3)
id | title | type
__index__/knowledge-base | Knowledge Base | concept
__index__/knowledge-base/models | Analytical Models | concept
longtermist-value-comparisons | Relative Longtermist Value Comparisons | analysis