Longterm Wiki

Is AI Existential Risk Real?

is-ai-xrisk-real · crux · Path: /knowledge-base/debates/is-ai-xrisk-real/
E181 — Entity ID (EID)
← Back to page · 2 backlinks · Quality: 12 · Updated: 2026-03-13
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "is-ai-xrisk-real",
  "numericId": null,
  "path": "/knowledge-base/debates/is-ai-xrisk-real/",
  "filePath": "knowledge-base/debates/is-ai-xrisk-real.mdx",
  "title": "Is AI Existential Risk Real?",
  "quality": 12,
  "readerImportance": 93.5,
  "researchImportance": 72,
  "tacticalValue": 62,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-20",
  "llmSummary": "Presents two core cruxes in the AI x-risk debate: whether advanced AI would develop dangerous goals (instrumental convergence vs. trainable safety) and whether we'll get warning signs (gradual failures vs. deception/fast takeoff). No quantitative analysis, primary sources, or novel framing provided.",
  "description": "The fundamental debate about whether AI poses existential risk",
  "ratings": {
    "novelty": 1.5,
    "rigor": 2,
    "actionability": 1,
    "completeness": 1.5
  },
  "category": "debates",
  "subcategory": "policy-debates",
  "clusters": [
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 32,
    "tableCount": 0,
    "diagramCount": 0,
    "internalLinks": 0,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0,
    "sectionCount": 1,
    "hasOverview": false,
    "structuralScore": 2
  },
  "suggestedQuality": 13,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 32,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 0,
  "backlinkCount": 2,
  "hallucinationRisk": {
    "level": "medium",
    "score": 35,
    "factors": [
      "low-rigor-score",
      "low-quality-score",
      "conceptual-content",
      "minimal-content"
    ]
  },
  "entityType": "crux",
  "redundancy": {
    "maxSimilarity": 0,
    "similarPages": []
  },
  "changeHistory": [
    {
      "date": "2026-03-11",
      "branch": "auto-update/2026-03-11",
      "title": "Auto-improve (standard): Is AI Existential Risk Real?",
      "summary": "Improved \"Is AI Existential Risk Real?\" via standard pipeline (1281.2s). Quality score: 81. Issues resolved: Footnote [^rc-346d] cites a Wikipedia article as evidence fo; Footnote [^rc-ada8] cites an aggregated page of forum writin; Footnote [^rc-7838] attributes a review to 'Zvi Mowshowitz' .",
      "duration": "1281.2s",
      "cost": "$5-8"
    },
    {
      "date": "2026-02-18",
      "branch": "claude/fix-issue-240-N5irU",
      "title": "Surface tacticalValue in /wiki table and score 53 pages",
      "summary": "Added `tacticalValue` to `ExploreItem` interface, `getExploreItems()` mappings, the `/wiki` explore table (new sortable \"Tact.\" column), and the card view sort dropdown. Scored 49 new pages with tactical values (4 were already scored), bringing total to 53.",
      "model": "sonnet-4",
      "duration": "~30min"
    }
  ],
  "coverage": {
    "passing": 4,
    "total": 13,
    "targets": {
      "tables": 1,
      "diagrams": 0,
      "internalLinks": 3,
      "externalLinks": 1,
      "footnotes": 2,
      "references": 1
    },
    "actuals": {
      "tables": 0,
      "diagrams": 0,
      "internalLinks": 0,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 0,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "red",
      "tables": "red",
      "diagrams": "red",
      "internalLinks": "red",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "red",
      "quotes": "red",
      "accuracy": "red"
    },
    "editHistoryCount": 2,
    "ratingsString": "N:1.5 R:2 A:1 C:1.5"
  },
  "readerRank": 6,
  "researchRank": 137,
  "recommendedScore": 90.62
}
External Links
{
  "lesswrong": "https://www.lesswrong.com/tag/existential-risk",
  "eaForum": "https://forum.effectivealtruism.org/topics/existential-risk"
}
Backlinks (2)
id · title · type · relationship
__index__/knowledge-base/debates · Key Debates · concept
__index__/knowledge-base · Knowledge Base · concept
Longterm Wiki