Longterm Wiki

Superintelligence

superintelligence · concept · Path: /knowledge-base/risks/superintelligence/
Entity ID (EID): E291
← Back to page · 10 backlinks · Quality: 92 · Updated: 2026-03-13
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "superintelligence",
  "numericId": null,
  "path": "/knowledge-base/risks/superintelligence/",
  "filePath": "knowledge-base/risks/superintelligence.mdx",
  "title": "Superintelligence",
  "quality": 92,
  "readerImportance": 94.5,
  "researchImportance": 18,
  "tacticalValue": 48,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-17",
  "llmSummary": null,
  "description": "AI systems with cognitive abilities vastly exceeding human intelligence",
  "ratings": null,
  "category": "risks",
  "subcategory": "accident",
  "clusters": [
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 1620,
    "tableCount": 0,
    "diagramCount": 0,
    "internalLinks": 11,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.25,
    "sectionCount": 17,
    "hasOverview": false,
    "structuralScore": 8
  },
  "suggestedQuality": 53,
  "updateFrequency": 180,
  "evergreen": true,
  "wordCount": 1620,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 0,
  "backlinkCount": 10,
  "citationHealth": {
    "total": 38,
    "withQuotes": 25,
    "verified": 25,
    "accuracyChecked": 25,
    "accurate": 20,
    "inaccurate": 1,
    "avgScore": 0.9693170738220215
  },
  "hallucinationRisk": {
    "level": "medium",
    "score": 45,
    "factors": [
      "no-citations",
      "few-external-sources",
      "conceptual-content",
      "high-quality"
    ]
  },
  "entityType": "concept",
  "redundancy": {
    "maxSimilarity": 18,
    "similarPages": [
      {
        "id": "self-improvement",
        "title": "Self-Improvement and Recursive Enhancement",
        "path": "/knowledge-base/capabilities/self-improvement/",
        "similarity": 18
      },
      {
        "id": "instrumental-convergence",
        "title": "Instrumental Convergence",
        "path": "/knowledge-base/risks/instrumental-convergence/",
        "similarity": 17
      },
      {
        "id": "reasoning",
        "title": "Reasoning and Planning",
        "path": "/knowledge-base/capabilities/reasoning/",
        "similarity": 16
      },
      {
        "id": "situational-awareness",
        "title": "Situational Awareness",
        "path": "/knowledge-base/capabilities/situational-awareness/",
        "similarity": 16
      },
      {
        "id": "miri-era",
        "title": "The MIRI Era (2000-2015)",
        "path": "/knowledge-base/history/miri-era/",
        "similarity": 16
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-19",
      "branch": "claude/citation-pipeline-iteration-KvR2n",
      "title": "Citation pipeline improvements and footnote normalization",
      "summary": "Fixed citation extraction to handle all footnote formats (text+bare URL), created a\nfootnote normalization script that auto-converted 58 non-standard footnotes to\nmarkdown-link format, switched dashboard export from JSON/.cache to YAML/data/ for\nproduction compatibility, ran the citation accuracy pipeline on 5 pages\n(rethink-priorities, cea, compute-governance, hewlett-foundation,\ncenter-for-applied-rationality) producing 232 citation checks with 57% accurate, 16%\nflagged, re-verified colorado-ai-act archive outside sandbox (18/19 verified), and\nimproved difficulty distribution to use structured categories (easy/medium/hard) with\nnormalization fallback.",
      "model": "claude-opus-4-6",
      "duration": "~1h"
    },
    {
      "date": "2026-02-17",
      "branch": "claude/top-priority-update-WurDM",
      "title": "Improve top 5 foundational wiki pages",
      "summary": "Improved the 5 highest-importance, lowest-quality wiki pages using the Crux content pipeline. All were stubs (7 words) or had quality=0 and are now comprehensive articles with citations, EntityLinks, and balanced perspectives.",
      "pr": 188
    }
  ],
  "coverage": {
    "passing": 3,
    "total": 13,
    "targets": {
      "tables": 6,
      "diagrams": 1,
      "internalLinks": 13,
      "externalLinks": 8,
      "footnotes": 5,
      "references": 5
    },
    "actuals": {
      "tables": 0,
      "diagrams": 0,
      "internalLinks": 11,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 4,
      "quotesWithQuotes": 25,
      "quotesTotal": 38,
      "accuracyChecked": 25,
      "accuracyTotal": 38
    },
    "items": {
      "llmSummary": "red",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "red",
      "tables": "red",
      "diagrams": "red",
      "internalLinks": "amber",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "amber",
      "quotes": "amber",
      "accuracy": "amber"
    },
    "editHistoryCount": 2
  },
  "readerRank": 3,
  "researchRank": 507,
  "recommendedScore": 252.82
}
External Links
{
  "wikipedia": "https://en.wikipedia.org/wiki/Superintelligence",
  "lesswrong": "https://www.lesswrong.com/tag/superintelligence",
  "stampy": "https://aisafety.info/questions/5880/What-is-superintelligence",
  "wikidata": "https://www.wikidata.org/wiki/Q1566000",
  "grokipedia": "https://grokipedia.com/page/Superintelligence"
}
Backlinks (10)
| id | title | type/relationship |
| --- | --- | --- |
| self-improvement | Self-Improvement and Recursive Enhancement | capability |
| fast-takeoff | Fast Takeoff | concept |
| existential-risk | Existential Risk from AI | concept |
| transformative-ai | Transformative AI | concept |
| miri-era | The MIRI Era (2000-2015) | historical |
| openai | OpenAI | organization |
| dario-amodei | Dario Amodei | person |
| eliezer-yudkowsky | Eliezer Yudkowsky | person |
| jan-leike | Jan Leike | person |
| nick-bostrom | Nick Bostrom | person |
Longterm Wiki