Longterm Wiki

When Will AGI Arrive?

agi-timeline-debate · crux · Path: /knowledge-base/debates/agi-timeline-debate/
E4 · Entity ID (EID)
← Back to page · 4 backlinks · Quality: 33 · Updated: 2026-03-13
Page Record · database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "agi-timeline-debate",
  "numericId": null,
  "path": "/knowledge-base/debates/agi-timeline-debate/",
  "filePath": "knowledge-base/debates/agi-timeline-debate.mdx",
  "title": "When Will AGI Arrive?",
  "quality": 33,
  "readerImportance": 91.5,
  "researchImportance": 77,
  "tacticalValue": 78,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive survey of AGI timeline predictions ranging from 2025-2027 (ultra-short) to never with current approaches, with median expert estimates around 2032-2037. Key cruxes include whether scaling alone suffices, data/compute limits, and trust in lab leader claims; wide uncertainty reflects deep disagreement about fundamental capabilities questions.",
  "description": "The debate over AGI timelines from imminent to decades away to never with current approaches",
  "ratings": {
    "novelty": 2.5,
    "rigor": 3.5,
    "actionability": 4,
    "completeness": 5.5
  },
  "category": "debates",
  "subcategory": "policy-debates",
  "clusters": [
    "ai-safety",
    "epistemics"
  ],
  "metrics": {
    "wordCount": 1010,
    "tableCount": 1,
    "diagramCount": 0,
    "internalLinks": 2,
    "externalLinks": 2,
    "footnoteCount": 0,
    "bulletRatio": 0.33,
    "sectionCount": 14,
    "hasOverview": false,
    "structuralScore": 6
  },
  "suggestedQuality": 40,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 1010,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 0,
  "backlinkCount": 4,
  "hallucinationRisk": {
    "level": "medium",
    "score": 60,
    "factors": [
      "no-citations",
      "low-rigor-score",
      "low-quality-score",
      "conceptual-content"
    ]
  },
  "entityType": "crux",
  "redundancy": {
    "maxSimilarity": 14,
    "similarPages": [
      {
        "id": "long-timelines",
        "title": "Long-Timelines Technical Worldview",
        "path": "/knowledge-base/worldviews/long-timelines/",
        "similarity": 14
      },
      {
        "id": "scaling-debate",
        "title": "Is Scaling All You Need?",
        "path": "/knowledge-base/debates/scaling-debate/",
        "similarity": 13
      },
      {
        "id": "agi-development",
        "title": "AGI Development",
        "path": "/knowledge-base/forecasting/agi-development/",
        "similarity": 13
      },
      {
        "id": "agi-timeline",
        "title": "AGI Timeline",
        "path": "/knowledge-base/forecasting/agi-timeline/",
        "similarity": 13
      },
      {
        "id": "doomer",
        "title": "AI Doomer Worldview",
        "path": "/knowledge-base/worldviews/doomer/",
        "similarity": 13
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-24",
      "branch": "feat/stale-fact-detection-581-582",
      "title": "Batch content fixes + stale-facts validator + 2 new validation rules",
      "summary": "(fill in)",
      "pr": 924,
      "model": "claude-sonnet-4-6"
    },
    {
      "date": "2026-02-18",
      "branch": "claude/fix-issue-240-N5irU",
      "title": "Surface tacticalValue in /wiki table and score 53 pages",
      "summary": "Added `tacticalValue` to `ExploreItem` interface, `getExploreItems()` mappings, the `/wiki` explore table (new sortable \"Tact.\" column), and the card view sort dropdown. Scored 49 new pages with tactical values (4 were already scored), bringing total to 53.",
      "model": "sonnet-4",
      "duration": "~30min"
    }
  ],
  "coverage": {
    "passing": 4,
    "total": 13,
    "targets": {
      "tables": 4,
      "diagrams": 0,
      "internalLinks": 8,
      "externalLinks": 5,
      "footnotes": 3,
      "references": 3
    },
    "actuals": {
      "tables": 1,
      "diagrams": 0,
      "internalLinks": 2,
      "externalLinks": 2,
      "footnotes": 0,
      "references": 0,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "red",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "amber",
      "externalLinks": "amber",
      "footnotes": "red",
      "references": "red",
      "quotes": "red",
      "accuracy": "red"
    },
    "editHistoryCount": 2,
    "ratingsString": "N:2.5 R:3.5 A:4 C:5.5"
  },
  "readerRank": 14,
  "researchRank": 108,
  "recommendedScore": 133.08
}
External Links
{
  "lesswrong": "https://www.lesswrong.com/tag/ai-timelines",
  "eaForum": "https://forum.effectivealtruism.org/topics/ai-forecasting"
}
Backlinks (4)
id · title · type · relationship
__index__ · /knowledge-base/debates · Key Debates · concept
compounding-risks-analysis · Compounding Risks Analysis · analysis
toby-ord · Toby Ord · person
will-macaskill · Will MacAskill · person
Longterm Wiki