Longterm Wiki

AGI Timeline

agi-timeline · concept · Path: /knowledge-base/forecasting/agi-timeline/
E399 — Entity ID (EID)
← Back to page · 28 backlinks · Quality: 59 · Updated: 2026-03-13
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "agi-timeline",
  "numericId": null,
  "path": "/knowledge-base/forecasting/agi-timeline/",
  "filePath": "knowledge-base/forecasting/agi-timeline.mdx",
  "title": "AGI Timeline",
  "quality": 59,
  "readerImportance": 55.5,
  "researchImportance": 92.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive synthesis of AGI timeline forecasts showing dramatic acceleration: expert median dropped from 2061 (2018) to 2047 (2023), Metaculus from 50 years to 5 years since 2020, with current predictions clustering around 2027-2045 median (50% probability). Aggregates 9,300+ predictions across expert surveys, prediction markets, and lab leader statements, documenting key uncertainties around scaling limits, definitions, and technical bottlenecks.",
  "description": "Expert forecasts and prediction markets suggest 50% probability of AGI by 2030-2045, with Metaculus predicting median of November 2027 and lab leaders (Altman, Amodei, Hassabis) converging on 2026-2029. Timelines have shortened dramatically—Metaculus dropped from 50 years to 5 years since 2020.",
  "ratings": {
    "novelty": 4.2,
    "rigor": 6.8,
    "actionability": 5.5,
    "completeness": 7.5
  },
  "category": "forecasting",
  "subcategory": null,
  "clusters": [
    "ai-safety",
    "epistemics"
  ],
  "metrics": {
    "wordCount": 1982,
    "tableCount": 16,
    "diagramCount": 1,
    "internalLinks": 44,
    "externalLinks": 23,
    "footnoteCount": 0,
    "bulletRatio": 0.14,
    "sectionCount": 33,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 1982,
  "unconvertedLinks": [
    {
      "text": "Metaculus",
      "url": "https://www.metaculus.com/questions/5121/when-will-the-first-general-ai-system-be-devised-tested-and-publicly-announced/",
      "resourceId": "bb81f2a99fdba0ec",
      "resourceTitle": "Metaculus"
    },
    {
      "text": "AI Multiple",
      "url": "https://research.aimultiple.com/artificial-general-intelligence-singularity-timing/",
      "resourceId": "2f2cf65315f48c6b",
      "resourceTitle": "Andrej Karpathy"
    },
    {
      "text": "80,000 Hours",
      "url": "https://80000hours.org/2025/03/when-do-experts-expect-agi-to-arrive/",
      "resourceId": "f2394e3212f072f5",
      "resourceTitle": "80,000 Hours AGI Timelines Review"
    },
    {
      "text": "80,000 Hours analysis",
      "url": "https://80000hours.org/2025/03/when-do-experts-expect-agi-to-arrive/",
      "resourceId": "f2394e3212f072f5",
      "resourceTitle": "80,000 Hours AGI Timelines Review"
    },
    {
      "text": "AGI Dashboard",
      "url": "https://agi.goodheartlabs.com/",
      "resourceId": "62e56523995e761b",
      "resourceTitle": "AGI Timelines Dashboard"
    },
    {
      "text": "Sam Altman Blog",
      "url": "https://blog.samaltman.com/the-gentle-singularity",
      "resourceId": "2bc0d4251ea0868f",
      "resourceTitle": "\"we are past the event horizon; the takeoff has started\""
    },
    {
      "text": "Lex Fridman Interview",
      "url": "https://lexfridman.com/dario-amodei-transcript/",
      "resourceId": "c6218e8dfd42eaf4",
      "resourceTitle": "Dario Amodei"
    },
    {
      "text": "Dario Amodei",
      "url": "https://lexfridman.com/dario-amodei-transcript/",
      "resourceId": "c6218e8dfd42eaf4",
      "resourceTitle": "Dario Amodei"
    },
    {
      "text": "80,000 Hours Timeline Review",
      "url": "https://80000hours.org/2025/03/when-do-experts-expect-agi-to-arrive/",
      "resourceId": "f2394e3212f072f5",
      "resourceTitle": "80,000 Hours AGI Timelines Review"
    },
    {
      "text": "Blog",
      "url": "https://blog.samaltman.com/the-gentle-singularity",
      "resourceId": "2bc0d4251ea0868f",
      "resourceTitle": "\"we are past the event horizon; the takeoff has started\""
    },
    {
      "text": "Transcript",
      "url": "https://lexfridman.com/dario-amodei-transcript/",
      "resourceId": "c6218e8dfd42eaf4",
      "resourceTitle": "Dario Amodei"
    },
    {
      "text": "Analysis",
      "url": "https://research.aimultiple.com/artificial-general-intelligence-singularity-timing/",
      "resourceId": "2f2cf65315f48c6b",
      "resourceTitle": "Andrej Karpathy"
    },
    {
      "text": "Dashboard",
      "url": "https://agi.goodheartlabs.com/",
      "resourceId": "62e56523995e761b",
      "resourceTitle": "AGI Timelines Dashboard"
    },
    {
      "text": "Samotsvety Forecasting",
      "url": "https://samotsvety.org/",
      "resourceId": "73e5f5bbfbda4925",
      "resourceTitle": "Samotsvety Forecasting"
    }
  ],
  "unconvertedLinkCount": 14,
  "convertedLinkCount": 17,
  "backlinkCount": 28,
  "hallucinationRisk": {
    "level": "medium",
    "score": 45,
    "factors": [
      "no-citations",
      "conceptual-content"
    ]
  },
  "entityType": "concept",
  "redundancy": {
    "maxSimilarity": 18,
    "similarPages": [
      {
        "id": "agi-development",
        "title": "AGI Development",
        "path": "/knowledge-base/forecasting/agi-development/",
        "similarity": 18
      },
      {
        "id": "large-language-models",
        "title": "Large Language Models",
        "path": "/knowledge-base/capabilities/large-language-models/",
        "similarity": 15
      },
      {
        "id": "long-timelines",
        "title": "Long-Timelines Technical Worldview",
        "path": "/knowledge-base/worldviews/long-timelines/",
        "similarity": 14
      },
      {
        "id": "coding",
        "title": "Autonomous Coding",
        "path": "/knowledge-base/capabilities/coding/",
        "similarity": 13
      },
      {
        "id": "agi-timeline-debate",
        "title": "When Will AGI Arrive?",
        "path": "/knowledge-base/debates/agi-timeline-debate/",
        "similarity": 13
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-24",
      "branch": "feat/stale-fact-detection-581-582",
      "title": "Batch content fixes + stale-facts validator + 2 new validation rules",
      "summary": "(fill in)",
      "pr": 924,
      "model": "claude-sonnet-4-6"
    }
  ],
  "coverage": {
    "passing": 10,
    "total": 13,
    "targets": {
      "tables": 8,
      "diagrams": 1,
      "internalLinks": 16,
      "externalLinks": 10,
      "footnotes": 6,
      "references": 6
    },
    "actuals": {
      "tables": 16,
      "diagrams": 1,
      "internalLinks": 44,
      "externalLinks": 23,
      "footnotes": 0,
      "references": 19,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "green",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "editHistoryCount": 1,
    "ratingsString": "N:4.2 R:6.8 A:5.5 C:7.5"
  },
  "readerRank": 263,
  "researchRank": 17,
  "recommendedScore": 167.4
}
External Links
{
  "lesswrong": "https://www.lesswrong.com/tag/ai-timelines",
  "eaForum": "https://forum.effectivealtruism.org/topics/ai-forecasting"
}
Backlinks (28)
id · title · type/relationship
critical-uncertainties · AI Risk Critical Uncertainties Model · crux
large-language-models · Large Language Models · concept
case-for-xrisk · The Case FOR AI Existential Risk · argument
__index__/knowledge-base/forecasting · Forecasting · concept
ai-risk-portfolio-analysis · AI Risk Portfolio Analysis · analysis
capabilities-to-safety-pipeline · Capabilities-to-Safety Pipeline Model · analysis
corrigibility-failure-pathways · Corrigibility Failure Pathways · analysis
deceptive-alignment-decomposition · Deceptive Alignment Decomposition Model · analysis
international-coordination-game · International AI Coordination Game · analysis
cais · CAIS (Center for AI Safety) · organization
conjecture · Conjecture · organization
deepmind · Google DeepMind · organization
epistemic-orgs-overview · Epistemic & Forecasting Organizations (Overview) · concept
futuresearch · FutureSearch · organization
metaculus · Metaculus · organization
samotsvety · Samotsvety · organization
connor-leahy · Connor Leahy · person
dario-amodei · Dario Amodei · person
demis-hassabis · Demis Hassabis · person
eli-lifland · Eli Lifland · person
elon-musk-predictions · Elon Musk: Track Record · concept
elon-musk · Elon Musk (AI Industry) · person
leopold-aschenbrenner · Leopold Aschenbrenner · person
max-tegmark · Max Tegmark · person
paul-christiano · Paul Christiano · person
sam-altman-predictions · Sam Altman: Track Record · concept
prediction-markets · Prediction Markets (AI Forecasting) · approach
timelines-wiki · Timelines Wiki · project
Longterm Wiki