Longterm Wiki

AGI Development

agi-development — Path: /knowledge-base/forecasting/agi-development/
E604 — Entity ID (EID)
← Back to page · 12 backlinks · Quality: 52 · Updated: 2026-03-13
Page Record — database.json, merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "agi-development",
  "numericId": "E604",
  "path": "/knowledge-base/forecasting/agi-development/",
  "filePath": "knowledge-base/forecasting/agi-development.mdx",
  "title": "AGI Development",
  "quality": 52,
  "readerImportance": 50,
  "researchImportance": 73,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive synthesis of AGI timeline forecasts showing dramatic compression: Metaculus aggregates predict 25% probability by 2027 and 50% by 2031 (down from 50-year median in 2020), with industry leaders targeting 2026-2030. Analysis documents $400-450B annual investment by 2026, 3-5 year safety-capability gap, and finds 5% median (16% mean) catastrophic risk estimates from 2,778-researcher survey.",
  "description": "Analysis of AGI development forecasts showing dramatically compressed timelines—Metaculus averages 25% by 2027, 50% by 2031 (down from 50-year median in 2020). Industry leaders predict 2026-2030, with Anthropic officially targeting late 2026/early 2027 for \"Nobel-level\" AI capabilities.",
  "ratings": {
    "novelty": 3.5,
    "rigor": 6,
    "actionability": 4,
    "completeness": 6.5
  },
  "category": "forecasting",
  "subcategory": null,
  "clusters": [
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 2336,
    "tableCount": 19,
    "diagramCount": 1,
    "internalLinks": 39,
    "externalLinks": 23,
    "footnoteCount": 0,
    "bulletRatio": 0.15,
    "sectionCount": 41,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 2336,
  "unconvertedLinks": [
    {
      "text": "Metaculus",
      "url": "https://www.metaculus.com/questions/5121/when-will-the-first-general-ai-system-be-devised-tested-and-publicly-announced/",
      "resourceId": "bb81f2a99fdba0ec",
      "resourceTitle": "Metaculus"
    },
    {
      "text": "80,000 Hours",
      "url": "https://80000hours.org/2025/03/when-do-experts-expect-agi-to-arrive/",
      "resourceId": "f2394e3212f072f5",
      "resourceTitle": "80,000 Hours AGI Timelines Review"
    },
    {
      "text": "\"powerful AI\" by late 2026/early 2027",
      "url": "https://darioamodei.com/essay/machines-of-loving-grace",
      "resourceId": "3633040fb7158494",
      "resourceTitle": "Dario Amodei noted"
    },
    {
      "text": "Epoch AI",
      "url": "https://epoch.ai/blog/can-ai-scaling-continue-through-2030",
      "resourceId": "9587b65b1192289d",
      "resourceTitle": "Epoch AI"
    },
    {
      "text": "CFR",
      "url": "https://www.cfr.org/article/chinas-ai-chip-deficit-why-huawei-cant-catch-nvidia-and-us-export-controls-should-remain",
      "resourceId": "fe41a8475bafc188",
      "resourceTitle": "China's AI Chip Deficit: Why Huawei Can't Catch Nvidia"
    },
    {
      "text": "AI Impacts 2024",
      "url": "https://arxiv.org/abs/2401.02843",
      "resourceId": "420c48ee4c61fe6c",
      "resourceTitle": "2023 AI researcher survey"
    },
    {
      "text": "Metaculus AGI forecasts",
      "url": "https://www.metaculus.com/questions/5121/when-will-the-first-general-ai-system-be-devised-tested-and-publicly-announced/",
      "resourceId": "bb81f2a99fdba0ec",
      "resourceTitle": "Metaculus"
    },
    {
      "text": "80,000 Hours AGI review",
      "url": "https://80000hours.org/2025/03/when-do-experts-expect-agi-to-arrive/",
      "resourceId": "f2394e3212f072f5",
      "resourceTitle": "80,000 Hours AGI Timelines Review"
    },
    {
      "text": "AI Impacts 2024 survey",
      "url": "https://arxiv.org/abs/2401.02843",
      "resourceId": "420c48ee4c61fe6c",
      "resourceTitle": "2023 AI researcher survey"
    },
    {
      "text": "metaculus.com",
      "url": "https://www.metaculus.com/questions/5121/when-will-the-first-general-ai-system-be-devised-tested-and-publicly-announced/",
      "resourceId": "bb81f2a99fdba0ec",
      "resourceTitle": "Metaculus"
    },
    {
      "text": "80000hours.org",
      "url": "https://80000hours.org/2025/03/when-do-experts-expect-agi-to-arrive/",
      "resourceId": "f2394e3212f072f5",
      "resourceTitle": "80,000 Hours AGI Timelines Review"
    },
    {
      "text": "arxiv.org/abs/2401.02843",
      "url": "https://arxiv.org/abs/2401.02843",
      "resourceId": "420c48ee4c61fe6c",
      "resourceTitle": "2023 AI researcher survey"
    },
    {
      "text": "agi.goodheartlabs.com",
      "url": "https://agi.goodheartlabs.com/",
      "resourceId": "62e56523995e761b",
      "resourceTitle": "AGI Timelines Dashboard"
    },
    {
      "text": "epoch.ai",
      "url": "https://epoch.ai/blog/can-ai-scaling-continue-through-2030",
      "resourceId": "9587b65b1192289d",
      "resourceTitle": "Epoch AI"
    }
  ],
  "unconvertedLinkCount": 14,
  "convertedLinkCount": 18,
  "backlinkCount": 12,
  "hallucinationRisk": {
    "level": "medium",
    "score": 55,
    "factors": [
      "no-citations"
    ]
  },
  "redundancy": {
    "maxSimilarity": 18,
    "similarPages": [
      {
        "id": "agi-timeline",
        "title": "AGI Timeline",
        "path": "/knowledge-base/forecasting/agi-timeline/",
        "similarity": 18
      },
      {
        "id": "coding",
        "title": "Autonomous Coding",
        "path": "/knowledge-base/capabilities/coding/",
        "similarity": 17
      },
      {
        "id": "large-language-models",
        "title": "Large Language Models",
        "path": "/knowledge-base/capabilities/large-language-models/",
        "similarity": 17
      },
      {
        "id": "capability-threshold-model",
        "title": "Capability Threshold Model",
        "path": "/knowledge-base/models/capability-threshold-model/",
        "similarity": 17
      },
      {
        "id": "capability-alignment-race",
        "title": "Capability-Alignment Race Model",
        "path": "/knowledge-base/models/capability-alignment-race/",
        "similarity": 16
      }
    ]
  },
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 9,
      "diagrams": 1,
      "internalLinks": 19,
      "externalLinks": 12,
      "footnotes": 7,
      "references": 7
    },
    "actuals": {
      "tables": 19,
      "diagrams": 1,
      "internalLinks": 39,
      "externalLinks": 23,
      "footnotes": 0,
      "references": 23,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "red",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "green",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:3.5 R:6 A:4 C:6.5"
  },
  "readerRank": 301,
  "researchRank": 133,
  "recommendedScore": 150.72
}
External Links
{
  "lesswrong": "https://www.lesswrong.com/tag/agi"
}
Backlinks (12)
id | title | type/relationship
agi-timeline | AGI Timeline | concept
__index__/knowledge-base/forecasting | Forecasting | concept
racing-dynamics-impact | Racing Dynamics Impact Model | analysis
deepmind | Google DeepMind | organization
futuresearch | FutureSearch | organization
openai | OpenAI | organization
eli-lifland | Eli Lifland | person
leopold-aschenbrenner | Leopold Aschenbrenner | person
max-tegmark | Max Tegmark | person
learned-helplessness | Epistemic Learned Helplessness | risk
sharp-left-turn | Sharp Left Turn | risk
doomer | AI Doomer Worldview | concept
Longterm Wiki