Longterm Wiki

Long-Timelines Technical Worldview

long-timelines · concept · Path: /knowledge-base/worldviews/long-timelines/
Entity ID (EID): E505
← Back to page · 4 backlinks · Quality: 91 · Updated: 2026-03-13
Page Record — database.json, merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "long-timelines",
  "numericId": null,
  "path": "/knowledge-base/worldviews/long-timelines/",
  "filePath": "knowledge-base/worldviews/long-timelines.mdx",
  "title": "Long-Timelines Technical Worldview",
  "quality": 91,
  "readerImportance": 14.5,
  "researchImportance": 64,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive overview of the long-timelines worldview (20-40+ years to AGI, 5-20% P(doom)), arguing for foundational research over rushed solutions based on historical AI overoptimism, current systems' limitations, and scaling constraints. Provides concrete career and research prioritization guidance but lacks novel synthesis—primarily organizes existing arguments from Brooks, Marcus, and Mitchell.",
  "description": "The long-timelines worldview (20-40+ years to AGI) argues for foundational research over rushed solutions based on historical AI overoptimism, current systems' limitations, and scaling constraints. While Metaculus forecasters now predict a 50% chance of AGI by 2031—down from 50 years away in 2020—long-timelines proponents point to survey findings that 76% of experts believe current scaling approaches are insufficient for AGI.",
  "ratings": {
    "novelty": 3.5,
    "rigor": 4,
    "actionability": 5.5,
    "completeness": 6.5
  },
  "category": "worldviews",
  "subcategory": null,
  "clusters": [
    "ai-safety",
    "epistemics"
  ],
  "metrics": {
    "wordCount": 4682,
    "tableCount": 12,
    "diagramCount": 1,
    "internalLinks": 16,
    "externalLinks": 52,
    "footnoteCount": 0,
    "bulletRatio": 0.38,
    "sectionCount": 71,
    "hasOverview": true,
    "structuralScore": 14
  },
  "suggestedQuality": 93,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 4682,
  "unconvertedLinks": [
    {
      "text": "Metaculus forecasters",
      "url": "https://www.metaculus.com/questions/3479/date-weakly-general-ai-is-publicly-known/",
      "resourceId": "f315d8547ad503f7",
      "resourceTitle": "Metaculus (Dec 2024)"
    },
    {
      "text": "Metaculus forecasters",
      "url": "https://www.metaculus.com/questions/3479/date-weakly-general-ai-is-publicly-known/",
      "resourceId": "f315d8547ad503f7",
      "resourceTitle": "Metaculus (Dec 2024)"
    },
    {
      "text": "AI researcher survey",
      "url": "https://80000hours.org/2025/03/when-do-experts-expect-agi-to-arrive/",
      "resourceId": "f2394e3212f072f5",
      "resourceTitle": "80,000 Hours AGI Timelines Review"
    },
    {
      "text": "Epoch AI Direct Approach",
      "url": "https://epoch.ai/blog/literature-review-of-transformative-artificial-intelligence-timelines",
      "resourceId": "2cb4447b6a55df95",
      "resourceTitle": "Epoch AI: Literature Review of TAI Timelines"
    },
    {
      "text": "Ilya Sutskever",
      "url": "https://techcrunch.com/2024/11/20/ai-scaling-laws-are-showing-diminishing-returns-forcing-ai-labs-to-change-course/",
      "resourceId": "1ed975df72c30426",
      "resourceTitle": "TechCrunch"
    },
    {
      "text": "AI safety funding",
      "url": "https://futureoflife.org/ai-safety-index-summer-2025/",
      "resourceId": "df46edd6fa2078d1",
      "resourceTitle": "FLI AI Safety Index Summer 2025"
    },
    {
      "text": "diminishing returns",
      "url": "https://techcrunch.com/2024/11/20/ai-scaling-laws-are-showing-diminishing-returns-forcing-ai-labs-to-change-course/",
      "resourceId": "1ed975df72c30426",
      "resourceTitle": "TechCrunch"
    },
    {
      "text": "Synthetic data quality issues",
      "url": "https://epoch.ai/blog/can-ai-scaling-continue-through-2030",
      "resourceId": "9587b65b1192289d",
      "resourceTitle": "Epoch AI"
    },
    {
      "text": "Global AI investment reached $252B in 2024",
      "url": "https://hai.stanford.edu/ai-index/2025-ai-index-report/economy",
      "resourceId": "1db7de7741f907e5",
      "resourceTitle": "Stanford AI Index 2025"
    },
    {
      "text": "Gary Marcus",
      "url": "https://garymarcus.substack.com/",
      "resourceId": "9b1ab7f63e6b1b35",
      "resourceTitle": "Gary Marcus's Substack"
    },
    {
      "text": "AI safety research receives only $150-170M/year",
      "url": "https://futureoflife.org/ai-safety-index-summer-2025/",
      "resourceId": "df46edd6fa2078d1",
      "resourceTitle": "FLI AI Safety Index Summer 2025"
    },
    {
      "text": "corporate AI investment reached $252B in 2024",
      "url": "https://hai.stanford.edu/ai-index/2025-ai-index-report/economy",
      "resourceId": "1db7de7741f907e5",
      "resourceTitle": "Stanford AI Index 2025"
    },
    {
      "text": "80,000 Hours analysis",
      "url": "https://80000hours.org/2025/03/when-do-experts-expect-agi-to-arrive/",
      "resourceId": "f2394e3212f072f5",
      "resourceTitle": "80,000 Hours AGI Timelines Review"
    },
    {
      "text": "Scaling hitting diminishing returns",
      "url": "https://techcrunch.com/2024/11/20/ai-scaling-laws-are-showing-diminishing-returns-forcing-ai-labs-to-change-course/",
      "resourceId": "1ed975df72c30426",
      "resourceTitle": "TechCrunch"
    },
    {
      "text": "Returns appear diminishing",
      "url": "https://techcrunch.com/2024/11/20/ai-scaling-laws-are-showing-diminishing-returns-forcing-ai-labs-to-change-course/",
      "resourceId": "1ed975df72c30426",
      "resourceTitle": "TechCrunch"
    },
    {
      "text": "Metaculus",
      "url": "https://www.metaculus.com/questions/3479/date-weakly-general-ai-is-publicly-known/",
      "resourceId": "f315d8547ad503f7",
      "resourceTitle": "Metaculus (Dec 2024)"
    },
    {
      "text": "2024: $252B corporate, $150-170M safety",
      "url": "https://hai.stanford.edu/ai-index/2025-ai-index-report/economy",
      "resourceId": "1db7de7741f907e5",
      "resourceTitle": "Stanford AI Index 2025"
    },
    {
      "text": "80,000 Hours: Shrinking AGI Timelines (2025)",
      "url": "https://80000hours.org/2025/03/when-do-experts-expect-agi-to-arrive/",
      "resourceId": "f2394e3212f072f5",
      "resourceTitle": "80,000 Hours AGI Timelines Review"
    },
    {
      "text": "Epoch AI: Literature Review of Transformative AI Timelines",
      "url": "https://epoch.ai/blog/literature-review-of-transformative-artificial-intelligence-timelines",
      "resourceId": "2cb4447b6a55df95",
      "resourceTitle": "Epoch AI: Literature Review of TAI Timelines"
    },
    {
      "text": "Metaculus AGI Forecasts",
      "url": "https://www.metaculus.com/questions/3479/date-weakly-general-ai-is-publicly-known/",
      "resourceId": "f315d8547ad503f7",
      "resourceTitle": "Metaculus (Dec 2024)"
    },
    {
      "text": "Stanford HAI: 2025 AI Index Report",
      "url": "https://hai.stanford.edu/ai-index/2025-ai-index-report",
      "resourceId": "da87f2b213eb9272",
      "resourceTitle": "Stanford AI Index 2025"
    },
    {
      "text": "Can AI Scaling Continue Through 2030? (Epoch AI)",
      "url": "https://epoch.ai/blog/can-ai-scaling-continue-through-2030",
      "resourceId": "9587b65b1192289d",
      "resourceTitle": "Epoch AI"
    },
    {
      "text": "TechCrunch: AI Scaling Laws Showing Diminishing Returns (2024)",
      "url": "https://techcrunch.com/2024/11/20/ai-scaling-laws-are-showing-diminishing-returns-forcing-ai-labs-to-change-course/",
      "resourceId": "1ed975df72c30426",
      "resourceTitle": "TechCrunch"
    },
    {
      "text": "Future of Life Institute: 2025 AI Safety Index",
      "url": "https://futureoflife.org/ai-safety-index-summer-2025/",
      "resourceId": "df46edd6fa2078d1",
      "resourceTitle": "FLI AI Safety Index Summer 2025"
    },
    {
      "text": "LessWrong: AI Futures Timelines Model (Dec 2025)",
      "url": "https://www.lesswrong.com/posts/YABG5JmztGGPwNFq2/ai-futures-timelines-and-takeoff-model-dec-2025-update",
      "resourceId": "a1c863df9c0fa30c",
      "resourceTitle": "AI Futures Timelines and Takeoff Model: Dec 2025 Update"
    }
  ],
  "unconvertedLinkCount": 25,
  "convertedLinkCount": 10,
  "backlinkCount": 4,
  "hallucinationRisk": {
    "level": "medium",
    "score": 40,
    "factors": [
      "no-citations",
      "conceptual-content",
      "high-quality"
    ]
  },
  "entityType": "concept",
  "redundancy": {
    "maxSimilarity": 20,
    "similarPages": [
      {
        "id": "doomer",
        "title": "AI Doomer Worldview",
        "path": "/knowledge-base/worldviews/doomer/",
        "similarity": 20
      },
      {
        "id": "optimistic",
        "title": "Optimistic Alignment Worldview",
        "path": "/knowledge-base/worldviews/optimistic/",
        "similarity": 20
      },
      {
        "id": "case-for-xrisk",
        "title": "The Case FOR AI Existential Risk",
        "path": "/knowledge-base/debates/case-for-xrisk/",
        "similarity": 19
      },
      {
        "id": "governance-focused",
        "title": "Governance-Focused Worldview",
        "path": "/knowledge-base/worldviews/governance-focused/",
        "similarity": 19
      },
      {
        "id": "self-improvement",
        "title": "Self-Improvement and Recursive Enhancement",
        "path": "/knowledge-base/capabilities/self-improvement/",
        "similarity": 18
      }
    ]
  },
  "coverage": {
    "passing": 6,
    "total": 13,
    "targets": {
      "tables": 19,
      "diagrams": 2,
      "internalLinks": 37,
      "externalLinks": 23,
      "footnotes": 14,
      "references": 14
    },
    "actuals": {
      "tables": 12,
      "diagrams": 1,
      "internalLinks": 16,
      "externalLinks": 52,
      "footnotes": 0,
      "references": 20,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "amber",
      "diagrams": "amber",
      "internalLinks": "amber",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:3.5 R:4 A:5.5 C:6.5"
  },
  "readerRank": 561,
  "researchRank": 196,
  "recommendedScore": 211.11
}
External Links
{
  "lesswrong": "https://www.lesswrong.com/tag/ai-timelines"
}
Backlinks (4)
id · title · type · relationship
worldview-intervention-mapping · Worldview-Intervention Mapping · analysis
holden-karnofsky · Holden Karnofsky · person
agent-foundations · Agent Foundations · approach
__index__ (/knowledge-base/worldviews) · Worldviews · concept
Longterm Wiki