AI Acceleration Tradeoff Model
ID: ai-acceleration-tradeoff (analysis)
Path: /knowledge-base/models/ai-acceleration-tradeoff/
Entity ID (EID): E687
Page Record: database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
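As a rough illustration of that merge step, here is a minimal TypeScript sketch. The function names, package choices (gray-matter, js-yaml), and precedence order are assumptions about the pipeline, not a description of the actual build script:

```ts
// Hypothetical reconstruction of the build-time merge; names and merge
// precedence are assumptions, not the site's documented behavior.
import { readFileSync } from "node:fs";
import matter from "gray-matter";
import { load as loadYaml } from "js-yaml";

interface PageRecord {
  id: string;
  path: string;
  title: string;
  wordCount: number;
  [key: string]: unknown;
}

function buildPageRecord(mdxPath: string, entityYamlPath: string): PageRecord {
  // 1. MDX frontmatter supplies authored fields (title, description, ratings...).
  const { data: frontmatter, content } = matter(readFileSync(mdxPath, "utf8"));
  // 2. Entity YAML supplies registry fields (entityType, EID, clusters...).
  const entity = loadYaml(readFileSync(entityYamlPath, "utf8")) as Record<string, unknown>;
  // 3. Metrics are computed from the document body at build time.
  const metrics = {
    wordCount: content.split(/\s+/).filter(Boolean).length,
    // tableCount, diagramCount, link counts, etc. would come from the MDX AST.
  };
  // Later sources win: computed metrics override YAML, YAML overrides frontmatter.
  return { ...frontmatter, ...entity, ...metrics } as PageRecord;
}
```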
{
"id": "ai-acceleration-tradeoff",
"numericId": null,
"path": "/knowledge-base/models/ai-acceleration-tradeoff/",
"filePath": "knowledge-base/models/ai-acceleration-tradeoff.mdx",
"title": "AI Acceleration Tradeoff Model",
"quality": 50,
"readerImportance": 72.5,
"researchImportance": 65,
"tacticalValue": null,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "Quantitative framework for evaluating how changes to AI development speed affect existential risk and long-term value. Models the marginal impact of acceleration/deceleration on P(existential catastrophe), safety readiness, governance preparedness, and conditional future value. Finds that 1 year of additional preparation time reduces x-risk by 1-4 percentage points depending on current readiness, but also delays economic and scientific benefits worth 0.1-0.5% of future value annually.",
"description": "A quantitative framework for evaluating the costs and benefits of speeding up or slowing down AI development. Analyzes how changes to TAI arrival time affect existential risk, safety preparedness, governance readiness, and the expected value of the long-term future.",
"ratings": {
"focus": 9,
"novelty": 7,
"rigor": 6.5,
"completeness": 7,
"concreteness": 8,
"actionability": 7.5
},
"category": "models",
"subcategory": "safety-models",
"clusters": [
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 3487,
"tableCount": 14,
"diagramCount": 2,
"internalLinks": 5,
"externalLinks": 12,
"footnoteCount": 0,
"bulletRatio": 0.08,
"sectionCount": 36,
"hasOverview": true,
"structuralScore": 15
},
"suggestedQuality": 100,
"updateFrequency": 90,
"evergreen": true,
"wordCount": 3487,
"unconvertedLinks": [
{
"text": "Metaculus",
"url": "https://www.metaculus.com/questions/5121/date-of-artificial-general-intelligence/",
"resourceId": "0aa1710a67875e8e",
"resourceTitle": "Metaculus AGI Question"
},
{
"text": "Superintelligence: Paths, Dangers, Strategies",
"url": "https://en.wikipedia.org/wiki/Superintelligence:_Paths,_Dangers,_Strategies",
"resourceId": "0151481d5dc82963",
"resourceTitle": "Superintelligence"
},
{
"text": "The Precipice",
"url": "https://theprecipice.com/",
"resourceId": "3b9fccf15651dbbe",
"resourceTitle": "Ord (2020): The Precipice"
},
{
"text": "Is Power-Seeking AI an Existential Risk?",
"url": "https://arxiv.org/abs/2206.13353",
"resourceId": "6e597a4dc1f6f860",
"resourceTitle": "Is Power-Seeking AI an Existential Risk?"
},
{
"text": "Without Specific Countermeasures",
"url": "https://www.cold-takes.com/without-specific-countermeasures-the-easiest-path-to-transformative-ai-likely-leads-to-ai-takeover/",
"resourceId": "b6967ffbd2503516",
"resourceTitle": "Cotra (2022) - AI Takeover"
},
{
"text": "Metaculus AGI Timeline",
"url": "https://www.metaculus.com/questions/5121/date-of-artificial-general-intelligence/",
"resourceId": "0aa1710a67875e8e",
"resourceTitle": "Metaculus AGI Question"
},
{
"text": "AI Impacts Expert Survey",
"url": "https://aiimpacts.org/2022-expert-survey-on-progress-in-ai/",
"resourceId": "38eba87d0a888e2e",
"resourceTitle": "AI experts show significant disagreement"
},
{
"text": "Computing Power and the Governance of AI",
"url": "https://arxiv.org/abs/2402.08797",
"resourceId": "ec57d21ec35c1d02",
"resourceTitle": "Computing Power and the Governance of AI"
},
{
"text": "AI Governance: A Research Agenda",
"url": "https://www.fhi.ox.ac.uk/wp-content/uploads/GovAI-Agenda.pdf",
"resourceId": "c2e15e64323078f5",
"resourceTitle": "AI Governance: A Research Agenda"
}
],
"unconvertedLinkCount": 9,
"convertedLinkCount": 0,
"backlinkCount": 1,
"hallucinationRisk": {
"level": "medium",
"score": 55,
"factors": [
"no-citations"
]
},
"entityType": "analysis",
"redundancy": {
"maxSimilarity": 14,
"similarPages": [
{
"id": "bioweapons-timeline",
"title": "AI-Bioweapons Timeline Model",
"path": "/knowledge-base/models/bioweapons-timeline/",
"similarity": 14
},
{
"id": "capability-alignment-race",
"title": "Capability-Alignment Race Model",
"path": "/knowledge-base/models/capability-alignment-race/",
"similarity": 14
},
{
"id": "carlsmith-six-premises",
"title": "Carlsmith's Six-Premise Argument",
"path": "/knowledge-base/models/carlsmith-six-premises/",
"similarity": 14
},
{
"id": "critical-uncertainties",
"title": "AI Risk Critical Uncertainties Model",
"path": "/knowledge-base/models/critical-uncertainties/",
"similarity": 14
},
{
"id": "fraud-sophistication-curve",
"title": "Fraud Sophistication Curve Model",
"path": "/knowledge-base/models/fraud-sophistication-curve/",
"similarity": 14
}
]
},
"coverage": {
"passing": 6,
"total": 13,
"targets": {
"tables": 14,
"diagrams": 1,
"internalLinks": 28,
"externalLinks": 17,
"footnotes": 10,
"references": 10
},
"actuals": {
"tables": 14,
"diagrams": 2,
"internalLinks": 5,
"externalLinks": 12,
"footnotes": 0,
"references": 8,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "green",
"diagrams": "green",
"internalLinks": "amber",
"externalLinks": "amber",
"footnotes": "red",
"references": "amber",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:7 R:6.5 A:7.5 C:7"
},
"readerRank": 141,
"researchRank": 186,
"recommendedScore": 158.08
}
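The tradeoff summarized in llmSummary reduces to a simple expected-value comparison. A back-of-envelope sketch using the ranges quoted there; the point estimates below are illustrative midpoints, not outputs of the underlying model:

```ts
// One year of extra preparation: gain = x-risk reduction, cost = delayed benefits.
// Ranges from the summary: risk falls 1-4 percentage points; delayed economic
// and scientific benefits cost 0.1-0.5% of future value per year.
function netValueOfOneYearDelay(
  riskReduction: number,     // absolute drop in P(catastrophe), 0.01-0.04
  annualDelayCost: number,   // fraction of future value lost per year, 0.001-0.005
  conditionalFutureValue = 1 // value of the future given no catastrophe
): number {
  return (riskReduction - annualDelayCost) * conditionalFutureValue;
}

// Midpoints: 2.5pp risk reduction vs 0.3%/year of delayed benefits.
console.log(netValueOfOneYearDelay(0.025, 0.003)); // 0.022 → delay net-positive
// Even the pessimistic corner (1pp reduction, 0.5%/year cost) stays positive: 0.005.
```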
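The hallucinationRisk block appears to be derived from citation-related metrics (footnoteCount is 0 here, matching the "no-citations" factor). A guessed derivation, with made-up weights chosen only to reproduce this record's values:

```ts
// Purely a shape-compatible sketch: the real factor list and scoring
// weights are not shown anywhere in this record.
function assessHallucinationRisk(m: { footnoteCount: number }) {
  const factors: string[] = [];
  if (m.footnoteCount === 0) factors.push("no-citations");
  const score = 30 + factors.length * 25; // made-up weighting
  const level = score >= 70 ? "high" : score >= 50 ? "medium" : "low";
  return { level, score, factors };
}

// For this page (footnoteCount: 0):
// { level: "medium", score: 55, factors: ["no-citations"] }
```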
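The coverage block's traffic-light statuses are consistent with a simple rule over targets vs. actuals. A reconstruction that reproduces every visible pair, though the site's actual thresholds may differ:

```ts
type Status = "green" | "amber" | "red";

// Guessed rule consistent with the record above: meeting the target is
// green, zero is red, anything strictly in between is amber.
function coverageStatus(actual: number, target: number): Status {
  if (actual >= target) return "green";
  if (actual === 0) return "red";
  return "amber";
}

// Matches the record: tables 14/14 → green, diagrams 2/1 → green,
// internalLinks 5/28 → amber, references 8/10 → amber, footnotes 0/10 → red.
```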
External Links

No external links
Backlinks (1)
| id | title | type | relationship |
|---|---|---|---|
| longtermist-value-comparisons | Relative Longtermist Value Comparisons | analysis | — |