Capability-Alignment Race Model
ID: capability-alignment-race · Type: analysis · Path: /knowledge-base/models/capability-alignment-race/
Entity ID (EID): E414
Page Record
database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
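A minimal TypeScript sketch of how that build-time merge might work. The parser choices (gray-matter, yaml) and the helper names are illustrative assumptions, not the site's actual build code; the record itself follows below.

```ts
import { readFileSync } from "node:fs";
import matter from "gray-matter";        // assumed frontmatter parser
import { parse as parseYaml } from "yaml"; // assumed YAML parser

interface PageRecord {
  id: string;
  path: string;
  title: string;
  metrics: Record<string, number | boolean>;
  [key: string]: unknown; // remaining merged fields
}

// Assumed merge order, mirroring the description above:
// MDX frontmatter first, then Entity YAML, then computed metrics.
function buildPageRecord(mdxPath: string, entityYamlPath: string): PageRecord {
  const { data: frontmatter, content } = matter(readFileSync(mdxPath, "utf8"));
  const entity = parseYaml(readFileSync(entityYamlPath, "utf8"));
  const metrics = computeMetrics(content);
  return { ...frontmatter, ...entity, metrics } as PageRecord;
}

// Illustrative metric computation only; the real build computes many more fields.
function computeMetrics(mdx: string): Record<string, number | boolean> {
  const wordCount = mdx.split(/\s+/).filter(Boolean).length;
  const internalLinks = (mdx.match(/\]\(\/knowledge-base\//g) ?? []).length;
  return { wordCount, internalLinks, hasOverview: /## Overview/.test(mdx) };
}
```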
{
"id": "capability-alignment-race",
"numericId": null,
"path": "/knowledge-base/models/capability-alignment-race/",
"filePath": "knowledge-base/models/capability-alignment-race.mdx",
"title": "Capability-Alignment Race Model",
"quality": 62,
"readerImportance": 76,
"researchImportance": 89.5,
"tacticalValue": null,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "Quantifies the capability-alignment race showing capabilities currently ~3 years ahead of alignment readiness, with gap widening at 0.5 years/year driven by 10²⁶ FLOP scaling vs. 15% interpretability coverage and 30% scalable oversight maturity. Projects gap reaching 5-7 years by 2030 unless alignment research funding increases from \\$200M to \\$800M annually, with 60% chance of warning shot before TAI potentially triggering governance response.",
"description": "This model analyzes the critical gap between AI capability progress and safety/governance readiness. Currently, capabilities are ~3 years ahead of alignment with the gap increasing at 0.5 years annually, driven by 10²⁶ FLOP scaling vs. 15% interpretability coverage.",
"ratings": {
"focus": 8.5,
"novelty": 5,
"rigor": 6.5,
"completeness": 7.5,
"concreteness": 8,
"actionability": 7
},
"category": "models",
"subcategory": "race-models",
"clusters": [
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 1805,
"tableCount": 10,
"diagramCount": 1,
"internalLinks": 41,
"externalLinks": 0,
"footnoteCount": 0,
"bulletRatio": 0.05,
"sectionCount": 21,
"hasOverview": true,
"structuralScore": 12
},
"suggestedQuality": 80,
"updateFrequency": 90,
"evergreen": true,
"wordCount": 1805,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 13,
"backlinkCount": 8,
"hallucinationRisk": {
"level": "medium",
"score": 60,
"factors": [
"no-citations",
"few-external-sources"
]
},
"entityType": "analysis",
"redundancy": {
"maxSimilarity": 16,
"similarPages": [
{
"id": "large-language-models",
"title": "Large Language Models",
"path": "/knowledge-base/capabilities/large-language-models/",
"similarity": 16
},
{
"id": "agi-development",
"title": "AGI Development",
"path": "/knowledge-base/forecasting/agi-development/",
"similarity": 16
},
{
"id": "critical-uncertainties",
"title": "AI Risk Critical Uncertainties Model",
"path": "/knowledge-base/models/critical-uncertainties/",
"similarity": 16
},
{
"id": "intervention-timing-windows",
"title": "Intervention Timing Windows",
"path": "/knowledge-base/models/intervention-timing-windows/",
"similarity": 16
},
{
"id": "safety-spending-at-scale",
"title": "Safety Spending at Scale",
"path": "/knowledge-base/models/safety-spending-at-scale/",
"similarity": 16
}
]
},
"changeHistory": [
{
"date": "2026-02-23",
"branch": "claude/test-research-orchestrator-DUFts",
"title": "Test Research Orchestrator (engine v2) on 3 alignment pages",
"summary": "(fill in)"
},
{
"date": "2026-02-23",
"branch": "claude/test-research-orchestrator-DUFts",
"title": "Orchestrator v2 (standard): Capability-Alignment Race Model",
"summary": "Improved \"Capability-Alignment Race Model\" via orchestrator v2 (standard, 24 tool calls, 0 refinement cycles). Quality gate: passed. Cost: ~$6.45.",
"duration": "526.3s",
"cost": "~$6.45"
},
{
"date": "2026-02-15",
"branch": "claude/review-recent-prs-tbpBf",
"title": "Review recent PRs for bugs",
"summary": "Audited ~20 recently merged PRs for bugs and code quality issues. Found and fixed 8 distinct bugs across multiple PRs including broken page links, unused imports, graph sync failures, unescaped dollar signs, missing error handling, and a validator that couldn't handle numeric entity IDs.",
"pr": 138
}
],
"coverage": {
"passing": 9,
"total": 13,
"targets": {
"tables": 7,
"diagrams": 1,
"internalLinks": 14,
"externalLinks": 9,
"footnotes": 5,
"references": 5
},
"actuals": {
"tables": 10,
"diagrams": 1,
"internalLinks": 41,
"externalLinks": 0,
"footnotes": 0,
"references": 22,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "green",
"overview": "green",
"tables": "green",
"diagrams": "green",
"internalLinks": "green",
"externalLinks": "red",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"editHistoryCount": 3,
"ratingsString": "N:5 R:6.5 A:7 C:7.5"
},
"readerRank": 114,
"researchRank": 30,
"recommendedScore": 183.61
}

External Links
No external links
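The coverage block in the record above compares per-item targets against actuals; the absence of external links here, for instance, is why `externalLinks` shows red. A minimal sketch, assuming the simple rule that an item passes when its actual count meets or exceeds its target (the real scoring may differ):

```ts
type CoverageStatus = "green" | "red";

// Assumed rule: an item is green when its actual meets or exceeds its target.
function coverageStatus(actual: number, target: number): CoverageStatus {
  return actual >= target ? "green" : "red";
}

// Targets and actuals copied from the record's coverage block.
const targets = { tables: 7, diagrams: 1, internalLinks: 14, externalLinks: 9, footnotes: 5, references: 5 };
const actuals = { tables: 10, diagrams: 1, internalLinks: 41, externalLinks: 0, footnotes: 0, references: 22 };

const items: Record<string, CoverageStatus> = {};
for (const key of Object.keys(targets) as (keyof typeof targets)[]) {
  items[key] = coverageStatus(actuals[key], targets[key]);
}
// → externalLinks and footnotes come out "red"; the other four are "green",
//   consistent with the items map in the record above.
```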
Backlinks (8)
| id | title | type | relationship |
|---|---|---|---|
| technical-pathways | AI Safety Technical Pathway Decomposition | analysis | — |
| feedback-loops | AI Risk Feedback Loop & Cascade Model | analysis | — |
| multi-actor-landscape | AI Safety Multi-Actor Strategic Landscape | analysis | — |
| ai-acceleration-tradeoff | AI Acceleration Tradeoff Model | analysis | related |
| ai-risk-portfolio-analysis | AI Risk Portfolio Analysis | analysis | — |
| intervention-timing-windows | Intervention Timing Windows | analysis | — |
| risk-interaction-network | Risk Interaction Network | analysis | — |
| alignment | AI Alignment | approach | — |
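The llmSummary in the record above describes a gap of roughly 3 years widening at about 0.5 years per year, reaching 5-7 years by 2030. A quick sanity check of that projection, assuming linear extrapolation from the 2026 baseline (the upper end of the range presumably reflects an accelerating rate, which this sketch does not model):

```ts
// Linear extrapolation of the capability-alignment gap described in llmSummary.
// Assumed baseline: ~3-year gap in 2026, widening at 0.5 years per year.
function projectedGap(year: number, baseYear = 2026, baseGap = 3, rate = 0.5): number {
  return baseGap + rate * (year - baseYear);
}

console.log(projectedGap(2030)); // 5 — the low end of the 5-7 year range quoted in llmSummary
```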