Capability Threshold Model
capability-threshold-model (analysis)
Path: /knowledge-base/models/capability-threshold-model/
Entity ID (EID): E53
Page Record
database.json: merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "capability-threshold-model",
"numericId": null,
"path": "/knowledge-base/models/capability-threshold-model/",
"filePath": "knowledge-base/models/capability-threshold-model.mdx",
"title": "Capability Threshold Model",
"quality": 72,
"readerImportance": 47,
"researchImportance": 68,
"tacticalValue": null,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "Comprehensive framework mapping AI capabilities across 5 dimensions to specific risk thresholds, finding authentication collapse/mass persuasion risks at 70-85% likelihood by 2027, bioweapons development at 40% by 2029, with critical thresholds estimated when models achieve 50% on complex reasoning benchmarks and cross expert-level domain knowledge. Provides concrete capability requirements, timeline projections, and early warning indicators across 7 major risk categories with extensive benchmark tracking.",
"description": "Systematic framework mapping AI capabilities across 5 dimensions (domain knowledge, reasoning depth, planning horizon, strategic modeling, autonomous execution) to specific risk thresholds, providing concrete capability requirements for risks like bioweapons development (threshold crossing 2026-2029) and structured frameworks for risk forecasting.",
"ratings": {
"focus": 9,
"novelty": 6.5,
"rigor": 7.5,
"completeness": 8.5,
"concreteness": 8.5,
"actionability": 7
},
"category": "models",
"subcategory": "framework-models",
"clusters": [
"ai-safety",
"governance",
"cyber",
"biorisks"
],
"metrics": {
"wordCount": 1265,
"tableCount": 20,
"diagramCount": 1,
"internalLinks": 83,
"externalLinks": 0,
"footnoteCount": 0,
"bulletRatio": 0.14,
"sectionCount": 28,
"hasOverview": true,
"structuralScore": 12
},
"suggestedQuality": 80,
"updateFrequency": 90,
"evergreen": true,
"wordCount": 1265,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 78,
"backlinkCount": 6,
"hallucinationRisk": {
"level": "medium",
"score": 45,
"factors": [
"no-citations",
"few-external-sources",
"high-rigor"
]
},
"entityType": "analysis",
"redundancy": {
"maxSimilarity": 18,
"similarPages": [
{
"id": "dangerous-cap-evals",
"title": "Dangerous Capability Evaluations",
"path": "/knowledge-base/responses/dangerous-cap-evals/",
"similarity": 18
},
{
"id": "large-language-models",
"title": "Large Language Models",
"path": "/knowledge-base/capabilities/large-language-models/",
"similarity": 17
},
{
"id": "agi-development",
"title": "AGI Development",
"path": "/knowledge-base/forecasting/agi-development/",
"similarity": 17
},
{
"id": "capability-elicitation",
"title": "Capability Elicitation",
"path": "/knowledge-base/responses/capability-elicitation/",
"similarity": 17
},
{
"id": "evals",
"title": "Evals & Red-teaming",
"path": "/knowledge-base/responses/evals/",
"similarity": 17
}
]
},
"coverage": {
"passing": 8,
"total": 13,
"targets": {
"tables": 5,
"diagrams": 1,
"internalLinks": 10,
"externalLinks": 6,
"footnotes": 4,
"references": 4
},
"actuals": {
"tables": 20,
"diagrams": 1,
"internalLinks": 83,
"externalLinks": 0,
"footnotes": 0,
"references": 53,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "green",
"diagrams": "green",
"internalLinks": "green",
"externalLinks": "red",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:6.5 R:7.5 A:7 C:8.5"
},
"readerRank": 326,
"researchRank": 166,
"recommendedScore": 188.96
}
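For reference, the coverage.passing and coverage.total values in the record above are consistent with counting the items marked "green" in coverage.items (8 of 13). The sketch below shows that assumed derivation in TypeScript; the actual build logic is not shown on this page.

```ts
// Assumed derivation: "passing" counts the items currently marked green.
// The item names mirror the coverage.items map in the record above.
type CoverageStatus = "green" | "red";

function computeCoverage(items: Record<string, CoverageStatus>): { passing: number; total: number } {
  const statuses = Object.values(items);
  return {
    passing: statuses.filter((s) => s === "green").length,
    total: statuses.length,
  };
}

// Using the items from this page's record yields { passing: 8, total: 13 }.
console.log(
  computeCoverage({
    llmSummary: "green",
    schedule: "green",
    entity: "green",
    editHistory: "red",
    overview: "green",
    tables: "green",
    diagrams: "green",
    internalLinks: "green",
    externalLinks: "red",
    footnotes: "red",
    references: "green",
    quotes: "red",
    accuracy: "red",
  }),
);
```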
External Links
{
"lesswrong": "https://www.lesswrong.com/tag/ai-capabilities"
}
Backlinks (6)
| id | title | type | relationship |
|---|---|---|---|
| risk-activation-timeline | AI Risk Activation Timeline Model | analysis | related |
| warning-signs-model | AI Risk Warning Signs Model | analysis | related |
| critical-uncertainties | AI Risk Critical Uncertainties Model | crux | — |
| defense-in-depth-model | Defense in Depth Model | analysis | — |
| __index__/knowledge-base/models | Analytical Models | concept | — |
| intervention-effectiveness-matrix | Intervention Effectiveness Matrix | analysis | — |
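As noted under "Page Record" above, each database.json record is merged from MDX frontmatter, Entity YAML, and computed metrics at build time. The following is a hypothetical sketch of that merge; the precedence order and field handling are assumptions, not the site's actual build code.

```ts
// Hypothetical merge of the three sources named above into one page record.
// Assumption: later sources win on key collisions (frontmatter < entity YAML),
// and computed metrics land under a dedicated "metrics" key.
type Fields = Record<string, unknown>;

function mergePageRecord(frontmatter: Fields, entityYaml: Fields, computedMetrics: Fields): Fields {
  return { ...frontmatter, ...entityYaml, metrics: computedMetrics };
}

// Example shaped like the record above (values abbreviated).
const record = mergePageRecord(
  { id: "capability-threshold-model", title: "Capability Threshold Model" },
  { entityType: "analysis", quality: 72 },
  { wordCount: 1265, tableCount: 20, internalLinks: 83 },
);
console.log(JSON.stringify(record, null, 2));
```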