AI Risk Portfolio Analysis
ai-risk-portfolio-analysis (analysis)
Path: /knowledge-base/models/ai-risk-portfolio-analysis/
Entity ID (EID): E12
Page Record

database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time.
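As a rough sketch of what that merge could look like — assuming a simple "later source wins" precedence; the function and field names here are illustrative, not the site's actual build code:

```ts
// Illustrative sketch only: the real build step is not shown on this page.
// Models database.json assembly as "MDX frontmatter + Entity YAML +
// computed metrics", with later sources winning on key collisions.
type PageRecord = Record<string, unknown>;

function computeMetrics(body: string): PageRecord {
  // Rough text-based proxies for the "metrics" block in the record below.
  const lines = body.split("\n");
  const bullets = lines.filter((l) => /^\s*[-*]\s/.test(l)).length;
  return {
    wordCount: body.split(/\s+/).filter(Boolean).length,
    internalLinks: (body.match(/\]\(\/knowledge-base\//g) ?? []).length,
    bulletRatio: lines.length ? bullets / lines.length : 0,
  };
}

function buildPageRecord(
  frontmatter: PageRecord, // parsed from the .mdx file's frontmatter
  entityYaml: PageRecord,  // parsed from the Entity YAML
  mdxBody: string          // raw MDX body text
): PageRecord {
  return { ...frontmatter, ...entityYaml, metrics: computeMetrics(mdxBody) };
}
```

The merged record for this page: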
{
  "id": "ai-risk-portfolio-analysis",
  "numericId": null,
  "path": "/knowledge-base/models/ai-risk-portfolio-analysis/",
  "filePath": "knowledge-base/models/ai-risk-portfolio-analysis.mdx",
  "title": "AI Risk Portfolio Analysis",
  "quality": 64,
  "readerImportance": 47,
  "researchImportance": 67.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Quantitative portfolio framework recommending AI safety resource allocation: 40-70% to misalignment, 15-35% to misuse, 10-25% to structural risks, varying by timeline. Based on 2024 funding analysis (\\$110-130M total), identifies specific gaps including governance (underfunded by \\$15-20M), agent safety (\\$7-12M gap), and international capacity (\\$11-16M gap).",
  "description": "A quantitative framework for resource allocation across AI risk categories. Analysis estimates misalignment accounts for 40-70% of existential risk, misuse 15-35%, and structural risks 10-25%, with timeline-dependent recommendations. Based on 2024 funding data (\\$110-130M total external funding), recommends rebalancing toward governance (currently underfunded by ~\\$15-20M) and interpretability research.",
  "ratings": {
    "focus": 8.5,
    "novelty": 4.5,
    "rigor": 6,
    "completeness": 7.5,
    "concreteness": 8,
    "actionability": 7.5
  },
  "category": "models",
  "subcategory": "analysis-models",
  "clusters": [
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 2241,
    "tableCount": 24,
    "diagramCount": 3,
    "internalLinks": 39,
    "externalLinks": 14,
    "footnoteCount": 0,
    "bulletRatio": 0.01,
    "sectionCount": 34,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 90,
  "evergreen": true,
  "wordCount": 2241,
  "unconvertedLinks": [
    {
      "text": "Longview Philanthropy estimates",
      "url": "https://forum.effectivealtruism.org/posts/XdhwXppfqrpPL2YDX/an-overview-of-the-ai-safety-funding-situation",
      "resourceId": "80125fcaf04609b8",
      "resourceTitle": "Overview of AI Safety Funding"
    },
    {
      "text": "AI Impacts survey",
      "url": "https://aiimpacts.org/",
      "resourceId": "3b9fda03b8be71dc",
      "resourceTitle": "AI Impacts 2023"
    },
    {
      "text": "detailed analysis",
      "url": "https://www.lesswrong.com/posts/WGpFFJo2uFe5ssgEb/an-overview-of-the-ai-safety-funding-situation",
      "resourceId": "b1ab921f9cbae109",
      "resourceTitle": "An Overview of the AI Safety Funding Situation (LessWrong)"
    },
    {
      "text": "Detailed analysis",
      "url": "https://www.lesswrong.com/posts/adzfKEW98TswZEA6T/brief-analysis-of-op-technical-ai-safety-funding",
      "resourceId": "kb-fb66d73671ec9ced"
    },
    {
      "text": "Longview Philanthropy Analysis",
      "url": "https://forum.effectivealtruism.org/posts/XdhwXppfqrpPL2YDX/an-overview-of-the-ai-safety-funding-situation",
      "resourceId": "80125fcaf04609b8",
      "resourceTitle": "Overview of AI Safety Funding"
    },
    {
      "text": "CG Technical Safety Analysis",
      "url": "https://www.lesswrong.com/posts/adzfKEW98TswZEA6T/brief-analysis-of-op-technical-ai-safety-funding",
      "resourceId": "kb-fb66d73671ec9ced"
    },
    {
      "text": "FLI AI Safety Index",
      "url": "https://futureoflife.org/ai-safety-index-summer-2025/",
      "resourceId": "df46edd6fa2078d1",
      "resourceTitle": "FLI AI Safety Index Summer 2025"
    },
    {
      "text": "Frontier Model Forum",
      "url": "https://www.frontiermodelforum.org/ai-safety-fund/",
      "resourceId": "6bc74edd147a374b",
      "resourceTitle": "AI Safety Fund"
    }
  ],
  "unconvertedLinkCount": 8,
  "convertedLinkCount": 19,
  "backlinkCount": 5,
  "hallucinationRisk": {
    "level": "medium",
    "score": 55,
    "factors": [
      "no-citations"
    ]
  },
  "entityType": "analysis",
  "redundancy": {
    "maxSimilarity": 16,
    "similarPages": [
      {
        "id": "safety-research-value",
        "title": "Expected Value of AI Safety Research",
        "path": "/knowledge-base/models/safety-research-value/",
        "similarity": 16
      },
      {
        "id": "capabilities-to-safety-pipeline",
        "title": "Capabilities-to-Safety Pipeline Model",
        "path": "/knowledge-base/models/capabilities-to-safety-pipeline/",
        "similarity": 14
      },
      {
        "id": "compounding-risks-analysis",
        "title": "Compounding Risks Analysis",
        "path": "/knowledge-base/models/compounding-risks-analysis/",
        "similarity": 14
      },
      {
        "id": "intervention-effectiveness-matrix",
        "title": "Intervention Effectiveness Matrix",
        "path": "/knowledge-base/models/intervention-effectiveness-matrix/",
        "similarity": 14
      },
      {
        "id": "critical-uncertainties",
        "title": "AI Risk Critical Uncertainties Model",
        "path": "/knowledge-base/models/critical-uncertainties/",
        "similarity": 13
      }
    ]
  },
  "coverage": {
    "passing": 9,
    "total": 13,
    "targets": {
      "tables": 9,
      "diagrams": 1,
      "internalLinks": 18,
      "externalLinks": 11,
      "footnotes": 7,
      "references": 7
    },
    "actuals": {
      "tables": 24,
      "diagrams": 3,
      "internalLinks": 39,
      "externalLinks": 14,
      "footnotes": 0,
      "references": 22,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "green",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:4.5 R:6 A:7.5 C:7.5"
  },
  "readerRank": 325,
  "researchRank": 168,
  "recommendedScore": 173.21
}
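The coverage block above is consistent with a simple threshold rule for the numeric items: an item shows "green" when its actual meets or exceeds its target. A sketch of that inferred check — the rule is read off the numbers in the record, not taken from documented behavior:

```ts
// Inferred rule: actual >= target => "green", else "red".
// Reproduces the numeric items above, e.g. tables 24/9 -> green,
// footnotes 0/7 -> red. Non-numeric items (schedule, quotes, accuracy, ...)
// presumably have their own boolean checks and are omitted here.
type Light = "green" | "red";

function coverageLights(
  targets: Record<string, number>,
  actuals: Record<string, number>
): Record<string, Light> {
  const lights: Record<string, Light> = {};
  for (const [item, target] of Object.entries(targets)) {
    lights[item] = (actuals[item] ?? 0) >= target ? "green" : "red";
  }
  return lights;
}

const lights = coverageLights(
  { tables: 9, diagrams: 1, internalLinks: 18, externalLinks: 11, footnotes: 7, references: 7 },
  { tables: 24, diagrams: 3, internalLinks: 39, externalLinks: 14, footnotes: 0, references: 22 }
);
console.log(lights); // footnotes: "red", everything else "green"
```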
External Links

No external links
Backlinks (5)
| id | title | type | relationship |
|---|---|---|---|
| worldview-intervention-mapping | Worldview-Intervention Mapping | analysis | related |
| intervention-timing-windows | Intervention Timing Windows | analysis | related |
| compounding-risks-analysis | Compounding Risks Analysis | analysis | — |
| intervention-effectiveness-matrix | Intervention Effectiveness Matrix | analysis | — |
| risk-interaction-matrix | Risk Interaction Matrix Model | analysis | — |
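The redundancy scores in the record (similarity 13-16 against the most similar pages) read as 0-100 pairwise text-similarity percentages. Purely as an illustration — the site's actual metric is not documented on this page — one plausible implementation is a token-set Jaccard score scaled to 0-100:

```ts
// Hypothetical: one way a 0-100 similarity score like the "redundancy"
// block's could be computed. Not the site's confirmed algorithm.
function jaccardSimilarity(a: string, b: string): number {
  const tokens = (s: string) =>
    new Set(s.toLowerCase().match(/[a-z0-9]+/g) ?? []);
  const ta = tokens(a);
  const tb = tokens(b);
  const inter = [...ta].filter((t) => tb.has(t)).length;
  const union = new Set([...ta, ...tb]).size;
  return union === 0 ? 0 : Math.round((100 * inter) / union);
}
```

Applied to two pages' summaries or bodies, this returns a 0-100 score of the same shape as the similarity values above; low teens, as seen here, would indicate modest topical overlap rather than duplication.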