AI Misuse Risk Cruxes
ID: misuse-risks · Type: crux
Path: /knowledge-base/cruxes/misuse-risks/
Entity ID (EID): E392
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "misuse-risks",
"numericId": null,
"path": "/knowledge-base/cruxes/misuse-risks/",
"filePath": "knowledge-base/cruxes/misuse-risks.mdx",
"title": "AI Misuse Risk Cruxes",
"quality": 65,
"readerImportance": 81.5,
"researchImportance": 86,
"tacticalValue": 58,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "Comprehensive analysis of 13 AI misuse cruxes with quantified evidence showing mixed uplift (RAND bio study found no significant difference, but cyber CTF scores improved 27%→76% in 3 months), deepfake incidents projected at 8M by 2025 (up from 500K in 2023), and human detection accuracy at only 24.5%. Framework explicitly maps uncertainties to policy responses (restrictions, compute governance, detection systems) with probability ranges for each position.",
"description": "Key uncertainties that determine views on AI misuse risks, including capability uplift (30-45% significant vs 35-45% modest), offense-defense balance, and mitigation effectiveness across bioweapons, cyberweapons, and autonomous systems",
"ratings": {
"novelty": 5.8,
"rigor": 6.5,
"actionability": 7.2,
"completeness": 7
},
"category": "cruxes",
"subcategory": null,
"clusters": [
"ai-safety",
"biorisks",
"cyber",
"governance"
],
"metrics": {
"wordCount": 2053,
"tableCount": 18,
"diagramCount": 1,
"internalLinks": 22,
"externalLinks": 55,
"footnoteCount": 0,
"bulletRatio": 0.01,
"sectionCount": 30,
"hasOverview": true,
"structuralScore": 15
},
"suggestedQuality": 100,
"updateFrequency": 7,
"evergreen": true,
"wordCount": 2053,
"unconvertedLinks": [
{
"text": "AI Incident Database",
"url": "https://incidentdatabase.ai/",
"resourceId": "baac25fa61cb2244",
"resourceTitle": "AI Incident Database"
},
{
"text": "RAND",
"url": "https://www.rand.org/pubs/research_reports/RRA2977-2.html",
"resourceId": "0fe4cfa7ca5f2270",
"resourceTitle": "RAND Corporation study"
},
{
"text": "RAND Red-Team Study",
"url": "https://www.rand.org/pubs/research_reports/RRA2977-2.html",
"resourceId": "0fe4cfa7ca5f2270",
"resourceTitle": "RAND Corporation study"
},
{
"text": "Deepstrike Research",
"url": "https://deepstrike.io/blog/deepfake-statistics-2025",
"resourceId": "d786af9f7b112dc6",
"resourceTitle": "Deepstrike"
},
{
"text": "Deepstrike 2025",
"url": "https://deepstrike.io/blog/deepfake-statistics-2025",
"resourceId": "d786af9f7b112dc6",
"resourceTitle": "Deepstrike"
},
{
"text": "C2PA",
"url": "https://c2pa.org/",
"resourceId": "ff89bed1f7960ab2",
"resourceTitle": "C2PA Explainer Videos"
},
{
"text": "Congressional Research Service analysis",
"url": "https://www.congress.gov/crs-product/IF11150",
"resourceId": "65548750e4511847",
"resourceTitle": "Section 1066 of the FY2025 NDAA"
},
{
"text": "ASIL Insights",
"url": "https://www.asil.org/insights/volume/29/issue/1",
"resourceId": "461296b9a5df30f5",
"resourceTitle": "December 2024 UN General Assembly resolution"
},
{
"text": "RAND Corporation",
"url": "https://www.rand.org/topics/artificial-intelligence.html",
"resourceId": "cf5fd74e8db11565",
"resourceTitle": "RAND: AI and National Security"
},
{
"text": "Georgetown CSET",
"url": "https://cset.georgetown.edu/",
"resourceId": "f0d95954b449240a",
"resourceTitle": "CSET: AI Market Dynamics"
},
{
"text": "CNAS",
"url": "https://www.cnas.org/",
"resourceId": "58f6946af0177ca5",
"resourceTitle": "CNAS"
},
{
"text": "UN CCW GGE on LAWS",
"url": "https://meetings.unoda.org/ccw/convention-on-certain-conventional-weapons-group-of-governmental-experts-on-lethal-autonomous-weapons-systems-2025",
"resourceId": "c5cc338fe2a44f23",
"resourceTitle": "March and September 2025"
},
{
"text": "Congressional Research Service",
"url": "https://www.congress.gov/crs-product/IF11150",
"resourceId": "65548750e4511847",
"resourceTitle": "Section 1066 of the FY2025 NDAA"
},
{
"text": "ASIL",
"url": "https://www.asil.org/insights/volume/29/issue/1",
"resourceId": "461296b9a5df30f5",
"resourceTitle": "December 2024 UN General Assembly resolution"
},
{
"text": "Deepstrike Research",
"url": "https://deepstrike.io/blog/deepfake-statistics-2025",
"resourceId": "d786af9f7b112dc6",
"resourceTitle": "Deepstrike"
},
{
"text": "C2PA Coalition",
"url": "https://c2pa.org/",
"resourceId": "ff89bed1f7960ab2",
"resourceTitle": "C2PA Explainer Videos"
}
],
"unconvertedLinkCount": 16,
"convertedLinkCount": 15,
"backlinkCount": 13,
"hallucinationRisk": {
"level": "medium",
"score": 45,
"factors": [
"no-citations",
"conceptual-content"
]
},
"entityType": "crux",
"redundancy": {
"maxSimilarity": 14,
"similarPages": [
{
"id": "bioweapons-ai-uplift",
"title": "AI Uplift Assessment Model",
"path": "/knowledge-base/models/bioweapons-ai-uplift/",
"similarity": 14
},
{
"id": "capability-threshold-model",
"title": "Capability Threshold Model",
"path": "/knowledge-base/models/capability-threshold-model/",
"similarity": 14
},
{
"id": "coding",
"title": "Autonomous Coding",
"path": "/knowledge-base/capabilities/coding/",
"similarity": 13
},
{
"id": "large-language-models",
"title": "Large Language Models",
"path": "/knowledge-base/capabilities/large-language-models/",
"similarity": 13
},
{
"id": "agi-development",
"title": "AGI Development",
"path": "/knowledge-base/forecasting/agi-development/",
"similarity": 13
}
]
},
"coverage": {
"passing": 9,
"total": 13,
"targets": {
"tables": 8,
"diagrams": 1,
"internalLinks": 16,
"externalLinks": 10,
"footnotes": 6,
"references": 6
},
"actuals": {
"tables": 18,
"diagrams": 1,
"internalLinks": 22,
"externalLinks": 55,
"footnotes": 0,
"references": 21,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "green",
"diagrams": "green",
"internalLinks": "green",
"externalLinks": "green",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:5.8 R:6.5 A:7.2 C:7"
},
"readerRank": 77,
"researchRank": 47,
"recommendedScore": 192.42
}
External Links
{
"lesswrong": "https://www.lesswrong.com/tag/ai-misuse",
"eightyK": "https://80000hours.org/problem-profiles/catastrophic-ai-misuse/"
}
Backlinks (13)
| id | title | type | relationship |
|---|---|---|---|
| epistemic-risks | AI Epistemic Cruxes | crux | — |
| __index__/knowledge-base/cruxes | Key Cruxes | concept | — |
| openclaw-matplotlib-incident-2026 | OpenClaw Matplotlib Incident (2026) | concept | — |
| 1day-sooner | 1Day Sooner | organization | — |
| nti-bio | NTI \| bio (Nuclear Threat Initiative - Biological Program) | organization | — |
| securebio | SecureBio | organization | — |
| situational-awareness-lp | Situational Awareness LP | organization | — |
| ssi | Safe Superintelligence Inc (SSI) | organization | — |
| california-sb53 | California SB 53 | policy | — |
| compute-governance | Compute Governance: AI Chips Export Controls Policy | policy | — |
| model-registries | Model Registries | policy | — |
| deepfakes | Deepfakes | risk | — |
| fraud | AI-Powered Fraud | risk | — |