Bioweapons
ID: bioweapons · Type: risk
Path: /knowledge-base/risks/bioweapons/
Entity ID (EID): E42

Page Record

database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time. (A minimal sketch of this merge follows the record below.)
```json
{
  "id": "bioweapons",
  "numericId": null,
  "path": "/knowledge-base/risks/bioweapons/",
  "filePath": "knowledge-base/risks/bioweapons.mdx",
  "title": "Bioweapons",
  "quality": 91,
  "readerImportance": 62.5,
  "researchImportance": 24,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": "outcome",
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive synthesis of AI-bioweapons evidence through early 2026, including the FRI expert survey finding 5x risk increase from AI capabilities (0.3% → 1.5% annual epidemic probability), Anthropic's ASL-3 activation for Claude Opus 4, and OpenAI's o3 reaching 94th percentile on virology tests. Key developments: DNA screening now catches 97% of threats post-patch, but open-source models (DeepSeek) lack safeguards. Expert consensus: safeguards can reduce risk nearly to baseline even with advanced AI capabilities.",
  "description": "AI-assisted biological weapon development represents one of the most severe near-term AI risks. In 2025, both OpenAI and Anthropic activated elevated safety measures after internal evaluations showed frontier models approaching expert-level biological capabilities, with OpenAI expecting next-gen models to hit 'high-risk' classification.",
  "ratings": {
    "novelty": 4.5,
    "rigor": 6.8,
    "actionability": 6.2,
    "completeness": 7.5
  },
  "category": "risks",
  "subcategory": "misuse",
  "clusters": [
    "ai-safety",
    "biorisks",
    "governance"
  ],
  "metrics": {
    "wordCount": 10802,
    "tableCount": 23,
    "diagramCount": 2,
    "internalLinks": 90,
    "externalLinks": 12,
    "footnoteCount": 56,
    "bulletRatio": 0.32,
    "sectionCount": 63,
    "hasOverview": true,
    "structuralScore": 14
  },
  "suggestedQuality": 93,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 10802,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 62,
  "backlinkCount": 33,
  "hallucinationRisk": {
    "level": "medium",
    "score": 55,
    "factors": [
      "moderately-cited",
      "high-quality",
      "severe-truncation"
    ],
    "integrityIssues": [
      "severe-truncation"
    ]
  },
  "entityType": "risk",
  "redundancy": {
    "maxSimilarity": 21,
    "similarPages": [
      {
        "id": "bioweapons-ai-uplift",
        "title": "AI Uplift Assessment Model",
        "path": "/knowledge-base/models/bioweapons-ai-uplift/",
        "similarity": 21
      },
      {
        "id": "scientific-research",
        "title": "Scientific Research Capabilities",
        "path": "/knowledge-base/capabilities/scientific-research/",
        "similarity": 20
      },
      {
        "id": "authoritarian-tools-diffusion",
        "title": "Authoritarian Tools Diffusion Model",
        "path": "/knowledge-base/models/authoritarian-tools-diffusion/",
        "similarity": 20
      },
      {
        "id": "language-models",
        "title": "Large Language Models",
        "path": "/knowledge-base/capabilities/language-models/",
        "similarity": 19
      },
      {
        "id": "why-alignment-hard",
        "title": "Why Alignment Might Be Hard",
        "path": "/knowledge-base/debates/why-alignment-hard/",
        "similarity": 19
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-18",
      "branch": "claude/review-pr-216-P4Fcu",
      "title": "Fix audit report findings from PR #216",
      "summary": "Reviewed PR #216 (comprehensive wiki audit report) and implemented fixes for the major issues it identified: fixed 181 path-style EntityLink IDs across 33 files, converted 164 broken EntityLinks (referencing non-existent entities) to plain text across 38 files, fixed a temporal inconsistency in anthropic.mdx, and added missing description fields to 53 ai-transition-model pages."
    },
    {
      "date": "2026-02-18",
      "branch": "claude/audit-webpage-errors-X4jHg",
      "title": "Audit wiki pages for factual errors and hallucinations",
      "summary": "Systematic audit of ~20 wiki pages for factual errors, hallucinations, and inconsistencies. Found and fixed 25+ confirmed errors across 17 pages, including wrong dates, fabricated statistics, false attributions, missing major events, broken entity references, misattributed techniques, and internal inconsistencies."
    }
  ],
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 43,
      "diagrams": 4,
      "internalLinks": 86,
      "externalLinks": 54,
      "footnotes": 32,
      "references": 32
    },
    "actuals": {
      "tables": 23,
      "diagrams": 2,
      "internalLinks": 90,
      "externalLinks": 12,
      "footnotes": 56,
      "references": 51,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "amber",
      "diagrams": "amber",
      "internalLinks": "green",
      "externalLinks": "amber",
      "footnotes": "green",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "editHistoryCount": 2,
    "ratingsString": "N:4.5 R:6.8 A:6.2 C:7.5"
  },
  "readerRank": 214,
  "researchRank": 463,
  "recommendedScore": 235.11
}
```
External Links
```json
{
  "wikipedia": "https://en.wikipedia.org/wiki/Biological_warfare",
  "eaForum": "https://forum.effectivealtruism.org/topics/global-catastrophic-biological-risk",
  "eightyK": "https://80000hours.org/problem-profiles/preventing-catastrophic-pandemics/",
  "grokipedia": "https://grokipedia.com/page/Biological_warfare"
}
```
Backlinks (33)
| id | title | type | relationship |
|---|---|---|---|
| misuse-risks | AI Misuse Risk Cruxes | crux | — |
| bioweapons-attack-chain | Bioweapons Attack Chain Model | analysis | related |
| bioweapons-ai-uplift | AI Uplift Assessment Model | analysis | related |
| bioweapons-timeline | AI-Bioweapons Timeline Model | analysis | related |
| marc-andreessen | Marc Andreessen (AI Investor) | person | — |
| compute-governance | Compute Governance | policy | — |
| evals | AI Evaluations | safety-agenda | — |
| cyberweapons | Cyberweapons Risk | risk | — |
| proliferation | AI Proliferation | risk | — |
| large-language-models | Large Language Models | concept | — |
| scientific-research | Scientific Research Capabilities | capability | — |
| mainstream-era | Mainstream Era (2020-Present) | historical | — |
| __index__/knowledge-base | Knowledge Base | concept | — |
| capability-threshold-model | Capability Threshold Model | analysis | — |
| risk-activation-timeline | Risk Activation Timeline Model | analysis | — |
| blueprint-biosecurity | Blueprint Biosecurity | organization | — |
| frontier-model-forum | Frontier Model Forum | organization | — |
| securebio | SecureBio | organization | — |
| max-tegmark | Max Tegmark | person | — |
| toby-ord | Toby Ord | person | — |
| ai-safety-institutes | AI Safety Institutes | policy | — |
| anthropic-core-views | Anthropic Core Views | safety-agenda | — |
| capability-unlearning | Capability Unlearning / Removal | approach | — |
| evaluation | AI Evaluation | approach | — |
| lab-culture | AI Lab Safety Culture | approach | — |
| red-teaming | Red Teaming | approach | — |
| refusal-training | Refusal Training | approach | — |
| responsible-scaling-policies | Responsible Scaling Policies | policy | — |
| seoul-declaration | Seoul AI Safety Summit Declaration | policy | — |
| technical-research | Technical AI Safety Research | crux | — |
| thresholds | Compute Thresholds | policy | — |
| __index__/knowledge-base/risks | AI Risks | concept | — |
| misuse-overview | Misuse Risks (Overview) | concept | — |
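For illustration, the header count and a per-type breakdown can be derived from rows shaped like the table above; the `Backlink` interface is an assumption about the row schema, not the site's actual types.

```ts
// Hypothetical row shape mirroring the backlinks table columns.
interface Backlink {
  id: string;
  title: string;
  type: string;          // e.g. "crux", "analysis", "person", "policy"
  relationship?: string; // mostly absent; "related" for the model pages
}

// Count rows and group by type; total should reproduce "Backlinks (33)".
function summarizeBacklinks(rows: Backlink[]) {
  const byType: Record<string, number> = {};
  for (const row of rows) byType[row.type] = (byType[row.type] ?? 0) + 1;
  return { total: rows.length, byType };
}
```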