Dangerous Capability Evaluations
dangerous-cap-evals · approach
Path: /knowledge-base/responses/dangerous-cap-evals/
Entity ID (EID): E442

Page Record
database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
```json
{
  "id": "dangerous-cap-evals",
  "numericId": null,
  "path": "/knowledge-base/responses/dangerous-cap-evals/",
  "filePath": "knowledge-base/responses/dangerous-cap-evals.mdx",
  "title": "Dangerous Capability Evaluations",
  "quality": 64,
  "readerImportance": 70.5,
  "researchImportance": 71.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive synthesis showing dangerous capability evaluations are now standard practice (95%+ frontier models) but face critical limitations: AI capabilities double every 7 months while external safety orgs are underfunded 10,000:1 vs development, and 1-13% of models exhibit scheming behavior that could evade evaluations. Despite achieving significant adoption and identifying real deployment risks (e.g., o3 scoring 43.8% on virology tests vs 22.1% human expert average), DCEs cannot guarantee safety against sophisticated deception or emergent capabilities.",
  "description": "Systematic testing of AI models for dangerous capabilities including bioweapons assistance, cyberattack potential, autonomous self-replication, and persuasion/manipulation abilities to inform deployment decisions and safety policies.",
  "ratings": {
    "novelty": 4.5,
    "rigor": 7,
    "actionability": 6.5,
    "completeness": 7.5
  },
  "category": "responses",
  "subcategory": "alignment-evaluation",
  "clusters": [
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 3604,
    "tableCount": 18,
    "diagramCount": 2,
    "internalLinks": 14,
    "externalLinks": 39,
    "footnoteCount": 0,
    "bulletRatio": 0.13,
    "sectionCount": 35,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 3604,
  "unconvertedLinks": [
    {
      "text": "Autonomous task completion",
      "url": "https://metr.org/blog/2025-03-19-measuring-ai-ability-to-complete-long-tasks/",
      "resourceId": "271fc5f73a8304b2",
      "resourceTitle": "Measuring AI Ability to Complete Long Tasks - METR"
    },
    {
      "text": "GPT-5 Evaluation",
      "url": "https://evaluations.metr.org/gpt-5-report/",
      "resourceId": "7457262d461e2206",
      "resourceTitle": "evaluations.metr.org"
    },
    {
      "text": "Frontier AI Trends",
      "url": "https://www.aisi.gov.uk/frontier-ai-trends-report",
      "resourceId": "7042c7f8de04ccb1",
      "resourceTitle": "AISI Frontier AI Trends"
    },
    {
      "text": "In-context scheming",
      "url": "https://www.apolloresearch.ai/research/scheming-reasoning-evaluations",
      "resourceId": "91737bf431000298",
      "resourceTitle": "Frontier Models are Capable of In-Context Scheming"
    },
    {
      "text": "Anti-scheming training",
      "url": "https://www.apolloresearch.ai/blog/more-capable-models-are-better-at-in-context-scheming/",
      "resourceId": "80c6d6eca17dc925",
      "resourceTitle": "More capable models scheme at higher rates"
    },
    {
      "text": "Dangerous Capabilities",
      "url": "https://arxiv.org/abs/2403.13793",
      "resourceId": "daec8c61ea79836b",
      "resourceTitle": "Dangerous Capability Evaluations"
    },
    {
      "text": "METR",
      "url": "https://metr.org/",
      "resourceId": "45370a5153534152",
      "resourceTitle": "metr.org"
    },
    {
      "text": "UK AI Security Institute",
      "url": "https://www.aisi.gov.uk/",
      "resourceId": "fdf68a8f30f57dee",
      "resourceTitle": "AI Safety Institute"
    },
    {
      "text": "Frontier AI Trends Report",
      "url": "https://www.aisi.gov.uk/frontier-ai-trends-report",
      "resourceId": "7042c7f8de04ccb1",
      "resourceTitle": "AISI Frontier AI Trends"
    },
    {
      "text": "DeepMind's March 2024 research",
      "url": "https://arxiv.org/abs/2403.13793",
      "resourceId": "daec8c61ea79836b",
      "resourceTitle": "Dangerous Capability Evaluations"
    },
    {
      "text": "Future of Life Institute's 2025 AI Safety Index",
      "url": "https://futureoflife.org/ai-safety-index-summer-2025/",
      "resourceId": "df46edd6fa2078d1",
      "resourceTitle": "FLI AI Safety Index Summer 2025"
    },
    {
      "text": "Responsible Scaling Policy",
      "url": "https://www.anthropic.com/responsible-scaling-policy",
      "resourceId": "afe1e125f3ba3f14"
    },
    {
      "text": "Preparedness Framework",
      "url": "https://openai.com/index/updating-our-preparedness-framework/",
      "resourceId": "ded0b05862511312",
      "resourceTitle": "Preparedness Framework"
    },
    {
      "text": "Apollo Research's December 2024 study",
      "url": "https://www.apolloresearch.ai/research/scheming-reasoning-evaluations",
      "resourceId": "91737bf431000298",
      "resourceTitle": "Frontier Models are Capable of In-Context Scheming"
    },
    {
      "text": "follow-up anti-scheming training research",
      "url": "https://openai.com/index/detecting-and-reducing-scheming-in-ai-models/",
      "resourceId": "b3f335edccfc5333",
      "resourceTitle": "OpenAI Preparedness Framework"
    },
    {
      "text": "International AI Safety Report's October 2025 update",
      "url": "https://internationalaisafetyreport.org/publication/first-key-update-capabilities-and-risk-implications",
      "resourceId": "6acf3be7a03c2328",
      "resourceTitle": "International AI Safety Report (October 2025)"
    },
    {
      "text": "Evaluating Frontier Models for Dangerous Capabilities",
      "url": "https://arxiv.org/abs/2403.13793",
      "resourceId": "daec8c61ea79836b",
      "resourceTitle": "Dangerous Capability Evaluations"
    },
    {
      "text": "Measuring AI Ability to Complete Long Tasks",
      "url": "https://metr.org/blog/2025-03-19-measuring-ai-ability-to-complete-long-tasks/",
      "resourceId": "271fc5f73a8304b2",
      "resourceTitle": "Measuring AI Ability to Complete Long Tasks - METR"
    },
    {
      "text": "Frontier AI Trends Report",
      "url": "https://www.aisi.gov.uk/frontier-ai-trends-report",
      "resourceId": "7042c7f8de04ccb1",
      "resourceTitle": "AISI Frontier AI Trends"
    },
    {
      "text": "Detecting and Reducing Scheming",
      "url": "https://openai.com/index/detecting-and-reducing-scheming-in-ai-models/",
      "resourceId": "b3f335edccfc5333",
      "resourceTitle": "OpenAI Preparedness Framework"
    },
    {
      "text": "First Key Update: Capabilities and Risk Implications",
      "url": "https://internationalaisafetyreport.org/publication/first-key-update-capabilities-and-risk-implications",
      "resourceId": "6acf3be7a03c2328",
      "resourceTitle": "International AI Safety Report (October 2025)"
    },
    {
      "text": "AI Safety Index Summer 2025",
      "url": "https://futureoflife.org/ai-safety-index-summer-2025/",
      "resourceId": "df46edd6fa2078d1",
      "resourceTitle": "FLI AI Safety Index Summer 2025"
    },
    {
      "text": "Our 2025 Year in Review",
      "url": "https://www.aisi.gov.uk/blog/our-2025-year-in-review",
      "resourceId": "3dec5f974c5da5ec",
      "resourceTitle": "Our 2025 Year in Review"
    },
    {
      "text": "Advanced AI Evaluations May Update",
      "url": "https://www.aisi.gov.uk/blog/advanced-ai-evaluations-may-update",
      "resourceId": "4e56cdf6b04b126b",
      "resourceTitle": "UK AI Safety Institute renamed to AI Security Institute"
    },
    {
      "text": "Responsible Scaling Policy v2.2",
      "url": "https://www.anthropic.com/responsible-scaling-policy",
      "resourceId": "afe1e125f3ba3f14"
    },
    {
      "text": "Preparedness Framework v2",
      "url": "https://cdn.openai.com/pdf/18a02b5d-6b67-4cec-ab64-68cdfbddebcd/preparedness-framework-v2.pdf",
      "resourceId": "ec5d8e7d6a1b2c7c",
      "resourceTitle": "OpenAI: Preparedness Framework Version 2"
    },
    {
      "text": "METR",
      "url": "https://metr.org/",
      "resourceId": "45370a5153534152",
      "resourceTitle": "metr.org"
    },
    {
      "text": "Apollo Research",
      "url": "https://www.apolloresearch.ai/",
      "resourceId": "329d8c2e2532be3d",
      "resourceTitle": "Apollo Research"
    },
    {
      "text": "UK AI Security Institute",
      "url": "https://www.aisi.gov.uk/",
      "resourceId": "fdf68a8f30f57dee",
      "resourceTitle": "AI Safety Institute"
    },
    {
      "text": "US AI Safety Institute (NIST)",
      "url": "https://www.nist.gov/aisi",
      "resourceId": "84e0da6d5092e27d",
      "resourceTitle": "US AISI"
    },
    {
      "text": "SecureBio",
      "url": "https://securebio.org/",
      "resourceId": "81e8568b008e4245",
      "resourceTitle": "SecureBio organization"
    }
  ],
  "unconvertedLinkCount": 31,
  "convertedLinkCount": 0,
  "backlinkCount": 5,
  "hallucinationRisk": {
    "level": "low",
    "score": 30,
    "factors": [
      "no-citations",
      "high-rigor",
      "conceptual-content"
    ]
  },
  "entityType": "approach",
  "redundancy": {
    "maxSimilarity": 23,
    "similarPages": [
      {
        "id": "capability-elicitation",
        "title": "Capability Elicitation",
        "path": "/knowledge-base/responses/capability-elicitation/",
        "similarity": 23
      },
      {
        "id": "model-auditing",
        "title": "Third-Party Model Auditing",
        "path": "/knowledge-base/responses/model-auditing/",
        "similarity": 23
      },
      {
        "id": "evals",
        "title": "Evals & Red-teaming",
        "path": "/knowledge-base/responses/evals/",
        "similarity": 22
      },
      {
        "id": "alignment-evals",
        "title": "Alignment Evaluations",
        "path": "/knowledge-base/responses/alignment-evals/",
        "similarity": 20
      },
      {
        "id": "safety-cases",
        "title": "AI Safety Cases",
        "path": "/knowledge-base/responses/safety-cases/",
        "similarity": 19
      }
    ]
  },
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 14,
      "diagrams": 1,
      "internalLinks": 29,
      "externalLinks": 18,
      "footnotes": 11,
      "references": 11
    },
    "actuals": {
      "tables": 18,
      "diagrams": 2,
      "internalLinks": 14,
      "externalLinks": 39,
      "footnotes": 0,
      "references": 19,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "amber",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:4.5 R:7 A:6.5 C:7.5"
  },
  "readerRank": 162,
  "researchRank": 140,
  "recommendedScore": 185.11
}
```
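The record header describes database.json as merged from MDX frontmatter, Entity YAML, and computed metrics at build time. A minimal sketch of that merge, assuming hypothetical helper and parameter names (`buildPageRecord` and its inputs are illustrative, not the site's actual API):

```typescript
// Hypothetical sketch of the build-time merge described in the record
// header. Only the three inputs (MDX frontmatter, Entity YAML, computed
// metrics) come from the source; the names and precedence are assumptions.

interface PageRecord {
  id: string;
  path: string;
  title: string;
  metrics: Record<string, number | boolean>;
  [key: string]: unknown; // remaining fields as shown in the record above
}

function buildPageRecord(
  frontmatter: Record<string, unknown>,      // parsed from the .mdx file
  entity: Record<string, unknown>,           // parsed from the Entity YAML
  metrics: Record<string, number | boolean>, // computed at build time
): PageRecord {
  // Assumed precedence: frontmatter overrides entity defaults,
  // and computed metrics are attached last.
  return { ...entity, ...frontmatter, metrics } as PageRecord;
}
```

The spread order encodes the assumed precedence; if the real pipeline resolves conflicts differently, only that return line changes.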
External Links

No external links
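The coverage block in the record above pairs per-item targets with actuals and assigns each a green/amber/red status. The real thresholds are not documented in this dump, but one simple rule is consistent with every pair shown (tables 18 vs 14 is green, internalLinks 14 vs 29 is amber, footnotes 0 vs 11 is red):

```typescript
type CoverageStatus = "green" | "amber" | "red";

// One rule consistent with every target/actual pair in the record above;
// the site's actual thresholds are an assumption, not documented here.
function coverageStatus(actual: number, target: number): CoverageStatus {
  if (actual >= target) return "green"; // target met or exceeded
  if (actual === 0) return "red";       // no progress at all
  return "amber";                       // partial progress
}

console.log(coverageStatus(18, 14)); // "green" (tables)
console.log(coverageStatus(14, 29)); // "amber" (internalLinks)
console.log(coverageStatus(0, 11));  // "red"   (footnotes)
```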
Backlinks (5)
| id | title | type | relationship |
|---|---|---|---|
| metr | METR | organization | — |
| sff | Survival and Flourishing Fund (SFF) | organization | — |
| alignment-evals | Alignment Evaluations | approach | — |
| alignment-evaluation-overview | Evaluation & Detection (Overview) | concept | — |
| evaluation | AI Evaluation | approach | — |
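The backlinks table above lists the five database.json records that link to this page (matching `"backlinkCount": 5`). A sketch of how such a table could be derived, assuming a hypothetical `links` field holding each record's outgoing page ids (the dump above only exposes counts, not the underlying link schema):

```typescript
// Hypothetical backlink derivation; `links` is an assumed field name.
interface LinkedRecord {
  id: string;
  title: string;
  entityType: string;
  links: string[]; // ids of pages this record links to
}

function backlinksTo(targetId: string, records: LinkedRecord[]): LinkedRecord[] {
  return records.filter((record) => record.links.includes(targetId));
}

// backlinksTo("dangerous-cap-evals", allRecords) would yield the five
// rows in the table above: metr, sff, alignment-evals,
// alignment-evaluation-overview, and evaluation.
```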