Alignment Evaluations
ID: alignment-evals · Entity type: approach · Path: /knowledge-base/responses/alignment-evals/
Entity ID (EID): E448
Page Record — database.json, merged from MDX frontmatter + Entity YAML + computed metrics at build time
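A minimal TypeScript sketch of the build-time merge described above. The library choices (gray-matter, js-yaml), file paths, metric formulas, and the `buildPageRecord` helper are illustrative assumptions, not the site's actual build code.

```typescript
// Illustrative sketch only: libraries, paths, and metric formulas are assumptions,
// not the knowledge base's actual build pipeline.
import { readFileSync } from "node:fs";
import matter from "gray-matter";
import yaml from "js-yaml";

interface PageMetrics {
  wordCount: number;
  internalLinks: number;
  externalLinks: number;
}

function buildPageRecord(mdxPath: string, entityYamlPath: string) {
  // 1. MDX frontmatter supplies editorial fields (title, quality, llmSummary, ...).
  const { data: frontmatter, content } = matter(readFileSync(mdxPath, "utf8"));

  // 2. The Entity YAML supplies identity fields (entityType, clusters, ...).
  const entity = yaml.load(readFileSync(entityYamlPath, "utf8")) as Record<string, unknown>;

  // 3. Metrics are computed from the MDX body at build time (simplified here).
  const metrics: PageMetrics = {
    wordCount: content.split(/\s+/).filter(Boolean).length,
    internalLinks: (content.match(/\]\(\/knowledge-base\//g) ?? []).length,
    externalLinks: (content.match(/\]\(https?:\/\//g) ?? []).length,
  };

  // Later sources win on key collisions: frontmatter, then entity, then computed metrics.
  return { ...frontmatter, ...entity, metrics };
}
```

The merged record produced for this page follows.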
{
"id": "alignment-evals",
"numericId": null,
"path": "/knowledge-base/responses/alignment-evals/",
"filePath": "knowledge-base/responses/alignment-evals.mdx",
"title": "Alignment Evaluations",
"quality": 65,
"readerImportance": 65,
"researchImportance": 78,
"tacticalValue": null,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "Comprehensive review of alignment evaluation methods showing Apollo Research found 1-13% scheming rates across frontier models, while anti-scheming training reduced covert actions 30x (13%→0.4%) but Claude Sonnet 4.5 shows 58% evaluation awareness. Behavioral testing faces fundamental deception robustness problems, but joint Anthropic-OpenAI evaluations and UK AISI's £15M program demonstrate growing prioritization despite all major labs lacking superintelligence control plans.",
"description": "Systematic testing of AI models for alignment properties including honesty, corrigibility, goal stability, and absence of deceptive behavior. Apollo Research's 2024 study found 1-13% scheming rates across frontier models, while TruthfulQA shows 58-85% accuracy on factual questions. Critical for deployment decisions but faces fundamental measurement challenges where deceptive models could fake alignment.",
"ratings": {
"novelty": 5.8,
"rigor": 6.2,
"actionability": 6.5,
"completeness": 7.1
},
"category": "responses",
"subcategory": "alignment-evaluation",
"clusters": [
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 3773,
"tableCount": 20,
"diagramCount": 2,
"internalLinks": 9,
"externalLinks": 75,
"footnoteCount": 0,
"bulletRatio": 0.16,
"sectionCount": 37,
"hasOverview": true,
"structuralScore": 15
},
"suggestedQuality": 100,
"updateFrequency": 21,
"evergreen": true,
"wordCount": 3773,
"unconvertedLinks": [
{
"text": "HELM Safety v1.0",
"url": "https://futureoflife.org/ai-safety-index-summer-2025/",
"resourceId": "df46edd6fa2078d1",
"resourceTitle": "FLI AI Safety Index Summer 2025"
},
{
"text": "Apollo Research 2025",
"url": "https://www.apolloresearch.ai/research/",
"resourceId": "560dff85b3305858",
"resourceTitle": "Apollo Research"
},
{
"text": "UK AISI Alignment Project",
"url": "https://www.aisi.gov.uk/blog/our-2025-year-in-review",
"resourceId": "3dec5f974c5da5ec",
"resourceTitle": "Our 2025 Year in Review"
},
{
"text": "FLI AI Safety Index",
"url": "https://futureoflife.org/ai-safety-index-summer-2025/",
"resourceId": "df46edd6fa2078d1",
"resourceTitle": "FLI AI Safety Index Summer 2025"
},
{
"text": "first-ever joint evaluation between Anthropic and OpenAI",
"url": "https://openai.com/index/openai-anthropic-safety-evaluation/",
"resourceId": "cc554bd1593f0504",
"resourceTitle": "2025 OpenAI-Anthropic joint evaluation"
},
{
"text": "Apollo Research found",
"url": "https://www.apolloresearch.ai/research/",
"resourceId": "560dff85b3305858",
"resourceTitle": "Apollo Research"
},
{
"text": "joint evaluations",
"url": "https://openai.com/index/openai-anthropic-safety-evaluation/",
"resourceId": "cc554bd1593f0504",
"resourceTitle": "2025 OpenAI-Anthropic joint evaluation"
},
{
"text": "Anti-scheming training",
"url": "https://openai.com/index/detecting-and-reducing-scheming-in-ai-models/",
"resourceId": "b3f335edccfc5333",
"resourceTitle": "OpenAI Preparedness Framework"
},
{
"text": "TruthfulQA",
"url": "https://arxiv.org/abs/2109.07958",
"resourceId": "fe2a3307a3dae3e5",
"resourceTitle": "Kenton et al. (2021)"
},
{
"text": "MACHIAVELLI",
"url": "https://arxiv.org/abs/2304.03279",
"resourceId": "6d4e8851e33e1641",
"resourceTitle": "MACHIAVELLI dataset"
},
{
"text": "Apollo Research",
"url": "https://www.apolloresearch.ai/research/scheming-reasoning-evaluations",
"resourceId": "91737bf431000298",
"resourceTitle": "Frontier Models are Capable of In-Context Scheming"
},
{
"text": "Anthropic",
"url": "https://www.anthropic.com/research/sleeper-agents-training-deceptive-llms-that-persist-through-safety-training",
"resourceId": "83b187f91a7c6b88",
"resourceTitle": "Anthropic's sleeper agents research (2024)"
},
{
"text": "OpenAI",
"url": "https://openai.com/index/updating-our-preparedness-framework/",
"resourceId": "ded0b05862511312",
"resourceTitle": "Preparedness Framework"
},
{
"text": "UK AISI",
"url": "https://www.aisi.gov.uk/blog/early-lessons-from-evaluating-frontier-ai-systems",
"resourceId": "0fd3b1f5c81a37d8",
"resourceTitle": "UK AI Security Institute's evaluations"
},
{
"text": "Apollo noted",
"url": "https://www.apolloresearch.ai/blog/more-capable-models-are-better-at-in-context-scheming/",
"resourceId": "80c6d6eca17dc925",
"resourceTitle": "More capable models scheme at higher rates"
},
{
"text": "Sharma et al. (2023)",
"url": "https://arxiv.org/abs/2310.13548",
"resourceId": "7951bdb54fd936a6",
"resourceTitle": "Anthropic: \"Discovering Sycophancy in Language Models\""
},
{
"text": "Lin et al. 2021",
"url": "https://arxiv.org/abs/2109.07958",
"resourceId": "fe2a3307a3dae3e5",
"resourceTitle": "Kenton et al. (2021)"
},
{
"text": "Pan et al. 2023",
"url": "https://arxiv.org/abs/2304.03279",
"resourceId": "6d4e8851e33e1641",
"resourceTitle": "MACHIAVELLI dataset"
},
{
"text": "Apollo Research's 2025 findings",
"url": "https://www.apolloresearch.ai/research/",
"resourceId": "560dff85b3305858",
"resourceTitle": "Apollo Research"
},
{
"text": "UK AISI research",
"url": "https://www.aisi.gov.uk/blog/our-2025-year-in-review",
"resourceId": "3dec5f974c5da5ec",
"resourceTitle": "Our 2025 Year in Review"
},
{
"text": "Anthropic's pre-deployment assessment of Claude Sonnet 4.5",
"url": "https://alignment.anthropic.com/",
"resourceId": "5a651b8ed18ffeb1",
"resourceTitle": "Anthropic Alignment Science Blog"
},
{
"text": "Anthropic's Sleeper Agents paper",
"url": "https://arxiv.org/abs/2401.05566",
"resourceId": "e5c0904211c7d0cc"
},
{
"text": "Apollo Research",
"url": "https://www.apolloresearch.ai/research/",
"resourceId": "560dff85b3305858",
"resourceTitle": "Apollo Research"
},
{
"text": "Anthropic",
"url": "https://www.anthropic.com/research",
"resourceId": "f771d4f56ad4dbaa",
"resourceTitle": "Anthropic's Work on AI Safety"
},
{
"text": "OpenAI",
"url": "https://openai.com/safety/",
"resourceId": "838d7a59a02e11a7",
"resourceTitle": "OpenAI Safety Updates"
},
{
"text": "UK AISI launched The £15M Alignment Project",
"url": "https://www.aisi.gov.uk/blog/our-2025-year-in-review",
"resourceId": "3dec5f974c5da5ec",
"resourceTitle": "Our 2025 Year in Review"
},
{
"text": "alignment evaluation case studies",
"url": "https://www.aisi.gov.uk/blog/early-lessons-from-evaluating-frontier-ai-systems",
"resourceId": "0fd3b1f5c81a37d8",
"resourceTitle": "UK AI Security Institute's evaluations"
},
{
"text": "OpenAI's Deliberative Alignment",
"url": "https://openai.com/index/detecting-and-reducing-scheming-in-ai-models/",
"resourceId": "b3f335edccfc5333",
"resourceTitle": "OpenAI Preparedness Framework"
},
{
"text": "OpenAI and Anthropic conducted the first-ever joint safety evaluations",
"url": "https://openai.com/index/openai-anthropic-safety-evaluation/",
"resourceId": "cc554bd1593f0504",
"resourceTitle": "2025 OpenAI-Anthropic joint evaluation"
},
{
"text": "Anthropic researchers concluded",
"url": "https://alignment.anthropic.com/2025/openai-findings/",
"resourceId": "2fdf91febf06daaf",
"resourceTitle": "Anthropic-OpenAI joint evaluation"
},
{
"text": "Future of Life Institute's AI Safety Index",
"url": "https://futureoflife.org/ai-safety-index-summer-2025/",
"resourceId": "df46edd6fa2078d1",
"resourceTitle": "FLI AI Safety Index Summer 2025"
},
{
"text": "FLI's Safety Index",
"url": "https://futureoflife.org/ai-safety-index-summer-2025/",
"resourceId": "df46edd6fa2078d1",
"resourceTitle": "FLI AI Safety Index Summer 2025"
},
{
"text": "Apollo Research (2025)",
"url": "https://www.apolloresearch.ai/blog/more-capable-models-are-better-at-in-context-scheming/",
"resourceId": "80c6d6eca17dc925",
"resourceTitle": "More capable models scheme at higher rates"
},
{
"text": "Anthropic (2024)",
"url": "https://arxiv.org/abs/2401.05566",
"resourceId": "e5c0904211c7d0cc"
},
{
"text": "Anthropic (2024)",
"url": "https://www.anthropic.com/research/probes-catch-sleeper-agents",
"resourceId": "72c1254d07071bf7",
"resourceTitle": "Anthropic's follow-up research on defection probes"
},
{
"text": "Anthropic-OpenAI (2025)",
"url": "https://alignment.anthropic.com/2025/openai-findings/",
"resourceId": "2fdf91febf06daaf",
"resourceTitle": "Anthropic-OpenAI joint evaluation"
},
{
"text": "OpenAI (2025)",
"url": "https://openai.com/index/openai-anthropic-safety-evaluation/",
"resourceId": "cc554bd1593f0504",
"resourceTitle": "2025 OpenAI-Anthropic joint evaluation"
},
{
"text": "OpenAI (2025)",
"url": "https://openai.com/index/detecting-and-reducing-scheming-in-ai-models/",
"resourceId": "b3f335edccfc5333",
"resourceTitle": "OpenAI Preparedness Framework"
},
{
"text": "Lin, Hilton, Evans (2021)",
"url": "https://arxiv.org/abs/2109.07958",
"resourceId": "fe2a3307a3dae3e5",
"resourceTitle": "Kenton et al. (2021)"
},
{
"text": "Pan et al. (2023)",
"url": "https://arxiv.org/abs/2304.03279",
"resourceId": "6d4e8851e33e1641",
"resourceTitle": "MACHIAVELLI dataset"
},
{
"text": "Sharma et al. (2023)",
"url": "https://arxiv.org/abs/2310.13548",
"resourceId": "7951bdb54fd936a6",
"resourceTitle": "Anthropic: \"Discovering Sycophancy in Language Models\""
},
{
"text": "Future of Life Institute AI Safety Index (Summer 2025)",
"url": "https://futureoflife.org/ai-safety-index-summer-2025/",
"resourceId": "df46edd6fa2078d1",
"resourceTitle": "FLI AI Safety Index Summer 2025"
},
{
"text": "Stanford HELM Safety v1.0",
"url": "https://futureoflife.org/ai-safety-index-summer-2025/",
"resourceId": "df46edd6fa2078d1",
"resourceTitle": "FLI AI Safety Index Summer 2025"
},
{
"text": "OpenAI Preparedness Framework",
"url": "https://openai.com/index/updating-our-preparedness-framework/",
"resourceId": "ded0b05862511312",
"resourceTitle": "Preparedness Framework"
},
{
"text": "UK AISI 2025 Year in Review",
"url": "https://www.aisi.gov.uk/blog/our-2025-year-in-review",
"resourceId": "3dec5f974c5da5ec",
"resourceTitle": "Our 2025 Year in Review"
},
{
"text": "UK AISI Early Lessons from Evaluating Frontier AI",
"url": "https://www.aisi.gov.uk/blog/early-lessons-from-evaluating-frontier-ai-systems",
"resourceId": "0fd3b1f5c81a37d8",
"resourceTitle": "UK AI Security Institute's evaluations"
},
{
"text": "Apollo Research",
"url": "https://www.apolloresearch.ai/research/",
"resourceId": "560dff85b3305858",
"resourceTitle": "Apollo Research"
},
{
"text": "Anthropic Alignment Science",
"url": "https://alignment.anthropic.com/",
"resourceId": "5a651b8ed18ffeb1",
"resourceTitle": "Anthropic Alignment Science Blog"
},
{
"text": "Anthropic Fellows Program",
"url": "https://alignment.anthropic.com/2025/anthropic-fellows-program-2026/",
"resourceId": "e65e76531931acc2",
"resourceTitle": "Anthropic Fellows Program"
},
{
"text": "OpenAI Safety",
"url": "https://openai.com/safety/",
"resourceId": "838d7a59a02e11a7",
"resourceTitle": "OpenAI Safety Updates"
}
],
"unconvertedLinkCount": 50,
"convertedLinkCount": 0,
"backlinkCount": 3,
"hallucinationRisk": {
"level": "medium",
"score": 45,
"factors": [
"no-citations",
"conceptual-content"
]
},
"entityType": "approach",
"redundancy": {
"maxSimilarity": 20,
"similarPages": [
{
"id": "dangerous-cap-evals",
"title": "Dangerous Capability Evaluations",
"path": "/knowledge-base/responses/dangerous-cap-evals/",
"similarity": 20
},
{
"id": "sleeper-agent-detection",
"title": "Sleeper Agent Detection",
"path": "/knowledge-base/responses/sleeper-agent-detection/",
"similarity": 20
},
{
"id": "capability-elicitation",
"title": "Capability Elicitation",
"path": "/knowledge-base/responses/capability-elicitation/",
"similarity": 19
},
{
"id": "model-auditing",
"title": "Third-Party Model Auditing",
"path": "/knowledge-base/responses/model-auditing/",
"similarity": 19
},
{
"id": "scheming-detection",
"title": "Scheming & Deception Detection",
"path": "/knowledge-base/responses/scheming-detection/",
"similarity": 19
}
]
},
"coverage": {
"passing": 8,
"total": 13,
"targets": {
"tables": 15,
"diagrams": 2,
"internalLinks": 30,
"externalLinks": 19,
"footnotes": 11,
"references": 11
},
"actuals": {
"tables": 20,
"diagrams": 2,
"internalLinks": 9,
"externalLinks": 75,
"footnotes": 0,
"references": 20,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "green",
"diagrams": "green",
"internalLinks": "amber",
"externalLinks": "green",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:5.8 R:6.2 A:6.5 C:7.1"
},
"readerRank": 195,
"researchRank": 103,
"recommendedScore": 184.36
}
External Links
{
"lesswrong": "https://www.lesswrong.com/tag/ai-evaluations"
}
Backlinks (3)
| id | title | type | relationship |
|---|---|---|---|
| situational-awareness | Situational Awareness | capability | — |
| epistemic-orgs-overview | Epistemic & Forecasting Organizations (Overview) | concept | — |
| alignment-evaluation-overview | Evaluation & Detection (Overview) | concept | — |