Holden Karnofsky
holden-karnofsky · person
Path: /knowledge-base/people/holden-karnofsky/
Entity ID (EID): E156

Page Record
database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
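As a rough sketch of what this build-time merge could look like (the function and source names below are illustrative assumptions, not this site's actual build code), producing the record shown below:

```typescript
// Hypothetical sketch of the merge described above: three sources
// combined into one page record, with later sources winning on
// key collisions. All names here are assumptions for illustration.

type Source = Record<string, unknown>;

interface PageRecord {
  id: string;
  path: string;
  title: string;
  [key: string]: unknown;
}

function buildPageRecord(
  entityYaml: Source,       // entity metadata (entityType, clusters, ...)
  frontmatter: Source,      // parsed MDX frontmatter (title, ratings, ...)
  computedMetrics: Source,  // derived at build time (wordCount, link counts, ...)
): PageRecord {
  // Computed metrics are derived from the rendered page, so they
  // overwrite any stale counts carried in the YAML or frontmatter.
  return { ...entityYaml, ...frontmatter, ...computedMetrics } as PageRecord;
}
```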
{
  "id": "holden-karnofsky",
  "numericId": null,
  "path": "/knowledge-base/people/holden-karnofsky/",
  "filePath": "knowledge-base/people/holden-karnofsky.mdx",
  "title": "Holden Karnofsky",
  "quality": 40,
  "readerImportance": 29.5,
  "researchImportance": 40,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-12",
  "dateCreated": "2026-02-15",
  "llmSummary": "Holden Karnofsky directed \\$300M+ in AI safety funding through Coefficient Giving (formerly Open Philanthropy), growing the field from ~20 to 400+ FTE researchers and developing influential frameworks like the 'Most Important Century' thesis (15% transformative AI by 2036, 50% by 2060). His funding decisions include a \\$580M Anthropic investment and establishment of 15+ university AI safety programs.",
  "description": "Former co-CEO of Coefficient Giving (formerly Open Philanthropy) who directed \\$300M+ toward AI safety, shaped EA prioritization, and developed influential frameworks like the \"Most Important Century\" thesis. Now at Anthropic.",
  "ratings": {
    "novelty": 2,
    "rigor": 4.5,
    "actionability": 2,
    "completeness": 6
  },
  "category": "people",
  "subcategory": "ea-figures",
  "clusters": [
    "community",
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 1762,
    "tableCount": 14,
    "diagramCount": 0,
    "internalLinks": 55,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.3,
    "sectionCount": 35,
    "hasOverview": true,
    "structuralScore": 11
  },
  "suggestedQuality": 73,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 1762,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 24,
  "backlinkCount": 23,
  "hallucinationRisk": {
    "level": "high",
    "score": 80,
    "factors": [
      "biographical-claims",
      "no-citations",
      "few-external-sources"
    ]
  },
  "entityType": "person",
  "redundancy": {
    "maxSimilarity": 15,
    "similarPages": [
      {
        "id": "toby-ord",
        "title": "Toby Ord",
        "path": "/knowledge-base/people/toby-ord/",
        "similarity": 15
      },
      {
        "id": "safety-research-value",
        "title": "Expected Value of AI Safety Research",
        "path": "/knowledge-base/models/safety-research-value/",
        "similarity": 14
      },
      {
        "id": "chai",
        "title": "CHAI (Center for Human-Compatible AI)",
        "path": "/knowledge-base/organizations/chai/",
        "similarity": 14
      },
      {
        "id": "coefficient-giving",
        "title": "Coefficient Giving",
        "path": "/knowledge-base/organizations/coefficient-giving/",
        "similarity": 14
      },
      {
        "id": "conjecture",
        "title": "Conjecture",
        "path": "/knowledge-base/organizations/conjecture/",
        "similarity": 14
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-18",
      "branch": "claude/audit-webpage-errors-11sSF",
      "title": "Fix factual errors found in wiki audit",
      "summary": "Systematically audited ~35+ high-risk wiki pages for factual errors and hallucinations using parallel background agents plus direct reading. Fixed 13 confirmed errors across 11 files."
    }
  ],
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 7,
      "diagrams": 1,
      "internalLinks": 14,
      "externalLinks": 9,
      "footnotes": 5,
      "references": 5
    },
    "actuals": {
      "tables": 14,
      "diagrams": 0,
      "internalLinks": 55,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 17,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "green",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "editHistoryCount": 1,
    "ratingsString": "N:2 R:4.5 A:2 C:6"
  },
  "readerRank": 453,
  "researchRank": 344,
  "recommendedScore": 116.2
}
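The green/red statuses in the record's `coverage.items` appear to follow a simple rule: an item passes when its actual count meets the target (tables: 14 >= 7 is green) and fails otherwise (diagrams: 0 < 1 is red). A minimal reconstruction of that check, assuming this inferred rule rather than the site's actual scoring code:

```typescript
// Inferred coverage rule: actual >= target means "green", else "red".
// This is a reconstruction from the record above, not the real implementation.

type Status = "green" | "red";

function coverageItems(
  targets: Record<string, number>,
  actuals: Record<string, number>,
): Record<string, Status> {
  const items: Record<string, Status> = {};
  for (const [key, target] of Object.entries(targets)) {
    items[key] = (actuals[key] ?? 0) >= target ? "green" : "red";
  }
  return items;
}

const items = coverageItems(
  { tables: 7, diagrams: 1, internalLinks: 14, externalLinks: 9, footnotes: 5, references: 5 },
  { tables: 14, diagrams: 0, internalLinks: 55, externalLinks: 0, footnotes: 0, references: 17 },
);
// => tables, internalLinks, references: "green";
//    diagrams, externalLinks, footnotes: "red"
// — matching the statuses recorded in coverage.items above.
```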
External Links

{
  "eaForum": "https://forum.effectivealtruism.org/topics/holden-karnofsky",
  "grokipedia": "https://grokipedia.com/page/Holden_Karnofsky"
}

Backlinks (23)
| id | title | type | relationship |
|---|---|---|---|
| anthropic | Anthropic | organization | leads-to |
| ajeya-cotra | Ajeya Cotra | person | — |
| toby-ord | Toby Ord | person | — |
| ea-longtermist-wins-losses | EA and Longtermist Wins and Losses | concept | — |
| earning-to-give | Earning to Give: The EA Strategy and Its Limits | concept | — |
| anthropic-pledge-enforcement | Anthropic Founder Pledges: Interventions to Increase Follow-Through | analysis | — |
| anthropic-investors | Anthropic (Funder) | analysis | — |
| anthropic-ipo | Anthropic IPO | analysis | — |
| anthropic-pre-ipo-daf-transfers | Anthropic Pre-IPO DAF Transfers | analysis | — |
| anthropic-stakeholders | Anthropic Stakeholders | table | — |
| anthropic-valuation | Anthropic Valuation Analysis | analysis | — |
| coefficient-giving | Coefficient Giving | organization | — |
| controlai | ControlAI | organization | — |
| miri | MIRI (Machine Intelligence Research Institute) | organization | — |
| redwood-research | Redwood Research | organization | — |
| dustin-moskovitz | Dustin Moskovitz (AI Safety Funder) | person | — |
| helen-toner | Helen Toner | person | — |
| __index__/knowledge-base/people | People | concept | — |
| nuno-sempere | Nuño Sempere | person | — |
| eliciting-latent-knowledge | Eliciting Latent Knowledge (ELK) | approach | — |
| recoding-america | Recoding America | resource | — |
| research-agendas | AI Alignment Research Agenda Comparison | crux | — |
| state-capacity-ai-governance | State Capacity and AI Governance | concept | — |