Longterm Wiki

Holden Karnofsky

holden-karnofsky · person · Path: /knowledge-base/people/holden-karnofsky/
Entity ID (EID): E156
← Back to page · 23 backlinks · Quality: 40 · Updated: 2026-03-12
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "holden-karnofsky",
  "numericId": null,
  "path": "/knowledge-base/people/holden-karnofsky/",
  "filePath": "knowledge-base/people/holden-karnofsky.mdx",
  "title": "Holden Karnofsky",
  "quality": 40,
  "readerImportance": 29.5,
  "researchImportance": 40,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-12",
  "dateCreated": "2026-02-15",
  "llmSummary": "Holden Karnofsky directed $300M+ in AI safety funding through Coefficient Giving (formerly Open Philanthropy), growing the field from ~20 to 400+ FTE researchers and developing influential frameworks like the 'Most Important Century' thesis (15% transformative AI by 2036, 50% by 2060). His funding decisions include a $580M Anthropic investment and establishment of 15+ university AI safety programs.",
  "description": "Former co-CEO of Coefficient Giving (formerly Open Philanthropy) who directed $300M+ toward AI safety, shaped EA prioritization, and developed influential frameworks like the \"Most Important Century\" thesis. Now at Anthropic.",
  "ratings": {
    "novelty": 2,
    "rigor": 4.5,
    "actionability": 2,
    "completeness": 6
  },
  "category": "people",
  "subcategory": "ea-figures",
  "clusters": [
    "community",
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 1762,
    "tableCount": 14,
    "diagramCount": 0,
    "internalLinks": 55,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.3,
    "sectionCount": 35,
    "hasOverview": true,
    "structuralScore": 11
  },
  "suggestedQuality": 73,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 1762,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 24,
  "backlinkCount": 23,
  "hallucinationRisk": {
    "level": "high",
    "score": 80,
    "factors": [
      "biographical-claims",
      "no-citations",
      "few-external-sources"
    ]
  },
  "entityType": "person",
  "redundancy": {
    "maxSimilarity": 15,
    "similarPages": [
      {
        "id": "toby-ord",
        "title": "Toby Ord",
        "path": "/knowledge-base/people/toby-ord/",
        "similarity": 15
      },
      {
        "id": "safety-research-value",
        "title": "Expected Value of AI Safety Research",
        "path": "/knowledge-base/models/safety-research-value/",
        "similarity": 14
      },
      {
        "id": "chai",
        "title": "CHAI (Center for Human-Compatible AI)",
        "path": "/knowledge-base/organizations/chai/",
        "similarity": 14
      },
      {
        "id": "coefficient-giving",
        "title": "Coefficient Giving",
        "path": "/knowledge-base/organizations/coefficient-giving/",
        "similarity": 14
      },
      {
        "id": "conjecture",
        "title": "Conjecture",
        "path": "/knowledge-base/organizations/conjecture/",
        "similarity": 14
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-18",
      "branch": "claude/audit-webpage-errors-11sSF",
      "title": "Fix factual errors found in wiki audit",
      "summary": "Systematically audited ~35+ high-risk wiki pages for factual errors and hallucinations using parallel background agents plus direct reading. Fixed 13 confirmed errors across 11 files."
    }
  ],
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 7,
      "diagrams": 1,
      "internalLinks": 14,
      "externalLinks": 9,
      "footnotes": 5,
      "references": 5
    },
    "actuals": {
      "tables": 14,
      "diagrams": 0,
      "internalLinks": 55,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 17,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "green",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "editHistoryCount": 1,
    "ratingsString": "N:2 R:4.5 A:2 C:6"
  },
  "readerRank": 453,
  "researchRank": 344,
  "recommendedScore": 116.2
}
External Links
{
  "eaForum": "https://forum.effectivealtruism.org/topics/holden-karnofsky",
  "grokipedia": "https://grokipedia.com/page/Holden_Karnofsky"
}
Backlinks (23)
| id | title | type | relationship |
| --- | --- | --- | --- |
| anthropic | Anthropic | organization | leads-to |
| ajeya-cotra | Ajeya Cotra | person | |
| toby-ord | Toby Ord | person | |
| ea-longtermist-wins-losses | EA and Longtermist Wins and Losses | concept | |
| earning-to-give | Earning to Give: The EA Strategy and Its Limits | concept | |
| anthropic-pledge-enforcement | Anthropic Founder Pledges: Interventions to Increase Follow-Through | analysis | |
| anthropic-investors | Anthropic (Funder) | analysis | |
| anthropic-ipo | Anthropic IPO | analysis | |
| anthropic-pre-ipo-daf-transfers | Anthropic Pre-IPO DAF Transfers | analysis | |
| anthropic-stakeholders | Anthropic Stakeholders | table | |
| anthropic-valuation | Anthropic Valuation Analysis | analysis | |
| coefficient-giving | Coefficient Giving | organization | |
| controlai | ControlAI | organization | |
| miri | MIRI (Machine Intelligence Research Institute) | organization | |
| redwood-research | Redwood Research | organization | |
| dustin-moskovitz | Dustin Moskovitz (AI Safety Funder) | person | |
| helen-toner | Helen Toner | person | |
| __index__/knowledge-base/people | People | concept | |
| nuno-sempere | Nuño Sempere | person | |
| eliciting-latent-knowledge | Eliciting Latent Knowledge (ELK) | approach | |
| recoding-america | Recoding America | resource | |
| research-agendas | AI Alignment Research Agenda Comparison | crux | |
| state-capacity-ai-governance | State Capacity and AI Governance | concept | |
Longterm Wiki