Leopold Aschenbrenner
leopold-aschenbrenner (person) — Path: /knowledge-base/people/leopold-aschenbrenner/
E578 — Entity ID (EID)
Page Record — database.json, merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "leopold-aschenbrenner",
"numericId": null,
"path": "/knowledge-base/people/leopold-aschenbrenner/",
"filePath": "knowledge-base/people/leopold-aschenbrenner.mdx",
"title": "Leopold Aschenbrenner",
"quality": 61,
"readerImportance": 27,
"researchImportance": 42.5,
"tacticalValue": null,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "Comprehensive biographical profile of Leopold Aschenbrenner, covering his trajectory from Columbia valedictorian to OpenAI researcher to $1.5B hedge fund founder, with detailed documentation of his controversial \"Situational Awareness\" essay predicting AGI by 2027, his disputed firing from OpenAI over security concerns, and the substantial criticisms of his epistemics and potential conflicts of interest.",
"description": "Former OpenAI researcher, author of 'Situational Awareness,' and founder of AI-focused hedge fund predicting AGI by 2027",
"ratings": {
"focus": 8.5,
"novelty": 2,
"rigor": 6,
"completeness": 8,
"concreteness": 7,
"actionability": 1
},
"category": "people",
"subcategory": "safety-researchers",
"clusters": [
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 2616,
"tableCount": 2,
"diagramCount": 0,
"internalLinks": 16,
"externalLinks": 2,
"footnoteCount": 0,
"bulletRatio": 0.17,
"sectionCount": 24,
"hasOverview": true,
"structuralScore": 12
},
"suggestedQuality": 80,
"updateFrequency": 45,
"evergreen": true,
"wordCount": 2616,
"unconvertedLinks": [
{
"text": "en.wikipedia.org",
"url": "https://en.wikipedia.org/wiki/Leopold_Aschenbrenner",
"resourceId": "957893bf859a6d97",
"resourceTitle": "Leopold Aschenbrenner - Wikipedia"
}
],
"unconvertedLinkCount": 1,
"convertedLinkCount": 0,
"backlinkCount": 14,
"citationHealth": {
"total": 69,
"withQuotes": 49,
"verified": 49,
"accuracyChecked": 49,
"accurate": 31,
"inaccurate": 2,
"avgScore": 0.9642045522222713
},
"hallucinationRisk": {
"level": "high",
"score": 75,
"factors": [
"biographical-claims",
"no-citations"
]
},
"entityType": "person",
"redundancy": {
"maxSimilarity": 19,
"similarPages": [
{
"id": "situational-awareness-lp",
"title": "Situational Awareness LP",
"path": "/knowledge-base/organizations/situational-awareness-lp/",
"similarity": 19
},
{
"id": "mainstream-era",
"title": "Mainstream Era (2020-Present)",
"path": "/knowledge-base/history/mainstream-era/",
"similarity": 15
},
{
"id": "miri-era",
"title": "The MIRI Era (2000-2015)",
"path": "/knowledge-base/history/miri-era/",
"similarity": 15
},
{
"id": "ai-futures-project",
"title": "AI Futures Project",
"path": "/knowledge-base/organizations/ai-futures-project/",
"similarity": 15
},
{
"id": "openai",
"title": "OpenAI",
"path": "/knowledge-base/organizations/openai/",
"similarity": 15
}
]
},
"coverage": {
"passing": 4,
"total": 13,
"targets": {
"tables": 10,
"diagrams": 1,
"internalLinks": 21,
"externalLinks": 13,
"footnotes": 8,
"references": 8
},
"actuals": {
"tables": 2,
"diagrams": 0,
"internalLinks": 16,
"externalLinks": 2,
"footnotes": 0,
"references": 1,
"quotesWithQuotes": 49,
"quotesTotal": 69,
"accuracyChecked": 49,
"accuracyTotal": 69
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "amber",
"diagrams": "red",
"internalLinks": "amber",
"externalLinks": "amber",
"footnotes": "red",
"references": "amber",
"quotes": "amber",
"accuracy": "amber"
},
"ratingsString": "N:2 R:6 A:1 C:8"
},
"readerRank": 476,
"researchRank": 326,
"recommendedScore": 157.27
}
External Links
{
"grokipedia": "https://grokipedia.com/page/Leopold_Aschenbrenner"
}
Backlinks (14)
| id | title | type | relationship |
|---|---|---|---|
| self-improvement | Self-Improvement and Recursive Enhancement | capability | — |
| case-for-xrisk | The Case FOR AI Existential Risk | argument | — |
| ea-epistemic-failures-in-the-ftx-era | EA Epistemic Failures in the FTX Era | concept | — |
| ftx-collapse-ea-funding-lessons | FTX Collapse: Lessons for EA Funding Resilience | concept | — |
| ftx-future-fund | FTX Future Fund | organization | — |
| ftx | FTX (cryptocurrency exchange) | organization | — |
| manifold | Manifold (Prediction Market) | organization | — |
| manifund | Manifund | organization | — |
| situational-awareness-lp | Situational Awareness LP | organization | — |
| turion | Turion | organization | — |
| eliezer-yudkowsky-predictions | Eliezer Yudkowsky: Track Record | concept | — |
| ilya-sutskever | Ilya Sutskever | person | — |
| sam-altman | Sam Altman | person | — |
| sharp-left-turn | Sharp Left Turn | risk | — |