Ilya Sutskever
ID: ilya-sutskever
Entity type: person
Path: /knowledge-base/people/ilya-sutskever/
Entity ID (EID): E163
Page Record
database.json — merged from MDX frontmatter, Entity YAML, and computed metrics at build time
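As a rough illustration of that merge, here is a minimal sketch only — the build script, file layout, helper names, and field assignments are assumptions for illustration, not the site's actual code. The resulting record is shown below.

```ts
// Hypothetical build step; none of these paths or field mappings are confirmed by the record itself.
import { readFileSync } from "node:fs";
import matter from "gray-matter"; // parses MDX frontmatter
import yaml from "js-yaml";       // parses the Entity YAML

function buildPageRecord(mdxPath: string, entityYamlPath: string) {
  // 1. MDX frontmatter supplies editorial fields (title, description, ratings, ...).
  const { data: frontmatter, content } = matter(readFileSync(mdxPath, "utf8"));

  // 2. Entity YAML supplies identity fields (entityType, external links, EID).
  const entities = yaml.load(readFileSync(entityYamlPath, "utf8")) as Record<string, any>;
  const entity = entities[frontmatter.id];

  // 3. Computed metrics are derived from the MDX body at build time.
  const metrics = {
    wordCount: content.split(/\s+/).filter(Boolean).length,
    internalLinks: (content.match(/\]\(\/knowledge-base\//g) ?? []).length,
    // ... tableCount, diagramCount, footnoteCount, etc.
  };

  // Merge into a single record, as stored in database.json.
  return { ...entity, ...frontmatter, metrics };
}
```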
{
"id": "ilya-sutskever",
"numericId": null,
"path": "/knowledge-base/people/ilya-sutskever/",
"filePath": "knowledge-base/people/ilya-sutskever.mdx",
"title": "Ilya Sutskever",
"quality": 26,
"readerImportance": 34,
"researchImportance": 36,
"tacticalValue": null,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "Biographical overview of Ilya Sutskever's career trajectory from deep learning researcher (AlexNet, seq2seq, dropout) to co-founding Safe Superintelligence Inc. in 2024 after leaving OpenAI. Documents his role in the November 2023 OpenAI board incident, his co-leadership of the Superalignment team, SSI's funding history (over \\$3 billion raised at a \\$32 billion valuation as of April 2025), and his public statements on the limits of scaling and the path to safe superintelligence.",
"description": "Co-founder and CEO of Safe Superintelligence Inc., formerly Chief Scientist at OpenAI",
"ratings": {
"novelty": 2,
"rigor": 3.5,
"actionability": 1.5,
"completeness": 5
},
"category": "people",
"subcategory": "lab-leadership",
"clusters": [
"ai-safety"
],
"metrics": {
"wordCount": 3258,
"tableCount": 1,
"diagramCount": 0,
"internalLinks": 21,
"externalLinks": 0,
"footnoteCount": 0,
"bulletRatio": 0.27,
"sectionCount": 37,
"hasOverview": true,
"structuralScore": 10
},
"suggestedQuality": 67,
"updateFrequency": 21,
"evergreen": true,
"wordCount": 3258,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 0,
"backlinkCount": 23,
"hallucinationRisk": {
"level": "high",
"score": 95,
"factors": [
"biographical-claims",
"no-citations",
"low-rigor-score",
"low-quality-score",
"few-external-sources"
]
},
"entityType": "person",
"redundancy": {
"maxSimilarity": 23,
"similarPages": [
{
"id": "ssi",
"title": "Safe Superintelligence Inc (SSI)",
"path": "/knowledge-base/organizations/ssi/",
"similarity": 23
},
{
"id": "jan-leike",
"title": "Jan Leike",
"path": "/knowledge-base/people/jan-leike/",
"similarity": 18
},
{
"id": "deep-learning-era",
"title": "Deep Learning Revolution (2012-2020)",
"path": "/knowledge-base/history/deep-learning-era/",
"similarity": 17
},
{
"id": "miri-era",
"title": "The MIRI Era (2000-2015)",
"path": "/knowledge-base/history/miri-era/",
"similarity": 17
},
{
"id": "anthropic",
"title": "Anthropic",
"path": "/knowledge-base/organizations/anthropic/",
"similarity": 17
}
]
},
"changeHistory": [
{
"date": "2026-02-18",
"branch": "claude/audit-webpage-errors-11sSF",
"title": "Fix factual errors found in wiki audit",
"summary": "Systematically audited ~35+ high-risk wiki pages for factual errors and hallucinations using parallel background agents plus direct reading. Fixed 13 confirmed errors across 11 files."
}
],
"coverage": {
"passing": 5,
"total": 13,
"targets": {
"tables": 13,
"diagrams": 1,
"internalLinks": 26,
"externalLinks": 16,
"footnotes": 10,
"references": 10
},
"actuals": {
"tables": 1,
"diagrams": 0,
"internalLinks": 21,
"externalLinks": 0,
"footnotes": 0,
"references": 4,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "green",
"overview": "green",
"tables": "amber",
"diagrams": "red",
"internalLinks": "amber",
"externalLinks": "red",
"footnotes": "red",
"references": "amber",
"quotes": "red",
"accuracy": "red"
},
"editHistoryCount": 1,
"ratingsString": "N:2 R:3.5 A:1.5 C:5"
},
"readerRank": 421,
"researchRank": 372,
"recommendedScore": 90.86
}
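The coverage block above reports traffic-light statuses against per-item targets. A plausible reconstruction of those thresholds — a guess, not the site's documented rule: green when the actual meets its target, amber when it falls short but is non-zero, red when it is zero — is sketched below; applied to this record's targets and actuals it reproduces the amber/red pattern shown.

```ts
// Hypothetical reconstruction of the coverage traffic lights; the real thresholds are not documented here.
type Status = "green" | "amber" | "red";

function coverageStatus(actual: number, target: number): Status {
  if (actual >= target) return "green"; // target met
  if (actual > 0) return "amber";       // partial progress
  return "red";                         // nothing yet
}

// Applied to the targets and actuals from the record above:
const targets = { tables: 13, diagrams: 1, internalLinks: 26, externalLinks: 16, footnotes: 10, references: 10 };
const actuals = { tables: 1, diagrams: 0, internalLinks: 21, externalLinks: 0, footnotes: 0, references: 4 };

for (const key of Object.keys(targets) as (keyof typeof targets)[]) {
  console.log(key, coverageStatus(actuals[key], targets[key]));
  // tables amber, diagrams red, internalLinks amber, externalLinks red, footnotes red, references amber
}
```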
External Links
{
"wikipedia": "https://en.wikipedia.org/wiki/Ilya_Sutskever",
"wikidata": "https://www.wikidata.org/wiki/Q21712134",
"grokipedia": "https://grokipedia.com/page/Ilya_Sutskever"
}
Backlinks (23)
| id | title | type | relationship |
|---|---|---|---|
| openai | OpenAI | organization | leads-to |
| ssi | Safe Superintelligence Inc (SSI) | organization | — |
| case-against-xrisk | The Case AGAINST AI Existential Risk | argument | — |
| case-for-xrisk | The Case FOR AI Existential Risk | argument | — |
| scaling-debate | Is Scaling All You Need? | crux | — |
| deep-learning-era | Deep Learning Revolution (2012-2020) | historical | — |
| mainstream-era | Mainstream Era (2020-Present) | historical | — |
| dense-transformers | Dense Transformers | concept | — |
| safety-capability-tradeoff | Safety-Capability Tradeoff Model | analysis | — |
| scaling-laws | AI Scaling Laws | concept | — |
| fli | Future of Life Institute (FLI) | organization | — |
| openai-foundation | OpenAI Foundation | organization | — |
| elon-musk | Elon Musk (AI Industry) | person | — |
| helen-toner | Helen Toner | person | — |
| __index__/knowledge-base/people | People | concept | — |
| jan-leike | Jan Leike | person | — |
| leopold-aschenbrenner | Leopold Aschenbrenner | person | — |
| sam-altman | Sam Altman | person | — |
| yann-lecun-predictions | Yann LeCun: Track Record | concept | — |
| ai-assisted | AI-Assisted Alignment | approach | — |
| corporate-influence | Corporate Influence on AI Policy | crux | — |
| lab-culture | AI Lab Safety Culture | approach | — |
| research-agendas | AI Alignment Research Agenda Comparison | crux | — |
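Finally, a purely illustrative sketch of how a backlink table like the one above could be assembled from database.json. The `internalLinkTargets` field and the array layout of the file are assumptions; the excerpt above does not show how outgoing links are actually stored.

```ts
// Hypothetical: assumes each record in database.json lists the paths it links to.
import { readFileSync } from "node:fs";

interface PageRecord {
  id: string;
  title: string;
  entityType?: string | null;
  path: string;
  internalLinkTargets?: string[]; // assumed field, not present in the excerpt above
}

const db: PageRecord[] = JSON.parse(readFileSync("database.json", "utf8"));

// A backlink is any page whose outgoing internal links include this page's path.
const backlinks = db.filter((page) =>
  page.internalLinkTargets?.includes("/knowledge-base/people/ilya-sutskever/"),
);

console.log(backlinks.length); // 23 for this record
```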