Stuart Russell
stuart-russell · person · Path: /knowledge-base/people/stuart-russell/
Entity ID (EID): E290
Page Record: database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time (a sketch of the merge follows; the full record is below it)
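A minimal sketch of what that merge might look like. The function name, input shapes, and precedence order are assumptions, not the site's actual build code; field names are taken from the record below.

```ts
// Hypothetical sketch of the build-time merge; helper names and the
// precedence order are assumptions, not the site's actual build code.

type PageRecord = Record<string, unknown>;

function buildPageRecord(
  frontmatter: PageRecord,     // parsed from the page's MDX frontmatter
  entityYaml: PageRecord,      // parsed from the entity's YAML definition
  computedMetrics: PageRecord, // word counts, links, coverage, ranks, etc.
): PageRecord {
  // Assume later sources win on key collisions.
  return { ...frontmatter, ...entityYaml, ...computedMetrics };
}

// Example using fields visible in the record below:
const record = buildPageRecord(
  { id: "stuart-russell", title: "Stuart Russell", quality: 30 },
  { entityType: "person", category: "people" },
  { wordCount: 4135, backlinkCount: 42, recommendedScore: 95.11 },
);
console.log(record.recommendedScore); // 95.11
```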
{
"id": "stuart-russell",
"numericId": null,
"path": "/knowledge-base/people/stuart-russell/",
"filePath": "knowledge-base/people/stuart-russell.mdx",
"title": "Stuart Russell",
"quality": 30,
"readerImportance": 26.5,
"researchImportance": 12,
"tacticalValue": null,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "Stuart Russell (born 1962) is a British computer scientist and UC Berkeley professor who co-authored the dominant AI textbook 'Artificial Intelligence: A Modern Approach' (used in over 1,500 universities), founded CHAI in 2016 with initial funding from Open Philanthropy (now Coefficient Giving), and authored 'Human Compatible' (2019), which proposes cooperative inverse reinforcement learning where AI systems learn human preferences from observation rather than optimizing fixed objectives. He views AI existential risk as significant — a concern he has stated is comparable in seriousness to nuclear war and climate change — and has argued that technical solutions are tractable through a paradigm shift in how AI systems are designed. He has been active in both academic AI safety research and policy, including U.S. Senate testimony (2023), the 2021 BBC Reith Lectures, and autonomous weapons advocacy.",
"description": "UC Berkeley professor, CHAI founder, author of 'Human Compatible'",
"ratings": {
"novelty": 2,
"rigor": 4,
"actionability": 2,
"completeness": 6
},
"category": "people",
"subcategory": "safety-researchers",
"clusters": [
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 4135,
"tableCount": 1,
"diagramCount": 0,
"internalLinks": 22,
"externalLinks": 0,
"footnoteCount": 0,
"bulletRatio": 0.44,
"sectionCount": 33,
"hasOverview": true,
"structuralScore": 9
},
"suggestedQuality": 60,
"updateFrequency": 45,
"evergreen": true,
"wordCount": 4135,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 0,
"backlinkCount": 42,
"citationHealth": {
"total": 2,
"withQuotes": 1,
"verified": 1,
"accuracyChecked": 1,
"accurate": 0,
"inaccurate": 0,
"avgScore": 1
},
"hallucinationRisk": {
"level": "high",
"score": 85,
"factors": [
"biographical-claims",
"no-citations",
"low-quality-score",
"few-external-sources"
]
},
"entityType": "person",
"redundancy": {
"maxSimilarity": 18,
"similarPages": [
{
"id": "miri-era",
"title": "The MIRI Era (2000-2015)",
"path": "/knowledge-base/history/miri-era/",
"similarity": 18
},
{
"id": "deep-learning-era",
"title": "Deep Learning Revolution (2012-2020)",
"path": "/knowledge-base/history/deep-learning-era/",
"similarity": 17
},
{
"id": "eliezer-yudkowsky",
"title": "Eliezer Yudkowsky",
"path": "/knowledge-base/people/eliezer-yudkowsky/",
"similarity": 17
},
{
"id": "early-warnings",
"title": "Early Warnings (1950s-2000)",
"path": "/knowledge-base/history/early-warnings/",
"similarity": 16
},
{
"id": "dan-hendrycks",
"title": "Dan Hendrycks",
"path": "/knowledge-base/people/dan-hendrycks/",
"similarity": 16
}
]
},
"changeHistory": [
{
"date": "2026-02-23",
"branch": "feat/batch-improve-high-risk-pages",
"title": "Auto-improve (standard): Stuart Russell",
"summary": "Improved \"Stuart Russell\" via standard pipeline (1324.8s). Quality score: 84. Issues resolved: EntityLink E47 is assigned to 'CAIS' in the Quick Assessment; EntityLink E521 is used for 'Coefficient Giving' in the Rese; The frontmatter date field is 'lastEdited: 2026-02-23' — thi.",
"duration": "1324.8s",
"cost": "$5-8"
},
{
"date": "2026-02-18",
"branch": "claude/audit-webpage-errors-X4jHg",
"title": "Audit wiki pages for factual errors and hallucinations",
"summary": "Systematic audit of ~20 wiki pages for factual errors, hallucinations, and inconsistencies. Found and fixed 25+ confirmed errors across 17 pages, including wrong dates, fabricated statistics, false attributions, missing major events, broken entity references, misattributed techniques, and internal inconsistencies."
},
{
"date": "2026-02-16",
"branch": "claude/investigate-arxiv-paper-UmGPu",
"title": "Singapore Consensus on AI Safety",
"summary": "Investigated arXiv:2506.20702 (The Singapore Consensus on Global AI Safety Research Priorities) and integrated it into the wiki. Updated the international-summits page with a new SCAI section and Mermaid diagram, fixed the broken Singapore Consensus resource in web-other.yaml, updated Bengio/Russell/Tegmark pages with references, created a new dedicated singapore-consensus page with entity E694, and registered the entity in responses.yaml.",
"pr": 157
}
],
"coverage": {
"passing": 5,
"total": 13,
"targets": {
"tables": 17,
"diagrams": 2,
"internalLinks": 33,
"externalLinks": 21,
"footnotes": 12,
"references": 12
},
"actuals": {
"tables": 1,
"diagrams": 0,
"internalLinks": 22,
"externalLinks": 0,
"footnotes": 0,
"references": 2,
"quotesWithQuotes": 1,
"quotesTotal": 2,
"accuracyChecked": 1,
"accuracyTotal": 2
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "green",
"overview": "green",
"tables": "amber",
"diagrams": "red",
"internalLinks": "amber",
"externalLinks": "red",
"footnotes": "red",
"references": "amber",
"quotes": "amber",
"accuracy": "amber"
},
"editHistoryCount": 3,
"ratingsString": "N:2 R:4 A:2 C:6"
},
"readerRank": 478,
"researchRank": 541,
"recommendedScore": 95.11
}
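The coverage block above pairs per-item targets with actuals and a green/amber/red status. A small sketch of a threshold rule consistent with this record's values; the real rule and cutoffs are assumptions.

```ts
// A threshold rule consistent with this record's coverage values;
// the actual rule and cutoffs are assumptions.

type Status = "green" | "amber" | "red";

function coverageStatus(actual: number, target: number): Status {
  if (actual >= target) return "green"; // target met
  if (actual === 0) return "red";       // nothing present at all
  return "amber";                       // partial progress
}

const targets = { tables: 17, diagrams: 2, internalLinks: 33,
                  externalLinks: 21, footnotes: 12, references: 12 };
const actuals = { tables: 1, diagrams: 0, internalLinks: 22,
                  externalLinks: 0, footnotes: 0, references: 2 };

const items = Object.fromEntries(
  (Object.keys(targets) as (keyof typeof targets)[])
    .map((k) => [k, coverageStatus(actuals[k], targets[k])]),
);
// → { tables: "amber", diagrams: "red", internalLinks: "amber",
//     externalLinks: "red", footnotes: "red", references: "amber" }
```

Under this rule the six metric items above come out amber or red exactly as shown, and with the five boolean checks (llmSummary through overview) green, the green count matches the record's passing: 5 of 13.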
External Links

{
"wikipedia": "https://en.wikipedia.org/wiki/Stuart_J._Russell",
"wikidata": "https://www.wikidata.org/wiki/Q7627055",
"grokipedia": "https://grokipedia.com/page/Stuart_J._Russell"
}

Backlinks (42)
| id | title | type | relationship |
|---|---|---|---|
| pause-moratorium | Pause / Moratorium | policy | — |
| self-improvement | Self-Improvement and Recursive Enhancement | capability | — |
| case-against-xrisk | The Case AGAINST AI Existential Risk | argument | — |
| why-alignment-hard | Why Alignment Might Be Hard | argument | — |
| deep-learning-era | Deep Learning Revolution (2012-2020) | historical | — |
| mainstream-era | Mainstream Era (2020-Present) | historical | — |
| miri-era | The MIRI Era (2000-2015) | historical | — |
| provable-safe | Provable / Guaranteed Safe AI | concept | — |
| whole-brain-emulation | Whole Brain Emulation | capability | — |
| instrumental-convergence-framework | Instrumental Convergence Framework | analysis | — |
| scheming-likelihood-model | Scheming Likelihood Assessment | analysis | — |
| warning-signs-model | Warning Signs Model | analysis | — |
| cais | CAIS (Center for AI Safety) | organization | — |
| cea | Centre for Effective Altruism | organization | — |
| chai | CHAI (Center for Human-Compatible AI) | organization | — |
| coefficient-giving | Coefficient Giving | organization | — |
| far-ai | FAR AI | organization | — |
| fli | Future of Life Institute (FLI) | organization | — |
| openai | OpenAI | organization | — |
| pause-ai | Pause AI | organization | — |
| safety-orgs-overview | AI Safety Organizations (Overview) | concept | — |
| schmidt-futures | Schmidt Futures | organization | — |
| geoffrey-hinton | Geoffrey Hinton | person | — |
| __index__/knowledge-base/people | People | concept | — |
| nick-bostrom | Nick Bostrom | person | — |
| paul-christiano | Paul Christiano | person | — |
| yoshua-bengio | Yoshua Bengio | person | — |
| california-sb1047 | California SB 1047 | policy | — |
| cirl | Cooperative IRL (CIRL) | approach | — |
| corporate | Corporate AI Safety Responses | approach | — |
| dangerous-cap-evals | Dangerous Capability Evaluations | approach | — |
| evaluation | AI Evaluation | approach | — |
| pause | Pause Advocacy | approach | — |
| provably-safe | Provably Safe AI (davidad agenda) | approach | — |
| singapore-consensus | Singapore Consensus on AI Safety Research Priorities | policy | — |
| training-programs | AI Safety Training Programs | approach | — |
| whistleblower-protections | AI Whistleblower Protections | policy | — |
| enfeeblement | AI-Induced Enfeeblement | risk | — |
| existential-risk | Existential Risk from AI | concept | — |
| lock-in | AI Value Lock-in | risk | — |
| steganography | AI Model Steganography | risk | — |
| optimistic | Optimistic Alignment Worldview | concept | — |