Geoffrey Hinton
geoffrey-hinton — person — Path: /knowledge-base/people/geoffrey-hinton/
E149 — Entity ID (EID)
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "geoffrey-hinton",
"numericId": null,
"path": "/knowledge-base/people/geoffrey-hinton/",
"filePath": "knowledge-base/people/geoffrey-hinton.mdx",
"title": "Geoffrey Hinton",
"quality": 42,
"readerImportance": 28,
"researchImportance": 38.5,
"tacticalValue": null,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "Comprehensive biographical profile of Geoffrey Hinton documenting his 2023 shift from AI pioneer to safety advocate, estimating 10-20% extinction risk in 5-20 years. Covers his media strategy, policy influence, and distinctive \"honest uncertainty\" approach, but offers limited actionable guidance for prioritization beyond noting his role in legitimizing safety concerns.",
"description": "Turing Award winner and 'Godfather of AI' who left Google in 2023 to warn about 10-20% extinction risk from AI within 5-20 years, becoming a leading voice for AI safety advocacy",
"ratings": {
"novelty": 2.5,
"rigor": 4,
"actionability": 2,
"completeness": 6.5
},
"category": "people",
"subcategory": "safety-researchers",
"clusters": [
"ai-safety"
],
"metrics": {
"wordCount": 2002,
"tableCount": 13,
"diagramCount": 0,
"internalLinks": 48,
"externalLinks": 0,
"footnoteCount": 0,
"bulletRatio": 0.33,
"sectionCount": 30,
"hasOverview": true,
"structuralScore": 10
},
"suggestedQuality": 67,
"updateFrequency": 45,
"evergreen": true,
"wordCount": 2002,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 20,
"backlinkCount": 21,
"hallucinationRisk": {
"level": "high",
"score": 80,
"factors": [
"biographical-claims",
"no-citations",
"few-external-sources"
]
},
"entityType": "person",
"redundancy": {
"maxSimilarity": 17,
"similarPages": [
{
"id": "yoshua-bengio",
"title": "Yoshua Bengio",
"path": "/knowledge-base/people/yoshua-bengio/",
"similarity": 17
},
{
"id": "ai-impacts",
"title": "AI Impacts",
"path": "/knowledge-base/organizations/ai-impacts/",
"similarity": 13
},
{
"id": "holden-karnofsky",
"title": "Holden Karnofsky",
"path": "/knowledge-base/people/holden-karnofsky/",
"similarity": 13
},
{
"id": "toby-ord",
"title": "Toby Ord",
"path": "/knowledge-base/people/toby-ord/",
"similarity": 13
},
{
"id": "existential-risk",
"title": "Existential Risk from AI",
"path": "/knowledge-base/risks/existential-risk/",
"similarity": 13
}
]
},
"changeHistory": [
{
"date": "2026-02-18",
"branch": "claude/audit-webpage-errors-X4jHg",
"title": "Audit wiki pages for factual errors and hallucinations",
"summary": "Systematic audit of ~20 wiki pages for factual errors, hallucinations, and inconsistencies. Found and fixed 25+ confirmed errors across 17 pages, including wrong dates, fabricated statistics, false attributions, missing major events, broken entity references, misattributed techniques, and internal inconsistencies."
},
{
"date": "2026-02-18",
"branch": "claude/audit-webpage-errors-11sSF",
"title": "Fix factual errors found in wiki audit",
"summary": "Systematically audited ~35+ high-risk wiki pages for factual errors and hallucinations using parallel background agents plus direct reading. Fixed 13 confirmed errors across 11 files."
}
],
"coverage": {
"passing": 8,
"total": 13,
"targets": {
"tables": 8,
"diagrams": 1,
"internalLinks": 16,
"externalLinks": 10,
"footnotes": 6,
"references": 6
},
"actuals": {
"tables": 13,
"diagrams": 0,
"internalLinks": 48,
"externalLinks": 0,
"footnotes": 0,
"references": 15,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "green",
"overview": "green",
"tables": "green",
"diagrams": "red",
"internalLinks": "green",
"externalLinks": "red",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"editHistoryCount": 2,
"ratingsString": "N:2.5 R:4 A:2 C:6.5"
},
"readerRank": 467,
"researchRank": 355,
"recommendedScore": 119.66
}
External Links
{
"wikipedia": "https://en.wikipedia.org/wiki/Geoffrey_Hinton",
"wikidata": "https://www.wikidata.org/wiki/Q92894",
"grokipedia": "https://grokipedia.com/page/Geoffrey_Hinton"
}
Backlinks (21)
| id | title | type | relationship |
|---|---|---|---|
| ilya-sutskever | Ilya Sutskever | person | — |
| yoshua-bengio | Yoshua Bengio | person | — |
| accident-risks | AI Accident Risk Cruxes | crux | — |
| case-against-xrisk | The Case AGAINST AI Existential Risk | argument | — |
| deep-learning-era | Deep Learning Revolution (2012-2020) | historical | — |
| __index__/knowledge-base/history | History | concept | — |
| mainstream-era | Mainstream Era (2020-Present) | historical | — |
| warning-signs-model | Warning Signs Model | analysis | — |
| cais | CAIS (Center for AI Safety) | organization | — |
| leading-the-future | Leading the Future super PAC | organization | — |
| ssi | Safe Superintelligence Inc (SSI) | organization | — |
| dan-hendrycks | Dan Hendrycks | person | — |
| demis-hassabis | Demis Hassabis | person | — |
| __index__/knowledge-base/people | People | concept | — |
| yann-lecun | Yann LeCun | person | — |
| california-sb1047 | California SB 1047 | policy | — |
| field-building-analysis | AI Safety Field Building Analysis | approach | — |
| pause | Pause Advocacy | approach | — |
| whistleblower-protections | AI Whistleblower Protections | policy | — |
| existential-risk | Existential Risk from AI | concept | — |
| optimistic | Optimistic Alignment Worldview | concept | — |