# Yoshua Bengio

Slug: yoshua-bengio · Entity type: person · Path: /knowledge-base/people/yoshua-bengio/
Entity ID (EID): E380

## Page Record

`database.json` — merged from MDX frontmatter + Entity YAML + computed metrics at build time (a sketch of this merge follows the record below).

```json
{
"id": "yoshua-bengio",
"numericId": null,
"path": "/knowledge-base/people/yoshua-bengio/",
"filePath": "knowledge-base/people/yoshua-bengio.mdx",
"title": "Yoshua Bengio",
"quality": 39,
"readerImportance": 26.5,
"researchImportance": 33,
"tacticalValue": null,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "Comprehensive biographical overview of Yoshua Bengio's transition from deep learning pioneer (Turing Award 2018) to AI safety advocate, documenting his 2020 pivot at Mila toward safety research, co-signing of the 2023 extinction risk statement, and policy advocacy positions supporting regulation. Details his technical safety research areas (mechanistic interpretability, causal AI, consciousness research) and timeline estimates suggesting existential risk possible within 15-20 years if safety lags capabilities.",
"description": "Turing Award winner and deep learning pioneer who became a prominent AI safety advocate, co-founding safety research initiatives at Mila and co-signing the 2023 AI extinction risk statement",
"ratings": {
"novelty": 2.5,
"rigor": 4,
"actionability": 2,
"completeness": 6.5
},
"category": "people",
"subcategory": "safety-researchers",
"clusters": [
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 1777,
"tableCount": 10,
"diagramCount": 0,
"internalLinks": 35,
"externalLinks": 0,
"footnoteCount": 0,
"bulletRatio": 0.32,
"sectionCount": 35,
"hasOverview": true,
"structuralScore": 10
},
"suggestedQuality": 67,
"updateFrequency": 45,
"evergreen": true,
"wordCount": 1777,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 15,
"backlinkCount": 34,
"hallucinationRisk": {
"level": "high",
"score": 85,
"factors": [
"biographical-claims",
"no-citations",
"low-quality-score",
"few-external-sources"
]
},
"entityType": "person",
"redundancy": {
"maxSimilarity": 17,
"similarPages": [
{
"id": "geoffrey-hinton",
"title": "Geoffrey Hinton",
"path": "/knowledge-base/people/geoffrey-hinton/",
"similarity": 17
},
{
"id": "uk-aisi",
"title": "UK AI Safety Institute",
"path": "/knowledge-base/organizations/uk-aisi/",
"similarity": 14
},
{
"id": "dan-hendrycks",
"title": "Dan Hendrycks",
"path": "/knowledge-base/people/dan-hendrycks/",
"similarity": 14
},
{
"id": "dario-amodei",
"title": "Dario Amodei",
"path": "/knowledge-base/people/dario-amodei/",
"similarity": 14
},
{
"id": "risk-activation-timeline",
"title": "Risk Activation Timeline Model",
"path": "/knowledge-base/models/risk-activation-timeline/",
"similarity": 13
}
]
},
"changeHistory": [
{
"date": "2026-02-16",
"branch": "claude/investigate-arxiv-paper-UmGPu",
"title": "Singapore Consensus on AI Safety",
"summary": "Investigated arXiv:2506.20702 (The Singapore Consensus on Global AI Safety Research Priorities) and integrated it into the wiki. Updated the international-summits page with a new SCAI section and Mermaid diagram, fixed the broken Singapore Consensus resource in web-other.yaml, updated Bengio/Russell/Tegmark pages with references, created a new dedicated singapore-consensus page with entity E694, and registered the entity in responses.yaml.",
"pr": 157
}
],
"coverage": {
"passing": 8,
"total": 13,
"targets": {
"tables": 7,
"diagrams": 1,
"internalLinks": 14,
"externalLinks": 9,
"footnotes": 5,
"references": 5
},
"actuals": {
"tables": 10,
"diagrams": 0,
"internalLinks": 35,
"externalLinks": 0,
"footnotes": 0,
"references": 14,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "green",
"overview": "green",
"tables": "green",
"diagrams": "red",
"internalLinks": "green",
"externalLinks": "red",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"editHistoryCount": 1,
"ratingsString": "N:2.5 R:4 A:2 C:6.5"
},
"readerRank": 479,
"researchRank": 393,
"recommendedScore": 112.86
}
```
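As noted above, each page record is assembled at build time from three sources. A minimal sketch of that merge, with illustrative values taken from this record — the source shapes and the `buildPageRecord` helper are hypothetical, not the site's actual build code:

```typescript
// Hypothetical sketch of the build-time merge described above; field names
// follow this record, but the source objects and helper are illustrative.

interface PageRecord {
  id: string;
  title: string;
  [key: string]: unknown;
}

// Parsed MDX frontmatter (title, quality scores, dates, ...).
const frontmatter = {
  id: "yoshua-bengio",
  title: "Yoshua Bengio",
  quality: 39,
  lastUpdated: "2026-03-13",
};

// Entity YAML for E380 (type, external identity links, ...).
const entityYaml = {
  entityType: "person",
  externalLinks: { wikipedia: "https://en.wikipedia.org/wiki/Yoshua_Bengio" },
};

// Metrics computed from the rendered page at build time.
const computed = {
  metrics: { wordCount: 1777, tableCount: 10, internalLinks: 35 },
  backlinkCount: 34,
};

// Later sources win on key collisions; nested objects are taken whole here,
// though the real pipeline may deep-merge them.
function buildPageRecord(...sources: object[]): PageRecord {
  return Object.assign({}, ...sources) as PageRecord;
}

console.log(JSON.stringify(buildPageRecord(frontmatter, entityYaml, computed), null, 2));
```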
## External Links

```json
{
"wikipedia": "https://en.wikipedia.org/wiki/Yoshua_Bengio",
"wikidata": "https://www.wikidata.org/wiki/Q3572699",
"grokipedia": "https://grokipedia.com/page/Yoshua_Bengio"
}
```
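Looking back at the page record's `coverage` block: a minimal sketch of how the green/red item statuses could be derived, assuming a simple `actual >= target` rule for the numeric targets. The rule is consistent with the values shown above, though the site's actual grading logic is not documented here:

```typescript
// Sketch of coverage grading, assuming a simple actual >= target rule for
// the numeric targets; values are copied from the record above.

const targets: Record<string, number> = {
  tables: 7, diagrams: 1, internalLinks: 14,
  externalLinks: 9, footnotes: 5, references: 5,
};
const actuals: Record<string, number> = {
  tables: 10, diagrams: 0, internalLinks: 35,
  externalLinks: 0, footnotes: 0, references: 14,
};

const items: Record<string, "green" | "red"> = {};
for (const key of Object.keys(targets)) {
  items[key] = (actuals[key] ?? 0) >= targets[key] ? "green" : "red";
}
const passing = Object.values(items).filter((s) => s === "green").length;

console.log(items);  // tables/internalLinks/references green, rest red
console.log(`${passing}/${Object.keys(targets).length} numeric targets passing`);  // 3/6
```

Under this rule the numeric items yield 3 greens; the record's five other passing items (llmSummary, schedule, entity, editHistory, overview) look like structural presence checks rather than count thresholds, which would bring the total to the recorded 8/13. The quotes and accuracy items (both 0/0) would need their own rule.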
## Backlinks (34)

| id | title | type | relationship |
|---|---|---|---|
| palisade-research | Palisade Research | organization | — |
| dan-hendrycks | Dan Hendrycks | person | — |
| geoffrey-hinton | Geoffrey Hinton | person | — |
| max-tegmark | Max Tegmark | person | — |
| accident-risks | AI Accident Risk Cruxes | crux | — |
| case-against-xrisk | The Case AGAINST AI Existential Risk | argument | — |
| pause-debate | Should We Pause AI Development? | crux | — |
| ea-longtermist-wins-losses | EA and Longtermist Wins and Losses | concept | — |
| mainstream-era | Mainstream Era (2020-Present) | historical | — |
| provable-safe | Provable / Guaranteed Safe AI | concept | — |
| cais | CAIS (Center for AI Safety) | organization | — |
| far-ai | FAR AI | organization | — |
| fli | Future of Life Institute (FLI) | organization | — |
| leading-the-future | Leading the Future super PAC | organization | — |
| openai | OpenAI | organization | — |
| pause-ai | Pause AI | organization | — |
| uk-aisi | UK AI Safety Institute | organization | — |
| demis-hassabis | Demis Hassabis | person | — |
| elon-musk | Elon Musk (AI Industry) | person | — |
| __index__/knowledge-base/people | People | concept | — |
| stuart-russell | Stuart Russell | person | — |
| yann-lecun-predictions | Yann LeCun: Track Record | concept | — |
| yann-lecun | Yann LeCun | person | — |
| california-sb1047 | California SB 1047 | policy | — |
| eu-ai-act | EU AI Act | policy | — |
| eval-saturation | Eval Saturation & The Evals Gap | approach | — |
| field-building-analysis | AI Safety Field Building Analysis | approach | — |
| governance-policy | AI Governance and Policy | crux | — |
| intervention-portfolio | AI Safety Intervention Portfolio | approach | — |
| pause-moratorium | Pause / Moratorium | policy | — |
| pause | Pause Advocacy | approach | — |
| provably-safe | Provably Safe AI (davidad agenda) | approach | — |
| singapore-consensus | Singapore Consensus on AI Safety Research Priorities | policy | — |
| whistleblower-protections | AI Whistleblower Protections | policy | — |
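The backlink list above is presumably derived by inverting the site's internal link graph, so that `backlinkCount` equals the number of pages whose converted links point here. A minimal sketch under that assumption — the `Page` shape and `collectBacklinks` are hypothetical:

```typescript
// Sketch of backlink collection by inverting the internal link graph.
// The Page shape and function name are hypothetical.

interface Page {
  id: string;
  title: string;
  entityType: string;
  convertedLinks: string[]; // ids of pages this page links out to
}

function collectBacklinks(pages: Page[], targetId: string) {
  return pages
    .filter((p) => p.convertedLinks.includes(targetId))
    .map((p) => ({ id: p.id, title: p.title, type: p.entityType }));
}

// Example: two pages, one of which links to this entity.
const pages: Page[] = [
  { id: "geoffrey-hinton", title: "Geoffrey Hinton", entityType: "person",
    convertedLinks: ["yoshua-bengio"] },
  { id: "openai", title: "OpenAI", entityType: "organization",
    convertedLinks: [] },
];
console.log(collectBacklinks(pages, "yoshua-bengio"));
// Run over the full site, the result length should match backlinkCount: 34.
```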