Longterm Wiki

Yoshua Bengio

yoshua-bengio · person · Path: /knowledge-base/people/yoshua-bengio/
Entity ID (EID): E380
34 backlinks · Quality: 39 · Updated: 2026-03-13
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "yoshua-bengio",
  "numericId": null,
  "path": "/knowledge-base/people/yoshua-bengio/",
  "filePath": "knowledge-base/people/yoshua-bengio.mdx",
  "title": "Yoshua Bengio",
  "quality": 39,
  "readerImportance": 26.5,
  "researchImportance": 33,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive biographical overview of Yoshua Bengio's transition from deep learning pioneer (Turing Award 2018) to AI safety advocate, documenting his 2020 pivot at Mila toward safety research, co-signing of the 2023 extinction risk statement, and policy advocacy positions supporting regulation. Details his technical safety research areas (mechanistic interpretability, causal AI, consciousness research) and timeline estimates suggesting existential risk possible within 15-20 years if safety lags capabilities.",
  "description": "Turing Award winner and deep learning pioneer who became a prominent AI safety advocate, co-founding safety research initiatives at Mila and co-signing the 2023 AI extinction risk statement",
  "ratings": {
    "novelty": 2.5,
    "rigor": 4,
    "actionability": 2,
    "completeness": 6.5
  },
  "category": "people",
  "subcategory": "safety-researchers",
  "clusters": [
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 1777,
    "tableCount": 10,
    "diagramCount": 0,
    "internalLinks": 35,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.32,
    "sectionCount": 35,
    "hasOverview": true,
    "structuralScore": 10
  },
  "suggestedQuality": 67,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 1777,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 15,
  "backlinkCount": 34,
  "hallucinationRisk": {
    "level": "high",
    "score": 85,
    "factors": [
      "biographical-claims",
      "no-citations",
      "low-quality-score",
      "few-external-sources"
    ]
  },
  "entityType": "person",
  "redundancy": {
    "maxSimilarity": 17,
    "similarPages": [
      {
        "id": "geoffrey-hinton",
        "title": "Geoffrey Hinton",
        "path": "/knowledge-base/people/geoffrey-hinton/",
        "similarity": 17
      },
      {
        "id": "uk-aisi",
        "title": "UK AI Safety Institute",
        "path": "/knowledge-base/organizations/uk-aisi/",
        "similarity": 14
      },
      {
        "id": "dan-hendrycks",
        "title": "Dan Hendrycks",
        "path": "/knowledge-base/people/dan-hendrycks/",
        "similarity": 14
      },
      {
        "id": "dario-amodei",
        "title": "Dario Amodei",
        "path": "/knowledge-base/people/dario-amodei/",
        "similarity": 14
      },
      {
        "id": "risk-activation-timeline",
        "title": "Risk Activation Timeline Model",
        "path": "/knowledge-base/models/risk-activation-timeline/",
        "similarity": 13
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-16",
      "branch": "claude/investigate-arxiv-paper-UmGPu",
      "title": "Singapore Consensus on AI Safety",
      "summary": "Investigated arXiv:2506.20702 (The Singapore Consensus on Global AI Safety Research Priorities) and integrated it into the wiki. Updated the international-summits page with a new SCAI section and Mermaid diagram, fixed the broken Singapore Consensus resource in web-other.yaml, updated Bengio/Russell/Tegmark pages with references, created a new dedicated singapore-consensus page with entity E694, and registered the entity in responses.yaml.",
      "pr": 157
    }
  ],
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 7,
      "diagrams": 1,
      "internalLinks": 14,
      "externalLinks": 9,
      "footnotes": 5,
      "references": 5
    },
    "actuals": {
      "tables": 10,
      "diagrams": 0,
      "internalLinks": 35,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 14,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "green",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "editHistoryCount": 1,
    "ratingsString": "N:2.5 R:4 A:2 C:6.5"
  },
  "readerRank": 479,
  "researchRank": 393,
  "recommendedScore": 112.86
}
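For orientation, a minimal sketch of how a record like this could be assembled at build time, per the description above (frontmatter + entity YAML + computed metrics). The function name, field choices, and merge precedence are assumptions for illustration, not the wiki's actual build code; only the gray-matter and yaml package calls are real APIs.

// Hypothetical build-time merge producing one database.json record.
// Helper names and merge order are assumed, not taken from the wiki's code.
import { readFileSync } from "node:fs";
import matter from "gray-matter";           // parses MDX frontmatter
import { parse as parseYaml } from "yaml";  // parses the entity YAML

function buildPageRecord(mdxPath: string, entityYamlPath: string) {
  // 1. Authored fields from MDX frontmatter (title, quality, llmSummary, ...).
  const { data: frontmatter, content } = matter(readFileSync(mdxPath, "utf8"));

  // 2. Registry fields from the entity YAML (entityType, EID, external links, ...).
  const entity = parseYaml(readFileSync(entityYamlPath, "utf8"));

  // 3. Metrics computed from the page body at build time (wordCount, link counts, ...).
  const metrics = {
    wordCount: content.split(/\s+/).filter(Boolean).length,
    internalLinks: (content.match(/\]\(\/knowledge-base\//g) ?? []).length,
    externalLinks: (content.match(/\]\(https?:\/\//g) ?? []).length,
  };

  // Assumed precedence on key collisions: entity YAML wins over frontmatter.
  return { ...frontmatter, ...entity, metrics };
}

// The coverage "items" above are consistent with a simple threshold rule
// (assumed): an item is green when its actual meets its target,
// e.g. tables 10/7 -> green, diagrams 0/1 -> red.
const itemStatus = (actual: number, target: number): "green" | "red" =>
  actual >= target ? "green" : "red";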
External Links
{
  "wikipedia": "https://en.wikipedia.org/wiki/Yoshua_Bengio",
  "wikidata": "https://www.wikidata.org/wiki/Q3572699",
  "grokipedia": "https://grokipedia.com/page/Yoshua_Bengio"
}
Backlinks (34)
id | title | type | relationship
palisade-research | Palisade Research | organization |
dan-hendrycks | Dan Hendrycks | person |
geoffrey-hinton | Geoffrey Hinton | person |
max-tegmark | Max Tegmark | person |
accident-risks | AI Accident Risk Cruxes | crux |
case-against-xrisk | The Case AGAINST AI Existential Risk | argument |
pause-debate | Should We Pause AI Development? | crux |
ea-longtermist-wins-losses | EA and Longtermist Wins and Losses | concept |
mainstream-era | Mainstream Era (2020-Present) | historical |
provable-safe | Provable / Guaranteed Safe AI | concept |
cais | CAIS (Center for AI Safety) | organization |
far-ai | FAR AI | organization |
fli | Future of Life Institute (FLI) | organization |
leading-the-future | Leading the Future super PAC | organization |
openai | OpenAI | organization |
pause-ai | Pause AI | organization |
uk-aisi | UK AI Safety Institute | organization |
demis-hassabis | Demis Hassabis | person |
elon-musk | Elon Musk (AI Industry) | person |
__index__/knowledge-base/people | People | concept |
stuart-russell | Stuart Russell | person |
yann-lecun-predictions | Yann LeCun: Track Record | concept |
yann-lecun | Yann LeCun | person |
california-sb1047 | California SB 1047 | policy |
eu-ai-act | EU AI Act | policy |
eval-saturation | Eval Saturation & The Evals Gap | approach |
field-building-analysis | AI Safety Field Building Analysis | approach |
governance-policy | AI Governance and Policy | crux |
intervention-portfolio | AI Safety Intervention Portfolio | approach |
pause-moratorium | Pause / Moratorium | policy |
pause | Pause Advocacy | approach |
provably-safe | Provably Safe AI (davidad agenda) | approach |
singapore-consensus | Singapore Consensus on AI Safety Research Priorities | policy |
whistleblower-protections | AI Whistleblower Protections | policy |