Longterm Wiki

Yann LeCun

yann-lecun · person
Path: /knowledge-base/people/yann-lecun/
E582 — Entity ID (EID)
← Back to page · 24 backlinks · Quality: 41 · Updated: 2026-03-13
Page Recorddatabase.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "yann-lecun",
  "numericId": null,
  "path": "/knowledge-base/people/yann-lecun/",
  "filePath": "knowledge-base/people/yann-lecun.mdx",
  "title": "Yann LeCun",
  "quality": 41,
  "readerImportance": 61.5,
  "researchImportance": 29,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive biographical profile of Yann LeCun documenting his technical contributions (CNNs, JEPA), his ~0% AI extinction risk estimate, and his opposition to AI safety regulation including SB 1047. Includes detailed 'Statements & Track Record' section analyzing his prediction accuracy—noting strength in long-term architectural intuitions but pattern of underestimating near-term LLM capabilities. Catalogs debates with Hinton, Bengio, and Yudkowsky, and tracks his November 2025 departure from Meta to found AMI Labs.",
  "description": "Turing Award winner and 'Godfather of AI' who remains one of the most prominent skeptics of AI existential risk, arguing that concerns about superintelligent AI are premature and that AI systems can be designed to remain under human control",
  "ratings": {
    "novelty": 3.5,
    "rigor": 4.5,
    "actionability": 2,
    "completeness": 7.5
  },
  "category": "people",
  "subcategory": "lab-leadership",
  "clusters": [
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 4409,
    "tableCount": 25,
    "diagramCount": 1,
    "internalLinks": 20,
    "externalLinks": 18,
    "footnoteCount": 0,
    "bulletRatio": 0.1,
    "sectionCount": 54,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 4409,
  "unconvertedLinks": [
    {
      "text": "en.wikipedia.org",
      "url": "https://en.wikipedia.org/wiki/Yann_LeCun",
      "resourceId": "914e07c146555ae9",
      "resourceTitle": "Yann LeCun"
    },
    {
      "text": "Meta's Yann LeCun says worries about AI's existential threat are 'complete B.S.'",
      "url": "https://techcrunch.com/2024/10/12/metas-yann-lecun-says-worries-about-a-i-s-existential-threat-are-complete-b-s/",
      "resourceId": "61b8ab42c6b32b27",
      "resourceTitle": "TechCrunch, 2024"
    },
    {
      "text": "AI whiz Yann LeCun is already targeting a $1.5 billion valuation",
      "url": "https://fortune.com/2025/12/19/yann-lecun-ami-labs-ai-startup-valuation-meta-departure/",
      "resourceId": "96212024a0dc8d36",
      "resourceTitle": "Fortune - Yann LeCun AMI Valuation"
    },
    {
      "text": "Yann LeCun - Wikipedia",
      "url": "https://en.wikipedia.org/wiki/Yann_LeCun",
      "resourceId": "914e07c146555ae9",
      "resourceTitle": "Yann LeCun"
    }
  ],
  "unconvertedLinkCount": 4,
  "convertedLinkCount": 0,
  "backlinkCount": 24,
  "hallucinationRisk": {
    "level": "high",
    "score": 75,
    "factors": [
      "biographical-claims",
      "no-citations"
    ]
  },
  "entityType": "person",
  "redundancy": {
    "maxSimilarity": 16,
    "similarPages": [
      {
        "id": "miri-era",
        "title": "The MIRI Era (2000-2015)",
        "path": "/knowledge-base/history/miri-era/",
        "similarity": 16
      },
      {
        "id": "eliezer-yudkowsky",
        "title": "Eliezer Yudkowsky",
        "path": "/knowledge-base/people/eliezer-yudkowsky/",
        "similarity": 16
      },
      {
        "id": "ilya-sutskever",
        "title": "Ilya Sutskever",
        "path": "/knowledge-base/people/ilya-sutskever/",
        "similarity": 16
      },
      {
        "id": "case-against-xrisk",
        "title": "The Case AGAINST AI Existential Risk",
        "path": "/knowledge-base/debates/case-against-xrisk/",
        "similarity": 15
      },
      {
        "id": "goodfire",
        "title": "Goodfire",
        "path": "/knowledge-base/organizations/goodfire/",
        "similarity": 15
      }
    ]
  },
  "coverage": {
    "passing": 5,
    "total": 13,
    "targets": {
      "tables": 18,
      "diagrams": 2,
      "internalLinks": 35,
      "externalLinks": 22,
      "footnotes": 13,
      "references": 13
    },
    "actuals": {
      "tables": 25,
      "diagrams": 1,
      "internalLinks": 20,
      "externalLinks": 18,
      "footnotes": 0,
      "references": 3,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "amber",
      "internalLinks": "amber",
      "externalLinks": "amber",
      "footnotes": "red",
      "references": "amber",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:3.5 R:4.5 A:2 C:7.5"
  },
  "readerRank": 226,
  "researchRank": 423,
  "recommendedScore": 134.61
}
External Links
{
  "grokipedia": "https://grokipedia.com/page/Yann_LeCun"
}
Backlinks (24)
id · title · type/relationship
case-against-xrisk · The Case AGAINST AI Existential Risk · argument
open-vs-closed · Open vs Closed Source AI · crux
pause-debate · Should We Pause AI Development? · crux
scaling-debate · Is Scaling All You Need? · crux
why-alignment-hard · Why Alignment Might Be Hard · argument
agi-timeline · AGI Timeline · concept
miri-era · The MIRI Era (2000-2015) · historical
world-models · World Models + Planning · capability
power-seeking-conditions · Power-Seeking Emergence Conditions Model · analysis
arc · ARC (Alignment Research Center) · organization
fli · Future of Life Institute (FLI) · organization
frontier-ai-comparison · Frontier AI Company Comparison (2026) · concept
meta-ai · Meta AI (FAIR) · organization
daniela-amodei · Daniela Amodei · person
eliezer-yudkowsky-predictions · Eliezer Yudkowsky: Track Record · concept
eliezer-yudkowsky · Eliezer Yudkowsky · person
elon-musk · Elon Musk (AI Industry) · person
geoffrey-hinton · Geoffrey Hinton · person
stuart-russell · Stuart Russell · person
yann-lecun-predictions · Yann LeCun: Track Record · concept
yoshua-bengio · Yoshua Bengio · person
california-sb1047 · California SB 1047 · policy
existential-risk · Existential Risk from AI · concept
optimistic · Optimistic Alignment Worldview · concept
Longterm Wiki