Longterm Wiki

Epistemic Learned Helplessness

learned-helplessness · risk · Path: /knowledge-base/risks/learned-helplessness/
E187 — Entity ID (EID)
← Back to page · 5 backlinks · Quality: 53 · Updated: 2026-03-13
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "learned-helplessness",
  "numericId": null,
  "path": "/knowledge-base/risks/learned-helplessness/",
  "filePath": "knowledge-base/risks/learned-helplessness.mdx",
  "title": "Epistemic Learned Helplessness",
  "quality": 53,
  "readerImportance": 62,
  "researchImportance": 82,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": "outcome",
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Analyzes how AI-driven information environments induce epistemic learned helplessness (surrendering truth-seeking), presenting survey evidence showing 36% news avoidance and declining institutional trust (media 16%, tech 32%). Projects 55-65% helplessness rate by 2030 with democratic breakdown risks, recommending education interventions (67% improvement for lateral reading) and institutional authentication responses.",
  "description": "When AI-driven information environments induce mass abandonment of truth-seeking, creating vulnerable populations who stop distinguishing true from false information",
  "ratings": {
    "novelty": 4.5,
    "rigor": 5.8,
    "actionability": 4.2,
    "completeness": 6.5
  },
  "category": "risks",
  "subcategory": "epistemic",
  "clusters": [
    "ai-safety",
    "epistemics"
  ],
  "metrics": {
    "wordCount": 1523,
    "tableCount": 24,
    "diagramCount": 0,
    "internalLinks": 30,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.02,
    "sectionCount": 36,
    "hasOverview": true,
    "structuralScore": 11
  },
  "suggestedQuality": 73,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 1523,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 21,
  "backlinkCount": 5,
  "hallucinationRisk": {
    "level": "medium",
    "score": 60,
    "factors": [
      "no-citations",
      "few-external-sources"
    ]
  },
  "entityType": "risk",
  "redundancy": {
    "maxSimilarity": 11,
    "similarPages": [
      {
        "id": "epistemic-risks",
        "title": "AI Epistemic Cruxes",
        "path": "/knowledge-base/cruxes/epistemic-risks/",
        "similarity": 11
      },
      {
        "id": "epistemic-collapse",
        "title": "Epistemic Collapse",
        "path": "/knowledge-base/risks/epistemic-collapse/",
        "similarity": 11
      },
      {
        "id": "epistemic-overview",
        "title": "Epistemic Risks (Overview)",
        "path": "/knowledge-base/risks/epistemic-overview/",
        "similarity": 11
      },
      {
        "id": "deepfakes",
        "title": "Deepfakes",
        "path": "/knowledge-base/risks/deepfakes/",
        "similarity": 10
      },
      {
        "id": "reality-fragmentation",
        "title": "AI-Accelerated Reality Fragmentation",
        "path": "/knowledge-base/risks/reality-fragmentation/",
        "similarity": 10
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 6,
      "diagrams": 1,
      "internalLinks": 12,
      "externalLinks": 8,
      "footnotes": 5,
      "references": 5
    },
    "actuals": {
      "tables": 24,
      "diagrams": 0,
      "internalLinks": 30,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 18,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:4.5 R:5.8 A:4.2 C:6.5"
  },
  "readerRank": 224,
  "researchRank": 78,
  "recommendedScore": 158.54
}
External Links
{
  "eightyK": "https://80000hours.org/problem-profiles/gradual-disempowerment/"
}
Backlinks (5)
id · title · type · relationship
sycophancy-feedback-loop · Sycophancy Feedback Loop Model · analysis · leads-to
epistemic-collapse-threshold · Epistemic Collapse Threshold Model · analysis · outcome
hybrid-systems · AI-Human Hybrid Systems · approach
geoffrey-hinton · Geoffrey Hinton · person
epistemic-overview · Epistemic Risks (Overview) · concept
Longterm Wiki