Longterm Wiki

Automation Bias (AI Systems)

automation-bias · risk · Path: /knowledge-base/risks/automation-bias/
Entity ID (EID): E32
← Back to page · 7 backlinks · Quality: 56 · Updated: 2026-03-13
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "automation-bias",
  "numericId": null,
  "path": "/knowledge-base/risks/automation-bias/",
  "filePath": "knowledge-base/risks/automation-bias.mdx",
  "title": "Automation Bias (AI Systems)",
  "quality": 56,
  "readerImportance": 15.5,
  "researchImportance": 21,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": "amplifier",
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive review of automation bias showing physician accuracy drops from 92.8% to 23.6% with incorrect AI guidance, 78% of users accept AI outputs without scrutiny, and LLM hallucination rates reach 23-79% depending on context. Documents skill degradation across healthcare, legal, and other domains, with mixed evidence on mitigation effectiveness.",
  "description": "The tendency to over-trust AI systems and accept their outputs without appropriate scrutiny. Research shows physician accuracy drops from 92.8% to 23.6% when AI provides incorrect guidance, while 78% of users rely on AI outputs without scrutiny. NHTSA reports 392 crashes involving driver assistance systems in 10 months.",
  "ratings": {
    "novelty": 3.5,
    "rigor": 6,
    "actionability": 4.5,
    "completeness": 6.5
  },
  "category": "risks",
  "subcategory": "accident",
  "clusters": [
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 2923,
    "tableCount": 6,
    "diagramCount": 1,
    "internalLinks": 4,
    "externalLinks": 39,
    "footnoteCount": 0,
    "bulletRatio": 0.03,
    "sectionCount": 26,
    "hasOverview": false,
    "structuralScore": 14
  },
  "suggestedQuality": 93,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 2923,
  "unconvertedLinks": [
    {
      "text": "AI & Society, 2025",
      "url": "https://link.springer.com/article/10.1007/s00146-025-02422-7",
      "resourceId": "a96cbf6f98644f2f",
      "resourceTitle": "2025 review in AI & Society"
    },
    {
      "text": "2024 Oxford study on national security contexts",
      "url": "https://academic.oup.com/isq/article/68/2/sqae020/7638566",
      "resourceId": "b9b538f4765a69af",
      "resourceTitle": "A 2024 study in International Studies Quarterly"
    },
    {
      "text": "comprehensive 2025 review in AI & Society",
      "url": "https://link.springer.com/article/10.1007/s00146-025-02422-7",
      "resourceId": "a96cbf6f98644f2f",
      "resourceTitle": "2025 review in AI & Society"
    },
    {
      "text": "Vered et al. (2023)",
      "url": "https://link.springer.com/article/10.1007/s00146-025-02422-7",
      "resourceId": "a96cbf6f98644f2f",
      "resourceTitle": "2025 review in AI & Society"
    },
    {
      "text": "Cecil et al. (2024)",
      "url": "https://link.springer.com/article/10.1007/s00146-025-02422-7",
      "resourceId": "a96cbf6f98644f2f",
      "resourceTitle": "2025 review in AI & Society"
    },
    {
      "text": "Naiseh et al. 2023",
      "url": "https://link.springer.com/article/10.1007/s00146-025-02422-7",
      "resourceId": "a96cbf6f98644f2f",
      "resourceTitle": "2025 review in AI & Society"
    },
    {
      "text": "Public attitudes remain skeptical",
      "url": "https://www.pewresearch.org/",
      "resourceId": "3aecdca4bc8ea49c",
      "resourceTitle": "Pew Research: Institutional Trust"
    }
  ],
  "unconvertedLinkCount": 7,
  "convertedLinkCount": 0,
  "backlinkCount": 7,
  "hallucinationRisk": {
    "level": "medium",
    "score": 55,
    "factors": [
      "no-citations"
    ]
  },
  "entityType": "risk",
  "redundancy": {
    "maxSimilarity": 18,
    "similarPages": [
      {
        "id": "automation-bias-cascade",
        "title": "Automation Bias Cascade Model",
        "path": "/knowledge-base/models/automation-bias-cascade/",
        "similarity": 18
      },
      {
        "id": "ai-forecasting",
        "title": "AI-Augmented Forecasting",
        "path": "/knowledge-base/responses/ai-forecasting/",
        "similarity": 18
      },
      {
        "id": "distributional-shift",
        "title": "AI Distributional Shift",
        "path": "/knowledge-base/risks/distributional-shift/",
        "similarity": 16
      },
      {
        "id": "epistemic-sycophancy",
        "title": "Epistemic Sycophancy",
        "path": "/knowledge-base/risks/epistemic-sycophancy/",
        "similarity": 16
      },
      {
        "id": "institutional-capture",
        "title": "AI-Driven Institutional Decision Capture",
        "path": "/knowledge-base/risks/institutional-capture/",
        "similarity": 16
      }
    ]
  },
  "coverage": {
    "passing": 5,
    "total": 13,
    "targets": {
      "tables": 12,
      "diagrams": 1,
      "internalLinks": 23,
      "externalLinks": 15,
      "footnotes": 9,
      "references": 9
    },
    "actuals": {
      "tables": 6,
      "diagrams": 1,
      "internalLinks": 4,
      "externalLinks": 39,
      "footnotes": 0,
      "references": 3,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "red",
      "tables": "amber",
      "diagrams": "green",
      "internalLinks": "amber",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "amber",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:3.5 R:6 A:4.5 C:6.5"
  },
  "readerRank": 550,
  "researchRank": 481,
  "recommendedScore": 141.57
}
External Links
{
  "lesswrong": "https://www.lesswrong.com/tag/automation",
  "eaForum": "https://forum.effectivealtruism.org/topics/automation"
}
Backlinks (7)
id · title · type · relationship
expertise-atrophy-progression · Expertise Atrophy Progression Model · analysis · related
expertise-atrophy-cascade · Expertise Atrophy Cascade Model · analysis · related
hybrid-systems · AI-Human Hybrid Systems · approach
automation-bias-cascade · Automation Bias Cascade Model · analysis
mit-ai-risk-repository · MIT AI Risk Repository · project
accident-overview · Accident Risks (Overview) · concept
institutional-capture · AI-Driven Institutional Decision Capture · risk
Longterm Wiki