Longterm Wiki

AI Distributional Shift

Slug: distributional-shift · Type: risk · Path: /knowledge-base/risks/distributional-shift/
Entity ID (EID): E105
Backlinks: 10 · Quality: 91 · Updated: 2026-03-13
Page Record (database.json), merged from MDX frontmatter + Entity YAML + computed metrics at build time; a sketch of this merge follows the record below.
{
  "id": "distributional-shift",
  "numericId": null,
  "path": "/knowledge-base/risks/distributional-shift/",
  "filePath": "knowledge-base/risks/distributional-shift.mdx",
  "title": "AI Distributional Shift",
  "quality": 91,
  "readerImportance": 17,
  "researchImportance": 86,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": "amplifier",
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive analysis of distributional shift showing 40-45% accuracy drops when models encounter novel distributions (ObjectNet vs ImageNet), with 5,202 autonomous vehicle accidents and 15-30% medical AI degradation across hospitals documented through 2025. Current OOD detection achieves 60-92% accuracy depending on method, with benchmark gaps persisting despite significant research investment (\\$50-100M annually). Fundamental uncertainties remain about whether scale solves robustness, with MIT 2024 research showing fairness debiasing fails to transfer across institutions.",
  "description": "When AI systems fail due to differences between training and deployment contexts. Research shows 40-45% accuracy drops when models encounter novel distributions (ObjectNet vs ImageNet), with failures affecting autonomous vehicles, medical AI, and deployed ML systems at scale.",
  "ratings": {
    "novelty": 4.5,
    "rigor": 7,
    "actionability": 5.5,
    "completeness": 7.5
  },
  "category": "risks",
  "subcategory": "accident",
  "clusters": [
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 3621,
    "tableCount": 11,
    "diagramCount": 1,
    "internalLinks": 17,
    "externalLinks": 14,
    "footnoteCount": 0,
    "bulletRatio": 0,
    "sectionCount": 17,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 3621,
  "unconvertedLinks": [
    {
      "text": "WILDS benchmark",
      "url": "https://wilds.stanford.edu/",
      "resourceId": "f7c48e789ade0eeb",
      "resourceTitle": "WILDS benchmark"
    },
    {
      "text": "ObjectNet",
      "url": "https://objectnet.dev/",
      "resourceId": "ae4bad9e15b8df67",
      "resourceTitle": "Barbu et al. (2019)"
    },
    {
      "text": "ObjectNet",
      "url": "https://objectnet.dev/",
      "resourceId": "ae4bad9e15b8df67",
      "resourceTitle": "Barbu et al. (2019)"
    },
    {
      "text": "WILDS benchmark",
      "url": "https://wilds.stanford.edu/",
      "resourceId": "f7c48e789ade0eeb",
      "resourceTitle": "WILDS benchmark"
    }
  ],
  "unconvertedLinkCount": 4,
  "convertedLinkCount": 14,
  "backlinkCount": 10,
  "hallucinationRisk": {
    "level": "medium",
    "score": 35,
    "factors": [
      "no-citations",
      "high-rigor",
      "high-quality"
    ]
  },
  "entityType": "risk",
  "redundancy": {
    "maxSimilarity": 19,
    "similarPages": [
      {
        "id": "goal-misgeneralization",
        "title": "Goal Misgeneralization",
        "path": "/knowledge-base/risks/goal-misgeneralization/",
        "similarity": 19
      },
      {
        "id": "situational-awareness",
        "title": "Situational Awareness",
        "path": "/knowledge-base/capabilities/situational-awareness/",
        "similarity": 17
      },
      {
        "id": "scalable-oversight",
        "title": "Scalable Oversight",
        "path": "/knowledge-base/responses/scalable-oversight/",
        "similarity": 17
      },
      {
        "id": "mesa-optimization",
        "title": "Mesa-Optimization",
        "path": "/knowledge-base/risks/mesa-optimization/",
        "similarity": 17
      },
      {
        "id": "reward-hacking",
        "title": "Reward Hacking",
        "path": "/knowledge-base/risks/reward-hacking/",
        "similarity": 17
      }
    ]
  },
  "coverage": {
    "passing": 6,
    "total": 13,
    "targets": {
      "tables": 14,
      "diagrams": 1,
      "internalLinks": 29,
      "externalLinks": 18,
      "footnotes": 11,
      "references": 11
    },
    "actuals": {
      "tables": 11,
      "diagrams": 1,
      "internalLinks": 17,
      "externalLinks": 14,
      "footnotes": 0,
      "references": 11,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "amber",
      "diagrams": "green",
      "internalLinks": "amber",
      "externalLinks": "amber",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:4.5 R:7 A:5.5 C:7.5"
  },
  "readerRank": 541,
  "researchRank": 50,
  "recommendedScore": 212.36
}
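The header above describes database.json as a build-time merge of MDX frontmatter, entity YAML, and computed metrics. A minimal TypeScript sketch of how such a merge could work follows; the frontmatter splitting, the metric regexes, and the override order (frontmatter wins over entity fields) are all assumptions, not the wiki's confirmed build code.

```typescript
// Sketch of a build step that merges MDX frontmatter, entity YAML, and
// computed metrics into one page record. Helper logic is illustrative,
// not the wiki's actual implementation.
import { readFileSync } from "fs";
import * as yaml from "js-yaml";

interface PageMetrics {
  wordCount: number;
  tableCount: number;
  internalLinks: number;
  externalLinks: number;
}

// Count simple structural features of the MDX body.
function computeMetrics(body: string): PageMetrics {
  return {
    wordCount: body.split(/\s+/).filter(Boolean).length,
    // Count markdown table separator rows like |---|---|
    tableCount: (body.match(/^\|[ :-]+\|/gm) ?? []).length,
    internalLinks: (body.match(/\]\(\/knowledge-base\//g) ?? []).length,
    externalLinks: (body.match(/\]\(https?:\/\//g) ?? []).length,
  };
}

function buildRecord(mdxPath: string, entityYamlPath: string) {
  const raw = readFileSync(mdxPath, "utf8");
  // MDX frontmatter sits between the leading pair of --- fences.
  const [, fmBlock = "", body = ""] = raw.split(/^---$/m);
  const frontmatter = yaml.load(fmBlock) as Record<string, unknown>;
  const entity = yaml.load(readFileSync(entityYamlPath, "utf8")) as Record<string, unknown>;
  // Assumed precedence: frontmatter overrides entity fields,
  // and computed metrics are attached last.
  return { ...entity, ...frontmatter, metrics: computeMetrics(body) };
}
```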
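The coverage block pairs per-feature targets with actuals and a green/amber/red status. The exact thresholds aren't documented here; the statuses shown are consistent with a simple ratio rule (at or above target is green, at least half of target is amber, otherwise red), sketched below under that assumption.

```typescript
// Hypothetical reconstruction of the green/amber/red coverage statuses
// from the targets/actuals pairs in the record above. The 0.5 amber
// threshold is an assumption that happens to fit the data shown
// (e.g. tables 11/14 -> amber, footnotes 0/11 -> red, references 11/11 -> green).
type Status = "green" | "amber" | "red";

function coverageStatus(actual: number, target: number): Status {
  if (target === 0 || actual >= target) return "green";
  if (actual / target >= 0.5) return "amber";
  return "red";
}

const targets = { tables: 14, diagrams: 1, internalLinks: 29, externalLinks: 18, footnotes: 11, references: 11 };
const actuals = { tables: 11, diagrams: 1, internalLinks: 17, externalLinks: 14, footnotes: 0, references: 11 };

const items: Record<string, Status> = {};
for (const k of Object.keys(targets) as (keyof typeof targets)[]) {
  items[k] = coverageStatus(actuals[k], targets[k]);
}
// items -> { tables: "amber", diagrams: "green", internalLinks: "amber",
//            externalLinks: "amber", footnotes: "red", references: "green" }
```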
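The redundancy block reports 0-100 similarity scores against other pages (topping out at 19 for Goal Misgeneralization). The wiki's method isn't stated; a common approach is cosine similarity over text embeddings. Below is a runnable toy version, with a bag-of-words vector standing in for a real embedding model.

```typescript
// Illustrative similarity computation for the redundancy block above.
// The wiki's actual method is undocumented; this sketches the common
// embed-then-cosine approach and scales it to 0-100 integers like the
// "similarity": 19 values in the record.

function bagOfWords(text: string, vocab: string[]): number[] {
  const words = text.toLowerCase().split(/\W+/).filter(Boolean);
  return vocab.map((v) => words.filter((w) => w === v).length);
}

function cosine(a: number[], b: number[]): number {
  let dot = 0, na = 0, nb = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    na += a[i] * a[i];
    nb += b[i] * b[i];
  }
  return na && nb ? dot / (Math.sqrt(na) * Math.sqrt(nb)) : 0;
}

function similarityScore(pageA: string, pageB: string, vocab: string[]): number {
  return Math.round(100 * cosine(bagOfWords(pageA, vocab), bagOfWords(pageB, vocab)));
}
```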
External Links

No external links

Backlinks (10)
| id | title | type | relationship |
|----|-------|------|--------------|
| goal-misgeneralization-probability | Goal Misgeneralization Probability Model | analysis | related |
| alignment-robustness-trajectory | Alignment Robustness Trajectory | analysis | |
| reward-hacking-taxonomy | Reward Hacking Taxonomy and Severity Model | analysis | |
| technical-pathways | Technical Pathway Decomposition | analysis | |
| jan-leike | Jan Leike | person | |
| alignment | AI Alignment | approach | |
| evaluation | AI Evaluation | approach | |
| accident-overview | Accident Risks (Overview) | concept | |
| enfeeblement | AI-Induced Enfeeblement | risk | |
| mesa-optimization | Mesa-Optimization | risk | |