Longterm Wiki

AI Trust Cascade Failure

trust-cascade · risk · Path: /knowledge-base/risks/trust-cascade/
E360 · Entity ID (EID)
← Back to page · 9 backlinks · Quality: 55 · Updated: 2026-03-13
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "trust-cascade",
  "numericId": null,
  "path": "/knowledge-base/risks/trust-cascade/",
  "filePath": "knowledge-base/risks/trust-cascade.mdx",
  "title": "AI Trust Cascade Failure",
  "quality": 55,
  "readerImportance": 18,
  "researchImportance": 65.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": "pathway",
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Analysis of how declining institutional trust (media 31%, federal government 17% per 2024-2025 Gallup/Pew data) could create self-reinforcing collapse where no trusted entity can validate others, potentially accelerated by AI-enabled synthetic evidence and coordinated disinformation. Identifies cascade pathways through media, science, elections, and finance. Documents partisan trust polarization, deepfake-driven trust erosion, and the bootstrapping problem in recovery. Covers defensive strategies including C2PA provenance standards, content labeling, and open accountability protocols, but notes fundamental gaps in AI-resistant trust mechanisms.",
  "description": "A systemic risk where declining trust in institutions creates a cascading collapse, potentially accelerated by AI, where no trusted entity remains capable of rebuilding trust in others, threatening societal coordination and governance.",
  "ratings": {
    "novelty": 4.5,
    "rigor": 3,
    "actionability": 2.5,
    "completeness": 4
  },
  "category": "risks",
  "subcategory": "epistemic",
  "clusters": [
    "epistemics",
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 3231,
    "tableCount": 3,
    "diagramCount": 0,
    "internalLinks": 17,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.11,
    "sectionCount": 13,
    "hasOverview": false,
    "structuralScore": 10
  },
  "suggestedQuality": 67,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 3231,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 9,
  "backlinkCount": 9,
  "hallucinationRisk": {
    "level": "high",
    "score": 70,
    "factors": [
      "no-citations",
      "low-rigor-score",
      "few-external-sources"
    ]
  },
  "entityType": "risk",
  "redundancy": {
    "maxSimilarity": 20,
    "similarPages": [
      {
        "id": "trust-cascade-model",
        "title": "Trust Cascade Failure Model",
        "path": "/knowledge-base/models/trust-cascade-model/",
        "similarity": 20
      },
      {
        "id": "epistemic-security",
        "title": "AI-Era Epistemic Security",
        "path": "/knowledge-base/responses/epistemic-security/",
        "similarity": 19
      },
      {
        "id": "authentication-collapse-timeline",
        "title": "Authentication Collapse Timeline Model",
        "path": "/knowledge-base/models/authentication-collapse-timeline/",
        "similarity": 18
      },
      {
        "id": "disinformation",
        "title": "Disinformation",
        "path": "/knowledge-base/risks/disinformation/",
        "similarity": 18
      },
      {
        "id": "authoritarian-tools-diffusion",
        "title": "Authoritarian Tools Diffusion Model",
        "path": "/knowledge-base/models/authoritarian-tools-diffusion/",
        "similarity": 17
      }
    ]
  },
  "coverage": {
    "passing": 4,
    "total": 13,
    "targets": {
      "tables": 13,
      "diagrams": 1,
      "internalLinks": 26,
      "externalLinks": 16,
      "footnotes": 10,
      "references": 10
    },
    "actuals": {
      "tables": 3,
      "diagrams": 0,
      "internalLinks": 17,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 11,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "red",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "amber",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:4.5 R:3 A:2.5 C:4"
  },
  "readerRank": 535,
  "researchRank": 185,
  "recommendedScore": 140.86
}
External Links
{
  "lesswrong": "https://www.lesswrong.com/tag/trust"
}
Backlinks (9)
id · title · type · relationship
trust-cascade-model · Trust Cascade Failure Model · analysis · analyzes
epistemic-collapse-threshold · Epistemic Collapse Threshold Model · analysis · component
ai-enabled-untraceable-misuse · AI-Enabled Untraceable Misuse · risk
multipolar-trap-dynamics · Multipolar Trap Dynamics Model · analysis
deepfakes · Deepfakes · risk
epistemic-overview · Epistemic Risks (Overview) · concept
fraud · AI-Powered Fraud · risk
learned-helplessness · Epistemic Learned Helplessness · risk
scientific-corruption · Scientific Knowledge Corruption · risk
Longterm Wiki