AI-Driven Trust Decline
ID: trust-decline · Type: risk · Path: /knowledge-base/risks/trust-decline/
Entity ID (EID): E362
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
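A minimal sketch of how such a merge might look at build time; the helper names, file layout, and computed fields below are assumptions for illustration, not the site's actual build code. The merged record itself follows.

```ts
// Hypothetical build-time merge (names assumed): authored MDX frontmatter,
// the shared Entity YAML record, and metrics computed from the MDX body
// are combined into one page record.
import { readFileSync } from "fs";
import matter from "gray-matter";
import { load } from "js-yaml";

function buildPageRecord(mdxPath: string, entityYamlPath: string) {
  // Authored fields (title, llmSummary, ratings, ...) from MDX frontmatter.
  const { data: frontmatter, content } = matter(readFileSync(mdxPath, "utf8"));

  // Shared entity fields (entityType, numericId, ...) from the Entity YAML.
  const entity = load(readFileSync(entityYamlPath, "utf8")) as Record<string, unknown>;

  // Metrics computed from the MDX body at build time.
  const metrics = {
    wordCount: content.split(/\s+/).filter(Boolean).length,
    internalLinks: (content.match(/\]\(\/knowledge-base\//g) ?? []).length,
    externalLinks: (content.match(/\]\(https?:\/\//g) ?? []).length,
  };

  // Later spreads win on key collisions: frontmatter, then entity, then metrics.
  return { ...frontmatter, ...entity, metrics };
}
```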
```json
{
  "id": "trust-decline",
  "numericId": null,
  "path": "/knowledge-base/risks/trust-decline/",
  "filePath": "knowledge-base/risks/trust-decline.mdx",
  "title": "AI-Driven Trust Decline",
  "quality": 55,
  "readerImportance": 62,
  "researchImportance": 19,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": "pathway",
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "US government trust declined from 73% (1958) to 17% (2025), with AI deepfakes projected to reach 8M by 2025 accelerating erosion through the 'liar's dividend' effect—where synthetic content possibility undermines all evidence. Media literacy interventions show d=0.60 effect size, while C2PA content authentication provides medium-high promise for verification, though adoption rates remain uncertain (10-60% by 2027).",
  "description": "The systematic decline in public confidence in institutions, media, and verification systems—accelerated by AI's capacity to fabricate evidence and exploit epistemic vulnerabilities. US government trust has fallen from 73% (1958) to 17% (2025), with AI-generated deepfakes projected to reach 8 million by 2025.",
  "ratings": {
    "novelty": 4.5,
    "rigor": 6,
    "actionability": 5,
    "completeness": 6.5
  },
  "category": "risks",
  "subcategory": "epistemic",
  "clusters": [
    "epistemics",
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 1510,
    "tableCount": 8,
    "diagramCount": 1,
    "internalLinks": 9,
    "externalLinks": 24,
    "footnoteCount": 0,
    "bulletRatio": 0.15,
    "sectionCount": 20,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 1510,
  "unconvertedLinks": [
    {
      "text": "C2PA standard",
      "url": "https://c2pa.org/",
      "resourceId": "ff89bed1f7960ab2",
      "resourceTitle": "C2PA Explainer Videos"
    },
    {
      "text": "American Political Science Review (February 2025)",
      "url": "https://www.cambridge.org/core/journals/american-political-science-review/article/liars-dividend-can-politicians-claim-misinformation-to-evade-accountability/687FEE54DBD7ED0C96D72B26606AA073",
      "resourceId": "c75d8df0bbf5a94d",
      "resourceTitle": "2024 study in the American Political Science Review"
    },
    {
      "text": "YouGov survey",
      "url": "https://www.brennancenter.org/our-work/research-reports/deepfakes-elections-and-shrinking-liars-dividend",
      "resourceId": "5494083a1717fed7",
      "resourceTitle": "liar's dividend"
    },
    {
      "text": "C2PA standard",
      "url": "https://c2pa.org/",
      "resourceId": "ff89bed1f7960ab2",
      "resourceTitle": "C2PA Explainer Videos"
    },
    {
      "text": "Schiff, Schiff & Bueno: The Liar's Dividend (APSR 2025)",
      "url": "https://www.cambridge.org/core/journals/american-political-science-review/article/liars-dividend-can-politicians-claim-misinformation-to-evade-accountability/687FEE54DBD7ED0C96D72B26606AA073",
      "resourceId": "c75d8df0bbf5a94d",
      "resourceTitle": "2024 study in the American Political Science Review"
    },
    {
      "text": "Brennan Center: Deepfakes, Elections, and Shrinking the Liar's Dividend",
      "url": "https://www.brennancenter.org/our-work/research-reports/deepfakes-elections-and-shrinking-liars-dividend",
      "resourceId": "5494083a1717fed7",
      "resourceTitle": "liar's dividend"
    },
    {
      "text": "Carnegie Endowment: Can Democracy Survive AI?",
      "url": "https://carnegieendowment.org/research/2024/12/can-democracy-survive-the-disruptive-power-of-ai",
      "resourceId": "add4f54080d0bfc5",
      "resourceTitle": "Carnegie Endowment for International Peace"
    },
    {
      "text": "C2PA: Coalition for Content Provenance and Authenticity",
      "url": "https://c2pa.org/",
      "resourceId": "ff89bed1f7960ab2",
      "resourceTitle": "C2PA Explainer Videos"
    }
  ],
  "unconvertedLinkCount": 8,
  "convertedLinkCount": 5,
  "backlinkCount": 14,
  "hallucinationRisk": {
    "level": "medium",
    "score": 55,
    "factors": [
      "no-citations"
    ]
  },
  "entityType": "risk",
  "redundancy": {
    "maxSimilarity": 12,
    "similarPages": [
      {
        "id": "trust-erosion-dynamics",
        "title": "Trust Erosion Dynamics Model",
        "path": "/knowledge-base/models/trust-erosion-dynamics/",
        "similarity": 12
      },
      {
        "id": "deepfake-detection",
        "title": "Deepfake Detection",
        "path": "/knowledge-base/responses/deepfake-detection/",
        "similarity": 12
      },
      {
        "id": "epistemic-risks",
        "title": "AI Epistemic Cruxes",
        "path": "/knowledge-base/cruxes/epistemic-risks/",
        "similarity": 11
      },
      {
        "id": "disinformation-detection-race",
        "title": "Disinformation Detection Arms Race Model",
        "path": "/knowledge-base/models/disinformation-detection-race/",
        "similarity": 11
      },
      {
        "id": "trust-cascade-model",
        "title": "Trust Cascade Failure Model",
        "path": "/knowledge-base/models/trust-cascade-model/",
        "similarity": 11
      }
    ]
  },
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 6,
      "diagrams": 1,
      "internalLinks": 12,
      "externalLinks": 8,
      "footnotes": 5,
      "references": 5
    },
    "actuals": {
      "tables": 8,
      "diagrams": 1,
      "internalLinks": 9,
      "externalLinks": 24,
      "footnotes": 0,
      "references": 12,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "amber",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:4.5 R:6 A:5 C:6.5"
  },
  "readerRank": 225,
  "researchRank": 500,
  "recommendedScore": 162.53
}
```
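In the coverage block above, each item is graded green, amber, or red against a per-feature target. The grading rule is not documented on this page; the sketch below is one rule consistent with the visible values (tables 8 vs 6 → green, internalLinks 9 vs 12 → amber, footnotes 0 vs 5 → red), with the 50% amber cutoff as an assumption.

```ts
// Hypothetical coverage grading; the 50% amber threshold is an assumption
// chosen to reproduce the statuses shown in this record.
type CoverageStatus = "green" | "amber" | "red";

function gradeCoverage(actual: number, target: number): CoverageStatus {
  if (actual >= target) return "green";       // target met or exceeded
  if (actual >= target * 0.5) return "amber"; // partially met
  return "red";                               // well short of the target
}

// Spot checks against the record above:
//   gradeCoverage(8, 6)   -> "green"  (tables)
//   gradeCoverage(9, 12)  -> "amber"  (internalLinks)
//   gradeCoverage(24, 8)  -> "green"  (externalLinks)
//   gradeCoverage(0, 5)   -> "red"    (footnotes)
```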
External Links

```json
{
  "lesswrong": "https://www.lesswrong.com/tag/trust"
}
```

Backlinks (14)
| id | title | type | relationship |
|---|---|---|---|
| trust-cascade-model | Trust Cascade Failure Model | analysis | related |
| trust-erosion-dynamics | Trust Erosion Dynamics Model | analysis | related |
| epistemic-security | AI-Era Epistemic Security | approach | — |
| epistemic-infrastructure | AI-Era Epistemic Infrastructure | approach | — |
| deepfakes | Deepfakes | risk | — |
| epistemic-collapse | Epistemic Collapse | risk | — |
| ai-investigation-risks | AI-Powered Investigation Risks | risk | — |
| risk-interaction-matrix | Risk Interaction Matrix Model | analysis | — |
| risk-interaction-network | Risk Interaction Network | analysis | — |
| epistemic-overview | Epistemic Risks (Overview) | concept | — |
| __index__/knowledge-base/risks | AI Risks | concept | — |
| near-term-risks | Key Near-Term AI Risks | risk | — |
| reality-fragmentation | AI-Accelerated Reality Fragmentation | risk | — |
| trust-cascade | AI Trust Cascade Failure | risk | — |
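The backlink table is presumably produced by inverting the site's internal-link graph at build time. A minimal sketch of that inversion follows; the Page shape and field names are assumptions, not the actual schema.

```ts
// Hypothetical backlink index: invert "page -> outgoing internal links"
// into "target page -> pages that link to it". The Page shape is assumed.
interface Page {
  id: string;
  title: string;
  entityType: string;
  internalLinks: string[]; // ids of pages this page links to
}

function buildBacklinkIndex(pages: Page[]): Map<string, Page[]> {
  const index = new Map<string, Page[]>();
  for (const page of pages) {
    for (const targetId of page.internalLinks) {
      const existing = index.get(targetId) ?? [];
      existing.push(page);
      index.set(targetId, existing);
    }
  }
  return index;
}

// For this page, index.get("trust-decline") would yield the 14 entries
// listed in the table above (backlinkCount: 14 in the record).
```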