Longterm Wiki

AI Surveillance and US Democratic Erosion

us-ai-surveillance-democratic-erosion (risk) — Path: /knowledge-base/risks/us-ai-surveillance-democratic-erosion/
Entity ID (EID): E1003
← Back to page · 1 backlink · Quality: 55 · Updated: 2026-03-13
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "us-ai-surveillance-democratic-erosion",
  "numericId": "E1003",
  "path": "/knowledge-base/risks/us-ai-surveillance-democratic-erosion/",
  "filePath": "knowledge-base/risks/us-ai-surveillance-democratic-erosion.mdx",
  "title": "AI Surveillance and US Democratic Erosion",
  "quality": 55,
  "readerImportance": 85,
  "researchImportance": 75,
  "tacticalValue": 90,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": null,
  "llmSummary": "Analysis of how data centralization, oversight dismantlement, and AI capability acquisition by the US government create near-term threats to democratic processes. Documents the Anthropic-Pentagon standoff as a crystallizing moment, current administration actions (100+ targeted opponents, national citizenship database, Palantir contracts, DOGE AI surveillance of federal workers, gutted oversight boards), legal loopholes enabling warrantless bulk data collection, how AI changes surveillance economics, five threat scenarios for the 2026 midterms with probability estimates, and countervailing forces including courts and betting-market-favored Democratic House win.",
  "description": "The convergence of data centralization, oversight dismantlement, and AI surveillance capability acquisition by the current US administration poses near-term risks to democratic processes. The February 2026 Anthropic-Pentagon standoff revealed the government's pursuit of AI analysis of bulk commercial data on Americans. With 100+ political opponents already targeted, a national citizenship database under construction, and AI monitoring of federal workers underway, this is an active and escalating threat — not a theoretical future risk.",
  "ratings": {
    "focus": 9,
    "novelty": 8,
    "rigor": 6.5,
    "completeness": 7,
    "concreteness": 9,
    "actionability": 7,
    "objectivity": 6.5
  },
  "category": "risks",
  "subcategory": "governance",
  "clusters": [
    "ai-safety",
    "governance",
    "community"
  ],
  "metrics": {
    "wordCount": 2636,
    "tableCount": 2,
    "diagramCount": 0,
    "internalLinks": 11,
    "externalLinks": 4,
    "footnoteCount": 0,
    "bulletRatio": 0.29,
    "sectionCount": 21,
    "hasOverview": true,
    "structuralScore": 13
  },
  "suggestedQuality": 87,
  "updateFrequency": 7,
  "evergreen": true,
  "wordCount": 2636,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 0,
  "backlinkCount": 1,
  "hallucinationRisk": {
    "level": "medium",
    "score": 55,
    "factors": [
      "no-citations"
    ]
  },
  "entityType": "risk",
  "redundancy": {
    "maxSimilarity": 15,
    "similarPages": [
      {
        "id": "surveillance",
        "title": "Mass Surveillance",
        "path": "/knowledge-base/risks/surveillance/",
        "similarity": 15
      },
      {
        "id": "disinformation",
        "title": "Disinformation",
        "path": "/knowledge-base/risks/disinformation/",
        "similarity": 14
      },
      {
        "id": "near-term-risks",
        "title": "Key Near-Term AI Risks",
        "path": "/knowledge-base/risks/near-term-risks/",
        "similarity": 14
      },
      {
        "id": "anthropic-government-standoff",
        "title": "Anthropic-Pentagon Standoff (2026)",
        "path": "/knowledge-base/incidents/anthropic-government-standoff/",
        "similarity": 13
      },
      {
        "id": "authoritarian-tools-diffusion",
        "title": "Authoritarian Tools Diffusion Model",
        "path": "/knowledge-base/models/authoritarian-tools-diffusion/",
        "similarity": 13
      }
    ]
  },
  "coverage": {
    "passing": 4,
    "total": 13,
    "targets": {
      "tables": 11,
      "diagrams": 1,
      "internalLinks": 21,
      "externalLinks": 13,
      "footnotes": 8,
      "references": 8
    },
    "actuals": {
      "tables": 2,
      "diagrams": 0,
      "internalLinks": 11,
      "externalLinks": 4,
      "footnotes": 0,
      "references": 0,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "amber",
      "externalLinks": "amber",
      "footnotes": "red",
      "references": "red",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:8 R:6.5 A:7 C:7"
  },
  "readerRank": 53,
  "researchRank": 120,
  "recommendedScore": 174.28
}
External Links

No external links

Backlinks (1)
id | title | type | relationship
near-term-risks | Key Near-Term AI Risks | risk | (not specified)
Longterm Wiki