Longterm Wiki

AI-Driven Concentration of Power

concentration-of-power · risk · Path: /knowledge-base/risks/concentration-of-power/
E68 — Entity ID (EID)
← Back to page · 29 backlinks · Quality: 65 · Updated: 2026-03-13
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "concentration-of-power",
  "numericId": null,
  "path": "/knowledge-base/risks/concentration-of-power/",
  "filePath": "knowledge-base/risks/concentration-of-power.mdx",
  "title": "AI-Driven Concentration of Power",
  "quality": 65,
  "readerImportance": 38.5,
  "researchImportance": 21,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": "outcome",
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Documents how AI development is concentrating in ~20 organizations due to \\$100M+ compute costs, with 5 firms controlling 80%+ of cloud infrastructure and projections reaching \\$1-10B per model by 2030. Identifies key concentration mechanisms (compute, cloud, chips, capital) and links to governance interventions, though defers comprehensive analysis to a linked parameter page.",
  "description": "AI enabling unprecedented accumulation of power by small groups—with compute requirements exceeding \\$100M for frontier models and 5 firms controlling 80%+ of AI cloud infrastructure.",
  "ratings": {
    "novelty": 3.5,
    "rigor": 4.5,
    "actionability": 5,
    "completeness": 4
  },
  "category": "risks",
  "subcategory": "structural",
  "clusters": [
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 1159,
    "tableCount": 6,
    "diagramCount": 1,
    "internalLinks": 10,
    "externalLinks": 5,
    "footnoteCount": 0,
    "bulletRatio": 0.08,
    "sectionCount": 10,
    "hasOverview": true,
    "structuralScore": 12
  },
  "suggestedQuality": 80,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 1159,
  "unconvertedLinks": [
    {
      "text": "noted by the Open Markets Institute",
      "url": "https://www.openmarketsinstitute.org/publications/expert-brief-ai-and-market-concentration-courtney-radsch-max-vonthun",
      "resourceId": "d25f9c30c5fa7a8e",
      "resourceTitle": "Open Markets Institute: AI and Market Concentration"
    }
  ],
  "unconvertedLinkCount": 1,
  "convertedLinkCount": 9,
  "backlinkCount": 29,
  "hallucinationRisk": {
    "level": "medium",
    "score": 55,
    "factors": [
      "no-citations"
    ]
  },
  "entityType": "risk",
  "redundancy": {
    "maxSimilarity": 15,
    "similarPages": [
      {
        "id": "winner-take-all",
        "title": "AI Winner-Take-All Dynamics",
        "path": "/knowledge-base/risks/winner-take-all/",
        "similarity": 15
      },
      {
        "id": "winner-take-all-concentration",
        "title": "Winner-Take-All Concentration Model",
        "path": "/knowledge-base/models/winner-take-all-concentration/",
        "similarity": 14
      },
      {
        "id": "compute-concentration",
        "title": "Compute Concentration",
        "path": "/knowledge-base/risks/compute-concentration/",
        "similarity": 13
      },
      {
        "id": "knowledge-monopoly",
        "title": "AI Knowledge Monopoly",
        "path": "/knowledge-base/risks/knowledge-monopoly/",
        "similarity": 13
      },
      {
        "id": "ai-megaproject-infrastructure",
        "title": "AI Megaproject Infrastructure",
        "path": "/knowledge-base/models/ai-megaproject-infrastructure/",
        "similarity": 12
      }
    ]
  },
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 5,
      "diagrams": 0,
      "internalLinks": 9,
      "externalLinks": 6,
      "footnotes": 3,
      "references": 3
    },
    "actuals": {
      "tables": 6,
      "diagrams": 1,
      "internalLinks": 10,
      "externalLinks": 5,
      "footnotes": 0,
      "references": 67,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "green",
      "externalLinks": "amber",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:3.5 R:4.5 A:5 C:4"
  },
  "readerRank": 389,
  "researchRank": 482,
  "recommendedScore": 170.67
}
External Links
{
  "eaForum": "https://forum.effectivealtruism.org/topics/concentration-of-power",
  "eightyK": "https://80000hours.org/problem-profiles/extreme-power-concentration/"
}
Backlinks (29)
id | title | type | relationship
racing-dynamics-model | Racing Dynamics Game Theory Model | analysis | outcome
multipolar-trap-model | Multipolar Trap Coordination Model | analysis | outcome
winner-take-all-model | Winner-Take-All Market Dynamics Model | analysis | mechanism
concentration-of-power-model | Concentration of Power Systems Model | analysis | analyzes
lock-in-model | Lock-in Irreversibility Model | analysis | mechanism
economic-disruption-model | Economic Disruption Structural Model | analysis | consequence
deepmind | Google DeepMind | organization | affects
compute-governance | Compute Governance | policy
ai-accountability | AI for Accountability and Anti-Corruption | approach
authoritarian-tools | AI Authoritarian Tools | risk
economic-disruption | AI-Driven Economic Disruption | risk
irreversibility | AI-Induced Irreversibility | risk
lock-in | AI Value Lock-in | risk
authoritarian-takeover | AI-Enabled Authoritarian Takeover | risk
multipolar-trap | Multipolar Trap (AI Development) | risk
surveillance | AI Mass Surveillance | risk
winner-take-all | AI Winner-Take-All Dynamics | risk
compute-concentration | Compute Concentration | risk
concentrated-compute-cybersecurity-risk | Concentrated Compute as a Cybersecurity Risk | risk
__index__/knowledge-base | Knowledge Base | concept
ai-revenue-sources | AI Revenue Sources | organization
holden-karnofsky | Holden Karnofsky | person
yoshua-bengio | Yoshua Bengio | person
deliberation | AI-Assisted Deliberation Platforms | approach
international-regimes | International Compute Regimes | policy
lab-culture | AI Lab Safety Culture | approach
seoul-declaration | Seoul AI Safety Summit Declaration | policy
__index__/knowledge-base/risks | AI Risks | concept
structural-overview | Structural Risks (Overview) | concept
Longterm Wiki