Longterm Wiki

AI Impacts

ai-impacts · organization · Path: /knowledge-base/organizations/ai-impacts/
E512 — Entity ID (EID)
← Back to page · 12 backlinks · Quality: 53 · Updated: 2026-03-13
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "ai-impacts",
  "numericId": null,
  "path": "/knowledge-base/organizations/ai-impacts/",
  "filePath": "knowledge-base/organizations/ai-impacts.mdx",
  "title": "AI Impacts",
  "quality": 53,
  "readerImportance": 89,
  "researchImportance": 91.5,
  "tacticalValue": 72,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "AI Impacts is a research organization that conducts empirical analysis of AI timelines and risks through surveys and historical trend analysis, contributing valuable data to AI safety discourse. While their work provides useful evidence synthesis and expert opinion surveys, it faces inherent limitations in predicting transformative AI developments and translating research into actionable outcomes.",
  "description": "Research organization focused on empirical analysis of AI timelines, risks, and the likely impacts of human-level artificial intelligence",
  "ratings": {
    "novelty": 4,
    "rigor": 6,
    "actionability": 4,
    "completeness": 7
  },
  "category": "organizations",
  "subcategory": "epistemic-orgs",
  "clusters": [
    "ai-safety",
    "community",
    "epistemics"
  ],
  "metrics": {
    "wordCount": 1481,
    "tableCount": 2,
    "diagramCount": 0,
    "internalLinks": 7,
    "externalLinks": 1,
    "footnoteCount": 0,
    "bulletRatio": 0,
    "sectionCount": 13,
    "hasOverview": true,
    "structuralScore": 11
  },
  "suggestedQuality": 73,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 1481,
  "unconvertedLinks": [
    {
      "text": "aiimpacts.org",
      "url": "https://aiimpacts.org",
      "resourceId": "3b9fda03b8be71dc",
      "resourceTitle": "AI Impacts 2023"
    }
  ],
  "unconvertedLinkCount": 1,
  "convertedLinkCount": 0,
  "backlinkCount": 12,
  "citationHealth": {
    "total": 8,
    "withQuotes": 8,
    "verified": 8,
    "accuracyChecked": 8,
    "accurate": 6,
    "inaccurate": 0,
    "avgScore": 1
  },
  "hallucinationRisk": {
    "level": "high",
    "score": 80,
    "factors": [
      "biographical-claims",
      "no-citations",
      "few-external-sources"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 17,
    "similarPages": [
      {
        "id": "futuresearch",
        "title": "FutureSearch",
        "path": "/knowledge-base/organizations/futuresearch/",
        "similarity": 17
      },
      {
        "id": "vidur-kapur",
        "title": "Vidur Kapur",
        "path": "/knowledge-base/people/vidur-kapur/",
        "similarity": 16
      },
      {
        "id": "capability-alignment-race",
        "title": "Capability-Alignment Race Model",
        "path": "/knowledge-base/models/capability-alignment-race/",
        "similarity": 15
      },
      {
        "id": "arb-research",
        "title": "Arb Research",
        "path": "/knowledge-base/organizations/arb-research/",
        "similarity": 15
      },
      {
        "id": "cais",
        "title": "CAIS (Center for AI Safety)",
        "path": "/knowledge-base/organizations/cais/",
        "similarity": 15
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-18",
      "branch": "claude/fix-issue-240-N5irU",
      "title": "Surface tacticalValue in /wiki table and score 53 pages",
      "summary": "Added `tacticalValue` to `ExploreItem` interface, `getExploreItems()` mappings, the `/wiki` explore table (new sortable \"Tact.\" column), and the card view sort dropdown. Scored 49 new pages with tactical values (4 were already scored), bringing total to 53.",
      "model": "sonnet-4",
      "duration": "~30min"
    }
  ],
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 6,
      "diagrams": 1,
      "internalLinks": 12,
      "externalLinks": 7,
      "footnotes": 4,
      "references": 4
    },
    "actuals": {
      "tables": 2,
      "diagrams": 0,
      "internalLinks": 7,
      "externalLinks": 1,
      "footnotes": 0,
      "references": 6,
      "quotesWithQuotes": 8,
      "quotesTotal": 8,
      "accuracyChecked": 8,
      "accuracyTotal": 8
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "amber",
      "externalLinks": "amber",
      "footnotes": "red",
      "references": "green",
      "quotes": "green",
      "accuracy": "green"
    },
    "editHistoryCount": 1,
    "ratingsString": "N:4 R:6 A:4 C:7"
  },
  "readerRank": 25,
  "researchRank": 23,
  "recommendedScore": 172.03
}
External Links

No external links

Backlinks (12)
id · title · type/relationship
critical-uncertainties · AI Risk Critical Uncertainties Model · crux
agi-timeline · AGI Timeline · concept
case-against-xrisk · The Case AGAINST AI Existential Risk · argument
case-for-xrisk · The Case FOR AI Existential Risk · argument
ai-timelines · AI Timelines · concept
capability-alignment-race · Capability-Alignment Race Model · analysis
deceptive-alignment-decomposition · Deceptive Alignment Decomposition Model · analysis
safety-research-value · Expected Value of AI Safety Research · analysis
gpai · Global Partnership on Artificial Intelligence (GPAI) · organization
palisade-research · Palisade Research · organization
coe-ai-convention · Council of Europe Framework Convention on Artificial Intelligence · policy
texas-traiga · Texas TRAIGA Responsible AI Governance Act · policy
Longterm Wiki