Longterm Wiki

ControlAI

ID: controlai · Type: organization · Path: /knowledge-base/organizations/controlai/
Entity ID (EID): E426
Backlinks: 4 · Quality: 63 · Updated: 2026-03-13
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "controlai",
  "numericId": null,
  "path": "/knowledge-base/organizations/controlai/",
  "filePath": "knowledge-base/organizations/controlai.mdx",
  "title": "ControlAI",
  "quality": 63,
  "readerImportance": 42,
  "researchImportance": 57,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "ControlAI is a UK-based advocacy organization that has achieved notable policy engagement success (briefing 150+ lawmakers, securing support from 100+ UK parliamentarians) while promoting direct institutional approaches to preventing AI superintelligence development through binding regulation. The organization represents a significant shift toward democratic governance approaches in AI safety, though faces skepticism about the feasibility of global coordination on AI development restrictions.",
  "description": "UK-based AI safety advocacy organization focused on preventing artificial superintelligence development through policy campaigns and grassroots outreach to lawmakers",
  "ratings": {
    "novelty": 4,
    "rigor": 6,
    "actionability": 7,
    "completeness": 8
  },
  "category": "organizations",
  "subcategory": "safety-orgs",
  "clusters": [
    "community",
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 2197,
    "tableCount": 2,
    "diagramCount": 0,
    "internalLinks": 17,
    "externalLinks": 2,
    "footnoteCount": 0,
    "bulletRatio": 0.41,
    "sectionCount": 27,
    "hasOverview": true,
    "structuralScore": 11
  },
  "suggestedQuality": 73,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 2197,
  "unconvertedLinks": [
    {
      "text": "controlai.com",
      "url": "https://controlai.com",
      "resourceId": "8958feef629880c2",
      "resourceTitle": "ControlAI Overview"
    }
  ],
  "unconvertedLinkCount": 1,
  "convertedLinkCount": 0,
  "backlinkCount": 4,
  "citationHealth": {
    "total": 50,
    "withQuotes": 41,
    "verified": 41,
    "accuracyChecked": 41,
    "accurate": 23,
    "inaccurate": 7,
    "avgScore": 0.9929732869311076
  },
  "hallucinationRisk": {
    "level": "high",
    "score": 75,
    "factors": [
      "biographical-claims",
      "no-citations"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 18,
    "similarPages": [
      {
        "id": "pause-ai",
        "title": "Pause AI",
        "path": "/knowledge-base/organizations/pause-ai/",
        "similarity": 18
      },
      {
        "id": "ai-futures-project",
        "title": "AI Futures Project",
        "path": "/knowledge-base/organizations/ai-futures-project/",
        "similarity": 16
      },
      {
        "id": "frontier-model-forum",
        "title": "Frontier Model Forum",
        "path": "/knowledge-base/organizations/frontier-model-forum/",
        "similarity": 16
      },
      {
        "id": "nist-ai",
        "title": "NIST and AI Safety",
        "path": "/knowledge-base/organizations/nist-ai/",
        "similarity": 16
      },
      {
        "id": "research-agendas",
        "title": "AI Alignment Research Agenda Comparison",
        "path": "/knowledge-base/responses/research-agendas/",
        "similarity": 16
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 9,
      "diagrams": 1,
      "internalLinks": 18,
      "externalLinks": 11,
      "footnotes": 7,
      "references": 7
    },
    "actuals": {
      "tables": 2,
      "diagrams": 0,
      "internalLinks": 17,
      "externalLinks": 2,
      "footnotes": 0,
      "references": 23,
      "quotesWithQuotes": 41,
      "quotesTotal": 50,
      "accuracyChecked": 41,
      "accuracyTotal": 50
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "amber",
      "externalLinks": "amber",
      "footnotes": "red",
      "references": "green",
      "quotes": "green",
      "accuracy": "green"
    },
    "ratingsString": "N:4 R:6 A:7 C:8"
  },
  "readerRank": 358,
  "researchRank": 243,
  "recommendedScore": 168.7
}
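
The record above is described as a build-time merge of three sources: MDX frontmatter, an entity YAML file, and computed metrics. The wiki's actual build code is not shown, so the sketch below is a hypothetical illustration of that merge order using common parsers (gray-matter for frontmatter, js-yaml for the entity file); all function names and the module layout are assumptions, while the field names come from the record itself.

```typescript
// Hypothetical sketch of the build-time merge described above.
// gray-matter and js-yaml are assumed stand-ins; the wiki's real tooling is unknown.
import matter from "gray-matter";
import { load } from "js-yaml";
import { readFileSync } from "fs";

interface PageRecord {
  id: string;
  title: string;
  quality: number;
  entityType?: string;
  metrics?: Record<string, number | boolean>;
  [key: string]: unknown; // remaining fields from the three sources
}

// Merge order matters: later spreads override earlier ones, so computed
// metrics win over entity YAML, which wins over MDX frontmatter.
function buildPageRecord(
  mdxPath: string,
  entityYamlPath: string,
  computed: Record<string, unknown>
): PageRecord {
  const frontmatter = matter(readFileSync(mdxPath, "utf8")).data;
  const entity = load(readFileSync(entityYamlPath, "utf8")) as object;
  return { ...frontmatter, ...entity, ...computed } as PageRecord;
}

// Example call shape for this page (paths taken from the record above):
// buildPageRecord(
//   "knowledge-base/organizations/controlai.mdx",
//   "entities/controlai.yaml", // hypothetical entity file location
//   { wordCount: 2197, structuralScore: 11 }
// );
```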
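The coverage block pairs per-feature targets with actuals and reduces each pair to a green/amber/red status. The exact thresholds are not given in the record; one minimal rule consistent with every pair shown (tables 2/9 → amber, diagrams 0/1 → red, internalLinks 17/18 → amber, footnotes 0/7 → red, references 23/7 → green) is: green when the actual meets the target, red when the actual is zero, amber otherwise. The sketch below encodes that inferred rule; the wiki's real logic may differ.

```typescript
// Assumed status rule, inferred from the target/actual pairs in this record;
// the wiki's actual thresholds may differ.
type Status = "green" | "amber" | "red";

function coverageStatus(actual: number, target: number): Status {
  if (actual >= target) return "green"; // target met or exceeded
  if (actual === 0) return "red";       // nothing present at all
  return "amber";                       // partial progress
}

// Checked against the record above:
// coverageStatus(2, 9)   -> "amber"  (tables)
// coverageStatus(0, 1)   -> "red"    (diagrams)
// coverageStatus(17, 18) -> "amber"  (internalLinks)
// coverageStatus(0, 7)   -> "red"    (footnotes)
// coverageStatus(23, 7)  -> "green"  (references)
```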
External Links

No external links

Backlinks (4)
| id | title | type | relationship |
|---|---|---|---|
| safety-orgs-overview | AI Safety Organizations (Overview) | concept | |
| sentinel | Sentinel (Catastrophic Risk Foresight) | organization | |
| eli-lifland | Eli Lifland | person | |
| vidur-kapur | Vidur Kapur | person | |