Longterm Wiki

International Coordination Mechanisms

coordination-mechanisms · policy · Path: /knowledge-base/responses/coordination-mechanisms/
E470 — Entity ID (EID)
← Back to page · 8 backlinks · Quality: 91 · Updated: 2026-03-13
Page Recorddatabase.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "coordination-mechanisms",
  "numericId": null,
  "path": "/knowledge-base/responses/coordination-mechanisms/",
  "filePath": "knowledge-base/responses/coordination-mechanisms.mdx",
  "title": "International Coordination Mechanisms",
  "quality": 91,
  "readerImportance": 23.5,
  "researchImportance": 74,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive analysis of international AI coordination mechanisms shows growing but limited progress: 11-country AI Safety Institute network with ~$200M budget expanding to include India; Council of Europe treaty with 17 signatories and 3 ratifications; OECD Hiroshima framework with 13+ company pledges; Paris Summit drawing 61 nations (though US/UK abstained). Assessment finds high potential impact (40-60% racing risk reduction) if successful but low-medium tractability (25-40% probability), with information sharing most feasible (already active via AISI network) while capability restrictions face near-insurmountable geopolitical obstacles. UN Global Dialogue launch and India's 2026 AI Impact Summit mark expanding Global South engagement.",
  "description": "International coordination on AI safety involves multilateral treaties, bilateral dialogues, and institutional networks to manage AI risks globally. Current efforts include the Council of Europe AI Treaty (17 signatories, ratified by UK, France, Norway), the International Network of AI Safety Institutes (11+ members, approximately $200-250M combined budget with UK at $65M and US requesting $47.7M), the UN Global Dialogue on AI Governance with 40-member Scientific Panel (launched 2025), and US-China dialogues with planned 2026 Trump-Xi visits. The February 2025 OECD Hiroshima reporting framework saw 13+ major AI companies pledge participation. Paris Summit 2025 drew 61 signatories including China and India, though US and UK declined. New Delhi hosts the first Global South AI summit in February 2026.",
  "ratings": {
    "novelty": 5.5,
    "rigor": 7.5,
    "actionability": 6.5,
    "completeness": 8
  },
  "category": "responses",
  "subcategory": "international",
  "clusters": [
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 4074,
    "tableCount": 11,
    "diagramCount": 1,
    "internalLinks": 50,
    "externalLinks": 39,
    "footnoteCount": 0,
    "bulletRatio": 0.18,
    "sectionCount": 24,
    "hasOverview": false,
    "structuralScore": 14
  },
  "suggestedQuality": 93,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 4074,
  "unconvertedLinks": [
    {
      "text": "17 signatories",
      "url": "https://www.coe.int/en/web/artificial-intelligence/the-framework-convention-on-artificial-intelligence",
      "resourceId": "5f706698d30d6737",
      "resourceTitle": "Council of Europe, *Framework Convention on Artificial Intelligence* (https://www.coe.int/en/web/artificial-intellige..."
    },
    {
      "text": "17 signatories",
      "url": "https://www.coe.int/en/web/artificial-intelligence/the-framework-convention-on-artificial-intelligence",
      "resourceId": "5f706698d30d6737",
      "resourceTitle": "Council of Europe, *Framework Convention on Artificial Intelligence* (https://www.coe.int/en/web/artificial-intellige..."
    },
    {
      "text": "US CAISI (NIST)",
      "url": "https://www.nist.gov/artificial-intelligence/ai-safety-institute",
      "resourceId": "6aee33556a4b6429",
      "resourceTitle": "US AI Safety Institute"
    },
    {
      "text": "AI Impact Summit",
      "url": "https://alltechishuman.org/all-tech-is-human-blog/the-global-landscape-of-ai-safety-institutes",
      "resourceId": "48668fbbdd965679",
      "resourceTitle": "The Global Landscape of AI Safety Institutes"
    },
    {
      "text": "India hosting February 2026 AI Impact Summit",
      "url": "https://alltechishuman.org/all-tech-is-human-blog/the-global-landscape-of-ai-safety-institutes",
      "resourceId": "48668fbbdd965679",
      "resourceTitle": "The Global Landscape of AI Safety Institutes"
    },
    {
      "text": "GovAI Research on International Governance",
      "url": "https://www.governance.ai/research",
      "resourceId": "571cb6299c6d27cf",
      "resourceTitle": "Governance research"
    },
    {
      "text": "The Annual AI Governance Report 2025",
      "url": "https://www.itu.int/epublications/en/publication/the-annual-ai-governance-report-2025-steering-the-future-of-ai/en/",
      "resourceId": "ce43b69bb5fb00b2",
      "resourceTitle": "ITU Annual AI Governance Report 2025"
    },
    {
      "text": "Global Landscape of AI Safety Institutes",
      "url": "https://alltechishuman.org/all-tech-is-human-blog/the-global-landscape-of-ai-safety-institutes",
      "resourceId": "48668fbbdd965679",
      "resourceTitle": "The Global Landscape of AI Safety Institutes"
    }
  ],
  "unconvertedLinkCount": 8,
  "convertedLinkCount": 41,
  "backlinkCount": 8,
  "hallucinationRisk": {
    "level": "medium",
    "score": 35,
    "factors": [
      "no-citations",
      "high-rigor",
      "high-quality"
    ]
  },
  "entityType": "policy",
  "redundancy": {
    "maxSimilarity": 25,
    "similarPages": [
      {
        "id": "international-summits",
        "title": "International AI Safety Summits",
        "path": "/knowledge-base/responses/international-summits/",
        "similarity": 25
      },
      {
        "id": "international-regimes",
        "title": "International Compute Regimes",
        "path": "/knowledge-base/responses/international-regimes/",
        "similarity": 22
      },
      {
        "id": "us-aisi",
        "title": "US AI Safety Institute",
        "path": "/knowledge-base/organizations/us-aisi/",
        "similarity": 21
      },
      {
        "id": "ai-safety-institutes",
        "title": "AI Safety Institutes",
        "path": "/knowledge-base/responses/ai-safety-institutes/",
        "similarity": 20
      },
      {
        "id": "china-ai-regulations",
        "title": "China AI Regulations",
        "path": "/knowledge-base/responses/china-ai-regulations/",
        "similarity": 20
      }
    ]
  },
  "coverage": {
    "passing": 6,
    "total": 13,
    "targets": {
      "tables": 16,
      "diagrams": 2,
      "internalLinks": 33,
      "externalLinks": 20,
      "footnotes": 12,
      "references": 12
    },
    "actuals": {
      "tables": 11,
      "diagrams": 1,
      "internalLinks": 50,
      "externalLinks": 39,
      "footnotes": 0,
      "references": 29,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "red",
      "tables": "amber",
      "diagrams": "amber",
      "internalLinks": "green",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:5.5 R:7.5 A:6.5 C:8"
  },
  "readerRank": 498,
  "researchRank": 129,
  "recommendedScore": 215.61
}
External Links

No external links

Backlinks (8)
id · title · type · relationship
bletchley-declaration — Bletchley Declaration — policy
singapore-consensus — Singapore Consensus on AI Safety Research Priorities — policy
maim — MAIM (Mutually Assured AI Malfunction) — policy
racing-dynamics-impact — Racing Dynamics Impact Model — analysis
risk-activation-timeline — Risk Activation Timeline Model — analysis
safety-spending-at-scale — Safety Spending at Scale — analysis
uk-aisi — UK AI Safety Institute — organization
holden-karnofsky — Holden Karnofsky — person
Longterm Wiki