Seoul AI Safety Summit Declaration
seoul-declaration · policy · Path: /knowledge-base/responses/seoul-declaration/
E279 — Entity ID (EID)
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "seoul-declaration",
"numericId": null,
"path": "/knowledge-base/responses/seoul-declaration/",
"filePath": "knowledge-base/responses/seoul-declaration.mdx",
"title": "Seoul AI Safety Summit Declaration",
"quality": 60,
"readerImportance": 57,
"researchImportance": 26.5,
"tacticalValue": null,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "The May 2024 Seoul AI Safety Summit achieved voluntary commitments from 16 frontier AI companies (80% of development capacity) and established an 11-nation AI Safety Institute network, with 75% compliance (12/16 companies published frameworks by December 2024). However, voluntary nature limits enforcement, with only 10-30% probability of evolving into binding agreements within 5 years and minimal progress on incident reporting or common risk thresholds.",
"description": "The May 2024 Seoul AI Safety Summit secured voluntary commitments from 16 frontier AI companies (including Chinese firm Zhipu AI) and established an 11-nation AI Safety Institute network. While 12 of 16 signatory companies have published safety frameworks by late 2024, the voluntary nature limits enforcement, with only 10-30% probability of evolving into binding international agreements within 5 years.",
"ratings": {
"novelty": 4.5,
"rigor": 6.5,
"actionability": 5,
"completeness": 7
},
"category": "responses",
"subcategory": "international",
"clusters": [
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 2841,
"tableCount": 10,
"diagramCount": 1,
"internalLinks": 40,
"externalLinks": 0,
"footnoteCount": 0,
"bulletRatio": 0.17,
"sectionCount": 23,
"hasOverview": true,
"structuralScore": 12
},
"suggestedQuality": 80,
"updateFrequency": 21,
"evergreen": true,
"wordCount": 2841,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 31,
"backlinkCount": 2,
"hallucinationRisk": {
"level": "medium",
"score": 60,
"factors": [
"no-citations",
"few-external-sources"
]
},
"entityType": "policy",
"redundancy": {
"maxSimilarity": 21,
"similarPages": [
{
"id": "international-summits",
"title": "International AI Safety Summits",
"path": "/knowledge-base/responses/international-summits/",
"similarity": 21
},
{
"id": "responsible-scaling-policies",
"title": "Responsible Scaling Policies",
"path": "/knowledge-base/responses/responsible-scaling-policies/",
"similarity": 20
},
{
"id": "voluntary-commitments",
"title": "Voluntary Industry Commitments",
"path": "/knowledge-base/responses/voluntary-commitments/",
"similarity": 20
},
{
"id": "us-aisi",
"title": "US AI Safety Institute",
"path": "/knowledge-base/organizations/us-aisi/",
"similarity": 19
},
{
"id": "bletchley-declaration",
"title": "Bletchley Declaration",
"path": "/knowledge-base/responses/bletchley-declaration/",
"similarity": 19
}
]
},
"coverage": {
"passing": 7,
"total": 13,
"targets": {
"tables": 11,
"diagrams": 1,
"internalLinks": 23,
"externalLinks": 14,
"footnotes": 9,
"references": 9
},
"actuals": {
"tables": 10,
"diagrams": 1,
"internalLinks": 40,
"externalLinks": 0,
"footnotes": 0,
"references": 20,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "amber",
"diagrams": "green",
"internalLinks": "green",
"externalLinks": "red",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:4.5 R:6.5 A:5 C:7"
},
"readerRank": 253,
"researchRank": 443,
"recommendedScore": 170.31
}

External Links
{
"eaForum": "https://forum.effectivealtruism.org/topics/ai-safety-summit"
}

Backlinks (2)
| id | title | type | relationship |
|---|---|---|---|
| compounding-risks-analysis | Compounding Risks Analysis | analysis | — |
| governance-overview | AI Governance & Policy (Overview) | concept | — |