Pause / Moratorium
ID: pause-moratorium · Type: policy · Path: /knowledge-base/responses/pause-moratorium/
Entity ID (EID): E460
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "pause-moratorium",
"numericId": null,
"path": "/knowledge-base/responses/pause-moratorium/",
"filePath": "knowledge-base/responses/pause-moratorium.mdx",
"title": "Pause / Moratorium",
"quality": 72,
"readerImportance": 78.5,
"researchImportance": 67,
"tacticalValue": 78,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "Comprehensive analysis of pause/moratorium proposals finding they would provide very high safety benefits if implemented (buying time for safety research to close the growing capability-safety gap) but face critical enforcement and coordination challenges with zero current adoption by major labs. The FLI 2023 open letter garnered 30,000+ signatures but resulted in no actual slowdown, highlighting severe tractability issues despite theoretical effectiveness.",
"description": "Proposals to pause or slow frontier AI development until safety is better understood, offering potentially high safety benefits if implemented but facing significant coordination challenges and currently lacking adoption by major AI laboratories.",
"ratings": {
"novelty": 4.2,
"rigor": 6.8,
"actionability": 5.5,
"completeness": 7.5
},
"category": "responses",
"subcategory": "alignment-policy",
"clusters": [
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 2032,
"tableCount": 19,
"diagramCount": 2,
"internalLinks": 7,
"externalLinks": 26,
"footnoteCount": 0,
"bulletRatio": 0.05,
"sectionCount": 27,
"hasOverview": true,
"structuralScore": 15
},
"suggestedQuality": 100,
"updateFrequency": 21,
"evergreen": true,
"wordCount": 2032,
"unconvertedLinks": [
{
"text": "open letter",
"url": "https://futureoflife.org/open-letter/pause-giant-ai-experiments/",
"resourceId": "531f55cee64f6509",
"resourceTitle": "FLI open letter"
},
{
"text": "Yoshua Bengio",
"url": "https://yoshuabengio.org/",
"resourceId": "2a646e963d3eb574",
"resourceTitle": "Yoshua Bengio"
},
{
"text": "MIT Technology Review noted",
"url": "https://www.technologyreview.com/2023/09/26/1080299/six-months-on-from-the-pause-letter/",
"resourceId": "1ba1123aa592a983",
"resourceTitle": "What's changed since the \"pause AI\" letter six months ago?"
},
{
"text": "30,000+",
"url": "https://futureoflife.org/open-letter/pause-giant-ai-experiments/",
"resourceId": "531f55cee64f6509",
"resourceTitle": "FLI open letter"
},
{
"text": "renewed urgency within governments",
"url": "https://www.technologyreview.com/2023/09/26/1080299/six-months-on-from-the-pause-letter/",
"resourceId": "1ba1123aa592a983",
"resourceTitle": "What's changed since the \"pause AI\" letter six months ago?"
},
{
"text": "International moratorium",
"url": "https://pauseai.info/",
"resourceId": "a8fda81d4a00ec7c",
"resourceTitle": "Pause AI movement"
},
{
"text": "UK AI Safety Summit",
"url": "https://carnegieendowment.org/research/2024/10/the-ai-governance-arms-race-from-summit-pageantry-to-progress",
"resourceId": "a7f69bbad6cd82c0",
"resourceTitle": "Carnegie analysis warns"
},
{
"text": "UN AI Governance",
"url": "https://press.un.org/en/2025/sgsm22776.doc.htm",
"resourceId": "de840ac51dee6c7c",
"resourceTitle": "Scientific Panel"
},
{
"text": "FLI Open Letter",
"url": "https://futureoflife.org/open-letter/pause-giant-ai-experiments/",
"resourceId": "531f55cee64f6509",
"resourceTitle": "FLI open letter"
},
{
"text": "MIT Tech Review Analysis",
"url": "https://www.technologyreview.com/2023/09/26/1080299/six-months-on-from-the-pause-letter/",
"resourceId": "1ba1123aa592a983",
"resourceTitle": "What's changed since the \"pause AI\" letter six months ago?"
},
{
"text": "PauseAI",
"url": "https://pauseai.info/",
"resourceId": "a8fda81d4a00ec7c",
"resourceTitle": "Pause AI movement"
},
{
"text": "Future of Life Institute",
"url": "https://futureoflife.org/",
"resourceId": "786a68a91a7d5712",
"resourceTitle": "Future of Life Institute"
},
{
"text": "PauseAI",
"url": "https://pauseai.info/",
"resourceId": "a8fda81d4a00ec7c",
"resourceTitle": "Pause AI movement"
},
{
"text": "GovAI",
"url": "https://www.governance.ai/",
"resourceId": "f35c467b353f990f",
"resourceTitle": "GovAI"
},
{
"text": "Carnegie Endowment Analysis",
"url": "https://carnegieendowment.org/research/2024/10/the-ai-governance-arms-race-from-summit-pageantry-to-progress",
"resourceId": "a7f69bbad6cd82c0",
"resourceTitle": "Carnegie analysis warns"
}
],
"unconvertedLinkCount": 15,
"convertedLinkCount": 0,
"backlinkCount": 2,
"hallucinationRisk": {
"level": "medium",
"score": 55,
"factors": [
"no-citations"
]
},
"entityType": "policy",
"redundancy": {
"maxSimilarity": 15,
"similarPages": [
{
"id": "pause-debate",
"title": "Should We Pause AI Development?",
"path": "/knowledge-base/debates/pause-debate/",
"similarity": 15
},
{
"id": "pause",
"title": "Pause Advocacy",
"path": "/knowledge-base/responses/pause/",
"similarity": 12
},
{
"id": "seoul-declaration",
"title": "Seoul AI Safety Summit Declaration",
"path": "/knowledge-base/responses/seoul-declaration/",
"similarity": 12
},
{
"id": "racing-dynamics",
"title": "AI Development Racing Dynamics",
"path": "/knowledge-base/risks/racing-dynamics/",
"similarity": 11
},
{
"id": "pause-ai",
"title": "Pause AI",
"path": "/knowledge-base/organizations/pause-ai/",
"similarity": 10
}
]
},
"coverage": {
"passing": 8,
"total": 13,
"targets": {
"tables": 8,
"diagrams": 1,
"internalLinks": 16,
"externalLinks": 10,
"footnotes": 6,
"references": 6
},
"actuals": {
"tables": 19,
"diagrams": 2,
"internalLinks": 7,
"externalLinks": 26,
"footnotes": 0,
"references": 8,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "green",
"diagrams": "green",
"internalLinks": "amber",
"externalLinks": "green",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:4.2 R:6.8 A:5.5 C:7.5"
},
"readerRank": 98,
"researchRank": 174,
"recommendedScore": 204.91
}

External Links
{
"eaForum": "https://forum.effectivealtruism.org/topics/ai-pause-debate-2023"
}

Backlinks (2)
| id | title | type | relationship |
|---|---|---|---|
| pause | Pause Advocacy | approach | — |
| alignment-policy-overview | Policy & Governance (Overview) | concept | — |