Singapore Consensus on AI Safety Research Priorities
singapore-consensus · policy · Path: /knowledge-base/responses/singapore-consensus/
Entity ID (EID): E701
Page Record — database.json (merged from MDX frontmatter + Entity YAML + computed metrics at build time)
{
"id": "singapore-consensus",
"numericId": null,
"path": "/knowledge-base/responses/singapore-consensus/",
"filePath": "knowledge-base/responses/singapore-consensus.mdx",
"title": "Singapore Consensus on AI Safety Research Priorities",
"quality": 45,
"readerImportance": 5,
"researchImportance": 5.5,
"tacticalValue": null,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-20",
"llmSummary": "The Singapore Consensus on Global AI Safety Research Priorities (arXiv:2506.20702) is a consensus document produced by the April 2025 SCAI conference alongside ICLR 2025, authored by 88 researchers from 11 countries. Building on the 2025 International AI Safety Report, it organizes AI safety research into three defence-in-depth areas—Assessment (risk measurement, metrology, third-party audits), Development (specification, training, verification of trustworthy systems), and Control (post-deployment monitoring, ecosystem oversight, societal resilience). It proposes limiting AGI risk by constraining autonomy, generality, or intelligence dimensions, and advocates non-agentic AI monitoring agentic systems. Uniquely identifies areas of mutual interest for cooperation even among competitors.",
"description": "Consensus document from the 2025 Singapore Conference on AI (SCAI), authored by 88 researchers from 11 countries including Yoshua Bengio, Stuart Russell, Max Tegmark, and Dawn Song. Organizes AI safety research into a defence-in-depth framework across three areas (Assessment, Development, Control) and identifies areas of mutual interest where even competitors benefit from cooperation.",
"ratings": {
"novelty": 6,
"rigor": 7,
"actionability": 5.5,
"completeness": 6.5
},
"category": "responses",
"subcategory": "international",
"clusters": [
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 1180,
"tableCount": 3,
"diagramCount": 1,
"internalLinks": 5,
"externalLinks": 0,
"footnoteCount": 0,
"bulletRatio": 0.15,
"sectionCount": 10,
"hasOverview": true,
"structuralScore": 11
},
"suggestedQuality": 73,
"updateFrequency": 45,
"evergreen": true,
"wordCount": 1180,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 0,
"backlinkCount": 0,
"hallucinationRisk": {
"level": "medium",
"score": 45,
"factors": [
"no-citations",
"few-external-sources",
"high-rigor"
]
},
"entityType": "policy",
"redundancy": {
"maxSimilarity": 14,
"similarPages": [
{
"id": "frontier-model-forum",
"title": "Frontier Model Forum",
"path": "/knowledge-base/organizations/frontier-model-forum/",
"similarity": 14
},
{
"id": "intervention-effectiveness-matrix",
"title": "Intervention Effectiveness Matrix",
"path": "/knowledge-base/models/intervention-effectiveness-matrix/",
"similarity": 13
},
{
"id": "power-seeking-conditions",
"title": "Power-Seeking Emergence Conditions Model",
"path": "/knowledge-base/models/power-seeking-conditions/",
"similarity": 13
},
{
"id": "ai-control",
"title": "AI Control",
"path": "/knowledge-base/responses/ai-control/",
"similarity": 13
},
{
"id": "responsible-scaling-policies",
"title": "Responsible Scaling Policies",
"path": "/knowledge-base/responses/responsible-scaling-policies/",
"similarity": 13
}
]
},
"changeHistory": [
{
"date": "2026-02-16",
"branch": "claude/investigate-arxiv-paper-UmGPu",
"title": "Singapore Consensus on AI Safety",
"summary": "Investigated arXiv:2506.20702 (The Singapore Consensus on Global AI Safety Research Priorities) and integrated it into the wiki. Updated the international-summits page with a new SCAI section and Mermaid diagram, fixed the broken Singapore Consensus resource in web-other.yaml, updated Bengio/Russell/Tegmark pages with references, created a new dedicated singapore-consensus page with entity E694, and registered the entity in responses.yaml.",
"pr": 157
}
],
"coverage": {
"passing": 6,
"total": 13,
"targets": {
"tables": 5,
"diagrams": 0,
"internalLinks": 9,
"externalLinks": 6,
"footnotes": 4,
"references": 4
},
"actuals": {
"tables": 3,
"diagrams": 1,
"internalLinks": 5,
"externalLinks": 0,
"footnotes": 0,
"references": 0,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "green",
"overview": "green",
"tables": "amber",
"diagrams": "green",
"internalLinks": "amber",
"externalLinks": "red",
"footnotes": "red",
"references": "red",
"quotes": "red",
"accuracy": "red"
},
"editHistoryCount": 1,
"ratingsString": "N:6 R:7 A:5.5 C:6.5"
},
"readerRank": 618,
"researchRank": 586,
"recommendedScore": 113.93
}
External Links
No external links
Backlinks (0)
No backlinks