EU AI Act
eu-ai-act (policy) — path: /knowledge-base/responses/eu-ai-act/
Entity ID (EID): E127
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "eu-ai-act",
"numericId": null,
"path": "/knowledge-base/responses/eu-ai-act/",
"filePath": "knowledge-base/responses/eu-ai-act.mdx",
"title": "EU AI Act",
"quality": 55,
"readerImportance": 41.5,
"researchImportance": 69.5,
"tacticalValue": null,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "Comprehensive overview of the EU AI Act's risk-based regulatory framework, particularly its two-tier approach to foundation models that distinguishes between standard and systemic risk AI systems. The analysis provides valuable implementation details and governance structure but cuts off before addressing key criticisms and global implications.",
"description": "The world's first comprehensive AI regulation, adopting a risk-based approach to regulate foundation models and general-purpose AI systems",
"ratings": {
"novelty": 4,
"rigor": 7,
"actionability": 6,
"completeness": 5
},
"category": "responses",
"subcategory": "legislation",
"clusters": [
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 3515,
"tableCount": 5,
"diagramCount": 0,
"internalLinks": 8,
"externalLinks": 1,
"footnoteCount": 0,
"bulletRatio": 0.45,
"sectionCount": 33,
"hasOverview": true,
"structuralScore": 11
},
"suggestedQuality": 73,
"updateFrequency": 7,
"evergreen": true,
"wordCount": 3515,
"unconvertedLinks": [
{
"text": "europarl.europa.eu",
"url": "https://www.europarl.europa.eu/topics/en/article/20230601STO93804/eu-ai-act-first-regulation-on-artificial-intelligence",
"resourceId": "373effab2c489c24",
"resourceTitle": "European Parliament: EU AI Act Overview"
}
],
"unconvertedLinkCount": 1,
"convertedLinkCount": 0,
"backlinkCount": 65,
"citationHealth": {
"total": 84,
"withQuotes": 76,
"verified": 74,
"accuracyChecked": 74,
"accurate": 56,
"inaccurate": 1,
"avgScore": 0.9426634601856533
},
"hallucinationRisk": {
"level": "medium",
"score": 45,
"factors": [
"no-citations",
"few-external-sources",
"high-rigor"
]
},
"entityType": "policy",
"redundancy": {
"maxSimilarity": 19,
"similarPages": [
{
"id": "california-sb53",
"title": "California SB 53",
"path": "/knowledge-base/responses/california-sb53/",
"similarity": 19
},
{
"id": "coe-ai-convention",
"title": "Council of Europe Framework Convention on Artificial Intelligence",
"path": "/knowledge-base/responses/coe-ai-convention/",
"similarity": 19
},
{
"id": "frontier-model-forum",
"title": "Frontier Model Forum",
"path": "/knowledge-base/organizations/frontier-model-forum/",
"similarity": 18
},
{
"id": "china-ai-regulations",
"title": "China AI Regulations",
"path": "/knowledge-base/responses/china-ai-regulations/",
"similarity": 18
},
{
"id": "colorado-ai-act",
"title": "Colorado AI Act (SB 205)",
"path": "/knowledge-base/responses/colorado-ai-act/",
"similarity": 18
}
]
},
"coverage": {
"passing": 7,
"total": 13,
"targets": {
"tables": 14,
"diagrams": 1,
"internalLinks": 28,
"externalLinks": 18,
"footnotes": 11,
"references": 11
},
"actuals": {
"tables": 5,
"diagrams": 0,
"internalLinks": 8,
"externalLinks": 1,
"footnotes": 0,
"references": 14,
"quotesWithQuotes": 76,
"quotesTotal": 84,
"accuracyChecked": 74,
"accuracyTotal": 84
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "amber",
"diagrams": "red",
"internalLinks": "amber",
"externalLinks": "amber",
"footnotes": "red",
"references": "green",
"quotes": "green",
"accuracy": "green"
},
"ratingsString": "N:4 R:7 A:6 C:5"
},
"readerRank": 363,
"researchRank": 155,
"recommendedScore": 152.61
}
External Links
{
"wikipedia": "https://en.wikipedia.org/wiki/Artificial_Intelligence_Act",
"eaForum": "https://forum.effectivealtruism.org/topics/eu-ai-act",
"wikidata": "https://www.wikidata.org/wiki/Q108456694",
"grokipedia": "https://grokipedia.com/page/Artificial_Intelligence_Act"
}
Backlinks (65)
| id | title | type | relationship |
|---|---|---|---|
| governance-policy | AI Governance and Policy | crux | — |
| governance-focused | Governance-Focused Worldview | concept | — |
| short-timeline-policy-implications | Short AI Timeline Policy Implications | analysis | — |
| eu-ai-office | EU AI Office | organization | — |
| controlai | ControlAI | organization | — |
| california-sb1047 | Safe and Secure Innovation for Frontier Artificial Intelligence Models Act | policy | — |
| china-ai-regulations | China AI Regulatory Framework | policy | — |
| us-executive-order | US Executive Order on Safe, Secure, and Trustworthy AI | policy | — |
| model-auditing | Third-Party Model Auditing | approach | — |
| california-sb53 | California SB 53 | policy | — |
| evals-governance | Evals-Based Deployment Gates | policy | — |
| thresholds | Compute Thresholds | policy | — |
| bletchley-declaration | Bletchley Declaration | policy | — |
| coordination-mechanisms | International Coordination Mechanisms | policy | — |
| model-registries | Model Registries | policy | — |
| open-source | Open Source AI Safety | approach | — |
| whistleblower-protections | AI Whistleblower Protections | policy | — |
| agentic-ai | Agentic AI | capability | — |
| ai-powered-investigation | AI-Powered Investigation | capability | — |
| persuasion | Persuasion and Social Manipulation | capability | — |
| open-vs-closed | Open vs Closed Source AI | crux | — |
| regulation-debate | Government Regulation vs Industry Self-Governance | crux | — |
| ea-longtermist-wins-losses | EA and Longtermist Wins and Losses | concept | — |
| __index__/knowledge-base/history | History | concept | — |
| mainstream-era | Mainstream Era (2020-Present) | historical | — |
| capability-alignment-race | Capability-Alignment Race Model | analysis | — |
| instrumental-convergence-framework | Instrumental Convergence Framework | analysis | — |
| intervention-effectiveness-matrix | Intervention Effectiveness Matrix | analysis | — |
| intervention-timing-windows | Intervention Timing Windows | analysis | — |
| planning-for-frontier-lab-scaling | Planning for Frontier Lab Scaling | analysis | — |
| risk-activation-timeline | Risk Activation Timeline Model | analysis | — |
| anthropic | Anthropic | organization | — |
| arc | ARC (Alignment Research Center) | organization | — |
| epoch-ai | Epoch AI | organization | — |
| fli | Future of Life Institute (FLI) | organization | — |
| goodfire | Goodfire | organization | — |
| gpai | Global Partnership on Artificial Intelligence (GPAI) | organization | — |
| microsoft | Microsoft AI | organization | — |
| openai | OpenAI | organization | — |
| dan-hendrycks | Dan Hendrycks | person | — |
| geoffrey-hinton | Geoffrey Hinton | person | — |
| holden-karnofsky | Holden Karnofsky | person | — |
| stuart-russell | Stuart Russell | person | — |
| yoshua-bengio | Yoshua Bengio | person | — |
| canada-aida | Canada AIDA | policy | — |
| coe-ai-convention | Council of Europe Framework Convention on Artificial Intelligence | policy | — |
| colorado-ai-act | Colorado AI Act (SB 205) | policy | — |
| content-authentication | AI Content Authentication | approach | — |
| effectiveness-assessment | Policy Effectiveness Assessment | analysis | — |
| evaluation | AI Evaluation | approach | — |
| __index__/knowledge-base/responses | Safety Responses | concept | — |
| international-summits | International AI Safety Summits | policy | — |
| mit-ai-risk-repository | MIT AI Risk Repository | project | — |
| red-teaming | Red Teaming | approach | — |
| safety-cases | AI Safety Cases | approach | — |
| standards-bodies | AI Standards Bodies | policy | — |
| texas-traiga | Texas TRAIGA Responsible AI Governance Act | policy | — |
| consensus-manufacturing | AI-Powered Consensus Manufacturing | risk | — |
| cyber-psychosis | AI-Induced Cyber Psychosis | risk | — |
| deepfakes | Deepfakes | risk | — |
| fraud | AI-Powered Fraud | risk | — |
| institutional-capture | AI-Driven Institutional Decision Capture | risk | — |
| knowledge-monopoly | AI Knowledge Monopoly | risk | — |
| sandbagging | AI Capability Sandbagging | risk | — |
| surveillance | Mass Surveillance | risk | — |