Agentic AI
agentic-ai · capability · Path: /knowledge-base/capabilities/agentic-ai/
Entity ID (EID): E2
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "agentic-ai",
"numericId": null,
"path": "/knowledge-base/capabilities/agentic-ai/",
"filePath": "knowledge-base/capabilities/agentic-ai.mdx",
"title": "Agentic AI",
"quality": 68,
"readerImportance": 72.5,
"researchImportance": 94.5,
"tacticalValue": 88,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "Analysis of agentic AI capabilities and deployment challenges, documenting industry forecasts (40% of enterprise apps by 2026, $199B market by 2034) alongside implementation difficulties (40%+ project cancellation rate predicted by 2027). Synthesizes technical benchmarks (SWE-bench scores improving from 13.86% to 49% in 8 months), security vulnerabilities, and safety frameworks from major AI labs. Updated to include 2025 product launches (ChatGPT agent, Codex, Operator, GPT-5 family, Gemini Robotics), new governance frameworks (AGENTS.md, Practices for Governing Agentic AI Systems), and expanded security research.",
"description": "AI systems that autonomously take actions in the world to accomplish goals. Industry forecasts project 40% of enterprise applications will include AI agents by 2026, though analysts predict 40%+ of projects will be cancelled by 2027 due to implementation challenges.",
"ratings": {
"focus": 7.5,
"novelty": 3.5,
"rigor": 6,
"completeness": 8,
"concreteness": 7.5,
"actionability": 5.5,
"objectivity": 7
},
"category": "capabilities",
"subcategory": "agentic",
"clusters": [
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 8786,
"tableCount": 24,
"diagramCount": 3,
"internalLinks": 53,
"externalLinks": 2,
"footnoteCount": 21,
"bulletRatio": 0.03,
"sectionCount": 41,
"hasOverview": true,
"structuralScore": 15
},
"suggestedQuality": 100,
"updateFrequency": 21,
"evergreen": true,
"wordCount": 8786,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 33,
"backlinkCount": 35,
"hallucinationRisk": {
"level": "high",
"score": 70,
"factors": [
"severe-truncation"
],
"integrityIssues": [
"severe-truncation"
]
},
"entityType": "capability",
"redundancy": {
"maxSimilarity": 23,
"similarPages": [
{
"id": "scalable-oversight",
"title": "Scalable Oversight",
"path": "/knowledge-base/responses/scalable-oversight/",
"similarity": 23
},
{
"id": "language-models",
"title": "Large Language Models",
"path": "/knowledge-base/capabilities/language-models/",
"similarity": 22
},
{
"id": "solutions",
"title": "AI Safety Solution Cruxes",
"path": "/knowledge-base/cruxes/solutions/",
"similarity": 22
},
{
"id": "why-alignment-hard",
"title": "Why Alignment Might Be Hard",
"path": "/knowledge-base/debates/why-alignment-hard/",
"similarity": 22
},
{
"id": "reasoning",
"title": "Reasoning and Planning",
"path": "/knowledge-base/capabilities/reasoning/",
"similarity": 21
}
]
},
"changeHistory": [
{
"date": "2026-02-18",
"branch": "claude/fix-issue-240-N5irU",
"title": "Surface tacticalValue in /wiki table and score 53 pages",
"summary": "Added `tacticalValue` to `ExploreItem` interface, `getExploreItems()` mappings, the `/wiki` explore table (new sortable \"Tact.\" column), and the card view sort dropdown. Scored 49 new pages with tactical values (4 were already scored), bringing total to 53.",
"model": "sonnet-4",
"duration": "~30min"
},
{
"date": "2026-02-17",
"branch": "claude/top-priority-update-WurDM",
"title": "Improve top 5 foundational wiki pages",
"summary": "Improved the 5 highest-importance, lowest-quality wiki pages using the Crux content pipeline. All were stubs (7 words) or had quality=0 and are now comprehensive articles with citations, EntityLinks, and balanced perspectives.",
"pr": 188
}
],
"coverage": {
"passing": 5,
"total": 13,
"targets": {
"tables": 35,
"diagrams": 4,
"internalLinks": 70,
"externalLinks": 44,
"footnotes": 26,
"references": 26
},
"actuals": {
"tables": 24,
"diagrams": 3,
"internalLinks": 53,
"externalLinks": 2,
"footnotes": 21,
"references": 19,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "green",
"overview": "green",
"tables": "amber",
"diagrams": "amber",
"internalLinks": "amber",
"externalLinks": "amber",
"footnotes": "amber",
"references": "amber",
"quotes": "red",
"accuracy": "red"
},
"editHistoryCount": 2,
"ratingsString": "N:3.5 R:6 A:5.5 C:8"
},
"readerRank": 139,
"researchRank": 3,
"recommendedScore": 194.11
}
External Links
{
"eaForum": "https://forum.effectivealtruism.org/topics/agentic-ai"
}
Backlinks (35)
| id | title | type | relationship |
|---|---|---|---|
| language-models | Large Language Models | capability | — |
| long-horizon | Long-Horizon Autonomous Tasks | capability | — |
| tool-use | Tool Use and Computer Use | capability | — |
| ai-powered-investigation | AI-Powered Investigation | capability | — |
| autonomous-cooperative-agents | Autonomous Cooperative Agents | concept | — |
| ai-control | AI Control | safety-agenda | — |
| sandboxing | Sandboxing / Containment | approach | — |
| tool-restrictions | Tool-Use Restrictions | approach | — |
| multi-agent | Multi-Agent Safety | approach | — |
| autonomous-replication | Autonomous Replication | risk | — |
| __index__/knowledge-base/capabilities | AI Capabilities | concept | — |
| reasoning | Reasoning and Planning | capability | — |
| situational-awareness | Situational Awareness | capability | — |
| solutions | AI Safety Solution Cruxes | crux | — |
| agi-development | AGI Development | concept | — |
| claude-code-espionage-2025 | Claude Code Espionage Incident (2025) | concept | — |
| __index__/knowledge-base | Knowledge Base | concept | — |
| corrigibility-failure-pathways | Corrigibility Failure Pathways | analysis | — |
| cyberweapons-attack-automation | Autonomous Cyber Attack Timeline | analysis | — |
| goal-misgeneralization-probability | Goal Misgeneralization Probability Model | analysis | — |
| intervention-timing-windows | Intervention Timing Windows | analysis | — |
| risk-activation-timeline | Risk Activation Timeline Model | analysis | — |
| ai-revenue-sources | AI Revenue Sources | organization | — |
| microsoft | Microsoft AI | organization | — |
| nist-ai | NIST and AI Safety | organization | — |
| palisade-research | Palisade Research | organization | — |
| rethink-priorities | Rethink Priorities | organization | — |
| paul-christiano | Paul Christiano | person | — |
| alignment | AI Alignment | approach | — |
| red-teaming | Red Teaming | approach | — |
| ai-enabled-untraceable-misuse | AI-Enabled Untraceable Misuse | risk | — |
| instrumental-convergence | Instrumental Convergence | risk | — |
| power-seeking | Power-Seeking AI | risk | — |
| rogue-ai-scenarios | Rogue AI Scenarios | risk | — |
| trust-cascade | AI Trust Cascade Failure | risk | — |