Longterm Wiki

Agentic AI

agentic-ai · capability · Path: /knowledge-base/capabilities/agentic-ai/
E2 — Entity ID (EID)
← Back to page · 35 backlinks · Quality: 68 · Updated: 2026-03-13
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "agentic-ai",
  "numericId": null,
  "path": "/knowledge-base/capabilities/agentic-ai/",
  "filePath": "knowledge-base/capabilities/agentic-ai.mdx",
  "title": "Agentic AI",
  "quality": 68,
  "readerImportance": 72.5,
  "researchImportance": 94.5,
  "tacticalValue": 88,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Analysis of agentic AI capabilities and deployment challenges, documenting industry forecasts (40% of enterprise apps by 2026, $199B market by 2034) alongside implementation difficulties (40%+ project cancellation rate predicted by 2027). Synthesizes technical benchmarks (SWE-bench scores improving from 13.86% to 49% in 8 months), security vulnerabilities, and safety frameworks from major AI labs. Updated to include 2025 product launches (ChatGPT agent, Codex, Operator, GPT-5 family, Gemini Robotics), new governance frameworks (AGENTS.md, Practices for Governing Agentic AI Systems), and expanded security research.",
  "description": "AI systems that autonomously take actions in the world to accomplish goals. Industry forecasts project 40% of enterprise applications will include AI agents by 2026, though analysts predict 40%+ of projects will be cancelled by 2027 due to implementation challenges.",
  "ratings": {
    "focus": 7.5,
    "novelty": 3.5,
    "rigor": 6,
    "completeness": 8,
    "concreteness": 7.5,
    "actionability": 5.5,
    "objectivity": 7
  },
  "category": "capabilities",
  "subcategory": "agentic",
  "clusters": [
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 8786,
    "tableCount": 24,
    "diagramCount": 3,
    "internalLinks": 53,
    "externalLinks": 2,
    "footnoteCount": 21,
    "bulletRatio": 0.03,
    "sectionCount": 41,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 8786,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 33,
  "backlinkCount": 35,
  "hallucinationRisk": {
    "level": "high",
    "score": 70,
    "factors": [
      "severe-truncation"
    ],
    "integrityIssues": [
      "severe-truncation"
    ]
  },
  "entityType": "capability",
  "redundancy": {
    "maxSimilarity": 23,
    "similarPages": [
      {
        "id": "scalable-oversight",
        "title": "Scalable Oversight",
        "path": "/knowledge-base/responses/scalable-oversight/",
        "similarity": 23
      },
      {
        "id": "language-models",
        "title": "Large Language Models",
        "path": "/knowledge-base/capabilities/language-models/",
        "similarity": 22
      },
      {
        "id": "solutions",
        "title": "AI Safety Solution Cruxes",
        "path": "/knowledge-base/cruxes/solutions/",
        "similarity": 22
      },
      {
        "id": "why-alignment-hard",
        "title": "Why Alignment Might Be Hard",
        "path": "/knowledge-base/debates/why-alignment-hard/",
        "similarity": 22
      },
      {
        "id": "reasoning",
        "title": "Reasoning and Planning",
        "path": "/knowledge-base/capabilities/reasoning/",
        "similarity": 21
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-18",
      "branch": "claude/fix-issue-240-N5irU",
      "title": "Surface tacticalValue in /wiki table and score 53 pages",
      "summary": "Added `tacticalValue` to `ExploreItem` interface, `getExploreItems()` mappings, the `/wiki` explore table (new sortable \"Tact.\" column), and the card view sort dropdown. Scored 49 new pages with tactical values (4 were already scored), bringing total to 53.",
      "model": "sonnet-4",
      "duration": "~30min"
    },
    {
      "date": "2026-02-17",
      "branch": "claude/top-priority-update-WurDM",
      "title": "Improve top 5 foundational wiki pages",
      "summary": "Improved the 5 highest-importance, lowest-quality wiki pages using the Crux content pipeline. All were stubs (7 words) or had quality=0 and are now comprehensive articles with citations, EntityLinks, and balanced perspectives.",
      "pr": 188
    }
  ],
  "coverage": {
    "passing": 5,
    "total": 13,
    "targets": {
      "tables": 35,
      "diagrams": 4,
      "internalLinks": 70,
      "externalLinks": 44,
      "footnotes": 26,
      "references": 26
    },
    "actuals": {
      "tables": 24,
      "diagrams": 3,
      "internalLinks": 53,
      "externalLinks": 2,
      "footnotes": 21,
      "references": 19,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "amber",
      "diagrams": "amber",
      "internalLinks": "amber",
      "externalLinks": "amber",
      "footnotes": "amber",
      "references": "amber",
      "quotes": "red",
      "accuracy": "red"
    },
    "editHistoryCount": 2,
    "ratingsString": "N:3.5 R:6 A:5.5 C:8"
  },
  "readerRank": 139,
  "researchRank": 3,
  "recommendedScore": 194.11
}
External Links
{
  "eaForum": "https://forum.effectivealtruism.org/topics/agentic-ai"
}
Backlinks (35)
id | title | type | relationship
language-models | Large Language Models | capability
long-horizon | Long-Horizon Autonomous Tasks | capability
tool-use | Tool Use and Computer Use | capability
ai-powered-investigation | AI-Powered Investigation | capability
autonomous-cooperative-agents | Autonomous Cooperative Agents | concept
ai-control | AI Control | safety-agenda
sandboxing | Sandboxing / Containment | approach
tool-restrictions | Tool-Use Restrictions | approach
multi-agent | Multi-Agent Safety | approach
autonomous-replication | Autonomous Replication | risk
__index__/knowledge-base/capabilities | AI Capabilities | concept
reasoning | Reasoning and Planning | capability
situational-awareness | Situational Awareness | capability
solutions | AI Safety Solution Cruxes | crux
agi-development | AGI Development | concept
claude-code-espionage-2025 | Claude Code Espionage Incident (2025) | concept
__index__/knowledge-base | Knowledge Base | concept
corrigibility-failure-pathways | Corrigibility Failure Pathways | analysis
cyberweapons-attack-automation | Autonomous Cyber Attack Timeline | analysis
goal-misgeneralization-probability | Goal Misgeneralization Probability Model | analysis
intervention-timing-windows | Intervention Timing Windows | analysis
risk-activation-timeline | Risk Activation Timeline Model | analysis
ai-revenue-sources | AI Revenue Sources | organization
microsoft | Microsoft AI | organization
nist-ai | NIST and AI Safety | organization
palisade-research | Palisade Research | organization
rethink-priorities | Rethink Priorities | organization
paul-christiano | Paul Christiano | person
alignment | AI Alignment | approach
red-teaming | Red Teaming | approach
ai-enabled-untraceable-misuse | AI-Enabled Untraceable Misuse | risk
instrumental-convergence | Instrumental Convergence | risk
power-seeking | Power-Seeking AI | risk
rogue-ai-scenarios | Rogue AI Scenarios | risk
trust-cascade | AI Trust Cascade Failure | risk
Longterm Wiki