Longterm Wiki

Agent Foundations

agent-foundations · approach · Path: /knowledge-base/responses/agent-foundations/
E584 — Entity ID (EID)
← Back to page · 8 backlinks · Quality: 59 · Updated: 2026-03-13
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "agent-foundations",
  "numericId": null,
  "path": "/knowledge-base/responses/agent-foundations/",
  "filePath": "knowledge-base/responses/agent-foundations.mdx",
  "title": "Agent Foundations",
  "quality": 59,
  "readerImportance": 26,
  "researchImportance": 38.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Agent foundations research (MIRI's mathematical frameworks for aligned agency) faces low tractability after 10+ years with core problems unsolved, leading to MIRI's 2024 strategic pivot away from the field. Assessment shows ~15-25% probability the work is essential, 60-75% confidence in low tractability, with value 3-5x higher under long timeline assumptions.",
  "description": "Agent foundations research develops mathematical frameworks for understanding aligned agency, including embedded agency, decision theory, logical induction, and corrigibility. MIRI's 2024 strategic shift away from this work, citing slow progress, has reignited debate about whether theoretical prerequisites exist for alignment or whether empirical approaches on neural networks are more tractable.",
  "ratings": {
    "novelty": 4.5,
    "rigor": 6,
    "actionability": 5.5,
    "completeness": 7
  },
  "category": "responses",
  "subcategory": "alignment-theoretical",
  "clusters": [
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 2156,
    "tableCount": 9,
    "diagramCount": 1,
    "internalLinks": 38,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.12,
    "sectionCount": 25,
    "hasOverview": true,
    "structuralScore": 12
  },
  "suggestedQuality": 80,
  "updateFrequency": 90,
  "evergreen": true,
  "wordCount": 2156,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 20,
  "backlinkCount": 8,
  "hallucinationRisk": {
    "level": "medium",
    "score": 50,
    "factors": [
      "no-citations",
      "few-external-sources",
      "conceptual-content"
    ]
  },
  "entityType": "approach",
  "redundancy": {
    "maxSimilarity": 16,
    "similarPages": [
      {
        "id": "research-agendas",
        "title": "AI Alignment Research Agenda Comparison",
        "path": "/knowledge-base/responses/research-agendas/",
        "similarity": 16
      },
      {
        "id": "technical-research",
        "title": "Technical AI Safety Research",
        "path": "/knowledge-base/responses/technical-research/",
        "similarity": 16
      },
      {
        "id": "corrigibility",
        "title": "Corrigibility Research",
        "path": "/knowledge-base/responses/corrigibility/",
        "similarity": 15
      },
      {
        "id": "corrigibility-failure",
        "title": "Corrigibility Failure",
        "path": "/knowledge-base/risks/corrigibility-failure/",
        "similarity": 15
      },
      {
        "id": "instrumental-convergence",
        "title": "Instrumental Convergence",
        "path": "/knowledge-base/risks/instrumental-convergence/",
        "similarity": 15
      }
    ]
  },
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 9,
      "diagrams": 1,
      "internalLinks": 17,
      "externalLinks": 11,
      "footnotes": 6,
      "references": 6
    },
    "actuals": {
      "tables": 9,
      "diagrams": 1,
      "internalLinks": 38,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 12,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:4.5 R:6 A:5.5 C:7"
  },
  "readerRank": 482,
  "researchRank": 356,
  "recommendedScore": 152.66
}
External Links
{
  "lesswrong": "https://www.lesswrong.com/tag/agent-foundations",
  "stampy": "https://aisafety.info/questions/8Iup/What-is-agent-foundations",
  "alignmentForum": "https://www.alignmentforum.org/tag/agent-foundations"
}
Backlinks (8)
id | title | relationship
deep-learning-era | Deep Learning Revolution (2012-2020) | historical
miri-era | The MIRI Era (2000-2015) | historical
capability-alignment-race | Capability-Alignment Race Model | analysis
miri | MIRI (Machine Intelligence Research Institute) | organization
alignment-theoretical-overview | Theoretical Foundations (Overview) | concept
research-agendas | AI Alignment Research Agenda Comparison | crux
doomer | AI Doomer Worldview | concept
long-timelines | Long-Timelines Technical Worldview | concept
Longterm Wiki