Longterm Wiki

Intervention Effectiveness Matrix

intervention-effectiveness-matrix (analysis)
Path: /knowledge-base/models/intervention-effectiveness-matrix/
Entity ID (EID): E177
4 backlinks · Quality: 73 · Updated: 2026-03-13
Page Record
database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "intervention-effectiveness-matrix",
  "numericId": null,
  "path": "/knowledge-base/models/intervention-effectiveness-matrix/",
  "filePath": "knowledge-base/models/intervention-effectiveness-matrix.mdx",
  "title": "Intervention Effectiveness Matrix",
  "quality": 73,
  "readerImportance": 89.5,
  "researchImportance": 7,
  "tacticalValue": 58,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Quantitative analysis mapping 15+ AI safety interventions to specific risks reveals critical misallocation: 40% of 2024 funding (\\$400M+) flows to RLHF methods showing only 10-20% effectiveness against deceptive alignment, while interpretability research (\\$52M total, 40-50% effectiveness) and AI Control (70-80% theoretical effectiveness, \\$10M funding) remain severely underfunded. Provides explicit reallocation recommendations: reduce RLHF from 40% to 25%, increase interpretability from 15% to 30%, and establish AI Control at 20% of technical safety budgets.",
  "description": "This model maps 15+ AI safety interventions to specific risk categories with quantitative effectiveness estimates derived from empirical research and expert elicitation. Analysis reveals critical resource misallocation: 40% of 2024 funding (\\$400M+) went to RLHF-based methods showing only 10-20% effectiveness against deceptive alignment, while interpretability research (\\$52M, demonstrating 40-50% effectiveness) remains severely underfunded relative to gap severity.",
  "ratings": {
    "focus": 8.5,
    "novelty": 6.5,
    "rigor": 7,
    "completeness": 8,
    "concreteness": 8.5,
    "actionability": 9
  },
  "category": "models",
  "subcategory": "intervention-models",
  "clusters": [
    "ai-safety",
    "governance",
    "community"
  ],
  "metrics": {
    "wordCount": 4205,
    "tableCount": 32,
    "diagramCount": 3,
    "internalLinks": 88,
    "externalLinks": 21,
    "footnoteCount": 0,
    "bulletRatio": 0.05,
    "sectionCount": 58,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 90,
  "evergreen": true,
  "wordCount": 4205,
  "unconvertedLinks": [
    {
      "text": "Coefficient Giving 2024 Report",
      "url": "https://www.openphilanthropy.org/research/our-progress-in-2024-and-plans-for-2025/",
      "resourceId": "7ca35422b79c3ac9",
      "resourceTitle": "Open Philanthropy: Progress in 2024 and Plans for 2025"
    },
    {
      "text": "AI Safety Funding Analysis",
      "url": "https://forum.effectivealtruism.org/posts/XdhwXppfqrpPL2YDX/an-overview-of-the-ai-safety-funding-situation",
      "resourceId": "80125fcaf04609b8",
      "resourceTitle": "Overview of AI Safety Funding"
    },
    {
      "text": "AI Control",
      "url": "https://arxiv.org/pdf/2312.06942",
      "resourceId": "cc80ab28579c5794",
      "resourceTitle": "Redwood Research's AI Control paper (December 2023)"
    },
    {
      "text": "GovAI research",
      "url": "https://www.governance.ai/analysis/computing-power-and-the-governance-of-ai",
      "resourceId": "482b71342542a659",
      "resourceTitle": "GovAI - Computing Power and the Governance of AI"
    },
    {
      "text": "RAND analysis",
      "url": "https://www.rand.org/pubs/perspectives/PEA3776-1.html",
      "resourceId": "a3e39f7b4281936a",
      "resourceTitle": "RAND research"
    },
    {
      "text": "METR's December 2025 analysis",
      "url": "https://metr.org/blog/2025-12-09-common-elements-of-frontier-ai-safety-policies/",
      "resourceId": "c8782940b880d00f",
      "resourceTitle": "METR's analysis of 12 companies"
    },
    {
      "text": "openphilanthropy.org",
      "url": "https://www.openphilanthropy.org/research/our-progress-in-2024-and-plans-for-2025/",
      "resourceId": "7ca35422b79c3ac9",
      "resourceTitle": "Open Philanthropy: Progress in 2024 and Plans for 2025"
    },
    {
      "text": "EA Forum",
      "url": "https://forum.effectivealtruism.org/posts/XdhwXppfqrpPL2YDX/an-overview-of-the-ai-safety-funding-situation",
      "resourceId": "80125fcaf04609b8",
      "resourceTitle": "Overview of AI Safety Funding"
    },
    {
      "text": "arXiv",
      "url": "https://arxiv.org/pdf/2312.06942",
      "resourceId": "cc80ab28579c5794",
      "resourceTitle": "Redwood Research's AI Control paper (December 2023)"
    },
    {
      "text": "metr.org",
      "url": "https://metr.org/blog/2025-12-09-common-elements-of-frontier-ai-safety-policies/",
      "resourceId": "c8782940b880d00f",
      "resourceTitle": "METR's analysis of 12 companies"
    },
    {
      "text": "governance.ai",
      "url": "https://www.governance.ai/analysis/computing-power-and-the-governance-of-ai",
      "resourceId": "482b71342542a659",
      "resourceTitle": "GovAI - Computing Power and the Governance of AI"
    },
    {
      "text": "rand.org",
      "url": "https://www.rand.org/pubs/perspectives/PEA3776-1.html",
      "resourceId": "a3e39f7b4281936a",
      "resourceTitle": "RAND research"
    }
  ],
  "unconvertedLinkCount": 12,
  "convertedLinkCount": 55,
  "backlinkCount": 4,
  "hallucinationRisk": {
    "level": "medium",
    "score": 40,
    "factors": [
      "no-citations",
      "high-rigor"
    ]
  },
  "entityType": "analysis",
  "redundancy": {
    "maxSimilarity": 20,
    "similarPages": [
      {
        "id": "ai-control",
        "title": "AI Control",
        "path": "/knowledge-base/responses/ai-control/",
        "similarity": 20
      },
      {
        "id": "sleeper-agent-detection",
        "title": "Sleeper Agent Detection",
        "path": "/knowledge-base/responses/sleeper-agent-detection/",
        "similarity": 19
      },
      {
        "id": "technical-research",
        "title": "Technical AI Safety Research",
        "path": "/knowledge-base/responses/technical-research/",
        "similarity": 19
      },
      {
        "id": "technical-pathways",
        "title": "Technical Pathway Decomposition",
        "path": "/knowledge-base/models/technical-pathways/",
        "similarity": 18
      },
      {
        "id": "accident-risks",
        "title": "AI Accident Risk Cruxes",
        "path": "/knowledge-base/cruxes/accident-risks/",
        "similarity": 17
      }
    ]
  },
  "coverage": {
    "passing": 9,
    "total": 13,
    "targets": {
      "tables": 17,
      "diagrams": 2,
      "internalLinks": 34,
      "externalLinks": 21,
      "footnotes": 13,
      "references": 13
    },
    "actuals": {
      "tables": 32,
      "diagrams": 3,
      "internalLinks": 88,
      "externalLinks": 21,
      "footnotes": 0,
      "references": 39,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "green",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:6.5 R:7 A:9 C:8"
  },
  "readerRank": 23,
  "researchRank": 573,
  "recommendedScore": 212.61
}
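
The merge described above (MDX frontmatter + Entity YAML + computed metrics) can be sketched in a few lines of TypeScript. This is a minimal illustration under stated assumptions, not the wiki's actual build code: gray-matter is an assumed frontmatter parser, and every function name here is hypothetical. The coverage rule at the end is inferred from the targets/actuals in this record (an item turns "green" when the actual count meets the target); the real rule may differ.

// Hypothetical sketch of the build-time merge that produces a page record.
// Field names mirror the record above; everything else is an assumption.
import matter from "gray-matter"; // common frontmatter parser (assumption)
import { readFileSync } from "node:fs";

interface ComputedMetrics {
  wordCount: number;
  tableCount: number;
  internalLinks: number;
  externalLinks: number;
  footnoteCount: number;
}

// Rough text-based proxies for the counts shown under "metrics".
function computeMetrics(body: string): ComputedMetrics {
  return {
    wordCount: body.split(/\s+/).filter(Boolean).length,
    // One markdown header-separator row per table.
    tableCount: (body.match(/^\|(?:\s*:?-+:?\s*\|)+\s*$/gm) ?? []).length,
    internalLinks: (body.match(/\]\(\/knowledge-base\//g) ?? []).length,
    externalLinks: (body.match(/\]\(https?:\/\//g) ?? []).length,
    footnoteCount: (body.match(/\[\^\w+\]/g) ?? []).length,
  };
}

// Merge order (assumed): entity YAML defaults < MDX frontmatter < computed metrics.
function buildRecord(mdxPath: string, entityYaml: Record<string, unknown>) {
  const { data: frontmatter, content } = matter(readFileSync(mdxPath, "utf8"));
  return { ...entityYaml, ...frontmatter, metrics: computeMetrics(content) };
}

// Coverage status inferred from this record: actual >= target maps to "green"
// (e.g. tables 32/17 is green, footnotes 0/13 is red).
function coverageStatus(actual: number, target: number): "green" | "red" {
  return actual >= target ? "green" : "red";
}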
External Links

No external links

Backlinks (4)
id | title | type | relationship
safety-research-allocation | AI Safety Research Allocation Model | analysis | related
ai-acceleration-tradeoff | AI Acceleration Tradeoff Model | analysis |
__index__/knowledge-base/models | Analytical Models | concept |
safety-capability-tradeoff | Safety-Capability Tradeoff Model | analysis |
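
The backlinks listed above are plausibly computed by inverting each page's converted internal links at build time. A hedged sketch of that inversion, with all type and field names hypothetical:

// Hypothetical backlink inversion: a page's backlinks are every other
// page whose internal links include it.
interface PageLinkInfo {
  id: string;
  title: string;
  entityType: string;
  internalLinkIds: string[]; // ids of pages this page links to (assumption)
}

function backlinksFor(targetId: string, pages: PageLinkInfo[]) {
  return pages
    .filter((p) => p.id !== targetId && p.internalLinkIds.includes(targetId))
    .map(({ id, title, entityType }) => ({ id, title, type: entityType }));
}

// e.g. backlinksFor("intervention-effectiveness-matrix", allPages)
// would yield the four rows in the table above.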