Longterm Wiki

Feedback Loop & Cascade Model

feedback-loops · analysis · Path: /knowledge-base/models/feedback-loops/
Entity ID (EID): E417
← Back to page · 1 backlink · Quality: 59 · Updated: 2026-03-13
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "feedback-loops",
  "numericId": null,
  "path": "/knowledge-base/models/feedback-loops/",
  "filePath": "knowledge-base/models/feedback-loops.mdx",
  "title": "Feedback Loop & Cascade Model",
  "quality": 59,
  "readerImportance": 36,
  "researchImportance": 88,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "System dynamics model showing AI capabilities growing at 2.5x/year vs safety at 1.2x/year, with positive feedback loops (investment→value, AI→automation) 2-3x stronger than negative loops (accidents→regulation). Estimates 10-20% probability of crossing critical thresholds (recursive improvement, deception capability) within 2-5 years, requiring $500M-2B/year to strengthen dampening mechanisms.",
  "description": "This model analyzes how AI risks emerge from reinforcing feedback loops. Capabilities compound at 2.5x per year on key benchmarks while safety measures improve at only 1.2x per year, with current safety investment at just 0.1% of capability investment.",
  "ratings": {
    "focus": 8.5,
    "novelty": 4,
    "rigor": 5.5,
    "completeness": 7,
    "concreteness": 6.5,
    "actionability": 5.5
  },
  "category": "models",
  "subcategory": "dynamics-models",
  "clusters": [
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 2199,
    "tableCount": 12,
    "diagramCount": 2,
    "internalLinks": 1,
    "externalLinks": 22,
    "footnoteCount": 0,
    "bulletRatio": 0.03,
    "sectionCount": 22,
    "hasOverview": true,
    "structuralScore": 14
  },
  "suggestedQuality": 93,
  "updateFrequency": 90,
  "evergreen": true,
  "wordCount": 2199,
  "unconvertedLinks": [
    {
      "text": "International AI Safety Report 2025",
      "url": "https://internationalaisafetyreport.org/publication/international-ai-safety-report-2025",
      "resourceId": "b163447fdc804872",
      "resourceTitle": "International AI Safety Report 2025"
    },
    {
      "text": "2025 AI Safety Index",
      "url": "https://futureoflife.org/ai-safety-index-summer-2025/",
      "resourceId": "df46edd6fa2078d1",
      "resourceTitle": "FLI AI Safety Index Summer 2025"
    },
    {
      "text": "2025 AI Index Report from Stanford HAI",
      "url": "https://hai.stanford.edu/ai-index/2025-ai-index-report",
      "resourceId": "da87f2b213eb9272",
      "resourceTitle": "Stanford AI Index 2025"
    },
    {
      "text": "Stanford HAI 2025",
      "url": "https://hai.stanford.edu/ai-index/2025-ai-index-report/economy",
      "resourceId": "1db7de7741f907e5",
      "resourceTitle": "Stanford AI Index 2025"
    },
    {
      "text": "LessWrong Analysis",
      "url": "https://www.lesswrong.com/posts/WGpFFJo2uFe5ssgEb/an-overview-of-the-ai-safety-funding-situation",
      "resourceId": "b1ab921f9cbae109",
      "resourceTitle": "An Overview of the AI Safety Funding Situation (LessWrong)"
    },
    {
      "text": "International AI Safety Report 2025",
      "url": "https://internationalaisafetyreport.org/publication/international-ai-safety-report-2025",
      "resourceId": "b163447fdc804872",
      "resourceTitle": "International AI Safety Report 2025"
    },
    {
      "text": "2025 AI Safety Index",
      "url": "https://futureoflife.org/ai-safety-index-summer-2025/",
      "resourceId": "df46edd6fa2078d1",
      "resourceTitle": "FLI AI Safety Index Summer 2025"
    },
    {
      "text": "Stanford HAI 2025 AI Index Report",
      "url": "https://hai.stanford.edu/ai-index/2025-ai-index-report",
      "resourceId": "da87f2b213eb9272",
      "resourceTitle": "Stanford AI Index 2025"
    },
    {
      "text": "AI Safety Funding Overview",
      "url": "https://www.lesswrong.com/posts/WGpFFJo2uFe5ssgEb/an-overview-of-the-ai-safety-funding-situation",
      "resourceId": "b1ab921f9cbae109",
      "resourceTitle": "An Overview of the AI Safety Funding Situation (LessWrong)"
    }
  ],
  "unconvertedLinkCount": 9,
  "convertedLinkCount": 0,
  "backlinkCount": 1,
  "hallucinationRisk": {
    "level": "medium",
    "score": 55,
    "factors": [
      "no-citations"
    ]
  },
  "entityType": "analysis",
  "redundancy": {
    "maxSimilarity": 18,
    "similarPages": [
      {
        "id": "societal-response",
        "title": "Societal Response & Adaptation Model",
        "path": "/knowledge-base/models/societal-response/",
        "similarity": 18
      },
      {
        "id": "flash-dynamics-threshold",
        "title": "Flash Dynamics Threshold Model",
        "path": "/knowledge-base/models/flash-dynamics-threshold/",
        "similarity": 16
      },
      {
        "id": "technical-pathways",
        "title": "Technical Pathway Decomposition",
        "path": "/knowledge-base/models/technical-pathways/",
        "similarity": 16
      },
      {
        "id": "winner-take-all-concentration",
        "title": "Winner-Take-All Concentration Model",
        "path": "/knowledge-base/models/winner-take-all-concentration/",
        "similarity": 16
      },
      {
        "id": "self-improvement",
        "title": "Self-Improvement and Recursive Enhancement",
        "path": "/knowledge-base/capabilities/self-improvement/",
        "similarity": 15
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 9,
      "diagrams": 1,
      "internalLinks": 18,
      "externalLinks": 11,
      "footnotes": 7,
      "references": 7
    },
    "actuals": {
      "tables": 12,
      "diagrams": 2,
      "internalLinks": 1,
      "externalLinks": 22,
      "footnotes": 0,
      "references": 5,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "amber",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "amber",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:4 R:5.5 A:5.5 C:7"
  },
  "readerRank": 406,
  "researchRank": 34,
  "recommendedScore": 157.7
}
External Links

No external links

Backlinks (1)
id · title · type · relationship
__index__ (/knowledge-base/models) · Analytical Models · concept
Longterm Wiki