Longterm Wiki

AI-Human Hybrid Systems

hybrid-systems · approach · Path: /knowledge-base/responses/hybrid-systems/
Entity ID (EID): E161
4 backlinks · Quality: 91 · Updated: 2026-03-13
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "hybrid-systems",
  "numericId": null,
  "path": "/knowledge-base/responses/hybrid-systems/",
  "filePath": "knowledge-base/responses/hybrid-systems.mdx",
  "title": "AI-Human Hybrid Systems",
  "quality": 91,
  "readerImportance": 63,
  "researchImportance": 70.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Hybrid AI-human systems achieve 15-40% error reduction across domains through six design patterns, with evidence from Meta (23% false positive reduction), Stanford Healthcare (27% diagnostic improvement), and forecasting platforms. Key risks include automation bias (55% error detection failure in aviation) and skill atrophy (23% navigation degradation), requiring mitigation through uncertainty visualization and maintenance programs.",
  "description": "Systematic architectures combining AI capabilities with human judgment showing 15-40% error reduction across domains. Evidence from content moderation at Meta (23% false positive reduction), medical diagnosis at Stanford (27% error reduction), and forecasting platforms demonstrates superior performance over single-agent approaches through six core design patterns.",
  "ratings": {
    "novelty": 4.5,
    "rigor": 6.5,
    "actionability": 7,
    "completeness": 6.5
  },
  "category": "responses",
  "subcategory": "epistemic-approaches",
  "clusters": [
    "ai-safety",
    "epistemics"
  ],
  "metrics": {
    "wordCount": 2436,
    "tableCount": 17,
    "diagramCount": 1,
    "internalLinks": 40,
    "externalLinks": 12,
    "footnoteCount": 0,
    "bulletRatio": 0.23,
    "sectionCount": 27,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 2436,
  "unconvertedLinks": [
    {
      "text": "Horowitz & Kahn 2024",
      "url": "https://academic.oup.com/isq/article/68/2/sqae020/7638566",
      "resourceId": "b9b538f4765a69af",
      "resourceTitle": "A 2024 study in International Studies Quarterly"
    },
    {
      "text": "2025 systematic review by Romeo and Conti",
      "url": "https://link.springer.com/article/10.1007/s00146-025-02422-7",
      "resourceId": "a96cbf6f98644f2f",
      "resourceTitle": "2025 review in AI & Society"
    },
    {
      "text": "Dunning-Kruger effect",
      "url": "https://academic.oup.com/isq/article/68/2/sqae020/7638566",
      "resourceId": "b9b538f4765a69af",
      "resourceTitle": "A 2024 study in International Studies Quarterly"
    }
  ],
  "unconvertedLinkCount": 3,
  "convertedLinkCount": 34,
  "backlinkCount": 4,
  "hallucinationRisk": {
    "level": "medium",
    "score": 40,
    "factors": [
      "no-citations",
      "conceptual-content",
      "high-quality"
    ]
  },
  "entityType": "approach",
  "redundancy": {
    "maxSimilarity": 17,
    "similarPages": [
      {
        "id": "ai-forecasting",
        "title": "AI-Augmented Forecasting",
        "path": "/knowledge-base/responses/ai-forecasting/",
        "similarity": 17
      },
      {
        "id": "power-seeking-conditions",
        "title": "Power-Seeking Emergence Conditions Model",
        "path": "/knowledge-base/models/power-seeking-conditions/",
        "similarity": 15
      },
      {
        "id": "ai-control",
        "title": "AI Control",
        "path": "/knowledge-base/responses/ai-control/",
        "similarity": 15
      },
      {
        "id": "effectiveness-assessment",
        "title": "Policy Effectiveness Assessment",
        "path": "/knowledge-base/responses/effectiveness-assessment/",
        "similarity": 15
      },
      {
        "id": "nist-ai-rmf",
        "title": "NIST AI Risk Management Framework",
        "path": "/knowledge-base/responses/nist-ai-rmf/",
        "similarity": 15
      }
    ]
  },
  "coverage": {
    "passing": 9,
    "total": 13,
    "targets": {
      "tables": 10,
      "diagrams": 1,
      "internalLinks": 19,
      "externalLinks": 12,
      "footnotes": 7,
      "references": 7
    },
    "actuals": {
      "tables": 17,
      "diagrams": 1,
      "internalLinks": 40,
      "externalLinks": 12,
      "footnotes": 0,
      "references": 32,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "green",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:4.5 R:6.5 A:7 C:6.5"
  },
  "readerRank": 208,
  "researchRank": 147,
  "recommendedScore": 235.24
}
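The record caption describes a build-time merge of MDX frontmatter, Entity YAML, and computed metrics. A minimal sketch of what that merge could look like is below; the wiki's actual build code is not shown on this page, so every name here (buildPageRecord, computeMetrics, the gray-matter dependency, the precedence order of the three sources) is an assumption for illustration only.

```ts
// Hypothetical sketch of the build-time merge described in the record caption.
// None of these identifiers come from the Longterm Wiki source; they are
// assumptions chosen to illustrate the frontmatter + YAML + metrics pipeline.
import matter from "gray-matter";
import { readFileSync } from "fs";

interface PageRecord {
  id: string;
  path: string;
  title: string;
  quality: number | null;
  metrics: Record<string, number | boolean>;
  [key: string]: unknown; // remaining fields (ratings, coverage, etc.)
}

function computeMetrics(content: string) {
  // Build-time metrics derived from the MDX body (simplified versions of
  // wordCount / internalLinks / externalLinks as they appear in the record).
  return {
    wordCount: content.split(/\s+/).filter(Boolean).length,
    internalLinks: (content.match(/\]\(\/knowledge-base\//g) ?? []).length,
    externalLinks: (content.match(/\]\(https?:\/\//g) ?? []).length,
  };
}

function buildPageRecord(
  mdxPath: string,
  entityYaml: Record<string, unknown>,
): PageRecord {
  const source = readFileSync(mdxPath, "utf8");
  const { data: frontmatter, content } = matter(source); // MDX frontmatter
  // Assumed precedence on key collisions: frontmatter < entity YAML < metrics.
  return { ...frontmatter, ...entityYaml, metrics: computeMetrics(content) } as PageRecord;
}
```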
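The coverage block pairs per-item targets with actuals and a green/red status per item. The sketch below reproduces the numeric statuses shown in this record with a simple meets-or-beats rule; it is inferred from this one record rather than taken from the wiki's source, and the zero-total items (quotes, accuracy) evidently follow a stricter rule, since 0/0 renders red.

```ts
// Inferred reconstruction of the coverage check; the real rule lives in the
// wiki's build pipeline and is not shown here, so treat this as a guess that
// happens to reproduce the numeric rows of this record.
type Status = "green" | "red";

const targets = { tables: 10, diagrams: 1, internalLinks: 19, externalLinks: 12, footnotes: 7, references: 7 };
const actuals = { tables: 17, diagrams: 1, internalLinks: 40, externalLinks: 12, footnotes: 0, references: 32 };

// A numeric item passes when the page meets or beats its target.
const coverageStatus = (actual: number, target: number): Status =>
  actual >= target ? "green" : "red";

const items = Object.fromEntries(
  (Object.keys(targets) as (keyof typeof targets)[]).map(
    (k) => [k, coverageStatus(actuals[k], targets[k])] as const,
  ),
);
console.log(items);
// { tables: 'green', diagrams: 'green', internalLinks: 'green',
//   externalLinks: 'green', footnotes: 'red', references: 'green' }
//
// "passing: 9, total: 13" counts these six numeric items together with the
// seven non-numeric checks (llmSummary, schedule, entity, editHistory,
// overview, quotes, accuracy): five numeric greens plus four non-numeric
// greens = 9. Quotes and accuracy read as red at 0/0, so zero-total items
// apparently fail outright.
```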
Backlinks (4)

| id | title | type | relationship |
|----|-------|------|--------------|
| corrigibility-failure-pathways | Corrigibility Failure Pathways | analysis | |
| epistemic-tools-approaches-overview | Approaches (Overview) | concept | |
| erosion-of-agency | Erosion of Human Agency | risk | |
| preference-manipulation | AI Preference Manipulation | risk | |