Longterm Wiki

Cyberweapons

ID: cyberweapons · Entity ID (EID): E86 · Type: risk
Path: /knowledge-base/risks/cyberweapons/
17 backlinks · Quality: 91 · Updated: 2026-03-13
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "cyberweapons",
  "numericId": null,
  "path": "/knowledge-base/risks/cyberweapons/",
  "filePath": "knowledge-base/risks/cyberweapons.mdx",
  "title": "Cyberweapons",
  "quality": 91,
  "readerImportance": 82.5,
  "researchImportance": 21.5,
  "tacticalValue": 72,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": "outcome",
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive analysis showing AI-enabled cyberweapons represent a present, high-severity threat with GPT-4 exploiting 87% of one-day vulnerabilities at \\$8.80/exploit and the first documented AI-orchestrated attack in September 2025 affecting ~30 targets. Key finding: while AI helps both offense and defense, current assessment gives offense a 55-45% offense advantage, with autonomous attacks now comprising 14% of major breaches and causing average U.S. breach costs of \\$10.22M. Covers five key uncertainties with probability-weighted scenarios.",
  "description": "AI-enabled cyberweapons represent a rapidly escalating threat, with AI-powered attacks surging 72% year-over-year in 2025 and the first documented AI-orchestrated cyberattack affecting ~30 global targets. Research shows GPT-4 can exploit 87% of one-day vulnerabilities at \\$8.80 per exploit, while 14% of major corporate breaches are now fully autonomous. Key uncertainties include whether AI favors offense or defense long-term (current assessment: 55-45% offense advantage) and how quickly autonomous capabilities will proliferate.",
  "ratings": {
    "novelty": 5.5,
    "rigor": 7.5,
    "actionability": 6.5,
    "completeness": 8
  },
  "category": "risks",
  "subcategory": "misuse",
  "clusters": [
    "cyber",
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 4218,
    "tableCount": 13,
    "diagramCount": 2,
    "internalLinks": 66,
    "externalLinks": 4,
    "footnoteCount": 0,
    "bulletRatio": 0.19,
    "sectionCount": 51,
    "hasOverview": true,
    "structuralScore": 14
  },
  "suggestedQuality": 93,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 4218,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 61,
  "backlinkCount": 17,
  "hallucinationRisk": {
    "level": "medium",
    "score": 35,
    "factors": [
      "no-citations",
      "high-rigor",
      "high-quality"
    ]
  },
  "entityType": "risk",
  "redundancy": {
    "maxSimilarity": 19,
    "similarPages": [
      {
        "id": "claude-code-espionage-2025",
        "title": "Claude Code Espionage Incident (2025)",
        "path": "/knowledge-base/incidents/claude-code-espionage-2025/",
        "similarity": 19
      },
      {
        "id": "fraud-sophistication-curve",
        "title": "Fraud Sophistication Curve Model",
        "path": "/knowledge-base/models/fraud-sophistication-curve/",
        "similarity": 19
      },
      {
        "id": "authentication-collapse-timeline",
        "title": "Authentication Collapse Timeline Model",
        "path": "/knowledge-base/models/authentication-collapse-timeline/",
        "similarity": 17
      },
      {
        "id": "authoritarian-tools-diffusion",
        "title": "Authoritarian Tools Diffusion Model",
        "path": "/knowledge-base/models/authoritarian-tools-diffusion/",
        "similarity": 17
      },
      {
        "id": "autonomous-weapons-proliferation",
        "title": "LAWS Proliferation Model",
        "path": "/knowledge-base/models/autonomous-weapons-proliferation/",
        "similarity": 17
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 17,
      "diagrams": 2,
      "internalLinks": 34,
      "externalLinks": 21,
      "footnotes": 13,
      "references": 13
    },
    "actuals": {
      "tables": 13,
      "diagrams": 2,
      "internalLinks": 66,
      "externalLinks": 4,
      "footnotes": 0,
      "references": 30,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "amber",
      "diagrams": "green",
      "internalLinks": "green",
      "externalLinks": "amber",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:5.5 R:7.5 A:6.5 C:8"
  },
  "readerRank": 70,
  "researchRank": 480,
  "recommendedScore": 245.11
}
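The record header describes database.json as a build-time merge of MDX frontmatter, Entity YAML, and computed metrics. Purely as an illustration of that merge step (the wiki's actual build code is not shown here; the helper names, libraries, and metric heuristics below are all assumptions), a minimal TypeScript sketch:

// merge-page-record.ts — illustrative sketch only; not the wiki's actual build script.
// Assumes gray-matter (frontmatter parsing) and js-yaml (Entity YAML) are available;
// readFrontmatter, readEntityYaml, and computeMetrics are hypothetical names.
import { readFileSync } from "node:fs";
import matter from "gray-matter";
import { load } from "js-yaml";

interface PageRecord {
  id: string;
  path: string;
  filePath: string;
  title: string;
  metrics: Record<string, number | boolean>;
  [key: string]: unknown; // quality, ratings, coverage, etc. from frontmatter/YAML
}

// MDX frontmatter supplies editorial fields (title, quality, llmSummary, ratings, ...).
function readFrontmatter(filePath: string): Record<string, unknown> {
  return matter(readFileSync(filePath, "utf8")).data;
}

// Entity YAML supplies entity-level fields (entityType, clusters, external links, ...).
function readEntityYaml(entityPath: string): Record<string, unknown> {
  return (load(readFileSync(entityPath, "utf8")) ?? {}) as Record<string, unknown>;
}

// Computed metrics come from the MDX body itself.
function computeMetrics(filePath: string): Record<string, number | boolean> {
  const body = matter(readFileSync(filePath, "utf8")).content;
  return {
    wordCount: body.split(/\s+/).filter(Boolean).length,
    // Rough heuristic: one "|---|" separator row per Markdown table.
    tableCount: (body.match(/^\|[\s:|-]+\|$/gm) ?? []).length,
    internalLinks: (body.match(/\]\(\/knowledge-base\//g) ?? []).length,
    hasOverview: /^#+\s*Overview/m.test(body),
  };
}

// Merge order here: frontmatter first, Entity YAML second, computed metrics last.
export function buildPageRecord(id: string, filePath: string, entityPath: string): PageRecord {
  return {
    id,
    filePath,
    path: `/${filePath.replace(/\.mdx$/, "")}/`,
    title: id, // overridden by frontmatter when present
    ...readFrontmatter(filePath),
    ...readEntityYaml(entityPath),
    metrics: computeMetrics(filePath),
  } as PageRecord;
}

The merge order shown is arbitrary; the real pipeline may resolve conflicts between frontmatter and entity fields differently.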
External Links
{
  "wikipedia": "https://en.wikipedia.org/wiki/Cyberwarfare",
  "lesswrong": "https://www.lesswrong.com/tag/computer-security-and-cryptography",
  "grokipedia": "https://grokipedia.com/page/Cyberwarfare"
}
Backlinks (17)
id | title | type | relationship
cyberweapons-offense-defense | Cyber Offense-Defense Balance Model | analysis | related
cyberweapons-attack-automation | Autonomous Cyber Attack Timeline | analysis | related
compute-governance | Compute Governance | policy |
evals | AI Evaluations | safety-agenda |
autonomous-weapons | Autonomous Weapons | risk |
bioweapons | Bioweapons Risk | risk |
proliferation | AI Proliferation | risk |
ai-enabled-untraceable-misuse | AI-Enabled Untraceable Misuse | risk |
language-models | Large Language Models | capability |
__index__/knowledge-base | Knowledge Base | concept |
risk-activation-timeline | Risk Activation Timeline Model | analysis |
evaluation | AI Evaluation | approach |
lab-culture | AI Lab Safety Culture | approach |
responsible-scaling-policies | Responsible Scaling Policies | policy |
seoul-declaration | Seoul AI Safety Summit Declaration | policy |
__index__/knowledge-base/risks | AI Risks | concept |
misuse-overview | Misuse Risks (Overview) | concept |
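The backlinkCount field and the table above are presumably produced by inverting each page's internal links. The sketch below is hypothetical; every type and function name in it is invented for illustration.

// backlinks.ts — hypothetical sketch of deriving a backlinks table; names are invented.
interface IndexedPage {
  id: string;
  title: string;
  entityType: string;
  internalLinks: string[]; // ids of pages this page links to
}

interface Backlink {
  id: string;
  title: string;
  type: string;
}

// Collect every page whose internal links point at the target page.
export function backlinksFor(targetId: string, pages: IndexedPage[]): Backlink[] {
  return pages
    .filter((page) => page.id !== targetId && page.internalLinks.includes(targetId))
    .map((page) => ({ id: page.id, title: page.title, type: page.entityType }));
}

// e.g. backlinksFor("cyberweapons", allPages).length would be expected to equal 17 here.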