Longterm Wiki

Persuasion and Social Manipulation

persuasion · capability · Path: /knowledge-base/capabilities/persuasion/
Entity ID (EID): E224
7 backlinks · Quality: 63 · Updated: 2026-03-13
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "persuasion",
  "numericId": null,
  "path": "/knowledge-base/capabilities/persuasion/",
  "filePath": "knowledge-base/capabilities/persuasion.mdx",
  "title": "Persuasion and Social Manipulation",
  "quality": 63,
  "readerImportance": 53,
  "researchImportance": 80.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "GPT-4 achieves superhuman persuasion in controlled settings (64% win rate, 81% higher odds with personalization), with AI chatbots demonstrating 4x the impact of political ads (3.9 vs ~1 point voter shift). Post-training optimization boosts persuasion 51% but significantly decreases factual accuracy, creating a critical truth-persuasion tradeoff with implications for deceptive alignment and democratic interference.",
  "description": "AI persuasion capabilities have reached superhuman levels in controlled settings—GPT-4 is more persuasive than humans 64% of the time with personalization (Nature 2025), producing 81% higher odds of opinion change. AI chatbots demonstrated 4x the persuasive impact of political ads in the 2024 US election, with critical tradeoffs between persuasion and factual accuracy.",
  "ratings": {
    "novelty": 5.2,
    "rigor": 7.1,
    "actionability": 5.8,
    "completeness": 7.3
  },
  "category": "capabilities",
  "subcategory": "safety-relevant",
  "clusters": [
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 2793,
    "tableCount": 19,
    "diagramCount": 1,
    "internalLinks": 20,
    "externalLinks": 36,
    "footnoteCount": 0,
    "bulletRatio": 0.24,
    "sectionCount": 48,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 2793,
  "unconvertedLinks": [
    {
      "text": "Future of Life AI Safety Index 2025",
      "url": "https://futureoflife.org/ai-safety-index-summer-2025/",
      "resourceId": "df46edd6fa2078d1",
      "resourceTitle": "FLI AI Safety Index Summer 2025"
    },
    {
      "text": "Future of Life Institute's 2025 AI Safety Index",
      "url": "https://futureoflife.org/ai-safety-index-summer-2025/",
      "resourceId": "df46edd6fa2078d1",
      "resourceTitle": "FLI AI Safety Index Summer 2025"
    },
    {
      "text": "Future of Life AI Safety Index (2025)",
      "url": "https://futureoflife.org/ai-safety-index-summer-2025/",
      "resourceId": "df46edd6fa2078d1",
      "resourceTitle": "FLI AI Safety Index Summer 2025"
    },
    {
      "text": "DeepMind Evaluations (2024)",
      "url": "https://arxiv.org/pdf/2403.13793",
      "resourceId": "8e97b1cb40edd72c",
      "resourceTitle": "Evaluating Frontier Models for Dangerous Capabilities"
    },
    {
      "text": "International AI Safety Report (2025)",
      "url": "https://internationalaisafetyreport.org/publication/international-ai-safety-report-2025",
      "resourceId": "b163447fdc804872",
      "resourceTitle": "International AI Safety Report 2025"
    },
    {
      "text": "METR Safety Policies (2025)",
      "url": "https://metr.org/blog/2025-12-09-common-elements-of-frontier-ai-safety-policies/",
      "resourceId": "c8782940b880d00f",
      "resourceTitle": "METR's analysis of 12 companies"
    },
    {
      "text": "Harvard Ash Center (2024)",
      "url": "https://ash.harvard.edu/articles/the-apocalypse-that-wasnt-ai-was-everywhere-in-2024s-elections-but-deepfakes-and-misinformation-were-only-part-of-the-picture/",
      "resourceId": "5cc2037b750354e0",
      "resourceTitle": "Harvard's Ash Center"
    }
  ],
  "unconvertedLinkCount": 7,
  "convertedLinkCount": 15,
  "backlinkCount": 7,
  "hallucinationRisk": {
    "level": "medium",
    "score": 40,
    "factors": [
      "no-citations",
      "high-rigor"
    ]
  },
  "entityType": "capability",
  "redundancy": {
    "maxSimilarity": 16,
    "similarPages": [
      {
        "id": "epistemic-security",
        "title": "AI-Era Epistemic Security",
        "path": "/knowledge-base/responses/epistemic-security/",
        "similarity": 16
      },
      {
        "id": "power-seeking-conditions",
        "title": "Power-Seeking Emergence Conditions Model",
        "path": "/knowledge-base/models/power-seeking-conditions/",
        "similarity": 15
      },
      {
        "id": "authoritarian-tools",
        "title": "Authoritarian Tools",
        "path": "/knowledge-base/risks/authoritarian-tools/",
        "similarity": 15
      },
      {
        "id": "disinformation",
        "title": "Disinformation",
        "path": "/knowledge-base/risks/disinformation/",
        "similarity": 15
      },
      {
        "id": "epistemic-sycophancy",
        "title": "Epistemic Sycophancy",
        "path": "/knowledge-base/risks/epistemic-sycophancy/",
        "similarity": 15
      }
    ]
  },
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 11,
      "diagrams": 1,
      "internalLinks": 22,
      "externalLinks": 14,
      "footnotes": 8,
      "references": 8
    },
    "actuals": {
      "tables": 19,
      "diagrams": 1,
      "internalLinks": 20,
      "externalLinks": 36,
      "footnotes": 0,
      "references": 22,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "amber",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:5.2 R:7.1 A:5.8 C:7.3"
  },
  "readerRank": 276,
  "researchRank": 84,
  "recommendedScore": 174.3
}
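The caption above implies how this record is produced: MDX frontmatter, the entity's YAML, and build-time metrics are merged into a single object, and the coverage items are colored by comparing actuals against targets. A minimal TypeScript sketch follows. Every name in it (buildRecord, coverageStatus, the gray-matter/js-yaml parsing) is an illustrative assumption rather than the wiki's actual build code, and the 80% amber threshold is merely inferred from the record above, not documented.

// Illustrative sketch only: assumed helpers and packages (gray-matter,
// js-yaml), not the Longterm Wiki's actual build code.
import { readFileSync } from "node:fs";
import matter from "gray-matter";
import { load } from "js-yaml";

// Assumed thresholds, inferred from the record above: meeting the
// target is green, at least 80% of it is amber, anything lower is red.
function coverageStatus(actual: number, target: number): "green" | "amber" | "red" {
  if (actual >= target) return "green";
  if (actual >= 0.8 * target) return "amber";
  return "red";
}

function computeMetrics(body: string) {
  return {
    // Rough proxies: words by whitespace split, links by counting
    // markdown link targets in the page body.
    wordCount: body.split(/\s+/).filter(Boolean).length,
    internalLinks: (body.match(/\]\(\/knowledge-base\//g) ?? []).length,
    externalLinks: (body.match(/\]\(https?:\/\//g) ?? []).length,
  };
}

function buildRecord(mdxPath: string, entityYamlPath: string) {
  const { data: frontmatter, content } = matter(readFileSync(mdxPath, "utf8"));
  const entity = load(readFileSync(entityYamlPath, "utf8")) as Record<string, unknown>;
  // Later spreads win on key collisions: frontmatter < entity < computed.
  return { ...frontmatter, ...entity, metrics: computeMetrics(content) };
}

Under these assumed thresholds, coverageStatus(20, 22) returns "amber" and coverageStatus(0, 8) returns "red", matching the internalLinks and footnotes items listed in the coverage block.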
External Links
{
  "lesswrong": "https://www.lesswrong.com/tag/ai-persuasion"
}
Backlinks (7)
| id | title | type | relationship |
| --- | --- | --- | --- |
| __index__/knowledge-base/capabilities | AI Capabilities | concept | |
| language-models | Large Language Models | capability | |
| goal-misgeneralization-probability | Goal Misgeneralization Probability Model | analysis | |
| risk-activation-timeline | Risk Activation Timeline Model | analysis | |
| evals | Evals & Red-teaming | safety-agenda | |
| evaluation | AI Evaluation | approach | |
| red-teaming | Red Teaming | approach | |
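A backlinks table like this is the inverse of each page's internal links: page A linking to page B yields a backlink row on B. A minimal sketch, assuming a hypothetical PageStub shape (the real page index likely carries more fields):

// Hypothetical shape; illustrative only.
interface PageStub {
  id: string;
  title: string;
  entityType: string;
  internalLinks: string[]; // ids of pages this page links to
}

// Collect every page that links to targetId, yielding the id/title/type
// rows shown in the table above.
function collectBacklinks(pages: PageStub[], targetId: string) {
  return pages
    .filter((p) => p.internalLinks.includes(targetId))
    .map((p) => ({ id: p.id, title: p.title, type: p.entityType }));
}

Calling collectBacklinks(allPages, "persuasion") over such an index would produce the seven rows above, assuming each linking page contributes one row.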