Longterm Wiki

AI Whistleblower Protections

whistleblower-protections · policy · Path: /knowledge-base/responses/whistleblower-protections/
E475 — Entity ID (EID)
← Back to page · 1 backlink · Quality: 63 · Updated: 2026-03-13
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "whistleblower-protections",
  "numericId": null,
  "path": "/knowledge-base/responses/whistleblower-protections/",
  "filePath": "knowledge-base/responses/whistleblower-protections.mdx",
  "title": "AI Whistleblower Protections",
  "quality": 63,
  "readerImportance": 48,
  "researchImportance": 25.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive analysis of AI whistleblower protections showing severe gaps in current law (no federal protection for AI safety disclosures) with bipartisan AI Whistleblower Protection Act (S.1792) introduced May 2025 providing potential remedy. Documents concrete 2024 cases (Aschenbrenner termination, 13-employee 'Right to Warn' letter) demonstrating information asymmetry where employees possess unique safety data but face NDAs, equity clawback threats, and career risks for disclosure.",
  "description": "Legal and institutional frameworks for protecting AI researchers and employees who report safety concerns. The bipartisan AI Whistleblower Protection Act (S.1792) introduced May 2025 addresses critical gaps in current law, while EU AI Act Article 87 provides protections from August 2026. Key cases include Leopold Aschenbrenner's termination from OpenAI and the 2024 \"Right to Warn\" letter signed by 13 employees from frontier AI labs.",
  "ratings": {
    "novelty": 4.5,
    "rigor": 6.5,
    "actionability": 7,
    "completeness": 7.5
  },
  "category": "responses",
  "subcategory": "organizational-practices",
  "clusters": [
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 2593,
    "tableCount": 15,
    "diagramCount": 2,
    "internalLinks": 13,
    "externalLinks": 37,
    "footnoteCount": 0,
    "bulletRatio": 0.13,
    "sectionCount": 30,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 2593,
  "unconvertedLinks": [
    {
      "text": "Leopold Aschenbrenner",
      "url": "https://en.wikipedia.org/wiki/Leopold_Aschenbrenner",
      "resourceId": "957893bf859a6d97",
      "resourceTitle": "Leopold Aschenbrenner - Wikipedia"
    },
    {
      "text": "Senate Judiciary Chair Chuck Grassley",
      "url": "https://www.judiciary.senate.gov/press/rep/releases/grassley-introduces-ai-whistleblower-protection-act",
      "resourceId": "863da0838b7bc974",
      "resourceTitle": "Grassley Introduces AI Whistleblower Protection Act"
    },
    {
      "text": "Future of Life Institute's 2025 AI Safety Index",
      "url": "https://futureoflife.org/ai-safety-index-summer-2025/",
      "resourceId": "df46edd6fa2078d1",
      "resourceTitle": "FLI AI Safety Index Summer 2025"
    },
    {
      "text": "AI Lab Watch commitment tracker",
      "url": "https://ailabwatch.org/resources/commitments",
      "resourceId": "91ca6b1425554e9a",
      "resourceTitle": "AI Lab Watch: Commitments Tracker"
    },
    {
      "text": "Future of Life Institute's 2025 AI Safety Index",
      "url": "https://futureoflife.org/ai-safety-index-summer-2025/",
      "resourceId": "df46edd6fa2078d1",
      "resourceTitle": "FLI AI Safety Index Summer 2025"
    },
    {
      "text": "Responsible Scaling Policy",
      "url": "https://www.anthropic.com/index/anthropics-responsible-scaling-policy",
      "resourceId": "c637506d2cd4d849"
    },
    {
      "text": "Future of Life Institute AI Safety Index",
      "url": "https://futureoflife.org/ai-safety-index-summer-2025/",
      "resourceId": "df46edd6fa2078d1",
      "resourceTitle": "FLI AI Safety Index Summer 2025"
    },
    {
      "text": "AI Lab Watch Commitment Tracker",
      "url": "https://ailabwatch.org/resources/commitments",
      "resourceId": "91ca6b1425554e9a",
      "resourceTitle": "AI Lab Watch: Commitments Tracker"
    },
    {
      "text": "METR Common Elements Analysis",
      "url": "https://metr.org/blog/2025-12-09-common-elements-of-frontier-ai-safety-policies/",
      "resourceId": "c8782940b880d00f",
      "resourceTitle": "METR's analysis of 12 companies"
    }
  ],
  "unconvertedLinkCount": 9,
  "convertedLinkCount": 0,
  "backlinkCount": 1,
  "hallucinationRisk": {
    "level": "medium",
    "score": 55,
    "factors": [
      "no-citations"
    ]
  },
  "entityType": "policy",
  "redundancy": {
    "maxSimilarity": 16,
    "similarPages": [
      {
        "id": "lab-culture",
        "title": "AI Lab Safety Culture",
        "path": "/knowledge-base/responses/lab-culture/",
        "similarity": 16
      },
      {
        "id": "corporate-influence",
        "title": "Corporate Influence on AI Policy",
        "path": "/knowledge-base/responses/corporate-influence/",
        "similarity": 15
      },
      {
        "id": "california-sb53",
        "title": "California SB 53",
        "path": "/knowledge-base/responses/california-sb53/",
        "similarity": 14
      },
      {
        "id": "governance-policy",
        "title": "AI Governance and Policy",
        "path": "/knowledge-base/responses/governance-policy/",
        "similarity": 14
      },
      {
        "id": "ai-safety-institutes",
        "title": "AI Safety Institutes",
        "path": "/knowledge-base/responses/ai-safety-institutes/",
        "similarity": 13
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-15",
      "branch": "claude/extract-wiki-interventions-WpOs4",
      "title": "Extract wiki proposals as structured data",
      "summary": "Created two new data layers:\n1. **Interventions** (broad categories): Extended `Intervention` schema with risk coverage matrix, ITN prioritization, funding data. Created `data/interventions.yaml` with 14 broad intervention categories. `InterventionCard`/`InterventionList` components.\n2. **Proposals** (narrow, tactical): New `Proposal` data type for specific, speculative, actionable items extracted from wiki pages. Created `data/proposals.yaml` with 27 proposals across 6 domains (philanthropic, financial, governance, technical, biosecurity, field-building). Each has cost/EV estimates, honest concerns, feasibility, stance (collaborative/adversarial). `ProposalCard`/`ProposalList` components.\n\nPost-review fixes: Fixed 13 incorrect wikiPageId E-codes in interventions.yaml (used numeric IDs instead of entity slugs). Added Intervention + Proposal to schema validator. Extracted shared badge color maps from 4 components into `badge-styles.ts`. Removed unused `client:load` prop and `fundingShare` destructure.",
      "pr": 141
    }
  ],
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 10,
      "diagrams": 1,
      "internalLinks": 21,
      "externalLinks": 13,
      "footnotes": 8,
      "references": 8
    },
    "actuals": {
      "tables": 15,
      "diagrams": 2,
      "internalLinks": 13,
      "externalLinks": 37,
      "footnotes": 0,
      "references": 6,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "amber",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "amber",
      "quotes": "red",
      "accuracy": "red"
    },
    "editHistoryCount": 1,
    "ratingsString": "N:4.5 R:6.5 A:7 C:7.5"
  },
  "readerRank": 317,
  "researchRank": 452,
  "recommendedScore": 171.77
}
External Links
{
  "eaForum": "https://forum.effectivealtruism.org/topics/whistleblowing"
}
Backlinks (1)
id · title · type · relationship
lab-culture · AI Lab Safety Culture · approach
Longterm Wiki