Longterm Wiki

Corporate Influence on AI Policy

ID: corporate-influence · Type: crux · Path: /knowledge-base/responses/corporate-influence/
Entity ID (EID): E78
Backlinks: 3 · Quality: 66 · Updated: 2026-03-13
Page Record (database.json) — merged from MDX frontmatter, Entity YAML, and computed metrics at build time; a sketch of this merge follows the record below.
{
  "id": "corporate-influence",
  "numericId": null,
  "path": "/knowledge-base/responses/corporate-influence/",
  "filePath": "knowledge-base/responses/corporate-influence.mdx",
  "title": "Corporate Influence on AI Policy",
  "quality": 66,
  "readerImportance": 23,
  "researchImportance": 35.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive analysis of corporate influence pathways (working inside labs, shareholder activism, whistleblowing) showing mixed effectiveness: safety teams influenced GPT-4 delays and responsible scaling policies, but ~50% of OpenAI's safety staff departed in 2024 and the November 2023 board crisis demonstrated commercial pressures override safety concerns. Provides specific compensation data (\\$115K-\\$190K for researchers), talent flow metrics (8x more likely to leave OpenAI for Anthropic), and detailed assessment that 1,500-2,500 people work in safety roles globally with 60% in SF Bay Area.",
  "description": "A comprehensive analysis of directly influencing frontier AI labs through working inside them, shareholder activism, whistleblowing, and transparency advocacy. Examines the effectiveness, risks, and strategic considerations of corporate influence approaches to AI safety, including quantitative estimates of impact and career trajectories.",
  "ratings": {
    "novelty": 4.5,
    "rigor": 6.5,
    "actionability": 7,
    "completeness": 7.5
  },
  "category": "responses",
  "subcategory": "field-building",
  "clusters": [
    "ai-safety",
    "community",
    "governance"
  ],
  "metrics": {
    "wordCount": 3303,
    "tableCount": 6,
    "diagramCount": 1,
    "internalLinks": 37,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.1,
    "sectionCount": 17,
    "hasOverview": true,
    "structuralScore": 12
  },
  "suggestedQuality": 80,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 3303,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 25,
  "backlinkCount": 3,
  "hallucinationRisk": {
    "level": "medium",
    "score": 50,
    "factors": [
      "no-citations",
      "few-external-sources",
      "conceptual-content"
    ]
  },
  "entityType": "crux",
  "redundancy": {
    "maxSimilarity": 20,
    "similarPages": [
      {
        "id": "lab-culture",
        "title": "AI Lab Safety Culture",
        "path": "/knowledge-base/responses/lab-culture/",
        "similarity": 20
      },
      {
        "id": "whistleblower-dynamics",
        "title": "Whistleblower Dynamics Model",
        "path": "/knowledge-base/models/whistleblower-dynamics/",
        "similarity": 19
      },
      {
        "id": "metr",
        "title": "METR",
        "path": "/knowledge-base/organizations/metr/",
        "similarity": 19
      },
      {
        "id": "us-aisi",
        "title": "US AI Safety Institute",
        "path": "/knowledge-base/organizations/us-aisi/",
        "similarity": 19
      },
      {
        "id": "voluntary-commitments",
        "title": "Voluntary Industry Commitments",
        "path": "/knowledge-base/responses/voluntary-commitments/",
        "similarity": 19
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 13,
      "diagrams": 1,
      "internalLinks": 26,
      "externalLinks": 17,
      "footnotes": 10,
      "references": 10
    },
    "actuals": {
      "tables": 6,
      "diagrams": 1,
      "internalLinks": 37,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 20,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "amber",
      "diagrams": "green",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:4.5 R:6.5 A:7 C:7.5"
  },
  "readerRank": 503,
  "researchRank": 375,
  "recommendedScore": 165.36
}
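
How a record like this could be assembled is sketched below in TypeScript. This is a minimal illustration, assuming a Node build step using the gray-matter and yaml packages; the helper names (buildPageRecord, computeMetrics, coverageStatus) are hypothetical, not the wiki's documented API. The coverage rule is inferred from the targets/actuals pairs in the record above, not from any published spec.

import { promises as fs } from "node:fs";
import matter from "gray-matter"; // MDX frontmatter parsing (assumed dependency)
import YAML from "yaml";          // Entity YAML parsing (assumed dependency)

type CoverageStatus = "green" | "amber" | "red";

// One rule consistent with every targets/actuals pair in the record:
// meeting the target is green, a nonzero shortfall is amber, zero is red.
function coverageStatus(actual: number, target: number): CoverageStatus {
  if (actual >= target) return "green";
  if (actual > 0) return "amber";
  return "red";
}

// Rough word and table counts standing in for the full metrics block.
function computeMetrics(body: string) {
  return {
    wordCount: body.trim().split(/\s+/).length,
    tableCount: (body.match(/^\|.*\|\s*$/gm) ?? []).length, // counts table rows, crude
  };
}

// Merge order mirrors the header note: MDX frontmatter, then Entity YAML,
// then computed metrics, with later sources winning on key conflicts.
async function buildPageRecord(mdxPath: string, entityYamlPath: string) {
  const mdx = await fs.readFile(mdxPath, "utf8");
  const { data: frontmatter, content } = matter(mdx);
  const entity = YAML.parse(await fs.readFile(entityYamlPath, "utf8"));
  return { ...frontmatter, ...entity, metrics: computeMetrics(content) };
}

Applied to the record above, coverageStatus reproduces the coverage items map: tables (6 of a 13 target) comes out amber, externalLinks (0 of 17) red, and internalLinks (37 of 26) green.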
External Links
{
  "eaForum": "https://forum.effectivealtruism.org/topics/working-at-ai-labs"
}
Backlinks (3)
id | title | type | relationship
ea-shareholder-diversification-anthropic | EA Shareholder Diversification from Anthropic | concept |
__index__/knowledge-base/responses | Safety Responses | concept |
training-programs | AI Safety Training Programs | approach |