Skip to content
Longterm Wiki

Frontier AI Labs (Overview)

labs-overview — Path: /knowledge-base/organizations/labs-overview/
Entity ID (EID): E820
← Back to page · 1 backlink · Quality: 85 · Updated: 2026-04-26
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "labs-overview",
  "wikiId": "E820",
  "path": "/knowledge-base/organizations/labs-overview/",
  "filePath": "knowledge-base/organizations/labs-overview.mdx",
  "title": "Frontier AI Labs (Overview)",
  "quality": 85,
  "readerImportance": 78.5,
  "researchImportance": null,
  "tacticalValue": 92,
  "contentFormat": "article",
  "causalLevel": null,
  "lastUpdated": "2026-04-26",
  "dateCreated": "2026-02-17",
  "summary": "Comprehensive comparative overview of 7 major frontier AI labs covering safety frameworks, governance structures, competitive dynamics, and policy stances as of early 2026, with specific quantitative details (e.g., ASL-3 bioweapon task uplift of 2.5×, OpenAI framework permitting deployment with >1000-death risk potential, Microsoft's 27% equity stake). The page is primarily a well-organized compilation of publicly available information rather than original analysis, but achieves notable concreteness and coverage breadth that makes it genuinely useful for prioritization work.",
  "description": "Comparative overview of major AI research laboratories developing frontier AI systems, including safety framework profiles, governance structures, competitive dynamics, and policy stances as of early 2026.",
  "ratings": {
    "focus": 8.5,
    "novelty": 4.5,
    "rigor": 6.5,
    "completeness": 8,
    "concreteness": 7.5,
    "actionability": 5.5,
    "objectivity": 6.5
  },
  "category": "organizations",
  "subcategory": "labs",
  "clusters": [
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 4706,
    "tableCount": 2,
    "diagramCount": 0,
    "internalLinks": 53,
    "externalLinks": 46,
    "footnoteCount": 34,
    "bulletRatio": 0,
    "sectionCount": 23,
    "hasOverview": true,
    "structuralScore": 14
  },
  "suggestedQuality": 93,
  "updateFrequency": null,
  "evergreen": true,
  "wordCount": 4706,
  "unconvertedLinks": [
    {
      "text": "Preparedness Framework v2 (Apr 2025)",
      "url": "https://openai.com/index/updating-our-preparedness-framework/",
      "resourceId": "ded0b05862511312",
      "resourceTitle": "Preparedness Framework"
    },
    {
      "text": "RSP v3 (May 2025)",
      "url": "https://www.anthropic.com/news/responsible-scaling-policy-v3",
      "resourceId": "0a9c389fb3e8f4ae",
      "resourceTitle": "Anthropic's Responsible Scaling Policy: Version 3.0"
    },
    {
      "text": "Frontier Safety Framework v3 (Sep 2025)",
      "url": "https://deepmind.google/blog/strengthening-our-frontier-safety-framework/",
      "resourceId": "a5154ccbf034e273",
      "resourceTitle": "Google DeepMind: Strengthening our Frontier Safety Framework"
    },
    {
      "text": "Responsible Use Guide; system cards for open-weight releases",
      "url": "https://ai.meta.com/blog/meta-llama-3-1-ai-responsibility/",
      "resourceId": "a4f0e262dd30ec02",
      "resourceTitle": "Llama Guard 3 and Meta's AI Responsibility Approach for Llama 3.1"
    },
    {
      "text": "Preparedness Framework v2 (Apr 2025)",
      "url": "https://openai.com/index/updating-our-preparedness-framework/",
      "resourceId": "ded0b05862511312",
      "resourceTitle": "Preparedness Framework"
    },
    {
      "text": "RSP v3 (May 2025)",
      "url": "https://www.anthropic.com/news/responsible-scaling-policy-v3",
      "resourceId": "0a9c389fb3e8f4ae",
      "resourceTitle": "Anthropic's Responsible Scaling Policy: Version 3.0"
    },
    {
      "text": "FSF v3 (Sep 2025)",
      "url": "https://deepmind.google/blog/strengthening-our-frontier-safety-framework/",
      "resourceId": "a5154ccbf034e273",
      "resourceTitle": "Google DeepMind: Strengthening our Frontier Safety Framework"
    },
    {
      "text": "Risk Mgmt Framework in draft; not applied to current models",
      "url": "https://www.lesswrong.com/posts/hQyrTDuTXpqkxrnoH/xai-s-new-safety-framework-is-dreadful",
      "resourceId": "55e9c3de9e3999d9",
      "resourceTitle": "xAI's new safety framework is dreadful"
    },
    {
      "text": "Responsible Use Guide; no capability thresholds",
      "url": "https://ai.meta.com/blog/meta-llama-3-1-ai-responsibility/",
      "resourceId": "a4f0e262dd30ec02",
      "resourceTitle": "Llama Guard 3 and Meta's AI Responsibility Approach for Llama 3.1"
    },
    {
      "text": "OpenAI, \"Our updated Preparedness Framework,\" April 15, 2025",
      "url": "https://openai.com/index/updating-our-preparedness-framework/",
      "resourceId": "ded0b05862511312",
      "resourceTitle": "Preparedness Framework"
    },
    {
      "text": "CNBC, \"OpenAI completes restructure, solidifying Microsoft as a major shareholder,\" October 28, 2025",
      "url": "https://www.cnbc.com/2025/10/28/open-ai-for-profit-microsoft.html",
      "resourceId": "kb-a7a9de24ab3a5df9",
      "resourceTitle": "OpenAI completes restructure, solidifying Microsoft as a major shareholder"
    },
    {
      "text": "CNBC, \"OpenAI dissolves Superalignment AI safety team,\" May 17, 2024",
      "url": "https://www.cnbc.com/2024/05/17/openai-superalignment-sutskever-leike.html",
      "resourceId": "33a4513e1449b55d",
      "resourceTitle": "OpenAI dissolves Superalignment AI safety team"
    },
    {
      "text": "Anthropic, \"Announcing our updated Responsible Scaling Policy,\" October 15, 2024",
      "url": "https://www.anthropic.com/news/announcing-our-updated-responsible-scaling-policy",
      "resourceId": "d0ba81cc7a8fdb2b",
      "resourceTitle": "Anthropic: Announcing our updated Responsible Scaling Policy"
    },
    {
      "text": "Anthropic, \"Responsible Scaling Policy Version 3.0,\" May 2025",
      "url": "https://www.anthropic.com/news/responsible-scaling-policy-v3",
      "resourceId": "0a9c389fb3e8f4ae",
      "resourceTitle": "Anthropic's Responsible Scaling Policy: Version 3.0"
    },
    {
      "text": "EA Forum, \"Anthropic is Quietly Backpedalling on its Safety Commitments,\" May 2025",
      "url": "https://forum.effectivealtruism.org/posts/kMpf7nYRpTkGh2Qfa/anthropic-is-quietly-backpedalling-on-its-safety-commitments",
      "resourceId": "123ce3cd0f5c7884",
      "resourceTitle": "Anthropic is Quietly Backpedalling on its Safety Commitments"
    },
    {
      "text": "Anthropic, \"Constitutional AI: Harmlessness from AI Feedback,\" arXiv:2212.08073, December 15, 2022",
      "url": "https://arxiv.org/abs/2212.08073",
      "resourceId": "683aef834ac1612a",
      "resourceTitle": "Constitutional AI: Harmlessness from AI Feedback"
    },
    {
      "text": "Anthropic Alignment Science team and Redwood Research, \"Alignment faking in large language models,\" December 18, 2024",
      "url": "https://www.anthropic.com/research/alignment-faking",
      "resourceId": "c2cfd72baafd64a9",
      "resourceTitle": "Anthropic's 2024 alignment faking study"
    },
    {
      "text": "Anthropic et al., \"Sleeper Agents: Training Deceptive LLMs that Persist Through Safety Training,\" arXiv:2401.05566, January 13, 2024",
      "url": "https://arxiv.org/abs/2401.05566",
      "resourceId": "e5c0904211c7d0cc",
      "resourceTitle": "Sleeper Agents: Training Deceptive LLMs that Persist Through Safety Training"
    },
    {
      "text": "Google DeepMind, \"Strengthening our Frontier Safety Framework,\" September 22, 2025",
      "url": "https://deepmind.google/blog/strengthening-our-frontier-safety-framework/",
      "resourceId": "a5154ccbf034e273",
      "resourceTitle": "Google DeepMind: Strengthening our Frontier Safety Framework"
    },
    {
      "text": "Institute for AI Policy and Strategy, \"Responsible Scaling: Comparing Government Guidance and Company Policy,\" 2024",
      "url": "https://www.iaps.ai/research/responsible-scaling",
      "resourceId": "364bc819bcb4c270",
      "resourceTitle": "Responsible Scaling: Comparing Government Guidance and Company Policy"
    },
    {
      "text": "Meta AI, \"Expanding our open source large language models responsibly,\" 2024",
      "url": "https://ai.meta.com/blog/meta-llama-3-1-ai-responsibility/",
      "resourceId": "a4f0e262dd30ec02",
      "resourceTitle": "Llama Guard 3 and Meta's AI Responsibility Approach for Llama 3.1"
    },
    {
      "text": "AI Lab Watch (LessWrong), \"xAI's new safety framework is dreadful,\" 2025",
      "url": "https://www.lesswrong.com/posts/hQyrTDuTXpqkxrnoH/xai-s-new-safety-framework-is-dreadful",
      "resourceId": "55e9c3de9e3999d9",
      "resourceTitle": "xAI's new safety framework is dreadful"
    },
    {
      "text": "Safe Superintelligence Inc., ssi.inc, 2024",
      "url": "https://ssi.inc/",
      "resourceId": "3fc4ee87e9bacb20",
      "resourceTitle": "Safe Superintelligence Inc"
    },
    {
      "text": "European Commission, \"AI Act — Shaping Europe's digital future,\" 2024",
      "url": "https://digital-strategy.ec.europa.eu/en/policies/regulatory-framework-ai",
      "resourceId": "acc5ad4063972046",
      "resourceTitle": "European Commission: EU AI Act"
    },
    {
      "text": "UK AI Security Institute, \"Frontier AI Trends Report,\" 2024",
      "url": "https://www.aisi.gov.uk/frontier-ai-trends-report",
      "resourceId": "7042c7f8de04ccb1",
      "resourceTitle": "AISI Frontier AI Trends"
    },
    {
      "text": "METR, metr.org, 2024",
      "url": "https://metr.org/",
      "resourceId": "45370a5153534152",
      "resourceTitle": "METR: Model Evaluation and Threat Research"
    },
    {
      "text": "Frontier Model Forum, \"Progress Update: Advancing Frontier AI Safety in 2024 and Beyond,\" 2024",
      "url": "https://www.frontiermodelforum.org/updates/progress-update-advancing-frontier-ai-safety-in-2024-and-beyond/",
      "resourceId": "51e8802a5aef29f6",
      "resourceTitle": "Frontier Model Forum"
    },
    {
      "text": "Frontier Model Forum, \"Publications,\" 2025",
      "url": "https://www.frontiermodelforum.org/publications/",
      "resourceId": "5329d38ad33971ff",
      "resourceTitle": "Early Best Practices for Frontier AI Safety Evaluations"
    }
  ],
  "unconvertedLinkCount": 28,
  "convertedLinkCount": 0,
  "backlinkCount": 1,
  "hallucinationRisk": {
    "level": "low",
    "score": 25,
    "factors": [
      "moderately-cited",
      "high-quality"
    ]
  },
  "redundancy": {
    "maxSimilarity": 21,
    "similarPages": [
      {
        "id": "existential-risk",
        "title": "Existential Risk from AI",
        "path": "/knowledge-base/risks/existential-risk/",
        "similarity": 21
      },
      {
        "id": "anthropic",
        "title": "Anthropic",
        "path": "/knowledge-base/organizations/anthropic/",
        "similarity": 20
      },
      {
        "id": "openai",
        "title": "OpenAI",
        "path": "/knowledge-base/organizations/openai/",
        "similarity": 20
      },
      {
        "id": "shareholder-and-board-influence-in-ai-labs",
        "title": "Shareholder and Board Influence in AI Labs",
        "path": "/knowledge-base/organizations/shareholder-and-board-influence-in-ai-labs/",
        "similarity": 20
      },
      {
        "id": "ilya-sutskever",
        "title": "Ilya Sutskever",
        "path": "/knowledge-base/people/ilya-sutskever/",
        "similarity": 19
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-04-12",
      "branch": "claude/qua-264-phase4-5",
      "title": "Auto-improve (standard): Frontier AI Labs (Overview)",
      "summary": "Improved \"Frontier AI Labs (Overview)\" via standard pipeline (1227.8s). Quality score: 82. Issues resolved: Section duplication: 'Competitive Dynamics' and 'Racing Dyna; Citation [^rc-5f82] (CNBC, May 29, 2024) is used to support ; Citation [^rc-0b2f] for METR is listed as 'metr.org, 2024' (.",
      "duration": "1227.8s",
      "cost": "$5-8"
    },
    {
      "date": "2026-02-17",
      "branch": "claude/clarify-overview-pages-ZQx72",
      "title": "Clarify overview pages with new entity type",
      "summary": "Added `overview` as a proper entity type throughout the system, migrated all 36 overview pages to `entityType: overview`, built overview-specific InfoBox rendering with child page links, created an OverviewBanner component, and added a knowledge-base-overview page template to Crux."
    },
    {
      "date": "2026-02-16",
      "branch": "claude/complete-new-pages-kawqG",
      "title": "Fix conflicting numeric IDs + add integrity checks",
      "summary": "Fixed all 9 overview pages from PR #118 which had numeric IDs (E687-E695) that conflicted with existing YAML entities. Reassigned to E710-E718. Then hardened the system to prevent recurrence:\n1. Added page-level numericId conflict detection to `build-data.mjs` (build now fails on conflicts)\n2. Created `numeric-id-integrity` global validation rule (cross-page uniqueness, format validation, entity conflict detection)\n3. Added `numericId` and `subcategory` to frontmatter Zod schema with format regex",
      "pr": 168
    }
  ],
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 19,
      "diagrams": 2,
      "internalLinks": 38,
      "externalLinks": 24,
      "footnotes": 14,
      "references": 14
    },
    "actuals": {
      "tables": 2,
      "diagrams": 0,
      "internalLinks": 53,
      "externalLinks": 46,
      "footnotes": 34,
      "references": 17,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "summary": "green",
      "schedule": "red",
      "entity": "red",
      "editHistory": "green",
      "overview": "green",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "green",
      "footnotes": "green",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "editHistoryCount": 3,
    "ratingsString": "N:4.5 R:6.5 A:5.5 C:8"
  },
  "readerRank": 95,
  "recommendedScore": 230.65
}
External Links

No external links

Backlinks (1)
id · title · type · relationship
ai-power-and-influence-map · AI Power and Influence Map · analysis
Longterm Wiki