OpenAI
ID: openai · Type: organization
Path: /knowledge-base/organizations/openai/
Entity ID (EID): E218

Page Record — database.json, merged from MDX frontmatter + Entity YAML + computed metrics at build time
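A minimal sketch of that build-time merge, assuming a Node toolchain with gray-matter and js-yaml (the real pipeline, file paths, and helper names are not documented here and are illustrative only), might look like this; the actual record produced for this page follows.

```typescript
// Hypothetical build step: merge MDX frontmatter, entity YAML, and computed
// metrics into a single page record. Field names mirror the record below,
// but the real pipeline is not public; treat this as an illustrative sketch.
import fs from "node:fs";
import matter from "gray-matter"; // parses MDX frontmatter
import yaml from "js-yaml";       // parses the Entity YAML file

interface PageRecord {
  id: string;
  path: string;
  filePath: string;
  title: string;
  metrics: { wordCount: number; internalLinks: number };
  [key: string]: unknown;
}

function buildRecord(mdxPath: string, entityYamlPath: string): PageRecord {
  const source = fs.readFileSync(mdxPath, "utf8");
  const { data: frontmatter, content } = matter(source);
  const entity = yaml.load(fs.readFileSync(entityYamlPath, "utf8")) as Record<string, unknown>;

  // Computed metrics: derived from the MDX body at build time.
  const metrics = {
    wordCount: content.split(/\s+/).filter(Boolean).length,
    internalLinks: (content.match(/\/knowledge-base\//g) ?? []).length,
  };

  // Later sources win on key collisions: entity YAML is the base,
  // frontmatter overrides it, and computed fields override both.
  return { ...entity, ...frontmatter, filePath: mdxPath, metrics } as PageRecord;
}

// Usage (hypothetical paths):
const record = buildRecord(
  "knowledge-base/organizations/openai.mdx",
  "entities/openai.yaml"
);
fs.writeFileSync("database.json", JSON.stringify(record, null, 2));
```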
{
"id": "openai",
"numericId": null,
"path": "/knowledge-base/organizations/openai/",
"filePath": "knowledge-base/organizations/openai.mdx",
"title": "OpenAI",
"quality": 62,
"readerImportance": 72.4,
"researchImportance": 44.5,
"tacticalValue": 92,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-12",
"dateCreated": "2026-02-15",
"llmSummary": "Comprehensive organizational profile of OpenAI documenting evolution from 2015 non-profit to Public Benefit Corporation, with detailed analysis of governance crisis, 2024-2025 ownership restructuring (conversion from capped-profit LLC to PBC, with specific post-conversion equity percentages subject to regulatory finalization), key leadership departures, and capability advancement (o1/o3 reasoning models). Updated with 2025 developments including o3-mini release, 800M weekly active users, Altman's AGI timeline statements, enterprise market share decline from 50% to 25% between 2023 and 2025, and joint safety evaluation with Anthropic in summer 2025.",
"description": "Leading AI lab that developed GPT models and ChatGPT, analyzing organizational evolution from non-profit research to commercial AGI development amid safety-commercialization tensions",
"ratings": {
"focus": 7.2,
"novelty": 3.5,
"rigor": 5.8,
"completeness": 7.5,
"concreteness": 7.8,
"actionability": 4.5,
"objectivity": 6.5
},
"category": "organizations",
"subcategory": "labs",
"clusters": [
"ai-safety",
"community",
"governance"
],
"metrics": {
"wordCount": 3768,
"tableCount": 16,
"diagramCount": 0,
"internalLinks": 36,
"externalLinks": 11,
"footnoteCount": 0,
"bulletRatio": 0.34,
"sectionCount": 46,
"hasOverview": true,
"structuralScore": 13
},
"suggestedQuality": 87,
"updateFrequency": 3,
"evergreen": true,
"wordCount": 3768,
"unconvertedLinks": [
{
"text": "OpenAI GPT-4 System Card",
"url": "https://cdn.openai.com/papers/gpt-4-system-card.pdf",
"resourceId": "ebab6e05661645c5",
"resourceTitle": "OpenAI"
},
{
"text": "OpenAI Deliberative Alignment",
"url": "https://openai.com/index/deliberative-alignment/",
"resourceId": "ee7628aa3f6282e5",
"resourceTitle": "Deliberative alignment: reasoning enables safer language models"
},
{
"text": "Global Affairs Initiative",
"url": "https://openai.com/global-affairs/openai-for-countries/",
"resourceId": "238f28c96d8780f6",
"resourceTitle": "Introducing OpenAI for Countries"
},
{
"text": "Sora 2 Launch",
"url": "https://openai.com/index/sora-2/",
"resourceId": "edc1663b7d3b8ac2",
"resourceTitle": "Sora 2 is here"
},
{
"text": "TIME Magazine Interview",
"url": "https://time.com/7205596/sam-altman-superintelligence-agi/",
"resourceId": "358ab98ce38cdd9c",
"resourceTitle": "How OpenAI's Sam Altman Is Thinking About AGI and Superintelligence in 2025"
},
{
"text": "arXiv:2005.14165",
"url": "https://arxiv.org/abs/2005.14165",
"resourceId": "2cab3ea10b8b7ae2",
"resourceTitle": "Brown et al. (2020)"
},
{
"text": "arXiv:2203.02155",
"url": "https://arxiv.org/abs/2203.02155",
"resourceId": "1098fc60be7ca2b0",
"resourceTitle": "Training Language Models to Follow Instructions with Human Feedback"
},
{
"text": "arXiv:2312.09390",
"url": "https://arxiv.org/abs/2312.09390",
"resourceId": "0ba98ae3a8a72270",
"resourceTitle": "arXiv"
},
{
"text": "arXiv:2303.08774",
"url": "https://arxiv.org/abs/2303.08774",
"resourceId": "29a0882390ee7063",
"resourceTitle": "OpenAI's GPT-4"
}
],
"unconvertedLinkCount": 9,
"convertedLinkCount": 0,
"backlinkCount": 238,
"citationHealth": {
"total": 34,
"withQuotes": 31,
"verified": 30,
"accuracyChecked": 34,
"accurate": 25,
"inaccurate": 0,
"avgScore": 0.9180523262869927
},
"hallucinationRisk": {
"level": "high",
"score": 75,
"factors": [
"biographical-claims",
"no-citations"
]
},
"entityType": "organization",
"redundancy": {
"maxSimilarity": 18,
"similarPages": [
{
"id": "xai",
"title": "xAI",
"path": "/knowledge-base/organizations/xai/",
"similarity": 18
},
{
"id": "agentic-ai",
"title": "Agentic AI",
"path": "/knowledge-base/capabilities/agentic-ai/",
"similarity": 17
},
{
"id": "large-language-models",
"title": "Large Language Models",
"path": "/knowledge-base/capabilities/large-language-models/",
"similarity": 17
},
{
"id": "anthropic-ipo",
"title": "Anthropic IPO",
"path": "/knowledge-base/organizations/anthropic-ipo/",
"similarity": 17
},
{
"id": "anthropic",
"title": "Anthropic",
"path": "/knowledge-base/organizations/anthropic/",
"similarity": 17
}
]
},
"changeHistory": [
{
"date": "2026-02-26",
"branch": "claude/claims-driven-improvements",
"title": "Auto-improve (standard): OpenAI",
"summary": "Improved \"OpenAI\" via standard pipeline (403.2s). Quality score: 74. Issues resolved: Footnote [^4] is missing — footnotes skip from [^3] to [^5],; Footnote [^24], [^25], [^26] are missing — footnotes skip fr; Footnotes [^40] and [^41] cite sources (LessWrong OpenAI los.",
"duration": "403.2s",
"cost": "$5-8"
},
{
"date": "2026-02-19",
"branch": "claude/add-wiki-tables-VhyKT",
"title": "Add concrete shareable data tables to high-value pages",
"summary": "Added three concrete, screenshot-worthy data tables to high-value wiki pages: (1) OpenAI ownership/stakeholder table to openai.mdx showing the 2024-2025 PBC restructuring with Foundation ~26%, Microsoft transitioning from 49% profit share to ~2.5% equity, and Sam Altman's proposed 7% grant; (2) Budget and headcount comparison table to safety-orgs-overview.mdx covering MIRI, ARC, METR, Redwood Research, CAIS, Apollo Research, GovAI, Conjecture, and FAR AI with annual budgets, headcounts, and cost-per-researcher; (3) Per-company compensation comparison table to ai-talent-market-dynamics.mdx comparing Anthropic, OpenAI, Google DeepMind, xAI, Meta AI, and Microsoft Research by total comp range, base salary, equity type, and benefits including Anthropic's unique DAF matching program.",
"model": "sonnet-4",
"duration": "~45min"
},
{
"date": "2026-02-18",
"branch": "claude/source-unsourced-facts-RecGw",
"title": "Source unsourced facts",
"summary": "Sourced 25 of 30 previously unsourced facts across all 4 fact files (anthropic, sam-altman, openai, jaan-tallinn). Created 21 new resource entries in news-media.yaml and ai-labs.yaml with proper SHA256-based IDs. Added 8 new publications (Bloomberg, The Information, Quartz, Benzinga, Britannica, World, Sherwood News). Fixed date accuracy issues (Worldcoin stats from 2024 to 2025-05, OpenAI revenue from Oct to Jun 2024) and improved notes. Source coverage improved from 29% to 88%.",
"model": "opus-4-6",
"duration": "~45min"
},
{
"date": "2026-02-18",
"branch": "claude/review-pr-216-P4Fcu",
"title": "Fix audit report findings from PR #216",
"summary": "Reviewed PR #216 (comprehensive wiki audit report) and implemented fixes for the major issues it identified: fixed 181 path-style EntityLink IDs across 33 files, converted 164 broken EntityLinks (referencing non-existent entities) to plain text across 38 files, fixed a temporal inconsistency in anthropic.mdx, and added missing description fields to 53 ai-transition-model pages."
},
{
"date": "2026-02-18",
"branch": "claude/audit-webpage-errors-X4jHg",
"title": "Audit wiki pages for factual errors and hallucinations",
"summary": "Systematic audit of ~20 wiki pages for factual errors, hallucinations, and inconsistencies. Found and fixed 25+ confirmed errors across 17 pages, including wrong dates, fabricated statistics, false attributions, missing major events, broken entity references, misattributed techniques, and internal inconsistencies."
}
],
"coverage": {
"passing": 10,
"total": 13,
"targets": {
"tables": 15,
"diagrams": 2,
"internalLinks": 30,
"externalLinks": 19,
"footnotes": 11,
"references": 11
},
"actuals": {
"tables": 16,
"diagrams": 0,
"internalLinks": 36,
"externalLinks": 11,
"footnotes": 0,
"references": 28,
"quotesWithQuotes": 31,
"quotesTotal": 34,
"accuracyChecked": 34,
"accuracyTotal": 34
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "green",
"overview": "green",
"tables": "green",
"diagrams": "red",
"internalLinks": "green",
"externalLinks": "amber",
"footnotes": "red",
"references": "green",
"quotes": "green",
"accuracy": "green"
},
"editHistoryCount": 5,
"ratingsString": "N:3.5 R:5.8 A:4.5 C:7.5"
},
"readerRank": 149,
"researchRank": 309,
"recommendedScore": 181.9
}
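The coverage block in the record pairs per-item targets with actuals and a green/amber/red status. The thresholds are not stated in the record; a rule of green at or above target, amber at or above half the target, and red below that reproduces the numeric statuses shown (tables 16/15 green, externalLinks 11/19 amber, diagrams 0/2 red), but it is an assumption rather than the site's documented logic.

```typescript
// Hypothetical coverage check: compare actuals against targets and assign a
// status. The 50% amber cutoff is an assumption that happens to match the
// statuses in the record above; the real thresholds may differ.
type Status = "green" | "amber" | "red";

function coverageStatus(actual: number, target: number): Status {
  if (actual >= target) return "green";
  if (actual >= target * 0.5) return "amber";
  return "red";
}

const targets = { tables: 15, diagrams: 2, internalLinks: 30, externalLinks: 19, footnotes: 11, references: 11 };
const actuals = { tables: 16, diagrams: 0, internalLinks: 36, externalLinks: 11, footnotes: 0, references: 28 };

const items: Record<string, Status> = {};
for (const key of Object.keys(targets) as (keyof typeof targets)[]) {
  items[key] = coverageStatus(actuals[key], targets[key]);
}
console.log(items);
// { tables: 'green', diagrams: 'red', internalLinks: 'green',
//   externalLinks: 'amber', footnotes: 'red', references: 'green' }
```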
External Links

{
"wikipedia": "https://en.wikipedia.org/wiki/OpenAI",
"lesswrong": "https://www.lesswrong.com/tag/openai",
"wikidata": "https://www.wikidata.org/wiki/Q21708200",
"grokipedia": "https://grokipedia.com/page/OpenAI"
}

Backlinks (238)
| id | title | type | relationship |
|---|---|---|---|
| gpt | GPT | ai-model | created-by |
| gpt-4 | GPT-4 | ai-model | created-by |
| gpt-4-turbo | GPT-4 Turbo | ai-model | created-by |
| gpt-4o | GPT-4o | ai-model | created-by |
| gpt-4o-mini | GPT-4o mini | ai-model | created-by |
| o1-preview | o1-preview | ai-model | created-by |
| o1 | o1 | ai-model | created-by |
| o1-mini | o1-mini | ai-model | created-by |
| o3-mini | o3-mini | ai-model | created-by |
| o3 | o3 | ai-model | created-by |
| gpt-4-1 | GPT-4.1 | ai-model | created-by |
| gpt-4-1-mini | GPT-4.1 mini | ai-model | created-by |
| gpt-4-1-nano | GPT-4.1 nano | ai-model | created-by |
| o4-mini | o4-mini | ai-model | created-by |
| coding | Autonomous Coding | capability | — |
| language-models | Large Language Models | capability | — |
| reasoning | Reasoning and Planning | capability | — |
| corporate-influence | Corporate Influence on AI Policy | crux | — |
| governance-focused | Governance-Focused Worldview | concept | — |
| heavy-scaffolding | Heavy Scaffolding / Agentic Systems | concept | — |
| deep-learning-era | Deep Learning Revolution Era | historical | — |
| mainstream-era | Mainstream Era | historical | — |
| anthropic-government-standoff | Anthropic-Pentagon Standoff (2026) | event | — |
| openai-foundation-governance | OpenAI Foundation Governance Paradox | analysis | — |
| anthropic-valuation | Anthropic Valuation Analysis | analysis | — |
| musk-openai-lawsuit | Musk v. OpenAI Lawsuit | analysis | — |
| elon-musk-philanthropy | Elon Musk (Funder) | analysis | — |
| anthropic-impact | Anthropic Impact Assessment Model | analysis | — |
| technical-pathways | AI Safety Technical Pathway Decomposition | analysis | — |
| multi-actor-landscape | AI Safety Multi-Actor Strategic Landscape | analysis | — |
| anthropic | Anthropic | organization | — |
| deepmind | Google DeepMind | organization | — |
| xai | xAI | organization | — |
| metr | METR | organization | — |
| arc | ARC | organization | — |
| uk-aisi | UK AI Safety Institute | organization | — |
| us-aisi | US AI Safety Institute | organization | — |
| openai-foundation | OpenAI Foundation | organization | — |
| leading-the-future | Leading the Future super PAC | organization | — |
| nist-ai | NIST and AI Safety | organization | — |
| ssi | Safe Superintelligence Inc (SSI) | organization | — |
| frontier-model-forum | Frontier Model Forum | organization | — |
| goodfire | Goodfire | organization | — |
| ilya-sutskever | Ilya Sutskever | person | — |
| elon-musk | Elon Musk (AI Industry) | person | — |
| david-sacks | David Sacks (White House AI Czar) | person | — |
| voluntary-commitments | Voluntary AI Safety Commitments | policy | — |
| eval-saturation | Eval Saturation & The Evals Gap | approach | — |
| alignment | AI Alignment | approach | — |
| scheming-detection | Scheming & Deception Detection | approach | — |
| dangerous-cap-evals | Dangerous Capability Evaluations | approach | — |
| safety-cases | AI Safety Cases | approach | — |
| ai-assisted | AI-Assisted Alignment | approach | — |
| alignment-evals | Alignment Evaluations | approach | — |
| red-teaming | Red Teaming | approach | — |
| weak-to-strong | Weak-to-Strong Generalization | approach | — |
| preference-optimization | Preference Optimization Methods | approach | — |
| process-supervision | Process Supervision | approach | — |
| refusal-training | Refusal Training | approach | — |
| rsp | Responsible Scaling Policies | policy | — |
| corporate | Corporate AI Safety Responses | approach | — |
| new-york-raise-act | New York RAISE Act | policy | — |
| open-source | Open Source AI Safety | approach | — |
| whistleblower-protections | AI Whistleblower Protections | policy | — |
| debate | AI Safety via Debate | approach | — |
| structured-access | Structured Access / API-Only | approach | — |
| tool-restrictions | Tool-Use Restrictions | approach | — |
| agentic-ai | Agentic AI | capability | — |
| large-language-models | Large Language Models | concept | — |
| long-horizon | Long-Horizon Autonomous Tasks | capability | — |
| scientific-research | Scientific Research Capabilities | capability | — |
| situational-awareness | Situational Awareness | capability | — |
| tool-use | Tool Use and Computer Use | capability | — |
| accident-risks | AI Accident Risk Cruxes | crux | — |
| misuse-risks | AI Misuse Risk Cruxes | crux | — |
| solutions | AI Safety Solution Cruxes | crux | — |
| structural-risks | AI Structural Risk Cruxes | crux | — |
| interpretability-sufficient | Is Interpretability Sufficient for Safety? | crux | — |
| pause-debate | Should We Pause AI Development? | crux | — |
| regulation-debate | Government Regulation vs Industry Self-Governance | crux | — |
| scaling-debate | Is Scaling All You Need? | crux | — |
| why-alignment-hard | Why Alignment Might Be Hard | argument | — |
| agi-development | AGI Development | concept | — |
| agi-timeline | AGI Timeline | concept | — |
| ea-longtermist-wins-losses | EA and Longtermist Wins and Losses | concept | — |
| __index__/knowledge-base/history | History | concept | — |
| miri-era | The MIRI Era (2000-2015) | historical | — |
| claude-code-espionage-2025 | Claude Code Espionage Incident (2025) | concept | — |
| __index__/knowledge-base | Knowledge Base | concept | — |
| light-scaffolding | Light Scaffolding | capability | — |
| ai-compute-scaling-metrics | AI Compute Scaling Metrics | analysis | — |
| ai-megaproject-infrastructure | AI Megaproject Infrastructure | analysis | — |
| ai-talent-market-dynamics | AI Talent Market Dynamics | analysis | — |
| ai-timelines | AI Timelines | concept | — |
| bioweapons-ai-uplift | AI Uplift Assessment Model | analysis | — |
| capabilities-to-safety-pipeline | Capabilities-to-Safety Pipeline Model | analysis | — |
| capability-alignment-race | Capability-Alignment Race Model | analysis | — |
| frontier-lab-cost-structure | Frontier Lab Cost Structure | analysis | — |
| goal-misgeneralization-probability | Goal Misgeneralization Probability Model | analysis | — |
| instrumental-convergence-framework | Instrumental Convergence Framework | analysis | — |
| international-coordination-game | International AI Coordination Game | analysis | — |
| intervention-effectiveness-matrix | Intervention Effectiveness Matrix | analysis | — |
| intervention-timing-windows | Intervention Timing Windows | analysis | — |
| model-organisms-of-misalignment | Model Organisms of Misalignment | analysis | — |
| planning-for-frontier-lab-scaling | Planning for Frontier Lab Scaling | analysis | — |
| power-seeking-conditions | Power-Seeking Emergence Conditions Model | analysis | — |
| pre-tai-capital-deployment | Pre-TAI Capital Deployment: $100B-$300B+ Spending Analysis | analysis | — |
| projecting-compute-spending | Projecting Compute Spending | analysis | — |
| racing-dynamics-impact | Racing Dynamics Impact Model | analysis | — |
| risk-activation-timeline | Risk Activation Timeline Model | analysis | — |
| risk-interaction-matrix | Risk Interaction Matrix Model | analysis | — |
| risk-interaction-network | Risk Interaction Network | analysis | — |
| safety-research-allocation | Safety Research Allocation Model | analysis | — |
| safety-researcher-gap | AI Safety Talent Supply/Demand Gap Model | analysis | — |
| safety-spending-at-scale | Safety Spending at Scale | analysis | — |
| scaling-laws | AI Scaling Laws | concept | — |
| scheming-likelihood-model | Scheming Likelihood Assessment | analysis | — |
| ai-futures-project | AI Futures Project | organization | — |
| ai-revenue-sources | AI Revenue Sources | organization | — |
| anthropic-investors | Anthropic (Funder) | analysis | — |
| anthropic-ipo | Anthropic IPO | analysis | — |
| apollo-research | Apollo Research | organization | — |
| biosecurity-orgs-overview | Biosecurity Organizations (Overview) | concept | — |
| bridgewater-aia-labs | Bridgewater AIA Labs | organization | — |
| cais | CAIS (Center for AI Safety) | organization | — |
| chai | CHAI (Center for Human-Compatible AI) | organization | — |
| coefficient-giving | Coefficient Giving | organization | — |
| conjecture | Conjecture | organization | — |
| controlai | ControlAI | organization | — |
| ea-global | EA Global | organization | — |
| epoch-ai | Epoch AI | organization | — |
| far-ai | FAR AI | organization | — |
| fhi | Future of Humanity Institute (FHI) | organization | — |
| founders-fund | Founders Fund | organization | — |
| frontier-ai-comparison | Frontier AI Company Comparison (2026) | concept | — |
| ftx | FTX (cryptocurrency exchange) | organization | — |
| futuresearch | FutureSearch | organization | — |
| govai | GovAI | organization | — |
| __index__/knowledge-base/organizations | Organizations | concept | — |
| kalshi | Kalshi (Prediction Market) | organization | — |
| labs-overview | Frontier AI Labs (Overview) | concept | — |
| lesswrong | LessWrong | organization | — |
| lionheart-ventures | Lionheart Ventures | organization | — |
| long-term-benefit-trust | Long-Term Benefit Trust (Anthropic) | analysis | — |
| mats | MATS ML Alignment Theory Scholars program | organization | — |
| meta-ai | Meta AI (FAIR) | organization | — |
| microsoft | Microsoft AI | organization | — |
| palisade-research | Palisade Research | organization | — |
| pause-ai | Pause AI | organization | — |
| red-queen-bio | Red Queen Bio | organization | — |
| redwood-research | Redwood Research | organization | — |
| safety-orgs-overview | AI Safety Organizations (Overview) | concept | — |
| schmidt-futures | Schmidt Futures | organization | — |
| securebio | SecureBio | organization | — |
| situational-awareness-lp | Situational Awareness LP | organization | — |
| chris-olah | Chris Olah | person | — |
| connor-leahy | Connor Leahy | person | — |
| dan-hendrycks | Dan Hendrycks | person | — |
| daniela-amodei | Daniela Amodei | person | — |
| dario-amodei | Dario Amodei | person | — |
| demis-hassabis | Demis Hassabis | person | — |
| dustin-moskovitz | Dustin Moskovitz (AI Safety Funder) | person | — |
| eli-lifland | Eli Lifland | person | — |
| eliezer-yudkowsky-predictions | Eliezer Yudkowsky: Track Record | concept | — |
| elon-musk-predictions | Elon Musk: Track Record | concept | — |
| evan-hubinger | Evan Hubinger | person | — |
| gwern | Gwern Branwen | person | — |
| helen-toner | Helen Toner | person | — |
| holden-karnofsky | Holden Karnofsky | person | — |
| __index__/knowledge-base/people | People | concept | — |
| jan-leike | Jan Leike | person | — |
| leopold-aschenbrenner | Leopold Aschenbrenner | person | — |
| max-tegmark | Max Tegmark | person | — |
| nick-bostrom | Nick Bostrom | person | — |
| paul-christiano | Paul Christiano | person | — |
| sam-altman-predictions | Sam Altman: Track Record | concept | — |
| sam-altman | Sam Altman | person | — |
| vidur-kapur | Vidur Kapur | person | — |
| yann-lecun-predictions | Yann LeCun: Track Record | concept | — |
| ai-control | AI Control | safety-agenda | — |
| ai-forecasting-benchmark | AI Forecasting Benchmark Tournament | project | — |
| anthropic-core-views | Anthropic Core Views | safety-agenda | — |
| bletchley-declaration | Bletchley Declaration | policy | — |
| california-sb1047 | California SB 1047 | policy | — |
| california-sb53 | California SB 53 | policy | — |
| constitutional-ai | Constitutional AI | approach | — |
| coordination-mechanisms | International Coordination Mechanisms | policy | — |
| coordination-tech | AI Governance Coordination Technologies | approach | — |
| corrigibility | Corrigibility Research | safety-agenda | — |
| deliberation | AI-Assisted Deliberation Platforms | approach | — |
| effectiveness-assessment | Policy Effectiveness Assessment | analysis | — |
| epistemic-infrastructure | AI-Era Epistemic Infrastructure | approach | — |
| epistemic-security | AI-Era Epistemic Security | approach | — |
| eu-ai-act | EU AI Act | policy | — |
| evals-governance | Evals-Based Deployment Gates | policy | — |
| evals | Evals & Red-teaming | safety-agenda | — |
| evaluation-awareness | Evaluation Awareness | approach | — |
| evaluation | AI Evaluation | approach | — |
| governance-policy | AI Governance and Policy | crux | — |
| international-summits | International AI Safety Summits | policy | — |
| interpretability | Mechanistic Interpretability | safety-agenda | — |
| intervention-portfolio | AI Safety Intervention Portfolio | approach | — |
| lab-culture | AI Lab Safety Culture | approach | — |
| mech-interp | Mechanistic Interpretability | approach | — |
| model-auditing | Third-Party Model Auditing | approach | — |
| model-spec | AI Model Specifications | policy | — |
| output-filtering | AI Output Filtering | approach | — |
| research-agendas | AI Alignment Research Agenda Comparison | crux | — |
| responsible-scaling-policies | Responsible Scaling Policies | policy | — |
| reward-modeling | Reward Modeling | approach | — |
| rlhf | RLHF / Constitutional AI | capability | — |
| sandboxing | Sandboxing / Containment | approach | — |
| scalable-eval-approaches | Scalable Eval Approaches | approach | — |
| scalable-oversight | Scalable Oversight | safety-agenda | — |
| seoul-declaration | Seoul AI Safety Summit Declaration | policy | — |
| sparse-autoencoders | Sparse Autoencoders (SAEs) | approach | — |
| stampy-aisafety-info | Stampy / AISafety.info | project | — |
| technical-research | Technical AI Safety Research | crux | — |
| thresholds | Compute Thresholds | policy | — |
| training-programs | AI Safety Training Programs | approach | — |
| us-executive-order | US Executive Order on Safe, Secure, and Trustworthy AI | policy | — |
| us-state-legislation | US State AI Legislation | policy | — |
| ai-welfare | AI Welfare and Digital Minds | concept | — |
| bioweapons | Bioweapons | risk | — |
| concentrated-compute-cybersecurity-risk | Concentrated Compute as a Cybersecurity Risk | risk | — |
| cyber-psychosis | AI-Induced Cyber Psychosis | risk | — |
| disinformation | Disinformation | risk | — |
| epistemic-sycophancy | Epistemic Sycophancy | risk | — |
| existential-risk | Existential Risk from AI | concept | — |
| financial-stability-risks-ai-capex | Financial Stability Risks from AI Capital Expenditure | risk | — |
| knowledge-monopoly | AI Knowledge Monopoly | risk | — |
| power-seeking | Power-Seeking AI | risk | — |
| reward-hacking | Reward Hacking | risk | — |
| scheming | Scheming | risk | — |
| superintelligence | Superintelligence | concept | — |
| winner-take-all | AI Winner-Take-All Dynamics | risk | — |
| long-timelines | Long-Timelines Technical Worldview | concept | — |
| ai-research-workflows | AI-Assisted Research Workflows: Best Practices | concept | — |