Longterm Wiki

OpenAI

openai · organization
Path: /knowledge-base/organizations/openai/
Entity ID (EID): E218
238 backlinks · Quality: 62 · Updated: 2026-03-12
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
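That merge can be pictured as a shallow object merge over the three sources. A minimal TypeScript sketch, assuming hypothetical input shapes (only the output field names are taken from the record below; this is not the wiki's actual build code):

// Hypothetical sketch of the build-time merge described above. The three
// input interfaces are assumptions; the field names mirror the record below.
interface Frontmatter { title: string; description: string; lastUpdated: string; }
interface EntityYaml  { id: string; entityType: string; category: string; }
interface Computed    { quality: number; wordCount: number; backlinkCount: number; }

type PageRecord = Frontmatter & EntityYaml & Computed;

// A later source wins on key collisions, as with a plain object spread.
function buildPageRecord(fm: Frontmatter, entity: EntityYaml, computed: Computed): PageRecord {
  return { ...fm, ...entity, ...computed };
}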
{
  "id": "openai",
  "numericId": null,
  "path": "/knowledge-base/organizations/openai/",
  "filePath": "knowledge-base/organizations/openai.mdx",
  "title": "OpenAI",
  "quality": 62,
  "readerImportance": 72.4,
  "researchImportance": 44.5,
  "tacticalValue": 92,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-12",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive organizational profile of OpenAI documenting evolution from 2015 non-profit to Public Benefit Corporation, with detailed analysis of governance crisis, 2024-2025 ownership restructuring (conversion from capped-profit LLC to PBC, with specific post-conversion equity percentages subject to regulatory finalization), key leadership departures, and capability advancement (o1/o3 reasoning models). Updated with 2025 developments including o3-mini release, 800M weekly active users, Altman's AGI timeline statements, enterprise market share decline from 50% to 25% between 2023 and 2025, and joint safety evaluation with Anthropic in summer 2025.",
  "description": "Leading AI lab that developed GPT models and ChatGPT, analyzing organizational evolution from non-profit research to commercial AGI development amid safety-commercialization tensions",
  "ratings": {
    "focus": 7.2,
    "novelty": 3.5,
    "rigor": 5.8,
    "completeness": 7.5,
    "concreteness": 7.8,
    "actionability": 4.5,
    "objectivity": 6.5
  },
  "category": "organizations",
  "subcategory": "labs",
  "clusters": [
    "ai-safety",
    "community",
    "governance"
  ],
  "metrics": {
    "wordCount": 3768,
    "tableCount": 16,
    "diagramCount": 0,
    "internalLinks": 36,
    "externalLinks": 11,
    "footnoteCount": 0,
    "bulletRatio": 0.34,
    "sectionCount": 46,
    "hasOverview": true,
    "structuralScore": 13
  },
  "suggestedQuality": 87,
  "updateFrequency": 3,
  "evergreen": true,
  "wordCount": 3768,
  "unconvertedLinks": [
    {
      "text": "OpenAI GPT-4 System Card",
      "url": "https://cdn.openai.com/papers/gpt-4-system-card.pdf",
      "resourceId": "ebab6e05661645c5",
      "resourceTitle": "OpenAI"
    },
    {
      "text": "OpenAI Deliberative Alignment",
      "url": "https://openai.com/index/deliberative-alignment/",
      "resourceId": "ee7628aa3f6282e5",
      "resourceTitle": "Deliberative alignment: reasoning enables safer language models"
    },
    {
      "text": "Global Affairs Initiative",
      "url": "https://openai.com/global-affairs/openai-for-countries/",
      "resourceId": "238f28c96d8780f6",
      "resourceTitle": "Introducing OpenAI for Countries"
    },
    {
      "text": "Sora 2 Launch",
      "url": "https://openai.com/index/sora-2/",
      "resourceId": "edc1663b7d3b8ac2",
      "resourceTitle": "Sora 2 is here"
    },
    {
      "text": "TIME Magazine Interview",
      "url": "https://time.com/7205596/sam-altman-superintelligence-agi/",
      "resourceId": "358ab98ce38cdd9c",
      "resourceTitle": "How OpenAI's Sam Altman Is Thinking About AGI and Superintelligence in 2025"
    },
    {
      "text": "arXiv:2005.14165",
      "url": "https://arxiv.org/abs/2005.14165",
      "resourceId": "2cab3ea10b8b7ae2",
      "resourceTitle": "Brown et al. (2020)"
    },
    {
      "text": "arXiv:2203.02155",
      "url": "https://arxiv.org/abs/2203.02155",
      "resourceId": "1098fc60be7ca2b0",
      "resourceTitle": "Training Language Models to Follow Instructions with Human Feedback"
    },
    {
      "text": "arXiv:2312.09390",
      "url": "https://arxiv.org/abs/2312.09390",
      "resourceId": "0ba98ae3a8a72270",
      "resourceTitle": "arXiv"
    },
    {
      "text": "arXiv:2303.08774",
      "url": "https://arxiv.org/abs/2303.08774",
      "resourceId": "29a0882390ee7063",
      "resourceTitle": "OpenAI's GPT-4"
    }
  ],
  "unconvertedLinkCount": 9,
  "convertedLinkCount": 0,
  "backlinkCount": 238,
  "citationHealth": {
    "total": 34,
    "withQuotes": 31,
    "verified": 30,
    "accuracyChecked": 34,
    "accurate": 25,
    "inaccurate": 0,
    "avgScore": 0.9180523262869927
  },
  "hallucinationRisk": {
    "level": "high",
    "score": 75,
    "factors": [
      "biographical-claims",
      "no-citations"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 18,
    "similarPages": [
      {
        "id": "xai",
        "title": "xAI",
        "path": "/knowledge-base/organizations/xai/",
        "similarity": 18
      },
      {
        "id": "agentic-ai",
        "title": "Agentic AI",
        "path": "/knowledge-base/capabilities/agentic-ai/",
        "similarity": 17
      },
      {
        "id": "large-language-models",
        "title": "Large Language Models",
        "path": "/knowledge-base/capabilities/large-language-models/",
        "similarity": 17
      },
      {
        "id": "anthropic-ipo",
        "title": "Anthropic IPO",
        "path": "/knowledge-base/organizations/anthropic-ipo/",
        "similarity": 17
      },
      {
        "id": "anthropic",
        "title": "Anthropic",
        "path": "/knowledge-base/organizations/anthropic/",
        "similarity": 17
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-26",
      "branch": "claude/claims-driven-improvements",
      "title": "Auto-improve (standard): OpenAI",
      "summary": "Improved \"OpenAI\" via standard pipeline (403.2s). Quality score: 74. Issues resolved: Footnote [^4] is missing — footnotes skip from [^3] to [^5],; Footnote [^24], [^25], [^26] are missing — footnotes skip fr; Footnotes [^40] and [^41] cite sources (LessWrong OpenAI los.",
      "duration": "403.2s",
      "cost": "$5-8"
    },
    {
      "date": "2026-02-19",
      "branch": "claude/add-wiki-tables-VhyKT",
      "title": "Add concrete shareable data tables to high-value pages",
      "summary": "Added three concrete, screenshot-worthy data tables to high-value wiki pages: (1) OpenAI ownership/stakeholder table to openai.mdx showing the 2024-2025 PBC restructuring with Foundation ~26%, Microsoft transitioning from 49% profit share to ~2.5% equity, and Sam Altman's proposed 7% grant; (2) Budget and headcount comparison table to safety-orgs-overview.mdx covering MIRI, ARC, METR, Redwood Research, CAIS, Apollo Research, GovAI, Conjecture, and FAR AI with annual budgets, headcounts, and cost-per-researcher; (3) Per-company compensation comparison table to ai-talent-market-dynamics.mdx comparing Anthropic, OpenAI, Google DeepMind, xAI, Meta AI, and Microsoft Research by total comp range, base salary, equity type, and benefits including Anthropic's unique DAF matching program.",
      "model": "sonnet-4",
      "duration": "~45min"
    },
    {
      "date": "2026-02-18",
      "branch": "claude/source-unsourced-facts-RecGw",
      "title": "Source unsourced facts",
      "summary": "Sourced 25 of 30 previously unsourced facts across all 4 fact files (anthropic, sam-altman, openai, jaan-tallinn). Created 21 new resource entries in news-media.yaml and ai-labs.yaml with proper SHA256-based IDs. Added 8 new publications (Bloomberg, The Information, Quartz, Benzinga, Britannica, World, Sherwood News). Fixed date accuracy issues (Worldcoin stats from 2024 to 2025-05, OpenAI revenue from Oct to Jun 2024) and improved notes. Source coverage improved from 29% to 88%.",
      "model": "opus-4-6",
      "duration": "~45min"
    },
    {
      "date": "2026-02-18",
      "branch": "claude/review-pr-216-P4Fcu",
      "title": "Fix audit report findings from PR #216",
      "summary": "Reviewed PR #216 (comprehensive wiki audit report) and implemented fixes for the major issues it identified: fixed 181 path-style EntityLink IDs across 33 files, converted 164 broken EntityLinks (referencing non-existent entities) to plain text across 38 files, fixed a temporal inconsistency in anthropic.mdx, and added missing description fields to 53 ai-transition-model pages."
    },
    {
      "date": "2026-02-18",
      "branch": "claude/audit-webpage-errors-X4jHg",
      "title": "Audit wiki pages for factual errors and hallucinations",
      "summary": "Systematic audit of ~20 wiki pages for factual errors, hallucinations, and inconsistencies. Found and fixed 25+ confirmed errors across 17 pages, including wrong dates, fabricated statistics, false attributions, missing major events, broken entity references, misattributed techniques, and internal inconsistencies."
    }
  ],
  "coverage": {
    "passing": 10,
    "total": 13,
    "targets": {
      "tables": 15,
      "diagrams": 2,
      "internalLinks": 30,
      "externalLinks": 19,
      "footnotes": 11,
      "references": 11
    },
    "actuals": {
      "tables": 16,
      "diagrams": 0,
      "internalLinks": 36,
      "externalLinks": 11,
      "footnotes": 0,
      "references": 28,
      "quotesWithQuotes": 31,
      "quotesTotal": 34,
      "accuracyChecked": 34,
      "accuracyTotal": 34
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "green",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "amber",
      "footnotes": "red",
      "references": "green",
      "quotes": "green",
      "accuracy": "green"
    },
    "editHistoryCount": 5,
    "ratingsString": "N:3.5 R:5.8 A:4.5 C:7.5"
  },
  "readerRank": 149,
  "researchRank": 309,
  "recommendedScore": 181.9
}
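The coverage block in the record pairs per-item targets with build-time actuals and rolls each numeric item up to a green/amber/red status. The record does not state the thresholds, but the values shown are consistent with a simple rule: meeting the target is green, a non-zero shortfall is amber, and zero is red. Below is a minimal TypeScript sketch of that inferred rule; coverageStatus and coverageItems are hypothetical names, not the wiki's actual pipeline.

type Status = "green" | "amber" | "red";

interface Coverage {
  targets: Record<string, number>;
  actuals: Record<string, number>;
}

// Inferred rule, not confirmed build code: meeting the target is green,
// a non-zero shortfall is amber, and an actual of zero is red.
function coverageStatus(actual: number, target: number): Status {
  if (actual >= target) return "green";
  if (actual > 0) return "amber";
  return "red";
}

function coverageItems(cov: Coverage): Record<string, Status> {
  const items: Record<string, Status> = {};
  for (const [name, target] of Object.entries(cov.targets)) {
    items[name] = coverageStatus(cov.actuals[name] ?? 0, target);
  }
  return items;
}

// Targets and actuals from the record above.
const openai: Coverage = {
  targets: { tables: 15, diagrams: 2, internalLinks: 30, externalLinks: 19, footnotes: 11, references: 11 },
  actuals: { tables: 16, diagrams: 0, internalLinks: 36, externalLinks: 11, footnotes: 0, references: 28 },
};

console.log(coverageItems(openai));
// { tables: "green", diagrams: "red", internalLinks: "green",
//   externalLinks: "amber", footnotes: "red", references: "green" }

Run against the record's values, this reproduces its item colors exactly; counting only green items as passing also matches its passing: 10 of total: 13, since the seven non-numeric checks (llmSummary through overview, plus quotes and accuracy) are all green and three of the six numeric items are not.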
External Links
{
  "wikipedia": "https://en.wikipedia.org/wiki/OpenAI",
  "lesswrong": "https://www.lesswrong.com/tag/openai",
  "wikidata": "https://www.wikidata.org/wiki/Q21708200",
  "grokipedia": "https://grokipedia.com/page/OpenAI"
}
Backlinks (238)
id | title | type | relationship
gpt | GPT | ai-model | created-by
gpt-4 | GPT-4 | ai-model | created-by
gpt-4-turbo | GPT-4 Turbo | ai-model | created-by
gpt-4o | GPT-4o | ai-model | created-by
gpt-4o-mini | GPT-4o mini | ai-model | created-by
o1-preview | o1-preview | ai-model | created-by
o1 | o1 | ai-model | created-by
o1-mini | o1-mini | ai-model | created-by
o3-mini | o3-mini | ai-model | created-by
o3 | o3 | ai-model | created-by
gpt-4-1 | GPT-4.1 | ai-model | created-by
gpt-4-1-mini | GPT-4.1 mini | ai-model | created-by
gpt-4-1-nano | GPT-4.1 nano | ai-model | created-by
o4-mini | o4-mini | ai-model | created-by
coding | Autonomous Coding | capability
language-models | Large Language Models | capability
reasoning | Reasoning and Planning | capability
corporate-influence | Corporate Influence on AI Policy | crux
governance-focused | Governance-Focused Worldview | concept
heavy-scaffolding | Heavy Scaffolding / Agentic Systems | concept
deep-learning-era | Deep Learning Revolution Era | historical
mainstream-era | Mainstream Era | historical
anthropic-government-standoff | Anthropic-Pentagon Standoff (2026) | event
openai-foundation-governance | OpenAI Foundation Governance Paradox | analysis
anthropic-valuation | Anthropic Valuation Analysis | analysis
musk-openai-lawsuit | Musk v. OpenAI Lawsuit | analysis
elon-musk-philanthropy | Elon Musk (Funder) | analysis
anthropic-impact | Anthropic Impact Assessment Model | analysis
technical-pathways | AI Safety Technical Pathway Decomposition | analysis
multi-actor-landscape | AI Safety Multi-Actor Strategic Landscape | analysis
anthropic | Anthropic | organization
deepmind | Google DeepMind | organization
xai | xAI | organization
metr | METR | organization
arc | ARC | organization
uk-aisi | UK AI Safety Institute | organization
us-aisi | US AI Safety Institute | organization
openai-foundation | OpenAI Foundation | organization
leading-the-future | Leading the Future super PAC | organization
nist-ai | NIST and AI Safety | organization
ssi | Safe Superintelligence Inc (SSI) | organization
frontier-model-forum | Frontier Model Forum | organization
goodfire | Goodfire | organization
ilya-sutskever | Ilya Sutskever | person
elon-musk | Elon Musk (AI Industry) | person
david-sacks | David Sacks (White House AI Czar) | person
voluntary-commitments | Voluntary AI Safety Commitments | policy
eval-saturation | Eval Saturation & The Evals Gap | approach
alignment | AI Alignment | approach
scheming-detection | Scheming & Deception Detection | approach
dangerous-cap-evals | Dangerous Capability Evaluations | approach
safety-cases | AI Safety Cases | approach
ai-assisted | AI-Assisted Alignment | approach
alignment-evals | Alignment Evaluations | approach
red-teaming | Red Teaming | approach
weak-to-strong | Weak-to-Strong Generalization | approach
preference-optimization | Preference Optimization Methods | approach
process-supervision | Process Supervision | approach
refusal-training | Refusal Training | approach
rsp | Responsible Scaling Policies | policy
corporate | Corporate AI Safety Responses | approach
new-york-raise-act | New York RAISE Act | policy
open-source | Open Source AI Safety | approach
whistleblower-protections | AI Whistleblower Protections | policy
debate | AI Safety via Debate | approach
structured-access | Structured Access / API-Only | approach
tool-restrictions | Tool-Use Restrictions | approach
agentic-ai | Agentic AI | capability
large-language-models | Large Language Models | concept
long-horizon | Long-Horizon Autonomous Tasks | capability
scientific-research | Scientific Research Capabilities | capability
situational-awareness | Situational Awareness | capability
tool-use | Tool Use and Computer Use | capability
accident-risks | AI Accident Risk Cruxes | crux
misuse-risks | AI Misuse Risk Cruxes | crux
solutions | AI Safety Solution Cruxes | crux
structural-risks | AI Structural Risk Cruxes | crux
interpretability-sufficient | Is Interpretability Sufficient for Safety? | crux
pause-debate | Should We Pause AI Development? | crux
regulation-debate | Government Regulation vs Industry Self-Governance | crux
scaling-debate | Is Scaling All You Need? | crux
why-alignment-hard | Why Alignment Might Be Hard | argument
agi-development | AGI Development | concept
agi-timeline | AGI Timeline | concept
ea-longtermist-wins-losses | EA and Longtermist Wins and Losses | concept
__index__/knowledge-base/history | History | concept
miri-era | The MIRI Era (2000-2015) | historical
claude-code-espionage-2025 | Claude Code Espionage Incident (2025) | concept
__index__/knowledge-base | Knowledge Base | concept
light-scaffolding | Light Scaffolding | capability
ai-compute-scaling-metrics | AI Compute Scaling Metrics | analysis
ai-megaproject-infrastructure | AI Megaproject Infrastructure | analysis
ai-talent-market-dynamics | AI Talent Market Dynamics | analysis
ai-timelines | AI Timelines | concept
bioweapons-ai-uplift | AI Uplift Assessment Model | analysis
capabilities-to-safety-pipeline | Capabilities-to-Safety Pipeline Model | analysis
capability-alignment-race | Capability-Alignment Race Model | analysis
frontier-lab-cost-structure | Frontier Lab Cost Structure | analysis
goal-misgeneralization-probability | Goal Misgeneralization Probability Model | analysis
instrumental-convergence-framework | Instrumental Convergence Framework | analysis
international-coordination-game | International AI Coordination Game | analysis
intervention-effectiveness-matrix | Intervention Effectiveness Matrix | analysis
intervention-timing-windows | Intervention Timing Windows | analysis
model-organisms-of-misalignment | Model Organisms of Misalignment | analysis
planning-for-frontier-lab-scaling | Planning for Frontier Lab Scaling | analysis
power-seeking-conditions | Power-Seeking Emergence Conditions Model | analysis
pre-tai-capital-deployment | Pre-TAI Capital Deployment: $100B-$300B+ Spending Analysis | analysis
projecting-compute-spending | Projecting Compute Spending | analysis
racing-dynamics-impact | Racing Dynamics Impact Model | analysis
risk-activation-timeline | Risk Activation Timeline Model | analysis
risk-interaction-matrix | Risk Interaction Matrix Model | analysis
risk-interaction-network | Risk Interaction Network | analysis
safety-research-allocation | Safety Research Allocation Model | analysis
safety-researcher-gap | AI Safety Talent Supply/Demand Gap Model | analysis
safety-spending-at-scale | Safety Spending at Scale | analysis
scaling-laws | AI Scaling Laws | concept
scheming-likelihood-model | Scheming Likelihood Assessment | analysis
ai-futures-project | AI Futures Project | organization
ai-revenue-sources | AI Revenue Sources | organization
anthropic-investors | Anthropic (Funder) | analysis
anthropic-ipo | Anthropic IPO | analysis
apollo-research | Apollo Research | organization
biosecurity-orgs-overview | Biosecurity Organizations (Overview) | concept
bridgewater-aia-labs | Bridgewater AIA Labs | organization
cais | CAIS (Center for AI Safety) | organization
chai | CHAI (Center for Human-Compatible AI) | organization
coefficient-giving | Coefficient Giving | organization
conjecture | Conjecture | organization
controlai | ControlAI | organization
ea-global | EA Global | organization
epoch-ai | Epoch AI | organization
far-ai | FAR AI | organization
fhi | Future of Humanity Institute (FHI) | organization
founders-fund | Founders Fund | organization
frontier-ai-comparison | Frontier AI Company Comparison (2026) | concept
ftx | FTX (cryptocurrency exchange) | organization
futuresearch | FutureSearch | organization
govai | GovAI | organization
__index__/knowledge-base/organizations | Organizations | concept
kalshi | Kalshi (Prediction Market) | organization
labs-overview | Frontier AI Labs (Overview) | concept
lesswrong | LessWrong | organization
lionheart-ventures | Lionheart Ventures | organization
long-term-benefit-trust | Long-Term Benefit Trust (Anthropic) | analysis
mats | MATS ML Alignment Theory Scholars program | organization
meta-ai | Meta AI (FAIR) | organization
microsoft | Microsoft AI | organization
palisade-research | Palisade Research | organization
pause-ai | Pause AI | organization
red-queen-bio | Red Queen Bio | organization
redwood-research | Redwood Research | organization
safety-orgs-overview | AI Safety Organizations (Overview) | concept
schmidt-futures | Schmidt Futures | organization
securebio | SecureBio | organization
situational-awareness-lp | Situational Awareness LP | organization
chris-olah | Chris Olah | person
connor-leahy | Connor Leahy | person
dan-hendrycks | Dan Hendrycks | person
daniela-amodei | Daniela Amodei | person
dario-amodei | Dario Amodei | person
demis-hassabis | Demis Hassabis | person
dustin-moskovitz | Dustin Moskovitz (AI Safety Funder) | person
eli-lifland | Eli Lifland | person
eliezer-yudkowsky-predictions | Eliezer Yudkowsky: Track Record | concept
elon-musk-predictions | Elon Musk: Track Record | concept
evan-hubinger | Evan Hubinger | person
gwern | Gwern Branwen | person
helen-toner | Helen Toner | person
holden-karnofsky | Holden Karnofsky | person
__index__/knowledge-base/people | People | concept
jan-leike | Jan Leike | person
leopold-aschenbrenner | Leopold Aschenbrenner | person
max-tegmark | Max Tegmark | person
nick-bostrom | Nick Bostrom | person
paul-christiano | Paul Christiano | person
sam-altman-predictions | Sam Altman: Track Record | concept
sam-altman | Sam Altman | person
vidur-kapur | Vidur Kapur | person
yann-lecun-predictions | Yann LeCun: Track Record | concept
ai-control | AI Control | safety-agenda
ai-forecasting-benchmark | AI Forecasting Benchmark Tournament | project
anthropic-core-views | Anthropic Core Views | safety-agenda
bletchley-declaration | Bletchley Declaration | policy
california-sb1047 | California SB 1047 | policy
california-sb53 | California SB 53 | policy
constitutional-ai | Constitutional AI | approach
coordination-mechanisms | International Coordination Mechanisms | policy
coordination-tech | AI Governance Coordination Technologies | approach
corrigibility | Corrigibility Research | safety-agenda
deliberation | AI-Assisted Deliberation Platforms | approach
effectiveness-assessment | Policy Effectiveness Assessment | analysis
epistemic-infrastructure | AI-Era Epistemic Infrastructure | approach
epistemic-security | AI-Era Epistemic Security | approach
eu-ai-act | EU AI Act | policy
evals-governance | Evals-Based Deployment Gates | policy
evals | Evals & Red-teaming | safety-agenda
evaluation-awareness | Evaluation Awareness | approach
evaluation | AI Evaluation | approach
governance-policy | AI Governance and Policy | crux
international-summits | International AI Safety Summits | policy
interpretability | Mechanistic Interpretability | safety-agenda
intervention-portfolio | AI Safety Intervention Portfolio | approach
lab-culture | AI Lab Safety Culture | approach
mech-interp | Mechanistic Interpretability | approach
model-auditing | Third-Party Model Auditing | approach
model-spec | AI Model Specifications | policy
output-filtering | AI Output Filtering | approach
research-agendas | AI Alignment Research Agenda Comparison | crux
responsible-scaling-policies | Responsible Scaling Policies | policy
reward-modeling | Reward Modeling | approach
rlhf | RLHF / Constitutional AI | capability
sandboxing | Sandboxing / Containment | approach
scalable-eval-approaches | Scalable Eval Approaches | approach
scalable-oversight | Scalable Oversight | safety-agenda
seoul-declaration | Seoul AI Safety Summit Declaration | policy
sparse-autoencoders | Sparse Autoencoders (SAEs) | approach
stampy-aisafety-info | Stampy / AISafety.info | project
technical-research | Technical AI Safety Research | crux
thresholds | Compute Thresholds | policy
training-programs | AI Safety Training Programs | approach
us-executive-order | US Executive Order on Safe, Secure, and Trustworthy AI | policy
us-state-legislation | US State AI Legislation | policy
ai-welfare | AI Welfare and Digital Minds | concept
bioweapons | Bioweapons | risk
concentrated-compute-cybersecurity-risk | Concentrated Compute as a Cybersecurity Risk | risk
cyber-psychosis | AI-Induced Cyber Psychosis | risk
disinformation | Disinformation | risk
epistemic-sycophancy | Epistemic Sycophancy | risk
existential-risk | Existential Risk from AI | concept
financial-stability-risks-ai-capex | Financial Stability Risks from AI Capital Expenditure | risk
knowledge-monopoly | AI Knowledge Monopoly | risk
power-seeking | Power-Seeking AI | risk
reward-hacking | Reward Hacking | risk
scheming | Scheming | risk
superintelligence | Superintelligence | concept
winner-take-all | AI Winner-Take-All Dynamics | risk
long-timelines | Long-Timelines Technical Worldview | concept
ai-research-workflows | AI-Assisted Research Workflows: Best Practices | concept