Longterm Wiki

Sam Altman

sam-altman · person · Path: /knowledge-base/people/sam-altman/
E269 — Entity ID (EID)
← Back to page · 43 backlinks · Quality: 40 · Updated: 2026-03-13
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "sam-altman",
  "numericId": null,
  "path": "/knowledge-base/people/sam-altman/",
  "filePath": "knowledge-base/people/sam-altman.mdx",
  "title": "Sam Altman",
  "quality": 40,
  "readerImportance": 26.5,
  "researchImportance": 11.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive biographical profile of Sam Altman documenting his role as OpenAI CEO, timeline predictions (AGI within presidential term, superintelligence in \"few thousand days\"), and controversies including November 2023 board crisis and safety team departures. Includes detailed 'Statements & Track Record' section analyzing prediction accuracy—noting pattern of directional correctness on AI trajectory but consistent overoptimism on specific timelines, plus tension between safety rhetoric and deployment practices.",
  "description": "CEO of OpenAI since 2019, former Y Combinator president, and central figure in AI development. Co-founded OpenAI in 2015, survived November 2023 board crisis, and advocates for gradual AI deployment while acknowledging existential risks. Key player in debates over AI safety, commercialization, and governance.",
  "ratings": {
    "novelty": 3,
    "rigor": 5,
    "actionability": 2,
    "completeness": 7
  },
  "category": "people",
  "subcategory": "lab-leadership",
  "clusters": [
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 6703,
    "tableCount": 48,
    "diagramCount": 1,
    "internalLinks": 25,
    "externalLinks": 58,
    "footnoteCount": 0,
    "bulletRatio": 0.03,
    "sectionCount": 67,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 7,
  "evergreen": true,
  "wordCount": 6703,
  "unconvertedLinks": [
    {
      "text": "openai.com",
      "url": "https://openai.com",
      "resourceId": "04d39e8bd5d50dd5",
      "resourceTitle": "OpenAI"
    },
    {
      "text": "en.wikipedia.org",
      "url": "https://en.wikipedia.org/wiki/Sam_Altman",
      "resourceId": "kb-f68985d139b4a2ac"
    },
    {
      "text": "britannica.com",
      "url": "https://www.britannica.com/money/Sam-Altman",
      "resourceId": "b795385697c55df2",
      "resourceTitle": "Sam Altman | Biography, OpenAI, Microsoft, & Facts"
    },
    {
      "text": "Sam Altman",
      "url": "https://en.wikipedia.org/wiki/Sam_Altman",
      "resourceId": "kb-f68985d139b4a2ac"
    },
    {
      "text": "OpenAI blog archives",
      "url": "https://openai.com/research",
      "resourceId": "e9aaa7b5e18f9f41",
      "resourceTitle": "OpenAI: Model Behavior"
    },
    {
      "text": "GPT-4 Technical Report",
      "url": "https://arxiv.org/abs/2303.08774",
      "resourceId": "29a0882390ee7063",
      "resourceTitle": "OpenAI's GPT-4"
    },
    {
      "text": "OpenAI announcement",
      "url": "https://openai.com/index/introducing-superalignment/",
      "resourceId": "704f57dfad89c1b3",
      "resourceTitle": "Superalignment team"
    },
    {
      "text": "Fortune",
      "url": "https://fortune.com/2025/06/20/openai-files-sam-altman-leadership-concerns-safety-failures-ai-lab/",
      "resourceId": "85ba042a002437a0",
      "resourceTitle": "\"The OpenAI Files\" reveals deep leadership concerns about Sam Altman and safety failures"
    },
    {
      "text": "CAIS Extinction Risk Statement",
      "url": "https://www.safe.ai/statement-on-ai-risk",
      "resourceId": "470ac236ca26008c",
      "resourceTitle": "AI Risk Statement"
    },
    {
      "text": "Wikipedia: Sam Altman",
      "url": "https://en.wikipedia.org/wiki/Sam_Altman",
      "resourceId": "kb-f68985d139b4a2ac"
    },
    {
      "text": "Wikipedia: Removal of Sam Altman",
      "url": "https://en.wikipedia.org/wiki/Removal_of_Sam_Altman_from_OpenAI",
      "resourceId": "25db6bbae2f82f94",
      "resourceTitle": "Wikipedia's account"
    },
    {
      "text": "TIME: OpenAI Timeline",
      "url": "https://time.com/7328674/openai-chatgpt-sam-altman-elon-musk-timeline/",
      "resourceId": "011ea0d7ed91108d",
      "resourceTitle": "OpenAI Timeline - Time Magazine"
    },
    {
      "text": "Britannica Money",
      "url": "https://www.britannica.com/money/Sam-Altman",
      "resourceId": "b795385697c55df2",
      "resourceTitle": "Sam Altman | Biography, OpenAI, Microsoft, & Facts"
    },
    {
      "text": "OpenAI: Elon Musk",
      "url": "https://openai.com/index/openai-elon-musk/",
      "resourceId": "461e3e64a9d99bd1",
      "resourceTitle": "OpenAI and Elon Musk | OpenAI"
    }
  ],
  "unconvertedLinkCount": 14,
  "convertedLinkCount": 0,
  "backlinkCount": 43,
  "hallucinationRisk": {
    "level": "high",
    "score": 75,
    "factors": [
      "biographical-claims",
      "no-citations"
    ]
  },
  "entityType": "person",
  "redundancy": {
    "maxSimilarity": 17,
    "similarPages": [
      {
        "id": "mainstream-era",
        "title": "Mainstream Era (2020-Present)",
        "path": "/knowledge-base/history/mainstream-era/",
        "similarity": 17
      },
      {
        "id": "openai-foundation",
        "title": "OpenAI Foundation",
        "path": "/knowledge-base/organizations/openai-foundation/",
        "similarity": 17
      },
      {
        "id": "ilya-sutskever",
        "title": "Ilya Sutskever",
        "path": "/knowledge-base/people/ilya-sutskever/",
        "similarity": 17
      },
      {
        "id": "ssi",
        "title": "Safe Superintelligence Inc (SSI)",
        "path": "/knowledge-base/organizations/ssi/",
        "similarity": 16
      },
      {
        "id": "deep-learning-era",
        "title": "Deep Learning Revolution (2012-2020)",
        "path": "/knowledge-base/history/deep-learning-era/",
        "similarity": 15
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-18",
      "branch": "claude/source-unsourced-facts-RecGw",
      "title": "Source unsourced facts",
      "summary": "Sourced 25 of 30 previously unsourced facts across all 4 fact files (anthropic, sam-altman, openai, jaan-tallinn). Created 21 new resource entries in news-media.yaml and ai-labs.yaml with proper SHA256-based IDs. Added 8 new publications (Bloomberg, The Information, Quartz, Benzinga, Britannica, World, Sherwood News). Fixed date accuracy issues (Worldcoin stats from 2024 to 2025-05, OpenAI revenue from Oct to Jun 2024) and improved notes. Source coverage improved from 29% to 88%.",
      "model": "opus-4-6",
      "duration": "~45min"
    },
    {
      "date": "2026-02-18",
      "branch": "claude/fact-hash-ids-UETLf",
      "title": "Migrate fact IDs from human-readable to hash-based",
      "summary": "Migrated all canonical fact IDs from human-readable slugs (e.g., `revenue-arr-2025`) to 8-char random hex hashes (e.g., `55d88868`), matching the pattern used by resources. Updated all YAML files, MDX references, build scripts, tests, LLM prompts, and documentation.",
      "model": "opus-4-6",
      "duration": "~45min"
    },
    {
      "date": "2026-02-18",
      "branch": "claude/audit-webpage-errors-X4jHg",
      "title": "Audit wiki pages for factual errors and hallucinations",
      "summary": "Systematic audit of ~20 wiki pages for factual errors, hallucinations, and inconsistencies. Found and fixed 25+ confirmed errors across 17 pages, including wrong dates, fabricated statistics, false attributions, missing major events, broken entity references, misattributed techniques, and internal inconsistencies."
    }
  ],
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 27,
      "diagrams": 3,
      "internalLinks": 54,
      "externalLinks": 34,
      "footnotes": 20,
      "references": 20
    },
    "actuals": {
      "tables": 48,
      "diagrams": 1,
      "internalLinks": 25,
      "externalLinks": 58,
      "footnotes": 0,
      "references": 18,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "green",
      "diagrams": "amber",
      "internalLinks": "amber",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "amber",
      "quotes": "red",
      "accuracy": "red"
    },
    "editHistoryCount": 3,
    "ratingsString": "N:3 R:5 A:2 C:7"
  },
  "readerRank": 477,
  "researchRank": 545,
  "recommendedScore": 115.11
}
External Links
{
  "wikipedia": "https://en.wikipedia.org/wiki/Sam_Altman",
  "wikidata": "https://www.wikidata.org/wiki/Q7407093",
  "grokipedia": "https://grokipedia.com/page/Sam_Altman"
}
Backlinks (43)
id | title | type | relationship
agi-timeline | AGI Timeline | concept |
musk-openai-lawsuit | Musk v. OpenAI Lawsuit | analysis |
openai | OpenAI | organization | leads-to
openai-foundation | OpenAI Foundation | organization |
case-against-xrisk | The Case AGAINST AI Existential Risk | argument |
case-for-xrisk | The Case FOR AI Existential Risk | argument |
open-vs-closed | Open vs Closed Source AI | crux |
pause-debate | Should We Pause AI Development? | crux |
scaling-debate | Is Scaling All You Need? | crux |
why-alignment-easy | Why Alignment Might Be Easy | argument |
agi-development | AGI Development | concept |
deep-learning-era | Deep Learning Revolution (2012-2020) | historical |
mainstream-era | Mainstream Era (2020-Present) | historical |
anthropic-government-standoff | Anthropic-Pentagon Standoff (2026) | event |
ai-timelines | AI Timelines | concept |
anthropic-valuation | Anthropic Valuation Analysis | analysis |
cais | CAIS (Center for AI Safety) | organization |
epoch-ai | Epoch AI | organization |
fli | Future of Life Institute (FLI) | organization |
frontier-ai-comparison | Frontier AI Company Comparison (2026) | concept |
lighthaven | Lighthaven (Event Venue) | organization |
long-term-benefit-trust | Long-Term Benefit Trust (Anthropic) | analysis |
openai-foundation-governance | OpenAI Foundation Governance Paradox | analysis |
pause-ai | Pause AI | organization |
ssi | Safe Superintelligence Inc (SSI) | organization |
dan-hendrycks | Dan Hendrycks | person |
eliezer-yudkowsky-predictions | Eliezer Yudkowsky: Track Record | concept |
elon-musk | Elon Musk (AI Industry) | person |
gwern | Gwern Branwen | person |
helen-toner | Helen Toner | person |
ilya-sutskever | Ilya Sutskever | person |
jan-leike | Jan Leike | person |
nick-bostrom | Nick Bostrom | person |
sam-altman-predictions | Sam Altman: Track Record | concept |
yann-lecun-predictions | Yann LeCun: Track Record | concept |
alignment | AI Alignment | approach |
bletchley-declaration | Bletchley Declaration | policy |
corporate-influence | Corporate Influence on AI Policy | crux |
field-building-analysis | AI Safety Field Building Analysis | approach |
international-regimes | International Compute Regimes | policy |
lab-culture | AI Lab Safety Culture | approach |
whistleblower-protections | AI Whistleblower Protections | policy |
epistemic-sycophancy | Epistemic Sycophancy | risk |
Longterm Wiki