Longterm Wiki

Google DeepMind

deepmind · organization · Path: /knowledge-base/organizations/deepmind/
E98 — Entity ID (EID)
← Back to page · 112 backlinks · Quality: 37 · Updated: 2026-03-13
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "deepmind",
  "numericId": null,
  "path": "/knowledge-base/organizations/deepmind/",
  "filePath": "knowledge-base/organizations/deepmind.mdx",
  "title": "Google DeepMind",
  "quality": 37,
  "readerImportance": 35,
  "researchImportance": 55,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive overview of DeepMind's history, achievements (AlphaGo, AlphaFold with 200M+ protein structures), and 2023 merger with Google Brain. Documents racing dynamics with OpenAI and new Frontier Safety Framework with 5-tier capability thresholds, but provides limited actionable guidance for prioritization decisions.",
  "description": "Google's merged AI research lab behind AlphaGo, AlphaFold, and Gemini, formed from combining DeepMind and Google Brain in 2023 to compete with OpenAI",
  "ratings": {
    "novelty": 2,
    "rigor": 4,
    "actionability": 2,
    "completeness": 6
  },
  "category": "organizations",
  "subcategory": "labs",
  "clusters": [
    "ai-safety",
    "community"
  ],
  "metrics": {
    "wordCount": 2725,
    "tableCount": 20,
    "diagramCount": 0,
    "internalLinks": 42,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.08,
    "sectionCount": 42,
    "hasOverview": true,
    "structuralScore": 11
  },
  "suggestedQuality": 73,
  "updateFrequency": 3,
  "evergreen": true,
  "wordCount": 2725,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 14,
  "backlinkCount": 112,
  "hallucinationRisk": {
    "level": "high",
    "score": 85,
    "factors": [
      "biographical-claims",
      "no-citations",
      "low-quality-score",
      "few-external-sources"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 17,
    "similarPages": [
      {
        "id": "anthropic-core-views",
        "title": "Anthropic Core Views",
        "path": "/knowledge-base/responses/anthropic-core-views/",
        "similarity": 17
      },
      {
        "id": "openai",
        "title": "OpenAI",
        "path": "/knowledge-base/organizations/openai/",
        "similarity": 16
      },
      {
        "id": "accident-risks",
        "title": "AI Accident Risk Cruxes",
        "path": "/knowledge-base/cruxes/accident-risks/",
        "similarity": 15
      },
      {
        "id": "ssi",
        "title": "Safe Superintelligence Inc (SSI)",
        "path": "/knowledge-base/organizations/ssi/",
        "similarity": 15
      },
      {
        "id": "interpretability",
        "title": "Mechanistic Interpretability",
        "path": "/knowledge-base/responses/interpretability/",
        "similarity": 15
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-26",
      "branch": "claude/claims-driven-improvements",
      "title": "Auto-improve (standard): Google DeepMind",
      "summary": "Improved \"Google DeepMind\" via standard pipeline (301.7s). Quality score: 72. Issues resolved: EntityLink for Google DeepMind in Overview uses duplicate 'n…; EntityLink in Overview references E98 as both the merged ent…; Frontmatter 'lastEdited' date is '2026-02-26' which is a fut… [issue titles truncated upstream].",
      "duration": "301.7s",
      "cost": "$5-8"
    },
    {
      "date": "2026-02-24",
      "branch": "feat/stale-fact-detection-581-582",
      "title": "Batch content fixes + stale-facts validator + 2 new validation rules",
      "summary": "(fill in)",
      "pr": 924,
      "model": "claude-sonnet-4-6"
    },
    {
      "date": "2026-02-18",
      "branch": "claude/audit-webpage-errors-X4jHg",
      "title": "Audit wiki pages for factual errors and hallucinations",
      "summary": "Systematic audit of ~20 wiki pages for factual errors, hallucinations, and inconsistencies. Found and fixed 25+ confirmed errors across 17 pages, including wrong dates, fabricated statistics, false attributions, missing major events, broken entity references, misattributed techniques, and internal inconsistencies."
    },
    {
      "date": "2026-02-18",
      "branch": "claude/audit-webpage-errors-11sSF",
      "title": "Fix factual errors found in wiki audit",
      "summary": "Systematically audited ~35+ high-risk wiki pages for factual errors and hallucinations using parallel background agents plus direct reading. Fixed 13 confirmed errors across 11 files."
    }
  ],
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 11,
      "diagrams": 1,
      "internalLinks": 22,
      "externalLinks": 14,
      "footnotes": 8,
      "references": 8
    },
    "actuals": {
      "tables": 20,
      "diagrams": 0,
      "internalLinks": 42,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 13,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "green",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "editHistoryCount": 4,
    "ratingsString": "N:2 R:4 A:2 C:6"
  },
  "readerRank": 414,
  "researchRank": 258,
  "recommendedScore": 113.29
}
External Links
{
  "wikipedia": "https://en.wikipedia.org/wiki/DeepMind",
  "wikidata": "https://www.wikidata.org/wiki/Q15733006"
}
Backlinks (112)
id · title · type · relationship
geminiGeminiai-modelcreated-by
gemini-1-0-ultraGemini 1.0 Ultraai-modelcreated-by
gemini-1-5-proGemini 1.5 Proai-modelcreated-by
gemini-1-5-flashGemini 1.5 Flashai-modelcreated-by
gemini-2-0-flashGemini 2.0 Flashai-modelcreated-by
gemini-2-5-proGemini 2.5 Proai-modelcreated-by
gemini-2-5-flashGemini 2.5 Flashai-modelcreated-by
scientific-researchScientific Research Capabilitiescapability
corporate-influenceCorporate Influence on AI Policycrux
deep-learning-eraDeep Learning Revolution Erahistorical
anthropic-impactAnthropic Impact Assessment Modelanalysis
anthropicAnthropicorganization
govaiGovAIorganization
uk-aisiUK AI Safety Instituteorganization
ssiSafe Superintelligence Inc (SSI)organization
frontier-model-forumFrontier Model Forumorganization
goodfireGoodfireorganization
geoffrey-hintonGeoffrey Hintonperson
neel-nandaNeel Nandaperson
scalable-oversightScalable Oversightsafety-agenda
safety-casesAI Safety Casesapproach
rspResponsible Scaling Policiespolicy
language-modelsLarge Language Modelscapability
long-horizonLong-Horizon Autonomous Taskscapability
solutionsAI Safety Solution Cruxescrux
interpretability-sufficientIs Interpretability Sufficient for Safety?crux
pause-debateShould We Pause AI Development?crux
scaling-debateIs Scaling All You Need?crux
why-alignment-hardWhy Alignment Might Be Hardargument
agi-developmentAGI Developmentconcept
agi-timelineAGI Timelineconcept
miri-eraThe MIRI Era (2000-2015)historical
claude-code-espionage-2025Claude Code Espionage Incident (2025)concept
ai-talent-market-dynamicsAI Talent Market Dynamicsanalysis
ai-timelinesAI Timelinesconcept
capabilities-to-safety-pipelineCapabilities-to-Safety Pipeline Modelanalysis
capability-alignment-raceCapability-Alignment Race Modelanalysis
frontier-lab-cost-structureFrontier Lab Cost Structureanalysis
goal-misgeneralization-probabilityGoal Misgeneralization Probability Modelanalysis
intervention-effectiveness-matrixIntervention Effectiveness Matrixanalysis
intervention-timing-windowsIntervention Timing Windowsanalysis
pre-tai-capital-deploymentPre-TAI Capital Deployment: $100B-$300B+ Spending Analysisanalysis
projecting-compute-spendingProjecting Compute Spendinganalysis
risk-interaction-matrixRisk Interaction Matrix Modelanalysis
safety-research-allocationSafety Research Allocation Modelanalysis
safety-researcher-gapAI Safety Talent Supply/Demand Gap Modelanalysis
safety-spending-at-scaleSafety Spending at Scaleanalysis
scaling-lawsAI Scaling Lawsconcept
ai-impactsAI Impactsorganization
ai-revenue-sourcesAI Revenue Sourcesorganization
apollo-researchApollo Researchorganization
caisCAIS (Center for AI Safety)organization
ea-globalEA Globalorganization
epoch-aiEpoch AIorganization
founders-fundFounders Fundorganization
frontier-ai-comparisonFrontier AI Company Comparison (2026)concept
__index__/knowledge-base/organizationsOrganizationsconcept
kalshiKalshi (Prediction Market)organization
labs-overviewFrontier AI Labs (Overview)concept
lesswrongLessWrongorganization
matsMATS ML Alignment Theory Scholars programorganization
meta-aiMeta AI (FAIR)organization
metrMETRorganization
microsoftMicrosoft AIorganization
openai-foundationOpenAI Foundationorganization
openaiOpenAIorganization
pause-aiPause AIorganization
redwood-researchRedwood Researchorganization
safety-orgs-overviewAI Safety Organizations (Overview)concept
swift-centreSwift Centreorganization
xaixAIorganization
chris-olahChris Olahperson
connor-leahyConnor Leahyperson
dan-hendrycksDan Hendrycksperson
demis-hassabisDemis Hassabisperson
eliezer-yudkowskyEliezer Yudkowskyperson
elon-muskElon Musk (AI Industry)person
ilya-sutskeverIlya Sutskeverperson
jaan-tallinnJaan Tallinnperson
jan-leikeJan Leikeperson
max-tegmarkMax Tegmarkperson
nick-bostromNick Bostromperson
paul-christianoPaul Christianoperson
sam-altmanSam Altmanperson
ai-controlAI Controlsafety-agenda
alignment-evalsAlignment Evaluationsapproach
alignmentAI Alignmentapproach
bletchley-declarationBletchley Declarationpolicy
california-sb1047California SB 1047policy
california-sb53California SB 53policy
constitutional-aiConstitutional AIapproach
cooperative-aiCooperative AIapproach
coordination-techAI Governance Coordination Technologiesapproach
corporateCorporate AI Safety Responsesapproach
dangerous-cap-evalsDangerous Capability Evaluationsapproach
evalsEvals & Red-teamingsafety-agenda
governance-policyAI Governance and Policycrux
international-summitsInternational AI Safety Summitspolicy
lab-cultureAI Lab Safety Cultureapproach
mech-interpMechanistic Interpretabilityapproach
model-specAI Model Specificationspolicy
monitoringCompute Monitoringpolicy
red-teamingRed Teamingapproach
responsible-scaling-policiesResponsible Scaling Policiespolicy
scalable-eval-approachesScalable Eval Approachesapproach
seoul-declarationSeoul AI Safety Summit Declarationpolicy
sparse-autoencodersSparse Autoencoders (SAEs)approach
training-programsAI Safety Training Programsapproach
whistleblower-protectionsAI Whistleblower Protectionspolicy
concentrated-compute-cybersecurity-riskConcentrated Compute as a Cybersecurity Riskrisk
existential-riskExistential Risk from AIconcept
winner-take-allAI Winner-Take-All Dynamicsrisk
Longterm Wiki