Max Tegmark
ID: max-tegmark · Type: person · Path: /knowledge-base/people/max-tegmark/
Entity ID (EID): E433
Page Record: database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
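A minimal sketch of that merge, assuming a typical MDX pipeline; the parser choices (gray-matter, js-yaml) and the helper `buildPageRecord` are illustrative assumptions, not the site's actual build code:

```ts
// Hypothetical sketch only: the real build step is not shown on this page.
// It illustrates the merge described above -- MDX frontmatter + Entity YAML
// + metrics computed from the article body, combined into one record.
import { readFileSync } from "node:fs";
import matter from "gray-matter"; // common frontmatter parser (assumption)
import { load } from "js-yaml";   // common YAML parser (assumption)

function buildPageRecord(mdxPath: string, entityYamlPath: string) {
  const { data: frontmatter, content } = matter(readFileSync(mdxPath, "utf8"));
  const entity = load(readFileSync(entityYamlPath, "utf8")) as Record<string, unknown>;

  // Computed metrics, e.g. the word and link counts seen under "metrics" below.
  const metrics = {
    wordCount: content.split(/\s+/).filter(Boolean).length,
    internalLinks: (content.match(/\]\(\/knowledge-base\//g) ?? []).length,
    externalLinks: (content.match(/\]\(https?:\/\//g) ?? []).length,
  };

  // Later spreads win on key collisions; computed metrics stay nested under `metrics`.
  return { ...frontmatter, ...entity, metrics, filePath: mdxPath };
}
```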
{
  "id": "max-tegmark",
  "numericId": null,
  "path": "/knowledge-base/people/max-tegmark/",
  "filePath": "knowledge-base/people/max-tegmark.mdx",
  "title": "Max Tegmark",
  "quality": 63,
  "readerImportance": 81.5,
  "researchImportance": 39,
  "tacticalValue": 72,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive biographical profile of Max Tegmark covering his transition from cosmology to AI safety advocacy, his role founding the Future of Life Institute, and his controversial Mathematical Universe Hypothesis. The article provides balanced coverage of both his contributions and criticisms, including the 2023 grant controversy and scientific debates about his theoretical work.",
  "description": "Swedish-American physicist at MIT, co-founder of the Future of Life Institute, and prominent AI safety advocate known for his work on the Mathematical Universe Hypothesis and efforts to promote safe artificial intelligence development.",
  "ratings": {
    "novelty": 4,
    "rigor": 7,
    "actionability": 6,
    "completeness": 8
  },
  "category": "people",
  "subcategory": "safety-researchers",
  "clusters": [
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 2642,
    "tableCount": 2,
    "diagramCount": 0,
    "internalLinks": 14,
    "externalLinks": 2,
    "footnoteCount": 0,
    "bulletRatio": 0.05,
    "sectionCount": 23,
    "hasOverview": true,
    "structuralScore": 12
  },
  "suggestedQuality": 80,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 2642,
  "unconvertedLinks": [
    {
      "text": "en.wikipedia.org",
      "url": "https://en.wikipedia.org/wiki/Max_Tegmark",
      "resourceId": "23cbf8a562b3afec",
      "resourceTitle": "Max Tegmark - Wikipedia"
    }
  ],
  "unconvertedLinkCount": 1,
  "convertedLinkCount": 0,
  "backlinkCount": 17,
  "citationHealth": {
    "total": 67,
    "withQuotes": 48,
    "verified": 47,
    "accuracyChecked": 47,
    "accurate": 29,
    "inaccurate": 2,
    "avgScore": 0.9614093893518051
  },
  "hallucinationRisk": {
    "level": "medium",
    "score": 60,
    "factors": [
      "biographical-claims",
      "no-citations",
      "high-rigor"
    ]
  },
  "entityType": "person",
  "redundancy": {
    "maxSimilarity": 16,
    "similarPages": [
      {
        "id": "robin-hanson",
        "title": "Robin Hanson",
        "path": "/knowledge-base/people/robin-hanson/",
        "similarity": 16
      },
      {
        "id": "miri-era",
        "title": "The MIRI Era (2000-2015)",
        "path": "/knowledge-base/history/miri-era/",
        "similarity": 15
      },
      {
        "id": "frontier-model-forum",
        "title": "Frontier Model Forum",
        "path": "/knowledge-base/organizations/frontier-model-forum/",
        "similarity": 15
      },
      {
        "id": "eliezer-yudkowsky",
        "title": "Eliezer Yudkowsky",
        "path": "/knowledge-base/people/eliezer-yudkowsky/",
        "similarity": 15
      },
      {
        "id": "stuart-russell",
        "title": "Stuart Russell",
        "path": "/knowledge-base/people/stuart-russell/",
        "similarity": 15
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-18",
      "branch": "claude/fix-issue-240-N5irU",
      "title": "Surface tacticalValue in /wiki table and score 53 pages",
      "summary": "Added `tacticalValue` to `ExploreItem` interface, `getExploreItems()` mappings, the `/wiki` explore table (new sortable \"Tact.\" column), and the card view sort dropdown. Scored 49 new pages with tactical values (4 were already scored), bringing total to 53.",
      "model": "sonnet-4",
      "duration": "~30min"
    },
    {
      "date": "2026-02-16",
      "branch": "claude/investigate-arxiv-paper-UmGPu",
      "title": "Singapore Consensus on AI Safety",
      "summary": "Investigated arXiv:2506.20702 (The Singapore Consensus on Global AI Safety Research Priorities) and integrated it into the wiki. Updated the international-summits page with a new SCAI section and Mermaid diagram, fixed the broken Singapore Consensus resource in web-other.yaml, updated Bengio/Russell/Tegmark pages with references, created a new dedicated singapore-consensus page with entity E694, and registered the entity in responses.yaml.",
      "pr": 157
    }
  ],
  "coverage": {
    "passing": 5,
    "total": 13,
    "targets": {
      "tables": 11,
      "diagrams": 1,
      "internalLinks": 21,
      "externalLinks": 13,
      "footnotes": 8,
      "references": 8
    },
    "actuals": {
      "tables": 2,
      "diagrams": 0,
      "internalLinks": 14,
      "externalLinks": 2,
      "footnotes": 0,
      "references": 1,
      "quotesWithQuotes": 48,
      "quotesTotal": 67,
      "accuracyChecked": 47,
      "accuracyTotal": 67
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "amber",
      "externalLinks": "amber",
      "footnotes": "red",
      "references": "amber",
      "quotes": "amber",
      "accuracy": "amber"
    },
    "editHistoryCount": 2,
    "ratingsString": "N:4 R:7 A:6 C:8"
  },
  "readerRank": 79,
  "researchRank": 352,
  "recommendedScore": 188.53
}
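The per-item colours under `coverage.items` look consistent with a simple targets-vs-actuals rule: green when the target is met, amber when there is partial progress, red when nothing is present. A minimal sketch of that inferred rule (not the site's actual scoring code), which reproduces the statuses shown above:

```ts
type Status = "green" | "amber" | "red";

// Inferred rule: target met -> green, some progress -> amber, nothing yet -> red.
function coverageStatus(actual: number, target: number): Status {
  if (actual >= target) return "green";
  if (actual > 0) return "amber";
  return "red";
}

// Values copied from coverage.targets and coverage.actuals above.
const targets = { tables: 11, diagrams: 1, internalLinks: 21, externalLinks: 13, footnotes: 8, references: 8 };
const actuals = { tables: 2, diagrams: 0, internalLinks: 14, externalLinks: 2, footnotes: 0, references: 1 };

for (const key of Object.keys(targets) as (keyof typeof targets)[]) {
  console.log(key, coverageStatus(actuals[key], targets[key]));
}
// tables amber, diagrams red, internalLinks amber, externalLinks amber, footnotes red, references amber
```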
External Links
{
  "wikidata": "https://www.wikidata.org/wiki/Q2076321",
  "grokipedia": "https://grokipedia.com/page/Max_Tegmark"
}
Backlinks (17)
| id | title | type | relationship |
|---|---|---|---|
| pause-debate | Should We Pause AI Development? | crux | — |
| miri-era | The MIRI Era (2000-2015) | historical | — |
| provable-safe | Provable / Guaranteed Safe AI | concept | — |
| fli | Future of Life Institute (FLI) | organization | — |
| metaculus | Metaculus | organization | — |
| sff | Survival and Flourishing Fund (SFF) | organization | — |
| elon-musk-predictions | Elon Musk: Track Record | concept | — |
| stuart-russell | Stuart Russell | person | — |
| yann-lecun-predictions | Yann LeCun: Track Record | concept | — |
| yann-lecun | Yann LeCun | person | — |
| california-sb1047 | California SB 1047 | policy | — |
| coordination-mechanisms | International Coordination Mechanisms | policy | — |
| lab-culture | AI Lab Safety Culture | approach | — |
| provably-safe | Provably Safe AI (davidad agenda) | approach | — |
| singapore-consensus | Singapore Consensus on AI Safety Research Priorities | policy | — |
| multipolar-trap | Multipolar Trap (AI Development) | risk | — |
| racing-dynamics | AI Development Racing Dynamics | risk | — |