LessWrong
lesswrong · organization
Path: /knowledge-base/organizations/lesswrong/
Entity ID (EID): E538

Page Record
database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
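A minimal sketch of how that merge might look, before the actual record below. The interfaces, field shapes, and `buildPageRecord` name are illustrative assumptions, not the project's real build code; the only thing taken from the source is that three inputs (frontmatter, entity YAML, computed metrics) are combined into one record.

```typescript
// Hypothetical sketch of the build-time merge described above. MDX frontmatter
// and entity YAML are parsed upstream; metrics are computed from the rendered
// body. All names here are illustrative, not the real pipeline.

interface Frontmatter {
  title: string;
  quality?: number;
  llmSummary?: string;
}

interface EntityYaml {
  id: string;
  entityType: string;
  category: string;
}

interface ComputedMetrics {
  wordCount: number;
  internalLinks: number;
  externalLinks: number;
}

interface PageRecord extends Frontmatter, EntityYaml {
  path: string;
  metrics: ComputedMetrics;
}

// Later sources win on key collisions, so a frontmatter "title" would
// override anything the entity YAML provides (an assumption).
function buildPageRecord(
  entity: EntityYaml,
  frontmatter: Frontmatter,
  metrics: ComputedMetrics,
): PageRecord {
  return {
    ...entity,
    ...frontmatter,
    path: `/knowledge-base/${entity.category}/${entity.id}/`,
    metrics,
  };
}

// Example: produces a record shaped like the JSON below.
const record = buildPageRecord(
  { id: "lesswrong", entityType: "organization", category: "organizations" },
  { title: "LessWrong", quality: 44 },
  { wordCount: 1912, internalLinks: 20, externalLinks: 73 },
);
console.log(JSON.stringify(record, null, 2));
```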
```json
{
  "id": "lesswrong",
  "numericId": null,
  "path": "/knowledge-base/organizations/lesswrong/",
  "filePath": "knowledge-base/organizations/lesswrong.mdx",
  "title": "LessWrong",
  "quality": 44,
  "readerImportance": 33,
  "researchImportance": 38.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "LessWrong is a rationality-focused community blog founded in 2009 that has influenced AI safety discourse, receiving $5M+ in funding and serving as the origin point for ~31% of EA survey respondents in 2014. Survey participation peaked at 3,000+ in 2016, declining to 558 by 2023, with the community being 75% male and highly secular.",
  "description": "A community blog and forum focused on rationality, cognitive biases, and artificial intelligence that has become a central hub for AI safety discourse and the broader rationalist movement.",
  "ratings": {
    "novelty": 2.5,
    "rigor": 5,
    "actionability": 1,
    "completeness": 6.5
  },
  "category": "organizations",
  "subcategory": "community-building",
  "clusters": [
    "community",
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 1912,
    "tableCount": 1,
    "diagramCount": 0,
    "internalLinks": 20,
    "externalLinks": 73,
    "footnoteCount": 0,
    "bulletRatio": 0.21,
    "sectionCount": 21,
    "hasOverview": true,
    "structuralScore": 13
  },
  "suggestedQuality": 87,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 1912,
  "unconvertedLinks": [
    {
      "text": "LessWrong Wiki",
      "url": "https://www.lesswrong.com/w/instrumental-convergence",
      "resourceId": "90e9322ba84baa7a",
      "resourceTitle": "LessWrong (2024). \"Instrumental Convergence Wiki\""
    },
    {
      "text": "Effective altruism - Wikipedia",
      "url": "https://en.wikipedia.org/wiki/Effective_altruism",
      "resourceId": "f1d79efc3fc232c1",
      "resourceTitle": "Effective Altruism - Wikipedia"
    },
    {
      "text": "Effective altruism - Wikipedia",
      "url": "https://en.wikipedia.org/wiki/Effective_altruism",
      "resourceId": "f1d79efc3fc232c1",
      "resourceTitle": "Effective Altruism - Wikipedia"
    },
    {
      "text": "LessWrong - Main Site",
      "url": "https://www.lesswrong.com/",
      "resourceId": "815315aec82a6f7f",
      "resourceTitle": "LessWrong"
    },
    {
      "text": "Effective altruism - Wikipedia",
      "url": "https://en.wikipedia.org/wiki/Effective_altruism",
      "resourceId": "f1d79efc3fc232c1",
      "resourceTitle": "Effective Altruism - Wikipedia"
    }
  ],
  "unconvertedLinkCount": 5,
  "convertedLinkCount": 0,
  "backlinkCount": 60,
  "hallucinationRisk": {
    "level": "high",
    "score": 75,
    "factors": [
      "biographical-claims",
      "no-citations"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 14,
    "similarPages": [
      {
        "id": "miri-era",
        "title": "The MIRI Era (2000-2015)",
        "path": "/knowledge-base/history/miri-era/",
        "similarity": 14
      },
      {
        "id": "miri",
        "title": "MIRI (Machine Intelligence Research Institute)",
        "path": "/knowledge-base/organizations/miri/",
        "similarity": 14
      },
      {
        "id": "eliezer-yudkowsky",
        "title": "Eliezer Yudkowsky",
        "path": "/knowledge-base/people/eliezer-yudkowsky/",
        "similarity": 14
      },
      {
        "id": "center-for-applied-rationality",
        "title": "Center for Applied Rationality",
        "path": "/knowledge-base/organizations/center-for-applied-rationality/",
        "similarity": 13
      },
      {
        "id": "ea-global",
        "title": "EA Global",
        "path": "/knowledge-base/organizations/ea-global/",
        "similarity": 13
      }
    ]
  },
  "coverage": {
    "passing": 6,
    "total": 13,
    "targets": {
      "tables": 8,
      "diagrams": 1,
      "internalLinks": 15,
      "externalLinks": 10,
      "footnotes": 6,
      "references": 6
    },
    "actuals": {
      "tables": 1,
      "diagrams": 0,
      "internalLinks": 20,
      "externalLinks": 73,
      "footnotes": 0,
      "references": 3,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "amber",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:2.5 R:5 A:1 C:6.5"
  },
  "readerRank": 426,
  "researchRank": 353,
  "recommendedScore": 126.14
}
```
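The `coverage.items` map above is consistent with a simple three-way rubric against the per-type targets: an actual of zero reads as red, anything present but below target as amber, and meeting or exceeding the target as green (tables 1/8 → amber, diagrams 0/1 → red, internalLinks 20/15 → green). The sketch below implements that inferred rule; it is reverse-engineered from this one record, not confirmed against the actual build code.

```typescript
// Inferred coverage rubric: red when nothing is present, amber when present
// but below target, green at or above target. Reverse-engineered from the
// targets/actuals/items triples in the record above (an assumption).

type CoverageStatus = "red" | "amber" | "green";

function coverageStatus(actual: number, target: number): CoverageStatus {
  if (actual >= target) return "green";
  if (actual === 0) return "red";
  return "amber";
}

const targets = { tables: 8, diagrams: 1, internalLinks: 15, externalLinks: 10, footnotes: 6, references: 6 };
const actuals = { tables: 1, diagrams: 0, internalLinks: 20, externalLinks: 73, footnotes: 0, references: 3 };

for (const key of Object.keys(targets) as (keyof typeof targets)[]) {
  console.log(key, coverageStatus(actuals[key], targets[key]));
}
// tables amber, diagrams red, internalLinks green, externalLinks green,
// footnotes red, references amber — matching the "items" map above.
```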
External Links

```json
{
  "grokipedia": "https://grokipedia.com/page/LessWrong"
}
```

Backlinks (60)

| id | title | type | relationship |
|---|---|---|---|
| eli-lifland | Eli Lifland | person | — |
| self-improvement | Self-Improvement and Recursive Enhancement | capability | — |
| epistemic-risks | AI Epistemic Cruxes | crux | — |
| misuse-risks | AI Misuse Risk Cruxes | crux | — |
| structural-risks | AI Structural Risk Cruxes | crux | — |
| why-alignment-hard | Why Alignment Might Be Hard | argument | — |
| ea-longtermist-wins-losses | EA and Longtermist Wins and Losses | concept | — |
| miri-era | The MIRI Era (2000-2015) | historical | — |
| heavy-scaffolding | Heavy Scaffolding / Agentic Systems | concept | — |
| provable-safe | Provable / Guaranteed Safe AI | concept | — |
| ai-timelines | AI Timelines | concept | — |
| model-organisms-of-misalignment | Model Organisms of Misalignment | analysis | — |
| ai-futures-project | AI Futures Project | organization | — |
| ai-impacts | AI Impacts | organization | — |
| arc | ARC (Alignment Research Center) | organization | — |
| bridgewater-aia-labs | Bridgewater AIA Labs | organization | — |
| cea | Centre for Effective Altruism | organization | — |
| center-for-applied-rationality | Center for Applied Rationality | organization | — |
| community-building-overview | Community Building Organizations (Overview) | concept | — |
| conjecture | Conjecture | organization | — |
| controlai | ControlAI | organization | — |
| deepmind | Google DeepMind | organization | — |
| elicit | Elicit (AI Research Tool) | organization | — |
| frontier-model-forum | Frontier Model Forum | organization | — |
| good-judgment | Good Judgment (Forecasting) | organization | — |
| gratified | Gratified | organization | — |
| lighthaven | Lighthaven (Event Venue) | organization | — |
| lightning-rod-labs | Lightning Rod Labs | organization | — |
| manifest | Manifest (Forecasting Conference) | organization | — |
| mats | MATS ML Alignment Theory Scholars program | organization | — |
| miri | MIRI (Machine Intelligence Research Institute) | organization | — |
| palisade-research | Palisade Research | organization | — |
| pause-ai | Pause AI | organization | — |
| polymarket | Polymarket | organization | — |
| samotsvety | Samotsvety | organization | — |
| the-sequences | The Sequences by Eliezer Yudkowsky | organization | — |
| connor-leahy | Connor Leahy | person | — |
| dustin-moskovitz | Dustin Moskovitz (AI Safety Funder) | person | — |
| eliezer-yudkowsky-predictions | Eliezer Yudkowsky: Track Record | concept | — |
| eliezer-yudkowsky | Eliezer Yudkowsky | person | — |
| gwern | Gwern Branwen | person | — |
| issa-rice | Issa Rice | person | — |
| jaan-tallinn | Jaan Tallinn | person | — |
| leopold-aschenbrenner | Leopold Aschenbrenner | person | — |
| neel-nanda | Neel Nanda | person | — |
| nuno-sempere | Nuño Sempere | person | — |
| sam-bankman-fried | Sam Bankman-Fried | person | — |
| vidur-kapur | Vidur Kapur | person | — |
| vipul-naik | Vipul Naik | person | — |
| ai-watch | AI Watch | project | — |
| donations-list-website | Donations List Website | project | — |
| interpretability | Mechanistic Interpretability | safety-agenda | — |
| org-watch | Org Watch | project | — |
| roastmypost | RoastMyPost | project | — |
| stampy-aisafety-info | Stampy / AISafety.info | project | — |
| timelines-wiki | Timelines Wiki | project | — |
| existential-risk | Existential Risk from AI | concept | — |
| sleeper-agents | Sleeper Agents: Training Deceptive LLMs | risk | — |
| page-creator-pipeline | Research-First Page Creation Pipeline | concept | — |
| __index__/project | LongtermWiki Project | concept | — |
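The 60 rows above match the record's `backlinkCount`, which suggests backlinks are derived by inverting each page's outgoing internal links across the knowledge base. A minimal sketch of that inversion, with illustrative types and names (the real pipeline is not shown in this record):

```typescript
// Hypothetical backlink computation: walk every page's internal links and
// invert the map, so each target page learns which pages point at it.

interface Page {
  id: string;
  internalLinks: string[]; // ids of pages this page links to
}

function computeBacklinks(pages: Page[]): Map<string, string[]> {
  const backlinks = new Map<string, string[]>();
  for (const page of pages) {
    // De-duplicate so a page linking to the same target twice counts once.
    for (const target of new Set(page.internalLinks)) {
      const existing = backlinks.get(target) ?? [];
      existing.push(page.id);
      backlinks.set(target, existing);
    }
  }
  return backlinks;
}

// Example: both pages link to lesswrong, so it gets two backlinks.
const demo = computeBacklinks([
  { id: "miri", internalLinks: ["lesswrong", "eliezer-yudkowsky"] },
  { id: "gwern", internalLinks: ["lesswrong"] },
]);
console.log(demo.get("lesswrong")); // ["miri", "gwern"]
```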