Longview Philanthropy
longview-philanthropy · organization · Path: /knowledge-base/organizations/longview-philanthropy/
E542 — Entity ID (EID)
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "longview-philanthropy",
"numericId": null,
"path": "/knowledge-base/organizations/longview-philanthropy/",
"filePath": "knowledge-base/organizations/longview-philanthropy.mdx",
"title": "Longview Philanthropy",
"quality": 45,
"readerImportance": 46,
"researchImportance": 41.5,
"tacticalValue": null,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "Longview Philanthropy is a philanthropic advisory organization founded in 2018 that has directed $140M+ to longtermist causes ($89M+ to AI risk), primarily through UHNW donor advising and managed funds (Frontier AI Fund: $13M raised, $11.1M disbursed to 18 orgs). Funded primarily by Coefficient Giving ($21M+ in grants), it operates advisory services for $1M+/year donors and public funds (ECF, NWPF) with 15-20 staff.",
"description": "Longview Philanthropy is a philanthropic advisory and grantmaking organization founded in 2018 by Natalie Cargill that has directed over $140 million to longtermist causes. As of late 2025, they have moved $89M+ specifically toward AI risk reduction, $50M+ in 2025 alone, and launched the Frontier AI Fund (raising $13M, disbursing $11.1M to 18 organizations in its first 9 months). Led by CEO Simran Dhaliwal and President Natalie Cargill, Longview operates two legal entities (UK and US) and manages public funds (Emerging Challenges Fund, Nuclear Weapons Policy Fund) alongside bespoke UHNW donor advisory services.",
"ratings": {
"novelty": 2.5,
"rigor": 4.5,
"actionability": 3,
"completeness": 6
},
"category": "organizations",
"subcategory": "funders",
"clusters": [
"community",
"ai-safety",
"governance",
"biorisks"
],
"metrics": {
"wordCount": 3480,
"tableCount": 26,
"diagramCount": 2,
"internalLinks": 6,
"externalLinks": 114,
"footnoteCount": 0,
"bulletRatio": 0.12,
"sectionCount": 48,
"hasOverview": true,
"structuralScore": 15
},
"suggestedQuality": 100,
"updateFrequency": 45,
"evergreen": true,
"wordCount": 3480,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 0,
"backlinkCount": 8,
"hallucinationRisk": {
"level": "high",
"score": 75,
"factors": [
"biographical-claims",
"no-citations"
]
},
"entityType": "organization",
"redundancy": {
"maxSimilarity": 15,
"similarPages": [
{
"id": "80000-hours",
"title": "80,000 Hours",
"path": "/knowledge-base/organizations/80000-hours/",
"similarity": 15
},
{
"id": "coefficient-giving",
"title": "Coefficient Giving",
"path": "/knowledge-base/organizations/coefficient-giving/",
"similarity": 15
},
{
"id": "dustin-moskovitz",
"title": "Dustin Moskovitz (AI Safety Funder)",
"path": "/knowledge-base/people/dustin-moskovitz/",
"similarity": 15
},
{
"id": "fli",
"title": "Future of Life Institute (FLI)",
"path": "/knowledge-base/organizations/fli/",
"similarity": 14
},
{
"id": "giving-what-we-can",
"title": "Giving What We Can",
"path": "/knowledge-base/organizations/giving-what-we-can/",
"similarity": 14
}
]
},
"coverage": {
"passing": 7,
"total": 13,
"targets": {
"tables": 14,
"diagrams": 1,
"internalLinks": 28,
"externalLinks": 17,
"footnotes": 10,
"references": 10
},
"actuals": {
"tables": 26,
"diagrams": 2,
"internalLinks": 6,
"externalLinks": 114,
"footnotes": 0,
"references": 0,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "green",
"diagrams": "green",
"internalLinks": "amber",
"externalLinks": "green",
"footnotes": "red",
"references": "red",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:2.5 R:4.5 A:3 C:6"
},
"readerRank": 332,
"researchRank": 331,
"recommendedScore": 134.87
}
External Links
No external links
Backlinks (8)
| id | title | type | relationship |
|---|---|---|---|
| the-foundation-layer | The Foundation Layer | organization | related |
| astralis-foundation | Astralis Foundation | organization | leads-to |
| anthropic-pledge-enforcement | Anthropic Founder Pledges: Interventions to Increase Follow-Through | analysis | — |
| model-organisms-of-misalignment | Model Organisms of Misalignment | analysis | — |
| anthropic-investors | Anthropic (Funder) | analysis | — |
| cea | Centre for Effective Altruism | organization | — |
| ea-shareholder-diversification-anthropic | EA Shareholder Diversification from Anthropic | concept | — |
| ai-welfare | AI Welfare and Digital Minds | concept | — |