Open vs Closed Source AI
ID: open-vs-closed
Type: crux
Path: /knowledge-base/debates/open-vs-closed/
Entity ID (EID): E217
Page Record (database.json): merged at build time from MDX frontmatter, Entity YAML, and computed metrics.
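As a rough illustration of that merge, a minimal sketch follows. None of the identifiers below (buildPageRecord, the gray-matter/js-yaml libraries, the file paths) are confirmed by this record; they are assumptions chosen to show the shape of the operation, not the actual pipeline code.

```ts
// Hypothetical sketch of the build-time merge described above.
// Library choices and function names are assumptions for illustration.
import { readFileSync } from "fs";
import matter from "gray-matter";
import { load as parseYaml } from "js-yaml";

interface PageRecord {
  id: string;
  path: string;
  title: string;
  [key: string]: unknown;
}

function buildPageRecord(mdxPath: string, entityYamlPath: string): PageRecord {
  // 1. Frontmatter from the MDX source (title, summary, ratings, ...)
  const { data: frontmatter, content } = matter(readFileSync(mdxPath, "utf8"));

  // 2. Entity metadata from YAML (entityType, clusters, EID, ...)
  const entity = parseYaml(readFileSync(entityYamlPath, "utf8")) as object;

  // 3. Metrics computed from the content itself (word count, links, ...)
  const metrics = {
    wordCount: content.split(/\s+/).filter(Boolean).length,
    // tableCount, internalLinks, etc. would be computed similarly
  };

  // Entity fields override frontmatter on key collisions; computed
  // metrics are attached under their own "metrics" key.
  return { ...frontmatter, ...entity, metrics } as PageRecord;
}
```

The merged record for this page: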
{
"id": "open-vs-closed",
"numericId": null,
"path": "/knowledge-base/debates/open-vs-closed/",
"filePath": "knowledge-base/debates/open-vs-closed.mdx",
"title": "Open vs Closed Source AI",
"quality": 60,
"readerImportance": 52,
"researchImportance": 69,
"tacticalValue": null,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": null,
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "Comprehensive analysis of open vs closed source AI debate, documenting that open model performance gap narrowed from 8% to 1.7% in 2024, with 1.2B+ Llama downloads by April 2025 and DeepSeek R1 demonstrating 90-95% cost reduction. Research shows fine-tuning can remove safety guardrails in hours, while NTIA 2024 found insufficient evidence to restrict open weights and EU AI Act exempts non-systemic open models below 10²⁵ FLOPs.",
"description": "The safety implications of releasing AI model weights publicly versus keeping them proprietary. Open model performance gap narrowed from 8% to 1.7% in 2024, with 1.2B+ Llama downloads by April 2025. DeepSeek R1 demonstrated 90-95% cost reduction. NTIA 2024 concluded evidence insufficient to warrant restrictions, while EU AI Act exempts non-systemic open models.",
"ratings": {
"novelty": 4.2,
"rigor": 6.8,
"actionability": 5.5,
"completeness": 7.5
},
"category": "debates",
"subcategory": "policy-debates",
"clusters": [
"ai-safety",
"governance"
],
"metrics": {
"wordCount": 2208,
"tableCount": 9,
"diagramCount": 1,
"internalLinks": 6,
"externalLinks": 43,
"footnoteCount": 0,
"bulletRatio": 0.09,
"sectionCount": 17,
"hasOverview": false,
"structuralScore": 13
},
"suggestedQuality": 87,
"updateFrequency": 21,
"evergreen": true,
"wordCount": 2208,
"unconvertedLinks": [
{
"text": "Stanford HAI 2025",
"url": "https://hai.stanford.edu/ai-index/2025-ai-index-report",
"resourceId": "da87f2b213eb9272",
"resourceTitle": "Stanford AI Index 2025"
},
{
"text": "Menlo Ventures",
"url": "https://menlovc.com/perspective/2025-mid-year-llm-market-update/",
"resourceId": "3cb52ce5c601c885",
"resourceTitle": "Menlo Ventures - 2025 Mid-Year LLM Market Update"
},
{
"text": "signed AI Safety Commitments",
"url": "https://carnegieendowment.org/research/2025/01/deepseek-and-other-chinese-firms-converge-with-western-companies-on-ai-promises",
"resourceId": "e3274b108aac1712",
"resourceTitle": "Frontier AI Safety Commitments"
},
{
"text": "documented censorship and security issues",
"url": "https://www.nist.gov/news-events/news/2025/09/caisi-evaluation-deepseek-ai-models-finds-shortcomings-and-risks",
"resourceId": "ff1a185c3aa33003",
"resourceTitle": "CAISI Evaluation of DeepSeek AI Models Finds Shortcomings and Risks"
},
{
"text": "Hugging Face",
"url": "https://huggingface.co/",
"resourceId": "453cb49f45b2d3e3",
"resourceTitle": "Hugging Face"
},
{
"text": "FAR.AI 2024",
"url": "https://far.ai/post/2024-10-poisoning/",
"resourceId": "2a0c1c9020caae9c",
"resourceTitle": "FAR AI"
},
{
"text": "Menlo Ventures",
"url": "https://menlovc.com/perspective/2025-mid-year-llm-market-update/",
"resourceId": "3cb52ce5c601c885",
"resourceTitle": "Menlo Ventures - 2025 Mid-Year LLM Market Update"
},
{
"text": "Stanford HAI",
"url": "https://hai.stanford.edu/ai-index/2025-ai-index-report",
"resourceId": "da87f2b213eb9272",
"resourceTitle": "Stanford AI Index 2025"
},
{
"text": "Menlo Ventures",
"url": "https://menlovc.com/perspective/2025-the-state-of-generative-ai-in-the-enterprise/",
"resourceId": "d2115dba2489b57e",
"resourceTitle": "2025 State of Generative AI in Enterprise - Menlo Ventures"
},
{
"text": "FAR.AI",
"url": "https://far.ai/post/2024-10-poisoning/",
"resourceId": "2a0c1c9020caae9c",
"resourceTitle": "FAR AI"
},
{
"text": "FAR.AI",
"url": "https://far.ai/post/2024-10-poisoning/",
"resourceId": "2a0c1c9020caae9c",
"resourceTitle": "FAR AI"
},
{
"text": "NIST/CAISI evaluations",
"url": "https://www.nist.gov/news-events/news/2025/09/caisi-evaluation-deepseek-ai-models-finds-shortcomings-and-risks",
"resourceId": "ff1a185c3aa33003",
"resourceTitle": "CAISI Evaluation of DeepSeek AI Models Finds Shortcomings and Risks"
},
{
"text": "NIST/CAISI: Evaluation of DeepSeek AI Models (September 2025)",
"url": "https://www.nist.gov/news-events/news/2025/09/caisi-evaluation-deepseek-ai-models-finds-shortcomings-and-risks",
"resourceId": "ff1a185c3aa33003",
"resourceTitle": "CAISI Evaluation of DeepSeek AI Models Finds Shortcomings and Risks"
},
{
"text": "Carnegie: DeepSeek and Chinese AI Safety Commitments (January 2025)",
"url": "https://carnegieendowment.org/research/2025/01/deepseek-and-other-chinese-firms-converge-with-western-companies-on-ai-promises",
"resourceId": "e3274b108aac1712",
"resourceTitle": "Frontier AI Safety Commitments"
}
],
"unconvertedLinkCount": 14,
"convertedLinkCount": 0,
"backlinkCount": 4,
"hallucinationRisk": {
"level": "medium",
"score": 45,
"factors": [
"no-citations",
"conceptual-content"
]
},
"entityType": "crux",
"redundancy": {
"maxSimilarity": 16,
"similarPages": [
{
"id": "proliferation",
"title": "Proliferation",
"path": "/knowledge-base/risks/proliferation/",
"similarity": 16
},
{
"id": "regulation-debate",
"title": "Government Regulation vs Industry Self-Governance",
"path": "/knowledge-base/debates/regulation-debate/",
"similarity": 13
},
{
"id": "international-summits",
"title": "International AI Safety Summits",
"path": "/knowledge-base/responses/international-summits/",
"similarity": 13
},
{
"id": "structured-access",
"title": "Structured Access / API-Only",
"path": "/knowledge-base/responses/structured-access/",
"similarity": 13
},
{
"id": "large-language-models",
"title": "Large Language Models",
"path": "/knowledge-base/capabilities/large-language-models/",
"similarity": 12
}
]
},
"coverage": {
"passing": 7,
"total": 13,
"targets": {
"tables": 9,
"diagrams": 1,
"internalLinks": 18,
"externalLinks": 11,
"footnotes": 7,
"references": 7
},
"actuals": {
"tables": 9,
"diagrams": 1,
"internalLinks": 6,
"externalLinks": 43,
"footnotes": 0,
"references": 7,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "red",
"tables": "green",
"diagrams": "green",
"internalLinks": "amber",
"externalLinks": "green",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:4.2 R:6.8 A:5.5 C:7.5"
},
"readerRank": 286,
"researchRank": 157,
"recommendedScore": 167.7
}
External Links
{
"lesswrong": "https://www.lesswrong.com/tag/open-source-ai"
}
Backlinks (4)
| id | title | type | relationship |
|---|---|---|---|
| __index__/knowledge-base/debates | Key Debates | concept | — |
| capability-alignment-race | Capability-Alignment Race Model | analysis | — |
| holden-karnofsky | Holden Karnofsky | person | — |
| toby-ord | Toby Ord | person | — |
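The coverage block in the record above pairs per-item targets with actuals and rolls each up to a green/amber/red status. The record does not document the thresholds; the one-third amber cutoff below is an inference that happens to reproduce the numeric items shown (internalLinks 6/18 is amber, footnotes 0/7 is red), not a confirmed rule.

```ts
type Status = "green" | "amber" | "red";

// Inferred rule, not documented in the record: meeting the target is
// green, reaching at least a third of it is amber, otherwise red.
// Items like "quotes" and "accuracy" (red despite 0/0 targets) clearly
// follow other logic and are not covered by this sketch.
function coverageStatus(actual: number, target: number): Status {
  if (actual >= target) return "green";
  if (actual >= target / 3) return "amber";
  return "red";
}

// This page's numeric targets and actuals, copied from the record:
const targets = { tables: 9, diagrams: 1, internalLinks: 18, externalLinks: 11, footnotes: 7, references: 7 };
const actuals = { tables: 9, diagrams: 1, internalLinks: 6, externalLinks: 43, footnotes: 0, references: 7 };

for (const key of Object.keys(targets) as (keyof typeof targets)[]) {
  console.log(key, coverageStatus(actuals[key], targets[key]));
}
// tables green, diagrams green, internalLinks amber,
// externalLinks green, footnotes red, references green
```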