AI-Powered Consensus Manufacturing
ID: consensus-manufacturing · Type: risk · Path: /knowledge-base/risks/consensus-manufacturing/
Entity ID (EID): E72
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
"id": "consensus-manufacturing",
"numericId": null,
"path": "/knowledge-base/risks/consensus-manufacturing/",
"filePath": "knowledge-base/risks/consensus-manufacturing.mdx",
"title": "AI-Powered Consensus Manufacturing",
"quality": 64,
"readerImportance": 15.5,
"researchImportance": 21.5,
"tacticalValue": null,
"contentFormat": "article",
"tractability": null,
"neglectedness": null,
"uncertainty": null,
"causalLevel": "pathway",
"lastUpdated": "2026-03-13",
"dateCreated": "2026-02-15",
"llmSummary": "Consensus manufacturing through AI-generated content is already occurring at massive scale (18M of 22M FCC comments were fake in 2017; 30-40% of online reviews are fabricated). Detection systems achieve only 42-74% accuracy against AI text, with false news spreading 6x faster than truth, threatening democratic processes and market mechanisms through undetectable artificial opinion that shapes real policy and purchasing decisions.",
"description": "AI systems creating artificial appearances of public agreement through mass generation of fake comments, reviews, and social media posts. The 2017 FCC Net Neutrality case saw 18M of 22M comments fabricated, while 30-40% of online reviews are now estimated fake. Detection systems achieve only 42-74% accuracy against AI-generated text, with false news spreading 6x faster than truth on social platforms.",
"ratings": {
"novelty": 5.2,
"rigor": 6.8,
"actionability": 5.5,
"completeness": 7.1
},
"category": "risks",
"subcategory": "epistemic",
"clusters": [
"ai-safety",
"epistemics"
],
"metrics": {
"wordCount": 3444,
"tableCount": 7,
"diagramCount": 1,
"internalLinks": 34,
"externalLinks": 0,
"footnoteCount": 0,
"bulletRatio": 0.05,
"sectionCount": 20,
"hasOverview": true,
"structuralScore": 12
},
"suggestedQuality": 80,
"updateFrequency": 45,
"evergreen": true,
"wordCount": 3444,
"unconvertedLinks": [],
"unconvertedLinkCount": 0,
"convertedLinkCount": 30,
"backlinkCount": 8,
"hallucinationRisk": {
"level": "medium",
"score": 60,
"factors": [
"no-citations",
"few-external-sources"
]
},
"entityType": "risk",
"redundancy": {
"maxSimilarity": 20,
"similarPages": [
{
"id": "epistemic-security",
"title": "AI-Era Epistemic Security",
"path": "/knowledge-base/responses/epistemic-security/",
"similarity": 20
},
{
"id": "disinformation",
"title": "Disinformation",
"path": "/knowledge-base/risks/disinformation/",
"similarity": 18
},
{
"id": "authentication-collapse-timeline",
"title": "Authentication Collapse Timeline Model",
"path": "/knowledge-base/models/authentication-collapse-timeline/",
"similarity": 17
},
{
"id": "authoritarian-tools-diffusion",
"title": "Authoritarian Tools Diffusion Model",
"path": "/knowledge-base/models/authoritarian-tools-diffusion/",
"similarity": 17
},
{
"id": "deliberation",
"title": "AI-Assisted Deliberation Platforms",
"path": "/knowledge-base/responses/deliberation/",
"similarity": 17
}
]
},
"coverage": {
"passing": 7,
"total": 13,
"targets": {
"tables": 14,
"diagrams": 1,
"internalLinks": 28,
"externalLinks": 17,
"footnotes": 10,
"references": 10
},
"actuals": {
"tables": 7,
"diagrams": 1,
"internalLinks": 34,
"externalLinks": 0,
"footnotes": 0,
"references": 14,
"quotesWithQuotes": 0,
"quotesTotal": 0,
"accuracyChecked": 0,
"accuracyTotal": 0
},
"items": {
"llmSummary": "green",
"schedule": "green",
"entity": "green",
"editHistory": "red",
"overview": "green",
"tables": "amber",
"diagrams": "green",
"internalLinks": "green",
"externalLinks": "red",
"footnotes": "red",
"references": "green",
"quotes": "red",
"accuracy": "red"
},
"ratingsString": "N:5.2 R:6.8 A:5.5 C:7.1"
},
"readerRank": 551,
"researchRank": 478,
"recommendedScore": 157.61
}

External Links
No external links
Backlinks (8)
| id | title | type | relationship |
|---|---|---|---|
| consensus-manufacturing-dynamics | Consensus Manufacturing Dynamics Model | analysis | related |
| epistemic-security | AI-Era Epistemic Security | approach | — |
| prediction-markets | Prediction Markets (AI Forecasting) | approach | — |
| language-models | Large Language Models | capability | — |
| epistemic-overview | Epistemic Risks (Overview) | concept | — |
| historical-revisionism | Historical Revisionism | risk | — |
| learned-helplessness | Epistemic Learned Helplessness | risk | — |
| near-term-risks | Key Near-Term AI Risks | risk | — |