Longterm Wiki

RoastMyPost

roastmypost · project · Path: /knowledge-base/responses/roastmypost/
E385 — Entity ID (EID)
← Back to page · 3 backlinks · Quality: 35 · Updated: 2026-03-13
Page Record — database.json (merged from MDX frontmatter + Entity YAML + computed metrics at build time)
{
  "id": "roastmypost",
  "numericId": null,
  "path": "/knowledge-base/responses/roastmypost/",
  "filePath": "knowledge-base/responses/roastmypost.mdx",
  "title": "RoastMyPost",
  "quality": 35,
  "readerImportance": 17,
  "researchImportance": 22,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "RoastMyPost is an LLM tool (Claude Sonnet 4.5 + Perplexity) that evaluates written content through multiple specialized AI agents—fact-checking, logical fallacy detection, math verification, and more. Aimed at improving epistemic quality of research posts, particularly in EA/rationalist communities. Significant false positive rate means it's a complement to, not replacement for, human review.",
  "description": "An LLM-powered document evaluation tool that analyzes blog posts and research documents for errors, logical fallacies, and factual inaccuracies using specialized AI evaluators. Uses Claude Sonnet 4.5 with Perplexity integration for fact-checking.",
  "ratings": {
    "novelty": 6,
    "rigor": 4,
    "actionability": 7,
    "completeness": 5
  },
  "category": "responses",
  "subcategory": "epistemic-platforms",
  "clusters": [
    "epistemics",
    "ai-safety",
    "community"
  ],
  "metrics": {
    "wordCount": 677,
    "tableCount": 5,
    "diagramCount": 0,
    "internalLinks": 8,
    "externalLinks": 3,
    "footnoteCount": 0,
    "bulletRatio": 0.18,
    "sectionCount": 12,
    "hasOverview": true,
    "structuralScore": 11
  },
  "suggestedQuality": 73,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 677,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 0,
  "backlinkCount": 3,
  "citationHealth": {
    "total": 1,
    "withQuotes": 1,
    "verified": 1,
    "accuracyChecked": 1,
    "accurate": 1,
    "inaccurate": 0,
    "avgScore": 0.8148148059844971
  },
  "hallucinationRisk": {
    "level": "medium",
    "score": 60,
    "factors": [
      "no-citations",
      "low-quality-score"
    ]
  },
  "entityType": "project",
  "redundancy": {
    "maxSimilarity": 0,
    "similarPages": []
  },
  "coverage": {
    "passing": 9,
    "total": 13,
    "targets": {
      "tables": 3,
      "diagrams": 0,
      "internalLinks": 5,
      "externalLinks": 3,
      "footnotes": 2,
      "references": 2
    },
    "actuals": {
      "tables": 5,
      "diagrams": 0,
      "internalLinks": 8,
      "externalLinks": 3,
      "footnotes": 0,
      "references": 0,
      "quotesWithQuotes": 1,
      "quotesTotal": 1,
      "accuracyChecked": 1,
      "accuracyTotal": 1
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "red",
      "quotes": "green",
      "accuracy": "green"
    },
    "ratingsString": "N:6 R:4 A:7 C:5"
  },
  "readerRank": 539,
  "researchRank": 474,
  "recommendedScore": 99.69
}
External Links

No external links

Backlinks (3)
id · title · type · relationship
quri · QURI (Quantified Uncertainty Research Institute) · organization
epistemic-tools-tools-overview · Tools & Platforms (Overview) · concept
squiggleai · SquiggleAI · project
Longterm Wiki