Longterm Wiki

Epistemic Virtue Evals

epistemic-virtue-evals · approach · Path: /knowledge-base/responses/epistemic-virtue-evals/
E592 — Entity ID (EID)
← Back to page · 3 backlinks · Quality: 45 · Updated: 2026-03-13
Page Record — database.json (merged from MDX frontmatter + Entity YAML + computed metrics at build time)
{
  "id": "epistemic-virtue-evals",
  "numericId": null,
  "path": "/knowledge-base/responses/epistemic-virtue-evals/",
  "filePath": "knowledge-base/responses/epistemic-virtue-evals.mdx",
  "title": "Epistemic Virtue Evals",
  "quality": 45,
  "readerImportance": 22,
  "researchImportance": 34.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": null,
  "description": "A proposed suite of open benchmarks evaluating AI models on epistemic virtues: calibration, clarity, bias resistance, sycophancy avoidance, and manipulation detection. Includes the concept of 'pedantic mode' for maximally accurate AI outputs.",
  "ratings": {
    "novelty": 5.5,
    "rigor": 5,
    "actionability": 6,
    "completeness": 5
  },
  "category": "responses",
  "subcategory": "epistemic-approaches",
  "clusters": [
    "epistemics",
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 1508,
    "tableCount": 9,
    "diagramCount": 1,
    "internalLinks": 4,
    "externalLinks": 32,
    "footnoteCount": 0,
    "bulletRatio": 0.21,
    "sectionCount": 30,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 1508,
  "unconvertedLinks": [
    {
      "text": "TruthfulQA",
      "url": "https://arxiv.org/abs/2109.07958",
      "resourceId": "fe2a3307a3dae3e5",
      "resourceTitle": "Kenton et al. (2021)"
    },
    {
      "text": "Perez et al. (2022)",
      "url": "https://arxiv.org/abs/2212.09251",
      "resourceId": "cd36bb65654c0147",
      "resourceTitle": "Perez et al. (2022): \"Sycophancy in LLMs\""
    },
    {
      "text": "Sharma et al. (2023)",
      "url": "https://arxiv.org/abs/2310.13548",
      "resourceId": "7951bdb54fd936a6",
      "resourceTitle": "Anthropic: \"Discovering Sycophancy in Language Models\""
    },
    {
      "text": "BIG-Bench",
      "url": "https://arxiv.org/abs/2206.04615",
      "resourceId": "11125731fea628f3",
      "resourceTitle": "BIG-Bench 2022"
    },
    {
      "text": "METR",
      "url": "https://metr.org/",
      "resourceId": "45370a5153534152",
      "resourceTitle": "metr.org"
    },
    {
      "text": "Bloom",
      "url": "https://alignment.anthropic.com/2025/bloom-auto-evals/",
      "resourceId": "7fa7d4cb797a5edd",
      "resourceTitle": "Bloom: Automated Behavioral Evaluations"
    },
    {
      "text": "Measuring How Models Mimic Human Falsehoods",
      "url": "https://arxiv.org/abs/2109.07958",
      "resourceId": "fe2a3307a3dae3e5",
      "resourceTitle": "Kenton et al. (2021)"
    },
    {
      "text": "Towards Understanding Sycophancy in Language Models",
      "url": "https://arxiv.org/abs/2310.13548",
      "resourceId": "7951bdb54fd936a6",
      "resourceTitle": "Anthropic: \"Discovering Sycophancy in Language Models\""
    }
  ],
  "unconvertedLinkCount": 8,
  "convertedLinkCount": 0,
  "backlinkCount": 3,
  "hallucinationRisk": {
    "level": "medium",
    "score": 45,
    "factors": [
      "no-citations",
      "conceptual-content"
    ]
  },
  "entityType": "approach",
  "redundancy": {
    "maxSimilarity": 15,
    "similarPages": [
      {
        "id": "collective-epistemics-design-sketches",
        "title": "Design Sketches for Collective Epistemics",
        "path": "/knowledge-base/responses/collective-epistemics-design-sketches/",
        "similarity": 15
      },
      {
        "id": "reliability-tracking",
        "title": "AI System Reliability Tracking",
        "path": "/knowledge-base/responses/reliability-tracking/",
        "similarity": 15
      },
      {
        "id": "provenance-tracing",
        "title": "AI Content Provenance Tracing",
        "path": "/knowledge-base/responses/provenance-tracing/",
        "similarity": 14
      },
      {
        "id": "capability-elicitation",
        "title": "Capability Elicitation",
        "path": "/knowledge-base/responses/capability-elicitation/",
        "similarity": 13
      },
      {
        "id": "rhetoric-highlighting",
        "title": "AI-Assisted Rhetoric Highlighting",
        "path": "/knowledge-base/responses/rhetoric-highlighting/",
        "similarity": 13
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 6,
      "diagrams": 1,
      "internalLinks": 12,
      "externalLinks": 8,
      "footnotes": 5,
      "references": 5
    },
    "actuals": {
      "tables": 9,
      "diagrams": 1,
      "internalLinks": 4,
      "externalLinks": 32,
      "footnotes": 0,
      "references": 6,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "red",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "amber",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:5.5 R:5 A:6 C:5"
  },
  "readerRank": 509,
  "researchRank": 385,
  "recommendedScore": 122.53
}
External Links

No external links

Backlinks (3)
id · title · type · relationship
collective-epistemics-design-sketches · Design Sketches for Collective Epistemics · approach
epistemic-tools-approaches-overview · Approaches (Overview) · concept
provenance-tracing · AI Content Provenance Tracing · approach
Longterm Wiki