Longterm Wiki

AI Model Specifications

model-spec · policy · Path: /knowledge-base/responses/model-spec/
E594 — Entity ID (EID)
← Back to page · 2 backlinks · Quality: 50 · Updated: 2026-03-13
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "model-spec",
  "numericId": null,
  "path": "/knowledge-base/responses/model-spec/",
  "filePath": "knowledge-base/responses/model-spec.mdx",
  "title": "AI Model Specifications",
  "quality": 50,
  "readerImportance": 40,
  "researchImportance": 47.5,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Model specifications are explicit documents defining AI behavior, now published by all major frontier labs (Anthropic, OpenAI, Google, Meta) as of 2025. While they improve transparency and enable external scrutiny, they face a fundamental spec-reality gap—specifications don't guarantee implementation, with no robust verification mechanisms existing.",
  "description": "Model specifications are explicit written documents defining desired AI behavior, values, and boundaries. Pioneered by Anthropic's Claude Soul Document and OpenAI's Model Spec (updated 6+ times in 2025), they improve transparency and enable external scrutiny. As of 2025, all major frontier labs publish specs, with 78% of enterprises now using AI in at least one function—making behavioral documentation increasingly critical for accountability.",
  "ratings": {
    "novelty": 3.5,
    "rigor": 5,
    "actionability": 4.5,
    "completeness": 6
  },
  "category": "responses",
  "subcategory": "alignment-policy",
  "clusters": [
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 2670,
    "tableCount": 24,
    "diagramCount": 1,
    "internalLinks": 10,
    "externalLinks": 28,
    "footnoteCount": 0,
    "bulletRatio": 0.03,
    "sectionCount": 37,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 2670,
  "unconvertedLinks": [
    {
      "text": "78% of organizations using AI",
      "url": "https://mckinsey.com",
      "resourceId": "14a922610f3ad110",
      "resourceTitle": "McKinsey Global Institute"
    },
    {
      "text": "Constitutional AI",
      "url": "https://www.anthropic.com/research/constitutional-ai-harmlessness-from-ai-feedback",
      "resourceId": "e99a5c1697baa07d",
      "resourceTitle": "Constitutional AI: Harmlessness from AI Feedback"
    },
    {
      "text": "McKinsey survey 2024",
      "url": "https://mckinsey.com",
      "resourceId": "14a922610f3ad110",
      "resourceTitle": "McKinsey Global Institute"
    },
    {
      "text": "Constitutional AI: Harmlessness from AI Feedback",
      "url": "https://www.anthropic.com/research/constitutional-ai-harmlessness-from-ai-feedback",
      "resourceId": "e99a5c1697baa07d",
      "resourceTitle": "Constitutional AI: Harmlessness from AI Feedback"
    },
    {
      "text": "Collective Constitutional AI",
      "url": "https://www.anthropic.com/research/collective-constitutional-ai-aligning-a-language-model-with-public-input",
      "resourceId": "3c862a18b467640b",
      "resourceTitle": "Collective Constitutional AI"
    },
    {
      "text": "McKinsey AI Survey 2024",
      "url": "https://mckinsey.com",
      "resourceId": "14a922610f3ad110",
      "resourceTitle": "McKinsey Global Institute"
    }
  ],
  "unconvertedLinkCount": 6,
  "convertedLinkCount": 0,
  "backlinkCount": 2,
  "hallucinationRisk": {
    "level": "medium",
    "score": 55,
    "factors": [
      "no-citations"
    ]
  },
  "entityType": "policy",
  "redundancy": {
    "maxSimilarity": 14,
    "similarPages": [
      {
        "id": "evals-governance",
        "title": "Evals-Based Deployment Gates",
        "path": "/knowledge-base/responses/evals-governance/",
        "similarity": 14
      },
      {
        "id": "ai-control",
        "title": "AI Control",
        "path": "/knowledge-base/responses/ai-control/",
        "similarity": 13
      },
      {
        "id": "constitutional-ai",
        "title": "Constitutional AI",
        "path": "/knowledge-base/responses/constitutional-ai/",
        "similarity": 13
      },
      {
        "id": "output-filtering",
        "title": "AI Output Filtering",
        "path": "/knowledge-base/responses/output-filtering/",
        "similarity": 13
      },
      {
        "id": "preference-optimization",
        "title": "Preference Optimization Methods",
        "path": "/knowledge-base/responses/preference-optimization/",
        "similarity": 13
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 11,
      "diagrams": 1,
      "internalLinks": 21,
      "externalLinks": 13,
      "footnotes": 8,
      "references": 8
    },
    "actuals": {
      "tables": 24,
      "diagrams": 1,
      "internalLinks": 10,
      "externalLinks": 28,
      "footnotes": 0,
      "references": 3,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "amber",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "amber",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:3.5 R:5 A:4.5 C:6"
  },
  "readerRank": 376,
  "researchRank": 297,
  "recommendedScore": 141.75
}
External Links

No external links

Backlinks (2)
id · title · type · relationship
alignment-policy-overview · Policy & Governance (Overview) · concept
cooperative-ai · Cooperative AI · approach
Longterm Wiki