Longterm Wiki

Responsible Scaling Policies

rsp — policy — Path: /knowledge-base/responses/rsp/
E461 — Entity ID (EID)
← Back to page · 16 backlinks · Quality: 62 · Updated: 2026-03-13
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "rsp",
  "numericId": null,
  "path": "/knowledge-base/responses/rsp/",
  "filePath": "knowledge-base/responses/rsp.mdx",
  "title": "Responsible Scaling Policies",
  "quality": 62,
  "readerImportance": 51,
  "researchImportance": 28,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive analysis of Responsible Scaling Policies showing 20 companies with published frameworks as of Dec 2025, with SaferAI grading major policies 1.9-2.2/5 for specificity. Evidence suggests moderate effectiveness hindered by voluntary nature, competitive pressure among 3+ labs, and ~7-month capability doubling potentially outpacing evaluation science, though third-party verification (METR evaluated 5+ models) and Seoul Summit commitments (16 signatories) represent meaningful coordination progress.",
  "description": "Responsible Scaling Policies (RSPs) are voluntary commitments by AI labs to pause scaling when capability or safety thresholds are crossed. As of December 2025, 20 companies have published policies (up from 16 Seoul Summit signatories in May 2024). METR has conducted pre-deployment evaluations of 5+ major models. SaferAI grades the three major frameworks 1.9-2.2/5 for specificity. Effectiveness depends on voluntary compliance, evaluation quality, and whether ~7-month capability doubling outpaces governance.",
  "ratings": {
    "novelty": 4.2,
    "rigor": 6.8,
    "actionability": 6.5,
    "completeness": 7.3
  },
  "category": "responses",
  "subcategory": "alignment-policy",
  "clusters": [
    "ai-safety",
    "governance"
  ],
  "metrics": {
    "wordCount": 3422,
    "tableCount": 29,
    "diagramCount": 3,
    "internalLinks": 48,
    "externalLinks": 13,
    "footnoteCount": 0,
    "bulletRatio": 0.06,
    "sectionCount": 44,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 3422,
  "unconvertedLinks": [
    {
      "text": "20 companies",
      "url": "https://metr.org/common-elements",
      "resourceId": "30b9f5e826260d9d",
      "resourceTitle": "METR: Common Elements of Frontier AI Safety Policies"
    },
    {
      "text": "METR",
      "url": "https://metr.org/",
      "resourceId": "45370a5153534152",
      "resourceTitle": "metr.org"
    },
    {
      "text": "SaferAI grades",
      "url": "https://www.safer-ai.org/anthropics-responsible-scaling-policy-update-makes-a-step-backwards",
      "resourceId": "a5e4c7b49f5d3e1b",
      "resourceTitle": "SaferAI has argued"
    },
    {
      "text": "20 companies",
      "url": "https://metr.org/common-elements",
      "resourceId": "30b9f5e826260d9d",
      "resourceTitle": "METR: Common Elements of Frontier AI Safety Policies"
    },
    {
      "text": "METR",
      "url": "https://metr.org/",
      "resourceId": "45370a5153534152",
      "resourceTitle": "metr.org"
    },
    {
      "text": "SaferAI grade",
      "url": "https://www.safer-ai.org/anthropics-responsible-scaling-policy-update-makes-a-step-backwards",
      "resourceId": "a5e4c7b49f5d3e1b",
      "resourceTitle": "SaferAI has argued"
    },
    {
      "text": "METR Common Elements",
      "url": "https://metr.org/common-elements",
      "resourceId": "30b9f5e826260d9d",
      "resourceTitle": "METR: Common Elements of Frontier AI Safety Policies"
    },
    {
      "text": "UK Gov",
      "url": "https://www.gov.uk/government/publications/frontier-ai-safety-commitments-ai-seoul-summit-2024",
      "resourceId": "944fc2ac301f8980",
      "resourceTitle": "Seoul Frontier AI Commitments"
    },
    {
      "text": "METR",
      "url": "https://metr.org/",
      "resourceId": "45370a5153534152",
      "resourceTitle": "metr.org"
    },
    {
      "text": "METR",
      "url": "https://metr.org/",
      "resourceId": "45370a5153534152",
      "resourceTitle": "metr.org"
    },
    {
      "text": "Anthropic RSP",
      "url": "https://www.anthropic.com/rsp-updates",
      "resourceId": "c6766d463560b923",
      "resourceTitle": "Anthropic pioneered the Responsible Scaling Policy"
    }
  ],
  "unconvertedLinkCount": 11,
  "convertedLinkCount": 42,
  "backlinkCount": 16,
  "hallucinationRisk": {
    "level": "medium",
    "score": 55,
    "factors": [
      "no-citations"
    ]
  },
  "entityType": "policy",
  "redundancy": {
    "maxSimilarity": 19,
    "similarPages": [
      {
        "id": "evals-governance",
        "title": "Evals-Based Deployment Gates",
        "path": "/knowledge-base/responses/evals-governance/",
        "similarity": 19
      },
      {
        "id": "model-auditing",
        "title": "Third-Party Model Auditing",
        "path": "/knowledge-base/responses/model-auditing/",
        "similarity": 18
      },
      {
        "id": "dangerous-cap-evals",
        "title": "Dangerous Capability Evaluations",
        "path": "/knowledge-base/responses/dangerous-cap-evals/",
        "similarity": 17
      },
      {
        "id": "evals",
        "title": "Evals & Red-teaming",
        "path": "/knowledge-base/responses/evals/",
        "similarity": 16
      },
      {
        "id": "responsible-scaling-policies",
        "title": "Responsible Scaling Policies",
        "path": "/knowledge-base/responses/responsible-scaling-policies/",
        "similarity": 16
      }
    ]
  },
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 14,
      "diagrams": 1,
      "internalLinks": 27,
      "externalLinks": 17,
      "footnotes": 10,
      "references": 10
    },
    "actuals": {
      "tables": 29,
      "diagrams": 3,
      "internalLinks": 48,
      "externalLinks": 13,
      "footnotes": 0,
      "references": 21,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "green",
      "internalLinks": "green",
      "externalLinks": "amber",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:4.2 R:6.8 A:6.5 C:7.3"
  },
  "readerRank": 295,
  "researchRank": 433,
  "recommendedScore": 171.36
}
External Links
{
  "lesswrong": "https://www.lesswrong.com/tag/responsible-scaling-policies"
}
Backlinks (16)
id · title · type/relationship
language-models · Large Language Models · capability
why-alignment-hard · Why Alignment Might Be Hard · argument
__index__/knowledge-base/history · History · concept
short-timeline-policy-implications · Short Timeline Policy Implications · analysis
anthropic-ipo · Anthropic IPO · analysis
arc · ARC (Alignment Research Center) · organization
long-term-benefit-trust · Long-Term Benefit Trust (Anthropic) · analysis
dario-amodei · Dario Amodei · person
elon-musk · Elon Musk (AI Industry) · person
alignment-policy-overview · Policy & Governance (Overview) · concept
corporate-influence · Corporate Influence on AI Policy · crux
dangerous-cap-evals · Dangerous Capability Evaluations · approach
governance-policy · AI Governance and Policy · crux
responsible-scaling-policies · Responsible Scaling Policies · policy
bioweapons · Bioweapons · risk
emergent-capabilities · Emergent Capabilities · risk
Longterm Wiki