ARC (Alignment Research Center)
ID: arc · Type: organization · Path: /knowledge-base/organizations/arc/
Entity ID (EID): E25
Page Record: database.json (merged from MDX frontmatter + Entity YAML + computed metrics at build time)
{
  "id": "arc",
  "numericId": null,
  "path": "/knowledge-base/organizations/arc/",
  "filePath": "knowledge-base/organizations/arc.mdx",
  "title": "ARC (Alignment Research Center)",
  "quality": 57,
  "readerImportance": 38.5,
  "researchImportance": 17.5,
  "tacticalValue": 62,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Comprehensive reference page on ARC (Alignment Research Center), covering its evolution from a dual theory/evals organization to ARC Theory (3 permanent researchers) plus the METR spin-out (December 2023), with specific funding figures (\\$265K Coefficient Giving (formerly Open Philanthropy) grant, \\$1.25M returned FTX grant), ELK prize details (\\$274K total), and Christiano's 20%/46% doom estimates. Content is a well-sourced compilation of publicly available information with no original analysis.",
  "description": "AI safety research nonprofit operating as ARC Theory, investigating fundamental alignment problems including Eliciting Latent Knowledge and heuristic arguments for neural network behavior. Its evaluations division spun out as the independent organization METR (Model Evaluation and Threat Research) in December 2023.",
  "ratings": {
    "focus": 7.5,
    "novelty": 2.5,
    "rigor": 7,
    "completeness": 8,
    "concreteness": 7.5,
    "actionability": 3.5,
    "objectivity": 7
  },
  "category": "organizations",
  "subcategory": "safety-orgs",
  "clusters": [
    "ai-safety",
    "community",
    "governance"
  ],
  "metrics": {
    "wordCount": 3672,
    "tableCount": 11,
    "diagramCount": 0,
    "internalLinks": 40,
    "externalLinks": 28,
    "footnoteCount": 0,
    "bulletRatio": 0.12,
    "sectionCount": 29,
    "hasOverview": true,
    "structuralScore": 14
  },
  "suggestedQuality": 93,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 3672,
  "unconvertedLinks": [
    {
      "text": "ELK Report",
      "url": "https://docs.google.com/document/d/1WwsnJQstPq91_Yh-Ch2XRL8H_EpsnjrC1dwZXR37PC8/edit",
      "resourceId": "ecd797db5ba5d02c",
      "resourceTitle": "eliciting latent knowledge"
    },
    {
      "text": "GPT-4 System Card",
      "url": "https://cdn.openai.com/papers/gpt-4-system-card.pdf",
      "resourceId": "ebab6e05661645c5",
      "resourceTitle": "OpenAI"
    },
    {
      "text": "ARC Official Homepage",
      "url": "https://www.alignment.org/",
      "resourceId": "0562f8c207d8b63f",
      "resourceTitle": "alignment.org"
    },
    {
      "text": "GPT-4 System Card",
      "url": "https://cdn.openai.com/papers/gpt-4-system-card.pdf",
      "resourceId": "ebab6e05661645c5",
      "resourceTitle": "OpenAI"
    },
    {
      "text": "Advanced AI Evaluations at AISI: May Update",
      "url": "https://www.aisi.gov.uk/blog/advanced-ai-evaluations-may-update",
      "resourceId": "4e56cdf6b04b126b",
      "resourceTitle": "UK AI Safety Institute renamed to AI Security Institute"
    },
    {
      "text": "My views on \"doom\"",
      "url": "https://www.lesswrong.com/posts/xWMqsvHapP3nwdSW8/my-views-on-doom",
      "resourceId": "ed73cbbe5dec0db9",
      "resourceTitle": "Paul Christiano"
    },
    {
      "text": "An Update on METR's Preliminary Evaluations of Claude 3.5 Sonnet and o1",
      "url": "https://metr.org/blog/2025-01-31-update-sonnet-o1-evals/",
      "resourceId": "89b92e6423256fc4",
      "resourceTitle": "METR's research"
    },
    {
      "text": "Common Elements of Frontier AI Safety Policies",
      "url": "https://metr.org/blog/2025-12-09-common-elements-of-frontier-ai-safety-policies/",
      "resourceId": "c8782940b880d00f",
      "resourceTitle": "METR's analysis of 12 companies"
    }
  ],
  "unconvertedLinkCount": 8,
  "convertedLinkCount": 2,
  "backlinkCount": 40,
  "hallucinationRisk": {
    "level": "medium",
    "score": 60,
    "factors": [
      "biographical-claims",
      "no-citations",
      "high-rigor"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 18,
    "similarPages": [
      {
        "id": "research-agendas",
        "title": "AI Alignment Research Agenda Comparison",
        "path": "/knowledge-base/responses/research-agendas/",
        "similarity": 18
      },
      {
        "id": "cais",
        "title": "CAIS (Center for AI Safety)",
        "path": "/knowledge-base/organizations/cais/",
        "similarity": 16
      },
      {
        "id": "frontier-model-forum",
        "title": "Frontier Model Forum",
        "path": "/knowledge-base/organizations/frontier-model-forum/",
        "similarity": 16
      },
      {
        "id": "dan-hendrycks",
        "title": "Dan Hendrycks",
        "path": "/knowledge-base/people/dan-hendrycks/",
        "similarity": 16
      },
      {
        "id": "anthropic-core-views",
        "title": "Anthropic Core Views",
        "path": "/knowledge-base/responses/anthropic-core-views/",
        "similarity": 16
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 15,
      "diagrams": 1,
      "internalLinks": 29,
      "externalLinks": 18,
      "footnotes": 11,
      "references": 11
    },
    "actuals": {
      "tables": 11,
      "diagrams": 0,
      "internalLinks": 40,
      "externalLinks": 28,
      "footnotes": 0,
      "references": 16,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:2.5 R:7 A:3.5 C:8"
  },
  "readerRank": 385,
  "researchRank": 508,
  "recommendedScore": 155.11
}
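The coverage block in this record compares per-item targets against actuals and rolls each counted item up to a green/amber/red status. Below is a minimal TypeScript sketch of one way that rollup could work; the threshold rule (meeting the target is green, reaching at least two-thirds of it is amber, anything less is red) is an assumption, not the documented build logic, though it reproduces the statuses shown above for the six counted items.

```typescript
type Status = "green" | "amber" | "red";

// Assumed thresholds (not documented in the record): meeting the target is green,
// reaching at least two-thirds of it is amber, anything less is red.
function coverageStatus(actual: number, target: number): Status {
  if (target <= 0 || actual >= target) return "green";
  if (actual >= (2 / 3) * target) return "amber";
  return "red";
}

// Targets and actuals copied from the coverage block above (counted items only).
const targets: Record<string, number> = { tables: 15, diagrams: 1, internalLinks: 29, externalLinks: 18, footnotes: 11, references: 11 };
const actuals: Record<string, number> = { tables: 11, diagrams: 0, internalLinks: 40, externalLinks: 28, footnotes: 0, references: 16 };

const items: Record<string, Status> = {};
for (const key of Object.keys(targets)) {
  items[key] = coverageStatus(actuals[key] ?? 0, targets[key]);
}
console.log(items); // { tables: "amber", diagrams: "red", internalLinks: "green", ... }
```

The non-counted items (llmSummary, schedule, entity, quotes, accuracy, etc.) would need their own checks and are not covered by this sketch.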
External Links

{
  "eaForum": "https://forum.effectivealtruism.org/topics/alignment-research-center"
}

Backlinks (40)
| id | title | type | relationship |
|---|---|---|---|
| situational-awareness | Situational Awareness | capability | — |
| apollo-research | Apollo Research | organization | — |
| metr | METR | organization | — |
| miri | MIRI | organization | — |
| redwood-research | Redwood Research | organization | — |
| paul-christiano | Paul Christiano | person | — |
| scalable-oversight | Scalable Oversight | safety-agenda | — |
| sandbagging | AI Capability Sandbagging | risk | — |
| coding | Autonomous Coding | capability | — |
| language-models | Large Language Models | capability | — |
| accident-risks | AI Accident Risk Cruxes | crux | — |
| why-alignment-hard | Why Alignment Might Be Hard | argument | — |
| ea-epistemic-failures-in-the-ftx-era | EA Epistemic Failures in the FTX Era | concept | — |
| ea-longtermist-wins-losses | EA and Longtermist Wins and Losses | concept | — |
| ai-talent-market-dynamics | AI Talent Market Dynamics | analysis | — |
| capability-alignment-race | Capability-Alignment Race Model | analysis | — |
| deceptive-alignment-decomposition | Deceptive Alignment Decomposition Model | analysis | — |
| goal-misgeneralization-probability | Goal Misgeneralization Probability Model | analysis | — |
| instrumental-convergence-framework | Instrumental Convergence Framework | analysis | — |
| model-organisms-of-misalignment | Model Organisms of Misalignment | analysis | — |
| planning-for-frontier-lab-scaling | Planning for Frontier Lab Scaling | analysis | — |
| power-seeking-conditions | Power-Seeking Emergence Conditions Model | analysis | — |
| risk-interaction-network | Risk Interaction Network | analysis | — |
| safety-research-value | Expected Value of AI Safety Research | analysis | — |
| scheming-likelihood-model | Scheming Likelihood Assessment | analysis | — |
| conjecture | Conjecture | organization | — |
| far-ai | FAR AI | organization | — |
| ftx-collapse-ea-funding-lessons | FTX Collapse: Lessons for EA Funding Resilience | concept | — |
| __index__/knowledge-base/organizations | Organizations | concept | — |
| long-term-benefit-trust | Long-Term Benefit Trust (Anthropic) | analysis | — |
| mats | MATS ML Alignment Theory Scholars program | organization | — |
| nist-ai | NIST and AI Safety | organization | — |
| safety-orgs-overview | AI Safety Organizations (Overview) | concept | — |
| vara | Value Aligned Research Advisors | organization | — |
| elon-musk | Elon Musk (AI Industry) | person | — |
| geoffrey-hinton | Geoffrey Hinton | person | — |
| ilya-sutskever | Ilya Sutskever | person | — |
| ai-control | AI Control | safety-agenda | — |
| alignment | AI Alignment | approach | — |
| x-com-epistemics | X.com Platform Epistemics | approach | — |
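The table above lists the 40 pages that link to this one. As a rough illustration, the sketch below shows how such a list might be assembled from database.json, assuming the database is an array of page records and that each record exposes a hypothetical `internalLinkTargets` field listing the ids it links to; the record above only shows aggregate counts (internalLinks: 40, backlinkCount: 40), so the actual schema may differ.

```typescript
// Sketch only: `internalLinkTargets` is a hypothetical field, and database.json is
// assumed to be an array of page records like the one shown above.
import { readFileSync } from "node:fs";

interface PageRecord {
  id: string;
  title: string;
  entityType?: string | null;
  internalLinkTargets?: string[]; // hypothetical field: ids of pages this page links to
}

function backlinksTo(targetId: string, dbPath = "database.json"): PageRecord[] {
  const pages: PageRecord[] = JSON.parse(readFileSync(dbPath, "utf8"));
  return pages.filter((page) => (page.internalLinkTargets ?? []).includes(targetId));
}

// Usage: emit rows in the same shape as the "Backlinks (40)" table above.
for (const page of backlinksTo("arc")) {
  console.log(`| ${page.id} | ${page.title} | ${page.entityType ?? "—"} | — |`);
}
```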