Longterm Wiki

FAR AI

far-ai · organization · Path: /knowledge-base/organizations/far-ai/
E138 — Entity ID (EID)
← Back to page · 8 backlinks · Quality: 76 · Updated: 2026-03-13
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "far-ai",
  "numericId": null,
  "path": "/knowledge-base/organizations/far-ai/",
  "filePath": "knowledge-base/organizations/far-ai.mdx",
  "title": "FAR AI",
  "quality": 76,
  "readerImportance": 84.5,
  "researchImportance": 53.5,
  "tacticalValue": 68,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "FAR AI is an AI safety research nonprofit founded in July 2022 by Adam Gleave (CEO) and Karl Berzins (Co-founder & President). Based in Berkeley, California, the organization conducts technical research in adversarial robustness, model evaluation, interpretability, and alignment. Notable work includes demonstrating that adversarial policies can defeat superhuman Go AIs and co-authoring the 'Towards Guaranteed Safe AI' framework. FAR AI reported $24.3M in FY2024 revenue and secured over $30M in 2025 funding commitments from funders including Coefficient Giving (previously Open Philanthropy), Schmidt Sciences, and the Survival and Flourishing Fund. In early 2026, FAR AI was selected by the European Commission's AI Office to lead CBRN risk research under tender EC-CNECT/2025/OP/0032. The organization also operates FAR.Labs (a Berkeley coworking space with 40+ members) and a $12M grantmaking program.",
  "description": "AI safety research nonprofit founded in 2022 by Adam Gleave and Karl Berzins, focusing on adversarial robustness, model evaluation, and alignment research",
  "ratings": {
    "novelty": 2.5,
    "rigor": 3,
    "actionability": 2,
    "completeness": 5
  },
  "category": "organizations",
  "subcategory": "safety-orgs",
  "clusters": [
    "ai-safety",
    "community"
  ],
  "metrics": {
    "wordCount": 3250,
    "tableCount": 4,
    "diagramCount": 0,
    "internalLinks": 32,
    "externalLinks": 16,
    "footnoteCount": 0,
    "bulletRatio": 0.08,
    "sectionCount": 29,
    "hasOverview": true,
    "structuralScore": 14
  },
  "suggestedQuality": 93,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 3250,
  "unconvertedLinks": [
    {
      "text": "far.ai",
      "url": "https://www.far.ai",
      "resourceId": "9199f43edaf3a03b",
      "resourceTitle": "FAR AI"
    },
    {
      "text": "FAR.AI",
      "url": "https://www.far.ai/",
      "resourceId": "9199f43edaf3a03b",
      "resourceTitle": "FAR AI"
    },
    {
      "text": "FAR.AI Research",
      "url": "https://www.far.ai/research",
      "resourceId": "512c393a589f266f",
      "resourceTitle": "Research Overview – FAR.AI"
    },
    {
      "text": "FAR.AI Programs",
      "url": "https://www.far.ai/programs",
      "resourceId": "9520625a8333f057",
      "resourceTitle": "Programs – FAR.AI"
    },
    {
      "text": "FAR.AI Transparency",
      "url": "https://www.far.ai/about/transparency",
      "resourceId": "eeed7e12c212632c",
      "resourceTitle": "Transparency"
    },
    {
      "text": "Grantmaking | FAR.AI",
      "url": "https://www.far.ai/programs/grantmaking",
      "resourceId": "f39e450eac7bbaa9",
      "resourceTitle": "Grantmaking"
    },
    {
      "text": "What's new at FAR AI — EA Forum",
      "url": "https://forum.effectivealtruism.org/posts/arfyBSCunWXCtdPMJ/what-s-new-at-far-ai-1",
      "resourceId": "862576e20112243d",
      "resourceTitle": "What's new at FAR AI — EA Forum"
    }
  ],
  "unconvertedLinkCount": 7,
  "convertedLinkCount": 0,
  "backlinkCount": 8,
  "hallucinationRisk": {
    "level": "high",
    "score": 85,
    "factors": [
      "biographical-claims",
      "no-citations",
      "low-rigor-score"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 16,
    "similarPages": [
      {
        "id": "miri-era",
        "title": "The MIRI Era (2000-2015)",
        "path": "/knowledge-base/history/miri-era/",
        "similarity": 16
      },
      {
        "id": "anthropic-ipo",
        "title": "Anthropic IPO",
        "path": "/knowledge-base/organizations/anthropic-ipo/",
        "similarity": 16
      },
      {
        "id": "cais",
        "title": "CAIS (Center for AI Safety)",
        "path": "/knowledge-base/organizations/cais/",
        "similarity": 16
      },
      {
        "id": "frontier-model-forum",
        "title": "Frontier Model Forum",
        "path": "/knowledge-base/organizations/frontier-model-forum/",
        "similarity": 16
      },
      {
        "id": "openai",
        "title": "OpenAI",
        "path": "/knowledge-base/organizations/openai/",
        "similarity": 16
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-26",
      "branch": "claude/fix-far-ai-page",
      "title": "Auto-improve (standard): FAR AI",
      "summary": "Improved \"FAR AI\" via standard pipeline (1222.4s). Quality score: 76. Issues resolved: Bare URL in Overview section: 'far.ai' appears as plain text; Frontmatter field 'lastEdited' uses a future date (2026-02-2; Section duplication: The 40+ FAR.Labs member count and 1,000.",
      "duration": "1222.4s",
      "cost": "$5-8"
    },
    {
      "date": "2026-02-18",
      "branch": "claude/fix-issue-240-N5irU",
      "title": "Surface tacticalValue in /wiki table and score 53 pages",
      "summary": "Added `tacticalValue` to `ExploreItem` interface, `getExploreItems()` mappings, the `/wiki` explore table (new sortable \"Tact.\" column), and the card view sort dropdown. Scored 49 new pages with tactical values (4 were already scored), bringing total to 53.",
      "model": "sonnet-4",
      "duration": "~30min"
    },
    {
      "date": "2026-02-17",
      "branch": "claude/review-wiki-editing-scCul",
      "title": "Wiki editing system refactoring",
      "summary": "Six refactors to the wiki editing pipeline: (1) extracted shared regex patterns to `crux/lib/patterns.ts`, (2) refactored validation in page-improver to use in-process engine calls instead of subprocess spawning, (3) split the 694-line `phases.ts` into 7 individual phase modules under `phases/`, (4) created shared LLM abstraction `crux/lib/llm.ts` unifying duplicated streaming/retry/tool-loop code, (5) added Zod schemas for LLM JSON response validation, (6) decomposed 820-line mermaid validation into `crux/lib/mermaid-checks.ts` (604 lines) + slim orchestrator (281 lines). Follow-up review integrated patterns.ts across 19+ files, fixed dead imports, corrected ToolHandler type, wired mdx-utils.ts to use shared patterns, replaced hardcoded model strings with MODELS constants, replaced `new Anthropic()` with `createLlmClient()`, replaced inline `extractText` implementations with shared `extractText()` from llm.ts, integrated `MARKDOWN_LINK_RE` into link validators, added `objectivityIssues` to the `AnalysisResult` type (removing an unsafe cast in utils.ts), fixed CI failure from eager client creation, and tested the full pipeline by improving 3 wiki pages. After manual review of 3 improved pages, fixed 8 systematic pipeline issues: (1) added content preservation instructions to prevent polish-tier content loss, (2) made auto-grading default after --apply, (3) added polish-tier citation suppression to prevent fabricated citations, (4) added Quick Assessment table requirement for person pages, (5) added required Overview section enforcement, (6) added section deduplication and content repetition checks to review phase, (7) added bare URL→markdown link conversion instruction, (8) extended biographical claim checker to catch publication/co-authorship and citation count claims.\n\nSubsequent iterative testing and prompt refinement: ran pipeline on jan-leike, chris-olah, far-ai pages. \nDiscovered and fixed: (a) `<!-- NEEDS CITATION -->` HTML comments break MDX compilation (changed to `{/* NEEDS CITATION */}`), (b) excessive citation markers at polish tier — added instruction to only mark NEW claims (max 3-5 per page), (c) editorial meta-comments cluttering output — added no-meta-comments instruction, (d) thin padding sections — added anti-padding instruction, (e) section deduplication needed stronger emphasis — added merge instruction with common patterns. Final test results: jan-leike 1254→1997 words, chris-olah 1187→1687 words, far-ai 1519→2783 words, miri-era 2678→4338 words; all MDX compile, zero critical issues.",
      "pr": 184
    }
  ],
  "coverage": {
    "passing": 8,
    "total": 13,
    "targets": {
      "tables": 13,
      "diagrams": 1,
      "internalLinks": 26,
      "externalLinks": 16,
      "footnotes": 10,
      "references": 10
    },
    "actuals": {
      "tables": 4,
      "diagrams": 0,
      "internalLinks": 32,
      "externalLinks": 16,
      "footnotes": 0,
      "references": 27,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "editHistoryCount": 3,
    "ratingsString": "N:2.5 R:3 A:2 C:5"
  },
  "readerRank": 57,
  "researchRank": 264,
  "recommendedScore": 216.11
}
External Links
{
  "eaForum": "https://forum.effectivealtruism.org/topics/far-ai"
}
Backlinks (8)
id | title | type | relationship
arb-research | Arb Research | organization |
ea-global | EA Global | organization |
__index__/knowledge-base/organizations | Organizations | concept |
lionheart-ventures | Lionheart Ventures | organization |
mats | MATS ML Alignment Theory Scholars program | organization |
safety-orgs-overview | AI Safety Organizations (Overview) | concept |
sff | Survival and Flourishing Fund (SFF) | organization |
jaan-tallinn | Jaan Tallinn | person |
Longterm Wiki