Longterm Wiki

Goodfire

goodfire · organization · Path: /knowledge-base/organizations/goodfire/
Entity ID (EID): E430
4 backlinks · Quality: 68 · Updated: 2026-03-13
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "goodfire",
  "numericId": null,
  "path": "/knowledge-base/organizations/goodfire/",
  "filePath": "knowledge-base/organizations/goodfire.mdx",
  "title": "Goodfire",
  "quality": 68,
  "readerImportance": 85.5,
  "researchImportance": 56,
  "tacticalValue": 76,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Goodfire is a well-funded AI interpretability startup valued at \\$1.25B (Feb 2026) developing mechanistic interpretability tools like Ember API to make neural networks more transparent and steerable. The company's pivot toward using interpretability in model training (\"intentional design\") has sparked significant AI safety community debate about whether this compromises interpretability as an independent safety tool.",
  "description": "AI interpretability research lab developing tools to decode and control neural network internals for safer AI systems",
  "ratings": {
    "novelty": 6,
    "rigor": 7,
    "actionability": 6,
    "completeness": 8
  },
  "category": "organizations",
  "subcategory": "safety-orgs",
  "clusters": [
    "ai-safety",
    "community"
  ],
  "metrics": {
    "wordCount": 2422,
    "tableCount": 3,
    "diagramCount": 0,
    "internalLinks": 19,
    "externalLinks": 2,
    "footnoteCount": 0,
    "bulletRatio": 0.23,
    "sectionCount": 25,
    "hasOverview": true,
    "structuralScore": 12
  },
  "suggestedQuality": 80,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 2422,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 0,
  "backlinkCount": 4,
  "citationHealth": {
    "total": 50,
    "withQuotes": 42,
    "verified": 41,
    "accuracyChecked": 41,
    "accurate": 27,
    "inaccurate": 2,
    "avgScore": 0.9417429679916018
  },
  "hallucinationRisk": {
    "level": "medium",
    "score": 60,
    "factors": [
      "biographical-claims",
      "no-citations",
      "high-rigor"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 18,
    "similarPages": [
      {
        "id": "anthropic-core-views",
        "title": "Anthropic Core Views",
        "path": "/knowledge-base/responses/anthropic-core-views/",
        "similarity": 18
      },
      {
        "id": "interpretability",
        "title": "Mechanistic Interpretability",
        "path": "/knowledge-base/responses/interpretability/",
        "similarity": 18
      },
      {
        "id": "elicit",
        "title": "Elicit (AI Research Tool)",
        "path": "/knowledge-base/organizations/elicit/",
        "similarity": 17
      },
      {
        "id": "chris-olah",
        "title": "Chris Olah",
        "path": "/knowledge-base/people/chris-olah/",
        "similarity": 17
      },
      {
        "id": "research-agendas",
        "title": "AI Alignment Research Agenda Comparison",
        "path": "/knowledge-base/responses/research-agendas/",
        "similarity": 17
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-18",
      "branch": "claude/fix-issue-240-N5irU",
      "title": "Surface tacticalValue in /wiki table and score 53 pages",
      "summary": "Added `tacticalValue` to `ExploreItem` interface, `getExploreItems()` mappings, the `/wiki` explore table (new sortable \"Tact.\" column), and the card view sort dropdown. Scored 49 new pages with tactical values (4 were already scored), bringing total to 53.",
      "model": "sonnet-4",
      "duration": "~30min"
    }
  ],
  "coverage": {
    "passing": 9,
    "total": 13,
    "targets": {
      "tables": 10,
      "diagrams": 1,
      "internalLinks": 19,
      "externalLinks": 12,
      "footnotes": 7,
      "references": 7
    },
    "actuals": {
      "tables": 3,
      "diagrams": 0,
      "internalLinks": 19,
      "externalLinks": 2,
      "footnotes": 0,
      "references": 24,
      "quotesWithQuotes": 42,
      "quotesTotal": 50,
      "accuracyChecked": 41,
      "accuracyTotal": 50
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "amber",
      "footnotes": "red",
      "references": "green",
      "quotes": "green",
      "accuracy": "green"
    },
    "editHistoryCount": 1,
    "ratingsString": "N:6 R:7 A:6 C:8"
  },
  "readerRank": 46,
  "researchRank": 250,
  "recommendedScore": 200.49
}
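
The caption above describes how this record is assembled: MDX frontmatter, entity YAML, and computed metrics are merged into one object at build time. A minimal TypeScript sketch of that merge, assuming hypothetical loader functions (parseFrontmatter, loadEntityYaml, and computeMetrics are illustrative stubs, not the wiki's actual API; only the field names come from the record above):

// Sketch of the build-time merge that produces one database.json record.
// The three loaders are ambient stubs; the real build presumably reads
// the MDX file and entity YAML from disk.
type Json = Record<string, unknown>;

declare function parseFrontmatter(mdxPath: string): Json; // title, quality, llmSummary, ...
declare function loadEntityYaml(id: string): Json;        // entityType, EID, ...
declare function computeMetrics(mdxPath: string): Json;   // wordCount, tableCount, ...

function buildPageRecord(id: string, mdxPath: string): Json {
  const frontmatter = parseFrontmatter(mdxPath);
  const entity = loadEntityYaml(id);
  const metrics = computeMetrics(mdxPath);

  return {
    id,
    filePath: mdxPath,
    ...entity,      // entity YAML first, so per-page frontmatter can override shared keys
    ...frontmatter, // MDX frontmatter
    metrics,        // computed counts kept whole under their own key
    wordCount: metrics["wordCount"], // also surfaced top-level, as in the record above
  };
}

The spread order is an assumption; the record itself does not reveal which source wins when frontmatter and entity YAML define the same key.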
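
The coverage.items colors also look mechanically derivable from the targets and actuals above: green when the actual meets the target, red when the actual is zero, amber otherwise. This is a rule inferred from the displayed numbers, not documented behavior; a sketch that reproduces the count-based statuses:

// Inferred (not documented) status rule for count-based coverage items.
type CoverageStatus = "green" | "amber" | "red";

function coverageStatus(actual: number, target: number): CoverageStatus {
  if (actual >= target) return "green"; // meets or exceeds target
  if (actual === 0) return "red";       // nothing present at all
  return "amber";                       // partial progress
}

coverageStatus(3, 10);  // "amber"  tables
coverageStatus(0, 1);   // "red"    diagrams
coverageStatus(19, 19); // "green"  internalLinks
coverageStatus(2, 12);  // "amber"  externalLinks
coverageStatus(0, 7);   // "red"    footnotes
coverageStatus(24, 7);  // "green"  references

The quotes and accuracy items (42/50 and 41/50, both green) clearly follow a different, ratio-based threshold, so this rule only covers the items with simple count targets.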
External Links

No external links

Backlinks (4)
id                     title                                 type           relationship
sparse-autoencoders    Sparse Autoencoders (SAEs)            approach
safety-orgs-overview   AI Safety Organizations (Overview)    concept
seldon-lab             Seldon Lab                            organization
interpretability       Mechanistic Interpretability          safety-agenda