Longterm Wiki

Conjecture

conjecture · organization · Path: /knowledge-base/organizations/conjecture/
Entity ID (EID): E70
8 backlinks · Quality: 37 · Updated: 2026-03-13
Page Record (database.json) — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "conjecture",
  "numericId": null,
  "path": "/knowledge-base/organizations/conjecture/",
  "filePath": "knowledge-base/organizations/conjecture.mdx",
  "title": "Conjecture",
  "quality": 37,
  "readerImportance": 35.5,
  "researchImportance": 57,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Conjecture is a 30-40 person London-based AI safety org founded 2022, pursuing Cognitive Emulation (CoEm) - building interpretable AI from ground-up rather than aligning LLMs - with \\$30M+ Series A funding. Founded by Connor Leahy (EleutherAI), they face high uncertainty about CoEm competitiveness (3-5 year timeline) and commercial pressure risks.",
  "description": "AI safety research organization focused on cognitive emulation and mechanistic interpretability, pursuing interpretability-first approaches to building safe AI systems",
  "ratings": {
    "novelty": 2.5,
    "rigor": 4,
    "actionability": 2,
    "completeness": 5.5
  },
  "category": "organizations",
  "subcategory": "safety-orgs",
  "clusters": [
    "ai-safety",
    "community"
  ],
  "metrics": {
    "wordCount": 1552,
    "tableCount": 18,
    "diagramCount": 0,
    "internalLinks": 44,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0.14,
    "sectionCount": 34,
    "hasOverview": true,
    "structuralScore": 11
  },
  "suggestedQuality": 73,
  "updateFrequency": 21,
  "evergreen": true,
  "wordCount": 1552,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 16,
  "backlinkCount": 8,
  "hallucinationRisk": {
    "level": "high",
    "score": 85,
    "factors": [
      "biographical-claims",
      "no-citations",
      "low-quality-score",
      "few-external-sources"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 14,
    "similarPages": [
      {
        "id": "holden-karnofsky",
        "title": "Holden Karnofsky",
        "path": "/knowledge-base/people/holden-karnofsky/",
        "similarity": 14
      },
      {
        "id": "safety-research-value",
        "title": "Expected Value of AI Safety Research",
        "path": "/knowledge-base/models/safety-research-value/",
        "similarity": 12
      },
      {
        "id": "apollo-research",
        "title": "Apollo Research",
        "path": "/knowledge-base/organizations/apollo-research/",
        "similarity": 12
      },
      {
        "id": "chai",
        "title": "CHAI (Center for Human-Compatible AI)",
        "path": "/knowledge-base/organizations/chai/",
        "similarity": 12
      },
      {
        "id": "dario-amodei",
        "title": "Dario Amodei",
        "path": "/knowledge-base/people/dario-amodei/",
        "similarity": 12
      }
    ]
  },
  "coverage": {
    "passing": 7,
    "total": 13,
    "targets": {
      "tables": 6,
      "diagrams": 1,
      "internalLinks": 12,
      "externalLinks": 8,
      "footnotes": 5,
      "references": 5
    },
    "actuals": {
      "tables": 18,
      "diagrams": 0,
      "internalLinks": 44,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 14,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "green",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:2.5 R:4 A:2 C:5.5"
  },
  "readerRank": 410,
  "researchRank": 242,
  "recommendedScore": 113.3
}
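The record above is assembled from three sources, as the caption notes: MDX frontmatter, the entity's YAML file, and metrics computed from the page body at build time. A minimal TypeScript sketch of what such a merge could look like; the helper name (buildPageRecord), the file-layout assumptions, and the library choices (gray-matter, js-yaml) are illustrative assumptions, not the wiki's actual build code.

// Hypothetical sketch of the build-time merge described in the caption above.
// Field names mirror the record; helpers and libraries are assumptions.
import { readFileSync } from "fs";
import matter from "gray-matter";
import yaml from "js-yaml";

interface PageRecord {
  id: string;
  path: string;
  filePath: string;
  title: string;
  quality: number | null;
  entityType?: string;
  metrics?: Record<string, number | boolean>;
  [key: string]: unknown;
}

function buildPageRecord(mdxPath: string, entityYamlPath: string): PageRecord {
  // 1. MDX frontmatter: editorial fields (title, quality, llmSummary, ratings, ...)
  const { data: frontmatter, content } = matter(readFileSync(mdxPath, "utf8"));

  // 2. Entity YAML: structured entity data (entityType, external links, ...)
  const entity = yaml.load(readFileSync(entityYamlPath, "utf8")) as Record<string, unknown>;

  // 3. Computed metrics: derived from the page body at build time
  const metrics = {
    wordCount: content.split(/\s+/).filter(Boolean).length,
    internalLinks: (content.match(/\]\(\/knowledge-base\//g) ?? []).length,
    hasOverview: /^##\s+Overview/m.test(content),
  };

  // Later spreads win on key collisions; computed metrics are kept under their own key.
  return { ...entity, ...frontmatter, metrics } as PageRecord;
}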
External Links
{
  "lesswrong": "https://www.lesswrong.com/tag/conjecture-org",
  "eaForum": "https://forum.effectivealtruism.org/topics/conjecture"
}
Backlinks (8)
id | title | type | relationship
controlai | ControlAI | organization |
__index__/knowledge-base/organizations | Organizations | concept |
safety-orgs-overview | AI Safety Organizations (Overview) | concept |
connor-leahy | Connor Leahy | person |
__index__/knowledge-base/people | People | concept |
evaluation | AI Evaluation | approach |
training-programs | AI Safety Training Programs | approach |
doomer | AI Doomer Worldview | concept |