Longterm Wiki

Ajeya Cotra

ajeya-cotra · person · Path: /knowledge-base/people/ajeya-cotra/
E864 · Entity ID (EID)
← Back to page · 14 backlinks · Quality: 55 · Updated: 2026-03-13
Page Record · database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "ajeya-cotra",
  "numericId": null,
  "path": "/knowledge-base/people/ajeya-cotra/",
  "filePath": "knowledge-base/people/ajeya-cotra.mdx",
  "title": "Ajeya Cotra",
  "quality": 55,
  "readerImportance": 55,
  "researchImportance": 60,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-20",
  "llmSummary": "Ajeya Cotra is a member of technical staff at METR and former senior advisor at Coefficient Giving (formerly Open Philanthropy), where she led technical AI safety grantmaking including a \\$25M agent benchmarks RFP. Author of the Bio Anchors AI timelines report (15% transformative AI by 2036, 50% by 2060), she has become influential for her work on intelligence explosion dynamics, crunch time strategy (the 6-12 month window after AI automates AI R&D), and the case for using early transformative AI for defensive work.",
  "description": "Member of technical staff at METR and former senior advisor at Coefficient Giving (formerly Open Philanthropy), known for the Bio Anchors AI timelines report and influential work on intelligence explosion dynamics, crunch time strategy, and AI safety grantmaking. Placed 3rd out of 413 participants in AI development forecasting.",
  "ratings": {
    "novelty": 4,
    "rigor": 6,
    "actionability": 5,
    "completeness": 6.5
  },
  "category": "people",
  "subcategory": "safety-researchers",
  "clusters": [
    "ai-safety",
    "community"
  ],
  "metrics": {
    "wordCount": 1950,
    "tableCount": 6,
    "diagramCount": 0,
    "internalLinks": 18,
    "externalLinks": 6,
    "footnoteCount": 0,
    "bulletRatio": 0.25,
    "sectionCount": 18,
    "hasOverview": true,
    "structuralScore": 14
  },
  "suggestedQuality": 93,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 1950,
  "unconvertedLinks": [
    {
      "text": "Bio Anchors report",
      "url": "https://www.lesswrong.com/posts/KrJfoZzpSDpnrv9va/draft-report-on-ai-timelines",
      "resourceId": "cd9f1d771d9a34c5",
      "resourceTitle": "Draft report on AI timelines"
    },
    {
      "text": "Bio Anchors report",
      "url": "https://www.lesswrong.com/posts/KrJfoZzpSDpnrv9va/draft-report-on-ai-timelines",
      "resourceId": "cd9f1d771d9a34c5",
      "resourceTitle": "Draft report on AI timelines"
    },
    {
      "text": "Bio Anchors Report",
      "url": "https://www.lesswrong.com/posts/KrJfoZzpSDpnrv9va/draft-report-on-ai-timelines",
      "resourceId": "cd9f1d771d9a34c5",
      "resourceTitle": "Draft report on AI timelines"
    },
    {
      "text": "Ajeya Cotra joins METR",
      "url": "https://metr.org",
      "resourceId": "45370a5153534152",
      "resourceTitle": "metr.org"
    }
  ],
  "unconvertedLinkCount": 4,
  "convertedLinkCount": 0,
  "backlinkCount": 14,
  "hallucinationRisk": {
    "level": "high",
    "score": 75,
    "factors": [
      "biographical-claims",
      "no-citations"
    ]
  },
  "entityType": "person",
  "redundancy": {
    "maxSimilarity": 14,
    "similarPages": [
      {
        "id": "self-improvement",
        "title": "Self-Improvement and Recursive Enhancement",
        "path": "/knowledge-base/capabilities/self-improvement/",
        "similarity": 14
      },
      {
        "id": "coefficient-giving",
        "title": "Coefficient Giving",
        "path": "/knowledge-base/organizations/coefficient-giving/",
        "similarity": 14
      },
      {
        "id": "miri-era",
        "title": "The MIRI Era (2000-2015)",
        "path": "/knowledge-base/history/miri-era/",
        "similarity": 13
      },
      {
        "id": "ai-timelines",
        "title": "AI Timelines",
        "path": "/knowledge-base/models/ai-timelines/",
        "similarity": 13
      },
      {
        "id": "ai-futures-project",
        "title": "AI Futures Project",
        "path": "/knowledge-base/organizations/ai-futures-project/",
        "similarity": 13
      }
    ]
  },
  "coverage": {
    "passing": 5,
    "total": 13,
    "targets": {
      "tables": 8,
      "diagrams": 1,
      "internalLinks": 16,
      "externalLinks": 10,
      "footnotes": 6,
      "references": 6
    },
    "actuals": {
      "tables": 6,
      "diagrams": 0,
      "internalLinks": 18,
      "externalLinks": 6,
      "footnotes": 0,
      "references": 2,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "red",
      "overview": "green",
      "tables": "amber",
      "diagrams": "red",
      "internalLinks": "green",
      "externalLinks": "amber",
      "footnotes": "red",
      "references": "amber",
      "quotes": "red",
      "accuracy": "red"
    },
    "ratingsString": "N:4 R:6 A:5 C:6.5"
  },
  "readerRank": 267,
  "researchRank": 223,
  "recommendedScore": 159.15
}
External Links

No external links

Backlinks (14)
id · title · type/relationship
self-improvement · Self-Improvement and Recursive Enhancement · capability
ai-timelines · AI Timelines · concept
80000-hours · 80,000 Hours · organization
arc · ARC (Alignment Research Center) · organization
coefficient-giving · Coefficient Giving · organization
mats · MATS (ML Alignment Theory Scholars program) · organization
metr · METR · organization
redwood-research · Redwood Research · organization
holden-karnofsky · Holden Karnofsky · person
ai-control · AI Control · safety-agenda
biosecurity-overview · Biosecurity Interventions (Overview) · concept
evals · Evals & Red-teaming · safety-agenda
governance-policy · AI Governance and Policy · crux
lock-in · AI Value Lock-in · risk
Longterm Wiki