Longterm Wiki

Sentinel (Catastrophic Risk Foresight)

sentinel · organization · Path: /knowledge-base/organizations/sentinel/
Entity ID (EID): E566
← Back to page · 4 backlinks · Quality: 39 · Updated: 2026-03-13
Page Record · database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "sentinel",
  "numericId": null,
  "path": "/knowledge-base/organizations/sentinel/",
  "filePath": "knowledge-base/organizations/sentinel.mdx",
  "title": "Sentinel (Catastrophic Risk Foresight)",
  "quality": 39,
  "readerImportance": 28.5,
  "researchImportance": 45,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-15",
  "llmSummary": "Sentinel is a catastrophic risk foresight organization co-founded by Nuño Sempere and Rai Sur, operating informally from around 2023 and incorporated as a US 501(c)(3) nonprofit in or after late 2024. It combines AI-powered news filtering with probabilistic human forecaster assessment to produce weekly risk briefings. Its multi-stage pipeline parses millions of news items, uses LLMs for triage, and routes flagged signals to forecasters drawn from groups including Samotsvety and Good Judgment. Outputs include a Substack newsletter at blog.sentinel-team.org and the Sentinel Minutes podcast. The organization has approximately \\$700K of a ~\\$1.6M target budget as of November 2025 and has grown its newsletter from 552 to 3,936 subscribers since its first major public fundraise in November 2024.",
  "description": "Global catastrophic risk foresight and early warning organization co-founded by Nuño Sempere, providing weekly risk assessments from experienced forecasters",
  "ratings": {
    "focus": 8.5,
    "novelty": 2,
    "rigor": 3,
    "completeness": 6,
    "concreteness": 4.5,
    "actionability": 1.5
  },
  "category": "organizations",
  "subcategory": "epistemic-orgs",
  "clusters": [
    "epistemics",
    "community",
    "ai-safety"
  ],
  "metrics": {
    "wordCount": 2059,
    "tableCount": 5,
    "diagramCount": 1,
    "internalLinks": 23,
    "externalLinks": 14,
    "footnoteCount": 0,
    "bulletRatio": 0.12,
    "sectionCount": 15,
    "hasOverview": true,
    "structuralScore": 15
  },
  "suggestedQuality": 100,
  "updateFrequency": 45,
  "evergreen": true,
  "wordCount": 2059,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 0,
  "backlinkCount": 4,
  "citationHealth": {
    "total": 1,
    "withQuotes": 1,
    "verified": 1,
    "accuracyChecked": 1,
    "accurate": 1,
    "inaccurate": 0,
    "avgScore": 1
  },
  "hallucinationRisk": {
    "level": "high",
    "score": 90,
    "factors": [
      "biographical-claims",
      "no-citations",
      "low-rigor-score",
      "low-quality-score"
    ]
  },
  "entityType": "organization",
  "redundancy": {
    "maxSimilarity": 15,
    "similarPages": [
      {
        "id": "swift-centre",
        "title": "Swift Centre",
        "path": "/knowledge-base/organizations/swift-centre/",
        "similarity": 15
      },
      {
        "id": "nuno-sempere",
        "title": "Nuño Sempere",
        "path": "/knowledge-base/people/nuno-sempere/",
        "similarity": 14
      },
      {
        "id": "ea-longtermist-wins-losses",
        "title": "EA and Longtermist Wins and Losses",
        "path": "/knowledge-base/history/ea-longtermist-wins-losses/",
        "similarity": 13
      },
      {
        "id": "cais",
        "title": "CAIS (Center for AI Safety)",
        "path": "/knowledge-base/organizations/cais/",
        "similarity": 13
      },
      {
        "id": "centre-for-long-term-resilience",
        "title": "Centre for Long-Term Resilience",
        "path": "/knowledge-base/organizations/centre-for-long-term-resilience/",
        "similarity": 13
      }
    ]
  },
  "changeHistory": [
    {
      "date": "2026-02-23",
      "branch": "feat/batch-improve-high-risk-pages",
      "title": "Auto-improve (standard): Sentinel (Catastrophic Risk Foresight)",
      "summary": "Improved \"Sentinel (Catastrophic Risk Foresight)\" via standard pipeline (1475.3s). Quality score: 76. Issues resolved: Footnote [^17] references a podcast transcript as evidence t; The mermaid diagram and the numbered list (steps 1–4) immedi; The 'Overview' section repeats the Scott Alexander quote and.",
      "duration": "1475.3s",
      "cost": "$5-8"
    }
  ],
  "coverage": {
    "passing": 10,
    "total": 13,
    "targets": {
      "tables": 8,
      "diagrams": 1,
      "internalLinks": 16,
      "externalLinks": 10,
      "footnotes": 6,
      "references": 6
    },
    "actuals": {
      "tables": 5,
      "diagrams": 1,
      "internalLinks": 23,
      "externalLinks": 14,
      "footnotes": 0,
      "references": 0,
      "quotesWithQuotes": 1,
      "quotesTotal": 1,
      "accuracyChecked": 1,
      "accuracyTotal": 1
    },
    "items": {
      "llmSummary": "green",
      "schedule": "green",
      "entity": "green",
      "editHistory": "green",
      "overview": "green",
      "tables": "amber",
      "diagrams": "green",
      "internalLinks": "green",
      "externalLinks": "green",
      "footnotes": "red",
      "references": "red",
      "quotes": "green",
      "accuracy": "green"
    },
    "editHistoryCount": 1,
    "ratingsString": "N:2 R:3 A:1.5 C:6"
  },
  "readerRank": 463,
  "researchRank": 308,
  "recommendedScore": 113.92
}
External Links

No external links

Backlinks (4)
id · title · type · relationship
samotsvety · Samotsvety · organization
nuno-sempere · Nuño Sempere · person
vidur-kapur · Vidur Kapur · person
ai-for-human-reasoning-fellowship · AI for Human Reasoning Fellowship · approach
Longterm Wiki