Longterm Wiki

AI Safety Field Building

field-building · crux · Path: /knowledge-base/responses/field-building/
E141 — Entity ID (EID)
← Back to page · 2 backlinks · Quality: 0 · Updated: 2026-03-13
Page Record — database.json — merged from MDX frontmatter + Entity YAML + computed metrics at build time
{
  "id": "field-building",
  "numericId": null,
  "path": "/knowledge-base/responses/field-building/",
  "filePath": "knowledge-base/responses/field-building.mdx",
  "title": "AI Safety Field Building",
  "quality": 0,
  "readerImportance": 67,
  "researchImportance": 25,
  "tacticalValue": null,
  "contentFormat": "article",
  "tractability": null,
  "neglectedness": null,
  "uncertainty": null,
  "causalLevel": null,
  "lastUpdated": "2026-03-13",
  "dateCreated": "2026-02-20",
  "llmSummary": null,
  "description": "Growing the AI safety research community through funding, training, and outreach",
  "ratings": null,
  "category": "responses",
  "subcategory": "field-building",
  "clusters": [
    "ai-safety",
    "community"
  ],
  "metrics": {
    "wordCount": 7,
    "tableCount": 0,
    "diagramCount": 0,
    "internalLinks": 0,
    "externalLinks": 0,
    "footnoteCount": 0,
    "bulletRatio": 0,
    "sectionCount": 0,
    "hasOverview": false,
    "structuralScore": 2
  },
  "suggestedQuality": 13,
  "updateFrequency": null,
  "evergreen": true,
  "wordCount": 7,
  "unconvertedLinks": [],
  "unconvertedLinkCount": 0,
  "convertedLinkCount": 0,
  "backlinkCount": 2,
  "hallucinationRisk": {
    "level": "low",
    "score": 25,
    "factors": [
      "low-quality-score",
      "conceptual-content",
      "minimal-content"
    ]
  },
  "entityType": "crux",
  "redundancy": {
    "maxSimilarity": 0,
    "similarPages": []
  },
  "coverage": {
    "passing": 2,
    "total": 13,
    "targets": {
      "tables": 1,
      "diagrams": 0,
      "internalLinks": 3,
      "externalLinks": 1,
      "footnotes": 2,
      "references": 1
    },
    "actuals": {
      "tables": 0,
      "diagrams": 0,
      "internalLinks": 0,
      "externalLinks": 0,
      "footnotes": 0,
      "references": 30,
      "quotesWithQuotes": 0,
      "quotesTotal": 0,
      "accuracyChecked": 0,
      "accuracyTotal": 0
    },
    "items": {
      "llmSummary": "red",
      "schedule": "red",
      "entity": "green",
      "editHistory": "red",
      "overview": "red",
      "tables": "red",
      "diagrams": "red",
      "internalLinks": "red",
      "externalLinks": "red",
      "footnotes": "red",
      "references": "green",
      "quotes": "red",
      "accuracy": "red"
    }
  },
  "readerRank": 181,
  "researchRank": 455,
  "recommendedScore": 52.76
}
External Links
{
  "lesswrong": "https://www.lesswrong.com/tag/ai-alignment-fieldbuilding",
  "eaForum": "https://forum.effectivealtruism.org/topics/building-the-field-of-ai-safety"
}
Backlinks (2)
id · title · type/relationship
intervention-portfolio · AI Safety Intervention Portfolio · approach
state-capacity-ai-governance · State Capacity and AI Governance · concept
Longterm Wiki