feat: Phase 2 — claims, evidence, attestations, intents, commitments

Claims-Evidence-Attestation (CEA) system with status transitions
(proposed→supported→challenged→superseded), evidence linking
(supports|challenges), and attestations (endorse|dispute|abstain) with
claim_strength materialized view.

Intent matching with need/offer/possibility types, governance_fit
overlap scoring, and complementary type matching.

Commitment lifecycle state machine (proposed→verified→active→
evidence_linked→redeemed, disputed↔resolved) with evidence linking
and enforced transitions.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Committed by: Jeff Emmett — 2026-04-13 18:48:06 -04:00
parent 82ac55624d
commit e1ab6e4ad1
5 changed files with 971 additions and 1 deletions

View File

@@ -10,7 +10,7 @@ from fastapi import FastAPI
 from spore_node.config import SporeConfig
 from spore_node.db.connection import init_pool, close_pool
-from spore_node.api.routers import health, holons, governance
+from spore_node.api.routers import health, holons, governance, claims, intents, commitments

 log = logging.getLogger(__name__)

@@ -20,6 +20,9 @@ def mount_routers(app: FastAPI) -> None:
     app.include_router(health.router)
     app.include_router(holons.router)
     app.include_router(governance.router)
+    app.include_router(claims.router)
+    app.include_router(intents.router)
+    app.include_router(commitments.router)

 def create_standalone_app(cfg: SporeConfig | None = None) -> FastAPI:

View File

@ -0,0 +1,314 @@
"""Claims, Evidence, and Attestations endpoints."""
import json
import uuid
from fastapi import APIRouter, HTTPException, Query
from pydantic import BaseModel
from spore_node.db.connection import get_pool
from spore_node.rid_types import SporeClaim, SporeEvidence, SporeAttestation
router = APIRouter(prefix="/claims", tags=["claims"])

# --- Models ---

# Lifecycle statuses a claim may hold (mirrors the DB CHECK constraint).
# NOTE(review): not referenced in this module — presumably imported elsewhere.
VALID_STATUSES = ("proposed", "supported", "challenged", "superseded")

# Allowed status transitions: current status -> statuses reachable from it.
# 'superseded' is terminal.
VALID_TRANSITIONS = {
    "proposed": ("supported", "challenged", "superseded"),
    "supported": ("challenged", "superseded"),
    "challenged": ("supported", "superseded"),
    "superseded": (),
}
class ClaimCreate(BaseModel):
    """Request body for publishing a new claim."""
    content: str             # the claim text itself
    proposer_rid: str        # RID of the proposing agent
    confidence: float = 0.5  # proposer's self-assessed confidence — range not enforced here
    anchor_type: str = "assertion"
    metadata: dict = {}      # free-form JSONB payload (pydantic copies mutable defaults)
class ClaimResponse(BaseModel):
    """Serialized claim row as returned by the API."""
    id: str              # claim UUID as string
    rid: str             # resource identifier derived from the id
    proposer_rid: str
    content: str
    status: str          # one of VALID_STATUSES
    confidence: float
    anchor_type: str
    metadata: dict
class EvidenceCreate(BaseModel):
    """Request body for attaching evidence to a claim."""
    relation: str  # 'supports' | 'challenges' (validated in the endpoint)
    body: str
    provenance: dict = {}  # source/trace info, free-form
class EvidenceResponse(BaseModel):
    """Serialized evidence row as returned by the API."""
    id: str
    rid: str
    claim_id: str
    relation: str   # 'supports' | 'challenges'
    body: str
    provenance: dict
class AttestationCreate(BaseModel):
    """Request body for attesting to a claim."""
    attester_rid: str
    verdict: str           # 'endorse' | 'dispute' | 'abstain' (validated in the endpoint)
    strength: float = 1.0  # attestation weight — range not enforced here
    reasoning: str = ""
class AttestationResponse(BaseModel):
    """Serialized attestation row as returned by the API."""
    id: str
    rid: str
    claim_id: str
    attester_rid: str
    verdict: str
    strength: float
    reasoning: str
class ClaimStrengthResponse(BaseModel):
    """Aggregated support summary for one claim (from the claim_strength materialized view)."""
    claim_id: str
    claim_rid: str
    status: str
    confidence: float
    endorse_count: int
    dispute_count: int
    abstain_count: int
    supporting_evidence: int
    challenging_evidence: int
    net_sentiment: float  # (endorse - dispute) / total attestations; 0 when none
# --- Claims ---
@router.post("", response_model=ClaimResponse, status_code=201)
async def create_claim(data: ClaimCreate):
    """Insert a new claim (status defaults to 'proposed') and log a creation event."""
    pool = get_pool()
    new_id = uuid.uuid4()
    claim_rid = str(SporeClaim(str(new_id)))
    insert_sql = """
    INSERT INTO claims (id, rid, proposer_rid, content, confidence, anchor_type, metadata)
    VALUES ($1, $2, $3, $4, $5, $6, $7::jsonb)
    RETURNING id, rid, proposer_rid, content, status, confidence, anchor_type, metadata
    """
    row = await pool.fetchrow(
        insert_sql,
        new_id,
        claim_rid,
        data.proposer_rid,
        data.content,
        data.confidence,
        data.anchor_type,
        json.dumps(data.metadata),
    )
    # Event payload carries a truncated preview of the claim text.
    await _log_event(pool, claim_rid, "claim.created", {"content": data.content[:200]})
    return _row_dict(row)
@router.get("", response_model=list[ClaimResponse])
async def list_claims(
    status: str | None = None,
    proposer_rid: str | None = None,
    limit: int = Query(default=50, le=200),
    offset: int = 0,
):
    """List claims, newest first, optionally filtered by status and/or proposer."""
    pool = get_pool()
    clauses: list[str] = []
    params: list = []
    # Build numbered placeholders for whichever filters were supplied.
    for column, value in (("status", status), ("proposer_rid", proposer_rid)):
        if value:
            params.append(value)
            clauses.append(f"{column} = ${len(params)}")
    where = "WHERE " + " AND ".join(clauses) if clauses else ""
    next_idx = len(params) + 1
    rows = await pool.fetch(
        f"SELECT * FROM claims {where} ORDER BY created_at DESC LIMIT ${next_idx} OFFSET ${next_idx + 1}",
        *params, limit, offset,
    )
    return [_row_dict(r) for r in rows]
@router.get("/{claim_id}", response_model=ClaimResponse)
async def get_claim(claim_id: str):
    """Fetch a single claim by UUID.

    Raises 422 for a malformed id and 404 when no such claim exists.
    """
    # uuid.UUID() raises ValueError on malformed input, which previously
    # escaped as an unhandled 500; validate the path parameter up front.
    try:
        cid = uuid.UUID(claim_id)
    except ValueError:
        raise HTTPException(422, "Invalid claim id")
    pool = get_pool()
    row = await pool.fetchrow("SELECT * FROM claims WHERE id = $1", cid)
    if not row:
        raise HTTPException(404, "Claim not found")
    return _row_dict(row)
@router.patch("/{claim_id}/status")
async def transition_claim_status(claim_id: str, new_status: str):
    """Move a claim to `new_status`, enforcing the VALID_TRANSITIONS state machine.

    Raises 422 on a malformed id or an illegal transition, 404 when the claim
    does not exist. Returns the new and previous statuses.
    """
    # Validate up front: a malformed UUID previously escaped as a 500.
    try:
        cid = uuid.UUID(claim_id)
    except ValueError:
        raise HTTPException(422, "Invalid claim id")
    pool = get_pool()
    row = await pool.fetchrow("SELECT status, rid FROM claims WHERE id = $1", cid)
    if not row:
        raise HTTPException(404, "Claim not found")
    current = row["status"]
    if new_status not in VALID_TRANSITIONS.get(current, ()):
        raise HTTPException(
            422,
            f"Cannot transition from '{current}' to '{new_status}'. "
            f"Valid: {VALID_TRANSITIONS.get(current, ())}",
        )
    # NOTE(review): read-then-update is not atomic; concurrent transitions
    # could race — confirm acceptable before multi-writer use.
    await pool.execute(
        "UPDATE claims SET status = $1, updated_at = now() WHERE id = $2",
        new_status, cid,
    )
    await _log_event(pool, row["rid"], "claim.status_changed",
                     {"from": current, "to": new_status})
    return {"status": new_status, "previous": current}
@router.get("/{claim_id}/strength", response_model=ClaimStrengthResponse)
async def get_claim_strength(claim_id: str):
    """Return the aggregated strength summary for one claim.

    Raises 422 for a malformed id and 404 when the claim has no row in the
    claim_strength materialized view.
    """
    # Validate up front: a malformed UUID previously escaped as a 500.
    try:
        cid = uuid.UUID(claim_id)
    except ValueError:
        raise HTTPException(422, "Invalid claim id")
    pool = get_pool()
    # Refresh so the response reflects the latest attestations/evidence.
    # NOTE(review): a full CONCURRENT refresh on every read is expensive;
    # consider refreshing on write or on a schedule once traffic grows.
    await pool.execute("REFRESH MATERIALIZED VIEW CONCURRENTLY claim_strength")
    row = await pool.fetchrow(
        "SELECT * FROM claim_strength WHERE claim_id = $1", cid
    )
    if not row:
        raise HTTPException(404, "Claim not found")
    d = dict(row)
    d["claim_id"] = str(d["claim_id"])
    return d
# --- Evidence ---
@router.post("/{claim_id}/evidence", response_model=EvidenceResponse, status_code=201)
async def add_evidence(claim_id: str, data: EvidenceCreate):
    """Attach a supporting or challenging evidence row to an existing claim.

    Raises 422 on a bad relation or malformed id, 404 when the claim is missing.
    """
    if data.relation not in ("supports", "challenges"):
        raise HTTPException(422, "relation must be 'supports' or 'challenges'")
    # Validate up front: a malformed UUID previously escaped as a 500.
    try:
        cid = uuid.UUID(claim_id)
    except ValueError:
        raise HTTPException(422, "Invalid claim id")
    pool = get_pool()
    # Verify claim exists
    claim = await pool.fetchrow("SELECT rid FROM claims WHERE id = $1", cid)
    if not claim:
        raise HTTPException(404, "Claim not found")
    eid = str(uuid.uuid4())
    rid = str(SporeEvidence(eid))
    row = await pool.fetchrow(
        """
        INSERT INTO evidence (id, rid, claim_id, relation, body, provenance)
        VALUES ($1, $2, $3, $4, $5, $6::jsonb)
        RETURNING id, rid, claim_id, relation, body, provenance
        """,
        uuid.UUID(eid), rid, cid, data.relation,
        data.body, json.dumps(data.provenance),
    )
    # Event kind encodes the relation: evidence.supports / evidence.challenges.
    await _log_event(pool, claim["rid"], f"evidence.{data.relation}",
                     {"evidence_rid": rid})
    return _evidence_dict(row)
@router.get("/{claim_id}/evidence", response_model=list[EvidenceResponse])
async def list_evidence(claim_id: str, relation: str | None = None):
    """List a claim's evidence, oldest first, optionally filtered by relation."""
    pool = get_pool()
    cid = uuid.UUID(claim_id)
    if relation:
        rows = await pool.fetch(
            "SELECT * FROM evidence WHERE claim_id = $1 AND relation = $2 ORDER BY created_at",
            cid,
            relation,
        )
    else:
        rows = await pool.fetch(
            "SELECT * FROM evidence WHERE claim_id = $1 ORDER BY created_at",
            cid,
        )
    return [_evidence_dict(r) for r in rows]
# --- Attestations ---
@router.post("/{claim_id}/attestations", response_model=AttestationResponse, status_code=201)
async def attest_claim(claim_id: str, data: AttestationCreate):
    """Record an endorse/dispute/abstain attestation on a claim.

    One attestation per (claim, attester) is enforced by a DB unique
    constraint; a violation surfaces as 409.
    """
    if data.verdict not in ("endorse", "dispute", "abstain"):
        raise HTTPException(422, "verdict must be 'endorse', 'dispute', or 'abstain'")
    pool = get_pool()
    claim = await pool.fetchrow("SELECT rid FROM claims WHERE id = $1", uuid.UUID(claim_id))
    if not claim:
        raise HTTPException(404, "Claim not found")
    attestation_id = uuid.uuid4()
    attestation_rid = str(SporeAttestation(str(attestation_id)))
    insert_sql = """
    INSERT INTO attestations (id, rid, claim_id, attester_rid, verdict, strength, reasoning)
    VALUES ($1, $2, $3, $4, $5, $6, $7)
    RETURNING id, rid, claim_id, attester_rid, verdict, strength, reasoning
    """
    try:
        row = await pool.fetchrow(
            insert_sql,
            attestation_id,
            attestation_rid,
            uuid.UUID(claim_id),
            data.attester_rid,
            data.verdict,
            data.strength,
            data.reasoning,
        )
    except Exception as exc:
        # Driver-level unique-violation check (message-based, driver-agnostic).
        if "unique" in str(exc).lower():
            raise HTTPException(409, f"Attester '{data.attester_rid}' already attested this claim")
        raise
    await _log_event(pool, claim["rid"], f"attestation.{data.verdict}",
                     {"attester": data.attester_rid, "strength": data.strength})
    return _attestation_dict(row)
@router.get("/{claim_id}/attestations", response_model=list[AttestationResponse])
async def list_attestations(claim_id: str, verdict: str | None = None):
    """List a claim's attestations, oldest first, optionally filtered by verdict."""
    pool = get_pool()
    cid = uuid.UUID(claim_id)
    if verdict:
        rows = await pool.fetch(
            "SELECT * FROM attestations WHERE claim_id = $1 AND verdict = $2 ORDER BY created_at",
            cid,
            verdict,
        )
    else:
        rows = await pool.fetch(
            "SELECT * FROM attestations WHERE claim_id = $1 ORDER BY created_at",
            cid,
        )
    return [_attestation_dict(r) for r in rows]
# --- Helpers ---
def _row_dict(row) -> dict:
d = dict(row)
d["id"] = str(d["id"])
if isinstance(d.get("metadata"), str):
d["metadata"] = json.loads(d["metadata"])
return d
def _evidence_dict(row) -> dict:
d = dict(row)
d["id"] = str(d["id"])
d["claim_id"] = str(d["claim_id"])
if isinstance(d.get("provenance"), str):
d["provenance"] = json.loads(d["provenance"])
return d
def _attestation_dict(row) -> dict:
d = dict(row)
d["id"] = str(d["id"])
d["claim_id"] = str(d["claim_id"])
return d
async def _log_event(pool, entity_rid: str, event_kind: str, payload: dict):
await pool.execute(
"INSERT INTO events (entity_rid, event_kind, payload) VALUES ($1, $2, $3::jsonb)",
entity_rid, event_kind, json.dumps(payload),
)

View File

@ -0,0 +1,226 @@
"""Commitment lifecycle endpoints with enforced state machine."""
import json
import uuid
from fastapi import APIRouter, HTTPException, Query
from pydantic import BaseModel
from spore_node.db.connection import get_pool
from spore_node.rid_types import SporeCommitment
router = APIRouter(prefix="/commitments", tags=["commitments"])

# State machine transitions
# Allowed lifecycle moves: current state -> states reachable from it.
# 'redeemed' and 'cancelled' are terminal; disputes may resolve back into
# 'active' or straight to 'redeemed'.
VALID_TRANSITIONS = {
    "proposed": ("verified", "cancelled"),
    "verified": ("active", "cancelled"),
    "active": ("evidence_linked", "disputed", "cancelled"),
    "evidence_linked": ("redeemed", "disputed"),
    "redeemed": (),
    "disputed": ("resolved", "cancelled"),
    "resolved": ("active", "redeemed"),
    "cancelled": (),
}
class CommitmentCreate(BaseModel):
    """Request body for proposing a commitment."""
    title: str
    description: str = ""
    proposer_rid: str
    acceptor_rid: str | None = None  # presumably set before verification — confirm with callers
    settlement_type: str = "attestation"
    terms: dict = {}     # free-form JSONB terms
    metadata: dict = {}
class CommitmentResponse(BaseModel):
    """Serialized commitment row as returned by the API."""
    id: str              # commitment UUID as string
    rid: str
    title: str
    description: str
    state: str           # one of the VALID_TRANSITIONS keys
    proposer_rid: str
    acceptor_rid: str | None
    settlement_type: str
    terms: dict
    metadata: dict
class EvidenceLinkRequest(BaseModel):
    """Request body for linking an existing evidence row to a commitment."""
    evidence_id: str  # UUID of an existing evidence row
@router.post("", response_model=CommitmentResponse, status_code=201)
async def create_commitment(data: CommitmentCreate):
    """Create a commitment in the initial 'proposed' state and log the event."""
    pool = get_pool()
    new_id = uuid.uuid4()
    commitment_rid = str(SporeCommitment(str(new_id)))
    row = await pool.fetchrow(
        """
        INSERT INTO commitments (id, rid, title, description, proposer_rid, acceptor_rid,
                                 settlement_type, terms, metadata)
        VALUES ($1, $2, $3, $4, $5, $6, $7, $8::jsonb, $9::jsonb)
        RETURNING *
        """,
        new_id,
        commitment_rid,
        data.title,
        data.description,
        data.proposer_rid,
        data.acceptor_rid,
        data.settlement_type,
        json.dumps(data.terms),
        json.dumps(data.metadata),
    )
    await _log_event(pool, commitment_rid, "commitment.created",
                     {"title": data.title, "proposer": data.proposer_rid})
    return _row_dict(row)
@router.get("", response_model=list[CommitmentResponse])
async def list_commitments(
    state: str | None = None,
    proposer_rid: str | None = None,
    acceptor_rid: str | None = None,
    limit: int = Query(default=50, le=200),
    offset: int = 0,
):
    """List commitments, newest first, optionally filtered by state and/or parties."""
    pool = get_pool()
    clauses: list[str] = []
    params: list = []
    # Build numbered placeholders for whichever filters were supplied.
    filters = (
        ("state", state),
        ("proposer_rid", proposer_rid),
        ("acceptor_rid", acceptor_rid),
    )
    for column, value in filters:
        if value:
            params.append(value)
            clauses.append(f"{column} = ${len(params)}")
    where = "WHERE " + " AND ".join(clauses) if clauses else ""
    next_idx = len(params) + 1
    rows = await pool.fetch(
        f"SELECT * FROM commitments {where} ORDER BY created_at DESC LIMIT ${next_idx} OFFSET ${next_idx + 1}",
        *params, limit, offset,
    )
    return [_row_dict(r) for r in rows]
@router.get("/{commitment_id}", response_model=CommitmentResponse)
async def get_commitment(commitment_id: str):
    """Fetch a single commitment by UUID.

    Raises 422 for a malformed id and 404 when no such commitment exists.
    """
    # uuid.UUID() raises ValueError on malformed input, which previously
    # escaped as an unhandled 500; validate the path parameter up front.
    try:
        cid = uuid.UUID(commitment_id)
    except ValueError:
        raise HTTPException(422, "Invalid commitment id")
    pool = get_pool()
    row = await pool.fetchrow(
        "SELECT * FROM commitments WHERE id = $1", cid
    )
    if not row:
        raise HTTPException(404, "Commitment not found")
    return _row_dict(row)
@router.patch("/{commitment_id}/state")
async def transition_state(commitment_id: str, new_state: str):
    """Advance a commitment's lifecycle state through the VALID_TRANSITIONS machine.

    Raises 422 on a malformed id or an illegal transition, 404 when missing.
    Returns the new and previous states.
    """
    # Validate up front: a malformed UUID previously escaped as a 500.
    try:
        cid = uuid.UUID(commitment_id)
    except ValueError:
        raise HTTPException(422, "Invalid commitment id")
    pool = get_pool()
    row = await pool.fetchrow(
        "SELECT state, rid FROM commitments WHERE id = $1", cid
    )
    if not row:
        raise HTTPException(404, "Commitment not found")
    current = row["state"]
    valid = VALID_TRANSITIONS.get(current, ())
    if new_state not in valid:
        raise HTTPException(
            422,
            f"Cannot transition from '{current}' to '{new_state}'. Valid: {valid}",
        )
    # NOTE(review): read-then-update is not atomic; concurrent transitions
    # could race — confirm acceptable before multi-writer use.
    await pool.execute(
        "UPDATE commitments SET state = $1, updated_at = now() WHERE id = $2",
        new_state, cid,
    )
    await _log_event(pool, row["rid"], "commitment.state_changed",
                     {"from": current, "to": new_state})
    return {"state": new_state, "previous": current}
@router.post("/{commitment_id}/evidence", status_code=201)
async def link_evidence(commitment_id: str, data: EvidenceLinkRequest):
    """Link an existing evidence row to a commitment.

    Only allowed while the commitment is 'active' or 'evidence_linked'.
    The first link auto-advances 'active' -> 'evidence_linked'.
    Raises 404 when either side is missing, 422 for a disallowed state,
    and 409 when the pair is already linked.
    """
    pool = get_pool()
    # Verify commitment exists and is in valid state
    commitment = await pool.fetchrow(
        "SELECT state, rid FROM commitments WHERE id = $1", uuid.UUID(commitment_id)
    )
    if not commitment:
        raise HTTPException(404, "Commitment not found")
    if commitment["state"] not in ("active", "evidence_linked"):
        raise HTTPException(422, f"Cannot link evidence in state '{commitment['state']}'")
    # Verify evidence exists
    evidence = await pool.fetchrow(
        "SELECT id FROM evidence WHERE id = $1", uuid.UUID(data.evidence_id)
    )
    if not evidence:
        raise HTTPException(404, "Evidence not found")
    try:
        await pool.execute(
            "INSERT INTO commitment_evidence (commitment_id, evidence_id) VALUES ($1, $2)",
            uuid.UUID(commitment_id), uuid.UUID(data.evidence_id),
        )
    except Exception as e:
        # Message-based duplicate detection keeps this driver-agnostic.
        if "unique" in str(e).lower() or "duplicate" in str(e).lower():
            raise HTTPException(409, "Evidence already linked")
        raise
    # Auto-transition to evidence_linked if currently active
    # NOTE(review): the insert above and this update are separate statements,
    # not one transaction — a crash between them leaves a linked row with the
    # commitment still 'active'. Confirm whether that is acceptable.
    if commitment["state"] == "active":
        await pool.execute(
            "UPDATE commitments SET state = 'evidence_linked', updated_at = now() WHERE id = $1",
            uuid.UUID(commitment_id),
        )
    await _log_event(pool, commitment["rid"], "commitment.evidence_linked",
                     {"evidence_id": data.evidence_id})
    return {"linked": True, "commitment_id": commitment_id, "evidence_id": data.evidence_id}
@router.get("/{commitment_id}/evidence")
async def list_commitment_evidence(commitment_id: str):
    """Return evidence rows linked to a commitment, ordered by link time.

    Raises 422 for a malformed id. Returns [] when the commitment does not
    exist or has no linked evidence (no 404, matching prior behavior).
    """
    # Validate up front: a malformed UUID previously escaped as a 500.
    try:
        cid = uuid.UUID(commitment_id)
    except ValueError:
        raise HTTPException(422, "Invalid commitment id")
    pool = get_pool()
    rows = await pool.fetch(
        """
        SELECT e.id, e.rid, e.relation, e.body, e.provenance, ce.linked_at
        FROM commitment_evidence ce
        JOIN evidence e ON e.id = ce.evidence_id
        WHERE ce.commitment_id = $1
        ORDER BY ce.linked_at
        """,
        cid,
    )
    result = []
    for r in rows:
        d = dict(r)
        d["id"] = str(d["id"])  # UUID -> string for JSON
        if isinstance(d.get("provenance"), str):
            d["provenance"] = json.loads(d["provenance"])
        result.append(d)
    return result
# --- Helpers ---
def _row_dict(row) -> dict:
d = dict(row)
d["id"] = str(d["id"])
for k in ("terms", "metadata"):
if isinstance(d.get(k), str):
d[k] = json.loads(d[k])
return d
async def _log_event(pool, entity_rid: str, event_kind: str, payload: dict):
await pool.execute(
"INSERT INTO events (entity_rid, event_kind, payload) VALUES ($1, $2, $3::jsonb)",
entity_rid, event_kind, json.dumps(payload),
)

View File

@ -0,0 +1,261 @@
"""Intent publishing, matching, and lifecycle endpoints."""
import json
import uuid
from fastapi import APIRouter, HTTPException, Query
from pydantic import BaseModel
from spore_node.db.connection import get_pool
from spore_node.rid_types import SporeIntent
router = APIRouter(prefix="/intents", tags=["intents"])

# Intent lifecycle states (mirrors the DB CHECK constraint).
VALID_STATES = ("open", "matched", "committed", "expired", "withdrawn")

# Allowed state transitions: current state -> states reachable from it.
# 'matched' may fall back to 'open'; 'expired'/'withdrawn' are terminal.
VALID_TRANSITIONS = {
    "open": ("matched", "expired", "withdrawn"),
    "matched": ("committed", "open", "withdrawn"),
    "committed": ("expired",),
    "expired": (),
    "withdrawn": (),
}

# Which intent_type each type matches against: needs pair with offers
# (and vice versa); possibilities pair with other possibilities.
COMPLEMENTARY_TYPES = {
    "need": "offer",
    "offer": "need",
    "possibility": "possibility",
}
class IntentCreate(BaseModel):
    """Request body for publishing an intent."""
    publisher_rid: str
    title: str
    description: str = ""
    intent_type: str  # need | offer | possibility
    capacity: dict = {}              # free-form JSONB
    timing: dict = {}                # free-form JSONB
    governance_fit: list[str] = []   # tags used for overlap scoring in matching
    metadata: dict = {}
class IntentResponse(BaseModel):
    """Serialized intent row as returned by the API."""
    id: str              # intent UUID as string
    rid: str
    publisher_rid: str
    title: str
    description: str
    intent_type: str     # need | offer | possibility
    capacity: dict
    timing: dict
    governance_fit: list[str]
    state: str           # one of VALID_STATES
    metadata: dict
class MatchResponse(BaseModel):
    """One stored intent-to-intent match (ids ordered a < b in the DB)."""
    intent_a_id: str
    intent_b_id: str
    similarity: float      # combined governance/text score in [0, 1]
    match_details: dict    # per-component scores (gov_score, text_score)
@router.post("", response_model=IntentResponse, status_code=201)
async def publish_intent(data: IntentCreate):
    """Publish a new intent (state defaults to 'open'), log it, and match inline."""
    if data.intent_type not in ("need", "offer", "possibility"):
        raise HTTPException(422, "intent_type must be 'need', 'offer', or 'possibility'")
    pool = get_pool()
    new_id = uuid.uuid4()
    intent_rid = str(SporeIntent(str(new_id)))
    row = await pool.fetchrow(
        """
        INSERT INTO intents (id, rid, publisher_rid, title, description, intent_type,
                             capacity, timing, governance_fit, metadata)
        VALUES ($1, $2, $3, $4, $5, $6, $7::jsonb, $8::jsonb, $9, $10::jsonb)
        RETURNING *
        """,
        new_id,
        intent_rid,
        data.publisher_rid,
        data.title,
        data.description,
        data.intent_type,
        json.dumps(data.capacity),
        json.dumps(data.timing),
        data.governance_fit,
        json.dumps(data.metadata),
    )
    await _log_event(pool, intent_rid, "intent.published",
                     {"type": data.intent_type, "title": data.title})
    # Trigger matching (inline for now, ARQ worker in production)
    await _compute_matches(pool, new_id, data.intent_type)
    return _row_dict(row)
@router.get("", response_model=list[IntentResponse])
async def list_intents(
    intent_type: str | None = None,
    state: str | None = None,
    publisher_rid: str | None = None,
    limit: int = Query(default=50, le=200),
    offset: int = 0,
):
    """List intents, newest first, optionally filtered by type, state, publisher."""
    pool = get_pool()
    clauses: list[str] = []
    params: list = []
    # Build numbered placeholders for whichever filters were supplied.
    filters = (
        ("intent_type", intent_type),
        ("state", state),
        ("publisher_rid", publisher_rid),
    )
    for column, value in filters:
        if value:
            params.append(value)
            clauses.append(f"{column} = ${len(params)}")
    where = "WHERE " + " AND ".join(clauses) if clauses else ""
    next_idx = len(params) + 1
    rows = await pool.fetch(
        f"SELECT * FROM intents {where} ORDER BY created_at DESC LIMIT ${next_idx} OFFSET ${next_idx + 1}",
        *params, limit, offset,
    )
    return [_row_dict(r) for r in rows]
@router.get("/{intent_id}", response_model=IntentResponse)
async def get_intent(intent_id: str):
    """Fetch a single intent by UUID.

    Raises 422 for a malformed id and 404 when no such intent exists.
    """
    # uuid.UUID() raises ValueError on malformed input, which previously
    # escaped as an unhandled 500; validate the path parameter up front.
    try:
        iid = uuid.UUID(intent_id)
    except ValueError:
        raise HTTPException(422, "Invalid intent id")
    pool = get_pool()
    row = await pool.fetchrow("SELECT * FROM intents WHERE id = $1", iid)
    if not row:
        raise HTTPException(404, "Intent not found")
    return _row_dict(row)
@router.patch("/{intent_id}/state")
async def transition_intent_state(intent_id: str, new_state: str):
    """Move an intent to `new_state`, enforcing the VALID_TRANSITIONS machine.

    Raises 422 on a malformed id or an illegal transition, 404 when missing.
    Returns the new and previous states.
    """
    # Validate up front: a malformed UUID previously escaped as a 500.
    try:
        iid = uuid.UUID(intent_id)
    except ValueError:
        raise HTTPException(422, "Invalid intent id")
    pool = get_pool()
    row = await pool.fetchrow("SELECT state, rid FROM intents WHERE id = $1", iid)
    if not row:
        raise HTTPException(404, "Intent not found")
    current = row["state"]
    if new_state not in VALID_TRANSITIONS.get(current, ()):
        raise HTTPException(
            422,
            f"Cannot transition from '{current}' to '{new_state}'. "
            f"Valid: {VALID_TRANSITIONS.get(current, ())}",
        )
    # NOTE(review): read-then-update is not atomic; concurrent transitions
    # could race — confirm acceptable before multi-writer use.
    await pool.execute(
        "UPDATE intents SET state = $1, updated_at = now() WHERE id = $2",
        new_state, iid,
    )
    await _log_event(pool, row["rid"], "intent.state_changed",
                     {"from": current, "to": new_state})
    return {"state": new_state, "previous": current}
@router.get("/{intent_id}/matches", response_model=list[MatchResponse])
async def get_matches(intent_id: str, min_similarity: float = 0.0):
    """List stored matches touching this intent, strongest first.

    Raises 422 for a malformed id; returns [] when there are no matches.
    """
    # Validate up front: a malformed UUID previously escaped as a 500.
    try:
        uid = uuid.UUID(intent_id)
    except ValueError:
        raise HTTPException(422, "Invalid intent id")
    pool = get_pool()
    # The intent may appear on either side of a stored pair (a < b ordering).
    rows = await pool.fetch(
        """
        SELECT * FROM intent_matches
        WHERE (intent_a_id = $1 OR intent_b_id = $1) AND similarity >= $2
        ORDER BY similarity DESC
        """,
        uid, min_similarity,
    )
    return [_match_dict(r) for r in rows]
# --- Matching logic ---
async def _compute_matches(pool, intent_id: uuid.UUID, intent_type: str):
    """Compute matches between this intent and complementary open intents.

    Scoring: governance_fit overlap (Jaccard, weight 0.6) plus title/description
    word overlap (weight 0.4) as a proxy until embeddings are available. Pairs
    scoring below 0.05 are skipped; kept pairs are upserted into intent_matches
    with ids ordered to satisfy the table's (a, b) uniqueness and a < b CHECK.
    """
    complement = COMPLEMENTARY_TYPES.get(intent_type, "")
    if not complement:
        return
    intent = await pool.fetchrow("SELECT * FROM intents WHERE id = $1", intent_id)
    if not intent:
        return
    # Find complementary open intents
    candidates = await pool.fetch(
        """
        SELECT * FROM intents
        WHERE intent_type = $1 AND state = 'open' AND id != $2
        """,
        complement, intent_id,
    )
    my_gov = set(intent["governance_fit"] or [])
    # Hoisted: this intent's word bag is loop-invariant (the original
    # recomputed it for every candidate).
    my_words = set(intent["title"].lower().split() + intent["description"].lower().split())
    for cand in candidates:
        cand_gov = set(cand["governance_fit"] or [])
        # Governance fit overlap: Jaccard when both sides declare tags,
        # neutral 0.5 when neither does, weak 0.1 when only one side does.
        if my_gov and cand_gov:
            gov_score = len(my_gov & cand_gov) / len(my_gov | cand_gov)
        elif not my_gov and not cand_gov:
            gov_score = 0.5  # neutral
        else:
            gov_score = 0.1
        # Simple text overlap as proxy until embeddings available
        cand_words = set(cand["title"].lower().split() + cand["description"].lower().split())
        text_score = len(my_words & cand_words) / max(len(my_words | cand_words), 1)
        similarity = 0.6 * gov_score + 0.4 * text_score
        if similarity < 0.05:
            continue
        # Ensure consistent ordering for unique constraint. Sorting by the
        # canonical hex string is assumed to agree with Postgres' byte-wise
        # UUID ordering for the a < b CHECK — TODO confirm.
        a_id, b_id = sorted([intent_id, cand["id"]], key=str)
        await pool.execute(
            """
            INSERT INTO intent_matches (intent_a_id, intent_b_id, similarity, match_details)
            VALUES ($1, $2, $3, $4::jsonb)
            ON CONFLICT (intent_a_id, intent_b_id)
            DO UPDATE SET similarity = $3, match_details = $4::jsonb
            """,
            a_id, b_id, round(similarity, 4),
            json.dumps({"gov_score": round(gov_score, 4), "text_score": round(text_score, 4)}),
        )
# --- Helpers ---
def _row_dict(row) -> dict:
d = dict(row)
d["id"] = str(d["id"])
for k in ("capacity", "timing", "metadata"):
if isinstance(d.get(k), str):
d[k] = json.loads(d[k])
if d.get("governance_fit") is None:
d["governance_fit"] = []
return d
def _match_dict(row) -> dict:
d = dict(row)
d.pop("id", None)
d["intent_a_id"] = str(d["intent_a_id"])
d["intent_b_id"] = str(d["intent_b_id"])
if isinstance(d.get("match_details"), str):
d["match_details"] = json.loads(d["match_details"])
return d
async def _log_event(pool, entity_rid: str, event_kind: str, payload: dict):
await pool.execute(
"INSERT INTO events (entity_rid, event_kind, payload) VALUES ($1, $2, $3::jsonb)",
entity_rid, event_kind, json.dumps(payload),
)

View File

@ -0,0 +1,166 @@
-- Spore Agent Commons — Core Primitives
-- Claims, Evidence, Attestations, Intents, Commitments
-- ============================================================
-- Claims (knowledge claims in the commons)
-- ============================================================
-- One row per knowledge assertion proposed by an agent (proposer_rid).
-- status mirrors the API state machine: proposed -> supported/challenged
-- -> superseded (terminal).
CREATE TABLE IF NOT EXISTS claims (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
    rid TEXT UNIQUE NOT NULL,                  -- resource identifier string
    proposer_rid TEXT NOT NULL,
    content TEXT NOT NULL,
    status TEXT NOT NULL DEFAULT 'proposed'
        CHECK (status IN ('proposed', 'supported', 'challenged', 'superseded')),
    confidence FLOAT DEFAULT 0.5,              -- proposer-supplied; no range CHECK
    anchor_type TEXT DEFAULT 'assertion',
    embedding vector(1024),                    -- pgvector column; presumably populated asynchronously — confirm
    metadata JSONB NOT NULL DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
);
CREATE INDEX IF NOT EXISTS idx_claims_proposer ON claims (proposer_rid);
CREATE INDEX IF NOT EXISTS idx_claims_status ON claims (status);
CREATE INDEX IF NOT EXISTS idx_claims_metadata ON claims USING GIN (metadata);
-- ============================================================
-- Evidence (supports or challenges a claim)
-- ============================================================
-- Each row backs exactly one claim; deleting the claim cascades here.
CREATE TABLE IF NOT EXISTS evidence (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
    rid TEXT UNIQUE NOT NULL,
    claim_id UUID NOT NULL REFERENCES claims(id) ON DELETE CASCADE,
    relation TEXT NOT NULL CHECK (relation IN ('supports', 'challenges')),
    body TEXT NOT NULL,
    provenance JSONB NOT NULL DEFAULT '{}',    -- free-form source/trace info
    embedding vector(1024),                    -- pgvector column; presumably populated asynchronously — confirm
    created_at TIMESTAMPTZ NOT NULL DEFAULT now()
);
CREATE INDEX IF NOT EXISTS idx_evidence_claim ON evidence (claim_id);
CREATE INDEX IF NOT EXISTS idx_evidence_relation ON evidence (relation);
-- ============================================================
-- Attestations (endorse/dispute/abstain on a claim)
-- ============================================================
-- One verdict per (claim, attester): the UNIQUE constraint below backs the
-- API's 409 response on repeat attestation.
CREATE TABLE IF NOT EXISTS attestations (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
    rid TEXT UNIQUE NOT NULL,
    claim_id UUID NOT NULL REFERENCES claims(id) ON DELETE CASCADE,
    attester_rid TEXT NOT NULL,
    verdict TEXT NOT NULL CHECK (verdict IN ('endorse', 'dispute', 'abstain')),
    strength FLOAT NOT NULL DEFAULT 1.0,       -- attestation weight; no range CHECK
    reasoning TEXT DEFAULT '',
    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    UNIQUE (claim_id, attester_rid)
);
CREATE INDEX IF NOT EXISTS idx_attestations_claim ON attestations (claim_id);
CREATE INDEX IF NOT EXISTS idx_attestations_attester ON attestations (attester_rid);
-- ============================================================
-- Claim strength (materialized view)
-- ============================================================
-- One summary row per claim. The double LEFT JOIN yields one row per
-- (attestation, evidence) pair, so every count uses DISTINCT.
-- net_sentiment = (endorse - dispute) / total attestations, 0 when none.
CREATE MATERIALIZED VIEW IF NOT EXISTS claim_strength AS
SELECT
    c.id AS claim_id,
    c.rid AS claim_rid,
    c.status,
    c.confidence,
    COUNT(DISTINCT CASE WHEN a.verdict = 'endorse' THEN a.id END) AS endorse_count,
    COUNT(DISTINCT CASE WHEN a.verdict = 'dispute' THEN a.id END) AS dispute_count,
    COUNT(DISTINCT CASE WHEN a.verdict = 'abstain' THEN a.id END) AS abstain_count,
    COUNT(DISTINCT CASE WHEN e.relation = 'supports' THEN e.id END) AS supporting_evidence,
    COUNT(DISTINCT CASE WHEN e.relation = 'challenges' THEN e.id END) AS challenging_evidence,
    COALESCE(
        (COUNT(DISTINCT CASE WHEN a.verdict = 'endorse' THEN a.id END)::float -
         COUNT(DISTINCT CASE WHEN a.verdict = 'dispute' THEN a.id END)::float) /
        NULLIF(COUNT(DISTINCT a.id)::float, 0),
        0
    ) AS net_sentiment,
    -- Fixed: the original wrapped this GREATEST(...) in an extra MAX(),
    -- producing MAX(MAX(...)) — Postgres rejects nested aggregate calls.
    -- GREATEST over the two aggregated maxima is the intended expression.
    -- (c.created_at is legal here via functional dependency on the PK c.id.)
    GREATEST(
        COALESCE(MAX(a.created_at), c.created_at),
        COALESCE(MAX(e.created_at), c.created_at)
    ) AS last_activity
FROM claims c
LEFT JOIN attestations a ON a.claim_id = c.id
LEFT JOIN evidence e ON e.claim_id = c.id
GROUP BY c.id, c.rid, c.status, c.confidence;
-- Unique index required for REFRESH MATERIALIZED VIEW CONCURRENTLY.
CREATE UNIQUE INDEX IF NOT EXISTS idx_claim_strength_id ON claim_strength (claim_id);
-- ============================================================
-- Intents (need/offer/possibility)
-- ============================================================
-- Published needs/offers/possibilities; state mirrors the API machine
-- (open -> matched -> committed, or expired/withdrawn).
CREATE TABLE IF NOT EXISTS intents (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
    rid TEXT UNIQUE NOT NULL,
    publisher_rid TEXT NOT NULL,
    title TEXT NOT NULL,
    description TEXT NOT NULL DEFAULT '',
    intent_type TEXT NOT NULL CHECK (intent_type IN ('need', 'offer', 'possibility')),
    capacity JSONB NOT NULL DEFAULT '{}',
    timing JSONB NOT NULL DEFAULT '{}',
    governance_fit TEXT[] DEFAULT '{}',        -- tags scored by overlap during matching
    state TEXT NOT NULL DEFAULT 'open'
        CHECK (state IN ('open', 'matched', 'committed', 'expired', 'withdrawn')),
    embedding vector(1024),                    -- pgvector column; presumably populated asynchronously — confirm
    metadata JSONB NOT NULL DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
);
CREATE INDEX IF NOT EXISTS idx_intents_publisher ON intents (publisher_rid);
CREATE INDEX IF NOT EXISTS idx_intents_type ON intents (intent_type);
CREATE INDEX IF NOT EXISTS idx_intents_state ON intents (state);
-- ============================================================
-- Intent matches (computed similarity pairs)
-- ============================================================
-- Upserted by the matcher; the a < b CHECK plus the UNIQUE pair constraint
-- store each undirected pair exactly once (the API sorts ids before insert).
CREATE TABLE IF NOT EXISTS intent_matches (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
    intent_a_id UUID NOT NULL REFERENCES intents(id) ON DELETE CASCADE,
    intent_b_id UUID NOT NULL REFERENCES intents(id) ON DELETE CASCADE,
    similarity FLOAT NOT NULL,                 -- combined governance/text score
    match_details JSONB NOT NULL DEFAULT '{}', -- per-component scores
    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    UNIQUE (intent_a_id, intent_b_id),
    CHECK (intent_a_id < intent_b_id)
);
CREATE INDEX IF NOT EXISTS idx_intent_matches_a ON intent_matches (intent_a_id);
CREATE INDEX IF NOT EXISTS idx_intent_matches_b ON intent_matches (intent_b_id);
-- ============================================================
-- Commitments (lifecycle state machine)
-- ============================================================
-- state mirrors the API machine: proposed -> verified -> active ->
-- evidence_linked -> redeemed, with disputed <-> resolved and cancelled
-- reachable from most non-terminal states.
CREATE TABLE IF NOT EXISTS commitments (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
    rid TEXT UNIQUE NOT NULL,
    title TEXT NOT NULL,
    description TEXT NOT NULL DEFAULT '',
    state TEXT NOT NULL DEFAULT 'proposed'
        CHECK (state IN ('proposed', 'verified', 'active', 'evidence_linked',
                         'redeemed', 'disputed', 'resolved', 'cancelled')),
    proposer_rid TEXT NOT NULL,
    acceptor_rid TEXT,                         -- nullable until a party accepts (assumed)
    settlement_type TEXT DEFAULT 'attestation',
    terms JSONB NOT NULL DEFAULT '{}',
    metadata JSONB NOT NULL DEFAULT '{}',
    created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
);
CREATE INDEX IF NOT EXISTS idx_commitments_state ON commitments (state);
CREATE INDEX IF NOT EXISTS idx_commitments_proposer ON commitments (proposer_rid);
CREATE INDEX IF NOT EXISTS idx_commitments_acceptor ON commitments (acceptor_rid);
-- ============================================================
-- Commitment-Evidence links
-- ============================================================
-- Join table recording which evidence rows substantiate which commitments.
-- The composite PK doubles as the uniqueness guard behind the API's 409.
CREATE TABLE IF NOT EXISTS commitment_evidence (
    commitment_id UUID NOT NULL REFERENCES commitments(id) ON DELETE CASCADE,
    evidence_id UUID NOT NULL REFERENCES evidence(id) ON DELETE CASCADE,
    linked_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    PRIMARY KEY (commitment_id, evidence_id)
);