
Agent Infrastructure

The Paradigm SDK provides comprehensive infrastructure for multi-agent collaboration with full traceability and auditing. Multiple AI agents can work on identity construction together while maintaining complete transparency about who contributed what.

Overview

Agent infrastructure supports:

  • Multi-agent editing - Multiple agents can contribute to the same node
  • Full traceability - Every change tracked with agent metadata
  • Node ledger - Complete history of all agents that contributed
  • Confidence scoring - Agents report confidence in their contributions
  • Session tracking - Group related agent actions together

Core Concepts

Agent Metadata

Every node tracks the agent that last modified it with comprehensive metadata:

{
  "agent_id": "0c59fea4-fe7e-41af-a246-e3eb1819be13",
  "agent_name": "Memory Extraction Agent v2.1",
  "agent_type": "experience_extractor",
  "agent_version": "2.1.0",
  "processing_time_ms": 1850,
  "confidence_score": 0.92,
  "source_models": ["gpt-4", "claude-3-sonnet"],
  "extraction_method": "narrative_analysis",
  "validation_status": "verified",
  "timestamp": "2024-01-15T10:30:00Z",
  "session_id": "sess_exp_1234567890",
  "contribution_details": {
    "fields_extracted": ["when", "where", "significance"],
    "verification_passed": true,
    "quality_score": 0.92,
    "extraction_notes": "High emotional significance detected"
  }
}

Node Ledger

The node ledger tracks all agents that have ever contributed to a node, not just the most recent:

{
  "node_id": "223845bf-4397-4135-899f-c72f84289ca",
  "total_agents": 3,
  "agents": [
    {
      "agent_id": "agent_001",
      "agent_name": "Memory Extraction Agent v2.1",
      "agent_type": "experience_extractor",
      "contribution_count": 1,
      "last_contribution": "2024-01-15T10:30:00Z"
    },
    {
      "agent_id": "agent_002",
      "agent_name": "Content Enhancement Agent v2.5",
      "agent_type": "content_enhancer",
      "contribution_count": 2,
      "last_contribution": "2024-01-16T14:20:00Z"
    },
    {
      "agent_id": "agent_003",
      "agent_name": "Quality Assurance Agent v3.1",
      "agent_type": "qa_validator",
      "contribution_count": 1,
      "last_contribution": "2024-01-17T09:15:00Z"
    }
  ]
}

Node History

Every change to a node creates a history entry with:

  • Full snapshot of node state at that moment
  • Delta of changes (what fields changed)
  • Agent metadata at time of change
  • Version number for tracking evolution

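A single history entry combines these pieces. For illustration, an entry might look like the sketch below; the field names (version, change_type, changed_fields, changed_at, agent_metadata) match those used in the history queries later on this page, and the values are placeholders:

{
  "version": 2,
  "change_type": "update",
  "changed_at": "2024-01-16T14:20:00Z",
  "changed_fields": {
    "title": "First day teaching my own university class",
    "meaning_level": "IDENTITY"
  },
  "agent_metadata": {
    "agent_id": "agent_002",
    "agent_name": "Content Enhancement Agent v2.5",
    "confidence_score": 0.88
  }
}
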
Creating Nodes with Agents

Basic Node Creation

node = client.nodes.create(
    title="First day teaching my own class",
    node_type="EXPERIENCE",
    meaning_level="IDENTITY",
    metadata={
        "properties": {
            "when": {
                "type": "instance",
                "status": "done",
                "start": "2020-09-15"
            },
            "significance": "Realized teaching was my calling"
        }
    },
    # Agent metadata
    agent_metadata={
        "agent_id": "agent_001",
        "agent_name": "Memory Extraction Agent v2.1",
        "agent_type": "experience_extractor",
        "agent_version": "2.1.0",
        "confidence_score": 0.92,
        "source_models": ["gpt-4"],
        "extraction_method": "narrative_analysis",
        "timestamp": "2024-01-15T10:30:00Z",
        "session_id": "sess_001"
    },
    last_modified_by_agent_id="agent_001"
)

Multi-Agent Collaboration

Multiple agents can refine the same node:

# Agent 1 creates initial node
node = client.nodes.create(
    title="Started teaching in 2020",
    node_type="EXPERIENCE",
    meaning_level="CONTEXT",
    agent_metadata={
        "agent_id": "agent_001",
        "agent_name": "Extraction Agent",
        "confidence_score": 0.75
    },
    last_modified_by_agent_id="agent_001"
)

# Agent 2 enhances with more context
client.nodes.update(
    node_id=node['id'],
    title="First day teaching my own university class",
    meaning_level="IDENTITY",  # Elevated significance
    metadata={
        "properties": {
            "significance": "Moment I realized my calling"
        }
    },
    agent_metadata={
        "agent_id": "agent_002",
        "agent_name": "Enhancement Agent",
        "confidence_score": 0.88,
        "contribution_details": {
            "fields_modified": ["title", "meaning_level", "significance"],
            "modification_type": "enhancement"
        }
    },
    last_modified_by_agent_id="agent_002"
)

# Agent 3 validates and finalizes
client.nodes.update(
    node_id=node['id'],
    agent_metadata={
        "agent_id": "agent_003",
        "agent_name": "QA Agent",
        "confidence_score": 0.95,
        "validation_status": "verified",
        "contribution_details": {
            "modification_type": "validation",
            "quality_score": 0.95
        }
    },
    last_modified_by_agent_id="agent_003"
)

Querying Agent Information

Get Node Ledger

See all agents that contributed to a node:

ledger = client.nodes.get_ledger(node_id="node_abc")

print(f"Total agents: {ledger['total_agents']}")

for agent in ledger['agents']:
    print(f"{agent['agent_name']}")
    print(f"  Type: {agent['agent_type']}")
    print(f"  Contributions: {agent['contribution_count']}")
    print(f"  Last activity: {agent['last_contribution']}")

Get Node History

Track how a node evolved with different agents:

history = client.nodes.get_history(
    node_id="node_abc",
    include_snapshots=False  # Set True to get full snapshots
)

for entry in history['items']:
    print(f"Version {entry['version']} - {entry['change_type']}")

    if entry['agent_metadata']:
        print(f"  Agent: {entry['agent_metadata']['agent_name']}")
        print(f"  Confidence: {entry['agent_metadata']['confidence_score']}")

    if entry['changed_fields']:
        print(f"  Changed: {list(entry['changed_fields'].keys())}")

Get Agent Contributions Across Nodes

See all contributions by a specific agent:

# Get all nodes modified by an agent
contributions = client.agents.get_contributions(
    agent_id="agent_001",
    limit=50
)

for contrib in contributions:
    print(f"{contrib['node_id']}: {contrib['contribution_type']}")
    print(f"  Fields: {contrib['field_name']}")
    print(f"  Confidence: {contrib['confidence_score']}")

Agent Types

Common agent type classifications:

| Agent Type | Purpose | Example Use |
|---|---|---|
| experience_extractor | Extract experiences from narratives | Parse journal entries |
| belief_extractor | Identify beliefs and patterns | Analyze stated values |
| entity_extractor | Recognize people, places, practices | Build relationship graph |
| goal_analyzer | Identify goals and aspirations | Extract intentions |
| content_enhancer | Refine and expand content | Add context and detail |
| qa_validator | Validate quality and accuracy | Verify extractions |
| semantic_analyzer | Analyze semantic relationships | Find connections |
| deduplicator | Identify and merge duplicates | Clean up redundancy |

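If you want to keep agent_type values consistent across your own pipeline, a small client-side check is one option. This is only a sketch that mirrors the classifications above; the SDK is not known to enforce a fixed list:

KNOWN_AGENT_TYPES = {
    "experience_extractor",
    "belief_extractor",
    "entity_extractor",
    "goal_analyzer",
    "content_enhancer",
    "qa_validator",
    "semantic_analyzer",
    "deduplicator",
}

def check_agent_type(agent_metadata: dict) -> dict:
    """Warn when agent_metadata uses an agent_type outside the common classifications."""
    agent_type = agent_metadata.get("agent_type")
    if agent_type not in KNOWN_AGENT_TYPES:
        print(f"Warning: unrecognized agent_type '{agent_type}'")
    return agent_metadata
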
Session Tracking

Group related agent actions together with sessions:

import time

session_id = f"sess_{int(time.time())}"

# All nodes in this session
for memory in user_memories:
    node = client.nodes.create(
        title=memory['title'],
        node_type="EXPERIENCE",
        agent_metadata={
            "agent_id": "agent_001",
            "agent_name": "Batch Processor",
            "session_id": session_id,  # Same session
            "processing_time_ms": memory['processing_time']
        },
        last_modified_by_agent_id="agent_001"
    )

# Query nodes from this session
nodes = client.nodes.list(
    filter_metadata={"agent_metadata.session_id": session_id}
)

Confidence Scoring

Agents should report confidence in their contributions:

Confidence Score Guidelines

| Score | Meaning | When to Use |
|---|---|---|
| 0.9-1.0 | Very High | Human-verified or multiple model agreement |
| 0.8-0.9 | High | Clear extraction with strong signals |
| 0.7-0.8 | Good | Normal extraction quality |
| 0.6-0.7 | Moderate | Some ambiguity or uncertainty |
| < 0.6 | Low | Significant uncertainty, needs review |

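These bands can also be applied in code, for example when labeling agent output for review queues. The helper below is a hypothetical convenience function based on the table above, not an SDK call:

def confidence_band(score: float) -> str:
    """Map a numeric confidence_score to the band names in the table above."""
    if score >= 0.9:
        return "Very High"
    if score >= 0.8:
        return "High"
    if score >= 0.7:
        return "Good"
    if score >= 0.6:
        return "Moderate"
    return "Low"
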
Using Confidence Scores

# Filter by confidence
high_confidence = client.nodes.list(
    filter_metadata={
        "agent_metadata.confidence_score": {"$gte": 0.9}
    }
)

# Flag low confidence for review
low_confidence = client.nodes.list(
    filter_metadata={
        "agent_metadata.confidence_score": {"$lt": 0.7}
    }
)

for node in low_confidence:
    print(f"Review needed: {node['title']}")
    print(f"Confidence: {node['agent_metadata']['confidence_score']}")

Validation Status

Track validation state of agent contributions:

| Status | Meaning |
|---|---|
| verified | Validated and approved |
| pending | Awaiting validation |
| rejected | Failed validation |
| needs_review | Flagged for human review |

# Create with validation status
node = client.nodes.create(
    title="Important memory",
    node_type="EXPERIENCE",
    agent_metadata={
        "agent_id": "agent_001",
        "validation_status": "pending"
    },
    last_modified_by_agent_id="agent_001"
)

# Later: validate with QA agent
client.nodes.update(
    node_id=node['id'],
    agent_metadata={
        "agent_id": "agent_qa",
        "agent_name": "QA Agent",
        "validation_status": "verified",
        "contribution_details": {
            "validation_passed": True,
            "quality_score": 0.95
        }
    },
    last_modified_by_agent_id="agent_qa"
)

Analytics

Agent Performance Metrics

# Get agent analytics
analytics = client.analytics.agents()

for agent in analytics['agents']:
    print(f"{agent['agent_name']}")
    print(f"  Total contributions: {agent['total_contributions']}")
    print(f"  Avg confidence: {agent['avg_confidence']:.2f}")
    print(f"  Avg processing time: {agent['avg_processing_time_ms']}ms")
    print(f"  Success rate: {agent['success_rate']:.1%}")

Node Quality Over Time

# Track how nodes evolve with multiple agents
history = client.nodes.get_history(node_id="node_abc")

quality_progression = []
for entry in history['items']:
    if entry['agent_metadata']:
        quality_progression.append({
            'version': entry['version'],
            'agent': entry['agent_metadata']['agent_name'],
            'confidence': entry['agent_metadata']['confidence_score'],
            'timestamp': entry['changed_at']
        })

# Analyze improvement
for i, point in enumerate(quality_progression):
    improvement = (point['confidence'] - quality_progression[i-1]['confidence']) if i > 0 else 0
    print(f"v{point['version']}: {point['agent']} - {point['confidence']:.2f} ({improvement:+.2f})")

Best Practices

1. Always Include Agent Metadata

Every agent action should include comprehensive metadata:

import uuid
from datetime import datetime

agent_metadata = {
    "agent_id": str(uuid.uuid4()),  # Unique agent ID
    "agent_name": "Descriptive Name v1.0",
    "agent_type": "experience_extractor",
    "agent_version": "1.0.0",
    "confidence_score": 0.87,
    "timestamp": datetime.now().isoformat(),
    "session_id": session_id
}

2. Use Meaningful Agent Names

Include version in agent names for tracking improvements:

# Good
"Memory Extraction Agent v2.1"
"Content Enhancement Agent v1.5"

# Bad
"Agent1"
"Bot"

3. Track Processing Metrics

Include performance data for optimization:

agent_metadata = {
    "processing_time_ms": 1850,
    "source_models": ["gpt-4", "claude-3"],
    "extraction_method": "narrative_analysis"
}

4. Report Honest Confidence

Don't inflate confidence scores - they're for quality tracking:

# Good - reflects actual uncertainty
if has_ambiguous_signals:
    confidence_score = 0.65

# Bad - artificially high
confidence_score = 0.95  # When not actually confident

5. Use Validation Workflow

Implement multi-stage processing:

  1. Extraction Agent - Initial extraction (confidence: 0.7-0.8)
  2. Enhancement Agent - Add context (confidence: 0.8-0.9)
  3. QA Agent - Validate (confidence: 0.9-1.0)

# Stage 1: Extract
node = create_with_agent(agent_type="extractor", confidence=0.75)

# Stage 2: Enhance
update_with_agent(node_id, agent_type="enhancer", confidence=0.85)

# Stage 3: Validate
update_with_agent(node_id, agent_type="qa", confidence=0.95, status="verified")

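The create_with_agent and update_with_agent helpers above are shorthand, not SDK methods. A minimal sketch of what they might look like, built on the client.nodes calls shown earlier (the exact argument handling is an assumption for illustration):

def create_with_agent(agent_type: str, confidence: float, **node_fields):
    """Create a node stamped with minimal agent metadata (illustrative helper)."""
    agent_id = f"agent_{agent_type}"
    return client.nodes.create(
        **node_fields,
        agent_metadata={
            "agent_id": agent_id,
            "agent_type": agent_type,
            "confidence_score": confidence
        },
        last_modified_by_agent_id=agent_id
    )

def update_with_agent(node_id, agent_type: str, confidence: float, status: str = None, **updates):
    """Update a node on behalf of an agent (illustrative helper)."""
    agent_id = f"agent_{agent_type}"
    metadata = {
        "agent_id": agent_id,
        "agent_type": agent_type,
        "confidence_score": confidence
    }
    if status:
        metadata["validation_status"] = status
    return client.nodes.update(
        node_id=node_id,
        **updates,
        agent_metadata=metadata,
        last_modified_by_agent_id=agent_id
    )
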
Error Handling

Handle Agent Failures

try:
    node = client.nodes.create(
        title=extracted_title,
        node_type=extracted_type,
        agent_metadata={
            "agent_id": "agent_001",
            "validation_status": "pending"
        }
    )
except Exception as e:
    # Log agent failure
    client.logging.agent_error(
        agent_id="agent_001",
        error_type=type(e).__name__,
        error_message=str(e),
        context={
            "attempted_action": "node_creation",
            "input_data": extracted_title
        }
    )

Retry with Different Agent

def create_with_fallback(data, primary_agent, fallback_agent):
    try:
        return client.nodes.create(
            **data,
            agent_metadata=primary_agent,
            last_modified_by_agent_id=primary_agent['agent_id']
        )
    except Exception:
        # Retry with fallback
        return client.nodes.create(
            **data,
            agent_metadata=fallback_agent,
            last_modified_by_agent_id=fallback_agent['agent_id']
        )

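Usage might look like this; the agent metadata dicts are placeholders that follow the shape used throughout this page:

primary = {"agent_id": "agent_001", "agent_name": "Extraction Agent v2.1", "confidence_score": 0.80}
fallback = {"agent_id": "agent_009", "agent_name": "Fallback Extraction Agent v1.0", "confidence_score": 0.70}

node = create_with_fallback(
    data={"title": "Started teaching in 2020", "node_type": "EXPERIENCE"},
    primary_agent=primary,
    fallback_agent=fallback
)
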
Next Steps