Replace old rule-based agent with pure LLM interpretation system.

Agent Changes:
- Rename agent.py → agent_legacy.py (preserve old hard-coded agent)
- Rename agent_refactored.py → agent.py (make LLM agent primary)
- Agent now interprets constitution to understand authority and processes
- No hard-coded checks for specific users, roles, or governance models
- Fully generic: works with any constitutional design

Constitution Interpreter:
- Updated interpret_proposal() to detect authority structures from text
- LLM determines who has decision-making power from constitution
- No assumptions about voting, proposals, or specific governance models

Mastodon Formatting:
- Improved line break handling for bullet points and paragraphs
- Better plain-text formatting for Mastodon posts

Primitives:
- Added support for admin_approval threshold type

Architecture:
- Bot now uses pure LLM interpretation instead of scripted logic
- Each instance can develop implementation guidelines separately
- Guidelines not included in main codebase (instance-specific)

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
562 lines
18 KiB
Python
562 lines
18 KiB
Python
"""
|
|
LLM-Driven Governance Agent.
|
|
|
|
This agent relies on:
|
|
1. LLM interpretation (not hard-coded logic)
|
|
2. Structured memory (not just database records)
|
|
3. Tools for correctness (calculator, datetime, etc.)
|
|
4. Audit trails (human-readable explanations)
|
|
|
|
Key principle: Constitution defines ALL governance rules in natural language.
|
|
The agent interprets and executes based on constitution + memory + tools.
|
|
"""
|
|
|
|
import json
from datetime import datetime, timedelta, timezone
from typing import Any, Dict, List, Optional

from sqlalchemy.orm import Session

from .audit import AuditTrail, create_audit_trail
from .governance.constitution import ConstitutionalReasoner
from .memory import Decision, Event, GovernanceMemory, ProcessMemory, ProcessStatus
from .tools import GovernanceTools, ToolRegistry
|
|
|
|
|
|
class GovernanceAgent:
|
|
"""
|
|
LLM-driven governance agent that interprets constitution and uses memory + tools.
|
|
|
|
This agent does NOT have hard-coded governance logic. Instead:
|
|
- Reads constitution to understand rules
|
|
- Uses memory to track state
|
|
- Uses tools for calculations
|
|
- Makes decisions through LLM reasoning
|
|
"""
|
|
|
|
def __init__(
|
|
self,
|
|
db_session: Session,
|
|
constitution_path: str,
|
|
model: Optional[str] = None,
|
|
api_keys: Optional[Dict[str, str]] = None,
|
|
):
|
|
"""
|
|
Initialize the agentic governance bot.
|
|
|
|
Args:
|
|
db_session: Database session
|
|
constitution_path: Path to constitution file
|
|
model: LLM model to use (None for default)
|
|
api_keys: Dict with 'openai' and/or 'anthropic' API keys
|
|
"""
|
|
self.db = db_session
|
|
self.constitution = ConstitutionalReasoner(constitution_path, model, api_keys)
|
|
self.memory = GovernanceMemory(db_session)
|
|
self.tools = GovernanceTools()
|
|
self.model = model
|
|
self.api_keys = api_keys or {}
|
|
|
|
def process_request(
|
|
self,
|
|
request: str,
|
|
actor: str,
|
|
context: Optional[Dict[str, Any]] = None
|
|
) -> Dict[str, Any]:
|
|
"""
|
|
Process a governance request using agentic interpretation.
|
|
|
|
Flow:
|
|
1. Understand what the user wants (intent parsing)
|
|
2. Query constitution for relevant rules
|
|
3. Query memory for current state
|
|
4. Reason about what to do (using LLM)
|
|
5. Execute actions using tools
|
|
6. Update memory with decision
|
|
7. Generate audit trail
|
|
|
|
Args:
|
|
request: Natural language request
|
|
actor: Who made the request
|
|
context: Optional context (thread ID, etc.)
|
|
|
|
Returns:
|
|
Response dictionary with action taken and audit trail
|
|
"""
|
|
# Step 1: Parse intent
|
|
intent_result = self._parse_intent_with_llm(request, actor)
|
|
|
|
if "error" in intent_result:
|
|
return {"response": intent_result["error"], "success": False}
|
|
|
|
# Step 2: Query constitution
|
|
constitutional_guidance = self.constitution.query(
|
|
question=intent_result["query"],
|
|
context=f"Actor: {actor}\nRequest: {request}"
|
|
)
|
|
|
|
# Step 3: Query memory for relevant state
|
|
memory_context = self._gather_memory_context(intent_result, actor)
|
|
|
|
# Step 4: Use LLM to decide what to do
|
|
decision_result = self._make_decision_with_llm(
|
|
intent=intent_result,
|
|
constitution=constitutional_guidance,
|
|
memory=memory_context,
|
|
actor=actor,
|
|
context=context
|
|
)
|
|
|
|
# Step 5: Execute the decision
|
|
execution_result = self._execute_decision(
|
|
decision_result,
|
|
actor,
|
|
context
|
|
)
|
|
|
|
# Step 6: Generate audit trail
|
|
if execution_result.get("process_id"):
|
|
process = self.memory.get_process(execution_result["process_id"])
|
|
if process and process.decisions:
|
|
audit = create_audit_trail(process, process.decisions[-1])
|
|
execution_result["audit_trail"] = audit
|
|
|
|
return execution_result
|
|
|
|
def _parse_intent_with_llm(self, request: str, actor: str) -> Dict[str, Any]:
|
|
"""
|
|
Use LLM to understand what the user wants to do.
|
|
|
|
Returns:
|
|
Dict with:
|
|
- intent_type: "create_proposal", "cast_vote", "query", etc.
|
|
- query: Question to ask constitution
|
|
- parameters: Extracted parameters
|
|
"""
|
|
prompt = f"""Analyze this governance request and determine the intent.
|
|
|
|
Request: {request}
|
|
Actor: {actor}
|
|
|
|
What is the user trying to do? Choose from:
|
|
- create_proposal: User wants to create a governance proposal
|
|
- cast_vote: User wants to vote on something
|
|
- query_status: User wants to know the status of something
|
|
- query_constitution: User wants to know governance rules
|
|
- dispute_initiation: User wants to start a dispute resolution process
|
|
- other: Something else
|
|
|
|
Also extract any relevant parameters (proposal text, vote type, process ID, etc.).
|
|
|
|
Return your analysis as JSON:
|
|
{{
|
|
"intent_type": "...",
|
|
"query": "question to ask the constitution",
|
|
"parameters": {{}},
|
|
"confidence": "high|medium|low"
|
|
}}
|
|
"""
|
|
|
|
try:
|
|
result = self.constitution._call_llm(prompt)
|
|
# Parse JSON from response
|
|
# (In production, would use proper JSON parsing from LLM response)
|
|
return json.loads(result.get("answer", "{}"))
|
|
except Exception as e:
|
|
return {"error": f"Failed to parse intent: {e}"}
|
|
|
|
def _gather_memory_context(
|
|
self,
|
|
intent: Dict[str, Any],
|
|
actor: str
|
|
) -> Dict[str, Any]:
|
|
"""
|
|
Gather relevant information from memory based on intent.
|
|
|
|
Returns:
|
|
Dict with memory context
|
|
"""
|
|
context = {}
|
|
|
|
# Get active processes
|
|
active_processes = self.memory.get_active_processes()
|
|
context["active_processes"] = [
|
|
self.memory.summarize_for_llm(p.id) for p in active_processes[:5]
|
|
]
|
|
|
|
# Get overdue processes
|
|
overdue = self.memory.get_overdue_processes()
|
|
if overdue:
|
|
context["overdue_processes"] = [p.id for p in overdue]
|
|
|
|
# If specific process mentioned, get its details
|
|
if "process_id" in intent.get("parameters", {}):
|
|
process_id = intent["parameters"]["process_id"]
|
|
process = self.memory.get_process(process_id)
|
|
if process:
|
|
context["target_process"] = self.memory.summarize_for_llm(process_id)
|
|
|
|
# Get recent precedent if relevant
|
|
if intent.get("intent_type") == "query_constitution":
|
|
recent_decisions = self.memory.search_decisions(limit=5)
|
|
context["recent_precedent"] = [
|
|
f"{d.decision_type}: {d.result}" for d in recent_decisions
|
|
]
|
|
|
|
return context
|
|
|
|
def _make_decision_with_llm(
|
|
self,
|
|
intent: Dict[str, Any],
|
|
constitution: Dict[str, Any],
|
|
memory: Dict[str, Any],
|
|
actor: str,
|
|
context: Optional[Dict[str, Any]]
|
|
) -> Dict[str, Any]:
|
|
"""
|
|
Use LLM to decide what action to take.
|
|
|
|
This is where the agent interprets the constitution and decides
|
|
how to handle the request.
|
|
|
|
Returns:
|
|
Decision dict with:
|
|
- action: What to do
|
|
- reasoning: Why
|
|
- constitution_citations: Which articles apply
|
|
- calculations: Any math needed
|
|
- state_updates: Changes to memory
|
|
"""
|
|
prompt = f"""You are a governance bot interpreting a community constitution.
|
|
|
|
INTENT:
|
|
{json.dumps(intent, indent=2)}
|
|
|
|
CONSTITUTIONAL GUIDANCE:
|
|
{json.dumps(constitution, indent=2)}
|
|
|
|
CURRENT MEMORY STATE:
|
|
{json.dumps(memory, indent=2)}
|
|
|
|
ACTOR: {actor}
|
|
|
|
Based on the constitution and current state, decide what action to take.
|
|
|
|
For proposals:
|
|
- What type of proposal is this? (standard, urgent, constitutional, etc.)
|
|
- What discussion period does the constitution specify?
|
|
- What voting threshold is required?
|
|
- Are there any special requirements?
|
|
|
|
For votes:
|
|
- Is this person eligible to vote on this process?
|
|
- What vote types are allowed?
|
|
- Should this vote be recorded?
|
|
|
|
For deadline checks:
|
|
- Has the deadline passed?
|
|
- What votes were cast?
|
|
- What threshold does the constitution require?
|
|
- Does the proposal pass or fail?
|
|
|
|
Available tools for calculations:
|
|
- calculate(expression, variables): Evaluate math expressions
|
|
- get_datetime(): Get current time
|
|
- is_past_deadline(deadline): Check if deadline passed
|
|
- tally(votes, key): Count votes by type
|
|
- random_select(items, count): Random selection
|
|
|
|
Return your decision as JSON:
|
|
{{
|
|
"action": "create_process|record_vote|complete_process|query_response",
|
|
"reasoning": "explain your interpretation",
|
|
"constitution_citations": ["Article X, Section Y", ...],
|
|
"parameters": {{
|
|
// Action-specific parameters
|
|
"process_type": "...",
|
|
"deadline_days": X,
|
|
"threshold_expression": "agree > disagree",
|
|
// etc.
|
|
}},
|
|
"calculations": [
|
|
{{
|
|
"tool": "calculate",
|
|
"expression": "agree > disagree",
|
|
"variables": {{"agree": 10, "disagree": 3}}
|
|
}}
|
|
]
|
|
}}
|
|
"""
|
|
|
|
try:
|
|
result = self.constitution._call_llm(prompt)
|
|
decision = json.loads(result.get("answer", "{}"))
|
|
|
|
# Execute any calculations using tools
|
|
if "calculations" in decision:
|
|
for calc in decision["calculations"]:
|
|
tool_name = calc["tool"]
|
|
if tool_name == "calculate":
|
|
calc["result"] = self.tools.calculate(
|
|
calc["expression"],
|
|
calc["variables"]
|
|
)
|
|
# Handle other tools...
|
|
|
|
return decision
|
|
|
|
except Exception as e:
|
|
return {
|
|
"action": "error",
|
|
"reasoning": f"Failed to make decision: {e}",
|
|
"constitution_citations": []
|
|
}
|
|
|
|
def _execute_decision(
|
|
self,
|
|
decision: Dict[str, Any],
|
|
actor: str,
|
|
context: Optional[Dict[str, Any]]
|
|
) -> Dict[str, Any]:
|
|
"""
|
|
Execute the decision made by the LLM.
|
|
|
|
This updates memory and performs platform actions.
|
|
"""
|
|
action = decision.get("action")
|
|
params = decision.get("parameters", {})
|
|
|
|
try:
|
|
if action == "create_process":
|
|
return self._create_process_from_decision(decision, actor, context)
|
|
|
|
elif action == "record_vote":
|
|
return self._record_vote_from_decision(decision, actor, context)
|
|
|
|
elif action == "complete_process":
|
|
return self._complete_process_from_decision(decision, actor)
|
|
|
|
elif action == "query_response":
|
|
return {
|
|
"response": decision.get("reasoning"),
|
|
"constitution_citations": decision.get("constitution_citations", []),
|
|
"success": True
|
|
}
|
|
|
|
else:
|
|
return {
|
|
"response": f"Unknown action: {action}",
|
|
"success": False
|
|
}
|
|
|
|
except Exception as e:
|
|
return {
|
|
"response": f"Error executing decision: {e}",
|
|
"success": False
|
|
}
|
|
|
|
def _create_process_from_decision(
|
|
self,
|
|
decision: Dict[str, Any],
|
|
actor: str,
|
|
context: Optional[Dict[str, Any]]
|
|
) -> Dict[str, Any]:
|
|
"""Create a new governance process based on LLM decision"""
|
|
params = decision["parameters"]
|
|
|
|
# Calculate deadline
|
|
deadline_days = params.get("deadline_days", 7)
|
|
deadline = self.tools.datetime_add(
|
|
self.tools.get_datetime(),
|
|
days=deadline_days
|
|
)
|
|
|
|
# Generate process ID
|
|
process_id = f"process_{int(datetime.utcnow().timestamp())}"
|
|
|
|
# Create in memory
|
|
process = self.memory.create_process(
|
|
process_id=process_id,
|
|
process_type=params.get("process_type", "unknown"),
|
|
created_by=actor,
|
|
constitution_basis=decision.get("constitution_citations", []),
|
|
deadline=deadline,
|
|
initial_state=params.get("initial_state", {}),
|
|
metadata=context or {}
|
|
)
|
|
|
|
# Log creation event
|
|
self.memory.add_event(
|
|
process_id=process_id,
|
|
actor=actor,
|
|
event_type="process_created",
|
|
data=params,
|
|
context=f"Process created by {actor}"
|
|
)
|
|
|
|
# Log creation decision
|
|
self.memory.add_decision(
|
|
process_id=process_id,
|
|
decision_type="process_creation",
|
|
reasoning=decision["reasoning"],
|
|
constitution_citations=decision.get("constitution_citations", []),
|
|
result="created"
|
|
)
|
|
|
|
return {
|
|
"response": f"Created {params.get('process_type')} (ID: {process_id}). Deadline: {deadline.strftime('%Y-%m-%d %H:%M UTC')}",
|
|
"process_id": process_id,
|
|
"deadline": deadline.isoformat(),
|
|
"success": True
|
|
}
|
|
|
|
def _record_vote_from_decision(
|
|
self,
|
|
decision: Dict[str, Any],
|
|
actor: str,
|
|
context: Optional[Dict[str, Any]]
|
|
) -> Dict[str, Any]:
|
|
"""Record a vote based on LLM decision"""
|
|
params = decision["parameters"]
|
|
process_id = params.get("process_id")
|
|
|
|
if not process_id:
|
|
return {"response": "No process ID specified", "success": False}
|
|
|
|
# Get process
|
|
process = self.memory.get_process(process_id)
|
|
if not process:
|
|
return {"response": f"Process {process_id} not found", "success": False}
|
|
|
|
# Record vote in state
|
|
votes = process.state.get("votes", {})
|
|
votes[actor] = {
|
|
"vote": params.get("vote_type"),
|
|
"timestamp": datetime.utcnow().isoformat()
|
|
}
|
|
|
|
self.memory.update_process(
|
|
process_id=process_id,
|
|
state_updates={"votes": votes}
|
|
)
|
|
|
|
# Log event
|
|
self.memory.add_event(
|
|
process_id=process_id,
|
|
actor=actor,
|
|
event_type="vote_cast",
|
|
data={"vote": params.get("vote_type")},
|
|
context=f"{actor} voted {params.get('vote_type')}"
|
|
)
|
|
|
|
return {
|
|
"response": f"Vote recorded: {params.get('vote_type')}",
|
|
"process_id": process_id,
|
|
"success": True
|
|
}
|
|
|
|
def _complete_process_from_decision(
|
|
self,
|
|
decision: Dict[str, Any],
|
|
actor: str
|
|
) -> Dict[str, Any]:
|
|
"""Complete a process based on LLM decision"""
|
|
params = decision["parameters"]
|
|
process_id = params.get("process_id")
|
|
|
|
process = self.memory.get_process(process_id)
|
|
if not process:
|
|
return {"response": f"Process {process_id} not found", "success": False}
|
|
|
|
# Update status
|
|
self.memory.update_process(
|
|
process_id=process_id,
|
|
status=ProcessStatus.COMPLETED
|
|
)
|
|
|
|
# Log completion decision
|
|
calculation_used = None
|
|
calculation_vars = None
|
|
calculation_result = None
|
|
|
|
if decision.get("calculations"):
|
|
calc = decision["calculations"][0] # Use first calculation
|
|
calculation_used = calc.get("expression")
|
|
calculation_vars = calc.get("variables")
|
|
calculation_result = calc.get("result")
|
|
|
|
self.memory.add_decision(
|
|
process_id=process_id,
|
|
decision_type="process_completion",
|
|
reasoning=decision["reasoning"],
|
|
constitution_citations=decision.get("constitution_citations", []),
|
|
result=params.get("outcome", "completed"),
|
|
calculation_used=calculation_used,
|
|
calculation_variables=calculation_vars,
|
|
calculation_result=calculation_result
|
|
)
|
|
|
|
# Log event
|
|
self.memory.add_event(
|
|
process_id=process_id,
|
|
actor="bot",
|
|
event_type="process_completed",
|
|
data={"outcome": params.get("outcome")},
|
|
context=f"Process completed with outcome: {params.get('outcome')}"
|
|
)
|
|
|
|
return {
|
|
"response": f"Process {process_id} completed: {params.get('outcome')}",
|
|
"process_id": process_id,
|
|
"outcome": params.get("outcome"),
|
|
"success": True
|
|
}
|
|
|
|
def check_deadlines(self) -> List[Dict[str, Any]]:
|
|
"""
|
|
Check for processes that have reached their deadline.
|
|
|
|
This is called periodically (e.g., every hour) to evaluate
|
|
processes that need decisions.
|
|
|
|
Returns:
|
|
List of completed process results
|
|
"""
|
|
results = []
|
|
overdue = self.memory.get_overdue_processes()
|
|
|
|
for process in overdue:
|
|
# Ask LLM to evaluate this process
|
|
evaluation = self._evaluate_process_deadline(process)
|
|
results.append(evaluation)
|
|
|
|
return results
|
|
|
|
def _evaluate_process_deadline(self, process: ProcessMemory) -> Dict[str, Any]:
|
|
"""
|
|
Use LLM to evaluate a process that reached its deadline.
|
|
"""
|
|
# Get vote tally
|
|
votes = process.state.get("votes", {})
|
|
vote_tally = self.tools.tally(
|
|
[{"vote": v["vote"]} for v in votes.values()],
|
|
"vote"
|
|
)
|
|
|
|
# Ask constitution what threshold is needed
|
|
threshold_query = self.constitution.query(
|
|
question=f"For a {process.type}, what voting threshold is required for passage?",
|
|
context=self.memory.summarize_for_llm(process.id)
|
|
)
|
|
|
|
# Use LLM to decide if threshold is met
|
|
decision_result = self._make_decision_with_llm(
|
|
intent={"intent_type": "deadline_check"},
|
|
constitution=threshold_query,
|
|
memory={"votes": vote_tally, "process": process.to_dict()},
|
|
actor="bot",
|
|
context=None
|
|
)
|
|
|
|
# Execute the decision
|
|
return self._execute_decision(decision_result, "bot", None)
|