Implement working Mastodon bot with proposal system
Major Features:
- Mastodon integration with polling-based listener (streaming unreliable)
- Claude AI integration via llm CLI with API key support
- Public proposal announcements with voting
- Markdown stripping for Mastodon plain text
- Thread-aware voting system

Configuration:
- Added requirements.txt with all dependencies
- API key configuration in config.yaml (not streamed keys)
- Support for multiple Claude models via llm-anthropic

Platform Adapter (Mastodon):
- Polling notifications every 5 seconds (more reliable than streaming)
- Notification ID tracking to prevent re-processing on restart
- Markdown stripping for clean plain text output
- Vote thread matching via announcement IDs

Agent & Governance:
- Conversational tone (direct, concise, not legalistic)
- Proposal creation with AI-generated titles and descriptions
- Public announcements for proposals with all details
- Vote casting with automatic proposal detection from threads
- Constitutional reasoning for governance decisions

Bot Features:
- Long message splitting into threaded posts
- Public proposal announcements separate from user replies
- Announcement includes: title, proposer, description, deadline, voting instructions
- Vote tracking linked to proposal announcement threads

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
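The commit message favors polling over streaming for notifications. A minimal sketch of such a loop using the Mastodon.py client follows; the instance URL, token, and handle_mention() are illustrative assumptions, not code from this commit:

import time
from mastodon import Mastodon

mastodon = Mastodon(access_token="<token>", api_base_url="https://example.social")

def handle_mention(status):
    print(status["content"])  # stand-in for the bot's real handler

def poll_notifications(since_id=None, interval=5):
    while True:
        notifications = mastodon.notifications(since_id=since_id)
        if notifications:
            # List is newest-first; persisting since_id to disk here is
            # what prevents re-processing after a restart.
            since_id = notifications[0]["id"]
        for notification in reversed(notifications):
            if notification["type"] == "mention":
                handle_mention(notification["status"])
        time.sleep(interval)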
@@ -30,6 +30,7 @@ class GovernanceAgent:
         db_session: Session,
         constitution_path: str,
         model: Optional[str] = None,
+        api_keys: Optional[Dict[str, str]] = None,
     ):
         """
         Initialize the governance agent.
@@ -38,11 +39,13 @@ class GovernanceAgent:
             db_session: Database session
             constitution_path: Path to constitution file
             model: LLM model to use (None for default)
+            api_keys: Dict with 'openai' and/or 'anthropic' API keys
         """
         self.db = db_session
-        self.constitution = ConstitutionalReasoner(constitution_path, model)
+        self.constitution = ConstitutionalReasoner(constitution_path, model, api_keys)
         self.primitives = GovernancePrimitives(db_session)
         self.model = model
+        self.api_keys = api_keys or {}

     def process_request(
         self, request: str, actor: str, context: Optional[Dict[str, Any]] = None
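For orientation, a hypothetical call site for the widened constructor; every argument value below is illustrative rather than taken from the repository:

agent = GovernanceAgent(
    db_session=session,                    # an existing SQLAlchemy Session
    constitution_path="constitution.md",   # assumed path
    model="claude-sonnet-4.5",             # any model exposed via llm-anthropic
    api_keys={"anthropic": "sk-ant-..."},  # exported to the llm CLI's environment
)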
@@ -203,6 +206,8 @@ Respond with JSON:
             "constitutional_basis": str(constitutional_guidance.get("citations")),
             "initial_state": {
                 "proposal_text": proposal_text,
+                "title": proposal_info.get("title", proposal_text[:100]),
+                "description": proposal_info.get("description", proposal_text),
                 "proposal_type": proposal_info["proposal_type"],
                 "voting_threshold": proposal_info.get("voting_threshold"),
                 "votes": {},
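The two new fields degrade gracefully when the planner's JSON omits them; for example (illustrative values):

proposal_text = "Adopt a weekly community call to review open proposals."
proposal_info = {"proposal_type": "policy"}  # no title/description returned
title = proposal_info.get("title", proposal_text[:100])        # first 100 chars
description = proposal_info.get("description", proposal_text)  # full text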
@@ -247,9 +252,27 @@ Process ID: {{process_id}}
         vote_type = params.get("vote_type", "agree").lower()
         process_id = params.get("process_id")

+        # If no process_id in params, try to find it from thread context
+        if not process_id and context:
+            # Get the status ID being replied to
+            reply_to_id = context.get("reply_to_id")
+            if reply_to_id:
+                # Query for active processes and check if any match this thread
+                active_processes = queries.get_active_processes(self.db)
+                for proc in active_processes:
+                    if proc.state_data:
+                        announcement_id = proc.state_data.get("announcement_thread_id")
+                        if announcement_id and str(announcement_id) == str(reply_to_id):
+                            process_id = proc.id
+                            break
+
+                # If still not found, try the most recent active proposal
+                if not process_id and active_processes:
+                    process_id = active_processes[0].id
+
         if not process_id:
             return {
-                "error": "Could not identify which proposal to vote on. Please reply to a proposal thread."
+                "error": "Could not identify which proposal to vote on. Please reply to a proposal announcement or specify the process ID."
             }

         plan = {
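The thread lookup above presumes the announcement's status ID was recorded when the proposal was posted. A sketch of that counterpart, with the posting call and variable names assumed:

status = mastodon.status_post(announcement_text)  # public proposal announcement
state = dict(proc.state_data or {})
state["announcement_thread_id"] = status["id"]
proc.state_data = state  # reassign so the ORM notices the change
self.db.commit()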
@@ -365,8 +388,12 @@ Plan the actions as JSON:
     "actions": [
         {{"primitive": "name", "args": {{...}}}}
     ],
-    "response_template": "Message to send user"
+    "response_template": "Message to send user (can use Markdown formatting)"
 }}
+
+TONE: Be direct, concise, and clear. Use short paragraphs with line breaks.
+Avoid formal/legalistic language AND casual interjections (no "Hey!").
+Professional but approachable. Get to the point quickly.
 """

         try:
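Since response_template may now carry Markdown while Mastodon renders plain text, the adapter strips formatting and splits long replies into a thread. A regex-based sketch of both, under the assumption of a 500-character post limit; names and patterns are illustrative:

import re

def strip_markdown(text: str) -> str:
    # Bold/italics, inline code, headings, and [label](url) links:
    # the Markdown the planner is now allowed to emit.
    text = re.sub(r"\*\*(.+?)\*\*", r"\1", text)
    text = re.sub(r"\*(.+?)\*", r"\1", text)
    text = re.sub(r"`([^`]+)`", r"\1", text)
    text = re.sub(r"^#{1,6}\s*", "", text, flags=re.MULTILINE)
    text = re.sub(r"\[([^\]]+)\]\([^)]+\)", r"\1", text)
    return text

def split_message(text: str, limit: int = 500) -> list[str]:
    # Split on paragraph boundaries so each chunk fits one post; a single
    # paragraph longer than the limit would need a harder split, omitted here.
    chunks, current = [], ""
    for para in text.split("\n\n"):
        if current and len(current) + len(para) + 2 > limit:
            chunks.append(current.strip())
            current = ""
        current += para + "\n\n"
    if current.strip():
        chunks.append(current.strip())
    return chunks

# Posting as a thread: chain each chunk to the previous status, e.g.
#   prev = None
#   for chunk in split_message(strip_markdown(reply)):
#       prev = mastodon.status_post(chunk, in_reply_to_id=prev)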
@@ -477,11 +504,11 @@ Plan the actions as JSON:
             question=f"Ambiguity in request '{request}': {ambiguity}",
         )

-        response = f"""I encountered constitutional ambiguity in processing your request.
+        response = f"""I found something unclear in the constitution regarding your request.

-Question: {ambiguity}
+Issue: {ambiguity}

-This requires community clarification. Members can discuss and provide guidance.
+This needs community clarification. Discussion welcome.

Clarification ID: {clarification.id}
"""
@@ -537,12 +564,21 @@ Clarification ID: {clarification.id}

     def _call_llm(self, prompt: str) -> str:
         """Call the LLM via llm CLI"""
-        cmd = ["llm", "prompt"]
+        import os
+
+        cmd = ["llm"]
         if self.model:
             cmd.extend(["-m", self.model])
         cmd.append(prompt)

-        result = subprocess.run(cmd, capture_output=True, text=True, check=True)
+        # Set up environment with API keys
+        env = os.environ.copy()
+        if self.api_keys.get('openai'):
+            env['OPENAI_API_KEY'] = self.api_keys['openai']
+        if self.api_keys.get('anthropic'):
+            env['ANTHROPIC_API_KEY'] = self.api_keys['anthropic']
+
+        result = subprocess.run(cmd, capture_output=True, text=True, check=True, env=env)
         return result.stdout.strip()

     def _extract_json(self, text: str) -> Dict[str, Any]:
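Per the commit message, the keys consumed by _call_llm() come from config.yaml. A plausible layout and loader; the exact key names are assumptions, not confirmed by the diff:

# config.yaml (assumed layout):
#   model: claude-sonnet-4.5
#   api_keys:
#     anthropic: sk-ant-...
#     openai: sk-...
import yaml

with open("config.yaml") as f:
    config = yaml.safe_load(f)

api_keys = config.get("api_keys") or {}
model = config.get("model")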