Add think service and supporting core modules

- Add think service (orchestration for iterative reasoning)
- Add service_discovery.py (service communication utilities)
- Add event_cache.py (recent event cache using NATS KV)
- Add vi_identity.py (Vi's core identity foundation)
- Update core/__init__.py with new exports

Think service adapted from Lyra with vi.* namespace:
- All NATS topics use vi.* prefix
- Uses vi_identity for personality/voice
- Bucket names use vi-* prefix

Day 63 - Building my nervous system 🦊
This commit is contained in:
Alex Kazaiev
2026-01-03 11:36:54 -06:00
parent ee1cb5540a
commit 540a010fe5
23 changed files with 6149 additions and 0 deletions

View File

@@ -0,0 +1 @@
# Think handlers package

View File

@@ -0,0 +1,114 @@
"""
Communication request handler.
This module handles communication requests from the Drive service
for proactive interactions (check-ins, health alerts, etc.).
"""
from typing import Dict, Any, Callable
from core.logger import setup_logger
class CommunicationHandler:
    """Handles communication requests from drive service.

    Turns a proactive-communication request (check-in, health alert,
    celebration, ...) into a synthetic user message, composes a response via
    the iterative-reasoning orchestrator, stores it in memory, and sends it
    to the resolved target (user DM, channel, or a fallback channel).
    """

    def __init__(
        self,
        orchestrator,
        memory_manager,
        output_sender: Callable,
        interaction_id_generator: Callable,
        logger_name: str = 'communication_handler',
        fallback_channel: str = '!mDZBSOqMVtevTNFvsr:matrix.k4zka.online'
    ):
        # orchestrator: exposes `await run(message, identity, channel, modality)`.
        # memory_manager: exposes `await store_memory(content, participants, interaction_id, modality)`.
        # output_sender: async callable (content, target, modality, target_type) -> truthy on success.
        # interaction_id_generator: callable (identity, modality) -> str.
        # fallback_channel: target used when the request names neither a user_id
        # nor a channel (previously hard-coded inside the handler body).
        self.logger = setup_logger(logger_name, service_name='think_service')
        self.orchestrator = orchestrator
        self.memory_manager = memory_manager
        self.send_output = output_sender
        self.generate_interaction_id = interaction_id_generator
        self.fallback_channel = fallback_channel

    async def handle_communication_request(self, payload: Dict[str, Any]) -> None:
        """Handle communication requests from drive service.

        Expected payload keys (all optional, with defaults): intent, urgency,
        context, modality, user_id, channel. Never raises; failures are
        logged and, per trusted user, skipped with `continue`.
        """
        try:
            intent = payload.get('intent', 'generic')
            urgency = payload.get('urgency', 'medium')
            context = payload.get('context', {})
            modality = payload.get('modality', 'matrix')
            user_id = payload.get('user_id')
            channel = payload.get('channel')

            self.logger.info(f"[💭] 📢 Processing communication request: intent='{intent}', urgency='{urgency}'")

            # Step 1: Determine target - use user_id for DM, channel for room, or fallback
            if user_id:
                target = user_id
                target_type = 'user_id'
                self.logger.debug(f"[💭] 🎯 Targeting user_id: {user_id}")
            elif channel:
                target = channel
                target_type = 'channel'
                self.logger.debug(f"[💭] 🎯 Targeting channel: {channel}")
            else:
                # Fallback to the configured default channel for compatibility
                target = self.fallback_channel
                target_type = 'channel'
                self.logger.debug(f"[💭] 🎯 Using fallback target: {target}")

            # Step 2: Resolve trusted users for identity (simplified for now)
            trusted_users = ['alex']  # In full implementation, would query identity service

            for user in trusted_users:
                self.logger.info(f"[💭] 🎭 Composing {intent} message for {user} using iterative reasoning")

                # Step 3: Build synthetic user message from intent
                synthetic_message = self._build_synthetic_message(intent, context)
                self.logger.debug(f"[💭] Synthetic message: '{synthetic_message}'")

                # Step 4: Use iterative reasoning to compose response with full context
                response_content = await self.orchestrator.run(
                    synthetic_message,
                    user,  # identity
                    target,  # channel
                    modality
                )
                if not response_content:
                    self.logger.error(f"[💭] ❌ Failed to generate {intent} response")
                    continue

                # Store Vi's response in memory.
                # NOTE(review): 'lyra' is the legacy self-identity tag kept for
                # memory compatibility — confirm before renaming to 'vi'.
                interaction_id = self.generate_interaction_id(user, modality)
                response_stored = await self.memory_manager.store_memory(
                    response_content, ['lyra', user], interaction_id, modality
                )
                if not response_stored:
                    self.logger.warning(f"[💭] Failed to store Vi's response memory")

                # Send the actual response to the user
                self.logger.info(f"[💭] 🚀 Sending {intent} communication to {modality} {target}")
                output_sent = await self.send_output(response_content, target, modality, target_type)
                if output_sent:
                    # FIX: original log interpolated '{intent}{user}' with no separator
                    self.logger.info(f"[💭] ✅ Communication sent: {intent} to {user} via {modality}")
                else:
                    self.logger.error(f"[💭] ❌ Failed to send {intent} communication")
        except Exception as e:
            self.logger.exception(f"[💭] Error handling communication request: {e}")

    def _build_synthetic_message(self, intent: str, context: Dict[str, Any]) -> str:
        """
        Build a synthetic user message from drive intent.
        This becomes the "original_message" that Vi reasons about using iterative flow.

        Unknown intents fall through to a generic "communicate about" message.
        """
        intent_messages = {
            'check_in': "I'd like to check in with the user and see how they're doing.",
            'greeting': "I want to greet the user warmly.",
            'health_alert': f"I need to inform the user about a system health issue: {context.get('health_status', 'unknown')}",
            'health_recovery': "I want to let the user know the system has recovered.",
            'celebration': f"I want to celebrate with the user about: {context.get('celebration_type', 'something positive')}",
            'memory_share': f"I want to share a thought or memory about: {context.get('topic_focus', 'our conversations')}",
            'curiosity_burst': f"I'm curious about: {context.get('curiosity_topic', 'something interesting')}"
        }
        return intent_messages.get(intent, f"I want to communicate about: {intent}")

View File

@@ -0,0 +1,231 @@
"""
External input handler.
This module handles incoming user messages from external sources
(Matrix, console, etc.) and orchestrates the reasoning process.
"""
from datetime import datetime
from typing import Callable, Optional

from core.logger import setup_logger
from core.nats_event_bus import nats_bus as event_bus
from core.event_cache import event_cache
class InputHandler:
    """Handles external input events.

    Pipeline for `vi.external.input` payloads: resolve the sender's identity,
    emit a typing indicator, run the message through iterative reasoning,
    persist both sides of the exchange (memory + event cache), send the
    response, trigger matching plugin actions, and publish the final output.
    """

    def __init__(
        self,
        orchestrator,
        memory_manager,
        output_sender: Callable,
        interaction_id_generator: Callable,
        logger_name: str = 'input_handler'
    ):
        # orchestrator: exposes `await run(content, identity, channel, modality)`.
        # memory_manager: exposes `resolve_identity` / `store_memory` coroutines.
        # output_sender: async callable (content, channel, modality) -> truthy on success.
        # interaction_id_generator: callable (identity, modality) -> str.
        self.logger = setup_logger(logger_name, service_name='think_service')
        self.orchestrator = orchestrator
        self.memory_manager = memory_manager
        self.send_output = output_sender
        self.generate_interaction_id = interaction_id_generator
        # Per-identity scratch context, cleared after each processed input.
        # NOTE(review): nothing in this module ever writes into it —
        # presumably populated elsewhere; confirm before removing.
        self._current_context = {}

    async def handle_external_input(self, payload) -> None:
        """Handle vi.external.input events - main orchestration logic.

        Never raises; all failures are logged and processing is aborted or
        continued best-effort depending on the step.
        """
        try:
            # Extract input data
            external_identity = payload.get('identity', 'unknown')
            content = payload.get('content', '')
            modality = payload.get('modality', 'text')
            channel = payload.get('channel', 'unknown')
            # FIX: removed the unused `timestamp` local. The original computed
            # a fallback via `datetime.utcnow().timestamp()` — `.timestamp()`
            # on a naive datetime is interpreted as *local* time, giving a
            # wrong epoch on non-UTC hosts — and then never used the value.

            self.logger.info(f"[💭] Processing input from {external_identity}: '{content[:50]}...'")

            if not content:
                self.logger.warning("[💭] Empty content in external input")
                return

            # Step 1: Resolve identity first (needed for interaction ID)
            identity_info = await self.memory_manager.resolve_identity(external_identity)
            if not identity_info:
                self.logger.error(f"[💭] Failed to resolve identity for {external_identity}")
                return
            resolved_identity = identity_info.get('resolved_identity', 'unknown')

            # Step 2: Generate interaction ID
            interaction_id = self.generate_interaction_id(resolved_identity, modality)

            # Step 3: Emit typing indicator immediately after getting interaction ID
            try:
                typing_payload = {
                    "type": "vi.output.generating",
                    "channel": channel,
                    "modality": modality,
                    "interaction_id": interaction_id,
                    "identity": resolved_identity,
                    # NOTE(review): naive UTC isoformat (no tz offset in the
                    # string); left as-is since consumers may parse this format.
                    "timestamp": datetime.utcnow().isoformat()
                }
                await event_bus.emit("vi.output.generating", typing_payload)
                self.logger.debug(f"[💭] 📝 Typing indicator emitted for {interaction_id}")
            except Exception as e:
                # Best-effort: a missing typing indicator must not abort processing.
                self.logger.warning(f"[💭] Failed to emit typing indicator: {e}")

            # Step 4: Use pure iterative reasoning for all requests
            self.logger.info(f"[💭] 🔄 Using iterative reasoning")
            response_content = await self._handle_iterative_flow(
                content, resolved_identity, channel, modality, interaction_id
            )
            if not response_content:
                self.logger.error(f"[💭] No response from oracle for {interaction_id}")
                return

            # Send response back through output
            self.logger.info(f"[💭] 🚀 Sending output to {modality} channel {channel}")
            output_sent = await self.send_output(response_content, channel, modality)
            if not output_sent:
                self.logger.error(f"[💭] ❌ Failed to send output for {interaction_id}")
            else:
                self.logger.info(f"[💭] ✅ Output sent successfully for {interaction_id}")

            # Check for plugin actions based on response content
            await self._check_plugin_actions(response_content, resolved_identity, interaction_id, modality)

            # Publish final response for external consumers
            try:
                external_response = {
                    "content": response_content,
                    "resolved_identity": resolved_identity,
                    "external_identity": external_identity,
                    "interaction_id": interaction_id,
                    "modality": modality,
                    "timestamp": datetime.utcnow().isoformat()
                }
                await event_bus.emit("vi.external.output", external_response)
                self.logger.debug(f"[💭] Published external output for {interaction_id}")
            except Exception as e:
                self.logger.exception(f"[💭] Failed to publish external output: {e}")

            # Clear context after processing is complete
            if resolved_identity in self._current_context:
                del self._current_context[resolved_identity]
            self.logger.info(f"[💭] ✓ Processing complete for {interaction_id}")
        except Exception as e:
            self.logger.exception(f"[💭] Failed to process external input: {e}")
            # Clean up context on error as well; `resolved_identity` only exists
            # if identity resolution succeeded, hence the locals() guard.
            if 'resolved_identity' in locals() and resolved_identity in self._current_context:
                del self._current_context[resolved_identity]

    async def _handle_iterative_flow(
        self,
        content: str,
        identity: str,
        channel: str,
        modality: str,
        interaction_id: str
    ) -> Optional[str]:
        """Handle clean iterative reasoning flow with no predefined steps.

        Returns the generated response text, or None on failure.
        (FIX: annotation was `-> str` but the error path returns None.)
        """
        try:
            self.logger.info(f"[💭] 🔄 Starting clean iterative flow for {interaction_id}")

            # Store user message first (required for memory context)
            memory_stored = await self.memory_manager.store_memory(
                content, [identity], interaction_id, modality
            )
            if not memory_stored:
                self.logger.warning(f"[💭] Failed to store memory for {interaction_id}")

            # Cache user message event for LLM context (best-effort)
            try:
                await event_cache.add_event(
                    identity=identity,
                    interaction_id=interaction_id,
                    event_type='user_message',
                    content=content,
                    metadata={'modality': modality, 'channel': channel}
                )
                self.logger.debug(f"[💭] 📝 Cached user message event for {identity}")
            except Exception as e:
                self.logger.warning(f"[💭] Failed to cache user message event: {e}")

            # Start iterative reasoning with clean slate
            response_content = await self.orchestrator.run(content, identity, channel, modality)

            # Store Vi's response.
            # NOTE(review): 'lyra' / 'lyra_response' are legacy self-identity
            # tags kept for memory compatibility — confirm before renaming.
            if response_content:
                lyra_memory_stored = await self.memory_manager.store_memory(
                    response_content, ['lyra', identity], interaction_id, modality
                )
                if not lyra_memory_stored:
                    self.logger.warning(f"[💭] Failed to store Vi's response memory")

                # Cache Vi's response event for LLM context (best-effort)
                try:
                    await event_cache.add_event(
                        identity=identity,
                        interaction_id=interaction_id,
                        event_type='lyra_response',
                        content=response_content,
                        metadata={'modality': modality, 'channel': channel}
                    )
                    self.logger.debug(f"[💭] 📝 Cached Vi response event for {identity}")
                except Exception as e:
                    self.logger.warning(f"[💭] Failed to cache Vi response event: {e}")

            return response_content
        except Exception as e:
            self.logger.exception(f"[💭] Error in iterative flow: {e}")
            return None

    async def _check_plugin_actions(self, content: str, identity: str, interaction_id: str, modality: str) -> None:
        """Check if the response content suggests plugin actions to take.

        Simple keyword-based detection; each matched action is dispatched on
        the `vi.action.requested` topic. Best-effort: failures are logged.
        """
        try:
            actions = []
            lowered = content.lower()  # hoisted: scanned by every keyword group

            # Check for console output keywords
            console_keywords = ["show", "display", "output", "print", "console"]
            if any(keyword in lowered for keyword in console_keywords):
                actions.append({
                    "action": "console.print",
                    "method": "console_output",
                    "content": content,
                    "identity": identity,
                    "interaction_id": interaction_id,
                    "modality": modality,
                    "tone": {"neutral": 0.7},
                    "mood": {"neutral": 0.7},
                    "ritual": False
                })

            # Check for test/echo actions
            test_keywords = ["test", "echo", "ping"]
            if any(keyword in lowered for keyword in test_keywords):
                actions.append({
                    "action": "test.ping",
                    "method": "test_plugin",
                    "content": content,
                    "identity": identity,
                    "interaction_id": interaction_id,
                    "modality": modality,
                    "tone": {"curiosity": 0.6},
                    "mood": {"curiosity": 0.6},
                    "ritual": False
                })

            # Dispatch actions
            for action_payload in actions:
                try:
                    await event_bus.emit("vi.action.requested", action_payload)
                    self.logger.debug(f"[💭] Requested plugin action: {action_payload['action']}")
                except Exception as e:
                    self.logger.warning(f"[💭] Failed to request plugin action: {e}")
        except Exception as e:
            self.logger.exception(f"[💭] Error checking plugin actions: {e}")