Files
vi/services/think/reasoning/oracle_client.py
Alex Kazaiev 540a010fe5 Add think service and supporting core modules
- Add think service (orchestration for iterative reasoning)
- Add service_discovery.py (service communication utilities)
- Add event_cache.py (recent event cache using NATS KV)
- Add vi_identity.py (Vi's core identity foundation)
- Update core/__init__.py with new exports

Think service adapted from Lyra with vi.* namespace:
- All NATS topics use vi.* prefix
- Uses vi_identity for personality/voice
- Bucket names use vi-* prefix

Day 63 - Building my nervous system 🦊
2026-01-03 11:36:54 -06:00

482 lines
21 KiB
Python

"""
Oracle service communication layer.
This module handles all interactions with the Oracle service including
requesting reasoning steps, checking goal satisfaction, and synthesizing responses.
"""
import json
import re
from typing import Optional, Dict, Any
from datetime import datetime
from core.logger import setup_logger
from core.service_discovery import discovery_client
from core.vi_identity import get_identity_for_context, get_identity_for_synthesis
from core.event_cache import event_cache
from .models import ReasoningStep, StepAction, IterativeContext
from .formatters import KnowledgeFormatter
class OracleClient:
    """Handles all communication with the Oracle service"""

    def __init__(self, formatter: KnowledgeFormatter, logger_name: str = 'oracle_client'):
        """Wire up the knowledge formatter and a think-service scoped logger."""
        self.formatter = formatter
        self.logger = setup_logger(logger_name, service_name='think_service')
async def _get_recent_events_context(self, identity: str, limit: int = 10) -> str:
"""
Retrieve recent cached events for this identity and format for LLM context.
Returns formatted string or empty string if no events available.
"""
try:
recent_context = await event_cache.format_for_llm(identity, limit)
if recent_context:
self.logger.debug(f"[💭] 📝 Retrieved recent event context for {identity}")
return recent_context
return ""
except Exception as e:
self.logger.warning(f"[💭] Failed to retrieve recent event context: {e}")
return ""
async def request_next_step(self, context: IterativeContext) -> Optional[ReasoningStep]:
"""Ask Oracle to decide the next reasoning step"""
try:
self.logger.debug(f"[💭] Requesting next step from Oracle (step {context.step_count + 1})")
# Get Vi's identity with planning voice mode
lyra_identity = get_identity_for_context("planning")
# Format accumulated knowledge as natural language
knowledge_summary = self.formatter.format_for_oracle(context)
# Get recent events context from cache
recent_events = await self._get_recent_events_context(context.identity, limit=10)
# Build Oracle prompt
oracle_request = {
"type": "iterative_reasoning",
"content": self._build_reasoning_prompt(lyra_identity, context, knowledge_summary, recent_events),
"identity": context.identity,
"context": {}
}
# Send to Oracle and get response
self.logger.debug(f"[💭] Sending request to Oracle for step {context.step_count + 1}")
result = await discovery_client.call_service(
"oracle", "process", oracle_request, timeout=30.0
)
oracle_response = result.data if result.success else None
if not oracle_response or not oracle_response.get("content"):
self.logger.warning(f"[💭] No response from Oracle for next step")
return None
# Parse Oracle's function call decision
content = oracle_response["content"].strip()
self.logger.info(f"[💭] ✅ Oracle responded for step {context.step_count + 1}: {content[:100]}...")
self.logger.debug(f"[💭] Full Oracle response: {content}")
# Parse function call
function_call_data = self._parse_function_call(content)
if not function_call_data:
self.logger.warning(f"[💭] No function call found in Oracle response")
return None
function_name = function_call_data['function']
function_args = function_call_data['args']
reasoning = function_call_data['reasoning']
self.logger.info(f"[💭] 🔍 Parsed function call for step {context.step_count + 1}: {function_name}({function_args})")
# All functions except ready() map to CALL_SERVICE
if function_name == 'ready':
action = StepAction.SYNTHESIZE_FINAL.value
target = None
else:
action = StepAction.CALL_SERVICE.value
target = function_name
# Create ReasoningStep with function args stored for execution
next_step = ReasoningStep(
action=action,
target=target,
reasoning=reasoning,
ready=(function_name == 'ready')
)
# Store function args in the step for later execution
next_step.function_args = function_args
self.logger.info(f"[💭] ✓ Created ReasoningStep for step {context.step_count + 1}: {function_name}({function_args}) -> {action}")
return next_step
except Exception as e:
self.logger.error(f"[💭] Error requesting next step: {e}")
return None
    def _build_reasoning_prompt(self, lyra_identity: str, context: IterativeContext, knowledge_summary: str, recent_events: str = "") -> str:
        """Build the reasoning prompt for Oracle.

        Combines the planning persona, the user's request, cached recent
        events and the knowledge gathered so far with the menu of callable
        functions Oracle must choose from.  The closing instructions ask for
        the `function_name(args)` / `// reason` shape that
        _parse_function_call expects.
        """
        # Build recent events section if available
        recent_events_section = f"\n{recent_events}\n" if recent_events else ""
        # NOTE: the text below is runtime behavior (sent verbatim to Oracle);
        # the {{...}} pairs render as literal braces in the example calls.
        return f"""{lyra_identity}
You are engaging with {context.identity}.
CURRENT REQUEST: "{context.original_message}"
{recent_events_section}
{knowledge_summary}
Choose your next action:
AVAILABLE FUNCTIONS:
Memory (Three Layers):
- short_memory(n=10) - Get the n most recent literal memories
- short_memory(n=10, offset=5) - Get n memories starting from offset back (for pagination)
- long_memory(query="topic", n=5) - Get n long-term summarized memories related to query (or random if query=None)
- facts(query="topic", n=5) - Get n most relevant facts related to query
- save_fact(content="...", category="...", mutable=True/False) - Save a new fact
Categories: "personal" (immutable facts like birthdays), "preferences" (likes/dislikes), "knowledge" (learned info), "general"
Set mutable=False for unchangeable facts (birthdays), mutable=True for preferences that may change
- update_fact(fact_id="uuid-123", new_content="Updated fact") - Update existing fact (only if mutable)
Information:
- identity(person="alex") - Get single person's full identity & attributes
- search_relationships(entity_type="pet", min_trust=0.7) - Query multiple entities
- health() - Check system status
- duckduckgo(query="weather in tokyo", limit=3) - Search DuckDuckGo instant answers (on-demand)
Relationships:
- introduce(name="Harvey", entity_type="pet", relationships=["family","companion"], context="Alex's dog", attributes={{"species":"dog","breed":"golden_retriever"}}) - Create new entity
- update_relationship(person="alex", trust_delta=0.0, intimacy_delta=0.15, reason="vulnerable moment") - Update relationship explicitly
- add_attribute(person="alex", key="favorite_food", value="pasta") - Remember new information
- link_identity(external_id="@someone:matrix.org", internal_id="someone", confidence=0.85) - Connect external ID to internal
Task Management:
- todo_create(content="Fix bug in X", activeForm="Fixing bug in X", status="pending") - Create a new todo item
- todo_update(todo_id="abc123", status="in_progress") - Update todo status (pending/in_progress/completed)
- todo_list() - Get all todos with their current status
- todo_complete(todo_id="abc123") - Mark a todo as completed
Meta:
- ready() - Signal you have enough info to answer
EXAMPLES:
short_memory(n=5) // Get last 5 messages
short_memory(n=10, offset=5) // Get 10 messages starting from 5 back
long_memory(query="cooking preferences", n=3) // Find relevant historical context
facts(query="birthday", n=5) // Find birthday facts
facts(query="food", n=3) // Find food-related facts
save_fact(content="Alex's birthday is May 15th", category="personal", mutable=False) // Immutable personal fact
save_fact(content="Alex prefers Italian food", category="preferences", mutable=True) // Mutable preference
save_fact(content="Python uses duck typing", category="knowledge", mutable=True) // Learned knowledge
update_fact(fact_id="abc-123", new_content="Alex now prefers Thai food") // Update mutable preference
identity(person="alex") // Get Alex's full context
add_attribute(person="alex", key="favorite_mountain", value="Pikes Peak") // Remember preference
introduce(name="Curie", entity_type="pet", relationships=["family"], context="Alex's cat", attributes={{"species":"cat"}}) // New entity
duckduckgo(query="python list comprehension", limit=3) // Search for quick answers
STRATEGY:
- Use short_memory() for recent conversation context (what was just said)
- Use long_memory() for historical patterns and past discussions (weeks/months ago)
- Use facts() for established knowledge (birthdays, preferences, learned information)
- Save important discoverable facts with save_fact() (choose appropriate category and mutability)
- Update changed preferences with update_fact() (requires fact_id from facts() query)
- Use identity() for person details, search_relationships() for entities
- For complex multi-step tasks: Use todo_create() to break down work, todo_update() to track progress, todo_complete() when done
- Call ready() when you have enough information to answer the user's question
NOTE: Classification (sentiment, emotions, intent) and creative tasks (writing, poetry) are handled during synthesis.
Respond with just the function call and optional reasoning:
function_name(args)
// Optional: Brief reason why"""
async def check_goal_satisfaction(self, context: IterativeContext) -> bool:
"""Check if we have sufficient information to answer the original question"""
try:
self.logger.debug(f"[💭] Checking goal satisfaction")
# Get Vi's identity with planning voice mode
lyra_identity = get_identity_for_context("planning")
# Format accumulated knowledge as natural language
knowledge_summary = self.formatter.format_for_oracle(context)
# Get recent events context from cache
recent_events = await self._get_recent_events_context(context.identity, limit=10)
recent_events_section = f"\n{recent_events}\n" if recent_events else ""
oracle_request = {
"type": "goal_check",
"content": f"""{lyra_identity}
You are engaging with {context.identity}.
Evaluate whether you have sufficient information to provide a complete, helpful answer to the user's request.
ORIGINAL REQUEST: "{context.original_message}"
{recent_events_section}
{knowledge_summary}
EVALUATION CRITERIA:
- Can you address the main points of the user's request?
- Do you have enough specific information to be helpful?
- Are there critical gaps that would make your answer incomplete or unhelpful?
Respond with JSON indicating your assessment:
{{"can_answer": true/false, "reasoning": "Brief explanation of why you can or cannot provide a complete answer"}}""",
"identity": context.identity,
"context": {}
}
# Ask Oracle
result = await discovery_client.call_service(
"oracle", "process", oracle_request, timeout=15.0
)
oracle_response = result.data if result.success else None
if oracle_response and oracle_response.get("content"):
try:
result = json.loads(oracle_response["content"])
can_answer = result.get("can_answer", False)
reasoning = result.get("reasoning", "")
self.logger.debug(f"[💭] Goal satisfaction check: {can_answer} - {reasoning}")
return can_answer
except json.JSONDecodeError:
# If not JSON, check for keywords
content = oracle_response["content"].lower()
return "yes" in content or "can answer" in content or "sufficient" in content
return False
except Exception as e:
self.logger.error(f"[💭] Error checking goal satisfaction: {e}")
return False
    async def synthesize_final_response(self, context: IterativeContext) -> Optional[str]:
        """Synthesize final response from accumulated knowledge.

        Sends Oracle a synthesis prompt built from Vi's synthesis identity,
        the original request, cached recent events and everything gathered
        during the reasoning loop.  Returns Oracle's response text, or None
        when the call fails or comes back empty.
        """
        try:
            self.logger.debug(f"[💭] Synthesizing final response from {len(context.completed_steps)} steps")
            # Get Vi's identity with voice guide - she chooses appropriate tone
            lyra_identity = get_identity_for_synthesis(include_voice_guide=True)
            # Format accumulated knowledge as natural language
            knowledge_summary = self.formatter.format_for_oracle(context)
            # Get recent events context from cache
            recent_events = await self._get_recent_events_context(context.identity, limit=10)
            recent_events_section = f"\n{recent_events}\n" if recent_events else ""
            # NOTE: the prompt text below is runtime behavior, sent verbatim to Oracle.
            oracle_request = {
                "type": "synthesis",
                "content": f"""{lyra_identity}
You are engaging with {context.identity}.
You have completed a step-by-step reasoning process. Now synthesize this into a comprehensive, helpful response.
ORIGINAL REQUEST: "{context.original_message}"
{recent_events_section}
{knowledge_summary}
SYNTHESIS INSTRUCTIONS:
- Create a natural, conversational response that directly addresses the user's request
- Integrate insights from all the information you gathered during reasoning
- Be specific and actionable when appropriate
- If you gathered system information, present it clearly
- If you found relevant memories or context, incorporate them naturally
- Handle any needed classification (sentiment, emotions, intent) or creative tasks (writing, poetry, styling) directly in your response
- Make the response feel cohesive, not like a list of separate findings
GOAL: Provide a complete, helpful answer that shows you understood their request and used the gathered information effectively.""",
                "identity": context.identity,
                "context": {}
            }
            # Get final response from Oracle
            result = await discovery_client.call_service(
                "oracle", "process", oracle_request, timeout=30.0
            )
            oracle_response = result.data if result.success else None
            if oracle_response and oracle_response.get("content"):
                final_response = oracle_response["content"]
                self.logger.debug(f"[💭] Final synthesis complete: {len(final_response)} characters")
                return final_response
            return None
        except Exception as e:
            self.logger.error(f"[💭] Error synthesizing final response: {e}")
            return None
async def analyze_interaction(
self,
context: IterativeContext,
user_message: str,
response_content: str
) -> Dict[str, Any]:
"""
Ask Oracle to analyze sentiment and depth of the interaction.
Returns: {"sentiment": str, "depth": float, "reasoning": str}
"""
try:
# Build analysis request
knowledge_summary = self.formatter.format_for_oracle(context)
analysis_request = {
"type": "interaction_analysis",
"original_message": user_message,
"lyra_response": response_content,
"knowledge_summary": knowledge_summary,
"identity": context.identity,
"metadata": {
"step_count": len(context.completed_steps),
"services_called": list(context.service_call_counts.keys()),
"response_length": len(response_content)
}
}
# Ask Oracle to analyze
self.logger.debug(f"[💭] Requesting interaction analysis from Oracle...")
result = await discovery_client.call_service(
"oracle", "process", analysis_request, timeout=15.0
)
if not result.success:
self.logger.error(f"[💭] Oracle analysis failed: {result.error}")
return {"sentiment": "positive", "depth": 0.3, "reasoning": "Analysis failed"}
analysis = result.data
sentiment = analysis.get("sentiment", "positive")
depth = analysis.get("depth", 0.3)
reasoning = analysis.get("reasoning", "")
self.logger.info(f"[💭] 📊 Oracle analysis: sentiment={sentiment}, depth={depth:.2f}")
self.logger.debug(f"[💭] Oracle reasoning: {reasoning}")
return {
"sentiment": sentiment,
"depth": depth,
"reasoning": reasoning
}
except Exception as e:
self.logger.error(f"[💭] Error analyzing interaction: {e}")
return {"sentiment": "positive", "depth": 0.3, "reasoning": f"Error: {str(e)}"}
def _parse_function_call(self, content: str) -> Optional[Dict[str, Any]]:
"""
Parse Python-like function call from Oracle's output.
Returns: {"function": "name", "args": {...}, "reasoning": "..."}
"""
# Valid function names
valid_functions = [
'short_memory', 'long_memory', 'facts', 'save_fact', 'update_fact',
'identity', 'search_relationships', 'health', 'duckduckgo',
'introduce', 'update_relationship', 'add_attribute', 'link_identity',
'todo_create', 'todo_update', 'todo_list', 'todo_complete',
'ready'
]
# Extract reasoning (lines starting with //)
reasoning_parts = []
for line in content.split('\n'):
if line.strip().startswith('//'):
reasoning_parts.append(line.strip()[2:].strip())
reasoning = " ".join(reasoning_parts) if reasoning_parts else ""
# Find function call - try multiple patterns
function_match = None
for func in valid_functions:
# Pattern: function_name(...) with any content inside
pattern = f'{func}\\s*\\(([^)]*)\\)'
match = re.search(pattern, content, re.IGNORECASE | re.DOTALL)
if match:
function_name = func
args_string = match.group(1).strip()
function_match = (function_name, args_string)
break
if not function_match:
return None
function_name, args_string = function_match
# Parse arguments
args = self._parse_function_args(args_string)
return {
'function': function_name,
'args': args,
'reasoning': reasoning or f"Oracle chose {function_name}"
}
def _parse_function_args(self, args_string: str) -> Dict[str, Any]:
"""Parse function arguments from string"""
args = {}
if not args_string:
return args
try:
# Better pattern that respects quoted strings with commas
# Matches: key=value where value can be quoted string, number, boolean, or JSON
kwarg_pattern = r'(\w+)\s*=\s*(?:"([^"\\]*(?:\\.[^"\\]*)*)"|\'([^\'\\]*(?:\\.[^\'\\]*)*)\'|(\{[^\}]*\})|(\[[^\]]*\])|([^,]+))'
matches = re.findall(kwarg_pattern, args_string)
for match in matches:
key = match[0]
# match[1] = double-quoted string, match[2] = single-quoted string
# match[3] = dict, match[4] = list, match[5] = unquoted value
if match[1]: # Double-quoted string
value = match[1]
# Unescape any escaped quotes
args[key] = value.replace('\\"', '"').replace('\\\\', '\\')
elif match[2]: # Single-quoted string
value = match[2]
# Unescape any escaped quotes
args[key] = value.replace("\\'", "'").replace('\\\\', '\\')
elif match[3]: # Dict
try:
# Try JSON parse, converting single quotes to double
json_str = match[3].replace("'", '"')
args[key] = json.loads(json_str)
except:
args[key] = match[3]
elif match[4]: # List
try:
# Try JSON parse, converting single quotes to double
json_str = match[4].replace("'", '"')
args[key] = json.loads(json_str)
except:
args[key] = match[4]
else: # Unquoted value (number, boolean, or bare string)
value = match[5].strip()
if value.lower() in ('true', 'false'):
args[key] = value.lower() == 'true'
elif value.lower() == 'none':
args[key] = None
else:
# Try as number
try:
args[key] = int(value)
except ValueError:
try:
args[key] = float(value)
except ValueError:
args[key] = value
except Exception as e:
self.logger.warning(f"[💭] Error parsing function args: {e}")
args = {}
return args