Add Oracle service - LLM wrapper

First service for Vi's nervous system:
- Oracle service with NATS integration
- vLLM backend for Qwen3-32B
- GPTQ quantization support
- Thinking mode sampling configs

Simplified from Lyra's patterns, ready to test.

🦊
Alex Kazaiev
2026-01-02 13:19:15 -06:00
parent e2d24a66f1
commit ee1cb5540a
8 changed files with 552 additions and 0 deletions


@@ -0,0 +1,4 @@
# LLM module for Oracle service
from .llm_manager import LLMManager
__all__ = ['LLMManager']
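For orientation, here is a minimal sketch of driving the exported LLMManager end to end. The package import path and the prompt are assumptions (the diff only shows relative imports), so treat this as a usage sketch rather than part of the commit:

import asyncio
from llm import LLMManager  # assumed package name; only relative imports appear in this diff

async def main():
    # LLMManager() with no argument falls back to the ORACLE_MODEL_PATH env var
    manager = LLMManager()
    if not await manager.load_model():
        raise SystemExit("model failed to load")
    reply = await manager.generate_response("Hello, Vi.", enable_thinking=True)
    print(reply)
    await manager.unload_model()

asyncio.run(main())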


@@ -0,0 +1,69 @@
"""
Text generation using vLLM.
"""
from typing import Optional
from core.logger import setup_logger
logger = setup_logger('text_generator', service_name='oracle_service')
class TextGenerator:
"""Text generation with vLLM"""
def __init__(self, llm_model, sampling_config: dict):
self.llm = llm_model
self.sampling_config = sampling_config
def generate(
self,
prompt: str,
max_tokens: Optional[int] = None,
temperature: Optional[float] = None,
top_p: Optional[float] = None,
top_k: Optional[int] = None,
min_p: Optional[float] = None
) -> Optional[str]:
"""Generate text using vLLM"""
if not self.llm:
logger.error("[✺] LLM not initialized")
return None
try:
params = self.sampling_config.copy()
if max_tokens is not None:
params["max_new_tokens"] = max_tokens
if temperature is not None:
params["temperature"] = temperature
if top_p is not None:
params["top_p"] = top_p
if top_k is not None:
params["top_k"] = top_k
if min_p is not None:
params["min_p"] = min_p
from vllm import SamplingParams
sampling_params = SamplingParams(
temperature=params["temperature"],
top_p=params["top_p"],
top_k=params.get("top_k", -1),
min_p=params.get("min_p", 0.0),
max_tokens=params["max_new_tokens"],
repetition_penalty=params["repetition_penalty"]
)
outputs = self.llm.generate([prompt], sampling_params)
if outputs and outputs[0].outputs:
raw_text = outputs[0].outputs[0].text
logger.info(f"[✺] Generated {len(raw_text)} chars")
return raw_text.strip()
else:
logger.warning("[✺] Empty output")
return ""
except Exception as e:
logger.error(f"[✺] Generation failed: {e}")
return None
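A short sketch of calling TextGenerator directly. The import path and model path are assumptions taken from the rest of the commit; the sampling config mirrors the thinking-mode defaults that LLMManager passes in, and per-call arguments override it:

from vllm import LLM
from llm.generator import TextGenerator  # assumed import path for the file above

# Assumed checkpoint location; matches the loader's default further down.
llm = LLM(model="/data/models/Qwen3-32B-GPTQ-Int4",
          quantization="gptq_marlin", dtype="float16", max_model_len=8192)
gen = TextGenerator(llm, {
    "temperature": 0.6, "top_p": 0.95, "top_k": 20,
    "min_p": 0.0, "max_new_tokens": 512, "repetition_penalty": 1.1,
})
# Per-call arguments override the config defaults; "max_new_tokens" maps to
# SamplingParams(max_tokens=...) inside generate().
text = gen.generate("Summarize the Oracle service in one sentence.",
                    temperature=0.7, max_tokens=256)
print(text)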


@@ -0,0 +1,127 @@
"""
LLM Manager for Vi's Oracle service.
Coordinates model loading and text generation.
"""
import time
from typing import Optional, Dict, Any
from core.logger import setup_logger
from .model_loader import ModelLoader
from .generator import TextGenerator
logger = setup_logger('llm_manager', service_name='oracle_service')
class LLMManager:
"""High-level LLM manager"""
def __init__(self, model_path: Optional[str] = None):
self.model_loader = ModelLoader(model_path)
self.generator = None
# Sampling config for Qwen3 thinking mode
self.thinking_mode_config = {
"temperature": 0.6,
"top_p": 0.95,
"top_k": 20,
"min_p": 0.0,
"max_new_tokens": 8192,
"repetition_penalty": 1.1,
"do_sample": True,
}
self.non_thinking_mode_config = {
"temperature": 0.7,
"top_p": 0.8,
"top_k": 20,
"min_p": 0.0,
"max_new_tokens": 8192,
"repetition_penalty": 1.1,
"do_sample": True,
}
self.sampling_config = self.thinking_mode_config.copy()
@property
def is_loaded(self) -> bool:
return self.model_loader.is_loaded
@property
def model_path(self) -> str:
return self.model_loader.model_path
@property
def model_name(self) -> Optional[str]:
return self.model_loader.model_name
@property
def backend_type(self) -> Optional[str]:
return self.model_loader.backend_type
async def load_model(self, model_path: Optional[str] = None) -> bool:
"""Load model and initialize generator"""
success = await self.model_loader.load_model(model_path)
if success and self.model_loader.llm:
self.generator = TextGenerator(self.model_loader.llm, self.sampling_config)
logger.info("[✺] TextGenerator initialized")
return success
async def unload_model(self):
"""Unload model"""
self.generator = None
await self.model_loader.unload_model()
def get_model_info(self) -> Dict[str, Any]:
"""Get model information"""
return self.model_loader.get_model_info()
async def generate_response(
self,
prompt: str,
temperature: Optional[float] = None,
max_tokens: Optional[int] = None,
enable_thinking: bool = True
) -> Optional[str]:
"""Generate a response using the loaded model"""
try:
if not self.is_loaded:
logger.warning("[✺] Model not loaded")
if not await self.load_model():
return "I'm having trouble thinking right now."
mode_config = self.thinking_mode_config if enable_thinking else self.non_thinking_mode_config
if temperature is None:
temperature = mode_config["temperature"]
if max_tokens is None:
max_tokens = mode_config["max_new_tokens"]
logger.info(f"[✺] Generating - temp: {temperature}, max_tokens: {max_tokens}")
start_time = time.time()
raw_text = self.generator.generate(
prompt,
max_tokens=max_tokens,
temperature=temperature,
top_p=mode_config["top_p"],
top_k=mode_config["top_k"],
min_p=mode_config["min_p"]
)
elapsed = time.time() - start_time
if raw_text:
logger.info(f"[✺] Generated {len(raw_text)} chars in {elapsed:.2f}s")
return raw_text.strip()
else:
logger.warning("[✺] Empty response")
return ""
except Exception as e:
logger.error(f"[✺] Generation failed: {e}")
return "I encountered an error while thinking."


@@ -0,0 +1,164 @@
"""
Model loading for vLLM backend.
"""
import os
from typing import Optional, Dict, Any
from core.logger import setup_logger
logger = setup_logger('model_loader', service_name='oracle_service')
class ModelLoader:
"""Handles model loading with vLLM"""
def __init__(self, model_path: Optional[str] = None):
if model_path is None:
model_path = os.getenv('ORACLE_MODEL_PATH', "/data/models/Qwen3-32B-GPTQ-Int4")
self.model_path = model_path
if model_path.startswith(('/', './', '../')):
self.model_cache_dir = os.path.dirname(model_path)
else:
self.model_cache_dir = os.getenv('HF_HOME', "/data/models")
self.llm = None
self.model_name: Optional[str] = None
self.backend_type: Optional[str] = None
self.is_loaded = False
async def load_model(self, model_path: Optional[str] = None) -> bool:
"""Load model using vLLM"""
try:
target_model = model_path or self.model_path
logger.info(f"[✺] Loading model: {target_model}")
if self.is_loaded and self.model_name == target_model:
logger.info("[✺] Model already loaded")
return True
if not self._validate_model_path(target_model):
logger.error(f"[✺] Model not found: {target_model}")
return False
return await self._load_vllm(target_model)
except Exception as e:
logger.exception(f"[✺] Failed to load model: {e}")
self.is_loaded = False
return False
def _validate_model_path(self, model_path: str) -> bool:
"""Check if model exists locally"""
if not model_path.startswith('/'):
return False
if not os.path.exists(model_path) or not os.path.isdir(model_path):
return False
# Check for config.json
if not os.path.exists(os.path.join(model_path, 'config.json')):
return False
# Check for model files
has_model = (
os.path.exists(os.path.join(model_path, 'model.safetensors')) or
os.path.exists(os.path.join(model_path, 'pytorch_model.bin')) or
os.path.exists(os.path.join(model_path, 'model.safetensors.index.json'))
)
return has_model
async def _load_vllm(self, model_path: str) -> bool:
"""Load with vLLM"""
try:
from vllm import LLM
import torch
if not torch.cuda.is_available():
logger.warning("[✺] CUDA not available")
return False
gpu_name = torch.cuda.get_device_name(0)
gpu_memory = torch.cuda.get_device_properties(0).total_memory / 1024**3
logger.info(f"[✺] GPU: {gpu_name}, {gpu_memory:.1f}GB")
is_gptq = 'gptq' in model_path.lower()
max_context = int(os.getenv("MAX_MODEL_LEN", "8192"))
vllm_config = {
"model": model_path,
"gpu_memory_utilization": float(os.getenv("GPU_MEMORY_UTILIZATION", "0.90")),
"max_model_len": max_context,
"tensor_parallel_size": 1,
"trust_remote_code": False,
"download_dir": self.model_cache_dir,
"disable_log_stats": True,
"max_num_seqs": 16,
"swap_space": 4,
}
if is_gptq:
vllm_config["dtype"] = "float16"
vllm_config["quantization"] = "gptq_marlin"
logger.info("[✺] GPTQ mode enabled")
logger.info(f"[✺] Initializing vLLM - max_context: {max_context}")
self.llm = LLM(**vllm_config)
self.backend_type = "vllm"
self.model_name = model_path
self.is_loaded = True
logger.info(f"[✺] Model loaded: {model_path}")
return True
except Exception as e:
logger.error(f"[✺] vLLM failed: {e}")
return False
async def unload_model(self):
"""Unload model"""
try:
if self.llm is not None:
del self.llm
self.llm = None
self.model_name = None
self.backend_type = None
self.is_loaded = False
try:
import torch
if torch.cuda.is_available():
torch.cuda.empty_cache()
except ImportError:
pass
logger.info("[✺] Model unloaded")
except Exception as e:
logger.exception(f"[✺] Unload error: {e}")
def get_model_info(self) -> Dict[str, Any]:
"""Get model info"""
info = {
"model_name": self.model_name,
"model_path": self.model_path,
"backend_type": self.backend_type,
"is_loaded": self.is_loaded,
}
try:
import torch
if torch.cuda.is_available():
info["gpu_available"] = True
info["gpu_memory_total"] = torch.cuda.get_device_properties(0).total_memory / 1024**3
info["gpu_memory_reserved"] = torch.cuda.memory_reserved(0) / 1024**3
else:
info["gpu_available"] = False
except ImportError:
info["gpu_available"] = False
return info
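
The NATS integration mentioned in the commit message is not part of the files shown here. For completeness, a rough sketch of how the manager might be wired to a request subject with nats-py — the server URL, subject name, and payload shape are all assumptions:

import asyncio
import json
import nats  # nats-py client; the actual transport code isn't in this diff

from llm import LLMManager  # assumed package name

async def main():
    manager = LLMManager()
    await manager.load_model()
    nc = await nats.connect("nats://nats:4222")  # assumed server address

    async def handle(msg):
        req = json.loads(msg.data)
        # vLLM generation is synchronous, so each request blocks the event loop
        # for the duration of the call, mirroring the code above.
        reply = await manager.generate_response(
            req["prompt"], enable_thinking=req.get("thinking", True)
        )
        await msg.respond(json.dumps({"text": reply}).encode())

    await nc.subscribe("oracle.generate", cb=handle)  # hypothetical subject name
    await asyncio.Event().wait()  # keep the service alive

asyncio.run(main())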