This commit is contained in:
Alex
2026-02-08 17:47:49 -06:00
parent 09117f9c62
commit 41dd6d9a64
7 changed files with 465 additions and 569 deletions

View File

@@ -179,7 +179,7 @@ This prevents messages from being overwritten when using Claude Desktop on multi
- **Timed wakes**: CMD+R ensures chat is synced before system check message
- **Matrix wakes**: CMD+R ensures you see the latest conversation context
The daemon waits **10 seconds** after sending CMD+R to ensure the refresh completes before sending the message. The daemon waits **15 seconds** after sending CMD+R to ensure the refresh completes before sending the message.
**Why this matters:**
- Multi-device sync: If you're active on mobile/web, desktop chat stays current
@@ -277,14 +277,6 @@ def generate_message() -> str:
return f"Your custom message template with {timestamp}" return f"Your custom message template with {timestamp}"
``` ```
### Grace Period
After sending a message, the daemon waits 30 seconds for Claude to call MCP tools. Adjust in `automation_daemon.py` line 246:
```python
grace_period = 30 # seconds
```
## Monitoring

### Logs
@@ -707,7 +699,7 @@ claude-desktop-automation/
⚠️ **macOS only** - Uses AppleScript and launchd (Linux port would require xdotool/ydotool)
⚠️ **Requires Accessibility** - Special macOS permissions for UI automation
⚠️ **Screen saver handled** - Uses `caffeinate` to wake screen automatically
⚠️ **Fixed refresh delay** - Waits 10 seconds after CMD+R (configurable if needed) ⚠️ **Fixed refresh delay** - Waits 15 seconds after CMD+R (configurable if needed)
## Future Plans ## Future Plans

79
matrix_helpers.py Normal file
View File

@@ -0,0 +1,79 @@
#!/usr/bin/env python3
"""
Matrix Helper Utilities
Shared utilities for Matrix client operations, reducing code duplication
across matrix_mcp.py and matrix_integration.py.
"""
import json
import logging
from contextlib import asynccontextmanager
from pathlib import Path
from typing import AsyncIterator
# Fail fast with an actionable install hint if the third-party client
# library is missing — every helper in this module needs it.
try:
    from nio import AsyncClient
except ImportError:
    raise ImportError(
        "matrix-nio not installed. Install with: pip3 install matrix-nio\n"
        "Note: Without [e2e] extra, only unencrypted rooms are supported"
    )
# Configuration
CREDENTIALS_FILE = Path.home() / ".matrix-credentials.json"  # created by setup_matrix.sh
MATRIX_DATA_DIR = Path.home() / ".matrix-data"  # nio store_path (E2EE store)
logger = logging.getLogger(__name__)
def load_credentials() -> dict:
    """Read and parse the Matrix credentials JSON file.

    Returns:
        dict: Stored credential fields (homeserver, user_id,
        access_token, device_id, ...).

    Raises:
        FileNotFoundError: If the credentials file has not been created yet.
        Exception: Re-raised (after logging) when the file cannot be
            read or parsed.
    """
    if not CREDENTIALS_FILE.exists():
        raise FileNotFoundError(
            f"Credentials file not found: {CREDENTIALS_FILE}\n"
            f"Run setup_matrix.sh to create credentials"
        )
    try:
        with CREDENTIALS_FILE.open('r') as handle:
            return json.load(handle)
    except Exception as exc:
        # Log for the daemon's log file, then let the caller decide.
        logger.error(f"Failed to load credentials: {exc}")
        raise
@asynccontextmanager
async def matrix_client() -> AsyncIterator[AsyncClient]:
    """Provide an authenticated Matrix client as an async context manager.

    Loads stored credentials, constructs the client, restores the saved
    session, and guarantees the connection is closed on exit — even when
    the body raises — preventing resource leaks.

    Usage:
        async with matrix_client() as client:
            response = await client.room_send(...)

    Yields:
        AsyncClient: Configured and authenticated Matrix client
    """
    credentials = load_credentials()

    # The E2EE store lives under MATRIX_DATA_DIR; make sure it exists.
    MATRIX_DATA_DIR.mkdir(exist_ok=True)

    session = AsyncClient(
        homeserver=credentials['homeserver'],
        user=credentials['user_id'],
        store_path=str(MATRIX_DATA_DIR),
    )
    # Reuse the saved token/device rather than logging in again.
    session.access_token = credentials['access_token']
    session.device_id = credentials['device_id']

    try:
        yield session
    finally:
        # Always release the underlying connection, success or failure.
        await session.close()

View File

@@ -449,7 +449,7 @@ class MatrixMonitor:
# Trigger wake if rate limit allows # Trigger wake if rate limit allows
await self._maybe_trigger_wake() await self._maybe_trigger_wake()
else: else:
logger.warning(f"Failed to download/compress image: {event.body}") logger.warning(f"Image rejected (too large or download failed): {event.body}")
except Exception as e: except Exception as e:
logger.error(f"Error processing image: {e}") logger.error(f"Error processing image: {e}")
@@ -596,7 +596,19 @@ class MatrixMonitor:
buffer = BytesIO() buffer = BytesIO()
final_img = img.resize((800, 600), Image.Resampling.LANCZOS) final_img = img.resize((800, 600), Image.Resampling.LANCZOS)
final_img.save(buffer, format='JPEG', quality=50, optimize=True) final_img.save(buffer, format='JPEG', quality=50, optimize=True)
return buffer.getvalue() final_data = buffer.getvalue()
# Final size validation - reject if still too large
final_base64 = base64.b64encode(final_data)
final_size_mb = len(final_base64) / (1024 * 1024)
if final_size_mb > target_size_mb:
logger.error(
f"Image still too large after all compression attempts: "
f"{final_size_mb:.2f}MB base64 (limit: {target_size_mb}MB)"
)
return None
return final_data
except Exception as e: except Exception as e:
logger.error(f"Compression failed: {e}") logger.error(f"Compression failed: {e}")

File diff suppressed because it is too large Load Diff

View File

@@ -81,7 +81,7 @@ def send_refresh(delay_seconds=REFRESH_DELAY_SECONDS):
Send CMD+R to refresh and wait for completion Send CMD+R to refresh and wait for completion
Args: Args:
delay_seconds: Time to wait after CMD+R (default: 10) delay_seconds: Time to wait after CMD+R (default: 15)
Returns: Returns:
bool: True if successful bool: True if successful

View File

@@ -9,11 +9,15 @@ Day 83: Added movement tracking to filter static false positives (posters!)
""" """
import json import json
import logging
import sqlite3 import sqlite3
import requests import requests
from pathlib import Path from pathlib import Path
from datetime import datetime, timedelta from datetime import datetime, timedelta
# Setup logging
logger = logging.getLogger("vixy_status")
# Service endpoints # Service endpoints
ENVIRO_URL = "http://eye1.local:8767" ENVIRO_URL = "http://eye1.local:8767"
OAK_URL = "http://head-vixy.local:8100" OAK_URL = "http://head-vixy.local:8100"
@@ -90,8 +94,8 @@ def get_enviro_status() -> str:
humidity = data.get('humidity', 0) humidity = data.get('humidity', 0)
light = data.get('light', 0) light = data.get('light', 0)
return f"Basement: {temp_f:.1f}F, {humidity:.1f}% humidity, {light:.1f} lux" return f"Basement: {temp_f:.1f}F, {humidity:.1f}% humidity, {light:.1f} lux"
except Exception: except Exception as e:
pass logger.warning(f"Enviro service unavailable: {e}")
return "Basement: sensors unavailable" return "Basement: sensors unavailable"
@@ -139,8 +143,8 @@ def get_presence_status() -> str:
return f"Foxy: away (last seen {last_seen:.0f}s ago)" return f"Foxy: away (last seen {last_seen:.0f}s ago)"
else: else:
return "Foxy: away" return "Foxy: away"
except Exception: except Exception as e:
pass logger.warning(f"OAK-D presence service unavailable: {e}")
return None # Return None to omit line if camera unavailable return None # Return None to omit line if camera unavailable
@@ -167,8 +171,8 @@ def get_sound_status() -> str:
return f"{category} ({top_score}% {classes_str})" return f"{category} ({top_score}% {classes_str})"
else: else:
return f"{category}" return f"{category}"
except Exception: except Exception as e:
pass logger.warning(f"Headmic sound service unavailable: {e}")
return None # Return None to omit line if service unavailable return None # Return None to omit line if service unavailable
@@ -178,60 +182,125 @@ def get_matrix_status() -> str:
if STATE_FILE.exists(): if STATE_FILE.exists():
with open(STATE_FILE, 'r') as f: with open(STATE_FILE, 'r') as f:
state = json.load(f) state = json.load(f)
messages = state.get('matrix_messages', []) messages = state.get('matrix_messages', [])
unprocessed = [m for m in messages if not m.get('processed', False)] unprocessed = [m for m in messages if not m.get('processed', False)]
if unprocessed: if unprocessed:
return f"Matrix: {len(unprocessed)} new message(s)" return f"Matrix: {len(unprocessed)} new message(s)"
else: else:
return "Matrix: no new messages" return "Matrix: no new messages"
except Exception: except Exception as e:
pass logger.warning(f"Matrix status unavailable: {e}")
return "Matrix: status unavailable" return "Matrix: status unavailable"
def get_vision_status() -> str: def get_vision_status() -> str:
"""Get vision/motion event status from SQLite database""" """Get vision/motion event status from SQLite database.
Uses object detection data when available to show what was seen
(person, cat, etc.) rather than just raw motion counts.
"""
try: try:
if not EVENTS_DB.exists(): if not EVENTS_DB.exists():
return "Vision: no events database" return "no events database"
conn = sqlite3.connect(str(EVENTS_DB))
conn.row_factory = sqlite3.Row
# Get events from last 2 hours # Get events from last 2 hours
cutoff = (datetime.now() - timedelta(hours=2)).isoformat() cutoff = (datetime.now() - timedelta(hours=2)).isoformat()
cursor = conn.execute( with sqlite3.connect(str(EVENTS_DB)) as conn:
"""SELECT camera_id, annotation FROM events conn.row_factory = sqlite3.Row
WHERE timestamp > ? # Check if detections column exists
ORDER BY timestamp DESC""", columns = [row[1] for row in conn.execute("PRAGMA table_info(events)").fetchall()]
(cutoff,) has_detections = "detections" in columns
)
if has_detections:
events = cursor.fetchall() cursor = conn.execute(
conn.close() """SELECT camera_id, event_type, detections, timestamp FROM events
WHERE timestamp > ?
ORDER BY timestamp DESC""",
(cutoff,)
)
else:
cursor = conn.execute(
"""SELECT camera_id, event_type, NULL as detections, timestamp FROM events
WHERE timestamp > ?
ORDER BY timestamp DESC""",
(cutoff,)
)
events = cursor.fetchall()
if not events: if not events:
return "Vision: no recent motion" return "no recent activity"
# Count by camera # Count labels per camera, track most recent detection
by_camera = {} # Structure: {camera: {label: count}}
unannotated = 0 camera_labels = {}
camera_motion_only = {}
latest_detection = None # (label, camera, timestamp)
for event in events: for event in events:
cam = event['camera_id'] or 'unknown' cam = event['camera_id'] or 'unknown'
by_camera[cam] = by_camera.get(cam, 0) + 1
if not event['annotation']: # Parse detections JSON
unannotated += 1 dets = None
if event['detections']:
total = len(events) try:
camera_breakdown = ", ".join(f"{cam}: {count}" for cam, count in by_camera.items()) dets = json.loads(event['detections'])
except (json.JSONDecodeError, TypeError):
return f"{camera_breakdown}" pass
if dets:
if cam not in camera_labels:
camera_labels[cam] = {}
for d in dets:
label = d.get('label', 'unknown')
camera_labels[cam][label] = camera_labels[cam].get(label, 0) + 1
# Track most recent detection (first one since ordered DESC)
if latest_detection is None:
latest_detection = (dets[0].get('label', 'unknown'), cam, event['timestamp'])
else:
camera_motion_only[cam] = camera_motion_only.get(cam, 0) + 1
# Format per-camera summaries
cam_parts = []
all_cams = sorted(set(list(camera_labels.keys()) + list(camera_motion_only.keys())))
for cam in all_cams:
labels = camera_labels.get(cam, {})
motion_count = camera_motion_only.get(cam, 0)
parts = []
if labels:
# Sort by count descending
for label, count in sorted(labels.items(), key=lambda x: -x[1]):
parts.append(f"{count} {label}")
if motion_count:
parts.append(f"{motion_count} motion")
cam_parts.append(f"{cam}: {', '.join(parts)}")
result = " | ".join(cam_parts)
# Add "last seen" for most recent detection
if latest_detection:
label, cam, ts = latest_detection
try:
event_time = datetime.fromisoformat(ts.replace('Z', '+00:00'))
now = datetime.now(event_time.tzinfo)
mins_ago = int((now - event_time).total_seconds() / 60)
if mins_ago < 1:
result += f" (last: {label} in {cam}, just now)"
else:
result += f" (last: {label} in {cam}, {mins_ago}m ago)"
except Exception:
result += f" (last: {label} in {cam})"
return result
except Exception as e: except Exception as e:
return f"Vision: error ({e})" return f"error ({e})"
def format_status_for_wakeup() -> str: def format_status_for_wakeup() -> str:

View File

@@ -221,7 +221,7 @@ def get_status() -> str:
last_wake = datetime.fromisoformat(state['last_wake']) last_wake = datetime.fromisoformat(state['last_wake'])
time_ago = datetime.now() - last_wake time_ago = datetime.now() - last_wake
status_lines.append(f"Last wake: {last_wake.strftime('%Y-%m-%d %H:%M:%S')} ({int(time_ago.total_seconds()/60)} min ago)") status_lines.append(f"Last wake: {last_wake.strftime('%Y-%m-%d %H:%M:%S')} ({int(time_ago.total_seconds()/60)} min ago)")
except: except Exception:
status_lines.append(f"Last wake: {state.get('last_wake')}") status_lines.append(f"Last wake: {state.get('last_wake')}")
else: else:
status_lines.append("Last wake: Never (daemon just started)") status_lines.append("Last wake: Never (daemon just started)")
@@ -235,7 +235,7 @@ def get_status() -> str:
status_lines.append(f"Next wake: {next_wake.strftime('%Y-%m-%d %H:%M:%S')} (in {int(time_until.total_seconds()/60)} min)") status_lines.append(f"Next wake: {next_wake.strftime('%Y-%m-%d %H:%M:%S')} (in {int(time_until.total_seconds()/60)} min)")
else: else:
status_lines.append(f"Next wake: OVERDUE (was {next_wake.strftime('%Y-%m-%d %H:%M:%S')})") status_lines.append(f"Next wake: OVERDUE (was {next_wake.strftime('%Y-%m-%d %H:%M:%S')})")
except: except Exception:
status_lines.append(f"Next wake: {state.get('next_wake_timestamp')}") status_lines.append(f"Next wake: {state.get('next_wake_timestamp')}")
else: else:
interval = state.get('interval_minutes', 60) interval = state.get('interval_minutes', 60)