1
0
mirror of https://github.com/coleam00/Archon.git synced 2026-01-11 09:07:05 -05:00

- Fix the threading service to properly handle rate limiting.

- Fix the clipboard functionality to work on non local hosts and https
- Improvements in sockets on front-end and backend. Storing session in local browser storage for reconnect. Logic to prevent socket echoes causing rerender and performance issues.
- Fixes and updates to re-ordering logic in adding a new task, reordering items on the task table.
- Allowing assignee to not be hardcoded enum.
- Fix to Document Version Control (improvements still needed in the Milkdown editor conversion to store in the docs).
- Adding types to remove [any] typescript issues.
This commit is contained in:
sean-eskerium
2025-08-20 02:28:02 -04:00
parent cd22089b87
commit 1b5196d70f
44 changed files with 3490 additions and 331 deletions

View File

@@ -93,17 +93,18 @@ class RateLimiter:
self._clean_old_entries(now)
# Check if we can make the request
if not self._can_make_request(estimated_tokens):
while not self._can_make_request(estimated_tokens):
wait_time = self._calculate_wait_time(estimated_tokens)
if wait_time > 0:
logfire_logger.info(
f"Rate limiting: waiting {wait_time:.1f}s",
tokens=estimated_tokens,
current_usage=self._get_current_usage(),
f"Rate limiting: waiting {wait_time:.1f}s (tokens={estimated_tokens}, current_usage={self._get_current_usage()})"
)
await asyncio.sleep(wait_time)
return await self.acquire(estimated_tokens)
return False
# Clean old entries after waiting
now = time.time()
self._clean_old_entries(now)
else:
return False
# Record the request
self.request_times.append(now)
@@ -198,17 +199,13 @@ class MemoryAdaptiveDispatcher:
# Reduce workers when memory is high
workers = max(1, base // 2)
logfire_logger.warning(
"High memory usage detected, reducing workers",
memory_percent=metrics.memory_percent,
workers=workers,
f"High memory usage detected, reducing workers (memory_percent={metrics.memory_percent}, workers={workers})"
)
elif metrics.cpu_percent > self.config.cpu_threshold * 100:
# Reduce workers when CPU is high
workers = max(1, base // 2)
logfire_logger.warning(
"High CPU usage detected, reducing workers",
cpu_percent=metrics.cpu_percent,
workers=workers,
f"High CPU usage detected, reducing workers (cpu_percent={metrics.cpu_percent}, workers={workers})"
)
elif metrics.memory_percent < 50 and metrics.cpu_percent < 50:
# Increase workers when resources are available
@@ -238,12 +235,7 @@ class MemoryAdaptiveDispatcher:
semaphore = asyncio.Semaphore(optimal_workers)
logfire_logger.info(
"Starting adaptive processing",
items_count=len(items),
workers=optimal_workers,
mode=mode,
memory_percent=self.last_metrics.memory_percent,
cpu_percent=self.last_metrics.cpu_percent,
f"Starting adaptive processing (items_count={len(items)}, workers={optimal_workers}, mode={mode}, memory_percent={self.last_metrics.memory_percent}, cpu_percent={self.last_metrics.cpu_percent})"
)
# Track active workers
@@ -318,7 +310,7 @@ class MemoryAdaptiveDispatcher:
del active_workers[worker_id]
logfire_logger.error(
f"Processing failed for item {index}", error=str(e), item_index=index
f"Processing failed for item {index} (error={str(e)}, item_index={index})"
)
return None
@@ -333,11 +325,7 @@ class MemoryAdaptiveDispatcher:
success_rate = len(successful_results) / len(items) * 100
logfire_logger.info(
"Adaptive processing completed",
total_items=len(items),
successful=len(successful_results),
success_rate=f"{success_rate:.1f}%",
workers_used=optimal_workers,
f"Adaptive processing completed (total_items={len(items)}, successful={len(successful_results)}, success_rate={success_rate:.1f}%, workers_used={optimal_workers})"
)
return successful_results
@@ -355,7 +343,7 @@ class WebSocketSafeProcessor:
await websocket.accept()
self.active_connections.append(websocket)
logfire_logger.info(
"WebSocket client connected", total_connections=len(self.active_connections)
f"WebSocket client connected (total_connections={len(self.active_connections)})"
)
def disconnect(self, websocket: WebSocket):
@@ -363,7 +351,7 @@ class WebSocketSafeProcessor:
if websocket in self.active_connections:
self.active_connections.remove(websocket)
logfire_logger.info(
"WebSocket client disconnected", remaining_connections=len(self.active_connections)
f"WebSocket client disconnected (remaining_connections={len(self.active_connections)})"
)
async def broadcast_progress(self, message: dict[str, Any]):
@@ -474,7 +462,7 @@ class ThreadingService:
self._running = True
self._health_check_task = asyncio.create_task(self._health_check_loop())
logfire_logger.info("Threading service started", config=self.config.__dict__)
logfire_logger.info(f"Threading service started (config={self.config.__dict__})")
async def stop(self):
"""Stop the threading service"""
@@ -510,7 +498,7 @@ class ThreadingService:
finally:
duration = time.time() - start_time
logfire_logger.debug(
"Rate limited operation completed", duration=duration, tokens=estimated_tokens
f"Rate limited operation completed (duration={duration}, tokens={estimated_tokens})"
)
async def run_cpu_intensive(self, func: Callable, *args, **kwargs) -> Any:
@@ -562,37 +550,30 @@ class ThreadingService:
# Log system metrics
logfire_logger.info(
"System health check",
memory_percent=metrics.memory_percent,
cpu_percent=metrics.cpu_percent,
available_memory_gb=metrics.available_memory_gb,
active_threads=metrics.active_threads,
active_websockets=len(self.websocket_processor.active_connections),
f"System health check (memory_percent={metrics.memory_percent}, cpu_percent={metrics.cpu_percent}, available_memory_gb={metrics.available_memory_gb}, active_threads={metrics.active_threads}, active_websockets={len(self.websocket_processor.active_connections)})"
)
# Alert on critical thresholds
if metrics.memory_percent > 90:
logfire_logger.warning(
"Critical memory usage", memory_percent=metrics.memory_percent
f"Critical memory usage (memory_percent={metrics.memory_percent})"
)
# Force garbage collection
gc.collect()
if metrics.cpu_percent > 95:
logfire_logger.warning("Critical CPU usage", cpu_percent=metrics.cpu_percent)
logfire_logger.warning(f"Critical CPU usage (cpu_percent={metrics.cpu_percent})")
# Check for memory leaks (too many threads)
if metrics.active_threads > self.config.max_workers * 3:
logfire_logger.warning(
"High thread count detected",
active_threads=metrics.active_threads,
max_expected=self.config.max_workers * 3,
f"High thread count detected (active_threads={metrics.active_threads}, max_expected={self.config.max_workers * 3})"
)
await asyncio.sleep(self.config.health_check_interval)
except Exception as e:
logfire_logger.error("Health check failed", error=str(e))
logfire_logger.error(f"Health check failed (error={str(e)})")
await asyncio.sleep(self.config.health_check_interval)