Code review updates and moving the prp-review step to before the Commit.

This commit is contained in:
sean-eskerium
2025-10-31 22:21:40 -04:00
parent ea88d754d4
commit a292ce2dfb
26 changed files with 769 additions and 201 deletions

View File

@@ -67,6 +67,11 @@ components/ # Legacy components (migrating)
**Purpose**: Document processing, code analysis, project generation
**Port**: 8052
### Agent Work Orders (Optional)
**Location**: `python/src/agent_work_orders/`
**Purpose**: Workflow execution engine using Claude Code CLI
**Port**: 8053
## API Structure
### RESTful Endpoints

View File

@@ -204,12 +204,13 @@ The reset script safely removes all tables, functions, triggers, and policies wi
### Core Services
| Service | Container Name | Default URL | Purpose |
| ------------------ | -------------- | --------------------- | --------------------------------- |
| **Web Interface** | archon-ui | http://localhost:3737 | Main dashboard and controls |
| **API Service** | archon-server | http://localhost:8181 | Web crawling, document processing |
| **MCP Server** | archon-mcp | http://localhost:8051 | Model Context Protocol interface |
| **Agents Service** | archon-agents | http://localhost:8052 | AI/ML operations, reranking |
| Service | Container Name | Default URL | Purpose |
| -------------------------- | -------------------------- | --------------------- | ------------------------------------------ |
| **Web Interface** | archon-ui | http://localhost:3737 | Main dashboard and controls |
| **API Service** | archon-server | http://localhost:8181 | Web crawling, document processing |
| **MCP Server** | archon-mcp | http://localhost:8051 | Model Context Protocol interface |
| **Agents Service** | archon-agents | http://localhost:8052 | AI/ML operations, reranking |
| **Agent Work Orders** *(optional)* | archon-agent-work-orders | http://localhost:8053 | Workflow execution with Claude Code CLI |
## Upgrading
@@ -293,12 +294,13 @@ Archon uses true microservices architecture with clear separation of concerns:
### Service Responsibilities
| Service | Location | Purpose | Key Features |
| -------------- | -------------------- | ---------------------------- | ------------------------------------------------------------------ |
| **Frontend** | `archon-ui-main/` | Web interface and dashboard | React, TypeScript, TailwindCSS, Socket.IO client |
| **Server** | `python/src/server/` | Core business logic and APIs | FastAPI, service layer, Socket.IO broadcasts, all ML/AI operations |
| **MCP Server** | `python/src/mcp/` | MCP protocol interface | Lightweight HTTP wrapper, MCP tools, session management |
| **Agents** | `python/src/agents/` | PydanticAI agent hosting | Document and RAG agents, streaming responses |
| Service | Location | Purpose | Key Features |
| ------------------------ | ------------------------------ | -------------------------------- | ------------------------------------------------------------------ |
| **Frontend** | `archon-ui-main/` | Web interface and dashboard | React, TypeScript, TailwindCSS, Socket.IO client |
| **Server** | `python/src/server/` | Core business logic and APIs | FastAPI, service layer, Socket.IO broadcasts, all ML/AI operations |
| **MCP Server** | `python/src/mcp/` | MCP protocol interface | Lightweight HTTP wrapper, MCP tools, session management |
| **Agents** | `python/src/agents/` | PydanticAI agent hosting | Document and RAG agents, streaming responses |
| **Agent Work Orders** *(optional)* | `python/src/agent_work_orders/` | Workflow execution engine | Claude Code CLI automation, repository management, SSE updates |
### Communication Patterns
@@ -321,7 +323,8 @@ By default, Archon services run on the following ports:
- **archon-ui**: 3737
- **archon-server**: 8181
- **archon-mcp**: 8051
- **archon-agents**: 8052
- **archon-agents**: 8052 (optional)
- **archon-agent-work-orders**: 8053 (optional)
- **archon-docs**: 3838 (optional)
### Changing Ports
@@ -334,6 +337,7 @@ ARCHON_UI_PORT=3737
ARCHON_SERVER_PORT=8181
ARCHON_MCP_PORT=8051
ARCHON_AGENTS_PORT=8052
AGENT_WORK_ORDERS_PORT=8053
ARCHON_DOCS_PORT=3838
```

View File

@@ -1,4 +1,4 @@
import { BookOpen, Bot, Palette, Settings, TestTube } from "lucide-react";
import { BookOpen, Bot, Palette, Settings } from "lucide-react";
import type React from "react";
import { Link, useLocation } from "react-router-dom";
// TEMPORARY: Use old SettingsContext until settings are migrated

View File

@@ -30,9 +30,9 @@ const WORKFLOW_STEPS: { value: WorkflowStep; label: string; description: string;
{ value: "create-branch", label: "Create Branch", description: "Create a new git branch for isolated work" },
{ value: "planning", label: "Planning", description: "Generate implementation plan" },
{ value: "execute", label: "Execute", description: "Implement the planned changes" },
{ value: "prp-review", label: "Review/Fix", description: "Review implementation and fix issues", dependsOn: ["execute"] },
{ value: "commit", label: "Commit", description: "Commit changes to git", dependsOn: ["execute"] },
{ value: "create-pr", label: "Create PR", description: "Create pull request", dependsOn: ["execute"] },
{ value: "prp-review", label: "PRP Review", description: "Review against PRP document" },
{ value: "create-pr", label: "Create PR", description: "Create pull request", dependsOn: ["commit"] },
];
/**
@@ -58,11 +58,27 @@ export function AddRepositoryModal({ open, onOpenChange }: AddRepositoryModalPro
/**
* Toggle workflow step selection
* When unchecking a step, also uncheck steps that depend on it (cascade removal)
*/
const toggleStep = (step: WorkflowStep) => {
setSelectedSteps((prev) => {
if (prev.includes(step)) {
return prev.filter((s) => s !== step);
// Removing a step - also remove steps that depend on it
const stepsToRemove = new Set([step]);
// Find all steps that transitively depend on the one being removed (cascade)
let changed = true;
while (changed) {
changed = false;
WORKFLOW_STEPS.forEach((s) => {
if (!stepsToRemove.has(s.value) && s.dependsOn?.some((dep) => stepsToRemove.has(dep))) {
stepsToRemove.add(s.value);
changed = true;
}
});
}
return prev.filter((s) => !stepsToRemove.has(s));
}
return [...prev, step];
});

View File

@@ -34,9 +34,9 @@ const WORKFLOW_STEPS: { value: WorkflowStep; label: string; dependsOn?: Workflow
{ value: "create-branch", label: "Create Branch" },
{ value: "planning", label: "Planning" },
{ value: "execute", label: "Execute" },
{ value: "prp-review", label: "Review/Fix", dependsOn: ["execute"] },
{ value: "commit", label: "Commit Changes", dependsOn: ["execute"] },
{ value: "create-pr", label: "Create Pull Request", dependsOn: ["execute"] },
{ value: "prp-review", label: "PRP Review" },
{ value: "create-pr", label: "Create Pull Request", dependsOn: ["commit"] },
];
export function CreateWorkOrderModal({ open, onOpenChange }: CreateWorkOrderModalProps) {
@@ -51,7 +51,7 @@ export function CreateWorkOrderModal({ open, onOpenChange }: CreateWorkOrderModa
const [sandboxType, setSandboxType] = useState<SandboxType>("git_worktree");
const [userRequest, setUserRequest] = useState("");
const [githubIssueNumber, setGithubIssueNumber] = useState("");
const [selectedCommands, setSelectedCommands] = useState<WorkflowStep[]>(["create-branch", "planning", "execute"]);
const [selectedCommands, setSelectedCommands] = useState<WorkflowStep[]>(["create-branch", "planning", "execute", "prp-review", "commit", "create-pr"]);
const [error, setError] = useState("");
const [isSubmitting, setIsSubmitting] = useState(false);
@@ -85,11 +85,27 @@ export function CreateWorkOrderModal({ open, onOpenChange }: CreateWorkOrderModa
/**
* Toggle workflow step selection
* When unchecking a step, also uncheck steps that depend on it (cascade removal)
*/
const toggleStep = (step: WorkflowStep) => {
setSelectedCommands((prev) => {
if (prev.includes(step)) {
return prev.filter((s) => s !== step);
// Removing a step - also remove steps that depend on it
const stepsToRemove = new Set([step]);
// Find all steps that transitively depend on the one being removed (cascade)
let changed = true;
while (changed) {
changed = false;
WORKFLOW_STEPS.forEach((s) => {
if (!stepsToRemove.has(s.value) && s.dependsOn?.some((dep) => stepsToRemove.has(dep))) {
stepsToRemove.add(s.value);
changed = true;
}
});
}
return prev.filter((s) => !stepsToRemove.has(s));
}
return [...prev, step];
});
@@ -139,19 +155,41 @@ export function CreateWorkOrderModal({ open, onOpenChange }: CreateWorkOrderModa
try {
setIsSubmitting(true);
// Sort selected commands by WORKFLOW_STEPS order before sending to backend
// This ensures correct execution order regardless of checkbox click order
const sortedCommands = WORKFLOW_STEPS
.filter(step => selectedCommands.includes(step.value))
.map(step => step.value);
await createWorkOrder.mutateAsync({
repository_url: repositoryUrl,
sandbox_type: sandboxType,
user_request: userRequest,
github_issue_number: githubIssueNumber || undefined,
selected_commands: selectedCommands,
selected_commands: sortedCommands,
});
// Success - close modal and reset
resetForm();
onOpenChange(false);
} catch (err) {
setError(err instanceof Error ? err.message : "Failed to create work order");
// Preserve error details by truncating long messages instead of hiding them
// Show up to 500 characters to capture important debugging information
// while keeping the UI readable
const maxLength = 500;
let userMessage = "Failed to create work order. Please try again.";
if (err instanceof Error && err.message) {
if (err.message.length <= maxLength) {
userMessage = err.message;
} else {
// Truncate but preserve the start which often contains the most important details
userMessage = `${err.message.slice(0, maxLength)}... (truncated, ${err.message.length - maxLength} more characters)`;
}
}
setError(userMessage);
} finally {
setIsSubmitting(false);
}

View File

@@ -31,9 +31,9 @@ const WORKFLOW_STEPS: { value: WorkflowStep; label: string; description: string;
{ value: "create-branch", label: "Create Branch", description: "Create a new git branch for isolated work" },
{ value: "planning", label: "Planning", description: "Generate implementation plan" },
{ value: "execute", label: "Execute", description: "Implement the planned changes" },
{ value: "prp-review", label: "Review/Fix", description: "Review implementation and fix issues", dependsOn: ["execute"] },
{ value: "commit", label: "Commit", description: "Commit changes to git", dependsOn: ["execute"] },
{ value: "create-pr", label: "Create PR", description: "Create pull request", dependsOn: ["execute"] },
{ value: "prp-review", label: "PRP Review", description: "Review against PRP document" },
{ value: "create-pr", label: "Create PR", description: "Create pull request", dependsOn: ["commit"] },
];
export function EditRepositoryModal({ open, onOpenChange }: EditRepositoryModalProps) {
@@ -55,29 +55,29 @@ export function EditRepositoryModal({ open, onOpenChange }: EditRepositoryModalP
}
}, [repository]);
/**
* Check if any selected steps depend on the given step
*/
const hasSelectedDependents = (step: WorkflowStep): boolean => {
return selectedSteps.some((selectedStep) => {
const stepDef = WORKFLOW_STEPS.find((s) => s.value === selectedStep);
return stepDef?.dependsOn?.includes(step) ?? false;
});
};
/**
* Toggle workflow step selection
* Prevents unchecking a step if other selected steps depend on it
* When unchecking a step, also uncheck steps that depend on it (cascade removal)
*/
const toggleStep = (step: WorkflowStep) => {
setSelectedSteps((prev) => {
if (prev.includes(step)) {
// Attempting to uncheck - check if any selected steps depend on this one
if (hasSelectedDependents(step)) {
// Don't allow unchecking if dependents exist
return prev;
// Removing a step - also remove steps that depend on it
const stepsToRemove = new Set([step]);
// Find all steps that transitively depend on the one being removed (cascade)
let changed = true;
while (changed) {
changed = false;
WORKFLOW_STEPS.forEach((s) => {
if (!stepsToRemove.has(s.value) && s.dependsOn?.some((dep) => stepsToRemove.has(dep))) {
stepsToRemove.add(s.value);
changed = true;
}
});
}
return prev.filter((s) => s !== step);
return prev.filter((s) => !stepsToRemove.has(s));
}
return [...prev, step];
});
@@ -108,11 +108,17 @@ export function EditRepositoryModal({ open, onOpenChange }: EditRepositoryModalP
try {
setIsSubmitting(true);
// Sort selected steps by WORKFLOW_STEPS order before sending to backend
const sortedSteps = WORKFLOW_STEPS
.filter(step => selectedSteps.includes(step.value))
.map(step => step.value);
await updateRepository.mutateAsync({
id: repository.id,
request: {
default_sandbox_type: repository.default_sandbox_type,
default_commands: selectedSteps,
default_commands: sortedSteps,
},
});
@@ -186,26 +192,8 @@ export function EditRepositoryModal({ open, onOpenChange }: EditRepositoryModalP
{WORKFLOW_STEPS.map((step) => {
const isSelected = selectedSteps.includes(step.value);
const isDisabledForEnable = isStepDisabled(step);
const hasDependents = isSelected && hasSelectedDependents(step.value);
const cannotUncheck = hasDependents;
const isCheckboxDisabled = isDisabledForEnable || cannotUncheck;
// Get dependent step names for tooltip message
const dependentSteps = isSelected
? selectedSteps
.filter((selectedStep) => {
const stepDef = WORKFLOW_STEPS.find((s) => s.value === selectedStep);
return stepDef?.dependsOn?.includes(step.value) ?? false;
})
.map((depStep) => {
const stepDef = WORKFLOW_STEPS.find((s) => s.value === depStep);
return stepDef?.label ?? depStep;
})
: [];
const tooltipMessage = cannotUncheck
? `Cannot uncheck: ${dependentSteps.join(", ")} ${dependentSteps.length === 1 ? "depends" : "depend"} on this step`
: isDisabledForEnable && step.dependsOn
const tooltipMessage = isDisabledForEnable && step.dependsOn
? `Requires: ${step.dependsOn.map((dep) => WORKFLOW_STEPS.find((s) => s.value === dep)?.label ?? dep).join(", ")}`
: undefined;
@@ -214,13 +202,12 @@ export function EditRepositoryModal({ open, onOpenChange }: EditRepositoryModalP
id={`edit-step-${step.value}`}
checked={isSelected}
onCheckedChange={() => {
if (!isCheckboxDisabled) {
if (!isDisabledForEnable) {
toggleStep(step.value);
}
}}
disabled={isCheckboxDisabled}
disabled={isDisabledForEnable}
aria-label={step.label}
className={cannotUncheck ? "cursor-not-allowed opacity-75" : ""}
/>
);
@@ -236,17 +223,12 @@ export function EditRepositoryModal({ open, onOpenChange }: EditRepositoryModalP
<Label
htmlFor={`edit-step-${step.value}`}
className={
isCheckboxDisabled
isDisabledForEnable
? "text-gray-400 dark:text-gray-500 cursor-not-allowed"
: "cursor-pointer"
}
>
{step.label}
{cannotUncheck && (
<span className="ml-1 text-xs text-cyan-500 dark:text-cyan-400" aria-hidden="true">
(locked)
</span>
)}
</Label>
</div>
);

View File

@@ -1,4 +1,6 @@
import { Trash2 } from "lucide-react";
import { useEffect, useRef, useState } from "react";
import { Button } from "@/features/ui/primitives/button";
import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/features/ui/primitives/select";
import { cn } from "@/features/ui/primitives/styles";
import { Switch } from "@/features/ui/primitives/switch";
@@ -10,6 +12,9 @@ interface ExecutionLogsProps {
/** Whether logs are from live SSE stream (shows "Live" indicator) */
isLive?: boolean;
/** Callback to clear logs (optional, defaults to no-op) */
onClearLogs?: () => void;
}
/**
@@ -59,13 +64,47 @@ function LogEntryRow({ log }: { log: LogEntry }) {
);
}
export function ExecutionLogs({ logs, isLive = false }: ExecutionLogsProps) {
export function ExecutionLogs({ logs, isLive = false, onClearLogs = () => {} }: ExecutionLogsProps) {
const [autoScroll, setAutoScroll] = useState(true);
const [levelFilter, setLevelFilter] = useState<string>("all");
const [localLogs, setLocalLogs] = useState<LogEntry[]>(logs);
const [isCleared, setIsCleared] = useState(false);
const previousLogsLengthRef = useRef<number>(logs.length);
const scrollContainerRef = useRef<HTMLDivElement>(null);
// Update local logs when props change
useEffect(() => {
const currentLogsLength = logs.length;
const previousLogsLength = previousLogsLengthRef.current;
// If we cleared logs, only update if new logs arrive (length increases)
if (isCleared) {
if (currentLogsLength > previousLogsLength) {
// New logs arrived after clear - reset cleared state and show new logs
setLocalLogs(logs);
setIsCleared(false);
}
// Otherwise, keep local logs empty (user's cleared view)
} else {
// Normal case: update local logs with prop changes
setLocalLogs(logs);
}
previousLogsLengthRef.current = currentLogsLength;
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [logs]);
// Filter logs by level
const filteredLogs = levelFilter === "all" ? logs : logs.filter((log) => log.level === levelFilter);
const filteredLogs = levelFilter === "all" ? localLogs : localLogs.filter((log) => log.level === levelFilter);
/**
* Handle clear logs button click
*/
const handleClearLogs = () => {
setLocalLogs([]);
setIsCleared(true);
onClearLogs();
};
/**
* Auto-scroll to bottom when new logs arrive (if enabled)
@@ -74,7 +113,7 @@ export function ExecutionLogs({ logs, isLive = false }: ExecutionLogsProps) {
if (autoScroll && scrollContainerRef.current) {
scrollContainerRef.current.scrollTop = scrollContainerRef.current.scrollHeight;
}
}, [logs.length, autoScroll]); // Trigger on new logs, not filtered logs
}, [localLogs.length, autoScroll]); // Trigger on new logs, not filtered logs
return (
<div className="border border-white/10 dark:border-gray-700/30 rounded-lg overflow-hidden bg-black/20 dark:bg-white/5 backdrop-blur">
@@ -136,6 +175,18 @@ export function ExecutionLogs({ logs, isLive = false }: ExecutionLogsProps) {
</span>
</div>
{/* Clear logs button */}
<Button
variant="ghost"
size="sm"
onClick={handleClearLogs}
className="h-8 text-xs text-gray-600 dark:text-gray-400 hover:text-red-600 dark:hover:text-red-400"
aria-label="Clear logs"
disabled={localLogs.length === 0}
>
<Trash2 className="w-3.5 h-3.5 mr-1.5" aria-hidden="true" />
Clear logs
</Button>
</div>
</div>

View File

@@ -18,6 +18,18 @@ interface RealTimeStatsProps {
*/
const EMPTY_LOGS: never[] = [];
/**
* Type guard to narrow LogEntry to one with required step_number and total_steps
*/
type LogEntryWithSteps = LogEntry & {
step_number: number;
total_steps: number;
};
function hasStepInfo(log: LogEntry): log is LogEntryWithSteps {
return log.step_number !== undefined && log.total_steps !== undefined;
}
/**
* Calculate progress metrics from log entries
* Used as fallback when no SSE progress data exists (e.g., after refresh)
@@ -26,8 +38,8 @@ function useCalculateProgressFromLogs(logs: LogEntry[]): LiveProgress | null {
return useMemo(() => {
if (logs.length === 0) return null;
// Find latest progress-related logs
const stepLogs = logs.filter((log) => log.step_number !== undefined && log.total_steps !== undefined);
// Find latest progress-related logs using type guard for proper narrowing
const stepLogs = logs.filter(hasStepInfo);
const latestStepLog = stepLogs[stepLogs.length - 1];
const workflowCompleted = logs.some((log) => log.event === "workflow_completed");
@@ -48,13 +60,15 @@ function useCalculateProgressFromLogs(logs: LogEntry[]): LiveProgress | null {
}
if (latestStepLog) {
const completedSteps = latestStepLog.step_number! - 1;
const totalSteps = latestStepLog.total_steps!;
// Type guard ensures step_number and total_steps are defined, so safe to access
const stepNumber = latestStepLog.step_number;
const totalSteps = latestStepLog.total_steps;
const completedSteps = stepNumber - 1;
return {
currentStep: latestStepLog.step || "unknown",
stepNumber: latestStepLog.step_number,
totalSteps: latestStepLog.total_steps,
stepNumber: stepNumber,
totalSteps: totalSteps,
progressPct: workflowCompleted ? 100 : Math.round((completedSteps / totalSteps) * 100),
elapsedSeconds: latestElapsed,
status: workflowCompleted ? "completed" : workflowFailed ? "failed" : "running",
@@ -137,6 +151,7 @@ export function RealTimeStats({ workOrderId }: RealTimeStatsProps) {
// Zustand SSE slice - connection management and live data
const connectToLogs = useAgentWorkOrdersStore((s) => s.connectToLogs);
const disconnectFromLogs = useAgentWorkOrdersStore((s) => s.disconnectFromLogs);
const clearLogs = useAgentWorkOrdersStore((s) => s.clearLogs);
const sseProgress = useAgentWorkOrdersStore((s) => s.liveProgress[workOrderId ?? ""]);
const sseLogs = useAgentWorkOrdersStore((s) => s.liveLogs[workOrderId ?? ""]);
@@ -325,7 +340,17 @@ export function RealTimeStats({ workOrderId }: RealTimeStatsProps) {
</div>
{/* Collapsible Execution Logs */}
{showLogs && <ExecutionLogs logs={logs} isLive={isLiveData} />}
{showLogs && (
<ExecutionLogs
logs={logs}
isLive={isLiveData}
onClearLogs={() => {
if (workOrderId) {
clearLogs(workOrderId);
}
}}
/>
)}
</div>
);
}

View File

@@ -107,7 +107,41 @@ export const createSSESlice: StateCreator<SSESlice, [], [], SSESlice> = (set, ge
}
};
eventSource.onerror = () => {
eventSource.onerror = (event) => {
// Check if this is a 404 (work order doesn't exist)
// EventSource doesn't expose the HTTP status code, so we infer a permanent
// failure (e.g. a 404) from the connection's readyState instead
const target = event.target as EventSource;
// If the EventSource readyState is CLOSED (2), it won't reconnect
// This typically happens on 404 or permanent errors
if (target.readyState === EventSource.CLOSED) {
// Permanent failure (likely 404) - clean up and don't retry
eventSource.close();
set((state) => {
const newConnections = new Map(state.logConnections);
newConnections.delete(workOrderId);
// Remove from persisted state too
const newLiveLogs = { ...state.liveLogs };
const newLiveProgress = { ...state.liveProgress };
delete newLiveLogs[workOrderId];
delete newLiveProgress[workOrderId];
return {
logConnections: newConnections,
liveLogs: newLiveLogs,
liveProgress: newLiveProgress,
connectionStates: {
...state.connectionStates,
[workOrderId]: "disconnected" as SSEConnectionState,
},
};
});
return;
}
// Temporary error - retry after 5 seconds
set((state) => ({
connectionStates: {
...state.connectionStates,
@@ -115,16 +149,13 @@ export const createSSESlice: StateCreator<SSESlice, [], [], SSESlice> = (set, ge
},
}));
// Auto-reconnect after 5 seconds
setTimeout(() => {
eventSource.close();
// Use set() to properly update state instead of mutating stale reference
set((state) => {
const newConnections = new Map(state.logConnections);
newConnections.delete(workOrderId);
return { logConnections: newConnections };
});
// Use fresh get() to ensure we have the latest state before retry
get().connectToLogs(workOrderId); // Retry
}, 5000);
};

View File

@@ -15,6 +15,7 @@ import { RealTimeStats } from "../components/RealTimeStats";
import { StepHistoryCard } from "../components/StepHistoryCard";
import { WorkflowStepButton } from "../components/WorkflowStepButton";
import { useStepHistory, useWorkOrder } from "../hooks/useAgentWorkOrderQueries";
import { useAgentWorkOrdersStore } from "../state/agentWorkOrdersStore";
import type { WorkflowStep } from "../types";
/**
@@ -24,9 +25,9 @@ const ALL_WORKFLOW_STEPS: WorkflowStep[] = [
"create-branch",
"planning",
"execute",
"prp-review",
"commit",
"create-pr",
"prp-review",
];
export function AgentWorkOrderDetailView() {
@@ -38,6 +39,9 @@ export function AgentWorkOrderDetailView() {
const { data: workOrder, isLoading: isLoadingWorkOrder, isError: isErrorWorkOrder } = useWorkOrder(id);
const { data: stepHistory, isLoading: isLoadingSteps, isError: isErrorSteps } = useStepHistory(id);
// Get live progress from SSE for total steps count
const liveProgress = useAgentWorkOrdersStore((s) => (id ? s.liveProgress[id] : undefined));
/**
* Toggle step expansion
*/
@@ -294,7 +298,7 @@ export function AgentWorkOrderDetailView() {
<div>
<p className="text-xs text-gray-500 dark:text-gray-400">Steps Completed</p>
<p className="text-2xl font-bold text-gray-900 dark:text-white mt-0.5">
{stepHistory.steps.filter((s) => s.success).length} / {stepHistory.steps.length}
{stepHistory.steps.filter((s) => s.success).length} / {liveProgress?.totalSteps ?? stepHistory.steps.length}
</p>
</div>
</div>

View File

@@ -245,7 +245,7 @@ export function useMultipleOperations(
completedIds.current.clear();
errorIds.current.clear();
notFoundCounts.current.clear();
}, []); // Stable dependency across reorderings
}, [_progressIdsKey]); // Reset tracking refs whenever the set of progress IDs changes
const queries = useQueries({
queries: progressIds.map((progressId) => ({

View File

@@ -1,8 +1,8 @@
/**
* Agent Work Orders 2 Page
* Agent Work Orders Page
*
* Page wrapper for the redesigned agent work orders interface.
* Routes to this page from /agent-work-orders2
* Page wrapper for the agent work orders interface.
* Routes to this page from /agent-work-orders
*/
import { AgentWorkOrdersView } from "../features/agent-work-orders/views/AgentWorkOrdersView";

View File

@@ -294,52 +294,105 @@ export default defineConfig(({ mode }: ConfigEnv): UserConfig => {
: [];
return [...new Set([...defaultHosts, ...hostFromEnv, ...customHosts])];
})(),
proxy: {
// Agent Work Orders API proxy (must come before general /api)
'/api/agent-work-orders': {
target: isDocker ? 'http://archon-agent-work-orders:8053' : 'http://localhost:8053',
proxy: (() => {
const proxyConfig: Record<string, any> = {};
// Check if agent work orders service should be enabled
// This can be disabled via environment variable to prevent hard dependency
const agentWorkOrdersEnabled = env.AGENT_WORK_ORDERS_ENABLED !== 'false';
const agentWorkOrdersPort = env.AGENT_WORK_ORDERS_PORT || '8053';
// Agent Work Orders API proxy (must come before general /api if enabled)
if (agentWorkOrdersEnabled) {
proxyConfig['/api/agent-work-orders'] = {
target: isDocker ? `http://archon-agent-work-orders:${agentWorkOrdersPort}` : `http://localhost:${agentWorkOrdersPort}`,
changeOrigin: true,
secure: false,
configure: (proxy, options) => {
const targetUrl = isDocker ? 'http://archon-agent-work-orders:8053' : 'http://localhost:8053';
proxy.on('error', (err, req, res) => {
timeout: 10000, // 10 second timeout
configure: (proxy: any, options: any) => {
const targetUrl = isDocker ? `http://archon-agent-work-orders:${agentWorkOrdersPort}` : `http://localhost:${agentWorkOrdersPort}`;
// Handle proxy errors (e.g., service is down)
proxy.on('error', (err: Error, req: any, res: any) => {
console.log('🚨 [VITE PROXY ERROR - Agent Work Orders]:', err.message);
console.log('🚨 [VITE PROXY ERROR] Target:', targetUrl);
console.log('🚨 [VITE PROXY ERROR] Request:', req.url);
});
proxy.on('proxyReq', (proxyReq, req, res) => {
// Send proper error response instead of hanging
if (!res.headersSent) {
res.writeHead(503, {
'Content-Type': 'application/json',
'X-Service-Unavailable': 'agent-work-orders'
});
res.end(JSON.stringify({
error: 'Service Unavailable',
message: 'Agent Work Orders service is not available',
service: 'agent-work-orders',
target: targetUrl
}));
}
});
// Handle connection timeout
proxy.on('proxyReq', (proxyReq: any, req: any, res: any) => {
console.log('🔄 [VITE PROXY - Agent Work Orders] Forwarding:', req.method, req.url, 'to', `${targetUrl}${req.url}`);
});
}
},
'/api': {
// Set timeout for the proxy request
proxyReq.setTimeout(10000, () => {
console.log('⏱️ [VITE PROXY - Agent Work Orders] Request timeout');
if (!res.headersSent) {
res.writeHead(504, {
'Content-Type': 'application/json',
'X-Service-Unavailable': 'agent-work-orders'
});
res.end(JSON.stringify({
error: 'Gateway Timeout',
message: 'Agent Work Orders service did not respond in time',
service: 'agent-work-orders',
target: targetUrl
}));
}
});
});
}
};
} else {
console.log('⚠️ [VITE PROXY] Agent Work Orders proxy disabled via AGENT_WORK_ORDERS_ENABLED=false');
}
// General /api proxy (always enabled, comes after specific routes if agent work orders is enabled)
proxyConfig['/api'] = {
target: `http://${proxyHost}:${port}`,
changeOrigin: true,
secure: false,
configure: (proxy, options) => {
proxy.on('error', (err, req, res) => {
configure: (proxy: any, options: any) => {
proxy.on('error', (err: Error, req: any, res: any) => {
console.log('🚨 [VITE PROXY ERROR]:', err.message);
console.log('🚨 [VITE PROXY ERROR] Target:', `http://${proxyHost}:${port}`);
console.log('🚨 [VITE PROXY ERROR] Request:', req.url);
});
proxy.on('proxyReq', (proxyReq, req, res) => {
proxy.on('proxyReq', (proxyReq: any, req: any, res: any) => {
console.log('🔄 [VITE PROXY] Forwarding:', req.method, req.url, 'to', `http://${proxyHost}:${port}${req.url}`);
});
}
},
};
// Health check endpoint proxy
'/health': {
proxyConfig['/health'] = {
target: `http://${host}:${port}`,
changeOrigin: true,
secure: false
},
};
// Socket.IO specific proxy configuration
'/socket.io': {
proxyConfig['/socket.io'] = {
target: `http://${host}:${port}`,
changeOrigin: true,
ws: true
}
},
};
return proxyConfig;
})(),
},
define: {
// CRITICAL: Don't inject Docker internal hostname into the build

View File

@@ -66,7 +66,7 @@ So that <benefit/value>
Use these files to implement the feature:
<find and list the files that are relevant to the feature describe why they are relevant in bullet points. If there are new files that need to be created to implement the feature, list them in an h3 'New Files' section. inlcude line numbers for the relevant sections>
<find and list the files that are relevant to the feature and describe why they are relevant in bullet points. If there are new files that need to be created to implement the feature, list them in an h3 'New Files' section. include line numbers for the relevant sections>
## Relevant research docstring

View File

@@ -1,6 +1,6 @@
# Code Review
# Review and Fix
Review implemented work against a PRP specification to ensure code quality, correctness, and adherence to project standards.
Review implemented work against a PRP specification, identify issues, and automatically fix blocker/major problems before committing.
## Variables
@@ -87,3 +87,34 @@ Return ONLY valid JSON (no markdown, no explanations) save to [report-#.json] in
}
}
```
## Fix Issues
After generating the review report, automatically fix blocker and major issues:
**Parse the Report:**
- Read the generated `PRPs/reports/report-#.json` file
- Extract all issues with severity "blocker" or "major"
**Apply Fixes:**
For each blocker/major issue:
1. Read the file mentioned in `file_path`
2. Apply the fix described in `issue_resolution`
3. Log what was fixed
**Re-validate:**
- Rerun linters: `uv run ruff check src/ --fix`
- Rerun type checker: `uv run mypy src/`
- Rerun tests: `uv run pytest tests/ -v`
**Report Results:**
- If all blockers fixed and validation passes → Output "✅ All critical issues fixed, validation passing"
- If fixes failed or validation still failing → Output "⚠️ Some issues remain" with details
- Minor issues can be left for manual review later
**Important:**
- Only fix blocker/major issues automatically
- Minor issues should be left in the report for human review
- If a fix might break something, skip it and note in output
- Run validation after ALL fixes applied, not after each individual fix

View File

@@ -295,7 +295,7 @@ claude --version
Check health endpoint to see dependency status:
```bash
curl http://localhost:8052/health
curl http://localhost:8053/health
```
This shows:

View File

@@ -5,7 +5,7 @@ FastAPI routes for agent work orders.
import asyncio
from datetime import datetime
from typing import Any
from typing import Any, Callable
from fastapi import APIRouter, HTTPException, Query
from sse_starlette.sse import EventSourceResponse
@@ -41,6 +41,93 @@ from .sse_streams import stream_work_order_logs
logger = get_logger(__name__)
router = APIRouter()
# Registry to track background workflow tasks by work order ID
# Enables monitoring, exception tracking, and cleanup
_workflow_tasks: dict[str, asyncio.Task] = {}
def _create_task_done_callback(agent_work_order_id: str) -> Callable[[asyncio.Task], None]:
"""Create a done callback for workflow tasks
Logs exceptions, updates work order status, and removes task from registry.
Note: This callback is synchronous but schedules async operations for status updates.
Args:
agent_work_order_id: Work order ID to track
"""
def on_task_done(task: asyncio.Task) -> None:
"""Callback invoked when workflow task completes
Inspects task.exception() to determine if workflow succeeded or failed,
logs appropriately, and updates work order status.
"""
try:
# Check if task raised an exception
exception = task.exception()
if exception is None:
# Task completed successfully
logger.info(
"workflow_task_completed",
agent_work_order_id=agent_work_order_id,
status="completed",
)
# Note: Orchestrator handles updating status to COMPLETED
# so we don't need to update it here
else:
# Task failed with an exception
# Log full exception details with context
logger.exception(
"workflow_task_failed",
agent_work_order_id=agent_work_order_id,
status="failed",
exception_type=type(exception).__name__,
exception_message=str(exception),
exc_info=True,
)
# Schedule async operation to update work order status if needed
# (execute_workflow_with_error_handling may have already done this)
async def update_status_if_needed() -> None:
try:
result = await state_repository.get(agent_work_order_id)
if result:
_, metadata = result
current_status = metadata.get("status")
if current_status != AgentWorkOrderStatus.FAILED:
error_msg = f"Workflow task failed: {str(exception)}"
await state_repository.update_status(
agent_work_order_id,
AgentWorkOrderStatus.FAILED,
error_message=error_msg,
)
logger.info(
"workflow_status_updated_to_failed",
agent_work_order_id=agent_work_order_id,
)
except Exception as update_error:
# Log but don't raise - task is already failed
logger.error(
"workflow_status_update_failed_in_callback",
agent_work_order_id=agent_work_order_id,
update_error=str(update_error),
original_exception=str(exception),
exc_info=True,
)
# Schedule the async status update
asyncio.create_task(update_status_if_needed())
finally:
# Always remove task from registry when done (success or failure)
_workflow_tasks.pop(agent_work_order_id, None)
logger.debug(
"workflow_task_removed_from_registry",
agent_work_order_id=agent_work_order_id,
)
return on_task_done
# Initialize dependencies (singletons for MVP)
state_repository = create_repository()
repository_config_repo = RepositoryConfigRepository()
@@ -103,9 +190,15 @@ async def create_agent_work_order(
# Save to repository
await state_repository.create(state, metadata)
# Start workflow in background
asyncio.create_task(
orchestrator.execute_workflow(
# Wrapper function to handle exceptions from workflow execution
async def execute_workflow_with_error_handling() -> None:
"""Execute workflow and handle any unhandled exceptions
Broad exception handler ensures all exceptions are caught and logged,
with full context for debugging. Status is updated to FAILED on errors.
"""
try:
await orchestrator.execute_workflow(
agent_work_order_id=agent_work_order_id,
repository_url=request.repository_url,
sandbox_type=request.sandbox_type,
@@ -113,6 +206,47 @@ async def create_agent_work_order(
selected_commands=request.selected_commands,
github_issue_number=request.github_issue_number,
)
except Exception as e:
# Catch any exceptions that weren't handled by the orchestrator
# (e.g., exceptions during initialization, argument validation, etc.)
error_msg = str(e)
logger.exception(
"workflow_execution_unhandled_exception",
agent_work_order_id=agent_work_order_id,
error=error_msg,
exception_type=type(e).__name__,
exc_info=True,
)
try:
# Update work order status to FAILED
await state_repository.update_status(
agent_work_order_id,
AgentWorkOrderStatus.FAILED,
error_message=f"Workflow execution failed before orchestrator could handle it: {error_msg}",
)
except Exception as update_error:
# Log but don't raise - we've already caught the original error
logger.error(
"workflow_status_update_failed_after_exception",
agent_work_order_id=agent_work_order_id,
update_error=str(update_error),
original_error=error_msg,
exc_info=True,
)
# Re-raise to ensure task.exception() returns the exception
raise
# Create and track background workflow task
task = asyncio.create_task(execute_workflow_with_error_handling())
_workflow_tasks[agent_work_order_id] = task
# Attach done callback to log exceptions and update status
task.add_done_callback(_create_task_done_callback(agent_work_order_id))
logger.debug(
"workflow_task_created_and_tracked",
agent_work_order_id=agent_work_order_id,
task_count=len(_workflow_tasks),
)
logger.info(

View File

@@ -3,7 +3,7 @@
All models follow exact naming from the PRD specification.
"""
from datetime import datetime
from datetime import datetime, timezone
from enum import Enum
from pydantic import BaseModel, Field, field_validator
@@ -284,7 +284,7 @@ class StepExecutionResult(BaseModel):
error_message: str | None = None
duration_seconds: float
session_id: str | None = None
timestamp: datetime = Field(default_factory=datetime.now)
timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
class StepHistory(BaseModel):

View File

@@ -112,14 +112,27 @@ class GitBranchSandbox:
duration_seconds=duration,
)
# Explicit check for None returncode (should never happen after communicate())
if process.returncode is None:
self._logger.error(
"command_execution_unexpected_state",
command=command,
error="process.returncode is None after communicate() - this indicates a serious bug",
)
raise RuntimeError(
f"Process returncode is None after communicate() for command: {command}. "
"This should never happen and indicates a serious issue."
)
duration = time.time() - start_time
success = process.returncode == 0
exit_code = process.returncode
success = exit_code == 0
result = CommandExecutionResult(
success=success,
stdout=stdout.decode() if stdout else None,
stderr=stderr.decode() if stderr else None,
exit_code=process.returncode or 0,
exit_code=exit_code,
error_message=None if success else stderr.decode() if stderr else "Command failed",
duration_seconds=duration,
)
@@ -132,7 +145,7 @@ class GitBranchSandbox:
self._logger.error(
"command_execution_failed",
command=command,
exit_code=process.returncode,
exit_code=exit_code,
duration=duration,
)

View File

@@ -5,6 +5,8 @@ Enables parallel execution of multiple work orders without conflicts.
"""
import asyncio
import os
import subprocess
import time
from ..models import CommandExecutionResult, SandboxSetupError
@@ -13,6 +15,7 @@ from ..utils.port_allocation import find_available_port_range
from ..utils.structured_logger import get_logger
from ..utils.worktree_operations import (
create_worktree,
get_base_repo_path,
get_worktree_path,
remove_worktree,
setup_worktree_environment,
@@ -36,6 +39,7 @@ class GitWorktreeSandbox:
self.port_range_start: int | None = None
self.port_range_end: int | None = None
self.available_ports: list[int] = []
self.temp_branch: str | None = None # Track temporary branch for cleanup
self._logger = logger.bind(
sandbox_identifier=sandbox_identifier,
repository_url=repository_url,
@@ -63,12 +67,13 @@ class GitWorktreeSandbox:
# Create worktree with temporary branch name
# Agent will create the actual feature branch during execution
temp_branch = f"wo-{self.sandbox_identifier}"
# The temporary branch will be cleaned up in cleanup() method
self.temp_branch = f"wo-{self.sandbox_identifier}"
worktree_path, error = create_worktree(
self.repository_url,
self.sandbox_identifier,
temp_branch,
self.temp_branch,
self._logger
)
@@ -143,13 +148,15 @@ class GitWorktreeSandbox:
)
duration = time.time() - start_time
success = process.returncode == 0
# Use actual returncode when available, or -1 as sentinel for None
exit_code = process.returncode if process.returncode is not None else -1
success = exit_code == 0
result = CommandExecutionResult(
success=success,
stdout=stdout.decode() if stdout else None,
stderr=stderr.decode() if stderr else None,
exit_code=process.returncode or 0,
exit_code=exit_code,
error_message=None if success else stderr.decode() if stderr else "Command failed",
duration_seconds=duration,
)
@@ -162,7 +169,7 @@ class GitWorktreeSandbox:
self._logger.error(
"command_execution_failed",
command=command,
exit_code=process.returncode,
exit_code=exit_code,
duration=duration,
)
@@ -195,25 +202,101 @@ class GitWorktreeSandbox:
return None
async def cleanup(self) -> None:
"""Remove worktree"""
"""Remove worktree and temporary branch
Removes the worktree directory and the temporary branch that was created
during setup. This ensures cleanup even if the agent failed before creating
the actual feature branch.
"""
self._logger.info("worktree_sandbox_cleanup_started")
try:
success, error = remove_worktree(
# Remove the worktree first
worktree_success, error = remove_worktree(
self.repository_url,
self.sandbox_identifier,
self._logger
)
if success:
self._logger.info("worktree_sandbox_cleanup_completed")
else:
if not worktree_success:
self._logger.error(
"worktree_sandbox_cleanup_failed",
error=error
)
# Delete the temporary branch if it was created
# Always try to delete branch even if worktree removal failed,
# as the branch may still exist and need cleanup
if self.temp_branch:
await self._delete_temp_branch()
# Only log success if worktree removal succeeded
if worktree_success:
self._logger.info("worktree_sandbox_cleanup_completed")
except Exception as e:
self._logger.error(
"worktree_sandbox_cleanup_failed",
error=str(e),
exc_info=True
)
async def _delete_temp_branch(self) -> None:
"""Delete the temporary branch from the base repository
Attempts to delete the temporary branch created during setup.
Fails gracefully if the branch doesn't exist or was already deleted.
"""
if not self.temp_branch:
return
base_repo_path = get_base_repo_path(self.repository_url)
try:
# Check if base repo exists
if not os.path.exists(base_repo_path):
self._logger.warning(
"temp_branch_cleanup_skipped",
reason="Base repository does not exist",
temp_branch=self.temp_branch
)
return
# Delete the branch (local only - don't force push to remote)
# Use -D to force delete even if not merged
cmd = ["git", "branch", "-D", self.temp_branch]
result = subprocess.run(
cmd,
capture_output=True,
text=True,
cwd=base_repo_path,
)
if result.returncode == 0:
self._logger.info(
"temp_branch_deleted",
temp_branch=self.temp_branch
)
else:
# Branch might not exist (already deleted or wasn't created)
if "not found" in result.stderr.lower() or "no such branch" in result.stderr.lower():
self._logger.debug(
"temp_branch_not_found",
temp_branch=self.temp_branch,
message="Branch may have been already deleted or never created"
)
else:
# Other error (e.g., branch is checked out)
self._logger.warning(
"temp_branch_deletion_failed",
temp_branch=self.temp_branch,
error=result.stderr,
message="Branch may need manual cleanup"
)
except Exception as e:
self._logger.warning(
"temp_branch_deletion_error",
temp_branch=self.temp_branch,
error=str(e),
exc_info=True,
message="Failed to delete temporary branch - may need manual cleanup"
)

View File

@@ -6,7 +6,7 @@ Enables state persistence across service restarts and debugging.
import asyncio
import json
from datetime import datetime
from datetime import datetime, timezone
from pathlib import Path
from typing import TYPE_CHECKING, Any, cast
@@ -203,7 +203,7 @@ class FileStateRepository:
return
data["metadata"]["status"] = status
data["metadata"]["updated_at"] = datetime.now().isoformat()
data["metadata"]["updated_at"] = datetime.now(timezone.utc).isoformat()
for key, value in kwargs.items():
data["metadata"][key] = value
@@ -235,7 +235,7 @@ class FileStateRepository:
return
data["state"]["git_branch_name"] = git_branch_name
data["metadata"]["updated_at"] = datetime.now().isoformat()
data["metadata"]["updated_at"] = datetime.now(timezone.utc).isoformat()
await self._write_state_file(agent_work_order_id, data)
@@ -264,7 +264,7 @@ class FileStateRepository:
return
data["state"]["agent_session_id"] = agent_session_id
data["metadata"]["updated_at"] = datetime.now().isoformat()
data["metadata"]["updated_at"] = datetime.now(timezone.utc).isoformat()
await self._write_state_file(agent_work_order_id, data)

View File

@@ -5,7 +5,7 @@ Stores repository metadata, verification status, and per-repository preferences.
"""
import os
from datetime import datetime
from datetime import datetime, timezone
from typing import Any
from supabase import Client, create_client
@@ -63,17 +63,20 @@ class RepositoryConfigRepository:
self._logger = logger.bind(table=self.table_name)
self._logger.info("repository_config_repository_initialized")
def _row_to_model(self, row: dict[str, Any]) -> ConfiguredRepository:
def _row_to_model(self, row: dict[str, Any]) -> ConfiguredRepository | None:
"""Convert database row to ConfiguredRepository model
Args:
row: Database row dictionary
Returns:
ConfiguredRepository model instance
ConfiguredRepository model instance, or None if row contains invalid enum values
that cannot be converted (allows callers to skip invalid rows)
Raises:
ValueError: If row contains invalid enum values that cannot be converted
Note:
Invalid enum values are logged but do not raise exceptions, allowing operations
to continue with valid data. This prevents the entire table from becoming unreadable
due to schema mismatches or corrupted data.
"""
repository_id = row.get("id", "unknown")
@@ -87,11 +90,10 @@ class RepositoryConfigRepository:
repository_id=repository_id,
invalid_commands=default_commands_raw,
error=str(e),
exc_info=True
exc_info=True,
action="Skipping invalid row - consider running data migration to fix enum values"
)
raise ValueError(
f"Database contains invalid workflow steps for repository {repository_id}: {default_commands_raw}"
) from e
return None
# Convert default_sandbox_type from string to SandboxType enum
sandbox_type_raw = row.get("default_sandbox_type", "git_worktree")
@@ -103,11 +105,10 @@ class RepositoryConfigRepository:
repository_id=repository_id,
invalid_type=sandbox_type_raw,
error=str(e),
exc_info=True
exc_info=True,
action="Skipping invalid row - consider running data migration to fix enum values"
)
raise ValueError(
f"Database contains invalid sandbox type for repository {repository_id}: {sandbox_type_raw}"
) from e
return None
return ConfiguredRepository(
id=row["id"],
@@ -127,7 +128,8 @@ class RepositoryConfigRepository:
"""List all configured repositories
Returns:
List of ConfiguredRepository models ordered by created_at DESC
List of ConfiguredRepository models ordered by created_at DESC.
Invalid rows (with bad enum values) are skipped and logged.
Raises:
Exception: If database query fails
@@ -135,7 +137,22 @@ class RepositoryConfigRepository:
try:
response = self.client.table(self.table_name).select("*").order("created_at", desc=True).execute()
repositories = [self._row_to_model(row) for row in response.data]
repositories = []
skipped_count = 0
for row in response.data:
repository = self._row_to_model(row)
if repository is not None:
repositories.append(repository)
else:
skipped_count += 1
if skipped_count > 0:
self._logger.warning(
"repositories_skipped_due_to_invalid_data",
skipped_count=skipped_count,
total_rows=len(response.data),
valid_count=len(repositories)
)
self._logger.info(
"repositories_listed",
@@ -158,7 +175,7 @@ class RepositoryConfigRepository:
repository_id: UUID of the repository
Returns:
ConfiguredRepository model or None if not found
ConfiguredRepository model or None if not found or if data is invalid
Raises:
Exception: If database query fails
@@ -175,6 +192,15 @@ class RepositoryConfigRepository:
repository = self._row_to_model(response.data[0])
if repository is None:
# Invalid enum values in database - treat as not found
self._logger.warning(
"repository_has_invalid_data",
repository_id=repository_id,
message="Repository exists but contains invalid enum values - consider data migration"
)
return None
self._logger.info(
"repository_retrieved",
repository_id=repository_id,
@@ -226,11 +252,21 @@ class RepositoryConfigRepository:
# Set last_verified_at if verified
if is_verified:
data["last_verified_at"] = datetime.now().isoformat()
data["last_verified_at"] = datetime.now(timezone.utc).isoformat()
response = self.client.table(self.table_name).insert(data).execute()
repository = self._row_to_model(response.data[0])
if repository is None:
# This should not happen for newly created repositories with valid data
# but handle defensively
error_msg = "Failed to convert newly created repository to model - data corruption detected"
self._logger.error(
"repository_creation_model_conversion_failed",
repository_url=repository_url,
error=error_msg
)
raise ValueError(error_msg)
self._logger.info(
"repository_created",
@@ -272,13 +308,13 @@ class RepositoryConfigRepository:
for key, value in updates.items():
if isinstance(value, SandboxType):
prepared_updates[key] = value.value
elif isinstance(value, list) and value and isinstance(value[0], WorkflowStep):
elif isinstance(value, list) and value and all(isinstance(item, WorkflowStep) for item in value):
prepared_updates[key] = [step.value for step in value]
else:
prepared_updates[key] = value
# Always update updated_at timestamp
prepared_updates["updated_at"] = datetime.now().isoformat()
prepared_updates["updated_at"] = datetime.now(timezone.utc).isoformat()
response = (
self.client.table(self.table_name)
@@ -295,6 +331,18 @@ class RepositoryConfigRepository:
return None
repository = self._row_to_model(response.data[0])
if repository is None:
# Repository exists but has invalid enum values - cannot update
error_msg = (
f"Repository {repository_id} exists but contains invalid enum values. "
"Cannot update - consider fixing data first via migration."
)
self._logger.error(
"repository_update_failed_invalid_data",
repository_id=repository_id,
error=error_msg
)
raise ValueError(error_msg)
self._logger.info(
"repository_updated",

View File

@@ -4,6 +4,8 @@ Creates appropriate repository instances based on configuration.
Supports in-memory (dev/testing), file-based (legacy), and Supabase (production) storage.
"""
import os
from ..config import config
from ..utils.structured_logger import get_logger
from .file_state_repository import FileStateRepository
@@ -12,6 +14,9 @@ from .work_order_repository import WorkOrderRepository
logger = get_logger(__name__)
# Supported storage types
SUPPORTED_STORAGE_TYPES = ["memory", "file", "supabase"]
def create_repository() -> WorkOrderRepository | FileStateRepository | SupabaseWorkOrderRepository:
"""Create a work order repository based on configuration
@@ -20,11 +25,28 @@ def create_repository() -> WorkOrderRepository | FileStateRepository | SupabaseW
Repository instance (in-memory, file-based, or Supabase)
Raises:
ValueError: If Supabase is configured but credentials are missing
ValueError: If Supabase is configured but credentials are missing, or if storage_type is invalid
"""
storage_type = config.STATE_STORAGE_TYPE.lower()
if storage_type == "supabase":
# Validate Supabase credentials before creating repository
supabase_url = os.getenv("SUPABASE_URL")
supabase_key = os.getenv("SUPABASE_SERVICE_KEY")
if not supabase_url or not supabase_key:
error_msg = (
"Supabase storage is configured (STATE_STORAGE_TYPE=supabase) but required "
"credentials are missing. Set SUPABASE_URL and SUPABASE_SERVICE_KEY environment variables."
)
logger.error(
"supabase_credentials_missing",
storage_type="supabase",
missing_url=not bool(supabase_url),
missing_key=not bool(supabase_key),
)
raise ValueError(error_msg)
logger.info("repository_created", storage_type="supabase")
return SupabaseWorkOrderRepository()
elif storage_type == "file":
@@ -42,9 +64,13 @@ def create_repository() -> WorkOrderRepository | FileStateRepository | SupabaseW
)
return WorkOrderRepository()
else:
logger.warning(
"unknown_storage_type",
storage_type=storage_type,
fallback="memory"
error_msg = (
f"Invalid storage type '{storage_type}'. "
f"Supported types are: {', '.join(SUPPORTED_STORAGE_TYPES)}"
)
return WorkOrderRepository()
logger.error(
"invalid_storage_type",
storage_type=storage_type,
supported_types=SUPPORTED_STORAGE_TYPES,
)
raise ValueError(error_msg)

View File

@@ -10,7 +10,7 @@ Architecture Note - async/await Pattern:
This maintains a consistent async API contract across all repositories.
"""
from datetime import datetime
from datetime import datetime, timezone
from typing import Any
from supabase import Client
@@ -247,7 +247,7 @@ class SupabaseWorkOrderRepository:
# Prepare updates
updates: dict[str, Any] = {
"status": status.value,
"updated_at": datetime.now().isoformat(),
"updated_at": datetime.now(timezone.utc).isoformat(),
}
# Add any metadata updates to the JSONB column
@@ -307,7 +307,7 @@ class SupabaseWorkOrderRepository:
try:
self.client.table(self.table_name).update({
"git_branch_name": git_branch_name,
"updated_at": datetime.now().isoformat(),
"updated_at": datetime.now(timezone.utc).isoformat(),
}).eq("agent_work_order_id", agent_work_order_id).execute()
self._logger.info(
@@ -341,7 +341,7 @@ class SupabaseWorkOrderRepository:
try:
self.client.table(self.table_name).update({
"agent_session_id": agent_session_id,
"updated_at": datetime.now().isoformat(),
"updated_at": datetime.now(timezone.utc).isoformat(),
}).eq("agent_work_order_id", agent_work_order_id).execute()
self._logger.info(
@@ -384,7 +384,7 @@ class SupabaseWorkOrderRepository:
... agent_name="test-agent",
... success=True,
... duration_seconds=1.5,
... timestamp=datetime.now()
... timestamp=datetime.now(timezone.utc)
... )
... ]
... )

View File

@@ -5,6 +5,7 @@ These tools help identify orphaned worktrees (exist on filesystem but not in dat
and dangling state (exist in database but worktree deleted).
"""
import os
import shutil
from pathlib import Path
from typing import Any
@@ -139,11 +140,47 @@ async def reconcile_state(
if fix:
# Clean up orphaned worktrees
worktree_base = Path(config.WORKTREE_BASE_DIR)
base_dir_resolved = os.path.abspath(os.path.normpath(str(worktree_base)))
for orphan_path in orphans:
try:
# Safety check: verify orphan_path is inside worktree base directory
orphan_path_resolved = os.path.abspath(os.path.normpath(orphan_path))
# Verify path is within base directory and not the base directory itself
try:
common_path = os.path.commonpath([base_dir_resolved, orphan_path_resolved])
is_inside_base = common_path == base_dir_resolved
is_not_base = orphan_path_resolved != base_dir_resolved
# Check if path is a root directory (Unix / or Windows drive root like C:\)
path_obj = Path(orphan_path_resolved)
is_not_root = not (
orphan_path_resolved in ("/", "\\") or
(os.name == "nt" and len(path_obj.parts) == 2 and path_obj.parts[1] == "")
)
except ValueError:
# commonpath raises ValueError if paths are on different drives (Windows)
is_inside_base = False
is_not_base = True
is_not_root = True
if is_inside_base and is_not_base and is_not_root:
shutil.rmtree(orphan_path)
actions.append(f"Deleted orphaned worktree: {orphan_path}")
logger.info("orphaned_worktree_deleted", path=orphan_path)
else:
# Safety check failed - do not delete
actions.append(f"Skipped deletion of {orphan_path} (safety check failed: outside worktree base or invalid path)")
logger.error(
"orphaned_worktree_deletion_skipped_safety_check_failed",
path=orphan_path,
path_resolved=orphan_path_resolved,
base_dir=base_dir_resolved,
is_inside_base=is_inside_base,
is_not_base=is_not_base,
is_not_root=is_not_root,
)
except Exception as e:
actions.append(f"Failed to delete {orphan_path}: {e}")
logger.error("orphaned_worktree_delete_failed", path=orphan_path, error=str(e), exc_info=True)

View File

@@ -70,7 +70,7 @@ class WorkflowOrchestrator:
"""
# Default commands if not provided
if selected_commands is None:
selected_commands = ["create-branch", "planning", "execute", "commit", "create-pr"]
selected_commands = ["create-branch", "planning", "execute", "prp-review", "commit", "create-pr"]
# Bind work order context for structured logging
bind_work_order_context(agent_work_order_id)
@@ -198,43 +198,30 @@ class WorkflowOrchestrator:
agent_work_order_id, result.output or ""
)
elif command_name == "create-pr":
# Calculate git stats before marking as completed
# Branch name is stored in context from create-branch step
branch_name = context.get("create-branch")
git_stats = await self._calculate_git_stats(
branch_name,
sandbox.working_dir
)
# Store PR URL for final metadata update
context["github_pull_request_url"] = result.output
await self.state_repository.update_status(
agent_work_order_id,
AgentWorkOrderStatus.COMPLETED,
github_pull_request_url=result.output,
git_commit_count=git_stats["commit_count"],
git_files_changed=git_stats["files_changed"],
)
# Save final step history
await self.state_repository.save_step_history(agent_work_order_id, step_history)
bound_logger.info(
"agent_work_order_completed",
total_steps=len(step_history.steps),
git_commit_count=git_stats["commit_count"],
git_files_changed=git_stats["files_changed"],
)
return # Exit early if PR created
# Calculate git stats for workflows that complete without PR
# Calculate git stats and mark as completed
branch_name = context.get("create-branch")
completion_metadata = {}
if branch_name:
git_stats = await self._calculate_git_stats(
branch_name, sandbox.working_dir
)
await self.state_repository.update_status(
agent_work_order_id,
AgentWorkOrderStatus.COMPLETED,
git_commit_count=git_stats["commit_count"],
git_files_changed=git_stats["files_changed"],
)
completion_metadata["git_commit_count"] = git_stats["commit_count"]
completion_metadata["git_files_changed"] = git_stats["files_changed"]
# Include PR URL if create-pr step was executed
pr_url = context.get("github_pull_request_url")
if pr_url:
completion_metadata["github_pull_request_url"] = pr_url
await self.state_repository.update_status(
agent_work_order_id,
AgentWorkOrderStatus.COMPLETED,
**completion_metadata
)
# Save final step history
await self.state_repository.save_step_history(agent_work_order_id, step_history)