Mirror of https://github.com/coleam00/Archon.git (synced 2026-01-07 07:07:59 -05:00)
The New Archon (Beta) - The Operating System for AI Coding Assistants!
@@ -0,0 +1 @@
|
||||
# This file makes the streamlit_ui directory a Python package
|
||||
@@ -0,0 +1,230 @@
|
||||
import streamlit as st
|
||||
import subprocess
|
||||
import threading
|
||||
import platform
|
||||
import queue
|
||||
import time
|
||||
import sys
|
||||
import os
|
||||
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
from utils.utils import reload_archon_graph
|
||||
|
||||
def agent_service_tab():
|
||||
"""Display the agent service interface for managing the graph service"""
|
||||
st.header("MCP Agent Service")
|
||||
st.write("Start, restart, and monitor the Archon agent service for MCP.")
|
||||
|
||||
# Initialize session state variables if they don't exist
|
||||
if "service_process" not in st.session_state:
|
||||
st.session_state.service_process = None
|
||||
if "service_running" not in st.session_state:
|
||||
st.session_state.service_running = False
|
||||
if "service_output" not in st.session_state:
|
||||
st.session_state.service_output = []
|
||||
if "output_queue" not in st.session_state:
|
||||
st.session_state.output_queue = queue.Queue()
|
||||
|
||||
# Function to check if the service is running
|
||||
def is_service_running():
|
||||
if st.session_state.service_process is None:
|
||||
return False
|
||||
|
||||
# Check if process is still running
|
||||
return st.session_state.service_process.poll() is None
|
||||
|
||||
# Function to kill any process using port 8100
|
||||
def kill_process_on_port(port):
|
||||
try:
|
||||
if platform.system() == "Windows":
|
||||
# Windows: use netstat to find the process using the port
|
||||
result = subprocess.run(
|
||||
f'netstat -ano | findstr :{port}',
|
||||
shell=True,
|
||||
capture_output=True,
|
||||
text=True
|
||||
)
|
||||
|
||||
if result.stdout:
|
||||
# Extract the PID from the output
|
||||
for line in result.stdout.splitlines():
|
||||
if f":{port}" in line and "LISTENING" in line:
|
||||
parts = line.strip().split()
|
||||
pid = parts[-1]
|
||||
# Kill the process
|
||||
subprocess.run(f'taskkill /F /PID {pid}', shell=True)
|
||||
st.session_state.output_queue.put(f"[{time.strftime('%H:%M:%S')}] Killed any existing process using port {port} (PID: {pid})\n")
|
||||
return True
|
||||
else:
|
||||
# Unix-like systems: use lsof to find the process using the port
|
||||
result = subprocess.run(
|
||||
f'lsof -i :{port} -t',
|
||||
shell=True,
|
||||
capture_output=True,
|
||||
text=True
|
||||
)
|
||||
|
||||
if result.stdout:
|
||||
# Extract the PID from the output
|
||||
pid = result.stdout.strip()
|
||||
# Kill the process
|
||||
subprocess.run(f'kill -9 {pid}', shell=True)
|
||||
st.session_state.output_queue.put(f"[{time.strftime('%H:%M:%S')}] Killed process using port {port} (PID: {pid})\n")
|
||||
return True
|
||||
|
||||
return False
|
||||
except Exception as e:
|
||||
st.session_state.output_queue.put(f"[{time.strftime('%H:%M:%S')}] Error killing process on port {port}: {str(e)}\n")
|
||||
return False
|
||||
|
||||
# Update service status
|
||||
st.session_state.service_running = is_service_running()
|
||||
|
||||
# Process any new output in the queue
|
||||
try:
|
||||
while not st.session_state.output_queue.empty():
|
||||
line = st.session_state.output_queue.get_nowait()
|
||||
if line:
|
||||
st.session_state.service_output.append(line)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Create button text based on service status
|
||||
button_text = "Restart Agent Service" if st.session_state.service_running else "Start Agent Service"
|
||||
|
||||
# Create columns for buttons
|
||||
col1, col2 = st.columns([1, 1])
|
||||
|
||||
# Start/Restart button
|
||||
with col1:
|
||||
if st.button(button_text, use_container_width=True):
|
||||
# Stop existing process if running
|
||||
if st.session_state.service_running:
|
||||
try:
|
||||
st.session_state.service_process.terminate()
|
||||
time.sleep(1) # Give it time to terminate
|
||||
if st.session_state.service_process.poll() is None:
|
||||
# Force kill if still running
|
||||
st.session_state.service_process.kill()
|
||||
except Exception as e:
|
||||
st.error(f"Error stopping service: {str(e)}")
|
||||
|
||||
# Clear previous output
|
||||
st.session_state.service_output = []
|
||||
st.session_state.output_queue = queue.Queue()
|
||||
|
||||
# Kill any process using port 8100
|
||||
kill_process_on_port(8100)
|
||||
|
||||
# Start new process
|
||||
try:
|
||||
# Get the absolute path to the graph service script
|
||||
base_path = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
|
||||
graph_service_path = os.path.join(base_path, 'graph_service.py')
|
||||
|
||||
# Start the process with output redirection
|
||||
process = subprocess.Popen(
|
||||
[sys.executable, graph_service_path],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
text=True,
|
||||
bufsize=1,
|
||||
universal_newlines=True
|
||||
)
|
||||
|
||||
st.session_state.service_process = process
|
||||
st.session_state.service_running = True
|
||||
|
||||
# Start threads to read output
|
||||
def read_output(stream, queue_obj):
|
||||
for line in iter(stream.readline, ''):
|
||||
queue_obj.put(line)
|
||||
stream.close()
|
||||
|
||||
# Start threads for stdout and stderr
|
||||
threading.Thread(target=read_output, args=(process.stdout, st.session_state.output_queue), daemon=True).start()
|
||||
threading.Thread(target=read_output, args=(process.stderr, st.session_state.output_queue), daemon=True).start()
|
||||
|
||||
# Add startup message
|
||||
st.session_state.output_queue.put(f"[{time.strftime('%H:%M:%S')}] Agent service started\n")
|
||||
|
||||
st.success("Agent service started successfully!")
|
||||
st.rerun()
|
||||
|
||||
except Exception as e:
|
||||
st.error(f"Error starting service: {str(e)}")
|
||||
st.session_state.output_queue.put(f"[{time.strftime('%H:%M:%S')}] Error: {str(e)}\n")
|
||||
|
||||
# Stop button
|
||||
with col2:
|
||||
stop_button = st.button("Stop Agent Service", disabled=not st.session_state.service_running, use_container_width=True)
|
||||
if stop_button and st.session_state.service_running:
|
||||
try:
|
||||
st.session_state.service_process.terminate()
|
||||
time.sleep(1) # Give it time to terminate
|
||||
if st.session_state.service_process.poll() is None:
|
||||
# Force kill if still running
|
||||
st.session_state.service_process.kill()
|
||||
|
||||
st.session_state.service_running = False
|
||||
st.session_state.output_queue.put(f"[{time.strftime('%H:%M:%S')}] Agent service stopped\n")
|
||||
st.success("Agent service stopped successfully!")
|
||||
st.rerun()
|
||||
|
||||
except Exception as e:
|
||||
st.error(f"Error stopping service: {str(e)}")
|
||||
st.session_state.output_queue.put(f"[{time.strftime('%H:%M:%S')}] Error stopping: {str(e)}\n")
|
||||
|
||||
# Service status indicator
|
||||
status_color = "🟢" if st.session_state.service_running else "🔴"
|
||||
status_text = "Running" if st.session_state.service_running else "Stopped"
|
||||
st.write(f"**Service Status:** {status_color} {status_text}")
|
||||
|
||||
# Add auto-refresh option
|
||||
auto_refresh = st.checkbox("Auto-refresh output (uncheck this before copying any error message)", value=True)
|
||||
|
||||
# Display output in a scrollable container
|
||||
st.subheader("Service Output")
|
||||
|
||||
# Calculate height based on number of lines, but cap it
|
||||
output_height = min(400, max(200, len(st.session_state.service_output) * 20))
|
||||
|
||||
# Create a scrollable container for the output
|
||||
with st.container():
|
||||
# Join all output lines and display in the container
|
||||
output_text = "".join(st.session_state.service_output)
|
||||
|
||||
# For auto-scrolling, we'll use a different approach
|
||||
if auto_refresh and st.session_state.service_running and output_text:
|
||||
# We'll reverse the output text so the newest lines appear at the top
|
||||
# This way they're always visible without needing to scroll
|
||||
lines = output_text.splitlines()
|
||||
reversed_lines = lines[::-1] # Reverse the lines
|
||||
output_text = "\n".join(reversed_lines)
|
||||
|
||||
# Add a note at the top of the displayed output (the reversed, newest-first text follows it)
|
||||
note = "--- SHOWING NEWEST LOGS FIRST (AUTO-SCROLL MODE) ---\n\n"
|
||||
output_text = note + output_text
|
||||
|
||||
# Use a text area for scrollable output
|
||||
st.text_area(
|
||||
label="Realtime Logs from Archon Service",
|
||||
value=output_text,
|
||||
height=output_height,
|
||||
disabled=True,
|
||||
key="output_text_area" # Use a fixed key to maintain state between refreshes
|
||||
)
|
||||
|
||||
# Add a toggle for reversed mode
|
||||
if auto_refresh and st.session_state.service_running:
|
||||
st.caption("Logs are shown newest-first for auto-scrolling. Disable auto-refresh to see logs in chronological order.")
|
||||
|
||||
# Add a clear output button
|
||||
if st.button("Clear Output"):
|
||||
st.session_state.service_output = []
|
||||
st.rerun()
|
||||
|
||||
# Auto-refresh if enabled and service is running
|
||||
if auto_refresh and st.session_state.service_running:
|
||||
time.sleep(0.1) # Small delay to prevent excessive CPU usage
|
||||
st.rerun()
|
||||
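# ---------------------------------------------------------------------------
# Editor's note: minimal standalone sketch of the pattern used above
# (subprocess + daemon reader threads + queue). It is illustrative only, is
# not called by the tab, and reuses the queue/subprocess/sys/threading imports
# at the top of this file. "some_service.py" is a placeholder script name.
# ---------------------------------------------------------------------------
def _spawn_with_output_queue(script_path="some_service.py"):
    """Start a script and collect its stdout/stderr lines in a queue."""
    output_queue = queue.Queue()
    process = subprocess.Popen(
        [sys.executable, script_path],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
        bufsize=1,
    )

    def _pump(stream):
        # Push each line into the queue until the stream closes.
        for line in iter(stream.readline, ''):
            output_queue.put(line)
        stream.close()

    threading.Thread(target=_pump, args=(process.stdout,), daemon=True).start()
    threading.Thread(target=_pump, args=(process.stderr,), daemon=True).start()
    return process, output_queue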
@@ -0,0 +1,86 @@
|
||||
from langgraph.types import Command
|
||||
import streamlit as st
|
||||
import uuid
|
||||
import sys
|
||||
import os
|
||||
|
||||
# Add the current directory to Python path
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
from archon.archon_graph import agentic_flow
|
||||
|
||||
@st.cache_resource
|
||||
def get_thread_id():
|
||||
return str(uuid.uuid4())
|
||||
|
||||
thread_id = get_thread_id()
|
||||
|
||||
async def run_agent_with_streaming(user_input: str):
|
||||
"""
|
||||
Run the agent with streaming text for the user_input prompt,
|
||||
while maintaining the entire conversation in `st.session_state.messages`.
|
||||
"""
|
||||
config = {
|
||||
"configurable": {
|
||||
"thread_id": thread_id
|
||||
}
|
||||
}
|
||||
|
||||
# First message from user
|
||||
if len(st.session_state.messages) == 1:
|
||||
async for msg in agentic_flow.astream(
|
||||
{"latest_user_message": user_input}, config, stream_mode="custom"
|
||||
):
|
||||
yield msg
|
||||
# Continue the conversation
|
||||
else:
|
||||
async for msg in agentic_flow.astream(
|
||||
Command(resume=user_input), config, stream_mode="custom"
|
||||
):
|
||||
yield msg
|
||||
|
||||
async def chat_tab():
|
||||
"""Display the chat interface for talking to Archon"""
|
||||
st.write("Describe to me an AI agent you want to build and I'll code it for you with Pydantic AI.")
|
||||
st.write("Example: Build me an AI agent that can search the web with the Brave API.")
|
||||
|
||||
# Initialize chat history in session state if not present
|
||||
if "messages" not in st.session_state:
|
||||
st.session_state.messages = []
|
||||
|
||||
# Add a clear conversation button
|
||||
if st.button("Clear Conversation"):
|
||||
st.session_state.messages = []
|
||||
st.rerun()
|
||||
|
||||
# Display chat messages from history on app rerun
|
||||
for message in st.session_state.messages:
|
||||
message_type = message["type"]
|
||||
if message_type in ["human", "ai", "system"]:
|
||||
with st.chat_message(message_type):
|
||||
st.markdown(message["content"])
|
||||
|
||||
# Chat input for the user
|
||||
user_input = st.chat_input("What do you want to build today?")
|
||||
|
||||
if user_input:
|
||||
# We append a new request to the conversation explicitly
|
||||
st.session_state.messages.append({"type": "human", "content": user_input})
|
||||
|
||||
# Display user prompt in the UI
|
||||
with st.chat_message("user"):
|
||||
st.markdown(user_input)
|
||||
|
||||
# Display assistant response in chat message container
|
||||
response_content = ""
|
||||
with st.chat_message("assistant"):
|
||||
message_placeholder = st.empty() # Placeholder for updating the message
|
||||
|
||||
# Add a spinner while loading
|
||||
with st.spinner("Archon is thinking..."):
|
||||
# Run the async generator to fetch responses
|
||||
async for chunk in run_agent_with_streaming(user_input):
|
||||
response_content += chunk
|
||||
# Update the placeholder with the current response content
|
||||
message_placeholder.markdown(response_content)
|
||||
|
||||
st.session_state.messages.append({"type": "ai", "content": response_content})
|
||||
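# ---------------------------------------------------------------------------
# Editor's note: hedged sketch of how the streaming generator above could be
# exercised outside the Streamlit UI, e.g. as a quick smoke test. It assumes
# st.session_state.messages has already been seeded with one human message,
# exactly as chat_tab does before calling run_agent_with_streaming.
# ---------------------------------------------------------------------------
import asyncio


async def _collect_response(user_input: str) -> str:
    """Drain the async generator into a single string."""
    response = ""
    async for chunk in run_agent_with_streaming(user_input):
        response += chunk
    return response

# Example usage (commented out so importing this module stays side-effect free):
# st.session_state.messages = [{"type": "human", "content": "Build me an agent"}]
# print(asyncio.run(_collect_response("Build me an agent")))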
@@ -0,0 +1,180 @@
|
||||
import streamlit as st
|
||||
import sys
|
||||
import os
|
||||
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
from utils.utils import get_env_var
|
||||
|
||||
@st.cache_data
|
||||
def load_sql_template():
|
||||
"""Load the SQL template file and cache it"""
|
||||
with open(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "utils", "site_pages.sql"), "r") as f:
|
||||
return f.read()
|
||||
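# ---------------------------------------------------------------------------
# Editor's note: the dimension substitution that database_tab performs inline
# further down, pulled out here as an illustrative helper. It is not called by
# the tab; it only shows the intended transformation of the cached template.
# ---------------------------------------------------------------------------
def _sql_for_dimensions(sql_template: str, vector_dim: int) -> str:
    """Return the site_pages SQL with vector(1536) swapped for the chosen size."""
    return sql_template.replace("vector(1536)", f"vector({vector_dim})")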
|
||||
def get_supabase_sql_editor_url(supabase_url):
|
||||
"""Get the URL for the Supabase SQL Editor"""
|
||||
try:
|
||||
# Extract the project reference from the URL
|
||||
# Format is typically: https://<project-ref>.supabase.co
|
||||
if '//' in supabase_url and 'supabase' in supabase_url:
|
||||
parts = supabase_url.split('//')
|
||||
if len(parts) > 1:
|
||||
domain_parts = parts[1].split('.')
|
||||
if len(domain_parts) > 0:
|
||||
project_ref = domain_parts[0]
|
||||
return f"https://supabase.com/dashboard/project/{project_ref}/sql/new"
|
||||
|
||||
# Fallback to a generic URL
|
||||
return "https://supabase.com/dashboard"
|
||||
except Exception:
|
||||
return "https://supabase.com/dashboard"
|
||||
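# ---------------------------------------------------------------------------
# Editor's note: illustrative check of the URL transformation above. The
# project ref "abcd1234" is a made-up placeholder, not a real project.
# ---------------------------------------------------------------------------
def _example_sql_editor_url() -> None:
    url = get_supabase_sql_editor_url("https://abcd1234.supabase.co")
    assert url == "https://supabase.com/dashboard/project/abcd1234/sql/new"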
|
||||
def show_manual_sql_instructions(sql, vector_dim, recreate=False):
|
||||
"""Show instructions for manually executing SQL in Supabase"""
|
||||
st.info("### Manual SQL Execution Instructions")
|
||||
|
||||
# Provide a link to the Supabase SQL Editor
|
||||
supabase_url = get_env_var("SUPABASE_URL")
|
||||
if supabase_url:
|
||||
dashboard_url = get_supabase_sql_editor_url(supabase_url)
|
||||
st.markdown(f"**Step 1:** [Open Your Supabase SQL Editor with this URL]({dashboard_url})")
|
||||
else:
|
||||
st.markdown("**Step 1:** Open your Supabase Dashboard and navigate to the SQL Editor")
|
||||
|
||||
st.markdown("**Step 2:** Create a new SQL query")
|
||||
|
||||
if recreate:
|
||||
st.markdown("**Step 3:** Copy and execute the following SQL:")
|
||||
drop_sql = f"DROP FUNCTION IF EXISTS match_site_pages(vector({vector_dim}), int, jsonb);\nDROP TABLE IF EXISTS site_pages CASCADE;"
|
||||
st.code(drop_sql, language="sql")
|
||||
|
||||
st.markdown("**Step 4:** Then copy and execute this SQL:")
|
||||
st.code(sql, language="sql")
|
||||
else:
|
||||
st.markdown("**Step 3:** Copy and execute the following SQL:")
|
||||
st.code(sql, language="sql")
|
||||
|
||||
st.success("After executing the SQL, return to this page and refresh to see the updated table status.")
|
||||
|
||||
def database_tab(supabase):
|
||||
"""Display the database configuration interface"""
|
||||
st.header("Database Configuration")
|
||||
st.write("Set up and manage your Supabase database tables for Archon.")
|
||||
|
||||
# Check if Supabase is configured
|
||||
if not supabase:
|
||||
st.error("Supabase is not configured. Please set your Supabase URL and Service Key in the Environment tab.")
|
||||
return
|
||||
|
||||
# Site Pages Table Setup
|
||||
st.subheader("Site Pages Table")
|
||||
st.write("This table stores web page content and embeddings for semantic search.")
|
||||
|
||||
# Add information about the table
|
||||
with st.expander("About the Site Pages Table", expanded=False):
|
||||
st.markdown("""
|
||||
This table is used to store:
|
||||
- Web page content split into chunks
|
||||
- Vector embeddings for semantic search
|
||||
- Metadata for filtering results
|
||||
|
||||
The table includes:
|
||||
- URL and chunk number (unique together)
|
||||
- Title and summary of the content
|
||||
- Full text content
|
||||
- Vector embeddings for similarity search
|
||||
- Metadata in JSON format
|
||||
|
||||
It also creates:
|
||||
- A vector similarity search function
|
||||
- Appropriate indexes for performance
|
||||
- Row-level security policies for Supabase
|
||||
""")
|
||||
|
||||
# Check if the table already exists
|
||||
table_exists = False
|
||||
table_has_data = False
|
||||
|
||||
try:
|
||||
# Try to query the table to see if it exists
|
||||
response = supabase.table("site_pages").select("id").limit(1).execute()
|
||||
table_exists = True
|
||||
|
||||
# Check if the table has data
|
||||
count_response = supabase.table("site_pages").select("*", count="exact").execute()
|
||||
row_count = count_response.count if hasattr(count_response, 'count') else 0
|
||||
table_has_data = row_count > 0
|
||||
|
||||
st.success("✅ The site_pages table already exists in your database.")
|
||||
if table_has_data:
|
||||
st.info(f"The table contains data ({row_count} rows).")
|
||||
else:
|
||||
st.info("The table exists but contains no data.")
|
||||
except Exception as e:
|
||||
error_str = str(e)
|
||||
if "relation" in error_str and "does not exist" in error_str:
|
||||
st.info("The site_pages table does not exist yet. You can create it below.")
|
||||
else:
|
||||
st.error(f"Error checking table status: {error_str}")
|
||||
st.info("Proceeding with the assumption that the table needs to be created.")
|
||||
table_exists = False
|
||||
|
||||
# Vector dimensions selection
|
||||
st.write("### Vector Dimensions")
|
||||
st.write("Select the embedding dimensions based on your embedding model:")
|
||||
|
||||
vector_dim = st.selectbox(
|
||||
"Embedding Dimensions",
|
||||
options=[1536, 768, 384, 1024],
|
||||
index=0,
|
||||
help="Use 1536 for OpenAI embeddings, 768 for nomic-embed-text with Ollama, or select another dimension based on your model."
|
||||
)
|
||||
|
||||
# Get the SQL with the selected vector dimensions
|
||||
sql_template = load_sql_template()
|
||||
|
||||
# Replace the vector dimensions in the SQL
|
||||
sql = sql_template.replace("vector(1536)", f"vector({vector_dim})")
|
||||
|
||||
# Also update the match_site_pages function dimensions
|
||||
sql = sql.replace("query_embedding vector(1536)", f"query_embedding vector({vector_dim})")
|
||||
|
||||
# Show the SQL
|
||||
with st.expander("View SQL", expanded=False):
|
||||
st.code(sql, language="sql")
|
||||
|
||||
# Create table button
|
||||
if not table_exists:
|
||||
if st.button("Get Instructions for Creating Site Pages Table"):
|
||||
show_manual_sql_instructions(sql, vector_dim)
|
||||
else:
|
||||
# Option to recreate the table or clear data
|
||||
col1, col2 = st.columns(2)
|
||||
|
||||
with col1:
|
||||
st.warning("⚠️ Recreating will delete all existing data.")
|
||||
if st.button("Get Instructions for Recreating Site Pages Table"):
|
||||
show_manual_sql_instructions(sql, vector_dim, recreate=True)
|
||||
|
||||
with col2:
|
||||
if table_has_data:
|
||||
st.warning("⚠️ Clear all data but keep structure.")
|
||||
if st.button("Clear Table Data"):
|
||||
try:
|
||||
with st.spinner("Clearing table data..."):
|
||||
# Use the Supabase client to delete all rows
|
||||
response = supabase.table("site_pages").delete().neq("id", 0).execute()
|
||||
st.success("✅ Table data cleared successfully!")
|
||||
st.rerun()
|
||||
except Exception as e:
|
||||
st.error(f"Error clearing table data: {str(e)}")
|
||||
# Fall back to manual SQL
|
||||
truncate_sql = "TRUNCATE TABLE site_pages;"
|
||||
st.code(truncate_sql, language="sql")
|
||||
st.info("Execute this SQL in your Supabase SQL Editor to clear the table data.")
|
||||
|
||||
# Provide a link to the Supabase SQL Editor
|
||||
supabase_url = get_env_var("SUPABASE_URL")
|
||||
if supabase_url:
|
||||
dashboard_url = get_supabase_sql_editor_url(supabase_url)
|
||||
st.markdown(f"[Open Your Supabase SQL Editor with this URL]({dashboard_url})")
|
||||
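# ---------------------------------------------------------------------------
# Editor's note: hedged illustration of the kind of record the site_pages
# table described above is meant to hold. The url, chunk_number, title and
# summary field names come from the queries in this project; "content",
# "metadata" and "embedding" follow the prose description and are assumptions.
# All values are invented placeholders.
# ---------------------------------------------------------------------------
EXAMPLE_SITE_PAGE_ROW = {
    "url": "https://ai.pydantic.dev/agents/",
    "chunk_number": 0,
    "title": "Agents",
    "summary": "Overview of Pydantic AI agents.",
    "content": "First chunk of the page text.",
    "metadata": {"source": "pydantic_ai_docs"},
    "embedding": [0.0] * 1536,  # length must match the selected vector dimension
}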
@@ -0,0 +1,158 @@
|
||||
import streamlit as st
|
||||
import time
|
||||
import sys
|
||||
import os
|
||||
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
from archon.crawl_pydantic_ai_docs import start_crawl_with_requests, clear_existing_records
|
||||
from utils.utils import get_env_var, create_new_tab_button
|
||||
|
||||
def documentation_tab(supabase_client):
|
||||
"""Display the documentation interface"""
|
||||
st.header("Documentation")
|
||||
|
||||
# Create tabs for different documentation sources
|
||||
doc_tabs = st.tabs(["Pydantic AI Docs", "Future Sources"])
|
||||
|
||||
with doc_tabs[0]:
|
||||
st.subheader("Pydantic AI Documentation")
|
||||
st.markdown("""
|
||||
This section allows you to crawl and index the Pydantic AI documentation.
|
||||
The crawler will:
|
||||
|
||||
1. Fetch URLs from the Pydantic AI sitemap
|
||||
2. Crawl each page and extract content
|
||||
3. Split content into chunks
|
||||
4. Generate embeddings for each chunk
|
||||
5. Store the chunks in the Supabase database
|
||||
|
||||
This process may take several minutes depending on the number of pages.
|
||||
""")
|
||||
|
||||
# Check if the database is configured
|
||||
supabase_url = get_env_var("SUPABASE_URL")
|
||||
supabase_key = get_env_var("SUPABASE_SERVICE_KEY")
|
||||
|
||||
if not supabase_url or not supabase_key:
|
||||
st.warning("⚠️ Supabase is not configured. Please set up your environment variables first.")
|
||||
create_new_tab_button("Go to Environment Section", "Environment", key="goto_env_from_docs")
|
||||
else:
|
||||
# Initialize session state for tracking crawl progress
|
||||
if "crawl_tracker" not in st.session_state:
|
||||
st.session_state.crawl_tracker = None
|
||||
|
||||
if "crawl_status" not in st.session_state:
|
||||
st.session_state.crawl_status = None
|
||||
|
||||
if "last_update_time" not in st.session_state:
|
||||
st.session_state.last_update_time = time.time()
|
||||
|
||||
# Create columns for the buttons
|
||||
col1, col2 = st.columns(2)
|
||||
|
||||
with col1:
|
||||
# Button to start crawling
|
||||
if st.button("Crawl Pydantic AI Docs", key="crawl_pydantic") and not (st.session_state.crawl_tracker and st.session_state.crawl_tracker.is_running):
|
||||
try:
|
||||
# Define a callback function to update the session state
|
||||
def update_progress(status):
|
||||
st.session_state.crawl_status = status
|
||||
|
||||
# Start the crawling process in a separate thread
|
||||
st.session_state.crawl_tracker = start_crawl_with_requests(update_progress)
|
||||
st.session_state.crawl_status = st.session_state.crawl_tracker.get_status()
|
||||
|
||||
# Force a rerun to start showing progress
|
||||
st.rerun()
|
||||
except Exception as e:
|
||||
st.error(f"❌ Error starting crawl: {str(e)}")
|
||||
|
||||
with col2:
|
||||
# Button to clear existing Pydantic AI docs
|
||||
if st.button("Clear Pydantic AI Docs", key="clear_pydantic"):
|
||||
with st.spinner("Clearing existing Pydantic AI docs..."):
|
||||
try:
|
||||
# Run the function to clear records
|
||||
clear_existing_records()
|
||||
st.success("✅ Successfully cleared existing Pydantic AI docs from the database.")
|
||||
|
||||
# Force a rerun to update the UI
|
||||
st.rerun()
|
||||
except Exception as e:
|
||||
st.error(f"❌ Error clearing Pydantic AI docs: {str(e)}")
|
||||
|
||||
# Display crawling progress if a crawl is in progress or has completed
|
||||
if st.session_state.crawl_tracker:
|
||||
# Create a container for the progress information
|
||||
progress_container = st.container()
|
||||
|
||||
with progress_container:
|
||||
# Get the latest status
|
||||
current_time = time.time()
|
||||
# Update status every second
|
||||
if current_time - st.session_state.last_update_time >= 1:
|
||||
st.session_state.crawl_status = st.session_state.crawl_tracker.get_status()
|
||||
st.session_state.last_update_time = current_time
|
||||
|
||||
status = st.session_state.crawl_status
|
||||
|
||||
# Display a progress bar
|
||||
if status and status["urls_found"] > 0:
|
||||
progress = status["urls_processed"] / status["urls_found"]
|
||||
st.progress(progress)
|
||||
|
||||
# Display status metrics
|
||||
col1, col2, col3, col4 = st.columns(4)
|
||||
if status:
|
||||
col1.metric("URLs Found", status["urls_found"])
|
||||
col2.metric("URLs Processed", status["urls_processed"])
|
||||
col3.metric("Successful", status["urls_succeeded"])
|
||||
col4.metric("Failed", status["urls_failed"])
|
||||
else:
|
||||
col1.metric("URLs Found", 0)
|
||||
col2.metric("URLs Processed", 0)
|
||||
col3.metric("Successful", 0)
|
||||
col4.metric("Failed", 0)
|
||||
|
||||
# Display logs in an expander
|
||||
with st.expander("Crawling Logs", expanded=True):
|
||||
if status and "logs" in status:
|
||||
logs_text = "\n".join(status["logs"][-20:]) # Show last 20 logs
|
||||
st.code(logs_text)
|
||||
else:
|
||||
st.code("No logs available yet...")
|
||||
|
||||
# Show completion message
|
||||
if status and not status["is_running"] and status["end_time"]:
|
||||
if status["urls_failed"] == 0:
|
||||
st.success("✅ Crawling process completed successfully!")
|
||||
else:
|
||||
st.warning(f"⚠️ Crawling process completed with {status['urls_failed']} failed URLs.")
|
||||
|
||||
# Auto-refresh while crawling is in progress
|
||||
if not status or status["is_running"]:
|
||||
st.rerun()
|
||||
|
||||
# Display database statistics
|
||||
st.subheader("Database Statistics")
|
||||
try:
|
||||
# Query the count of Pydantic AI docs
|
||||
result = supabase_client.table("site_pages").select("count", count="exact").eq("metadata->>source", "pydantic_ai_docs").execute()
|
||||
count = result.count if hasattr(result, "count") else 0
|
||||
|
||||
# Display the count
|
||||
st.metric("Pydantic AI Docs Chunks", count)
|
||||
|
||||
# Add a button to view the data
|
||||
if count > 0 and st.button("View Indexed Data", key="view_pydantic_data"):
|
||||
# Query a sample of the data
|
||||
sample_data = supabase_client.table("site_pages").select("url,title,summary,chunk_number").eq("metadata->>source", "pydantic_ai_docs").limit(10).execute()
|
||||
|
||||
# Display the sample data
|
||||
st.dataframe(sample_data.data)
|
||||
st.info("Showing up to 10 sample records. The database contains more records.")
|
||||
except Exception as e:
|
||||
st.error(f"Error querying database: {str(e)}")
|
||||
|
||||
with doc_tabs[1]:
|
||||
st.info("Additional documentation sources will be available in future updates.")
|
||||
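# ---------------------------------------------------------------------------
# Editor's note: hedged illustration of the status dictionary shape this tab
# expects from crawl_tracker.get_status(). The keys are the ones read above;
# the values (and the end_time format) are invented placeholders.
# ---------------------------------------------------------------------------
EXAMPLE_CRAWL_STATUS = {
    "is_running": False,
    "urls_found": 42,
    "urls_processed": 42,
    "urls_succeeded": 40,
    "urls_failed": 2,
    "end_time": 1700000000.0,  # assumed to be a time.time() style timestamp
    "logs": ["Found 42 URLs in the sitemap", "Crawl finished"],
}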
@@ -0,0 +1,362 @@
|
||||
import streamlit as st
|
||||
import sys
|
||||
import os
|
||||
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
from utils.utils import (
|
||||
get_env_var, save_env_var, reload_archon_graph,
|
||||
get_current_profile, set_current_profile, get_all_profiles,
|
||||
create_profile, delete_profile, get_profile_env_vars
|
||||
)
|
||||
|
||||
def environment_tab():
|
||||
# Get all available profiles and current profile
|
||||
profiles = get_all_profiles()
|
||||
current_profile = get_current_profile()
|
||||
|
||||
# Profile management section
|
||||
st.subheader("Profile Management")
|
||||
st.write("Profiles allow you to store different sets of environment variables for different providers or use cases.")
|
||||
|
||||
col1, col2 = st.columns([3, 1])
|
||||
|
||||
with col1:
|
||||
# Profile selector
|
||||
selected_profile = st.selectbox(
|
||||
"Select Profile",
|
||||
options=profiles,
|
||||
index=profiles.index(current_profile) if current_profile in profiles else 0,
|
||||
key="profile_selector"
|
||||
)
|
||||
|
||||
if selected_profile != current_profile:
|
||||
if set_current_profile(selected_profile):
|
||||
# Clear provider session state variables to force them to reload from the new profile
|
||||
if "llm_provider" in st.session_state:
|
||||
del st.session_state.llm_provider
|
||||
if "embedding_provider" in st.session_state:
|
||||
del st.session_state.embedding_provider
|
||||
|
||||
st.success(f"Switched to profile: {selected_profile}, reloading...")
|
||||
reload_archon_graph(show_reload_success=False)
|
||||
st.rerun()
|
||||
else:
|
||||
st.error("Failed to switch profile.")
|
||||
|
||||
with col2:
|
||||
# Add CSS for precise margin control
|
||||
st.markdown("""
|
||||
<style>
|
||||
div[data-testid="stChatInput"] {
|
||||
margin-top: 10px !important;
|
||||
}
|
||||
</style>
|
||||
""", unsafe_allow_html=True)
|
||||
|
||||
# New profile creation with CSS applied directly to the chat input
|
||||
new_profile_name = st.chat_input("New Profile Name", key="new_profile_name")
|
||||
|
||||
# Create the profile when a new name is submitted from the input above
|
||||
if new_profile_name:
|
||||
if new_profile_name in profiles:
|
||||
st.error(f"Profile '{new_profile_name}' already exists.")
|
||||
else:
|
||||
if create_profile(new_profile_name):
|
||||
# Clear provider session state variables for the new profile
|
||||
if "llm_provider" in st.session_state:
|
||||
del st.session_state.llm_provider
|
||||
if "embedding_provider" in st.session_state:
|
||||
del st.session_state.embedding_provider
|
||||
|
||||
st.success(f"Created profile: {new_profile_name}")
|
||||
st.rerun()
|
||||
else:
|
||||
st.error("Failed to create profile.")
|
||||
|
||||
# Delete profile option (not for default)
|
||||
if selected_profile != "default" and selected_profile == current_profile:
|
||||
if st.button("Delete Current Profile", key="delete_profile"):
|
||||
if delete_profile(selected_profile):
|
||||
# Clear provider session state variables to force them to reload from the default profile
|
||||
if "llm_provider" in st.session_state:
|
||||
del st.session_state.llm_provider
|
||||
if "embedding_provider" in st.session_state:
|
||||
del st.session_state.embedding_provider
|
||||
|
||||
st.success(f"Deleted profile: {selected_profile}, reloading...")
|
||||
reload_archon_graph(show_reload_success=False)
|
||||
st.rerun()
|
||||
else:
|
||||
st.error("Failed to delete profile.")
|
||||
|
||||
st.markdown("---")
|
||||
|
||||
# Environment variables section
|
||||
st.subheader(f"Environment Variables for Profile: {current_profile}")
|
||||
st.write("- Configure your environment variables for Archon. These settings will be saved and used for future sessions.")
|
||||
st.write("- NOTE: Press 'enter' to save after inputting a variable, otherwise click the 'save' button at the bottom.")
|
||||
st.write("- HELP: Hover over the '?' icon on the right for each environment variable for help/examples.")
|
||||
st.warning("⚠️ If your agent service for MCP is already running, you'll need to restart it after changing environment variables.")
|
||||
|
||||
# Get current profile's environment variables
|
||||
profile_env_vars = get_profile_env_vars()
|
||||
|
||||
# Define default URLs for providers
|
||||
llm_default_urls = {
|
||||
"OpenAI": "https://api.openai.com/v1",
|
||||
"Anthropic": "https://api.anthropic.com/v1",
|
||||
"OpenRouter": "https://openrouter.ai/api/v1",
|
||||
"Ollama": "http://localhost:11434/v1"
|
||||
}
|
||||
|
||||
embedding_default_urls = {
|
||||
"OpenAI": "https://api.openai.com/v1",
|
||||
"Ollama": "http://localhost:11434/v1"
|
||||
}
|
||||
|
||||
# Initialize session state for provider selections if not already set
|
||||
if "llm_provider" not in st.session_state:
|
||||
st.session_state.llm_provider = profile_env_vars.get("LLM_PROVIDER", "OpenAI")
|
||||
|
||||
if "embedding_provider" not in st.session_state:
|
||||
st.session_state.embedding_provider = profile_env_vars.get("EMBEDDING_PROVIDER", "OpenAI")
|
||||
|
||||
# 1. Large Language Models Section - Provider Selection (outside form)
|
||||
st.subheader("1. Select Your LLM Provider")
|
||||
|
||||
# LLM Provider dropdown
|
||||
llm_providers = ["OpenAI", "Anthropic", "OpenRouter", "Ollama"]
|
||||
|
||||
selected_llm_provider = st.selectbox(
|
||||
"LLM Provider",
|
||||
options=llm_providers,
|
||||
index=llm_providers.index(st.session_state.llm_provider) if st.session_state.llm_provider in llm_providers else 0,
|
||||
key="llm_provider_selector"
|
||||
)
|
||||
|
||||
# Update session state if provider changed
|
||||
if selected_llm_provider != st.session_state.llm_provider:
|
||||
st.session_state.llm_provider = selected_llm_provider
|
||||
st.rerun() # Force a rerun to update the form
|
||||
|
||||
# 2. Embedding Models Section - Provider Selection (outside form)
|
||||
st.subheader("2. Select Your Embedding Model Provider")
|
||||
|
||||
# Embedding Provider dropdown
|
||||
embedding_providers = ["OpenAI", "Ollama"]
|
||||
|
||||
selected_embedding_provider = st.selectbox(
|
||||
"Embedding Provider",
|
||||
options=embedding_providers,
|
||||
index=embedding_providers.index(st.session_state.embedding_provider) if st.session_state.embedding_provider in embedding_providers else 0,
|
||||
key="embedding_provider_selector"
|
||||
)
|
||||
|
||||
# Update session state if provider changed
|
||||
if selected_embedding_provider != st.session_state.embedding_provider:
|
||||
st.session_state.embedding_provider = selected_embedding_provider
|
||||
st.rerun() # Force a rerun to update the form
|
||||
|
||||
# 3. Set environment variables (within the form)
|
||||
st.subheader("3. Set All Environment Variables")
|
||||
|
||||
# Create a form for the environment variables
|
||||
with st.form("env_vars_form"):
|
||||
updated_values = {}
|
||||
|
||||
# Store the selected providers in the updated values
|
||||
updated_values["LLM_PROVIDER"] = selected_llm_provider
|
||||
updated_values["EMBEDDING_PROVIDER"] = selected_embedding_provider
|
||||
|
||||
# 1. Large Language Models Section - Settings
|
||||
st.subheader("LLM Settings")
|
||||
|
||||
# BASE_URL
|
||||
base_url_help = "Base URL for your LLM provider:\n\n" + \
|
||||
"OpenAI: https://api.openai.com/v1\n\n" + \
|
||||
"Anthropic: https://api.anthropic.com/v1\n\n" + \
|
||||
"OpenRouter: https://openrouter.ai/api/v1\n\n" + \
|
||||
"Ollama: http://localhost:11434/v1"
|
||||
|
||||
# Get current BASE_URL or use default for selected provider
|
||||
current_base_url = profile_env_vars.get("BASE_URL", llm_default_urls.get(selected_llm_provider, ""))
|
||||
|
||||
# If provider changed or BASE_URL is empty, use the default
|
||||
if not current_base_url or profile_env_vars.get("LLM_PROVIDER", "") != selected_llm_provider:
|
||||
current_base_url = llm_default_urls.get(selected_llm_provider, "")
|
||||
|
||||
llm_base_url = st.text_input(
|
||||
"BASE_URL:",
|
||||
value=current_base_url,
|
||||
help=base_url_help,
|
||||
key="input_BASE_URL"
|
||||
)
|
||||
updated_values["BASE_URL"] = llm_base_url
|
||||
|
||||
# API_KEY
|
||||
api_key_help = "API key for your LLM provider:\n\n" + \
|
||||
"For OpenAI: https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key\n\n" + \
|
||||
"For Anthropic: https://console.anthropic.com/account/keys\n\n" + \
|
||||
"For OpenRouter: https://openrouter.ai/keys\n\n" + \
|
||||
"For Ollama, no need to set this unless you specifically configured an API key"
|
||||
|
||||
# Get current API_KEY or set default for Ollama
|
||||
current_api_key = profile_env_vars.get("LLM_API_KEY", "")
|
||||
|
||||
# If provider is Ollama and LLM_API_KEY is empty or provider changed, set to NOT_REQUIRED
|
||||
if selected_llm_provider == "Ollama" and (not current_api_key or profile_env_vars.get("LLM_PROVIDER", "") != selected_llm_provider):
|
||||
current_api_key = "NOT_REQUIRED"
|
||||
|
||||
# If there's already a value, indicate it in the placeholder without revealing it
|
||||
placeholder = current_api_key if current_api_key == "NOT_REQUIRED" else "Set but hidden" if current_api_key else ""
|
||||
api_key = st.text_input(
|
||||
"API_KEY:",
|
||||
type="password" if current_api_key != "NOT_REQUIRED" else "default",
|
||||
help=api_key_help,
|
||||
key="input_LLM_API_KEY",
|
||||
placeholder=placeholder
|
||||
)
|
||||
# Only update if user entered something (to avoid overwriting with empty string)
|
||||
if api_key:
|
||||
updated_values["LLM_API_KEY"] = api_key
|
||||
elif selected_llm_provider == "Ollama" and (not current_api_key or current_api_key == "NOT_REQUIRED"):
|
||||
updated_values["LLM_API_KEY"] = "NOT_REQUIRED"
|
||||
|
||||
# PRIMARY_MODEL
|
||||
primary_model_help = "The LLM you want to use for the primary agent/coder\n\n" + \
|
||||
"Example: gpt-4o-mini\n\n" + \
|
||||
"Example: qwen2.5:14b-instruct-8k"
|
||||
|
||||
primary_model = st.text_input(
|
||||
"PRIMARY_MODEL:",
|
||||
value=profile_env_vars.get("PRIMARY_MODEL", ""),
|
||||
help=primary_model_help,
|
||||
key="input_PRIMARY_MODEL"
|
||||
)
|
||||
updated_values["PRIMARY_MODEL"] = primary_model
|
||||
|
||||
# REASONER_MODEL
|
||||
reasoner_model_help = "The LLM you want to use for the reasoner\n\n" + \
|
||||
"Example: o3-mini\n\n" + \
|
||||
"Example: deepseek-r1:7b-8k"
|
||||
|
||||
reasoner_model = st.text_input(
|
||||
"REASONER_MODEL:",
|
||||
value=profile_env_vars.get("REASONER_MODEL", ""),
|
||||
help=reasoner_model_help,
|
||||
key="input_REASONER_MODEL"
|
||||
)
|
||||
updated_values["REASONER_MODEL"] = reasoner_model
|
||||
|
||||
st.markdown("---")
|
||||
|
||||
# 2. Embedding Models Section - Settings
|
||||
st.subheader("Embedding Settings")
|
||||
|
||||
# EMBEDDING_BASE_URL
|
||||
embedding_base_url_help = "Base URL for your embedding provider:\n\n" + \
|
||||
"OpenAI: https://api.openai.com/v1\n\n" + \
|
||||
"Ollama: http://localhost:11434/v1"
|
||||
|
||||
# Get current EMBEDDING_BASE_URL or use default for selected provider
|
||||
current_embedding_base_url = profile_env_vars.get("EMBEDDING_BASE_URL", embedding_default_urls.get(selected_embedding_provider, ""))
|
||||
|
||||
# If provider changed or EMBEDDING_BASE_URL is empty, use the default
|
||||
if not current_embedding_base_url or profile_env_vars.get("EMBEDDING_PROVIDER", "") != selected_embedding_provider:
|
||||
current_embedding_base_url = embedding_default_urls.get(selected_embedding_provider, "")
|
||||
|
||||
embedding_base_url = st.text_input(
|
||||
"EMBEDDING_BASE_URL:",
|
||||
value=current_embedding_base_url,
|
||||
help=embedding_base_url_help,
|
||||
key="input_EMBEDDING_BASE_URL"
|
||||
)
|
||||
updated_values["EMBEDDING_BASE_URL"] = embedding_base_url
|
||||
|
||||
# EMBEDDING_API_KEY
|
||||
embedding_api_key_help = "API key for your embedding provider:\n\n" + \
|
||||
"For OpenAI: https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key\n\n" + \
|
||||
"For Ollama, no need to set this unless you specifically configured an API key"
|
||||
|
||||
# Get current EMBEDDING_API_KEY or set default for Ollama
|
||||
current_embedding_api_key = profile_env_vars.get("EMBEDDING_API_KEY", "")
|
||||
|
||||
# If provider is Ollama and EMBEDDING_API_KEY is empty or provider changed, set to NOT_REQUIRED
|
||||
if selected_embedding_provider == "Ollama" and (not current_embedding_api_key or profile_env_vars.get("EMBEDDING_PROVIDER", "") != selected_embedding_provider):
|
||||
current_embedding_api_key = "NOT_REQUIRED"
|
||||
|
||||
# If there's already a value, indicate it in the placeholder without revealing it
|
||||
placeholder = "Set but hidden" if current_embedding_api_key else ""
|
||||
embedding_api_key = st.text_input(
|
||||
"EMBEDDING_API_KEY:",
|
||||
type="password",
|
||||
help=embedding_api_key_help,
|
||||
key="input_EMBEDDING_API_KEY",
|
||||
placeholder=placeholder
|
||||
)
|
||||
# Only update if user entered something (to avoid overwriting with empty string)
|
||||
if embedding_api_key:
|
||||
updated_values["EMBEDDING_API_KEY"] = embedding_api_key
|
||||
elif selected_embedding_provider == "Ollama" and (not current_embedding_api_key or current_embedding_api_key == "NOT_REQUIRED"):
|
||||
updated_values["EMBEDDING_API_KEY"] = "NOT_REQUIRED"
|
||||
|
||||
# EMBEDDING_MODEL
|
||||
embedding_model_help = "Embedding model you want to use\n\n" + \
|
||||
"Example for Ollama: nomic-embed-text\n\n" + \
|
||||
"Example for OpenAI: text-embedding-3-small"
|
||||
|
||||
embedding_model = st.text_input(
|
||||
"EMBEDDING_MODEL:",
|
||||
value=profile_env_vars.get("EMBEDDING_MODEL", ""),
|
||||
help=embedding_model_help,
|
||||
key="input_EMBEDDING_MODEL"
|
||||
)
|
||||
updated_values["EMBEDDING_MODEL"] = embedding_model
|
||||
|
||||
st.markdown("---")
|
||||
|
||||
# 3. Database Section
|
||||
st.header("3. Database")
|
||||
|
||||
# SUPABASE_URL
|
||||
supabase_url_help = "Get your SUPABASE_URL from the API section of your Supabase project settings -\nhttps://supabase.com/dashboard/project/<your project ID>/settings/api"
|
||||
|
||||
supabase_url = st.text_input(
|
||||
"SUPABASE_URL:",
|
||||
value=profile_env_vars.get("SUPABASE_URL", ""),
|
||||
help=supabase_url_help,
|
||||
key="input_SUPABASE_URL"
|
||||
)
|
||||
updated_values["SUPABASE_URL"] = supabase_url
|
||||
|
||||
# SUPABASE_SERVICE_KEY
|
||||
supabase_key_help = "Get your SUPABASE_SERVICE_KEY from the API section of your Supabase project settings -\nhttps://supabase.com/dashboard/project/<your project ID>/settings/api\nOn this page it is called the service_role secret."
|
||||
|
||||
# If there's already a value, indicate it in the placeholder without revealing it
|
||||
placeholder = "Set but hidden" if profile_env_vars.get("SUPABASE_SERVICE_KEY", "") else ""
|
||||
supabase_key = st.text_input(
|
||||
"SUPABASE_SERVICE_KEY:",
|
||||
type="password",
|
||||
help=supabase_key_help,
|
||||
key="input_SUPABASE_SERVICE_KEY",
|
||||
placeholder=placeholder
|
||||
)
|
||||
# Only update if user entered something (to avoid overwriting with empty string)
|
||||
if supabase_key:
|
||||
updated_values["SUPABASE_SERVICE_KEY"] = supabase_key
|
||||
|
||||
# Submit button
|
||||
submitted = st.form_submit_button("Save Environment Variables")
|
||||
|
||||
if submitted:
|
||||
# Save all updated values to the current profile
|
||||
success = True
|
||||
for var_name, value in updated_values.items():
|
||||
if value or var_name in ["LLM_API_KEY", "EMBEDDING_API_KEY"]: # Allow empty strings for API keys (they might be intentionally cleared)
|
||||
if not save_env_var(var_name, value):
|
||||
success = False
|
||||
st.error(f"Failed to save {var_name}.")
|
||||
|
||||
if success:
|
||||
st.success(f"Environment variables saved successfully to profile: {current_profile}!")
|
||||
reload_archon_graph()
|
||||
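# ---------------------------------------------------------------------------
# Editor's note: hedged example of the mapping this tab reads and writes via
# get_profile_env_vars()/save_env_var(). The variable names come from the form
# above; every value is an illustrative placeholder (defaults and model names
# are taken from the help text, the keys and URLs are dummies).
# ---------------------------------------------------------------------------
EXAMPLE_PROFILE_ENV_VARS = {
    "LLM_PROVIDER": "OpenAI",
    "BASE_URL": "https://api.openai.com/v1",
    "LLM_API_KEY": "YOUR_API_KEY",
    "PRIMARY_MODEL": "gpt-4o-mini",
    "REASONER_MODEL": "o3-mini",
    "EMBEDDING_PROVIDER": "OpenAI",
    "EMBEDDING_BASE_URL": "https://api.openai.com/v1",
    "EMBEDDING_API_KEY": "YOUR_API_KEY",
    "EMBEDDING_MODEL": "text-embedding-3-small",
    "SUPABASE_URL": "https://YOUR_PROJECT_REF.supabase.co",
    "SUPABASE_SERVICE_KEY": "YOUR_SERVICE_ROLE_KEY",
}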
@@ -0,0 +1,831 @@
|
||||
import streamlit as st
|
||||
|
||||
def future_enhancements_tab():
|
||||
"""Display the future enhancements and integrations interface"""
|
||||
st.write("## Future Enhancements")
|
||||
|
||||
st.write("Explore what's coming next for Archon - from specialized multi-agent workflows to autonomous framework learning.")
|
||||
|
||||
# Future Iterations section
|
||||
st.write("### Future Iterations")
|
||||
|
||||
# V5: Multi-Agent Coding Workflow
|
||||
with st.expander("V5: Multi-Agent Coding Workflow"):
|
||||
st.write("Specialized agents for different parts of the agent creation process")
|
||||
|
||||
# Create a visual representation of multi-agent workflow
|
||||
st.write("#### Multi-Agent Coding Architecture")
|
||||
|
||||
# Describe the parallel architecture
|
||||
st.markdown("""
|
||||
The V5 architecture introduces specialized parallel agents that work simultaneously on different aspects of agent creation:
|
||||
|
||||
1. **Reasoner Agent**: Analyzes requirements and plans the overall agent architecture
|
||||
2. **Parallel Coding Agents**:
|
||||
- **Prompt Engineering Agent**: Designs optimal prompts for the agent
|
||||
- **Tool Definition Agent**: Creates tool specifications and interfaces
|
||||
- **Dependencies Agent**: Identifies required libraries and dependencies
|
||||
- **Model Selection Agent**: Determines the best model configuration
|
||||
3. **Final Coding Agent**: Integrates all components into a cohesive agent
|
||||
4. **Human-in-the-Loop**: Iterative refinement with the final coding agent
|
||||
""")
|
||||
|
||||
# Display parallel agents
|
||||
st.write("#### Parallel Coding Agents")
|
||||
|
||||
col1, col2, col3, col4 = st.columns(4)
|
||||
|
||||
with col1:
|
||||
st.info("**Prompt Engineering Agent**\n\nDesigns optimal prompts for different agent scenarios")
|
||||
|
||||
with col2:
|
||||
st.success("**Tool Definition Agent**\n\nCreates tool specifications and interfaces")
|
||||
|
||||
with col3:
|
||||
st.warning("**Dependencies Agent**\n\nIdentifies required libraries and dependencies")
|
||||
|
||||
with col4:
|
||||
st.error("**Model Selection Agent**\n\nDetermines the best model configuration")
|
||||
|
||||
# Updated flow chart visualization with better colors for ovals
|
||||
st.graphviz_chart('''
|
||||
digraph {
|
||||
rankdir=LR;
|
||||
node [shape=box, style=filled, color=lightblue];
|
||||
|
||||
User [label="User Request", shape=ellipse, style=filled, color=purple, fontcolor=black];
|
||||
Reasoner [label="Reasoner\nAgent"];
|
||||
|
||||
subgraph cluster_parallel {
|
||||
label = "Parallel Coding Agents";
|
||||
color = lightgrey;
|
||||
style = filled;
|
||||
|
||||
Prompt [label="Prompt\nEngineering\nAgent", color=lightskyblue];
|
||||
Tools [label="Tool\nDefinition\nAgent", color=green];
|
||||
Dependencies [label="Dependencies\nAgent", color=yellow];
|
||||
Model [label="Model\nSelection\nAgent", color=pink];
|
||||
}
|
||||
|
||||
Final [label="Final\nCoding\nAgent"];
|
||||
Human [label="Human-in-the-Loop\nIteration", shape=ellipse, style=filled, color=orange, fontcolor=black];
|
||||
|
||||
User -> Reasoner;
|
||||
Reasoner -> Prompt;
|
||||
Reasoner -> Tools;
|
||||
Reasoner -> Dependencies;
|
||||
Reasoner -> Model;
|
||||
|
||||
Prompt -> Final;
|
||||
Tools -> Final;
|
||||
Dependencies -> Final;
|
||||
Model -> Final;
|
||||
|
||||
Final -> Human;
|
||||
Human -> Final [label="Feedback Loop", color=red, constraint=false];
|
||||
}
|
||||
''')
|
||||
|
||||
st.write("#### Benefits of Parallel Agent Architecture")
|
||||
st.markdown("""
|
||||
- **Specialization**: Each agent focuses on its area of expertise
|
||||
- **Efficiency**: Parallel processing reduces overall development time
|
||||
- **Quality**: Specialized agents produce higher quality components
|
||||
- **Flexibility**: Easy to add new specialized agents as needed
|
||||
- **Scalability**: Architecture can handle complex agent requirements
|
||||
""")
|
||||
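# Editor's note: hedged sketch added for clarity; it is not from the original
# roadmap text. It shows one way the parallel fan-out described above could be
# expressed with asyncio, assuming each specialized agent exposes an async
# run() method like a Pydantic AI Agent.
st.code('''
import asyncio

async def run_parallel_coding_agents(plan: str):
    # prompt_agent, tools_agent, deps_agent and model_agent are assumed to be
    # pre-built specialized agents with an async run() method.
    results = await asyncio.gather(
        prompt_agent.run(plan),
        tools_agent.run(plan),
        deps_agent.run(plan),
        model_agent.run(plan),
    )
    return [r.data for r in results]
''', language="python")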
|
||||
# V6: Tool Library and Example Integration
|
||||
with st.expander("V6: Tool Library and Example Integration"):
|
||||
st.write("Pre-built external tool and agent examples incorporation")
|
||||
st.write("""
|
||||
With pre-built tools, the agent can pull full functions from the tool library so it doesn't have to
|
||||
create them from scratch. On top of that, pre-built agents will give Archon a starting point
|
||||
so it doesn't have to build the agent structure from scratch either.
|
||||
""")
|
||||
|
||||
st.write("#### Example Integration Configuration")
|
||||
|
||||
# Add tabs for different aspects of V6
|
||||
tool_tab, example_tab = st.tabs(["Tool Library", "Example Agents"])
|
||||
|
||||
with tool_tab:
|
||||
st.write("##### Example Tool Library Config (could be a RAG implementation too, still deciding)")
|
||||
|
||||
sample_config = """
|
||||
{
|
||||
"tool_library": {
|
||||
"web_tools": {
|
||||
"web_search": {
|
||||
"type": "search_engine",
|
||||
"api_key_env": "SEARCH_API_KEY",
|
||||
"description": "Search the web for information"
|
||||
},
|
||||
"web_browser": {
|
||||
"type": "browser",
|
||||
"description": "Navigate web pages and extract content"
|
||||
}
|
||||
},
|
||||
"data_tools": {
|
||||
"database_query": {
|
||||
"type": "sql_executor",
|
||||
"description": "Execute SQL queries against databases"
|
||||
},
|
||||
"data_analysis": {
|
||||
"type": "pandas_processor",
|
||||
"description": "Analyze data using pandas"
|
||||
}
|
||||
},
|
||||
"ai_service_tools": {
|
||||
"image_generation": {
|
||||
"type": "text_to_image",
|
||||
"api_key_env": "IMAGE_GEN_API_KEY",
|
||||
"description": "Generate images from text descriptions"
|
||||
},
|
||||
"text_to_speech": {
|
||||
"type": "tts_converter",
|
||||
"api_key_env": "TTS_API_KEY",
|
||||
"description": "Convert text to spoken audio"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
st.code(sample_config, language="json")
|
||||
|
||||
st.write("##### Pydantic AI Tool Definition Example")
|
||||
|
||||
pydantic_tool_example = """
|
||||
from pydantic_ai import Agent, RunContext, Tool
|
||||
from typing import Union, List, Dict, Any
|
||||
import requests
|
||||
|
||||
@agent.tool
|
||||
async def weather_tool(ctx: RunContext[Dict[str, Any]], location: str) -> str:
|
||||
\"\"\"Get current weather information for a location.
|
||||
|
||||
Args:
|
||||
location: The city and state/country (e.g., 'San Francisco, CA')
|
||||
|
||||
Returns:
|
||||
A string with current weather conditions and temperature
|
||||
\"\"\"
|
||||
api_key = ctx.deps.get("WEATHER_API_KEY")
|
||||
if not api_key:
|
||||
return "Error: Weather API key not configured"
|
||||
|
||||
try:
|
||||
url = f"https://api.weatherapi.com/v1/current.json?key={api_key}&q={location}"
|
||||
response = requests.get(url)
|
||||
data = response.json()
|
||||
|
||||
if "error" in data:
|
||||
return f"Error: {data['error']['message']}"
|
||||
|
||||
current = data["current"]
|
||||
location_name = f"{data['location']['name']}, {data['location']['country']}"
|
||||
condition = current["condition"]["text"]
|
||||
temp_c = current["temp_c"]
|
||||
temp_f = current["temp_f"]
|
||||
humidity = current["humidity"]
|
||||
|
||||
return f"Weather in {location_name}: {condition}, {temp_c}°C ({temp_f}°F), {humidity}% humidity"
|
||||
except Exception as e:
|
||||
return f"Error retrieving weather data: {str(e)}"
|
||||
"""
|
||||
st.code(pydantic_tool_example, language="python")
|
||||
|
||||
st.write("##### Tool Usage in Agent")
|
||||
tool_usage_example = """
|
||||
async def use_weather_tool(location: str) -> str:
|
||||
\"\"\"Search for weather information\"\"\"
|
||||
tool = agent.get_tool("get_weather")
|
||||
result = await tool.execute({"location": location})
|
||||
return result.content
|
||||
"""
|
||||
st.code(tool_usage_example, language="python")
|
||||
|
||||
with example_tab:
|
||||
st.write("##### Example Agents")
|
||||
st.markdown("""
|
||||
V6 will include pre-built example agents that serve as templates and learning resources. These examples will be baked directly into agent prompts to improve results and consistency.
|
||||
|
||||
**Benefits of Example Agents:**
|
||||
- Provide concrete implementation patterns for common agent types
|
||||
- Demonstrate best practices for tool usage and error handling
|
||||
- Serve as starting points that can be customized for specific needs
|
||||
- Improve consistency in agent behavior and output format
|
||||
- Reduce the learning curve for new users
|
||||
""")
|
||||
|
||||
st.write("##### Example Agent Types")
|
||||
|
||||
example_agents = {
|
||||
"Research Assistant": {
|
||||
"description": "Performs comprehensive research on topics using web search and content analysis",
|
||||
"tools": ["web_search", "web_browser", "summarization"],
|
||||
"example_prompt": "Research the latest advancements in quantum computing and provide a summary"
|
||||
},
|
||||
"Data Analyst": {
|
||||
"description": "Analyzes datasets, generates visualizations, and provides insights",
|
||||
"tools": ["database_query", "data_analysis", "chart_generation"],
|
||||
"example_prompt": "Analyze this sales dataset and identify key trends over the past quarter"
|
||||
},
|
||||
"Content Creator": {
|
||||
"description": "Generates various types of content including text, images, and code",
|
||||
"tools": ["text_generation", "image_generation", "code_generation"],
|
||||
"example_prompt": "Create a blog post about sustainable living with accompanying images"
|
||||
},
|
||||
"Conversational Assistant": {
|
||||
"description": "Engages in helpful, informative conversations with natural dialogue",
|
||||
"tools": ["knowledge_base", "memory_management", "personalization"],
|
||||
"example_prompt": "I'd like to learn more about machine learning. Where should I start?"
|
||||
}
|
||||
}
|
||||
|
||||
# Create a table of example agents
|
||||
example_data = {
|
||||
"Agent Type": list(example_agents.keys()),
|
||||
"Description": [example_agents[a]["description"] for a in example_agents],
|
||||
"Core Tools": [", ".join(example_agents[a]["tools"]) for a in example_agents]
|
||||
}
|
||||
|
||||
st.dataframe(example_data, use_container_width=True)
|
||||
|
||||
st.write("##### Example Agent Implementation")
|
||||
|
||||
st.code("""
|
||||
# Example Weather Agent based on Pydantic AI documentation
|
||||
from pydantic_ai import Agent, RunContext
|
||||
from typing import Dict, Any
|
||||
from dataclasses import dataclass
|
||||
from httpx import AsyncClient
|
||||
|
||||
@dataclass
|
||||
class WeatherDeps:
|
||||
client: AsyncClient
|
||||
weather_api_key: str | None
|
||||
geo_api_key: str | None
|
||||
|
||||
# Create the agent with appropriate system prompt
|
||||
weather_agent = Agent(
|
||||
'openai:gpt-4o',
|
||||
system_prompt=(
|
||||
'Be concise, reply with one sentence. '
|
||||
'Use the `get_lat_lng` tool to get the latitude and longitude of locations, '
|
||||
'then use the `get_weather` tool to get the weather.'
|
||||
),
|
||||
deps_type=WeatherDeps,
|
||||
)
|
||||
|
||||
@weather_agent.tool
|
||||
async def get_lat_lng(ctx: RunContext[WeatherDeps], location_description: str) -> Dict[str, float]:
|
||||
\"\"\"Get the latitude and longitude of a location.
|
||||
|
||||
Args:
|
||||
location_description: A description of a location (e.g., 'London, UK')
|
||||
|
||||
Returns:
|
||||
Dictionary with lat and lng keys
|
||||
\"\"\"
|
||||
if ctx.deps.geo_api_key is None:
|
||||
# Return dummy data if no API key
|
||||
return {'lat': 51.1, 'lng': -0.1}
|
||||
|
||||
# Call geocoding API
|
||||
params = {'q': location_description, 'api_key': ctx.deps.geo_api_key}
|
||||
r = await ctx.deps.client.get('https://geocode.maps.co/search', params=params)
|
||||
r.raise_for_status()
|
||||
data = r.json()
|
||||
|
||||
if data:
|
||||
return {'lat': float(data[0]['lat']), 'lng': float(data[0]['lon'])}
|
||||
else:
|
||||
return {'error': 'Location not found'}
|
||||
|
||||
@weather_agent.tool
|
||||
async def get_weather(ctx: RunContext[WeatherDeps], lat: float, lng: float) -> Dict[str, Any]:
|
||||
\"\"\"Get the weather at a location.
|
||||
|
||||
Args:
|
||||
lat: Latitude of the location
|
||||
lng: Longitude of the location
|
||||
|
||||
Returns:
|
||||
Dictionary with temperature and description
|
||||
\"\"\"
|
||||
if ctx.deps.weather_api_key is None:
|
||||
# Return dummy data if no API key
|
||||
return {'temperature': '21°C', 'description': 'Sunny'}
|
||||
|
||||
# Call weather API
|
||||
params = {
|
||||
'apikey': ctx.deps.weather_api_key,
|
||||
'location': f'{lat},{lng}',
|
||||
'units': 'metric',
|
||||
}
|
||||
r = await ctx.deps.client.get(
|
||||
'https://api.tomorrow.io/v4/weather/realtime',
|
||||
params=params
|
||||
)
|
||||
r.raise_for_status()
|
||||
data = r.json()
|
||||
|
||||
values = data['data']['values']
|
||||
weather_codes = {
|
||||
1000: 'Clear, Sunny',
|
||||
1100: 'Mostly Clear',
|
||||
1101: 'Partly Cloudy',
|
||||
4001: 'Rain',
|
||||
5000: 'Snow',
|
||||
8000: 'Thunderstorm',
|
||||
}
|
||||
|
||||
return {
|
||||
'temperature': f'{values["temperatureApparent"]:0.0f}°C',
|
||||
'description': weather_codes.get(values['weatherCode'], 'Unknown'),
|
||||
}
|
||||
|
||||
# Example usage
|
||||
async def get_weather_report(location: str) -> str:
|
||||
\"\"\"Get weather report for a location.\"\"\"
|
||||
async with AsyncClient() as client:
|
||||
deps = WeatherDeps(
|
||||
client=client,
|
||||
weather_api_key="YOUR_API_KEY", # Replace with actual key
|
||||
geo_api_key="YOUR_API_KEY", # Replace with actual key
|
||||
)
|
||||
result = await weather_agent.run(
|
||||
f"What is the weather like in {location}?",
|
||||
deps=deps
|
||||
)
|
||||
return result.data
|
||||
""", language="python")
|
||||
|
||||
st.info("""
|
||||
**In-Context Learning with Examples**
|
||||
|
||||
These example agents will be used in the system prompt for Archon, providing concrete examples that help the LLM understand the expected structure and quality of agent code. This approach leverages in-context learning to significantly improve code generation quality and consistency.
|
||||
""")
|
||||
|
||||
# V7: LangGraph Documentation
with st.expander("V7: LangGraph Documentation"):
st.write("Integrating LangGraph for complex agent workflows")

st.markdown("""
### Pydantic AI vs LangGraph with Pydantic AI

V7 will integrate LangGraph to enable complex agent workflows while maintaining compatibility with Pydantic AI agents.
This allows for creating sophisticated multi-agent systems with well-defined state management and workflow control.
""")

col1, col2 = st.columns(2)

with col1:
st.markdown("#### Pydantic AI Agent")
st.markdown("Simple, standalone agent with tools")

pydantic_agent_code = """
# Simple Pydantic AI Weather Agent
from pydantic_ai import Agent, RunContext
from typing import Dict, Any
from dataclasses import dataclass
from httpx import AsyncClient

@dataclass
class WeatherDeps:
client: AsyncClient
weather_api_key: str | None

# Create the agent
weather_agent = Agent(
'openai:gpt-4o',
system_prompt="You provide weather information.",
deps_type=WeatherDeps,
)

@weather_agent.tool
async def get_weather(
ctx: RunContext[WeatherDeps],
location: str
) -> Dict[str, Any]:
\"\"\"Get weather for a location.\"\"\"
# Implementation details...
return {"temperature": "21°C", "description": "Sunny"}

# Usage
async def main():
async with AsyncClient() as client:
deps = WeatherDeps(
client=client,
weather_api_key="API_KEY"
)
result = await weather_agent.run(
"What's the weather in London?",
deps=deps
)
print(result.data)
"""
st.code(pydantic_agent_code, language="python")

with col2:
st.markdown("#### LangGraph with Pydantic AI Agent")
st.markdown("Complex workflow using Pydantic AI agents in a graph")

langgraph_code = """
# LangGraph with Pydantic AI Agents
from pydantic_ai import Agent, RunContext
from typing import TypedDict, Literal
from dataclasses import dataclass
from httpx import AsyncClient
from langgraph.graph import StateGraph, START, END

# Define state for LangGraph
class GraphState(TypedDict):
query: str
weather_result: str
verified: bool
response: str

# Create a verifier agent
verifier_agent = Agent(
'openai:gpt-4o',
system_prompt=(
"You verify weather information for accuracy and completeness. "
"Check if the weather report includes temperature, conditions, "
"and is properly formatted."
)
)

# Define nodes for the graph
async def get_weather_info(state: GraphState) -> GraphState:
\"\"\"Use the weather agent to get weather information.\"\"\"
# Simply use the weather agent directly
async with AsyncClient() as client:
deps = WeatherDeps(
client=client,
weather_api_key="API_KEY"
)
result = await weather_agent.run(
state["query"],
deps=deps
)
return {"weather_result": result.data}

async def verify_information(state: GraphState) -> GraphState:
\"\"\"Use the verifier agent to check the weather information.\"\"\"
result = await verifier_agent.run(
f"Verify this weather information: {state['weather_result']}"
)
# Simple verification logic
verified = "accurate" in result.data.lower()
return {"verified": verified}

async def route(state: GraphState) -> Literal["regenerate", "finalize"]:
\"\"\"Decide whether to regenerate or finalize based on verification.\"\"\"
if state["verified"]:
return "finalize"
else:
return "regenerate"

async def regenerate_response(state: GraphState) -> GraphState:
\"\"\"Regenerate a better response if verification failed.\"\"\"
async with AsyncClient() as client:
deps = WeatherDeps(
client=client,
weather_api_key="API_KEY"
)
result = await weather_agent.run(
f"Please provide more detailed weather information for: {state['query']}",
deps=deps
)
return {"weather_result": result.data, "verified": True}

async def finalize_response(state: GraphState) -> GraphState:
\"\"\"Format the final response.\"\"\"
return {"response": f"Verified Weather Report: {state['weather_result']}"}

# Build the graph
workflow = StateGraph(GraphState)

# Add nodes
workflow.add_node("get_weather", get_weather_info)
workflow.add_node("verify", verify_information)
workflow.add_node("regenerate", regenerate_response)
workflow.add_node("finalize", finalize_response)

# Add edges
workflow.add_edge(START, "get_weather")
workflow.add_edge("get_weather", "verify")

# Add conditional edges based on verification
workflow.add_conditional_edges(
"verify",
route,
{
"regenerate": "regenerate",
"finalize": "finalize"
}
)

workflow.add_edge("regenerate", "finalize")
workflow.add_edge("finalize", END)

# Compile the graph
app = workflow.compile()

# Usage
async def main():
result = await app.ainvoke({
"query": "What's the weather in London?",
"verified": False
})
print(result["response"])
"""
st.code(langgraph_code, language="python")

st.markdown("""
### Key Benefits of Integration

1. **Workflow Management**: LangGraph provides a structured way to define complex agent workflows with clear state transitions.

2. **Reusability**: Pydantic AI agents can be reused within LangGraph nodes, maintaining their tool capabilities.

3. **Visualization**: LangGraph offers built-in visualization of agent workflows, making it easier to understand and debug complex systems.

4. **State Management**: The typed state in LangGraph ensures type safety and clear data flow between nodes.

5. **Parallel Execution**: LangGraph supports parallel execution of nodes, enabling more efficient processing.

6. **Human-in-the-Loop**: Both frameworks support human intervention points, which can be combined for powerful interactive systems.
""")

st.image("https://blog.langchain.dev/content/images/2024/01/simple_multi_agent_diagram--1-.png",
caption="Example LangGraph Multi-Agent Workflow", width=600)

# V8: Self-Feedback Loop
with st.expander("V8: Self-Feedback Loop"):
st.write("Automated validation and error correction")

# Create a visual feedback loop
st.graphviz_chart('''
digraph {
rankdir=TB;
node [shape=box, style=filled, color=lightblue];

Agent [label="Agent Generation"];
Test [label="Automated Testing"];
Validate [label="Validation"];
Error [label="Error Detection"];
Fix [label="Self-Correction"];

Agent -> Test;
Test -> Validate;
Validate -> Error [label="Issues Found"];
Error -> Fix;
Fix -> Agent [label="Regenerate"];
Validate -> Agent [label="Success", color=green];
}
''')

st.write("#### Validation Process")
st.info("""
1. Generate agent code
2. Run automated tests
3. Analyze test results
4. Identify errors or improvement areas
5. Apply self-correction algorithms
6. Regenerate improved code
7. Repeat until validation passes
""")

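# Illustrative sketch (assumption, not code from this commit): a minimal shape the
# planned generate -> test -> fix loop could take. The callables passed in stand in
# for Archon's actual generation, testing, and correction steps.
st.code("""
from typing import Callable, Tuple

def self_feedback_loop(
    spec: str,
    generate: Callable[[str], str],
    run_tests: Callable[[str], Tuple[bool, str]],
    fix: Callable[[str, str], str],
    max_iterations: int = 3,
) -> str:
    \"\"\"Generate agent code, test it, and regenerate until validation passes.\"\"\"
    code = generate(spec)
    for _ in range(max_iterations):
        passed, errors = run_tests(code)  # automated tests + validation
        if passed:
            break
        code = fix(code, errors)  # self-correction step feeds errors back in
    return code
""", language="python")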
# V9: Self Agent Execution
with st.expander("V9: Self Agent Execution"):
st.write("Testing and iterating on agents in an isolated environment")

st.write("#### Agent Execution Process")

execution_process = [
{"phase": "Sandbox Creation", "description": "Set up isolated environment using Local AI package"},
{"phase": "Agent Deployment", "description": "Load the generated agent into the testing environment"},
{"phase": "Test Execution", "description": "Run the agent against predefined scenarios and user queries"},
{"phase": "Performance Monitoring", "description": "Track response quality, latency, and resource usage"},
{"phase": "Error Detection", "description": "Identify runtime errors and logical inconsistencies"},
{"phase": "Iterative Improvement", "description": "Refine agent based on execution results"}
]

for i, phase in enumerate(execution_process):
st.write(f"**{i+1}. {phase['phase']}:** {phase['description']}")

st.write("#### Local AI Package Integration")
st.markdown("""
The [Local AI package](https://github.com/coleam00/local-ai-packaged) provides a containerized environment for:
- Running LLMs locally for agent testing
- Simulating API calls and external dependencies
- Monitoring agent behavior in a controlled setting
- Collecting performance metrics for optimization
""")

st.info("This enables Archon to test and refine agents in a controlled environment before deployment, significantly improving reliability and performance through empirical iteration.")

# V10: Multi-Framework Support
with st.expander("V10: Multi-Framework Support"):
st.write("Framework-agnostic agent generation")

frameworks = {
"Pydantic AI": {"status": "Supported", "description": "Native support for function-based agents"},
"LangGraph": {"status": "Coming in V7", "description": "Declarative multi-agent orchestration"},
"LangChain": {"status": "Planned", "description": "Popular agent framework with extensive tools"},
"Agno (Phidata)": {"status": "Planned", "description": "Multi-agent workflow framework"},
"CrewAI": {"status": "Planned", "description": "Role-based collaborative agents"},
"LlamaIndex": {"status": "Planned", "description": "RAG-focused agent framework"}
}

# Create a frameworks comparison table
df_data = {
"Framework": list(frameworks.keys()),
"Status": [frameworks[f]["status"] for f in frameworks],
"Description": [frameworks[f]["description"] for f in frameworks]
}

st.dataframe(df_data, use_container_width=True)

# V11: Autonomous Framework Learning
with st.expander("V11: Autonomous Framework Learning"):
st.write("Self-learning from mistakes and continuous improvement")

st.write("#### Self-Improvement Process")

improvement_process = [
{"phase": "Error Detection", "description": "Identifies patterns in failed agent generations and runtime errors"},
{"phase": "Root Cause Analysis", "description": "Analyzes error patterns to determine underlying issues in prompts or examples"},
{"phase": "Prompt Refinement", "description": "Automatically updates system prompts to address identified weaknesses"},
{"phase": "Example Augmentation", "description": "Adds new examples to the prompt library based on successful generations"},
{"phase": "Tool Enhancement", "description": "Creates or modifies tools to handle edge cases and common failure modes"},
{"phase": "Validation", "description": "Tests improvements against historical failure cases to ensure progress"}
]

for i, phase in enumerate(improvement_process):
st.write(f"**{i+1}. {phase['phase']}:** {phase['description']}")

st.info("This enables Archon to stay updated with the latest AI frameworks without manual intervention.")

# V12: Advanced RAG Techniques
with st.expander("V12: Advanced RAG Techniques"):
st.write("Enhanced retrieval and incorporation of framework documentation")

st.write("#### Advanced RAG Components")

col1, col2 = st.columns(2)

with col1:
st.markdown("#### Document Processing")
st.markdown("""
- **Hierarchical Chunking**: Multi-level chunking strategy that preserves document structure
- **Semantic Headers**: Extraction of meaningful section headers for better context
- **Code-Text Separation**: Specialized embedding models for code vs. natural language
- **Metadata Enrichment**: Automatic tagging with framework version, function types, etc.
""")

st.markdown("#### Query Processing")
st.markdown("""
- **Query Decomposition**: Breaking complex queries into sub-queries
- **Framework Detection**: Identifying which framework the query relates to
- **Intent Classification**: Determining if query is about usage, concepts, or troubleshooting
- **Query Expansion**: Adding relevant framework-specific terminology
""")

with col2:
st.markdown("#### Retrieval Enhancements")
st.markdown("""
- **Hybrid Search**: Combining dense and sparse retrievers for optimal results
- **Re-ranking**: Post-retrieval scoring based on relevance to the specific task
- **Cross-Framework Retrieval**: Finding analogous patterns across different frameworks
- **Code Example Prioritization**: Boosting practical examples in search results
""")

st.markdown("#### Knowledge Integration")
st.markdown("""
- **Context Stitching**: Intelligently combining information from multiple chunks
- **Framework Translation**: Converting patterns between frameworks (e.g., LangChain to LangGraph)
- **Version Awareness**: Handling differences between framework versions
- **Adaptive Retrieval**: Learning from successful and unsuccessful retrievals
""")

st.info("This enables Archon to more effectively retrieve and incorporate framework documentation, leading to more accurate and contextually appropriate agent generation.")

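# Illustrative sketch (assumption, not code from this commit): one common way hybrid
# search fuses dense and sparse results is a weighted score combination. The alpha
# weight and score inputs below are placeholders for illustration only.
st.code("""
def hybrid_score(dense_scores: dict[str, float], sparse_scores: dict[str, float], alpha: float = 0.5) -> dict[str, float]:
    \"\"\"Blend vector-similarity and keyword scores per document id.\"\"\"
    doc_ids = set(dense_scores) | set(sparse_scores)
    return {
        doc_id: alpha * dense_scores.get(doc_id, 0.0) + (1 - alpha) * sparse_scores.get(doc_id, 0.0)
        for doc_id in doc_ids
    }
""", language="python")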
# V13: MCP Agent Marketplace
with st.expander("V13: MCP Agent Marketplace"):
st.write("Integrating Archon agents as MCP servers and publishing to marketplaces")

st.write("#### MCP Integration Process")

mcp_integration_process = [
{"phase": "Protocol Implementation", "description": "Implement the Model Context Protocol to enable IDE integration"},
{"phase": "Agent Conversion", "description": "Transform Archon-generated agents into MCP-compatible servers"},
{"phase": "Specialized Agent Creation", "description": "Build purpose-specific agents for code review, refactoring, and testing"},
{"phase": "Marketplace Publishing", "description": "Package and publish agents to MCP marketplaces for distribution"},
{"phase": "IDE Integration", "description": "Enable seamless operation within Windsurf, Cursor, and other MCP-enabled IDEs"}
]

for i, phase in enumerate(mcp_integration_process):
st.write(f"**{i+1}. {phase['phase']}:** {phase['description']}")

st.info("This enables Archon to create specialized agents that operate directly within IDEs through the MCP protocol, while also making them available through marketplace distribution channels.")

# Future Integrations section
st.write("### Future Integrations")

# LangSmith
with st.expander("LangSmith"):
st.write("Integration with LangChain's tracing and monitoring platform")

st.image("https://docs.smith.langchain.com/assets/images/trace-9510284b5b15ba55fc1cca6af2404657.png", width=600)

st.write("#### LangSmith Benefits")
st.markdown("""
- **Tracing**: Monitor agent execution steps and decisions
- **Debugging**: Identify issues in complex agent workflows
- **Analytics**: Track performance and cost metrics
- **Evaluation**: Assess agent quality with automated testing
- **Feedback Collection**: Gather human feedback to improve agents
""")

# MCP Marketplace
with st.expander("MCP Marketplace"):
st.write("Integration with AI IDE marketplaces")

st.write("#### MCP Marketplace Integration")
st.markdown("""
- Publish Archon itself as a premium agent in MCP marketplaces
- Create specialized Archon variants for different development needs
- Enable one-click installation directly from within IDEs
- Integrate seamlessly with existing development workflows
""")

st.warning("The Model Context Protocol (MCP) is an emerging standard for AI assistant integration with IDEs like Windsurf, Cursor, Cline, and Roo Code.")

# Other Frameworks
with st.expander("Other Frameworks besides Pydantic AI"):
st.write("Support for additional agent frameworks")

st.write("#### Framework Adapter Architecture")

st.graphviz_chart('''
digraph {
rankdir=TB;
node [shape=box, style=filled, color=lightblue];

Archon [label="Archon Core"];
Adapter [label="Framework Adapter Layer"];

Pydantic [label="Pydantic AI", color=lightskyblue];
LangGraph [label="LangGraph", color=lightskyblue];
LangChain [label="LangChain", color=lightskyblue];
Agno [label="Agno", color=lightskyblue];
CrewAI [label="CrewAI", color=lightskyblue];
LlamaIndex [label="LlamaIndex", color=lightskyblue];

Archon -> Adapter;
Adapter -> Pydantic;
Adapter -> LangGraph;
Adapter -> LangChain;
Adapter -> Agno;
Adapter -> CrewAI;
Adapter -> LlamaIndex;
}
''')

# Vector Databases
with st.expander("Other Vector Databases besides Supabase"):
st.write("Support for additional vector databases")

vector_dbs = {
"Supabase": {"status": "Supported", "features": ["pgvector integration", "SQL API", "Real-time subscriptions"]},
"Pinecone": {"status": "Planned", "features": ["High scalability", "Low latency", "Serverless"]},
"Qdrant": {"status": "Planned", "features": ["Filtering", "Self-hosted option", "REST API"]},
"Milvus": {"status": "Planned", "features": ["Horizontal scaling", "Cloud-native", "Hybrid search"]},
"Chroma": {"status": "Planned", "features": ["Local-first", "Lightweight", "Simple API"]},
"Weaviate": {"status": "Planned", "features": ["GraphQL", "Multi-modal", "RESTful API"]}
}

# Create vector DB comparison table
df_data = {
"Vector Database": list(vector_dbs.keys()),
"Status": [vector_dbs[db]["status"] for db in vector_dbs],
"Key Features": [", ".join(vector_dbs[db]["features"]) for db in vector_dbs]
}

st.dataframe(df_data, use_container_width=True)

# Local AI Package
with st.expander("Local AI Package Integration"):
st.write("Integration with [Local AI Package](https://github.com/coleam00/local-ai-packaged)")

st.markdown("""
The Local AI Package enables running models entirely locally, providing:

- **Complete Privacy**: No data leaves your machine
- **Cost Savings**: Eliminate API usage fees
- **Offline Operation**: Work without internet connectivity
- **Custom Fine-tuning**: Adapt models to specific domains
- **Lower Latency**: Reduce response times for better UX
""")

st.info("This integration will allow Archon to operate fully offline with local models for both agent creation and execution.")
@@ -0,0 +1,140 @@
import streamlit as st
import sys
import os

# Add the parent directory to sys.path to allow importing from the parent directory
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.utils import create_new_tab_button

def intro_tab():
"""Display the introduction and setup guide for Archon"""
# Welcome message
st.markdown("""
Archon is an AI meta-agent designed to autonomously build, refine, and optimize other AI agents.

It serves both as a practical tool for developers and as an educational framework demonstrating the evolution of agentic systems.
Archon is developed in iterations, starting with a simple Pydantic AI agent that can build other Pydantic AI agents,
all the way to a full agentic workflow using LangGraph that can build other AI agents with any framework.

Through its iterative development, Archon showcases the power of planning, feedback loops, and domain-specific knowledge in creating robust AI agents.
""")

# Environment variables update notice
st.warning("""
**🔄 IMPORTANT UPDATE (March 20th):** Archon now uses a multi-agent workflow with specialized refiner agents for autonomous prompt, tools, and agent definition improvements. The primary coding agent still creates the initial agent by itself, but you can then say 'refine' (or something along those lines) as a follow-up prompt to kick off the specialized agents in parallel.
""")

# Setup guide with expandable sections
st.markdown("## Setup Guide")
st.markdown("Follow these concise steps to get Archon up and running (IMPORTANT: come back here after each step):")

# Step 1: Environment Configuration
with st.expander("Step 1: Environment Configuration", expanded=True):
st.markdown("""
### Environment Configuration

First, you need to set up your environment variables:

1. Go to the **Environment** tab
2. Configure the following essential variables:
- `BASE_URL`: API endpoint (OpenAI, OpenRouter, or Ollama)
- `LLM_API_KEY`: Your API key for the LLM service
- `OPENAI_API_KEY`: Required for embeddings
- `SUPABASE_URL`: Your Supabase project URL
- `SUPABASE_SERVICE_KEY`: Your Supabase service key
- `PRIMARY_MODEL`: Main agent model (e.g., gpt-4o-mini)
- `REASONER_MODEL`: Planning model (e.g., o3-mini)

These settings determine how Archon connects to external services and which models it uses.
""")
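# Illustrative sketch (assumption, not part of this commit): example placeholder values
# for the variables listed above, shown in env-file form. Real keys, URLs, and model
# names will differ for your setup.
st.code("""
BASE_URL=https://api.openai.com/v1
LLM_API_KEY=sk-your-llm-key
OPENAI_API_KEY=sk-your-openai-key
SUPABASE_URL=https://your-project.supabase.co
SUPABASE_SERVICE_KEY=your-service-role-key
PRIMARY_MODEL=gpt-4o-mini
REASONER_MODEL=o3-mini
""", language="bash")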
# Add a button to navigate to the Environment tab
create_new_tab_button("Go to Environment Section (New Tab)", "Environment", key="goto_env", use_container_width=True)

# Step 2: Database Setup
with st.expander("Step 2: Database Setup", expanded=False):
st.markdown("""
### Database Setup

Archon uses Supabase for vector storage and retrieval:

1. Go to the **Database** tab
2. Select your embedding dimensions (1536 for OpenAI, 768 for nomic-embed-text)
3. Follow the instructions to create the `site_pages` table

This creates the necessary tables, indexes, and functions for vector similarity search.
""")
# Add a button to navigate to the Database tab
create_new_tab_button("Go to Database Section (New Tab)", "Database", key="goto_db", use_container_width=True)

# Step 3: Documentation Crawling
with st.expander("Step 3: Documentation Crawling", expanded=False):
st.markdown("""
### Documentation Crawling

Populate the database with framework documentation:

1. Go to the **Documentation** tab
2. Click on "Crawl Pydantic AI Docs"
3. Wait for the crawling process to complete

This step downloads and processes documentation, creating embeddings for semantic search.
""")
# Add a button to navigate to the Documentation tab
create_new_tab_button("Go to the Documentation Section (New Tab)", "Documentation", key="goto_docs", use_container_width=True)

# Step 4: Agent Service
with st.expander("Step 4: Agent Service Setup (for MCP)", expanded=False):
st.markdown("""
### MCP Agent Service Setup

Start the graph service for agent generation:

1. Go to the **Agent Service** tab
2. Click on "Start Agent Service"
3. Verify the service is running

The agent service powers the LangGraph workflow for agent creation.
""")
# Add a button to navigate to the Agent Service tab
create_new_tab_button("Go to Agent Service Section (New Tab)", "Agent Service", key="goto_service", use_container_width=True)

# Step 5: MCP Configuration (Optional)
with st.expander("Step 5: MCP Configuration (Optional)", expanded=False):
st.markdown("""
### MCP Configuration

For integration with AI IDEs:

1. Go to the **MCP** tab
2. Select your IDE (Windsurf, Cursor, or Cline/Roo Code)
3. Follow the instructions to configure your IDE

This enables you to use Archon directly from your AI-powered IDE.
""")
# Add a button to navigate to the MCP tab
create_new_tab_button("Go to MCP Section (New Tab)", "MCP", key="goto_mcp", use_container_width=True)

# Step 6: Using Archon
with st.expander("Step 6: Using Archon", expanded=False):
st.markdown("""
### Using Archon

Once everything is set up:

1. Go to the **Chat** tab
2. Describe the agent you want to build
3. Archon will plan and generate the necessary code

You can also use Archon directly from your AI IDE if you've configured MCP.
""")
# Add a button to navigate to the Chat tab
create_new_tab_button("Go to Chat Section (New Tab)", "Chat", key="goto_chat", use_container_width=True)

# Resources
st.markdown("""
## Additional Resources

- [GitHub Repository](https://github.com/coleam00/archon)
- [Archon Community Forum](https://thinktank.ottomator.ai/c/archon/30)
- [GitHub Kanban Board](https://github.com/users/coleam00/projects/1)
""")
@@ -0,0 +1,171 @@
import streamlit as st
import platform
import json
import os

def get_paths():
# Get the absolute path to the current directory
base_path = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))

# Determine the correct python path based on the OS
if platform.system() == "Windows":
python_path = os.path.join(base_path, 'venv', 'Scripts', 'python.exe')
else: # macOS or Linux
python_path = os.path.join(base_path, 'venv', 'bin', 'python')

server_script_path = os.path.join(base_path, 'mcp', 'mcp_server.py')

return python_path, server_script_path

def generate_mcp_config(ide_type, python_path, server_script_path):
"""
Generate MCP configuration for the selected IDE type.
"""
# Create the config dictionary for Python
python_config = {
"mcpServers": {
"archon": {
"command": python_path,
"args": [server_script_path]
}
}
}

# Create the config dictionary for Docker
docker_config = {
"mcpServers": {
"archon": {
"command": "docker",
"args": [
"run",
"-i",
"--rm",
"-e",
"GRAPH_SERVICE_URL",
"archon-mcp:latest"
],
"env": {
"GRAPH_SERVICE_URL": "http://host.docker.internal:8100"
}
}
}
}

# Return appropriate configuration based on IDE type
if ide_type == "Windsurf":
return json.dumps(python_config, indent=2), json.dumps(docker_config, indent=2)
elif ide_type == "Cursor":
return f"{python_path} {server_script_path}", f"docker run -i --rm -e GRAPH_SERVICE_URL=http://host.docker.internal:8100 archon-mcp:latest"
elif ide_type == "Cline/Roo Code":
return json.dumps(python_config, indent=2), json.dumps(docker_config, indent=2)
elif ide_type == "Claude Code":
return "Not Required", "Not Required"
else:
return "Unknown IDE type selected", "Unknown IDE type selected"

def mcp_tab():
"""Display the MCP configuration interface"""
st.header("MCP Configuration")
st.write("Select your AI IDE to get the appropriate MCP configuration:")

# IDE selection with side-by-side buttons
col1, col2, col3, col4 = st.columns(4)

with col1:
windsurf_button = st.button("Windsurf", use_container_width=True, key="windsurf_button")
with col2:
cursor_button = st.button("Cursor", use_container_width=True, key="cursor_button")
with col3:
cline_button = st.button("Cline/Roo Code", use_container_width=True, key="cline_button")
with col4:
claude_button = st.button("Claude Code", use_container_width=True, key="claude_button")

# Initialize session state for selected IDE if not present
if "selected_ide" not in st.session_state:
st.session_state.selected_ide = None

# Update selected IDE based on button clicks
if windsurf_button:
st.session_state.selected_ide = "Windsurf"
elif cursor_button:
st.session_state.selected_ide = "Cursor"
elif cline_button:
st.session_state.selected_ide = "Cline/Roo Code"
elif claude_button:
st.session_state.selected_ide = "Claude Code"

# Display configuration if an IDE is selected
if st.session_state.selected_ide:
selected_ide = st.session_state.selected_ide
st.subheader(f"MCP Configuration for {selected_ide}")
python_path, server_script_path = get_paths()
python_config, docker_config = generate_mcp_config(selected_ide, python_path, server_script_path)

# Configuration type tabs
config_tab1, config_tab2 = st.tabs(["Docker Configuration", "Python Configuration"])

with config_tab1:
st.markdown("### Docker Configuration")
st.code(docker_config, language="json" if selected_ide != "Cursor" else None)

st.markdown("#### Requirements:")
st.markdown("- Docker installed")
st.markdown("- Run the setup script to build and start both containers:")
st.code("python run_docker.py", language="bash")

with config_tab2:
st.markdown("### Python Configuration")
st.code(python_config, language="json" if selected_ide != "Cursor" else None)

st.markdown("#### Requirements:")
st.markdown("- Python 3.11+ installed")
st.markdown("- Virtual environment created and activated")
st.markdown("- All dependencies installed via `pip install -r requirements.txt`")
st.markdown("- Archon must be running outside of a container")

# Instructions based on IDE type
st.markdown("---")
st.markdown("### Setup Instructions")

if selected_ide == "Windsurf":
st.markdown("""
#### How to use in Windsurf:
1. Click on the hammer icon above the chat input
2. Click on "Configure"
3. Paste the JSON from your preferred configuration tab above
4. Click "Refresh" next to "Configure"
""")
elif selected_ide == "Cursor":
st.markdown("""
#### How to use in Cursor:
1. Go to Cursor Settings > Features > MCP
2. Click on "+ Add New MCP Server"
3. Name: Archon
4. Type: command (equivalent to stdio)
5. Command: Paste the command from your preferred configuration tab above
""")
elif selected_ide == "Cline/Roo Code":
st.markdown("""
#### How to use in Cline or Roo Code:
1. From the Cline/Roo Code extension, click the "MCP Server" tab
2. Click the "Edit MCP Settings" button
3. The MCP settings file should be displayed in a tab in VS Code
4. Paste the JSON from your preferred configuration tab above
5. Cline/Roo Code will automatically detect and start the MCP server
""")
elif selected_ide == "Claude Code":
st.markdown(f"""
#### How to use in Claude Code:
1. Deploy and run Archon in Docker
2. In the Archon UI, start the MCP service.
3. Open a terminal and navigate to your work folder.
4. Execute the command:

\tFor Docker: `claude mcp add Archon docker run -i --rm -e GRAPH_SERVICE_URL=http://host.docker.internal:8100 archon-mcp:latest`
\tFor Python: `claude mcp add Archon {python_path} {server_script_path}`

5. Start Claude Code with the command `claude`. When Claude Code starts, the bottom of the welcome section lists the connected MCP services; Archon should appear with a status of _connected_.
6. You can now use the Archon MCP service in your Claude Code projects.

(NOTE: If you close the terminal, or start a session in a new terminal, you will need to re-add the MCP service.)
""")
@@ -0,0 +1,94 @@
"""
This module contains the CSS styles for the Streamlit UI.
"""

import streamlit as st

def load_css():
"""
Load the custom CSS styles for the Archon UI.
"""
st.markdown("""
<style>
:root {
--primary-color: #00CC99; /* Green */
--secondary-color: #EB2D8C; /* Pink */
--text-color: #262730;
}

/* Style the buttons */
.stButton > button {
color: white;
border: 2px solid var(--primary-color);
padding: 0.5rem 1rem;
font-weight: bold;
transition: all 0.3s ease;
}

.stButton > button:hover {
color: white;
border: 2px solid var(--secondary-color);
}

/* Override Streamlit's default focus styles that make buttons red */
.stButton > button:focus,
.stButton > button:focus:hover,
.stButton > button:active,
.stButton > button:active:hover {
color: white !important;
border: 2px solid var(--secondary-color) !important;
box-shadow: none !important;
outline: none !important;
}

/* Style headers */
h1, h2, h3 {
color: var(--primary-color);
}

/* Hide spans within header elements */
h1 span, h2 span, h3 span {
display: none !important;
visibility: hidden;
width: 0;
height: 0;
opacity: 0;
position: absolute;
overflow: hidden;
}

/* Style code blocks */
pre {
border-left: 4px solid var(--primary-color);
}

/* Style links */
a {
color: var(--secondary-color);
}

/* Style the chat messages */
.stChatMessage {
border-left: 4px solid var(--secondary-color);
}

/* Style the chat input */
.stChatInput > div {
border: 2px solid var(--primary-color) !important;
}

/* Remove red outline on focus */
.stChatInput > div:focus-within {
box-shadow: none !important;
border: 2px solid var(--secondary-color) !important;
outline: none !important;
}

/* Remove red outline on all inputs when focused */
input:focus, textarea:focus, [contenteditable]:focus {
box-shadow: none !important;
border-color: var(--secondary-color) !important;
outline: none !important;
}
</style>
""", unsafe_allow_html=True)
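# Usage note (assumption, not part of this commit): load_css() is intended to be called
# once near the top of the main Streamlit entry point, e.g.
#   from streamlit_ui.styles import load_css  # exact module path depends on this file's name
#   load_css()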