fix: Change Ollama default URL to host.docker.internal for Docker compatibility

- Changed default Ollama URL from localhost:11434 to host.docker.internal:11434
- This allows Docker containers to connect to Ollama running on the host machine
- Updated in backend services, frontend components, migration scripts, and documentation
- Most users run Archon in Docker but Ollama as a local host binary, making this the better default; see the connectivity note below
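A note on why this default helps, and where it can still fail: host.docker.internal resolves from inside Docker Desktop containers (macOS/Windows) out of the box, but on plain Linux it only resolves if the container is given a host-gateway mapping (Docker 20.10+). A minimal, hypothetical probe of that situation is sketched below; neither the helper name nor the constants come from this commit.

    # Illustrative sketch only: prefer the Docker-friendly default when the
    # hostname resolves, otherwise fall back to localhost (bare-metal runs,
    # or Linux containers without a host-gateway mapping).
    import socket

    DOCKER_HOST_URL = "http://host.docker.internal:11434"
    LOCALHOST_URL = "http://localhost:11434"

    def default_ollama_url() -> str:
        """Return the Docker-friendly default if it resolves, else localhost."""
        try:
            socket.getaddrinfo("host.docker.internal", 11434)
            return DOCKER_HOST_URL
        except socket.gaierror:
            return LOCALHOST_URL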
John Fitzpatrick
2025-09-20 13:36:33 -07:00
parent 035f90e721
commit d4e80a945a
7 changed files with 13 additions and 13 deletions


@@ -475,7 +475,7 @@ class CredentialService:
     def _get_provider_base_url(self, provider: str, rag_settings: dict) -> str | None:
         """Get base URL for provider."""
         if provider == "ollama":
-            return rag_settings.get("LLM_BASE_URL", "http://localhost:11434/v1")
+            return rag_settings.get("LLM_BASE_URL", "http://host.docker.internal:11434/v1")
         elif provider == "google":
             return "https://generativelanguage.googleapis.com/v1beta/openai/"
         return None  # Use default for OpenAI
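Worth noting about the hunk above: the new URL is only a fallback default, so any explicit LLM_BASE_URL setting still wins. A standalone sketch of that precedence (hypothetical function, mirroring but not taken from the project's API):

    # dict.get(key, default) means a stored LLM_BASE_URL always overrides
    # the Docker-friendly default.
    def provider_base_url(provider: str, rag_settings: dict) -> str | None:
        if provider == "ollama":
            return rag_settings.get("LLM_BASE_URL", "http://host.docker.internal:11434/v1")
        elif provider == "google":
            return "https://generativelanguage.googleapis.com/v1beta/openai/"
        return None  # OpenAI falls through to its library default

    assert provider_base_url("ollama", {}) == "http://host.docker.internal:11434/v1"
    assert provider_base_url("ollama", {"LLM_BASE_URL": "http://localhost:11434/v1"}) == "http://localhost:11434/v1"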


@@ -203,7 +203,7 @@ async def _get_optimal_ollama_instance(instance_type: str | None = None,
             return embedding_url if embedding_url.endswith('/v1') else f"{embedding_url}/v1"
         # Default to LLM base URL for chat operations
-        fallback_url = rag_settings.get("LLM_BASE_URL", "http://localhost:11434")
+        fallback_url = rag_settings.get("LLM_BASE_URL", "http://host.docker.internal:11434")
         return fallback_url if fallback_url.endswith('/v1') else f"{fallback_url}/v1"
     except Exception as e:

@@ -211,11 +211,11 @@ async def _get_optimal_ollama_instance(instance_type: str | None = None,
     # Final fallback to localhost only if we can't get RAG settings
     try:
         rag_settings = await credential_service.get_credentials_by_category("rag_strategy")
-        fallback_url = rag_settings.get("LLM_BASE_URL", "http://localhost:11434")
+        fallback_url = rag_settings.get("LLM_BASE_URL", "http://host.docker.internal:11434")
         return fallback_url if fallback_url.endswith('/v1') else f"{fallback_url}/v1"
     except Exception as fallback_error:
         logger.error(f"Could not retrieve fallback configuration: {fallback_error}")
-        return "http://localhost:11434/v1"
+        return "http://host.docker.internal:11434/v1"

 async def get_embedding_model(provider: str | None = None) -> str:
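Both hunks repeat the same normalization: Ollama's OpenAI-compatible API is served under /v1, so a bare base URL gets the suffix appended exactly once. As a hypothetical helper (not present in the codebase), the pattern reads:

    # Append /v1 only when missing, so already-normalized URLs pass through.
    def ensure_v1(base_url: str) -> str:
        return base_url if base_url.endswith("/v1") else f"{base_url}/v1"

    assert ensure_v1("http://host.docker.internal:11434") == "http://host.docker.internal:11434/v1"
    assert ensure_v1("http://host.docker.internal:11434/v1") == "http://host.docker.internal:11434/v1"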


@@ -23,7 +23,7 @@ _provider_cache: dict[str, tuple[Any, float]] = {}
 _CACHE_TTL_SECONDS = 300  # 5 minutes

 # Default Ollama instance URL (configurable via environment/settings)
-DEFAULT_OLLAMA_URL = "http://localhost:11434"
+DEFAULT_OLLAMA_URL = "http://host.docker.internal:11434"

 # Model pattern detection for dynamic capabilities (no hardcoded model names)
 CHAT_MODEL_PATTERNS = ["llama", "qwen", "mistral", "codellama", "phi", "gemma", "vicuna", "orca"]
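For context on the last hunk: CHAT_MODEL_PATTERNS drives name-based capability detection rather than a hardcoded model registry. A hypothetical sketch of how such substring matching could work (the function is illustrative, not from this file):

    CHAT_MODEL_PATTERNS = ["llama", "qwen", "mistral", "codellama", "phi", "gemma", "vicuna", "orca"]

    # Substring match on the lowercased name, so tags like "llama3.1:8b"
    # or "qwen2.5-coder" are treated as chat-capable.
    def looks_like_chat_model(model_name: str) -> bool:
        name = model_name.lower()
        return any(pattern in name for pattern in CHAT_MODEL_PATTERNS)

    assert looks_like_chat_model("llama3.1:8b")
    assert not looks_like_chat_model("nomic-embed-text")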