From 1f535c3d7f5bbbc113573a65d26cb2e881c7781d Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sun, 26 Oct 2025 14:13:40 +0000 Subject: [PATCH] Add comprehensive cluster documentation and configuration examples Co-authored-by: samanhappy <2755122+samanhappy@users.noreply.github.com> --- docs/cluster-deployment.md | 516 ++++++++++++++++++++++++++ docs/cluster-deployment.zh.md | 510 +++++++++++++++++++++++++ examples/cluster-config-examples.md | 444 ++++++++++++++++++++++ tests/services/clusterService.test.ts | 6 +- 4 files changed, 1473 insertions(+), 3 deletions(-) create mode 100644 docs/cluster-deployment.md create mode 100644 docs/cluster-deployment.zh.md create mode 100644 examples/cluster-config-examples.md diff --git a/docs/cluster-deployment.md b/docs/cluster-deployment.md new file mode 100644 index 0000000..4c67c8f --- /dev/null +++ b/docs/cluster-deployment.md @@ -0,0 +1,516 @@ +# Cluster Deployment Guide + +MCPHub supports cluster deployment, allowing you to run multiple nodes that work together as a unified system. This enables: + +- **High Availability**: Distribute MCP servers across multiple nodes for redundancy +- **Load Distribution**: Balance requests across multiple replicas of the same MCP server +- **Sticky Sessions**: Ensure client sessions are routed to the same node consistently +- **Centralized Management**: One coordinator manages the entire cluster + +## Architecture + +MCPHub cluster has three operating modes: + +1. **Standalone Mode** (Default): Single node operation, no cluster features +2. **Coordinator Mode**: Central node that manages the cluster, routes requests, and maintains session affinity +3. **Node Mode**: Worker nodes that register with the coordinator and run MCP servers + +``` +┌─────────────────────────────────────────┐ +│ Coordinator Node │ +│ - Manages cluster state │ +│ - Routes client requests │ +│ - Maintains session affinity │ +│ - Health monitoring │ +└───────────┬─────────────────────────────┘ + │ + ┌───────┴───────────────────┐ + │ │ +┌───▼────────┐ ┌────────▼────┐ +│ Node 1 │ │ Node 2 │ +│ - MCP A │ │ - MCP A │ +│ - MCP B │ │ - MCP C │ +└────────────┘ └─────────────┘ +``` + +## Configuration + +### Coordinator Configuration + +Create or update `mcp_settings.json` on the coordinator node: + +```json +{ + "mcpServers": { + // Optional: coordinator can also run MCP servers + "example": { + "command": "npx", + "args": ["-y", "example-mcp-server"] + } + }, + "systemConfig": { + "cluster": { + "enabled": true, + "mode": "coordinator", + "coordinator": { + "nodeTimeout": 15000, + "cleanupInterval": 30000, + "stickySessionTimeout": 3600000 + }, + "stickySession": { + "enabled": true, + "strategy": "consistent-hash", + "cookieName": "MCPHUB_NODE", + "headerName": "X-MCPHub-Node" + } + } + } +} +``` + +**Configuration Options:** + +- `nodeTimeout`: Time (ms) before marking a node as unhealthy (default: 15000) +- `cleanupInterval`: Interval (ms) for cleaning up inactive nodes (default: 30000) +- `stickySessionTimeout`: Session affinity timeout (ms) (default: 3600000 - 1 hour) +- `stickySession.enabled`: Enable sticky session routing (default: true) +- `stickySession.strategy`: Session affinity strategy: + - `consistent-hash`: Hash-based routing (default) + - `cookie`: Cookie-based routing + - `header`: Header-based routing + +### Node Configuration + +Create or update `mcp_settings.json` on each worker node: + +```json +{ + "mcpServers": { + "amap": { + "command": "npx", + "args": ["-y", 
"@amap/amap-maps-mcp-server"] + }, + "playwright": { + "command": "npx", + "args": ["@playwright/mcp@latest", "--headless"] + } + }, + "systemConfig": { + "cluster": { + "enabled": true, + "mode": "node", + "node": { + "id": "node-1", + "name": "Worker Node 1", + "coordinatorUrl": "http://coordinator:3000", + "heartbeatInterval": 5000, + "registerOnStartup": true + } + } + } +} +``` + +**Configuration Options:** + +- `node.id`: Unique node identifier (auto-generated if not provided) +- `node.name`: Human-readable node name (defaults to hostname) +- `node.coordinatorUrl`: URL of the coordinator node (required) +- `node.heartbeatInterval`: Heartbeat interval (ms) (default: 5000) +- `node.registerOnStartup`: Auto-register on startup (default: true) + +## Deployment Scenarios + +### Scenario 1: Docker Compose + +Create a `docker-compose.yml`: + +```yaml +version: '3.8' + +services: + coordinator: + image: samanhappy/mcphub:latest + ports: + - "3000:3000" + volumes: + - ./coordinator-config.json:/app/mcp_settings.json + - coordinator-data:/app/data + environment: + - NODE_ENV=production + + node1: + image: samanhappy/mcphub:latest + volumes: + - ./node1-config.json:/app/mcp_settings.json + - node1-data:/app/data + environment: + - NODE_ENV=production + depends_on: + - coordinator + + node2: + image: samanhappy/mcphub:latest + volumes: + - ./node2-config.json:/app/mcp_settings.json + - node2-data:/app/data + environment: + - NODE_ENV=production + depends_on: + - coordinator + +volumes: + coordinator-data: + node1-data: + node2-data: +``` + +Start the cluster: + +```bash +docker-compose up -d +``` + +### Scenario 2: Kubernetes + +Create Kubernetes manifests: + +**Coordinator Deployment:** + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mcphub-coordinator +spec: + replicas: 1 + selector: + matchLabels: + app: mcphub-coordinator + template: + metadata: + labels: + app: mcphub-coordinator + spec: + containers: + - name: mcphub + image: samanhappy/mcphub:latest + ports: + - containerPort: 3000 + volumeMounts: + - name: config + mountPath: /app/mcp_settings.json + subPath: mcp_settings.json + volumes: + - name: config + configMap: + name: mcphub-coordinator-config +--- +apiVersion: v1 +kind: Service +metadata: + name: mcphub-coordinator +spec: + selector: + app: mcphub-coordinator + ports: + - port: 3000 + targetPort: 3000 + type: LoadBalancer +``` + +**Worker Node Deployment:** + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mcphub-node +spec: + replicas: 3 + selector: + matchLabels: + app: mcphub-node + template: + metadata: + labels: + app: mcphub-node + spec: + containers: + - name: mcphub + image: samanhappy/mcphub:latest + volumeMounts: + - name: config + mountPath: /app/mcp_settings.json + subPath: mcp_settings.json + volumes: + - name: config + configMap: + name: mcphub-node-config +``` + +Apply the manifests: + +```bash +kubectl apply -f coordinator.yaml +kubectl apply -f nodes.yaml +``` + +### Scenario 3: Manual Deployment + +**On Coordinator (192.168.1.100):** + +```bash +# Install MCPHub +npm install -g @samanhappy/mcphub + +# Configure as coordinator +cat > mcp_settings.json < mcp_settings.json <50 nodes), consider: +- Increasing coordinator resources +- Tuning heartbeat intervals +- Using header-based sticky sessions (lower overhead) + +### Network Latency + +Minimize latency between coordinator and nodes: +- Deploy in the same datacenter/region +- Use low-latency networking +- Consider coordinator placement near clients + +### Session Timeout + 
## Deployment Scenarios

### Scenario 1: Docker Compose

Create a `docker-compose.yml`:

```yaml
version: '3.8'

services:
  coordinator:
    image: samanhappy/mcphub:latest
    ports:
      - "3000:3000"
    volumes:
      - ./coordinator-config.json:/app/mcp_settings.json
      - coordinator-data:/app/data
    environment:
      - NODE_ENV=production

  node1:
    image: samanhappy/mcphub:latest
    volumes:
      - ./node1-config.json:/app/mcp_settings.json
      - node1-data:/app/data
    environment:
      - NODE_ENV=production
    depends_on:
      - coordinator

  node2:
    image: samanhappy/mcphub:latest
    volumes:
      - ./node2-config.json:/app/mcp_settings.json
      - node2-data:/app/data
    environment:
      - NODE_ENV=production
    depends_on:
      - coordinator

volumes:
  coordinator-data:
  node1-data:
  node2-data:
```

Start the cluster:

```bash
docker-compose up -d
```

### Scenario 2: Kubernetes

Create Kubernetes manifests:

**Coordinator Deployment:**

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mcphub-coordinator
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mcphub-coordinator
  template:
    metadata:
      labels:
        app: mcphub-coordinator
    spec:
      containers:
        - name: mcphub
          image: samanhappy/mcphub:latest
          ports:
            - containerPort: 3000
          volumeMounts:
            - name: config
              mountPath: /app/mcp_settings.json
              subPath: mcp_settings.json
      volumes:
        - name: config
          configMap:
            name: mcphub-coordinator-config
---
apiVersion: v1
kind: Service
metadata:
  name: mcphub-coordinator
spec:
  selector:
    app: mcphub-coordinator
  ports:
    - port: 3000
      targetPort: 3000
  type: LoadBalancer
```

**Worker Node Deployment:**

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mcphub-node
spec:
  replicas: 3
  selector:
    matchLabels:
      app: mcphub-node
  template:
    metadata:
      labels:
        app: mcphub-node
    spec:
      containers:
        - name: mcphub
          image: samanhappy/mcphub:latest
          volumeMounts:
            - name: config
              mountPath: /app/mcp_settings.json
              subPath: mcp_settings.json
      volumes:
        - name: config
          configMap:
            name: mcphub-node-config
```

Apply the manifests:

```bash
kubectl apply -f coordinator.yaml
kubectl apply -f nodes.yaml
```

### Scenario 3: Manual Deployment

**On the Coordinator (192.168.1.100):**

```bash
# Install MCPHub
npm install -g @samanhappy/mcphub

# Configure as coordinator (minimal config; see "Coordinator Configuration" above)
cat > mcp_settings.json <<'EOF'
{
  "mcpServers": {},
  "systemConfig": {
    "cluster": {
      "enabled": true,
      "mode": "coordinator"
    }
  }
}
EOF

# Start MCPHub
mcphub
```

**On a Worker Node (192.168.1.101):**

```bash
# Install MCPHub
npm install -g @samanhappy/mcphub

# Configure as a worker node pointing at the coordinator
cat > mcp_settings.json <<'EOF'
{
  "mcpServers": {
    "playwright": {
      "command": "npx",
      "args": ["@playwright/mcp@latest", "--headless"]
    }
  },
  "systemConfig": {
    "cluster": {
      "enabled": true,
      "mode": "node",
      "node": {
        "coordinatorUrl": "http://192.168.1.100:3000"
      }
    }
  }
}
EOF

# Start MCPHub
mcphub
```

## Performance Considerations

### Large Clusters

For large clusters (>50 nodes), consider:

- Increasing coordinator resources
- Tuning heartbeat intervals
- Using header-based sticky sessions (lower overhead)

### Network Latency

Minimize latency between the coordinator and nodes:

- Deploy in the same datacenter/region
- Use low-latency networking
- Consider placing the coordinator near clients

### Session Timeout

Balance the session timeout against resource usage:

- Shorter timeout: less memory, more re-routing
- Longer timeout: better stickiness, more memory

The default is 1 hour; adjust it based on your use case.

## Limitations

1. **Stateful Sessions**: Node-local state is lost if a node fails. Use external storage for persistent state.
2. **Single Coordinator**: Currently supports one coordinator. Consider load balancing at the infrastructure level.
3. **Network Partitions**: Nodes that lose their connection to the coordinator are marked unhealthy.

## Best Practices

1. **Use Groups**: Organize MCP servers into groups for easier management
2. **Monitor Health**: Set up alerts for unhealthy nodes
3. **Version Consistency**: Run the same MCPHub version across all nodes
4. **Resource Planning**: Allocate appropriate resources based on each MCP server's requirements
5. **Backup Configuration**: Keep the coordinator config backed up
6. **Gradual Rollout**: Test the cluster configuration with a small number of nodes first

## See Also

- [Docker Deployment](../deployment/docker.md)
- [Kubernetes Deployment](../deployment/kubernetes.md)
- [High Availability Setup](../deployment/high-availability.md)

diff --git a/docs/cluster-deployment.zh.md b/docs/cluster-deployment.zh.md
new file mode 100644
index 0000000..4ec5248
--- /dev/null
+++ b/docs/cluster-deployment.zh.md
@@ -0,0 +1,510 @@
+# 集群部署指南

MCPHub 支持集群部署,允许多个节点协同工作,组成一个统一的系统。这提供了:

- **高可用性**:将 MCP 服务器分布在多个节点上实现冗余
- **负载分配**:在同一 MCP 服务器的多个副本之间平衡请求
- **会话粘性**:确保客户端会话一致地路由到同一节点
- **集中管理**:由一个协调器管理整个集群

## 架构

MCPHub 集群有三种运行模式:

1. **独立模式**(默认):单节点运行,无集群功能
2. **协调器模式**:管理集群、路由请求、维护会话亲和性的中心节点
3. **节点模式**:向协调器注册并运行 MCP 服务器的工作节点

```
┌─────────────────────────────────────────┐
│            协调器节点                    │
│  - 管理集群状态                          │
│  - 路由客户端请求                        │
│  - 维护会话亲和性                        │
│  - 健康监控                              │
└───────────┬─────────────────────────────┘
            │
    ┌───────┴───────────────────┐
    │                           │
┌───▼────────┐          ┌────────▼────┐
│  节点 1    │          │  节点 2     │
│  - MCP A   │          │  - MCP A    │
│  - MCP B   │          │  - MCP C    │
└────────────┘          └─────────────┘
```

## 配置

### 协调器配置

在协调器节点上创建或更新 `mcp_settings.json`:

```json
{
  "mcpServers": {
    // 可选:协调器也可以运行 MCP 服务器
    "example": {
      "command": "npx",
      "args": ["-y", "example-mcp-server"]
    }
  },
  "systemConfig": {
    "cluster": {
      "enabled": true,
      "mode": "coordinator",
      "coordinator": {
        "nodeTimeout": 15000,
        "cleanupInterval": 30000,
        "stickySessionTimeout": 3600000
      },
      "stickySession": {
        "enabled": true,
        "strategy": "consistent-hash",
        "cookieName": "MCPHUB_NODE",
        "headerName": "X-MCPHub-Node"
      }
    }
  }
}
```

**配置选项:**

- `nodeTimeout`: 将节点标记为不健康之前的时间(毫秒)(默认:15000)
- `cleanupInterval`: 清理不活跃节点的间隔(毫秒)(默认:30000)
- `stickySessionTimeout`: 会话亲和性超时(毫秒)(默认:3600000,即 1 小时)
- `stickySession.enabled`: 启用会话粘性路由(默认:true)
- `stickySession.strategy`: 会话亲和性策略:
  - `consistent-hash`: 基于哈希的路由(默认,参见下方示意)
  - `cookie`: 基于 Cookie 的路由
  - `header`: 基于请求头的路由
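
为了说明 `consistent-hash` 策略的思路,下面给出一个极简示意(假设性示例,并非 MCPHub 的实际实现;真实实现通常还会为每个节点引入虚拟节点以平衡负载)。相同的会话 ID 总是映射到同一节点,节点增减时只有少量会话需要重新路由:

```typescript
import { createHash } from 'node:crypto';

// 将任意字符串哈希到 32 位整数空间(仅为示意,非生产级哈希)
function hash(value: string): number {
  return parseInt(createHash('md5').update(value).digest('hex').slice(0, 8), 16);
}

// 在哈希环上顺时针找到第一个哈希值不小于会话哈希的节点,否则回绕到环首
function selectNode(sessionId: string, nodeIds: string[]): string {
  const ring = nodeIds
    .map((id) => ({ id, point: hash(id) }))
    .sort((a, b) => a.point - b.point);
  const target = hash(sessionId);
  return (ring.find((n) => n.point >= target) ?? ring[0]).id;
}

// 同一 sessionId 始终路由到同一节点
console.log(selectNode('session-abc', ['node-1', 'node-2', 'node-3']));
```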
### 节点配置

在每个工作节点上创建或更新 `mcp_settings.json`:

```json
{
  "mcpServers": {
    "amap": {
      "command": "npx",
      "args": ["-y", "@amap/amap-maps-mcp-server"]
    },
    "playwright": {
      "command": "npx",
      "args": ["@playwright/mcp@latest", "--headless"]
    }
  },
  "systemConfig": {
    "cluster": {
      "enabled": true,
      "mode": "node",
      "node": {
        "id": "node-1",
        "name": "工作节点 1",
        "coordinatorUrl": "http://coordinator:3000",
        "heartbeatInterval": 5000,
        "registerOnStartup": true
      }
    }
  }
}
```

**配置选项:**

- `node.id`: 唯一节点标识符(如未提供则自动生成)
- `node.name`: 人类可读的节点名称(默认为主机名)
- `node.coordinatorUrl`: 协调器节点的 URL(必需)
- `node.heartbeatInterval`: 心跳间隔(毫秒)(默认:5000)
- `node.registerOnStartup`: 启动时自动注册(默认:true)

## 部署场景

### 场景 1:Docker Compose

创建 `docker-compose.yml`:

```yaml
version: '3.8'

services:
  coordinator:
    image: samanhappy/mcphub:latest
    ports:
      - "3000:3000"
    volumes:
      - ./coordinator-config.json:/app/mcp_settings.json
      - coordinator-data:/app/data
    environment:
      - NODE_ENV=production

  node1:
    image: samanhappy/mcphub:latest
    volumes:
      - ./node1-config.json:/app/mcp_settings.json
      - node1-data:/app/data
    environment:
      - NODE_ENV=production
    depends_on:
      - coordinator

  node2:
    image: samanhappy/mcphub:latest
    volumes:
      - ./node2-config.json:/app/mcp_settings.json
      - node2-data:/app/data
    environment:
      - NODE_ENV=production
    depends_on:
      - coordinator

volumes:
  coordinator-data:
  node1-data:
  node2-data:
```

启动集群:

```bash
docker-compose up -d
```

### 场景 2:Kubernetes

创建 Kubernetes 清单:

**协调器部署:**

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mcphub-coordinator
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mcphub-coordinator
  template:
    metadata:
      labels:
        app: mcphub-coordinator
    spec:
      containers:
        - name: mcphub
          image: samanhappy/mcphub:latest
          ports:
            - containerPort: 3000
          volumeMounts:
            - name: config
              mountPath: /app/mcp_settings.json
              subPath: mcp_settings.json
      volumes:
        - name: config
          configMap:
            name: mcphub-coordinator-config
---
apiVersion: v1
kind: Service
metadata:
  name: mcphub-coordinator
spec:
  selector:
    app: mcphub-coordinator
  ports:
    - port: 3000
      targetPort: 3000
  type: LoadBalancer
```

**工作节点部署:**

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mcphub-node
spec:
  replicas: 3
  selector:
    matchLabels:
      app: mcphub-node
  template:
    metadata:
      labels:
        app: mcphub-node
    spec:
      containers:
        - name: mcphub
          image: samanhappy/mcphub:latest
          volumeMounts:
            - name: config
              mountPath: /app/mcp_settings.json
              subPath: mcp_settings.json
      volumes:
        - name: config
          configMap:
            name: mcphub-node-config
```

应用清单:

```bash
kubectl apply -f coordinator.yaml
kubectl apply -f nodes.yaml
```

### 场景 3:手动部署

**在协调器上(192.168.1.100):**

```bash
# 安装 MCPHub
npm install -g @samanhappy/mcphub

# 配置为协调器(最小配置,完整选项见上文"协调器配置"一节)
cat > mcp_settings.json <<'EOF'
{
  "mcpServers": {},
  "systemConfig": {
    "cluster": {
      "enabled": true,
      "mode": "coordinator"
    }
  }
}
EOF

# 启动 MCPHub
mcphub
```

**在工作节点上(192.168.1.101):**

```bash
# 安装 MCPHub
npm install -g @samanhappy/mcphub

# 配置为工作节点,指向协调器
cat > mcp_settings.json <<'EOF'
{
  "mcpServers": {
    "playwright": {
      "command": "npx",
      "args": ["@playwright/mcp@latest", "--headless"]
    }
  },
  "systemConfig": {
    "cluster": {
      "enabled": true,
      "mode": "node",
      "node": {
        "coordinatorUrl": "http://192.168.1.100:3000"
      }
    }
  }
}
EOF

# 启动 MCPHub
mcphub
```

## 性能考虑

### 大型集群

对于大型集群(>50 个节点),考虑:

- 增加协调器资源
- 调整心跳间隔
- 使用基于请求头的会话粘性(开销更低)

### 网络延迟

最小化协调器和节点之间的延迟:

- 部署在同一数据中心/地区
- 使用低延迟网络
- 将协调器部署在靠近客户端的位置

### 会话超时

平衡会话超时与资源使用:

- 较短超时:占用内存更少,但重新路由更多
- 较长超时:粘性更好,但占用内存更多

默认为 1 小时,请根据您的用例调整。

## 限制

1. **有状态会话**:节点故障时,节点本地状态会丢失。请使用外部存储保存持久状态。
2. **单协调器**:当前仅支持一个协调器。可考虑在基础设施层面做负载均衡。
3. **网络分区**:与协调器失去连接的节点将被标记为不健康。

## 最佳实践

1. **使用分组**:将 MCP 服务器组织到分组中,便于管理
2. **监控健康**:为不健康的节点设置告警
3. **版本一致性**:在所有节点上运行相同的 MCPHub 版本
4. **资源规划**:根据各 MCP 服务器的要求分配适当的资源
5. **备份配置**:保持协调器配置的备份
6. 
**逐步推出**:首先使用少量节点测试集群配置 + +## 相关文档 + +- [Docker 部署](../deployment/docker.md) +- [Kubernetes 部署](../deployment/kubernetes.md) +- [高可用性设置](../deployment/high-availability.md) diff --git a/examples/cluster-config-examples.md b/examples/cluster-config-examples.md new file mode 100644 index 0000000..74a0273 --- /dev/null +++ b/examples/cluster-config-examples.md @@ -0,0 +1,444 @@ +# Cluster Configuration Examples + +## Coordinator Node Configuration + +```json +{ + "mcpServers": { + "fetch": { + "command": "uvx", + "args": ["mcp-server-fetch"], + "enabled": true + } + }, + "users": [ + { + "username": "admin", + "password": "$2b$10$Vt7krIvjNgyN67LXqly0uOcTpN0LI55cYRbcKC71pUDAP0nJ7RPa.", + "isAdmin": true + } + ], + "systemConfig": { + "cluster": { + "enabled": true, + "mode": "coordinator", + "coordinator": { + "nodeTimeout": 15000, + "cleanupInterval": 30000, + "stickySessionTimeout": 3600000 + }, + "stickySession": { + "enabled": true, + "strategy": "consistent-hash", + "cookieName": "MCPHUB_NODE", + "headerName": "X-MCPHub-Node" + } + }, + "routing": { + "enableGlobalRoute": true, + "enableGroupNameRoute": true, + "enableBearerAuth": false + } + } +} +``` + +## Worker Node 1 Configuration + +```json +{ + "mcpServers": { + "amap": { + "command": "npx", + "args": ["-y", "@amap/amap-maps-mcp-server"], + "env": { + "AMAP_MAPS_API_KEY": "${AMAP_MAPS_API_KEY}" + }, + "enabled": true + }, + "playwright": { + "command": "npx", + "args": ["@playwright/mcp@latest", "--headless"], + "enabled": true + } + }, + "systemConfig": { + "cluster": { + "enabled": true, + "mode": "node", + "node": { + "id": "node-1", + "name": "Worker Node 1", + "coordinatorUrl": "http://coordinator:3000", + "heartbeatInterval": 5000, + "registerOnStartup": true + } + } + } +} +``` + +## Worker Node 2 Configuration + +```json +{ + "mcpServers": { + "playwright": { + "command": "npx", + "args": ["@playwright/mcp@latest", "--headless"], + "enabled": true + }, + "slack": { + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-slack"], + "env": { + "SLACK_BOT_TOKEN": "${SLACK_BOT_TOKEN}", + "SLACK_TEAM_ID": "${SLACK_TEAM_ID}" + }, + "enabled": true + } + }, + "systemConfig": { + "cluster": { + "enabled": true, + "mode": "node", + "node": { + "id": "node-2", + "name": "Worker Node 2", + "coordinatorUrl": "http://coordinator:3000", + "heartbeatInterval": 5000, + "registerOnStartup": true + } + } + } +} +``` + +## Docker Compose Example + +```yaml +version: '3.8' + +services: + coordinator: + image: samanhappy/mcphub:latest + container_name: mcphub-coordinator + hostname: coordinator + ports: + - "3000:3000" + volumes: + - ./examples/coordinator-config.json:/app/mcp_settings.json + - coordinator-data:/app/data + environment: + - NODE_ENV=production + - PORT=3000 + networks: + - mcphub-cluster + restart: unless-stopped + + node1: + image: samanhappy/mcphub:latest + container_name: mcphub-node1 + hostname: node1 + volumes: + - ./examples/node1-config.json:/app/mcp_settings.json + - node1-data:/app/data + environment: + - NODE_ENV=production + - PORT=3001 + - AMAP_MAPS_API_KEY=${AMAP_MAPS_API_KEY} + networks: + - mcphub-cluster + depends_on: + - coordinator + restart: unless-stopped + + node2: + image: samanhappy/mcphub:latest + container_name: mcphub-node2 + hostname: node2 + volumes: + - ./examples/node2-config.json:/app/mcp_settings.json + - node2-data:/app/data + environment: + - NODE_ENV=production + - PORT=3002 + - SLACK_BOT_TOKEN=${SLACK_BOT_TOKEN} + - SLACK_TEAM_ID=${SLACK_TEAM_ID} + networks: + - mcphub-cluster + 
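    # Same pattern as node1: wait for the coordinator container to come up
    # first, and restart automatically if this node crashes.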
depends_on: + - coordinator + restart: unless-stopped + +networks: + mcphub-cluster: + driver: bridge + +volumes: + coordinator-data: + node1-data: + node2-data: +``` + +## Kubernetes Example + +### ConfigMaps + +**coordinator-config.yaml:** +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: mcphub-coordinator-config + namespace: mcphub +data: + mcp_settings.json: | + { + "mcpServers": { + "fetch": { + "command": "uvx", + "args": ["mcp-server-fetch"], + "enabled": true + } + }, + "users": [ + { + "username": "admin", + "password": "$2b$10$Vt7krIvjNgyN67LXqly0uOcTpN0LI55cYRbcKC71pUDAP0nJ7RPa.", + "isAdmin": true + } + ], + "systemConfig": { + "cluster": { + "enabled": true, + "mode": "coordinator", + "coordinator": { + "nodeTimeout": 15000, + "cleanupInterval": 30000, + "stickySessionTimeout": 3600000 + }, + "stickySession": { + "enabled": true, + "strategy": "consistent-hash" + } + } + } + } +``` + +**node-config.yaml:** +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: mcphub-node-config + namespace: mcphub +data: + mcp_settings.json: | + { + "mcpServers": { + "playwright": { + "command": "npx", + "args": ["@playwright/mcp@latest", "--headless"], + "enabled": true + } + }, + "systemConfig": { + "cluster": { + "enabled": true, + "mode": "node", + "node": { + "coordinatorUrl": "http://mcphub-coordinator:3000", + "heartbeatInterval": 5000, + "registerOnStartup": true + } + } + } + } +``` + +### Deployments + +**coordinator.yaml:** +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mcphub-coordinator + namespace: mcphub +spec: + replicas: 1 + selector: + matchLabels: + app: mcphub-coordinator + template: + metadata: + labels: + app: mcphub-coordinator + spec: + containers: + - name: mcphub + image: samanhappy/mcphub:latest + imagePullPolicy: Always + ports: + - containerPort: 3000 + name: http + env: + - name: NODE_ENV + value: production + - name: PORT + value: "3000" + volumeMounts: + - name: config + mountPath: /app/mcp_settings.json + subPath: mcp_settings.json + - name: data + mountPath: /app/data + resources: + requests: + memory: "512Mi" + cpu: "500m" + limits: + memory: "1Gi" + cpu: "1000m" + livenessProbe: + httpGet: + path: /health + port: 3000 + initialDelaySeconds: 30 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /health + port: 3000 + initialDelaySeconds: 10 + periodSeconds: 5 + volumes: + - name: config + configMap: + name: mcphub-coordinator-config + - name: data + emptyDir: {} +--- +apiVersion: v1 +kind: Service +metadata: + name: mcphub-coordinator + namespace: mcphub +spec: + selector: + app: mcphub-coordinator + ports: + - port: 3000 + targetPort: 3000 + name: http + type: LoadBalancer +``` + +**nodes.yaml:** +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mcphub-node + namespace: mcphub +spec: + replicas: 3 + selector: + matchLabels: + app: mcphub-node + template: + metadata: + labels: + app: mcphub-node + spec: + containers: + - name: mcphub + image: samanhappy/mcphub:latest + imagePullPolicy: Always + env: + - name: NODE_ENV + value: production + volumeMounts: + - name: config + mountPath: /app/mcp_settings.json + subPath: mcp_settings.json + - name: data + mountPath: /app/data + resources: + requests: + memory: "512Mi" + cpu: "500m" + limits: + memory: "2Gi" + cpu: "2000m" + volumes: + - name: config + configMap: + name: mcphub-node-config + - name: data + emptyDir: {} +``` + +## Environment Variables + +Create a `.env` file for sensitive values: + +```bash +# API Keys 
AMAP_MAPS_API_KEY=your-amap-api-key
SLACK_BOT_TOKEN=xoxb-your-slack-bot-token
SLACK_TEAM_ID=T01234567

# Optional: Custom ports
COORDINATOR_PORT=3000
NODE1_PORT=3001
NODE2_PORT=3002
```

## Testing the Cluster

After starting the cluster, test connectivity:

```bash
# Check coordinator health
curl http://localhost:3000/health

# Get cluster status
curl http://localhost:3000/api/cluster/status

# List all nodes
curl http://localhost:3000/api/cluster/nodes

# Test MCP endpoint
curl -X POST http://localhost:3000/mcp \
  -H "Content-Type: application/json" \
  -d '{
    "jsonrpc": "2.0",
    "method": "initialize",
    "params": {
      "protocolVersion": "2024-11-05",
      "capabilities": {},
      "clientInfo": {
        "name": "test-client",
        "version": "1.0.0"
      }
    },
    "id": 1
  }'
```

## Scaling

### Scale worker nodes (Docker Compose)

```bash
docker-compose up -d --scale node1=3
```

Note: to scale a service this way, remove its fixed `container_name` (Compose cannot run multiple containers under one name) and drop the hard-coded `node.id` from its config so the replicas register as distinct nodes.

### Scale worker nodes (Kubernetes)

```bash
kubectl scale deployment mcphub-node --replicas=5 -n mcphub
```
diff --git a/tests/services/clusterService.test.ts b/tests/services/clusterService.test.ts
index 65a764f..5859131 100644
--- a/tests/services/clusterService.test.ts
+++ b/tests/services/clusterService.test.ts
@@ -5,7 +5,6 @@
 import {
   isClusterEnabled,
   getClusterMode,
-  getCurrentNodeId,
   registerNode,
   updateNodeHeartbeat,
   getActiveNodes,
@@ -18,15 +17,16 @@ import {
   shutdownClusterService,
 } from '../../src/services/clusterService';
 import { ClusterNode } from '../../src/types/index';
+import * as configModule from '../../src/config/index.js';
 
 // Mock the config module
 jest.mock('../../src/config/index.js', () => ({
   loadSettings: jest.fn(),
 }));
 
-const { loadSettings } = require('../../src/config/index.js');
-
 describe('Cluster Service', () => {
+  const loadSettings = configModule.loadSettings as jest.MockedFunction<typeof configModule.loadSettings>;
+
   beforeEach(() => {
     jest.clearAllMocks();
   });