Add comprehensive cluster documentation and configuration examples

Co-authored-by: samanhappy <2755122+samanhappy@users.noreply.github.com>
Author: copilot-swe-agent[bot]
Date: 2025-10-26 14:13:40 +00:00
Committed by: samanhappy
Parent: f4bac3adc0
Commit: 1f535c3d7f
4 changed files with 1473 additions and 3 deletions

docs/cluster-deployment.md

@@ -0,0 +1,516 @@
# Cluster Deployment Guide
MCPHub supports cluster deployment, allowing you to run multiple nodes that work together as a unified system. This enables:
- **High Availability**: Distribute MCP servers across multiple nodes for redundancy
- **Load Distribution**: Balance requests across multiple replicas of the same MCP server
- **Sticky Sessions**: Ensure client sessions are routed to the same node consistently
- **Centralized Management**: One coordinator manages the entire cluster
## Architecture
An MCPHub cluster has three operating modes:
1. **Standalone Mode** (Default): Single node operation, no cluster features
2. **Coordinator Mode**: Central node that manages the cluster, routes requests, and maintains session affinity
3. **Node Mode**: Worker nodes that register with the coordinator and run MCP servers
```
┌─────────────────────────────────────────┐
│           Coordinator Node              │
│  - Manages cluster state                │
│  - Routes client requests               │
│  - Maintains session affinity           │
│  - Health monitoring                    │
└───────────┬─────────────────────────────┘
            │
    ┌───────┴───────────────────┐
    │                           │
┌───▼────────┐         ┌────────▼────┐
│   Node 1   │         │   Node 2    │
│  - MCP A   │         │  - MCP A    │
│  - MCP B   │         │  - MCP C    │
└────────────┘         └─────────────┘
```
## Configuration
### Coordinator Configuration
Create or update `mcp_settings.json` on the coordinator node:
```json
{
"mcpServers": {
// Optional: coordinator can also run MCP servers
"example": {
"command": "npx",
"args": ["-y", "example-mcp-server"]
}
},
"systemConfig": {
"cluster": {
"enabled": true,
"mode": "coordinator",
"coordinator": {
"nodeTimeout": 15000,
"cleanupInterval": 30000,
"stickySessionTimeout": 3600000
},
"stickySession": {
"enabled": true,
"strategy": "consistent-hash",
"cookieName": "MCPHUB_NODE",
"headerName": "X-MCPHub-Node"
}
}
}
}
```
**Configuration Options:**
- `nodeTimeout`: Time (ms) before marking a node as unhealthy (default: 15000)
- `cleanupInterval`: Interval (ms) for cleaning up inactive nodes (default: 30000)
- `stickySessionTimeout`: Session affinity timeout (ms) (default: 3600000, i.e. 1 hour)
- `stickySession.enabled`: Enable sticky session routing (default: true)
- `stickySession.strategy`: Session affinity strategy:
- `consistent-hash`: Hash-based routing (default)
- `cookie`: Cookie-based routing
- `header`: Header-based routing
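To make the consistent-hash strategy concrete, here is a minimal TypeScript sketch of classic consistent hashing (virtual nodes on a ring). It is an illustration only, not MCPHub's actual implementation; all function names are assumptions:
```typescript
import { createHash } from 'crypto';

// Map an arbitrary key to a 32-bit position on the hash ring.
function hashToRing(key: string): number {
  return parseInt(createHash('md5').update(key).digest('hex').slice(0, 8), 16);
}

// Place each node at many virtual positions so load spreads evenly.
function buildRing(nodeIds: string[], vnodes = 100): Array<{ pos: number; nodeId: string }> {
  return nodeIds
    .flatMap((nodeId) =>
      Array.from({ length: vnodes }, (_, i) => ({ pos: hashToRing(`${nodeId}#${i}`), nodeId })),
    )
    .sort((a, b) => a.pos - b.pos);
}

// A session always maps to the first ring position at or after its hash.
function pickNode(ring: Array<{ pos: number; nodeId: string }>, sessionId: string): string {
  const h = hashToRing(sessionId);
  return (ring.find((e) => e.pos >= h) ?? ring[0]).nodeId;
}

const ring = buildRing(['node-1', 'node-2']);
console.log(pickNode(ring, 'session-abc')); // same session ID -> same node
```
The practical property this buys: when a node joins or leaves, only the sessions in the ring segments that node owned are re-routed; every other session keeps its affinity.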
### Node Configuration
Create or update `mcp_settings.json` on each worker node:
```json
{
"mcpServers": {
"amap": {
"command": "npx",
"args": ["-y", "@amap/amap-maps-mcp-server"]
},
"playwright": {
"command": "npx",
"args": ["@playwright/mcp@latest", "--headless"]
}
},
"systemConfig": {
"cluster": {
"enabled": true,
"mode": "node",
"node": {
"id": "node-1",
"name": "Worker Node 1",
"coordinatorUrl": "http://coordinator:3000",
"heartbeatInterval": 5000,
"registerOnStartup": true
}
}
}
}
```
**Configuration Options:**
- `node.id`: Unique node identifier (auto-generated if not provided)
- `node.name`: Human-readable node name (defaults to hostname)
- `node.coordinatorUrl`: URL of the coordinator node (required)
- `node.heartbeatInterval`: Heartbeat interval (ms) (default: 5000)
- `node.registerOnStartup`: Auto-register on startup (default: true)
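The relationship between a node's `heartbeatInterval` and the coordinator's `nodeTimeout` is easiest to see in a sketch. The TypeScript loop below is illustrative only; the endpoint paths and payload shape are assumptions, not MCPHub's actual wire protocol:
```typescript
// Illustrative node lifecycle: register once, then heartbeat often enough
// that the coordinator (which marks nodes unhealthy after nodeTimeout ms)
// never misses us: 5000 ms heartbeats vs. a 15000 ms timeout by default.
const coordinatorUrl = 'http://coordinator:3000'; // node.coordinatorUrl
const heartbeatInterval = 5000; // node.heartbeatInterval

async function register(): Promise<void> {
  // Path and body shape are assumptions for illustration.
  await fetch(`${coordinatorUrl}/api/cluster/nodes`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ id: 'node-1', name: 'Worker Node 1', servers: ['amap', 'playwright'] }),
  });
}

async function heartbeat(): Promise<void> {
  // Path is an assumption for illustration.
  await fetch(`${coordinatorUrl}/api/cluster/nodes/node-1/heartbeat`, { method: 'POST' });
}

register().then(() => setInterval(() => heartbeat().catch(console.error), heartbeatInterval));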
## Deployment Scenarios
### Scenario 1: Docker Compose
Create a `docker-compose.yml`:
```yaml
version: '3.8'
services:
coordinator:
image: samanhappy/mcphub:latest
ports:
- "3000:3000"
volumes:
- ./coordinator-config.json:/app/mcp_settings.json
- coordinator-data:/app/data
environment:
- NODE_ENV=production
node1:
image: samanhappy/mcphub:latest
volumes:
- ./node1-config.json:/app/mcp_settings.json
- node1-data:/app/data
environment:
- NODE_ENV=production
depends_on:
- coordinator
node2:
image: samanhappy/mcphub:latest
volumes:
- ./node2-config.json:/app/mcp_settings.json
- node2-data:/app/data
environment:
- NODE_ENV=production
depends_on:
- coordinator
volumes:
coordinator-data:
node1-data:
node2-data:
```
Start the cluster:
```bash
docker-compose up -d
```
### Scenario 2: Kubernetes
Create Kubernetes manifests:
**Coordinator Deployment:**
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: mcphub-coordinator
spec:
replicas: 1
selector:
matchLabels:
app: mcphub-coordinator
template:
metadata:
labels:
app: mcphub-coordinator
spec:
containers:
- name: mcphub
image: samanhappy/mcphub:latest
ports:
- containerPort: 3000
volumeMounts:
- name: config
mountPath: /app/mcp_settings.json
subPath: mcp_settings.json
volumes:
- name: config
configMap:
name: mcphub-coordinator-config
---
apiVersion: v1
kind: Service
metadata:
name: mcphub-coordinator
spec:
selector:
app: mcphub-coordinator
ports:
- port: 3000
targetPort: 3000
type: LoadBalancer
```
**Worker Node Deployment:**
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: mcphub-node
spec:
replicas: 3
selector:
matchLabels:
app: mcphub-node
template:
metadata:
labels:
app: mcphub-node
spec:
containers:
- name: mcphub
image: samanhappy/mcphub:latest
volumeMounts:
- name: config
mountPath: /app/mcp_settings.json
subPath: mcp_settings.json
volumes:
- name: config
configMap:
name: mcphub-node-config
```
Apply the manifests:
```bash
kubectl apply -f coordinator.yaml
kubectl apply -f nodes.yaml
```
### Scenario 3: Manual Deployment
**On Coordinator (192.168.1.100):**
```bash
# Install MCPHub
npm install -g @samanhappy/mcphub
# Configure as coordinator
cat > mcp_settings.json <<EOF
{
"systemConfig": {
"cluster": {
"enabled": true,
"mode": "coordinator"
}
}
}
EOF
# Start coordinator
PORT=3000 mcphub
```
**On Node 1 (192.168.1.101):**
```bash
# Install MCPHub
npm install -g @samanhappy/mcphub
# Configure as node
cat > mcp_settings.json <<EOF
{
"mcpServers": {
"server1": { "command": "..." }
},
"systemConfig": {
"cluster": {
"enabled": true,
"mode": "node",
"node": {
"coordinatorUrl": "http://192.168.1.100:3000"
}
}
}
}
EOF
# Start node
PORT=3001 mcphub
```
**On Node 2 (192.168.1.102):**
```bash
# Similar to Node 1, but with PORT=3002
```
## Usage
### Accessing the Cluster
Once the cluster is running, connect AI clients to the coordinator's endpoints:
```
http://coordinator:3000/mcp
http://coordinator:3000/sse
```
The coordinator will:
1. Route requests to appropriate nodes based on session affinity
2. Load balance across multiple replicas of the same server
3. Automatically fail over to healthy nodes
### Sticky Sessions
Sticky sessions ensure that a client's requests are routed to the same node throughout their session. This is important for:
- Maintaining conversation context
- Preserving temporary state
- Consistent tool execution
The default strategy is **consistent-hash**, which uses the session ID to determine the target node. Alternative strategies:
- **Cookie-based**: Uses `MCPHUB_NODE` cookie
- **Header-based**: Uses `X-MCPHub-Node` header
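With the header strategy, for example, a client can pin itself to a specific node by sending the configured header on every request (`node-1` is illustrative; real node IDs come from `/api/cluster/nodes`):
```bash
# Pin this client's requests to node-1 via the configured header name
curl -X POST http://coordinator:3000/mcp \
  -H "Content-Type: application/json" \
  -H "X-MCPHub-Node: node-1" \
  -d '{"jsonrpc":"2.0","method":"ping","id":1}'
```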
### Multiple Replicas
You can deploy the same MCP server on multiple nodes for:
- **Load balancing**: Distribute requests across replicas
- **High availability**: Fail over if one node goes down
Example configuration:
**Node 1:**
```json
{
"mcpServers": {
"playwright": {
"command": "npx",
"args": ["@playwright/mcp@latest"]
}
}
}
```
**Node 2:**
```json
{
"mcpServers": {
"playwright": {
"command": "npx",
"args": ["@playwright/mcp@latest"]
}
}
}
```
The coordinator will automatically load balance requests to `playwright` across both nodes.
## Management API
The coordinator exposes cluster management endpoints:
### Get Cluster Status
```bash
curl http://coordinator:3000/api/cluster/status
```
Response:
```json
{
"success": true,
"data": {
"enabled": true,
"mode": "coordinator",
"nodeId": "coordinator",
"stats": {
"nodes": 3,
"activeNodes": 3,
"servers": 5,
"sessions": 10
}
}
}
```
### Get All Nodes
```bash
curl http://coordinator:3000/api/cluster/nodes
```
### Get Server Replicas
```bash
curl http://coordinator:3000/api/cluster/servers/playwright/replicas
```
### Get Session Affinity
```bash
curl http://coordinator:3000/api/cluster/sessions/{sessionId}
```
## Monitoring and Troubleshooting
### Check Node Health
Monitor coordinator logs for heartbeat messages:
```
Node registered: Worker Node 1 (node-1) with 2 servers
```
If a node becomes unhealthy:
```
Marking node node-1 as unhealthy (last heartbeat: 2024-01-01T10:00:00.000Z)
```
### Verify Registration
Check if nodes are registered:
```bash
curl http://coordinator:3000/api/cluster/nodes?active=true
```
### Session Affinity Issues
If sessions aren't sticking to the same node:
1. Verify sticky sessions are enabled in coordinator config
2. Check that session IDs are being passed correctly
3. Review coordinator logs for session affinity errors
### Network Connectivity
Ensure worker nodes can reach the coordinator:
```bash
# From worker node
curl http://coordinator:3000/health
```
## Performance Considerations
### Coordinator Load
The coordinator handles:
- Request routing
- Node heartbeats
- Session tracking
For very large clusters (>50 nodes), consider:
- Increasing coordinator resources
- Tuning heartbeat intervals
- Using header-based sticky sessions (lower overhead)
### Network Latency
Minimize latency between coordinator and nodes:
- Deploy in the same datacenter/region
- Use low-latency networking
- Consider coordinator placement near clients
### Session Timeout
Balance session timeout with resource usage:
- Shorter timeout: Less memory, more re-routing
- Longer timeout: Better stickiness, more memory
The default is 1 hour; adjust it based on your use case.
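For example, a coordinator that should hold affinity for only 10 minutes would set:
```json
{
  "systemConfig": {
    "cluster": {
      "enabled": true,
      "mode": "coordinator",
      "coordinator": {
        "stickySessionTimeout": 600000
      }
    }
  }
}
```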
## Limitations
1. **Stateful Sessions**: Node-local state is lost if a node fails. Use external storage for persistent state.
2. **Single Coordinator**: Currently supports one coordinator. Consider load balancing at the infrastructure level.
3. **Network Partitions**: Nodes that lose connection to coordinator will be marked unhealthy.
## Best Practices
1. **Use Groups**: Organize MCP servers into groups for easier management
2. **Monitor Health**: Set up alerts for unhealthy nodes
3. **Version Consistency**: Run the same MCPHub version across all nodes
4. **Resource Planning**: Allocate appropriate resources based on MCP server requirements
5. **Backup Configuration**: Keep coordinator config backed up
6. **Gradual Rollout**: Test cluster configuration with a small number of nodes first
## See Also
- [Docker Deployment](../deployment/docker.md)
- [Kubernetes Deployment](../deployment/kubernetes.md)
- [High Availability Setup](../deployment/high-availability.md)


@@ -0,0 +1,510 @@
# Cluster Deployment Guide
MCPHub supports cluster deployment, allowing multiple nodes to work together as a unified system. This enables:
- **High Availability**: Distribute MCP servers across multiple nodes for redundancy
- **Load Distribution**: Balance requests across multiple replicas of the same MCP server
- **Sticky Sessions**: Ensure client sessions are routed to the same node consistently
- **Centralized Management**: One coordinator manages the entire cluster
## Architecture
An MCPHub cluster has three operating modes:
1. **Standalone Mode** (Default): Single node operation, no cluster features
2. **Coordinator Mode**: Central node that manages the cluster, routes requests, and maintains session affinity
3. **Node Mode**: Worker nodes that register with the coordinator and run MCP servers
```
┌─────────────────────────────────────────┐
│           Coordinator Node              │
│  - Manages cluster state                │
│  - Routes client requests               │
│  - Maintains session affinity           │
│  - Health monitoring                    │
└───────────┬─────────────────────────────┘
            │
    ┌───────┴───────────────────┐
    │                           │
┌───▼────────┐         ┌────────▼────┐
│   Node 1   │         │   Node 2    │
│  - MCP A   │         │  - MCP A    │
│  - MCP B   │         │  - MCP C    │
└────────────┘         └─────────────┘
```
## Configuration
### Coordinator Configuration
Create or update `mcp_settings.json` on the coordinator node:
```json
{
"mcpServers": {
// Optional: the coordinator can also run MCP servers
"example": {
"command": "npx",
"args": ["-y", "example-mcp-server"]
}
},
"systemConfig": {
"cluster": {
"enabled": true,
"mode": "coordinator",
"coordinator": {
"nodeTimeout": 15000,
"cleanupInterval": 30000,
"stickySessionTimeout": 3600000
},
"stickySession": {
"enabled": true,
"strategy": "consistent-hash",
"cookieName": "MCPHUB_NODE",
"headerName": "X-MCPHub-Node"
}
}
}
}
```
**Configuration Options:**
- `nodeTimeout`: Time (ms) before marking a node as unhealthy (default: 15000)
- `cleanupInterval`: Interval (ms) for cleaning up inactive nodes (default: 30000)
- `stickySessionTimeout`: Session affinity timeout (ms) (default: 3600000, i.e. 1 hour)
- `stickySession.enabled`: Enable sticky session routing (default: true)
- `stickySession.strategy`: Session affinity strategy:
- `consistent-hash`: Hash-based routing (default)
- `cookie`: Cookie-based routing
- `header`: Header-based routing
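To make the consistent-hash strategy concrete, here is a minimal TypeScript sketch of classic consistent hashing (virtual nodes on a ring). It is an illustration only, not MCPHub's actual implementation; all function names are assumptions:
```typescript
import { createHash } from 'crypto';

// Map an arbitrary key to a 32-bit position on the hash ring.
function hashToRing(key: string): number {
  return parseInt(createHash('md5').update(key).digest('hex').slice(0, 8), 16);
}

// Place each node at many virtual positions so load spreads evenly.
function buildRing(nodeIds: string[], vnodes = 100): Array<{ pos: number; nodeId: string }> {
  return nodeIds
    .flatMap((nodeId) =>
      Array.from({ length: vnodes }, (_, i) => ({ pos: hashToRing(`${nodeId}#${i}`), nodeId })),
    )
    .sort((a, b) => a.pos - b.pos);
}

// A session always maps to the first ring position at or after its hash.
function pickNode(ring: Array<{ pos: number; nodeId: string }>, sessionId: string): string {
  const h = hashToRing(sessionId);
  return (ring.find((e) => e.pos >= h) ?? ring[0]).nodeId;
}

const ring = buildRing(['node-1', 'node-2']);
console.log(pickNode(ring, 'session-abc')); // same session ID -> same node
```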
### Node Configuration
Create or update `mcp_settings.json` on each worker node:
```json
{
"mcpServers": {
"amap": {
"command": "npx",
"args": ["-y", "@amap/amap-maps-mcp-server"]
},
"playwright": {
"command": "npx",
"args": ["@playwright/mcp@latest", "--headless"]
}
},
"systemConfig": {
"cluster": {
"enabled": true,
"mode": "node",
"node": {
"id": "node-1",
"name": "工作节点 1",
"coordinatorUrl": "http://coordinator:3000",
"heartbeatInterval": 5000,
"registerOnStartup": true
}
}
}
}
```
**Configuration Options:**
- `node.id`: Unique node identifier (auto-generated if not provided)
- `node.name`: Human-readable node name (defaults to hostname)
- `node.coordinatorUrl`: URL of the coordinator node (required)
- `node.heartbeatInterval`: Heartbeat interval (ms) (default: 5000)
- `node.registerOnStartup`: Auto-register on startup (default: true)
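The relationship between a node's `heartbeatInterval` and the coordinator's `nodeTimeout` is easiest to see in a sketch. The TypeScript loop below is illustrative only; the endpoint paths and payload shape are assumptions, not MCPHub's actual wire protocol:
```typescript
// Illustrative node lifecycle: register once, then heartbeat often enough
// that the coordinator (which marks nodes unhealthy after nodeTimeout ms)
// never misses us: 5000 ms heartbeats vs. a 15000 ms timeout by default.
const coordinatorUrl = 'http://coordinator:3000'; // node.coordinatorUrl
const heartbeatInterval = 5000; // node.heartbeatInterval

async function register(): Promise<void> {
  // Path and body shape are assumptions for illustration.
  await fetch(`${coordinatorUrl}/api/cluster/nodes`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ id: 'node-1', name: 'Worker Node 1', servers: ['amap', 'playwright'] }),
  });
}

async function heartbeat(): Promise<void> {
  // Path is an assumption for illustration.
  await fetch(`${coordinatorUrl}/api/cluster/nodes/node-1/heartbeat`, { method: 'POST' });
}

register().then(() => setInterval(() => heartbeat().catch(console.error), heartbeatInterval));
```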
## Deployment Scenarios
### Scenario 1: Docker Compose
Create a `docker-compose.yml`:
```yaml
version: '3.8'
services:
coordinator:
image: samanhappy/mcphub:latest
ports:
- "3000:3000"
volumes:
- ./coordinator-config.json:/app/mcp_settings.json
- coordinator-data:/app/data
environment:
- NODE_ENV=production
node1:
image: samanhappy/mcphub:latest
volumes:
- ./node1-config.json:/app/mcp_settings.json
- node1-data:/app/data
environment:
- NODE_ENV=production
depends_on:
- coordinator
node2:
image: samanhappy/mcphub:latest
volumes:
- ./node2-config.json:/app/mcp_settings.json
- node2-data:/app/data
environment:
- NODE_ENV=production
depends_on:
- coordinator
volumes:
coordinator-data:
node1-data:
node2-data:
```
Start the cluster:
```bash
docker-compose up -d
```
### Scenario 2: Kubernetes
Create Kubernetes manifests:
**Coordinator Deployment:**
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: mcphub-coordinator
spec:
replicas: 1
selector:
matchLabels:
app: mcphub-coordinator
template:
metadata:
labels:
app: mcphub-coordinator
spec:
containers:
- name: mcphub
image: samanhappy/mcphub:latest
ports:
- containerPort: 3000
volumeMounts:
- name: config
mountPath: /app/mcp_settings.json
subPath: mcp_settings.json
volumes:
- name: config
configMap:
name: mcphub-coordinator-config
---
apiVersion: v1
kind: Service
metadata:
name: mcphub-coordinator
spec:
selector:
app: mcphub-coordinator
ports:
- port: 3000
targetPort: 3000
type: LoadBalancer
```
**Worker Node Deployment:**
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: mcphub-node
spec:
replicas: 3
selector:
matchLabels:
app: mcphub-node
template:
metadata:
labels:
app: mcphub-node
spec:
containers:
- name: mcphub
image: samanhappy/mcphub:latest
volumeMounts:
- name: config
mountPath: /app/mcp_settings.json
subPath: mcp_settings.json
volumes:
- name: config
configMap:
name: mcphub-node-config
```
Apply the manifests:
```bash
kubectl apply -f coordinator.yaml
kubectl apply -f nodes.yaml
```
### Scenario 3: Manual Deployment
**On Coordinator (192.168.1.100):**
```bash
# Install MCPHub
npm install -g @samanhappy/mcphub
# Configure as coordinator
cat > mcp_settings.json <<EOF
{
"systemConfig": {
"cluster": {
"enabled": true,
"mode": "coordinator"
}
}
}
EOF
# Start coordinator
PORT=3000 mcphub
```
**On Node 1 (192.168.1.101):**
```bash
# Install MCPHub
npm install -g @samanhappy/mcphub
# Configure as node
cat > mcp_settings.json <<EOF
{
"mcpServers": {
"server1": { "command": "..." }
},
"systemConfig": {
"cluster": {
"enabled": true,
"mode": "node",
"node": {
"coordinatorUrl": "http://192.168.1.100:3000"
}
}
}
}
EOF
# Start node
PORT=3001 mcphub
```
## Usage
### Accessing the Cluster
Once the cluster is running, connect AI clients to the coordinator's endpoints:
```
http://coordinator:3000/mcp
http://coordinator:3000/sse
```
The coordinator will:
1. Route requests to appropriate nodes based on session affinity
2. Load balance across multiple replicas of the same server
3. Automatically fail over to healthy nodes
### Sticky Sessions
Sticky sessions ensure that a client's requests are routed to the same node throughout their session. This is important for:
- Maintaining conversation context
- Preserving temporary state
- Consistent tool execution
The default strategy is **consistent-hash**, which uses the session ID to determine the target node. Alternative strategies:
- **Cookie-based**: Uses the `MCPHUB_NODE` cookie
- **Header-based**: Uses the `X-MCPHub-Node` header
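With the header strategy, for example, a client can pin itself to a specific node by sending the configured header on every request (`node-1` is illustrative; real node IDs come from `/api/cluster/nodes`):
```bash
# Pin this client's requests to node-1 via the configured header name
curl -X POST http://coordinator:3000/mcp \
  -H "Content-Type: application/json" \
  -H "X-MCPHub-Node: node-1" \
  -d '{"jsonrpc":"2.0","method":"ping","id":1}'
```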
### Multiple Replicas
You can deploy the same MCP server on multiple nodes for:
- **Load balancing**: Distribute requests across replicas
- **High availability**: Fail over if one node goes down
Example configuration:
**Node 1:**
```json
{
"mcpServers": {
"playwright": {
"command": "npx",
"args": ["@playwright/mcp@latest"]
}
}
}
```
**Node 2:**
```json
{
"mcpServers": {
"playwright": {
"command": "npx",
"args": ["@playwright/mcp@latest"]
}
}
}
```
The coordinator will automatically load balance requests to `playwright` across both nodes.
## Management API
The coordinator exposes cluster management endpoints:
### Get Cluster Status
```bash
curl http://coordinator:3000/api/cluster/status
```
Response:
```json
{
"success": true,
"data": {
"enabled": true,
"mode": "coordinator",
"nodeId": "coordinator",
"stats": {
"nodes": 3,
"activeNodes": 3,
"servers": 5,
"sessions": 10
}
}
}
```
### Get All Nodes
```bash
curl http://coordinator:3000/api/cluster/nodes
```
### Get Server Replicas
```bash
curl http://coordinator:3000/api/cluster/servers/playwright/replicas
```
### Get Session Affinity
```bash
curl http://coordinator:3000/api/cluster/sessions/{sessionId}
```
## Monitoring and Troubleshooting
### Check Node Health
Monitor coordinator logs for heartbeat messages:
```
Node registered: Worker Node 1 (node-1) with 2 servers
```
If a node becomes unhealthy:
```
Marking node node-1 as unhealthy (last heartbeat: 2024-01-01T10:00:00.000Z)
```
### Verify Registration
Check if nodes are registered:
```bash
curl http://coordinator:3000/api/cluster/nodes?active=true
```
### Session Affinity Issues
If sessions aren't sticking to the same node:
1. Verify sticky sessions are enabled in the coordinator config
2. Check that session IDs are being passed correctly
3. Review coordinator logs for session affinity errors
### Network Connectivity
Ensure worker nodes can reach the coordinator:
```bash
# From worker node
curl http://coordinator:3000/health
```
## Performance Considerations
### Coordinator Load
The coordinator handles:
- Request routing
- Node heartbeats
- Session tracking
For very large clusters (>50 nodes), consider:
- Increasing coordinator resources
- Tuning heartbeat intervals
- Using header-based sticky sessions (lower overhead)
### Network Latency
Minimize latency between the coordinator and nodes:
- Deploy in the same datacenter/region
- Use low-latency networking
- Consider placing the coordinator near clients
### Session Timeout
Balance session timeout against resource usage:
- Shorter timeout: less memory, more re-routing
- Longer timeout: better stickiness, more memory
The default is 1 hour; adjust it based on your use case.
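For example, a coordinator that should hold affinity for only 10 minutes would set:
```json
{
  "systemConfig": {
    "cluster": {
      "enabled": true,
      "mode": "coordinator",
      "coordinator": {
        "stickySessionTimeout": 600000
      }
    }
  }
}
```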
## Limitations
1. **Stateful Sessions**: Node-local state is lost if a node fails. Use external storage for persistent state.
2. **Single Coordinator**: Currently only one coordinator is supported. Consider load balancing at the infrastructure level.
3. **Network Partitions**: Nodes that lose their connection to the coordinator will be marked unhealthy.
## Best Practices
1. **Use Groups**: Organize MCP servers into groups for easier management
2. **Monitor Health**: Set up alerts for unhealthy nodes
3. **Version Consistency**: Run the same MCPHub version across all nodes
4. **Resource Planning**: Allocate appropriate resources based on MCP server requirements
5. **Backup Configuration**: Keep the coordinator config backed up
6. **Gradual Rollout**: Test the cluster configuration with a small number of nodes first
## See Also
- [Docker Deployment](../deployment/docker.md)
- [Kubernetes Deployment](../deployment/kubernetes.md)
- [High Availability Setup](../deployment/high-availability.md)


@@ -0,0 +1,444 @@
# Cluster Configuration Examples
## Coordinator Node Configuration
```json
{
"mcpServers": {
"fetch": {
"command": "uvx",
"args": ["mcp-server-fetch"],
"enabled": true
}
},
"users": [
{
"username": "admin",
"password": "$2b$10$Vt7krIvjNgyN67LXqly0uOcTpN0LI55cYRbcKC71pUDAP0nJ7RPa.",
"isAdmin": true
}
],
"systemConfig": {
"cluster": {
"enabled": true,
"mode": "coordinator",
"coordinator": {
"nodeTimeout": 15000,
"cleanupInterval": 30000,
"stickySessionTimeout": 3600000
},
"stickySession": {
"enabled": true,
"strategy": "consistent-hash",
"cookieName": "MCPHUB_NODE",
"headerName": "X-MCPHub-Node"
}
},
"routing": {
"enableGlobalRoute": true,
"enableGroupNameRoute": true,
"enableBearerAuth": false
}
}
}
```
## Worker Node 1 Configuration
```json
{
"mcpServers": {
"amap": {
"command": "npx",
"args": ["-y", "@amap/amap-maps-mcp-server"],
"env": {
"AMAP_MAPS_API_KEY": "${AMAP_MAPS_API_KEY}"
},
"enabled": true
},
"playwright": {
"command": "npx",
"args": ["@playwright/mcp@latest", "--headless"],
"enabled": true
}
},
"systemConfig": {
"cluster": {
"enabled": true,
"mode": "node",
"node": {
"id": "node-1",
"name": "Worker Node 1",
"coordinatorUrl": "http://coordinator:3000",
"heartbeatInterval": 5000,
"registerOnStartup": true
}
}
}
}
```
## Worker Node 2 Configuration
```json
{
"mcpServers": {
"playwright": {
"command": "npx",
"args": ["@playwright/mcp@latest", "--headless"],
"enabled": true
},
"slack": {
"command": "npx",
"args": ["-y", "@modelcontextprotocol/server-slack"],
"env": {
"SLACK_BOT_TOKEN": "${SLACK_BOT_TOKEN}",
"SLACK_TEAM_ID": "${SLACK_TEAM_ID}"
},
"enabled": true
}
},
"systemConfig": {
"cluster": {
"enabled": true,
"mode": "node",
"node": {
"id": "node-2",
"name": "Worker Node 2",
"coordinatorUrl": "http://coordinator:3000",
"heartbeatInterval": 5000,
"registerOnStartup": true
}
}
}
}
```
## Docker Compose Example
```yaml
version: '3.8'
services:
coordinator:
image: samanhappy/mcphub:latest
container_name: mcphub-coordinator
hostname: coordinator
ports:
- "3000:3000"
volumes:
- ./examples/coordinator-config.json:/app/mcp_settings.json
- coordinator-data:/app/data
environment:
- NODE_ENV=production
- PORT=3000
networks:
- mcphub-cluster
restart: unless-stopped
node1:
image: samanhappy/mcphub:latest
container_name: mcphub-node1
hostname: node1
volumes:
- ./examples/node1-config.json:/app/mcp_settings.json
- node1-data:/app/data
environment:
- NODE_ENV=production
- PORT=3001
- AMAP_MAPS_API_KEY=${AMAP_MAPS_API_KEY}
networks:
- mcphub-cluster
depends_on:
- coordinator
restart: unless-stopped
node2:
image: samanhappy/mcphub:latest
container_name: mcphub-node2
hostname: node2
volumes:
- ./examples/node2-config.json:/app/mcp_settings.json
- node2-data:/app/data
environment:
- NODE_ENV=production
- PORT=3002
- SLACK_BOT_TOKEN=${SLACK_BOT_TOKEN}
- SLACK_TEAM_ID=${SLACK_TEAM_ID}
networks:
- mcphub-cluster
depends_on:
- coordinator
restart: unless-stopped
networks:
mcphub-cluster:
driver: bridge
volumes:
coordinator-data:
node1-data:
node2-data:
```
## Kubernetes Example
### ConfigMaps
**coordinator-config.yaml:**
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: mcphub-coordinator-config
namespace: mcphub
data:
mcp_settings.json: |
{
"mcpServers": {
"fetch": {
"command": "uvx",
"args": ["mcp-server-fetch"],
"enabled": true
}
},
"users": [
{
"username": "admin",
"password": "$2b$10$Vt7krIvjNgyN67LXqly0uOcTpN0LI55cYRbcKC71pUDAP0nJ7RPa.",
"isAdmin": true
}
],
"systemConfig": {
"cluster": {
"enabled": true,
"mode": "coordinator",
"coordinator": {
"nodeTimeout": 15000,
"cleanupInterval": 30000,
"stickySessionTimeout": 3600000
},
"stickySession": {
"enabled": true,
"strategy": "consistent-hash"
}
}
}
}
```
**node-config.yaml:**
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: mcphub-node-config
namespace: mcphub
data:
mcp_settings.json: |
{
"mcpServers": {
"playwright": {
"command": "npx",
"args": ["@playwright/mcp@latest", "--headless"],
"enabled": true
}
},
"systemConfig": {
"cluster": {
"enabled": true,
"mode": "node",
"node": {
"coordinatorUrl": "http://mcphub-coordinator:3000",
"heartbeatInterval": 5000,
"registerOnStartup": true
}
}
}
}
```
### Deployments
**coordinator.yaml:**
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: mcphub-coordinator
namespace: mcphub
spec:
replicas: 1
selector:
matchLabels:
app: mcphub-coordinator
template:
metadata:
labels:
app: mcphub-coordinator
spec:
containers:
- name: mcphub
image: samanhappy/mcphub:latest
imagePullPolicy: Always
ports:
- containerPort: 3000
name: http
env:
- name: NODE_ENV
value: production
- name: PORT
value: "3000"
volumeMounts:
- name: config
mountPath: /app/mcp_settings.json
subPath: mcp_settings.json
- name: data
mountPath: /app/data
resources:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "1Gi"
cpu: "1000m"
livenessProbe:
httpGet:
path: /health
port: 3000
initialDelaySeconds: 30
periodSeconds: 10
readinessProbe:
httpGet:
path: /health
port: 3000
initialDelaySeconds: 10
periodSeconds: 5
volumes:
- name: config
configMap:
name: mcphub-coordinator-config
- name: data
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
name: mcphub-coordinator
namespace: mcphub
spec:
selector:
app: mcphub-coordinator
ports:
- port: 3000
targetPort: 3000
name: http
type: LoadBalancer
```
**nodes.yaml:**
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: mcphub-node
namespace: mcphub
spec:
replicas: 3
selector:
matchLabels:
app: mcphub-node
template:
metadata:
labels:
app: mcphub-node
spec:
containers:
- name: mcphub
image: samanhappy/mcphub:latest
imagePullPolicy: Always
env:
- name: NODE_ENV
value: production
volumeMounts:
- name: config
mountPath: /app/mcp_settings.json
subPath: mcp_settings.json
- name: data
mountPath: /app/data
resources:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "2Gi"
cpu: "2000m"
volumes:
- name: config
configMap:
name: mcphub-node-config
- name: data
emptyDir: {}
```
## Environment Variables
Create a `.env` file for sensitive values:
```bash
# API Keys
AMAP_MAPS_API_KEY=your-amap-api-key
SLACK_BOT_TOKEN=xoxb-your-slack-bot-token
SLACK_TEAM_ID=T01234567
# Optional: Custom ports
COORDINATOR_PORT=3000
NODE1_PORT=3001
NODE2_PORT=3002
```
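Docker Compose loads `.env` from the project directory automatically, so the `${...}` references in the compose file above are substituted at startup. To check the substitution before deploying:
```bash
# Render the effective compose configuration with .env values applied
docker compose config
```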
## Testing the Cluster
After starting the cluster, test connectivity:
```bash
# Check coordinator health
curl http://localhost:3000/health
# Get cluster status
curl http://localhost:3000/api/cluster/status
# List all nodes
curl http://localhost:3000/api/cluster/nodes
# Test MCP endpoint
curl -X POST http://localhost:3000/mcp \
-H "Content-Type: application/json" \
-d '{
"jsonrpc": "2.0",
"method": "initialize",
"params": {
"protocolVersion": "2024-11-05",
"capabilities": {},
"clientInfo": {
"name": "test-client",
"version": "1.0.0"
}
},
"id": 1
}'
```
## Scaling
### Scale worker nodes (Docker Compose)
Note that `--scale` conflicts with a fixed `container_name`, and scaled replicas of `node1` would all share `node1-config.json` and therefore the same `node.id`. Remove `container_name` from the service and omit `node.id` (each replica then auto-generates one) before scaling:
```bash
docker-compose up -d --scale node1=3
```
### Scale worker nodes (Kubernetes)
```bash
kubectl scale deployment mcphub-node --replicas=5 -n mcphub
```


@@ -5,7 +5,6 @@
 import {
   isClusterEnabled,
   getClusterMode,
-  getCurrentNodeId,
   registerNode,
   updateNodeHeartbeat,
   getActiveNodes,
@@ -18,15 +17,16 @@ import {
   shutdownClusterService,
 } from '../../src/services/clusterService';
 import { ClusterNode } from '../../src/types/index';
+import * as configModule from '../../src/config/index.js';
 // Mock the config module
 jest.mock('../../src/config/index.js', () => ({
   loadSettings: jest.fn(),
 }));
-const { loadSettings } = require('../../src/config/index.js');
 describe('Cluster Service', () => {
+  const loadSettings = configModule.loadSettings as jest.MockedFunction<typeof configModule.loadSettings>;
   beforeEach(() => {
     jest.clearAllMocks();
   });