feat: add MCP API endpoint and enhance API documentation

Add new MCP configuration management endpoint and enhance API documentation
with detailed descriptions, examples, and OpenAPI support for better
developer experience.

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
hetaoBackend
2026-01-20 13:20:50 +08:00
parent d11763dcc8
commit 8434cf4c60
6 changed files with 307 additions and 11 deletions

View File

@@ -5,7 +5,7 @@ from contextlib import asynccontextmanager
from fastapi import FastAPI from fastapi import FastAPI
from src.gateway.config import get_gateway_config from src.gateway.config import get_gateway_config
from src.gateway.routers import artifacts, models from src.gateway.routers import artifacts, mcp, models
# Configure logging # Configure logging
logging.basicConfig( logging.basicConfig(
@@ -44,9 +44,46 @@ def create_app() -> FastAPI:
app = FastAPI( app = FastAPI(
title="DeerFlow API Gateway", title="DeerFlow API Gateway",
description="API Gateway for DeerFlow - provides custom endpoints (models, artifacts). LangGraph requests are handled by nginx.", description="""
## DeerFlow API Gateway
API Gateway for DeerFlow - A LangGraph-based AI agent backend with sandbox execution capabilities.
### Features
- **Models Management**: Query and retrieve available AI models
- **MCP Configuration**: Manage Model Context Protocol (MCP) server configurations
- **Artifacts**: Access thread artifacts and generated files
- **Health Monitoring**: System health check endpoints
### Architecture
LangGraph requests are handled by nginx reverse proxy.
This gateway provides custom endpoints for models, MCP configuration, and artifacts.
""",
version="0.1.0", version="0.1.0",
lifespan=lifespan, lifespan=lifespan,
docs_url="/docs",
redoc_url="/redoc",
openapi_url="/openapi.json",
openapi_tags=[
{
"name": "models",
"description": "Operations for querying available AI models and their configurations",
},
{
"name": "mcp",
"description": "Manage Model Context Protocol (MCP) server configurations",
},
{
"name": "artifacts",
"description": "Access and download thread artifacts and generated files",
},
{
"name": "health",
"description": "Health check and system status endpoints",
},
],
) )
# CORS is handled by nginx - no need for FastAPI middleware # CORS is handled by nginx - no need for FastAPI middleware
@@ -55,12 +92,19 @@ def create_app() -> FastAPI:
# Models API is mounted at /api/models # Models API is mounted at /api/models
app.include_router(models.router) app.include_router(models.router)
# MCP API is mounted at /api/mcp
app.include_router(mcp.router)
# Artifacts API is mounted at /api/threads/{thread_id}/artifacts # Artifacts API is mounted at /api/threads/{thread_id}/artifacts
app.include_router(artifacts.router) app.include_router(artifacts.router)
@app.get("/health") @app.get("/health", tags=["health"])
async def health_check() -> dict: async def health_check() -> dict:
"""Health check endpoint.""" """Health check endpoint.
Returns:
Service health status information.
"""
return {"status": "healthy", "service": "deer-flow-gateway"} return {"status": "healthy", "service": "deer-flow-gateway"}
return app return app

View File

@@ -1,3 +1,3 @@
from . import artifacts, models from . import artifacts, mcp, models
__all__ = ["artifacts", "models"] __all__ = ["artifacts", "mcp", "models"]

View File

@@ -59,19 +59,40 @@ def is_text_file_by_content(path: Path, sample_size: int = 8192) -> bool:
return False return False
@router.get("/threads/{thread_id}/artifacts/{path:path}") @router.get(
"/threads/{thread_id}/artifacts/{path:path}",
summary="Get Artifact File",
description="Retrieve an artifact file generated by the AI agent. Supports text, HTML, and binary files.",
)
async def get_artifact(thread_id: str, path: str, request: Request) -> FileResponse: async def get_artifact(thread_id: str, path: str, request: Request) -> FileResponse:
"""Get an artifact file by its path. """Get an artifact file by its path.
The endpoint automatically detects file types and returns appropriate content types.
Use the `?download=true` query parameter to force file download.
Args: Args:
thread_id: The thread ID. thread_id: The thread ID.
path: The artifact path with virtual prefix (e.g., mnt/user-data/outputs/file.txt). path: The artifact path with virtual prefix (e.g., mnt/user-data/outputs/file.txt).
request: FastAPI request object (automatically injected).
Returns: Returns:
The file content as a FileResponse. The file content as a FileResponse with appropriate content type:
- HTML files: Rendered as HTML
- Text files: Plain text with proper MIME type
- Binary files: Inline display with download option
Raises: Raises:
HTTPException: 404 if file not found, 403 if access denied. HTTPException:
- 400 if path is invalid or not a file
- 403 if access denied (path traversal detected)
- 404 if file not found
Query Parameters:
download (bool): If true, returns file as attachment for download
Example:
- Get HTML file: `/api/threads/abc123/artifacts/mnt/user-data/outputs/index.html`
- Download file: `/api/threads/abc123/artifacts/mnt/user-data/outputs/data.csv?download=true`
""" """
actual_path = _resolve_artifact_path(thread_id, path) actual_path = _resolve_artifact_path(thread_id, path)

View File

@@ -0,0 +1,148 @@
import json
import logging
from pathlib import Path
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel, Field
from src.config.mcp_config import McpConfig, get_mcp_config, reload_mcp_config
from src.mcp.cache import reset_mcp_tools_cache
# Module-level logger, standard per-module convention.
logger = logging.getLogger(__name__)

# All MCP endpoints live under the /api prefix and are grouped under the
# "mcp" OpenAPI tag.
router = APIRouter(prefix="/api", tags=["mcp"])
class McpServerConfigResponse(BaseModel):
    """Response model for a single MCP server configuration.

    Mirrors one entry under the on-disk "mcpServers" mapping; also reused
    as the per-server payload inside McpConfigUpdateRequest.
    """

    # Servers are enabled unless explicitly switched off.
    enabled: bool = Field(default=True, description="Whether this MCP server is enabled")
    # `command` is the only required field — all others have defaults.
    command: str = Field(..., description="Command to execute to start the MCP server")
    args: list[str] = Field(default_factory=list, description="Arguments to pass to the command")
    env: dict[str, str] = Field(default_factory=dict, description="Environment variables for the MCP server")
    description: str = Field(default="", description="Human-readable description of what this MCP server provides")
class McpConfigResponse(BaseModel):
    """Response model for the full MCP configuration.

    Returned by both GET and PUT /api/mcp/config.
    """

    # Keyed by server name (e.g. "github"); empty map when nothing is configured.
    mcp_servers: dict[str, McpServerConfigResponse] = Field(
        default_factory=dict,
        description="Map of MCP server name to configuration",
    )
class McpConfigUpdateRequest(BaseModel):
    """Request model for replacing the MCP configuration wholesale.

    Unlike the response model, `mcp_servers` is required here — PUT replaces
    the entire server map rather than patching individual entries.
    """

    mcp_servers: dict[str, McpServerConfigResponse] = Field(
        ...,
        description="Map of MCP server name to configuration",
    )
@router.get(
    "/mcp/config",
    response_model=McpConfigResponse,
    summary="Get MCP Configuration",
    description="Retrieve the current Model Context Protocol (MCP) server configurations.",
)
async def get_mcp_configuration() -> McpConfigResponse:
    """Return the MCP configuration currently loaded in the process.

    Reads the cached configuration and converts each server entry into its
    API response shape.

    Returns:
        The current MCP configuration with all servers.

    Example:
        ```json
        {
            "mcp_servers": {
                "github": {
                    "enabled": true,
                    "command": "npx",
                    "args": ["-y", "@modelcontextprotocol/server-github"],
                    "env": {"GITHUB_TOKEN": "ghp_xxx"},
                    "description": "GitHub MCP server for repository operations"
                }
            }
        }
        ```
    """
    current = get_mcp_config()
    servers: dict[str, McpServerConfigResponse] = {}
    for server_name, server_config in current.mcp_servers.items():
        servers[server_name] = McpServerConfigResponse(**server_config.model_dump())
    return McpConfigResponse(mcp_servers=servers)
@router.put(
    "/mcp/config",
    response_model=McpConfigResponse,
    summary="Update MCP Configuration",
    description="Update Model Context Protocol (MCP) server configurations and save to file.",
)
async def update_mcp_configuration(request: McpConfigUpdateRequest) -> McpConfigResponse:
    """Update the MCP configuration.

    This will:
    1. Save the new configuration to the mcp_config.json file (atomically)
    2. Reload the configuration cache
    3. Reset MCP tools cache to trigger reinitialization

    Args:
        request: The new MCP configuration to save.

    Returns:
        The updated MCP configuration, re-read after the reload so the
        response reflects what was actually persisted.

    Raises:
        HTTPException: 500 if the configuration file cannot be written.

    Example Request:
        ```json
        {
            "mcp_servers": {
                "github": {
                    "enabled": true,
                    "command": "npx",
                    "args": ["-y", "@modelcontextprotocol/server-github"],
                    "env": {"GITHUB_TOKEN": "$GITHUB_TOKEN"},
                    "description": "GitHub MCP server for repository operations"
                }
            }
        }
        ```
    """
    try:
        # Reuse the existing config file location when one is known.
        config_path = McpConfig.resolve_config_path()

        # If no config file exists, create one in the parent directory.
        # NOTE(review): assumes cwd is one level below the project root —
        # confirm against the deployment layout.
        if config_path is None:
            config_path = Path.cwd().parent / "mcp_config.json"
            logger.info(f"No existing MCP config found. Creating new config at: {config_path}")

        # Serialize in the on-disk "mcpServers" key format.
        config_data = {"mcpServers": {name: server.model_dump() for name, server in request.mcp_servers.items()}}

        # Write atomically: dump to a sibling temp file, then rename over the
        # target, so a crash mid-write cannot leave a truncated/corrupt config.
        tmp_path = config_path.with_name(config_path.name + ".tmp")
        with open(tmp_path, "w", encoding="utf-8") as f:
            json.dump(config_data, f, indent=2)
        tmp_path.replace(config_path)
        logger.info(f"MCP configuration updated and saved to: {config_path}")

        # Reload the configuration to update the cache.
        reload_mcp_config()

        # Reset MCP tools cache so they will be reinitialized with the new
        # config on next use.
        reset_mcp_tools_cache()
        logger.info("MCP tools cache reset - tools will be reinitialized on next use")

        # Return the configuration as actually reloaded, not the raw request.
        reloaded_config = get_mcp_config()
        return McpConfigResponse(
            mcp_servers={
                name: McpServerConfigResponse(**server.model_dump())
                for name, server in reloaded_config.mcp_servers.items()
            }
        )
    except Exception as e:
        logger.error(f"Failed to update MCP configuration: {e}", exc_info=True)
        # Chain the original exception so tracebacks show the root cause.
        raise HTTPException(status_code=500, detail=f"Failed to update MCP configuration: {str(e)}") from e

View File

@@ -21,12 +21,40 @@ class ModelsListResponse(BaseModel):
models: list[ModelResponse] models: list[ModelResponse]
@router.get("/models", response_model=ModelsListResponse) @router.get(
"/models",
response_model=ModelsListResponse,
summary="List All Models",
description="Retrieve a list of all available AI models configured in the system.",
)
async def list_models() -> ModelsListResponse: async def list_models() -> ModelsListResponse:
"""List all available models from configuration. """List all available models from configuration.
Returns model information suitable for frontend display, Returns model information suitable for frontend display,
excluding sensitive fields like API keys and internal configuration. excluding sensitive fields like API keys and internal configuration.
Returns:
A list of all configured models with their metadata.
Example Response:
```json
{
"models": [
{
"name": "gpt-4",
"display_name": "GPT-4",
"description": "OpenAI GPT-4 model",
"supports_thinking": false
},
{
"name": "claude-3-opus",
"display_name": "Claude 3 Opus",
"description": "Anthropic Claude 3 Opus model",
"supports_thinking": true
}
]
}
```
""" """
config = get_app_config() config = get_app_config()
models = [ models = [
@@ -41,7 +69,12 @@ async def list_models() -> ModelsListResponse:
return ModelsListResponse(models=models) return ModelsListResponse(models=models)
@router.get("/models/{model_name}", response_model=ModelResponse) @router.get(
"/models/{model_name}",
response_model=ModelResponse,
summary="Get Model Details",
description="Retrieve detailed information about a specific AI model by its name.",
)
async def get_model(model_name: str) -> ModelResponse: async def get_model(model_name: str) -> ModelResponse:
"""Get a specific model by name. """Get a specific model by name.
@@ -53,6 +86,16 @@ async def get_model(model_name: str) -> ModelResponse:
Raises: Raises:
HTTPException: 404 if model not found. HTTPException: 404 if model not found.
Example Response:
```json
{
"name": "gpt-4",
"display_name": "GPT-4",
"description": "OpenAI GPT-4 model",
"supports_thinking": false
}
```
""" """
config = get_app_config() config = get_app_config()
model = config.get_model_config(model_name) model = config.get_model_config(model_name)

View File

@@ -53,6 +53,16 @@ http {
proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header X-Forwarded-Proto $scheme;
} }
# Custom API: MCP configuration endpoint
location /api/mcp {
proxy_pass http://gateway;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
# Custom API: Artifacts endpoint # Custom API: Artifacts endpoint
location ~ ^/api/threads/[^/]+/artifacts { location ~ ^/api/threads/[^/]+/artifacts {
proxy_pass http://gateway; proxy_pass http://gateway;
@@ -63,6 +73,36 @@ http {
proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header X-Forwarded-Proto $scheme;
} }
# API Documentation: Swagger UI
location /docs {
proxy_pass http://gateway;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
# API Documentation: ReDoc
location /redoc {
proxy_pass http://gateway;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
# API Documentation: OpenAPI Schema
location /openapi.json {
proxy_pass http://gateway;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
# Health check endpoint (gateway) # Health check endpoint (gateway)
location /health { location /health {
proxy_pass http://gateway; proxy_pass http://gateway;