From 7978e05dc1709ba28a82740799efc145b5925a9d Mon Sep 17 00:00:00 2001 From: DanielWalnut <45447813+hetaoBackend@users.noreply.github.com> Date: Mon, 19 Jan 2026 23:23:38 +0800 Subject: [PATCH] feat: add nginx reverse proxy (#15) * docs: add nginx reverse proxy documentation Add comprehensive nginx configuration documentation to README including: - Production deployment instructions with step-by-step setup - Architecture diagram showing traffic routing between services - Nginx features: unified entry point, CORS handling, SSE support - Updated project structure with nginx.conf and service ports Co-Authored-By: Claude Sonnet 4.5 * feat: implement nginx --------- Co-authored-by: Claude Sonnet 4.5 --- README.md | 58 +++++++-- backend/Makefile | 14 ++- backend/src/gateway/app.py | 27 ++--- backend/src/gateway/config.py | 10 +- backend/src/gateway/routers/proxy.py | 168 --------------------------- nginx.conf | 102 ++++++++++++++++ 6 files changed, 177 insertions(+), 202 deletions(-) delete mode 100644 backend/src/gateway/routers/proxy.py create mode 100644 nginx.conf diff --git a/README.md b/README.md index ab905db..e5d1dff 100644 --- a/README.md +++ b/README.md @@ -31,18 +31,62 @@ A LangGraph-based AI agent backend with sandbox execution capabilities. make dev ``` +### Production Deployment + +For production environments, use nginx as a reverse proxy to route traffic between the gateway and LangGraph services: + +1. **Start backend services**: + ```bash + # Terminal 1: Start Gateway API (port 8001) + cd backend + python -m src.gateway.app + + # Terminal 2: Start LangGraph Server (port 2024) + cd backend + langgraph up + ``` + +2. **Start nginx**: + ```bash + nginx -c $(pwd)/nginx.conf + ``` + +3. 
**Access the application**: + - Main API: http://localhost:8000 + +The nginx configuration provides: +- Unified entry point on port 8000 +- Routes `/api/models`, `/api/threads/*/artifacts`, and `/health` to Gateway (8001) +- Routes all other requests to LangGraph (2024) +- Centralized CORS handling +- SSE/streaming support for real-time agent responses +- Optimized timeouts for long-running operations + ## Project Structure ``` deer-flow/ ├── config.example.yaml # Configuration template (copy to config.yaml) -├── backend/ # Backend application -│ ├── src/ # Source code -│ └── docs/ # Documentation -├── frontend/ # Frontend application -└── skills/ # Agent skills - ├── public/ # Public skills - └── custom/ # Custom skills +├── nginx.conf # Nginx reverse proxy configuration +├── backend/ # Backend application +│ ├── src/ # Source code +│ │ ├── gateway/ # Gateway API (port 8001) +│ │ └── agents/ # LangGraph agents (port 2024) +│ └── docs/ # Documentation +├── frontend/ # Frontend application +└── skills/ # Agent skills + ├── public/ # Public skills + └── custom/ # Custom skills +``` + +### Architecture + +``` +Client + ↓ +Nginx (port 8000) ← Unified entry point + ├→ Gateway API (port 8001) ← /api/models, /api/threads/*/artifacts, /health + └→ LangGraph Server (port 2024) ← All other requests (agent interactions) ``` ## Documentation diff --git a/backend/Makefile b/backend/Makefile index 47ebe53..07537aa 100644 --- a/backend/Makefile +++ b/backend/Makefile @@ -5,12 +5,22 @@ dev: uv run langgraph dev --no-browser --allow-blocking --no-reload gateway: - uv run uvicorn src.gateway.app:app --host 0.0.0.0 --port 8000 + uv run uvicorn src.gateway.app:app --host 0.0.0.0 --port 8001 + +nginx: + nginx -c $(PWD)/../nginx.conf -p $(PWD)/.. serve: + @echo "Stopping existing services if any..." + @-pkill -f "langgraph dev" 2>/dev/null || true + @-pkill -f "uvicorn src.gateway.app:app" 2>/dev/null || true + @-nginx -c $(PWD)/../nginx.conf -p $(PWD)/.. 
-s quit 2>/dev/null || true + @sleep 1 + @echo "Starting services..." @trap 'kill 0' EXIT; \ uv run langgraph dev --no-browser --allow-blocking --no-reload & \ - sleep 3 && uv run uvicorn src.gateway.app:app --host 0.0.0.0 --port 8000 & \ + sleep 3 && uv run uvicorn src.gateway.app:app --host 0.0.0.0 --port 8001 & \ + sleep 1 && nginx -c $(PWD)/../nginx.conf -p $(PWD)/.. & \ wait lint: diff --git a/backend/src/gateway/app.py b/backend/src/gateway/app.py index 8c3fd0f..614b879 100644 --- a/backend/src/gateway/app.py +++ b/backend/src/gateway/app.py @@ -3,10 +3,16 @@ from collections.abc import AsyncGenerator from contextlib import asynccontextmanager from fastapi import FastAPI -from fastapi.middleware.cors import CORSMiddleware from src.gateway.config import get_gateway_config -from src.gateway.routers import artifacts, models, proxy +from src.gateway.routers import artifacts, models + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", +) logger = logging.getLogger(__name__) @@ -16,7 +22,6 @@ async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]: """Application lifespan handler.""" config = get_gateway_config() logger.info(f"Starting API Gateway on {config.host}:{config.port}") - logger.info(f"Proxying to LangGraph server at {config.langgraph_url}") # Initialize MCP tools at startup try: @@ -28,8 +33,6 @@ async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]: yield logger.info("Shutting down API Gateway") - # Close the shared HTTP client - await proxy.close_http_client() def create_app() -> FastAPI: @@ -41,19 +44,12 @@ def create_app() -> FastAPI: app = FastAPI( title="DeerFlow API Gateway", - description="API Gateway for DeerFlow - proxies to LangGraph Server and provides custom endpoints", + description="API Gateway for DeerFlow - provides custom endpoints (models, artifacts). 
LangGraph requests are handled by nginx.", version="0.1.0", lifespan=lifespan, ) - # Add CORS middleware - app.add_middleware( - CORSMiddleware, - allow_origins=["*"], - allow_credentials=True, - allow_methods=["*"], - allow_headers=["*"], - ) + # CORS is handled by nginx - no need for FastAPI middleware # Include routers # Models API is mounted at /api/models @@ -62,9 +58,6 @@ def create_app() -> FastAPI: # Artifacts API is mounted at /api/threads/{thread_id}/artifacts app.include_router(artifacts.router) - # Proxy router handles all LangGraph paths (must be last due to catch-all) - app.include_router(proxy.router) - @app.get("/health") async def health_check() -> dict: """Health check endpoint.""" diff --git a/backend/src/gateway/config.py b/backend/src/gateway/config.py index be65bb2..66f1f2a 100644 --- a/backend/src/gateway/config.py +++ b/backend/src/gateway/config.py @@ -7,11 +7,8 @@ class GatewayConfig(BaseModel): """Configuration for the API Gateway.""" host: str = Field(default="0.0.0.0", description="Host to bind the gateway server") - port: int = Field(default=8000, description="Port to bind the gateway server") - langgraph_url: str = Field(default="http://localhost:2024", description="URL of the LangGraph server to proxy requests to") + port: int = Field(default=8001, description="Port to bind the gateway server") cors_origins: list[str] = Field(default_factory=lambda: ["http://localhost:3000"], description="Allowed CORS origins") - proxy_timeout: float = Field(default=300.0, description="Timeout for proxy requests in seconds") - stream_timeout: float = Field(default=600.0, description="Timeout for streaming requests in seconds") _gateway_config: GatewayConfig | None = None @@ -24,10 +21,7 @@ def get_gateway_config() -> GatewayConfig: cors_origins_str = os.getenv("CORS_ORIGINS", "http://localhost:3000") _gateway_config = GatewayConfig( host=os.getenv("GATEWAY_HOST", "0.0.0.0"), - port=int(os.getenv("GATEWAY_PORT", "8000")), - 
langgraph_url=os.getenv("LANGGRAPH_URL", "http://localhost:2024"), + port=int(os.getenv("GATEWAY_PORT", "8001")), cors_origins=cors_origins_str.split(","), - proxy_timeout=float(os.getenv("PROXY_TIMEOUT", "300")), - stream_timeout=float(os.getenv("STREAM_TIMEOUT", "600")), ) return _gateway_config diff --git a/backend/src/gateway/routers/proxy.py b/backend/src/gateway/routers/proxy.py deleted file mode 100644 index 883e223..0000000 --- a/backend/src/gateway/routers/proxy.py +++ /dev/null @@ -1,168 +0,0 @@ -import logging -from collections.abc import AsyncGenerator - -import httpx -from fastapi import APIRouter, Request, Response -from fastapi.responses import StreamingResponse - -from src.gateway.config import get_gateway_config - -logger = logging.getLogger(__name__) - -router = APIRouter(tags=["proxy"]) - -# Shared httpx client for all proxy requests -# This avoids creating/closing clients during streaming responses -_http_client: httpx.AsyncClient | None = None - - -def get_http_client() -> httpx.AsyncClient: - """Get or create the shared HTTP client. - - Returns: - The shared httpx AsyncClient instance. - """ - global _http_client - if _http_client is None: - _http_client = httpx.AsyncClient() - return _http_client - - -async def close_http_client() -> None: - """Close the shared HTTP client if it exists.""" - global _http_client - if _http_client is not None: - await _http_client.aclose() - _http_client = None - - -# Hop-by-hop headers that should not be forwarded -EXCLUDED_HEADERS = { - "host", - "connection", - "keep-alive", - "proxy-authenticate", - "proxy-authorization", - "te", - "trailers", - "transfer-encoding", - "upgrade", - "content-length", -} - - -async def stream_sse_response(stream_ctx, response: httpx.Response) -> AsyncGenerator[bytes, None]: - """Stream SSE response from the upstream server. - - Args: - stream_ctx: The httpx stream context manager. - response: The httpx streaming response. - - Yields: - Response chunks. 
- """ - try: - async for chunk in response.aiter_bytes(): - yield chunk - finally: - # Ensure stream is properly closed when done - await stream_ctx.__aexit__(None, None, None) - - -async def proxy_request(request: Request, path: str) -> Response | StreamingResponse: - """Proxy a request to the LangGraph server. - - Args: - request: The incoming FastAPI request. - path: The path to proxy to. - - Returns: - Response or StreamingResponse depending on content type. - """ - config = get_gateway_config() - target_url = f"{config.langgraph_url}/{path}" - - # Preserve query parameters - if request.url.query: - target_url = f"{target_url}?{request.url.query}" - - # Prepare headers (exclude hop-by-hop headers) - headers = {key: value for key, value in request.headers.items() if key.lower() not in EXCLUDED_HEADERS} - - # Read request body for non-GET requests - body = None - if request.method not in ("GET", "HEAD"): - body = await request.body() - - client = get_http_client() - - try: - # Use streaming request to avoid waiting for full response - # This allows us to check headers immediately and stream SSE without delay - stream_ctx = client.stream( - method=request.method, - url=target_url, - headers=headers, - content=body, - timeout=config.stream_timeout, - ) - - response = await stream_ctx.__aenter__() - - content_type = response.headers.get("content-type", "") - - # Check if response is SSE (Server-Sent Events) - if "text/event-stream" in content_type: - # For SSE, stream the response immediately - return StreamingResponse( - stream_sse_response(stream_ctx, response), - status_code=response.status_code, - media_type="text/event-stream", - headers={ - "Cache-Control": "no-cache", - "Connection": "keep-alive", - "X-Accel-Buffering": "no", - }, - ) - - # For non-SSE responses, read full content and close the stream - content = await response.aread() - await stream_ctx.__aexit__(None, None, None) - - # Prepare response headers - response_headers = dict(response.headers) - 
for header in ["transfer-encoding", "connection", "keep-alive"]: - response_headers.pop(header, None) - - return Response( - content=content, - status_code=response.status_code, - headers=response_headers, - ) - - except httpx.TimeoutException: - logger.error(f"Proxy request to {target_url} timed out") - return Response( - content='{"error": "Proxy request timed out"}', - status_code=504, - media_type="application/json", - ) - except httpx.RequestError as e: - logger.error(f"Proxy request to {target_url} failed: {e}") - return Response( - content='{"error": "Proxy request failed"}', - status_code=502, - media_type="application/json", - ) - - -@router.api_route( - "/{path:path}", - methods=["GET", "POST", "PUT", "DELETE", "PATCH"], -) -async def proxy_langgraph(request: Request, path: str) -> Response: - """Proxy all requests to LangGraph server. - - This catch-all route forwards requests to the LangGraph server. - """ - return await proxy_request(request, path) diff --git a/nginx.conf b/nginx.conf new file mode 100644 index 0000000..12e2527 --- /dev/null +++ b/nginx.conf @@ -0,0 +1,102 @@ +events { + worker_connections 1024; +} + +http { + # Basic settings + sendfile on; + tcp_nopush on; + tcp_nodelay on; + keepalive_timeout 65; + types_hash_max_size 2048; + + # Logging + access_log /dev/stdout; + error_log /dev/stderr; + + # Upstream servers + upstream gateway { + server localhost:8001; + } + + upstream langgraph { + server localhost:2024; + } + + server { + listen 8000; + server_name _; + + # Hide CORS headers from upstream to prevent duplicates + proxy_hide_header 'Access-Control-Allow-Origin'; + proxy_hide_header 'Access-Control-Allow-Methods'; + proxy_hide_header 'Access-Control-Allow-Headers'; + proxy_hide_header 'Access-Control-Allow-Credentials'; + + # CORS headers for all responses (nginx handles CORS centrally) + add_header 'Access-Control-Allow-Origin' '*' always; + add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, PATCH, OPTIONS' 
always; + add_header 'Access-Control-Allow-Headers' '*' always; + + # Handle OPTIONS requests (CORS preflight) + if ($request_method = 'OPTIONS') { + return 204; + } + + # Custom API: Models endpoint + location /api/models { + proxy_pass http://gateway; + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + + # Custom API: Artifacts endpoint + location ~ ^/api/threads/[^/]+/artifacts { + proxy_pass http://gateway; + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + + # Health check endpoint (gateway) + location /health { + proxy_pass http://gateway; + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + + # All other requests go to LangGraph + location / { + proxy_pass http://langgraph; + proxy_http_version 1.1; + + # Headers + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Connection ''; + + # SSE/Streaming support + proxy_buffering off; + proxy_cache off; + proxy_set_header X-Accel-Buffering no; + + # Timeouts for long-running requests + proxy_connect_timeout 600s; + proxy_send_timeout 600s; + proxy_read_timeout 600s; + + # Chunked transfer encoding + chunked_transfer_encoding on; + } + } +}