Files
deer-flow/docker/docker-compose.yaml
Purricane 835ba041f8 feat: add Claude Code OAuth and Codex CLI as LLM providers (#1166)
* feat: add Claude Code OAuth and Codex CLI providers

Port of bytedance/deer-flow#1136 from @solanian's feat/cli-oauth-providers branch. Carries the feature forward on top of current main without the original CLA-blocked commit metadata, while preserving attribution in the commit message for review.

* fix: harden CLI credential loading

Align Codex auth loading with the current ~/.codex/auth.json shape, make Docker credential mounts directory-based to avoid broken file binds on hosts without exported credential files, and add focused loader tests.

* refactor: tighten codex auth typing

Replace the temporary Any return type in CodexChatModel._load_codex_auth with the concrete CodexCliCredential type after the credential loader was stabilized.

* fix: load Claude Code OAuth from Keychain

Match Claude Code's macOS storage strategy more closely by checking the Keychain-backed credentials store before falling back to ~/.claude/.credentials.json. Keep explicit file overrides and add focused tests for the Keychain path.

* fix: require explicit Claude OAuth handoff

* style: format thread hooks reasoning request

* docs: document CLI-backed auth providers

* fix: address provider review feedback

* fix: harden provider edge cases

* Fix deferred tools, Codex message normalization, and local sandbox paths

* chore: narrow PR scope to OAuth providers

* chore: remove unrelated frontend changes

* chore: reapply OAuth branch frontend scope cleanup

* fix: preserve upload guards with reasoning effort wiring

---------

Co-authored-by: Willem Jiang <willem.jiang@gmail.com>
2026-03-22 22:39:50 +08:00

183 lines
7.0 KiB
YAML

# DeerFlow Production Environment
# Usage: make up
#
# Services:
# - nginx: Reverse proxy (port 2026, configurable via PORT env var)
# - frontend: Next.js production server
# - gateway: FastAPI Gateway API
# - langgraph: LangGraph production server (Dockerfile generated by langgraph dockerfile)
# - provisioner: (optional) Sandbox provisioner for Kubernetes mode
#
# Key environment variables (set via environment/.env or scripts/deploy.sh):
# DEER_FLOW_HOME — runtime data dir, default $REPO_ROOT/backend/.deer-flow
# DEER_FLOW_CONFIG_PATH — path to config.yaml
# DEER_FLOW_EXTENSIONS_CONFIG_PATH — path to extensions_config.json
# DEER_FLOW_DOCKER_SOCKET — Docker socket path, default /var/run/docker.sock
# DEER_FLOW_REPO_ROOT — repo root (used for skills host path in DooD)
# BETTER_AUTH_SECRET — required for frontend auth/session security
#
# LangSmith tracing is disabled by default (LANGCHAIN_TRACING_V2=false).
# Set LANGCHAIN_TRACING_V2=true and LANGSMITH_API_KEY in .env to enable it.
#
# Access: http://localhost:${PORT:-2026}
services:
  # ── Reverse Proxy ──────────────────────────────────────────────────────────
  nginx:
    image: nginx:alpine
    container_name: deer-flow-nginx
    ports:
      # Quoted host:container pair — avoids YAML number/sexagesimal parsing.
      - "${PORT:-2026}:2026"
    volumes:
      # NGINX_CONF lets deploy scripts swap in an alternate config variant.
      - ./nginx/${NGINX_CONF:-nginx.conf}:/etc/nginx/nginx.conf:ro
    depends_on:
      # NOTE(review): start-order only — no health conditions, so nginx may
      # briefly proxy to backends that are still booting.
      - frontend
      - gateway
      - langgraph
    networks:
      - deer-flow
    restart: unless-stopped

  # ── Frontend: Next.js Production ───────────────────────────────────────────
  frontend:
    build:
      context: ../
      dockerfile: frontend/Dockerfile
      target: prod
      args:
        PNPM_STORE_PATH: ${PNPM_STORE_PATH:-/root/.local/share/pnpm/store}
    container_name: deer-flow-frontend
    environment:
      - BETTER_AUTH_SECRET=${BETTER_AUTH_SECRET}
    env_file:
      - ../frontend/.env
    networks:
      - deer-flow
    restart: unless-stopped

  # ── Gateway API ────────────────────────────────────────────────────────────
  gateway:
    build:
      context: ../
      dockerfile: backend/Dockerfile
    container_name: deer-flow-gateway
    command: sh -c "cd backend && PYTHONPATH=. uv run uvicorn app.gateway.app:app --host 0.0.0.0 --port 8001 --workers 2"
    volumes:
      - ${DEER_FLOW_CONFIG_PATH}:/app/backend/config.yaml:ro
      - ${DEER_FLOW_EXTENSIONS_CONFIG_PATH}:/app/backend/extensions_config.json:ro
      - ../skills:/app/skills:ro
      - ${DEER_FLOW_HOME}:/app/backend/.deer-flow
      # DooD: AioSandboxProvider starts sandbox containers via host Docker daemon
      - ${DEER_FLOW_DOCKER_SOCKET}:/var/run/docker.sock
      # CLI auth directories for auto-auth (Claude Code + Codex CLI).
      # Long bind syntax + create_host_path so hosts without exported
      # credentials get an empty dir instead of a broken file bind.
      - type: bind
        source: ${HOME:?HOME must be set}/.claude
        target: /root/.claude
        read_only: true
        bind:
          create_host_path: true
      - type: bind
        source: ${HOME:?HOME must be set}/.codex
        target: /root/.codex
        read_only: true
        bind:
          create_host_path: true
    working_dir: /app
    environment:
      - CI=true
      - DEER_FLOW_HOME=/app/backend/.deer-flow
      # DooD path/network translation
      - DEER_FLOW_HOST_BASE_DIR=${DEER_FLOW_HOME}
      - DEER_FLOW_HOST_SKILLS_PATH=${DEER_FLOW_REPO_ROOT}/skills
      - DEER_FLOW_SANDBOX_HOST=host.docker.internal
    env_file:
      - ../.env
    extra_hosts:
      - "host.docker.internal:host-gateway"
    networks:
      - deer-flow
    restart: unless-stopped

  # ── LangGraph Server ───────────────────────────────────────────────────────
  # TODO: switch to langchain/langgraph-api (licensed) once a license key is available.
  # For now, use `langgraph dev` (no license required) with the standard backend image.
  langgraph:
    build:
      context: ../
      dockerfile: backend/Dockerfile
    container_name: deer-flow-langgraph
    command: sh -c "cd /app/backend && uv run langgraph dev --no-browser --allow-blocking --no-reload --host 0.0.0.0 --port 2024"
    volumes:
      # NOTE(review): config/extensions mount under /app here (vs /app/backend
      # in gateway) — matched by the DEER_FLOW_*_PATH env overrides below.
      - ${DEER_FLOW_CONFIG_PATH}:/app/config.yaml:ro
      - ${DEER_FLOW_EXTENSIONS_CONFIG_PATH}:/app/extensions_config.json:ro
      - ${DEER_FLOW_HOME}:/app/backend/.deer-flow
      - ../skills:/app/skills:ro
      - ../backend/.langgraph_api:/app/backend/.langgraph_api
      # DooD: same as gateway
      - ${DEER_FLOW_DOCKER_SOCKET}:/var/run/docker.sock
      # CLI auth directories for auto-auth (Claude Code + Codex CLI)
      - type: bind
        source: ${HOME:?HOME must be set}/.claude
        target: /root/.claude
        read_only: true
        bind:
          create_host_path: true
      - type: bind
        source: ${HOME:?HOME must be set}/.codex
        target: /root/.codex
        read_only: true
        bind:
          create_host_path: true
    environment:
      - CI=true
      - DEER_FLOW_HOME=/app/backend/.deer-flow
      - DEER_FLOW_CONFIG_PATH=/app/config.yaml
      - DEER_FLOW_EXTENSIONS_CONFIG_PATH=/app/extensions_config.json
      - DEER_FLOW_HOST_BASE_DIR=${DEER_FLOW_HOME}
      - DEER_FLOW_HOST_SKILLS_PATH=${DEER_FLOW_REPO_ROOT}/skills
      - DEER_FLOW_SANDBOX_HOST=host.docker.internal
      # Disable LangSmith tracing — LANGSMITH_API_KEY is not required.
      # Set LANGCHAIN_TRACING_V2=true and LANGSMITH_API_KEY in .env to enable.
      - LANGCHAIN_TRACING_V2=${LANGCHAIN_TRACING_V2:-false}
    env_file:
      - ../.env
    extra_hosts:
      - "host.docker.internal:host-gateway"
    networks:
      - deer-flow
    restart: unless-stopped

  # ── Sandbox Provisioner (optional, Kubernetes mode) ────────────────────────
  provisioner:
    profiles:
      - provisioner
    build:
      context: ./provisioner
      dockerfile: Dockerfile
    container_name: deer-flow-provisioner
    volumes:
      - ~/.kube/config:/root/.kube/config:ro
    environment:
      - K8S_NAMESPACE=deer-flow
      - SANDBOX_IMAGE=enterprise-public-cn-beijing.cr.volces.com/vefaas-public/all-in-one-sandbox:latest
      - SKILLS_HOST_PATH=${DEER_FLOW_REPO_ROOT}/skills
      - THREADS_HOST_PATH=${DEER_FLOW_HOME}/threads
      - KUBECONFIG_PATH=/root/.kube/config
      - NODE_HOST=host.docker.internal
      - K8S_API_SERVER=https://host.docker.internal:26443
    env_file:
      - ../.env
    extra_hosts:
      - "host.docker.internal:host-gateway"
    networks:
      - deer-flow
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8002/health"]
      interval: 10s
      timeout: 5s
      retries: 6
# Shared bridge network joined by every service above.
networks:
  deer-flow:
    driver: bridge