Mirror of https://gitee.com/wanwujie/deer-flow, synced 2026-05-03 18:50:43 +08:00
refactor: split backend into harness (deerflow.*) and app (app.*) (#1131)
* refactor: extract shared utils to break harness→app cross-layer imports

  Move _validate_skill_frontmatter to src/skills/validation.py and CONVERTIBLE_EXTENSIONS + convert_file_to_markdown to src/utils/file_conversion.py. This eliminates the two reverse dependencies from client.py (harness layer) into gateway/routers/ (app layer), preparing for the harness/app package split.

  Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* refactor: split backend/src into harness (deerflow.*) and app (app.*)

  Physically split the monolithic backend/src/ package into two layers:

  - **Harness** (`packages/harness/deerflow/`): publishable agent framework package with import prefix `deerflow.*`. Contains agents, sandbox, tools, models, MCP, skills, config, and all core infrastructure.
  - **App** (`app/`): unpublished application code with import prefix `app.*`. Contains gateway (FastAPI REST API) and channels (IM integrations).

  Key changes:

  - Move 13 harness modules to packages/harness/deerflow/ via git mv
  - Move gateway + channels to app/ via git mv
  - Rename all imports: src.* → deerflow.* (harness) / app.* (app layer)
  - Set up uv workspace with deerflow-harness as workspace member
  - Update langgraph.json, config.example.yaml, all scripts, Docker files
  - Add build-system (hatchling) to harness pyproject.toml
  - Add PYTHONPATH=. to gateway startup commands for app.* resolution
  - Update ruff.toml with known-first-party for import sorting
  - Update all documentation to reflect the new directory structure

  Boundary rule enforced: harness code never imports from app. All 429 tests pass. Lint clean.

  Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* chore: add harness→app boundary check test and update docs

  Add test_harness_boundary.py, which scans all Python files in packages/harness/deerflow/ and fails if any `from app.*` or `import app.*` statement is found. This enforces the architectural rule that the harness layer never depends on the app layer.

  Update CLAUDE.md to document the harness/app split architecture, import conventions, and the boundary enforcement test.

  Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* feat: add config versioning with auto-upgrade on startup

  When the config.example.yaml schema changes, developers' local config.yaml files can silently become outdated. This adds a config_version field and an auto-upgrade mechanism so breaking changes (like the src.* → deerflow.* renames) are applied automatically before services start.

  - Add config_version: 1 to config.example.yaml
  - Add startup version check warning in AppConfig.from_file()
  - Add scripts/config-upgrade.sh with a migration registry for value replacements
  - Add `make config-upgrade` target
  - Auto-run config-upgrade in serve.sh and start-daemon.sh before starting services
  - Add config error hints in service failure messages

  Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix comments

* fix: update src.* import in test_sandbox_tools_security to deerflow.*

  Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: handle empty config and search parent dirs for config.example.yaml

  Address Copilot review comments on PR #1131:

  - Guard against yaml.safe_load() returning None for empty config files
  - Search parent directories for config.example.yaml instead of only looking next to config.yaml, fixing detection in common setups

  Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: correct skills root path depth and config_version type coercion

  - loader.py: fix get_skills_root_path() to use 5 parent levels (was 3); after the harness split the file lives at packages/harness/deerflow/skills/, so parent×3 resolved to backend/packages/harness/ instead of backend/
  - app_config.py: coerce config_version to int() before comparison in _check_config_version() to prevent a TypeError when YAML stores the value as a string (e.g. config_version: "1")
  - tests: add regression tests for both fixes

  Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>

* fix: update test imports from src.* to deerflow.*/app.* after harness refactor

  Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
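The boundary rule above is mechanically enforceable with a simple scan-and-assert test. The sketch below is only an illustration of the idea, not the actual test_harness_boundary.py from this PR (the path constant, regex, and test name are assumptions):

```python
import re
from pathlib import Path

# Assumed layout: run from backend/, with harness sources under
# packages/harness/deerflow/ as described in the commit message above.
HARNESS_ROOT = Path("packages/harness/deerflow")

# Match "from app.x import ..." or "import app.x" at the start of a line.
APP_IMPORT = re.compile(r"^\s*(?:from\s+app[.\s]|import\s+app[.\s])", re.MULTILINE)


def test_harness_never_imports_app() -> None:
    """Fail if any harness module depends on the app layer."""
    offenders = [
        str(py_file)
        for py_file in HARNESS_ROOT.rglob("*.py")
        if APP_IMPORT.search(py_file.read_text(encoding="utf-8"))
    ]
    assert not offenders, f"Harness modules import from the app layer: {offenders}"
```

Any harness module that reintroduces an `app.*` import then shows up in the assertion message when the test suite runs.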
4
backend/app/gateway/__init__.py
Normal file
@@ -0,0 +1,4 @@
from .app import app, create_app
from .config import GatewayConfig, get_gateway_config

__all__ = ["app", "create_app", "GatewayConfig", "get_gateway_config"]
192
backend/app/gateway/app.py
Normal file
@@ -0,0 +1,192 @@
import logging
from collections.abc import AsyncGenerator
from contextlib import asynccontextmanager

from fastapi import FastAPI

from app.gateway.config import get_gateway_config
from app.gateway.routers import (
    agents,
    artifacts,
    channels,
    mcp,
    memory,
    models,
    skills,
    suggestions,
    uploads,
)
from deerflow.config.app_config import get_app_config

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)

logger = logging.getLogger(__name__)


@asynccontextmanager
async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
    """Application lifespan handler."""

    # Load config and check necessary environment variables at startup
    try:
        get_app_config()
        logger.info("Configuration loaded successfully")
    except Exception as e:
        error_msg = f"Failed to load configuration during gateway startup: {e}"
        logger.exception(error_msg)
        raise RuntimeError(error_msg) from e
    config = get_gateway_config()
    logger.info(f"Starting API Gateway on {config.host}:{config.port}")

    # NOTE: MCP tools initialization is NOT done here because:
    # 1. Gateway doesn't use MCP tools - they are used by Agents in the LangGraph Server
    # 2. Gateway and LangGraph Server are separate processes with independent caches
    # MCP tools are lazily initialized in LangGraph Server when first needed

    # Start IM channel service if any channels are configured
    try:
        from app.channels.service import start_channel_service

        channel_service = await start_channel_service()
        logger.info("Channel service started: %s", channel_service.get_status())
    except Exception:
        logger.exception("No IM channels configured or channel service failed to start")

    yield

    # Stop channel service on shutdown
    try:
        from app.channels.service import stop_channel_service

        await stop_channel_service()
    except Exception:
        logger.exception("Failed to stop channel service")
    logger.info("Shutting down API Gateway")


def create_app() -> FastAPI:
    """Create and configure the FastAPI application.

    Returns:
        Configured FastAPI application instance.
    """
    app = FastAPI(
        title="DeerFlow API Gateway",
        description="""
## DeerFlow API Gateway

API Gateway for DeerFlow - A LangGraph-based AI agent backend with sandbox execution capabilities.

### Features

- **Models Management**: Query and retrieve available AI models
- **MCP Configuration**: Manage Model Context Protocol (MCP) server configurations
- **Memory Management**: Access and manage global memory data for personalized conversations
- **Skills Management**: Query and manage skills and their enabled status
- **Artifacts**: Access thread artifacts and generated files
- **Health Monitoring**: System health check endpoints

### Architecture

LangGraph requests are handled by nginx reverse proxy.
This gateway provides custom endpoints for models, MCP configuration, skills, and artifacts.
""",
        version="0.1.0",
        lifespan=lifespan,
        docs_url="/docs",
        redoc_url="/redoc",
        openapi_url="/openapi.json",
        openapi_tags=[
            {
                "name": "models",
                "description": "Operations for querying available AI models and their configurations",
            },
            {
                "name": "mcp",
                "description": "Manage Model Context Protocol (MCP) server configurations",
            },
            {
                "name": "memory",
                "description": "Access and manage global memory data for personalized conversations",
            },
            {
                "name": "skills",
                "description": "Manage skills and their configurations",
            },
            {
                "name": "artifacts",
                "description": "Access and download thread artifacts and generated files",
            },
            {
                "name": "uploads",
                "description": "Upload and manage user files for threads",
            },
            {
                "name": "agents",
                "description": "Create and manage custom agents with per-agent config and prompts",
            },
            {
                "name": "suggestions",
                "description": "Generate follow-up question suggestions for conversations",
            },
            {
                "name": "channels",
                "description": "Manage IM channel integrations (Feishu, Slack, Telegram)",
            },
            {
                "name": "health",
                "description": "Health check and system status endpoints",
            },
        ],
    )

    # CORS is handled by nginx - no need for FastAPI middleware

    # Include routers
    # Models API is mounted at /api/models
    app.include_router(models.router)

    # MCP API is mounted at /api/mcp
    app.include_router(mcp.router)

    # Memory API is mounted at /api/memory
    app.include_router(memory.router)

    # Skills API is mounted at /api/skills
    app.include_router(skills.router)

    # Artifacts API is mounted at /api/threads/{thread_id}/artifacts
    app.include_router(artifacts.router)

    # Uploads API is mounted at /api/threads/{thread_id}/uploads
    app.include_router(uploads.router)

    # Agents API is mounted at /api/agents
    app.include_router(agents.router)

    # Suggestions API is mounted at /api/threads/{thread_id}/suggestions
    app.include_router(suggestions.router)

    # Channels API is mounted at /api/channels
    app.include_router(channels.router)

    @app.get("/health", tags=["health"])
    async def health_check() -> dict:
        """Health check endpoint.

        Returns:
            Service health status information.
        """
        return {"status": "healthy", "service": "deer-flow-gateway"}

    return app


# Create app instance for uvicorn
app = create_app()
27
backend/app/gateway/config.py
Normal file
@@ -0,0 +1,27 @@
import os

from pydantic import BaseModel, Field


class GatewayConfig(BaseModel):
    """Configuration for the API Gateway."""

    host: str = Field(default="0.0.0.0", description="Host to bind the gateway server")
    port: int = Field(default=8001, description="Port to bind the gateway server")
    cors_origins: list[str] = Field(default_factory=lambda: ["http://localhost:3000"], description="Allowed CORS origins")


_gateway_config: GatewayConfig | None = None


def get_gateway_config() -> GatewayConfig:
    """Get gateway config, loading from environment if available."""
    global _gateway_config
    if _gateway_config is None:
        cors_origins_str = os.getenv("CORS_ORIGINS", "http://localhost:3000")
        _gateway_config = GatewayConfig(
            host=os.getenv("GATEWAY_HOST", "0.0.0.0"),
            port=int(os.getenv("GATEWAY_PORT", "8001")),
            cors_origins=cors_origins_str.split(","),
        )
    return _gateway_config
28
backend/app/gateway/path_utils.py
Normal file
@@ -0,0 +1,28 @@
"""Shared path resolution for thread virtual paths (e.g. mnt/user-data/outputs/...)."""

from pathlib import Path

from fastapi import HTTPException

from deerflow.config.paths import get_paths


def resolve_thread_virtual_path(thread_id: str, virtual_path: str) -> Path:
    """Resolve a virtual path to the actual filesystem path under thread user-data.

    Args:
        thread_id: The thread ID.
        virtual_path: The virtual path as seen inside the sandbox
            (e.g., /mnt/user-data/outputs/file.txt).

    Returns:
        The resolved filesystem path.

    Raises:
        HTTPException: If the path is invalid or outside allowed directories.
    """
    try:
        return get_paths().resolve_virtual_path(thread_id, virtual_path)
    except ValueError as e:
        status = 403 if "traversal" in str(e) else 400
        raise HTTPException(status_code=status, detail=str(e))
3
backend/app/gateway/routers/__init__.py
Normal file
@@ -0,0 +1,3 @@
from . import artifacts, mcp, models, skills, suggestions, uploads

__all__ = ["artifacts", "mcp", "models", "skills", "suggestions", "uploads"]
383
backend/app/gateway/routers/agents.py
Normal file
@@ -0,0 +1,383 @@
|
||||
"""CRUD API for custom agents."""
|
||||
|
||||
import logging
|
||||
import re
|
||||
import shutil
|
||||
|
||||
import yaml
|
||||
from fastapi import APIRouter, HTTPException
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from deerflow.config.agents_config import AgentConfig, list_custom_agents, load_agent_config, load_agent_soul
|
||||
from deerflow.config.paths import get_paths
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
router = APIRouter(prefix="/api", tags=["agents"])
|
||||
|
||||
AGENT_NAME_PATTERN = re.compile(r"^[A-Za-z0-9-]+$")
|
||||
|
||||
|
||||
class AgentResponse(BaseModel):
|
||||
"""Response model for a custom agent."""
|
||||
|
||||
name: str = Field(..., description="Agent name (hyphen-case)")
|
||||
description: str = Field(default="", description="Agent description")
|
||||
model: str | None = Field(default=None, description="Optional model override")
|
||||
tool_groups: list[str] | None = Field(default=None, description="Optional tool group whitelist")
|
||||
soul: str | None = Field(default=None, description="SOUL.md content (included on GET /{name})")
|
||||
|
||||
|
||||
class AgentsListResponse(BaseModel):
|
||||
"""Response model for listing all custom agents."""
|
||||
|
||||
agents: list[AgentResponse]
|
||||
|
||||
|
||||
class AgentCreateRequest(BaseModel):
|
||||
"""Request body for creating a custom agent."""
|
||||
|
||||
name: str = Field(..., description="Agent name (must match ^[A-Za-z0-9-]+$, stored as lowercase)")
|
||||
description: str = Field(default="", description="Agent description")
|
||||
model: str | None = Field(default=None, description="Optional model override")
|
||||
tool_groups: list[str] | None = Field(default=None, description="Optional tool group whitelist")
|
||||
soul: str = Field(default="", description="SOUL.md content — agent personality and behavioral guardrails")
|
||||
|
||||
|
||||
class AgentUpdateRequest(BaseModel):
|
||||
"""Request body for updating a custom agent."""
|
||||
|
||||
description: str | None = Field(default=None, description="Updated description")
|
||||
model: str | None = Field(default=None, description="Updated model override")
|
||||
tool_groups: list[str] | None = Field(default=None, description="Updated tool group whitelist")
|
||||
soul: str | None = Field(default=None, description="Updated SOUL.md content")
|
||||
|
||||
|
||||
def _validate_agent_name(name: str) -> None:
|
||||
"""Validate agent name against allowed pattern.
|
||||
|
||||
Args:
|
||||
name: The agent name to validate.
|
||||
|
||||
Raises:
|
||||
HTTPException: 422 if the name is invalid.
|
||||
"""
|
||||
if not AGENT_NAME_PATTERN.match(name):
|
||||
raise HTTPException(
|
||||
status_code=422,
|
||||
detail=f"Invalid agent name '{name}'. Must match ^[A-Za-z0-9-]+$ (letters, digits, and hyphens only).",
|
||||
)
|
||||
|
||||
|
||||
def _normalize_agent_name(name: str) -> str:
|
||||
"""Normalize agent name to lowercase for filesystem storage."""
|
||||
return name.lower()
|
||||
|
||||
|
||||
def _agent_config_to_response(agent_cfg: AgentConfig, include_soul: bool = False) -> AgentResponse:
|
||||
"""Convert AgentConfig to AgentResponse."""
|
||||
soul: str | None = None
|
||||
if include_soul:
|
||||
soul = load_agent_soul(agent_cfg.name) or ""
|
||||
|
||||
return AgentResponse(
|
||||
name=agent_cfg.name,
|
||||
description=agent_cfg.description,
|
||||
model=agent_cfg.model,
|
||||
tool_groups=agent_cfg.tool_groups,
|
||||
soul=soul,
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/agents",
|
||||
response_model=AgentsListResponse,
|
||||
summary="List Custom Agents",
|
||||
description="List all custom agents available in the agents directory.",
|
||||
)
|
||||
async def list_agents() -> AgentsListResponse:
|
||||
"""List all custom agents.
|
||||
|
||||
Returns:
|
||||
List of all custom agents with their metadata (without soul content).
|
||||
"""
|
||||
try:
|
||||
agents = list_custom_agents()
|
||||
return AgentsListResponse(agents=[_agent_config_to_response(a) for a in agents])
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to list agents: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to list agents: {str(e)}")
|
||||
|
||||
|
||||
@router.get(
|
||||
"/agents/check",
|
||||
summary="Check Agent Name",
|
||||
description="Validate an agent name and check if it is available (case-insensitive).",
|
||||
)
|
||||
async def check_agent_name(name: str) -> dict:
|
||||
"""Check whether an agent name is valid and not yet taken.
|
||||
|
||||
Args:
|
||||
name: The agent name to check.
|
||||
|
||||
Returns:
|
||||
``{"available": true/false, "name": "<normalized>"}``
|
||||
|
||||
Raises:
|
||||
HTTPException: 422 if the name is invalid.
|
||||
"""
|
||||
_validate_agent_name(name)
|
||||
normalized = _normalize_agent_name(name)
|
||||
available = not get_paths().agent_dir(normalized).exists()
|
||||
return {"available": available, "name": normalized}
|
||||
|
||||
|
||||
@router.get(
|
||||
"/agents/{name}",
|
||||
response_model=AgentResponse,
|
||||
summary="Get Custom Agent",
|
||||
description="Retrieve details and SOUL.md content for a specific custom agent.",
|
||||
)
|
||||
async def get_agent(name: str) -> AgentResponse:
|
||||
"""Get a specific custom agent by name.
|
||||
|
||||
Args:
|
||||
name: The agent name.
|
||||
|
||||
Returns:
|
||||
Agent details including SOUL.md content.
|
||||
|
||||
Raises:
|
||||
HTTPException: 404 if agent not found.
|
||||
"""
|
||||
_validate_agent_name(name)
|
||||
name = _normalize_agent_name(name)
|
||||
|
||||
try:
|
||||
agent_cfg = load_agent_config(name)
|
||||
return _agent_config_to_response(agent_cfg, include_soul=True)
|
||||
except FileNotFoundError:
|
||||
raise HTTPException(status_code=404, detail=f"Agent '{name}' not found")
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to get agent '{name}': {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to get agent: {str(e)}")
|
||||
|
||||
|
||||
@router.post(
|
||||
"/agents",
|
||||
response_model=AgentResponse,
|
||||
status_code=201,
|
||||
summary="Create Custom Agent",
|
||||
description="Create a new custom agent with its config and SOUL.md.",
|
||||
)
|
||||
async def create_agent_endpoint(request: AgentCreateRequest) -> AgentResponse:
|
||||
"""Create a new custom agent.
|
||||
|
||||
Args:
|
||||
request: The agent creation request.
|
||||
|
||||
Returns:
|
||||
The created agent details.
|
||||
|
||||
Raises:
|
||||
HTTPException: 409 if agent already exists, 422 if name is invalid.
|
||||
"""
|
||||
_validate_agent_name(request.name)
|
||||
normalized_name = _normalize_agent_name(request.name)
|
||||
|
||||
agent_dir = get_paths().agent_dir(normalized_name)
|
||||
|
||||
if agent_dir.exists():
|
||||
raise HTTPException(status_code=409, detail=f"Agent '{normalized_name}' already exists")
|
||||
|
||||
try:
|
||||
agent_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Write config.yaml
|
||||
config_data: dict = {"name": normalized_name}
|
||||
if request.description:
|
||||
config_data["description"] = request.description
|
||||
if request.model is not None:
|
||||
config_data["model"] = request.model
|
||||
if request.tool_groups is not None:
|
||||
config_data["tool_groups"] = request.tool_groups
|
||||
|
||||
config_file = agent_dir / "config.yaml"
|
||||
with open(config_file, "w", encoding="utf-8") as f:
|
||||
yaml.dump(config_data, f, default_flow_style=False, allow_unicode=True)
|
||||
|
||||
# Write SOUL.md
|
||||
soul_file = agent_dir / "SOUL.md"
|
||||
soul_file.write_text(request.soul, encoding="utf-8")
|
||||
|
||||
logger.info(f"Created agent '{normalized_name}' at {agent_dir}")
|
||||
|
||||
agent_cfg = load_agent_config(normalized_name)
|
||||
return _agent_config_to_response(agent_cfg, include_soul=True)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
# Clean up on failure
|
||||
if agent_dir.exists():
|
||||
shutil.rmtree(agent_dir)
|
||||
logger.error(f"Failed to create agent '{request.name}': {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to create agent: {str(e)}")
|
||||
|
||||
|
||||
@router.put(
|
||||
"/agents/{name}",
|
||||
response_model=AgentResponse,
|
||||
summary="Update Custom Agent",
|
||||
description="Update an existing custom agent's config and/or SOUL.md.",
|
||||
)
|
||||
async def update_agent(name: str, request: AgentUpdateRequest) -> AgentResponse:
|
||||
"""Update an existing custom agent.
|
||||
|
||||
Args:
|
||||
name: The agent name.
|
||||
request: The update request (all fields optional).
|
||||
|
||||
Returns:
|
||||
The updated agent details.
|
||||
|
||||
Raises:
|
||||
HTTPException: 404 if agent not found.
|
||||
"""
|
||||
_validate_agent_name(name)
|
||||
name = _normalize_agent_name(name)
|
||||
|
||||
try:
|
||||
agent_cfg = load_agent_config(name)
|
||||
except FileNotFoundError:
|
||||
raise HTTPException(status_code=404, detail=f"Agent '{name}' not found")
|
||||
|
||||
agent_dir = get_paths().agent_dir(name)
|
||||
|
||||
try:
|
||||
# Update config if any config fields changed
|
||||
config_changed = any(v is not None for v in [request.description, request.model, request.tool_groups])
|
||||
|
||||
if config_changed:
|
||||
updated: dict = {
|
||||
"name": agent_cfg.name,
|
||||
"description": request.description if request.description is not None else agent_cfg.description,
|
||||
}
|
||||
new_model = request.model if request.model is not None else agent_cfg.model
|
||||
if new_model is not None:
|
||||
updated["model"] = new_model
|
||||
|
||||
new_tool_groups = request.tool_groups if request.tool_groups is not None else agent_cfg.tool_groups
|
||||
if new_tool_groups is not None:
|
||||
updated["tool_groups"] = new_tool_groups
|
||||
|
||||
config_file = agent_dir / "config.yaml"
|
||||
with open(config_file, "w", encoding="utf-8") as f:
|
||||
yaml.dump(updated, f, default_flow_style=False, allow_unicode=True)
|
||||
|
||||
# Update SOUL.md if provided
|
||||
if request.soul is not None:
|
||||
soul_path = agent_dir / "SOUL.md"
|
||||
soul_path.write_text(request.soul, encoding="utf-8")
|
||||
|
||||
logger.info(f"Updated agent '{name}'")
|
||||
|
||||
refreshed_cfg = load_agent_config(name)
|
||||
return _agent_config_to_response(refreshed_cfg, include_soul=True)
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to update agent '{name}': {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to update agent: {str(e)}")
|
||||
|
||||
|
||||
class UserProfileResponse(BaseModel):
|
||||
"""Response model for the global user profile (USER.md)."""
|
||||
|
||||
content: str | None = Field(default=None, description="USER.md content, or null if not yet created")
|
||||
|
||||
|
||||
class UserProfileUpdateRequest(BaseModel):
|
||||
"""Request body for setting the global user profile."""
|
||||
|
||||
content: str = Field(default="", description="USER.md content — describes the user's background and preferences")
|
||||
|
||||
|
||||
@router.get(
|
||||
"/user-profile",
|
||||
response_model=UserProfileResponse,
|
||||
summary="Get User Profile",
|
||||
description="Read the global USER.md file that is injected into all custom agents.",
|
||||
)
|
||||
async def get_user_profile() -> UserProfileResponse:
|
||||
"""Return the current USER.md content.
|
||||
|
||||
Returns:
|
||||
UserProfileResponse with content=None if USER.md does not exist yet.
|
||||
"""
|
||||
try:
|
||||
user_md_path = get_paths().user_md_file
|
||||
if not user_md_path.exists():
|
||||
return UserProfileResponse(content=None)
|
||||
raw = user_md_path.read_text(encoding="utf-8").strip()
|
||||
return UserProfileResponse(content=raw or None)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to read user profile: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to read user profile: {str(e)}")
|
||||
|
||||
|
||||
@router.put(
|
||||
"/user-profile",
|
||||
response_model=UserProfileResponse,
|
||||
summary="Update User Profile",
|
||||
description="Write the global USER.md file that is injected into all custom agents.",
|
||||
)
|
||||
async def update_user_profile(request: UserProfileUpdateRequest) -> UserProfileResponse:
|
||||
"""Create or overwrite the global USER.md.
|
||||
|
||||
Args:
|
||||
request: The update request with the new USER.md content.
|
||||
|
||||
Returns:
|
||||
UserProfileResponse with the saved content.
|
||||
"""
|
||||
try:
|
||||
paths = get_paths()
|
||||
paths.base_dir.mkdir(parents=True, exist_ok=True)
|
||||
paths.user_md_file.write_text(request.content, encoding="utf-8")
|
||||
logger.info(f"Updated USER.md at {paths.user_md_file}")
|
||||
return UserProfileResponse(content=request.content or None)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to update user profile: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to update user profile: {str(e)}")
|
||||
|
||||
|
||||
@router.delete(
|
||||
"/agents/{name}",
|
||||
status_code=204,
|
||||
summary="Delete Custom Agent",
|
||||
description="Delete a custom agent and all its files (config, SOUL.md, memory).",
|
||||
)
|
||||
async def delete_agent(name: str) -> None:
|
||||
"""Delete a custom agent.
|
||||
|
||||
Args:
|
||||
name: The agent name.
|
||||
|
||||
Raises:
|
||||
HTTPException: 404 if agent not found.
|
||||
"""
|
||||
_validate_agent_name(name)
|
||||
name = _normalize_agent_name(name)
|
||||
|
||||
agent_dir = get_paths().agent_dir(name)
|
||||
|
||||
if not agent_dir.exists():
|
||||
raise HTTPException(status_code=404, detail=f"Agent '{name}' not found")
|
||||
|
||||
try:
|
||||
shutil.rmtree(agent_dir)
|
||||
logger.info(f"Deleted agent '{name}' from {agent_dir}")
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to delete agent '{name}': {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to delete agent: {str(e)}")
|
||||
158
backend/app/gateway/routers/artifacts.py
Normal file
@@ -0,0 +1,158 @@
|
||||
import logging
|
||||
import mimetypes
|
||||
import zipfile
|
||||
from pathlib import Path
|
||||
from urllib.parse import quote
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Request
|
||||
from fastapi.responses import FileResponse, HTMLResponse, PlainTextResponse, Response
|
||||
|
||||
from app.gateway.path_utils import resolve_thread_virtual_path
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter(prefix="/api", tags=["artifacts"])
|
||||
|
||||
|
||||
def is_text_file_by_content(path: Path, sample_size: int = 8192) -> bool:
|
||||
"""Check if file is text by examining content for null bytes."""
|
||||
try:
|
||||
with open(path, "rb") as f:
|
||||
chunk = f.read(sample_size)
|
||||
# Text files shouldn't contain null bytes
|
||||
return b"\x00" not in chunk
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
def _extract_file_from_skill_archive(zip_path: Path, internal_path: str) -> bytes | None:
|
||||
"""Extract a file from a .skill ZIP archive.
|
||||
|
||||
Args:
|
||||
zip_path: Path to the .skill file (ZIP archive).
|
||||
internal_path: Path to the file inside the archive (e.g., "SKILL.md").
|
||||
|
||||
Returns:
|
||||
The file content as bytes, or None if not found.
|
||||
"""
|
||||
if not zipfile.is_zipfile(zip_path):
|
||||
return None
|
||||
|
||||
try:
|
||||
with zipfile.ZipFile(zip_path, "r") as zip_ref:
|
||||
# List all files in the archive
|
||||
namelist = zip_ref.namelist()
|
||||
|
||||
# Try direct path first
|
||||
if internal_path in namelist:
|
||||
return zip_ref.read(internal_path)
|
||||
|
||||
# Try with any top-level directory prefix (e.g., "skill-name/SKILL.md")
|
||||
for name in namelist:
|
||||
if name.endswith("/" + internal_path) or name == internal_path:
|
||||
return zip_ref.read(name)
|
||||
|
||||
# Not found
|
||||
return None
|
||||
except (zipfile.BadZipFile, KeyError):
|
||||
return None
|
||||
|
||||
|
||||
@router.get(
|
||||
"/threads/{thread_id}/artifacts/{path:path}",
|
||||
summary="Get Artifact File",
|
||||
description="Retrieve an artifact file generated by the AI agent. Supports text, HTML, and binary files.",
|
||||
)
|
||||
async def get_artifact(thread_id: str, path: str, request: Request) -> FileResponse:
|
||||
"""Get an artifact file by its path.
|
||||
|
||||
The endpoint automatically detects file types and returns appropriate content types.
|
||||
Use the `?download=true` query parameter to force file download.
|
||||
|
||||
Args:
|
||||
thread_id: The thread ID.
|
||||
path: The artifact path with virtual prefix (e.g., mnt/user-data/outputs/file.txt).
|
||||
request: FastAPI request object (automatically injected).
|
||||
|
||||
Returns:
|
||||
The file content as a FileResponse with appropriate content type:
|
||||
- HTML files: Rendered as HTML
|
||||
- Text files: Plain text with proper MIME type
|
||||
- Binary files: Inline display with download option
|
||||
|
||||
Raises:
|
||||
HTTPException:
|
||||
- 400 if path is invalid or not a file
|
||||
- 403 if access denied (path traversal detected)
|
||||
- 404 if file not found
|
||||
|
||||
Query Parameters:
|
||||
download (bool): If true, returns file as attachment for download
|
||||
|
||||
Example:
|
||||
- Get HTML file: `/api/threads/abc123/artifacts/mnt/user-data/outputs/index.html`
|
||||
- Download file: `/api/threads/abc123/artifacts/mnt/user-data/outputs/data.csv?download=true`
|
||||
"""
|
||||
# Check if this is a request for a file inside a .skill archive (e.g., xxx.skill/SKILL.md)
|
||||
if ".skill/" in path:
|
||||
# Split the path at ".skill/" to get the ZIP file path and internal path
|
||||
skill_marker = ".skill/"
|
||||
marker_pos = path.find(skill_marker)
|
||||
skill_file_path = path[: marker_pos + len(".skill")] # e.g., "mnt/user-data/outputs/my-skill.skill"
|
||||
internal_path = path[marker_pos + len(skill_marker) :] # e.g., "SKILL.md"
|
||||
|
||||
actual_skill_path = resolve_thread_virtual_path(thread_id, skill_file_path)
|
||||
|
||||
if not actual_skill_path.exists():
|
||||
raise HTTPException(status_code=404, detail=f"Skill file not found: {skill_file_path}")
|
||||
|
||||
if not actual_skill_path.is_file():
|
||||
raise HTTPException(status_code=400, detail=f"Path is not a file: {skill_file_path}")
|
||||
|
||||
# Extract the file from the .skill archive
|
||||
content = _extract_file_from_skill_archive(actual_skill_path, internal_path)
|
||||
if content is None:
|
||||
raise HTTPException(status_code=404, detail=f"File '{internal_path}' not found in skill archive")
|
||||
|
||||
# Determine MIME type based on the internal file
|
||||
mime_type, _ = mimetypes.guess_type(internal_path)
|
||||
# Add cache headers to avoid repeated ZIP extraction (cache for 5 minutes)
|
||||
cache_headers = {"Cache-Control": "private, max-age=300"}
|
||||
if mime_type and mime_type.startswith("text/"):
|
||||
return PlainTextResponse(content=content.decode("utf-8"), media_type=mime_type, headers=cache_headers)
|
||||
|
||||
# Default to plain text for unknown types that look like text
|
||||
try:
|
||||
return PlainTextResponse(content=content.decode("utf-8"), media_type="text/plain", headers=cache_headers)
|
||||
except UnicodeDecodeError:
|
||||
return Response(content=content, media_type=mime_type or "application/octet-stream", headers=cache_headers)
|
||||
|
||||
actual_path = resolve_thread_virtual_path(thread_id, path)
|
||||
|
||||
logger.info(f"Resolving artifact path: thread_id={thread_id}, requested_path={path}, actual_path={actual_path}")
|
||||
|
||||
if not actual_path.exists():
|
||||
raise HTTPException(status_code=404, detail=f"Artifact not found: {path}")
|
||||
|
||||
if not actual_path.is_file():
|
||||
raise HTTPException(status_code=400, detail=f"Path is not a file: {path}")
|
||||
|
||||
mime_type, _ = mimetypes.guess_type(actual_path)
|
||||
|
||||
# Encode filename for Content-Disposition header (RFC 5987)
|
||||
encoded_filename = quote(actual_path.name)
|
||||
|
||||
# if `download` query parameter is true, return the file as a download
|
||||
if request.query_params.get("download"):
|
||||
return FileResponse(path=actual_path, filename=actual_path.name, media_type=mime_type, headers={"Content-Disposition": f"attachment; filename*=UTF-8''{encoded_filename}"})
|
||||
|
||||
if mime_type and mime_type == "text/html":
|
||||
return HTMLResponse(content=actual_path.read_text())
|
||||
|
||||
if mime_type and mime_type.startswith("text/"):
|
||||
return PlainTextResponse(content=actual_path.read_text(), media_type=mime_type)
|
||||
|
||||
if is_text_file_by_content(actual_path):
|
||||
return PlainTextResponse(content=actual_path.read_text(), media_type=mime_type)
|
||||
|
||||
return Response(content=actual_path.read_bytes(), media_type=mime_type, headers={"Content-Disposition": f"inline; filename*=UTF-8''{encoded_filename}"})
|
||||
52
backend/app/gateway/routers/channels.py
Normal file
@@ -0,0 +1,52 @@
"""Gateway router for IM channel management."""

from __future__ import annotations

import logging

from fastapi import APIRouter, HTTPException
from pydantic import BaseModel

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/api/channels", tags=["channels"])


class ChannelStatusResponse(BaseModel):
    service_running: bool
    channels: dict[str, dict]


class ChannelRestartResponse(BaseModel):
    success: bool
    message: str


@router.get("/", response_model=ChannelStatusResponse)
async def get_channels_status() -> ChannelStatusResponse:
    """Get the status of all IM channels."""
    from app.channels.service import get_channel_service

    service = get_channel_service()
    if service is None:
        return ChannelStatusResponse(service_running=False, channels={})
    status = service.get_status()
    return ChannelStatusResponse(**status)


@router.post("/{name}/restart", response_model=ChannelRestartResponse)
async def restart_channel(name: str) -> ChannelRestartResponse:
    """Restart a specific IM channel."""
    from app.channels.service import get_channel_service

    service = get_channel_service()
    if service is None:
        raise HTTPException(status_code=503, detail="Channel service is not running")

    success = await service.restart_channel(name)
    if success:
        logger.info("Channel %s restarted successfully", name)
        return ChannelRestartResponse(success=True, message=f"Channel {name} restarted successfully")
    else:
        logger.warning("Failed to restart channel %s", name)
        return ChannelRestartResponse(success=False, message=f"Failed to restart channel {name}")
169
backend/app/gateway/routers/mcp.py
Normal file
@@ -0,0 +1,169 @@
|
||||
import json
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Literal
|
||||
|
||||
from fastapi import APIRouter, HTTPException
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from deerflow.config.extensions_config import ExtensionsConfig, get_extensions_config, reload_extensions_config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
router = APIRouter(prefix="/api", tags=["mcp"])
|
||||
|
||||
|
||||
class McpOAuthConfigResponse(BaseModel):
|
||||
"""OAuth configuration for an MCP server."""
|
||||
|
||||
enabled: bool = Field(default=True, description="Whether OAuth token injection is enabled")
|
||||
token_url: str = Field(default="", description="OAuth token endpoint URL")
|
||||
grant_type: Literal["client_credentials", "refresh_token"] = Field(default="client_credentials", description="OAuth grant type")
|
||||
client_id: str | None = Field(default=None, description="OAuth client ID")
|
||||
client_secret: str | None = Field(default=None, description="OAuth client secret")
|
||||
refresh_token: str | None = Field(default=None, description="OAuth refresh token")
|
||||
scope: str | None = Field(default=None, description="OAuth scope")
|
||||
audience: str | None = Field(default=None, description="OAuth audience")
|
||||
token_field: str = Field(default="access_token", description="Token response field containing access token")
|
||||
token_type_field: str = Field(default="token_type", description="Token response field containing token type")
|
||||
expires_in_field: str = Field(default="expires_in", description="Token response field containing expires-in seconds")
|
||||
default_token_type: str = Field(default="Bearer", description="Default token type when response omits token_type")
|
||||
refresh_skew_seconds: int = Field(default=60, description="Refresh this many seconds before expiry")
|
||||
extra_token_params: dict[str, str] = Field(default_factory=dict, description="Additional form params sent to token endpoint")
|
||||
|
||||
|
||||
class McpServerConfigResponse(BaseModel):
|
||||
"""Response model for MCP server configuration."""
|
||||
|
||||
enabled: bool = Field(default=True, description="Whether this MCP server is enabled")
|
||||
type: str = Field(default="stdio", description="Transport type: 'stdio', 'sse', or 'http'")
|
||||
command: str | None = Field(default=None, description="Command to execute to start the MCP server (for stdio type)")
|
||||
args: list[str] = Field(default_factory=list, description="Arguments to pass to the command (for stdio type)")
|
||||
env: dict[str, str] = Field(default_factory=dict, description="Environment variables for the MCP server")
|
||||
url: str | None = Field(default=None, description="URL of the MCP server (for sse or http type)")
|
||||
headers: dict[str, str] = Field(default_factory=dict, description="HTTP headers to send (for sse or http type)")
|
||||
oauth: McpOAuthConfigResponse | None = Field(default=None, description="OAuth configuration for MCP HTTP/SSE servers")
|
||||
description: str = Field(default="", description="Human-readable description of what this MCP server provides")
|
||||
|
||||
|
||||
class McpConfigResponse(BaseModel):
|
||||
"""Response model for MCP configuration."""
|
||||
|
||||
mcp_servers: dict[str, McpServerConfigResponse] = Field(
|
||||
default_factory=dict,
|
||||
description="Map of MCP server name to configuration",
|
||||
)
|
||||
|
||||
|
||||
class McpConfigUpdateRequest(BaseModel):
|
||||
"""Request model for updating MCP configuration."""
|
||||
|
||||
mcp_servers: dict[str, McpServerConfigResponse] = Field(
|
||||
...,
|
||||
description="Map of MCP server name to configuration",
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/mcp/config",
|
||||
response_model=McpConfigResponse,
|
||||
summary="Get MCP Configuration",
|
||||
description="Retrieve the current Model Context Protocol (MCP) server configurations.",
|
||||
)
|
||||
async def get_mcp_configuration() -> McpConfigResponse:
|
||||
"""Get the current MCP configuration.
|
||||
|
||||
Returns:
|
||||
The current MCP configuration with all servers.
|
||||
|
||||
Example:
|
||||
```json
|
||||
{
|
||||
"mcp_servers": {
|
||||
"github": {
|
||||
"enabled": true,
|
||||
"command": "npx",
|
||||
"args": ["-y", "@modelcontextprotocol/server-github"],
|
||||
"env": {"GITHUB_TOKEN": "ghp_xxx"},
|
||||
"description": "GitHub MCP server for repository operations"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
"""
|
||||
config = get_extensions_config()
|
||||
|
||||
return McpConfigResponse(mcp_servers={name: McpServerConfigResponse(**server.model_dump()) for name, server in config.mcp_servers.items()})
|
||||
|
||||
|
||||
@router.put(
|
||||
"/mcp/config",
|
||||
response_model=McpConfigResponse,
|
||||
summary="Update MCP Configuration",
|
||||
description="Update Model Context Protocol (MCP) server configurations and save to file.",
|
||||
)
|
||||
async def update_mcp_configuration(request: McpConfigUpdateRequest) -> McpConfigResponse:
|
||||
"""Update the MCP configuration.
|
||||
|
||||
This will:
|
||||
1. Save the new configuration to the mcp_config.json file
|
||||
2. Reload the configuration cache
|
||||
3. Reset MCP tools cache to trigger reinitialization
|
||||
|
||||
Args:
|
||||
request: The new MCP configuration to save.
|
||||
|
||||
Returns:
|
||||
The updated MCP configuration.
|
||||
|
||||
Raises:
|
||||
HTTPException: 500 if the configuration file cannot be written.
|
||||
|
||||
Example Request:
|
||||
```json
|
||||
{
|
||||
"mcp_servers": {
|
||||
"github": {
|
||||
"enabled": true,
|
||||
"command": "npx",
|
||||
"args": ["-y", "@modelcontextprotocol/server-github"],
|
||||
"env": {"GITHUB_TOKEN": "$GITHUB_TOKEN"},
|
||||
"description": "GitHub MCP server for repository operations"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
"""
|
||||
try:
|
||||
# Get the current config path (or determine where to save it)
|
||||
config_path = ExtensionsConfig.resolve_config_path()
|
||||
|
||||
# If no config file exists, create one in the parent directory (project root)
|
||||
if config_path is None:
|
||||
config_path = Path.cwd().parent / "extensions_config.json"
|
||||
logger.info(f"No existing extensions config found. Creating new config at: {config_path}")
|
||||
|
||||
# Load current config to preserve skills configuration
|
||||
current_config = get_extensions_config()
|
||||
|
||||
# Convert request to dict format for JSON serialization
|
||||
config_data = {
|
||||
"mcpServers": {name: server.model_dump() for name, server in request.mcp_servers.items()},
|
||||
"skills": {name: {"enabled": skill.enabled} for name, skill in current_config.skills.items()},
|
||||
}
|
||||
|
||||
# Write the configuration to file
|
||||
with open(config_path, "w") as f:
|
||||
json.dump(config_data, f, indent=2)
|
||||
|
||||
logger.info(f"MCP configuration updated and saved to: {config_path}")
|
||||
|
||||
# NOTE: No need to reload/reset cache here - LangGraph Server (separate process)
|
||||
# will detect config file changes via mtime and reinitialize MCP tools automatically
|
||||
|
||||
# Reload the configuration and update the global cache
|
||||
reloaded_config = reload_extensions_config()
|
||||
return McpConfigResponse(mcp_servers={name: McpServerConfigResponse(**server.model_dump()) for name, server in reloaded_config.mcp_servers.items()})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to update MCP configuration: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to update MCP configuration: {str(e)}")
|
||||
201
backend/app/gateway/routers/memory.py
Normal file
@@ -0,0 +1,201 @@
|
||||
"""Memory API router for retrieving and managing global memory data."""
|
||||
|
||||
from fastapi import APIRouter
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from deerflow.agents.memory.updater import get_memory_data, reload_memory_data
|
||||
from deerflow.config.memory_config import get_memory_config
|
||||
|
||||
router = APIRouter(prefix="/api", tags=["memory"])
|
||||
|
||||
|
||||
class ContextSection(BaseModel):
|
||||
"""Model for context sections (user and history)."""
|
||||
|
||||
summary: str = Field(default="", description="Summary content")
|
||||
updatedAt: str = Field(default="", description="Last update timestamp")
|
||||
|
||||
|
||||
class UserContext(BaseModel):
|
||||
"""Model for user context."""
|
||||
|
||||
workContext: ContextSection = Field(default_factory=ContextSection)
|
||||
personalContext: ContextSection = Field(default_factory=ContextSection)
|
||||
topOfMind: ContextSection = Field(default_factory=ContextSection)
|
||||
|
||||
|
||||
class HistoryContext(BaseModel):
|
||||
"""Model for history context."""
|
||||
|
||||
recentMonths: ContextSection = Field(default_factory=ContextSection)
|
||||
earlierContext: ContextSection = Field(default_factory=ContextSection)
|
||||
longTermBackground: ContextSection = Field(default_factory=ContextSection)
|
||||
|
||||
|
||||
class Fact(BaseModel):
|
||||
"""Model for a memory fact."""
|
||||
|
||||
id: str = Field(..., description="Unique identifier for the fact")
|
||||
content: str = Field(..., description="Fact content")
|
||||
category: str = Field(default="context", description="Fact category")
|
||||
confidence: float = Field(default=0.5, description="Confidence score (0-1)")
|
||||
createdAt: str = Field(default="", description="Creation timestamp")
|
||||
source: str = Field(default="unknown", description="Source thread ID")
|
||||
|
||||
|
||||
class MemoryResponse(BaseModel):
|
||||
"""Response model for memory data."""
|
||||
|
||||
version: str = Field(default="1.0", description="Memory schema version")
|
||||
lastUpdated: str = Field(default="", description="Last update timestamp")
|
||||
user: UserContext = Field(default_factory=UserContext)
|
||||
history: HistoryContext = Field(default_factory=HistoryContext)
|
||||
facts: list[Fact] = Field(default_factory=list)
|
||||
|
||||
|
||||
class MemoryConfigResponse(BaseModel):
|
||||
"""Response model for memory configuration."""
|
||||
|
||||
enabled: bool = Field(..., description="Whether memory is enabled")
|
||||
storage_path: str = Field(..., description="Path to memory storage file")
|
||||
debounce_seconds: int = Field(..., description="Debounce time for memory updates")
|
||||
max_facts: int = Field(..., description="Maximum number of facts to store")
|
||||
fact_confidence_threshold: float = Field(..., description="Minimum confidence threshold for facts")
|
||||
injection_enabled: bool = Field(..., description="Whether memory injection is enabled")
|
||||
max_injection_tokens: int = Field(..., description="Maximum tokens for memory injection")
|
||||
|
||||
|
||||
class MemoryStatusResponse(BaseModel):
|
||||
"""Response model for memory status."""
|
||||
|
||||
config: MemoryConfigResponse
|
||||
data: MemoryResponse
|
||||
|
||||
|
||||
@router.get(
|
||||
"/memory",
|
||||
response_model=MemoryResponse,
|
||||
summary="Get Memory Data",
|
||||
description="Retrieve the current global memory data including user context, history, and facts.",
|
||||
)
|
||||
async def get_memory() -> MemoryResponse:
|
||||
"""Get the current global memory data.
|
||||
|
||||
Returns:
|
||||
The current memory data with user context, history, and facts.
|
||||
|
||||
Example Response:
|
||||
```json
|
||||
{
|
||||
"version": "1.0",
|
||||
"lastUpdated": "2024-01-15T10:30:00Z",
|
||||
"user": {
|
||||
"workContext": {"summary": "Working on DeerFlow project", "updatedAt": "..."},
|
||||
"personalContext": {"summary": "Prefers concise responses", "updatedAt": "..."},
|
||||
"topOfMind": {"summary": "Building memory API", "updatedAt": "..."}
|
||||
},
|
||||
"history": {
|
||||
"recentMonths": {"summary": "Recent development activities", "updatedAt": "..."},
|
||||
"earlierContext": {"summary": "", "updatedAt": ""},
|
||||
"longTermBackground": {"summary": "", "updatedAt": ""}
|
||||
},
|
||||
"facts": [
|
||||
{
|
||||
"id": "fact_abc123",
|
||||
"content": "User prefers TypeScript over JavaScript",
|
||||
"category": "preference",
|
||||
"confidence": 0.9,
|
||||
"createdAt": "2024-01-15T10:30:00Z",
|
||||
"source": "thread_xyz"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
"""
|
||||
memory_data = get_memory_data()
|
||||
return MemoryResponse(**memory_data)
|
||||
|
||||
|
||||
@router.post(
|
||||
"/memory/reload",
|
||||
response_model=MemoryResponse,
|
||||
summary="Reload Memory Data",
|
||||
description="Reload memory data from the storage file, refreshing the in-memory cache.",
|
||||
)
|
||||
async def reload_memory() -> MemoryResponse:
|
||||
"""Reload memory data from file.
|
||||
|
||||
This forces a reload of the memory data from the storage file,
|
||||
useful when the file has been modified externally.
|
||||
|
||||
Returns:
|
||||
The reloaded memory data.
|
||||
"""
|
||||
memory_data = reload_memory_data()
|
||||
return MemoryResponse(**memory_data)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/memory/config",
|
||||
response_model=MemoryConfigResponse,
|
||||
summary="Get Memory Configuration",
|
||||
description="Retrieve the current memory system configuration.",
|
||||
)
|
||||
async def get_memory_config_endpoint() -> MemoryConfigResponse:
|
||||
"""Get the memory system configuration.
|
||||
|
||||
Returns:
|
||||
The current memory configuration settings.
|
||||
|
||||
Example Response:
|
||||
```json
|
||||
{
|
||||
"enabled": true,
|
||||
"storage_path": ".deer-flow/memory.json",
|
||||
"debounce_seconds": 30,
|
||||
"max_facts": 100,
|
||||
"fact_confidence_threshold": 0.7,
|
||||
"injection_enabled": true,
|
||||
"max_injection_tokens": 2000
|
||||
}
|
||||
```
|
||||
"""
|
||||
config = get_memory_config()
|
||||
return MemoryConfigResponse(
|
||||
enabled=config.enabled,
|
||||
storage_path=config.storage_path,
|
||||
debounce_seconds=config.debounce_seconds,
|
||||
max_facts=config.max_facts,
|
||||
fact_confidence_threshold=config.fact_confidence_threshold,
|
||||
injection_enabled=config.injection_enabled,
|
||||
max_injection_tokens=config.max_injection_tokens,
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/memory/status",
|
||||
response_model=MemoryStatusResponse,
|
||||
summary="Get Memory Status",
|
||||
description="Retrieve both memory configuration and current data in a single request.",
|
||||
)
|
||||
async def get_memory_status() -> MemoryStatusResponse:
|
||||
"""Get the memory system status including configuration and data.
|
||||
|
||||
Returns:
|
||||
Combined memory configuration and current data.
|
||||
"""
|
||||
config = get_memory_config()
|
||||
memory_data = get_memory_data()
|
||||
|
||||
return MemoryStatusResponse(
|
||||
config=MemoryConfigResponse(
|
||||
enabled=config.enabled,
|
||||
storage_path=config.storage_path,
|
||||
debounce_seconds=config.debounce_seconds,
|
||||
max_facts=config.max_facts,
|
||||
fact_confidence_threshold=config.fact_confidence_threshold,
|
||||
injection_enabled=config.injection_enabled,
|
||||
max_injection_tokens=config.max_injection_tokens,
|
||||
),
|
||||
data=MemoryResponse(**memory_data),
|
||||
)
|
||||
113
backend/app/gateway/routers/models.py
Normal file
@@ -0,0 +1,113 @@
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel, Field

from deerflow.config import get_app_config

router = APIRouter(prefix="/api", tags=["models"])


class ModelResponse(BaseModel):
    """Response model for model information."""

    name: str = Field(..., description="Unique identifier for the model")
    display_name: str | None = Field(None, description="Human-readable name")
    description: str | None = Field(None, description="Model description")
    supports_thinking: bool = Field(default=False, description="Whether model supports thinking mode")
    supports_reasoning_effort: bool = Field(default=False, description="Whether model supports reasoning effort")


class ModelsListResponse(BaseModel):
    """Response model for listing all models."""

    models: list[ModelResponse]


@router.get(
    "/models",
    response_model=ModelsListResponse,
    summary="List All Models",
    description="Retrieve a list of all available AI models configured in the system.",
)
async def list_models() -> ModelsListResponse:
    """List all available models from configuration.

    Returns model information suitable for frontend display,
    excluding sensitive fields like API keys and internal configuration.

    Returns:
        A list of all configured models with their metadata.

    Example Response:
        ```json
        {
            "models": [
                {
                    "name": "gpt-4",
                    "display_name": "GPT-4",
                    "description": "OpenAI GPT-4 model",
                    "supports_thinking": false
                },
                {
                    "name": "claude-3-opus",
                    "display_name": "Claude 3 Opus",
                    "description": "Anthropic Claude 3 Opus model",
                    "supports_thinking": true
                }
            ]
        }
        ```
    """
    config = get_app_config()
    models = [
        ModelResponse(
            name=model.name,
            display_name=model.display_name,
            description=model.description,
            supports_thinking=model.supports_thinking,
            supports_reasoning_effort=model.supports_reasoning_effort,
        )
        for model in config.models
    ]
    return ModelsListResponse(models=models)


@router.get(
    "/models/{model_name}",
    response_model=ModelResponse,
    summary="Get Model Details",
    description="Retrieve detailed information about a specific AI model by its name.",
)
async def get_model(model_name: str) -> ModelResponse:
    """Get a specific model by name.

    Args:
        model_name: The unique name of the model to retrieve.

    Returns:
        Model information if found.

    Raises:
        HTTPException: 404 if model not found.

    Example Response:
        ```json
        {
            "name": "gpt-4",
            "display_name": "GPT-4",
            "description": "OpenAI GPT-4 model",
            "supports_thinking": false
        }
        ```
    """
    config = get_app_config()
    model = config.get_model_config(model_name)
    if model is None:
        raise HTTPException(status_code=404, detail=f"Model '{model_name}' not found")

    return ModelResponse(
        name=model.name,
        display_name=model.display_name,
        description=model.description,
        supports_thinking=model.supports_thinking,
        supports_reasoning_effort=model.supports_reasoning_effort,
    )
438
backend/app/gateway/routers/skills.py
Normal file
@@ -0,0 +1,438 @@
import json
import logging
import shutil
import stat
import tempfile
import zipfile
from pathlib import Path

from fastapi import APIRouter, HTTPException
from pydantic import BaseModel, Field

from app.gateway.path_utils import resolve_thread_virtual_path
from deerflow.config.extensions_config import ExtensionsConfig, SkillStateConfig, get_extensions_config, reload_extensions_config
from deerflow.skills import Skill, load_skills
from deerflow.skills.loader import get_skills_root_path
from deerflow.skills.validation import _validate_skill_frontmatter

logger = logging.getLogger(__name__)


def _is_unsafe_zip_member(info: zipfile.ZipInfo) -> bool:
    """Return True if the zip member path is absolute or attempts directory traversal."""
    name = info.filename
    if not name:
        return False
    path = Path(name)
    if path.is_absolute():
        return True
    if ".." in path.parts:
        return True
    return False


def _is_symlink_member(info: zipfile.ZipInfo) -> bool:
    """Detect symlinks based on the external attributes stored in the ZipInfo."""
    # Upper 16 bits of external_attr contain the Unix file mode when created on Unix.
    mode = info.external_attr >> 16
    return stat.S_ISLNK(mode)


def _safe_extract_skill_archive(
    zip_ref: zipfile.ZipFile,
    dest_path: Path,
    max_total_size: int = 512 * 1024 * 1024,
) -> None:
    """Safely extract a skill archive into dest_path with basic protections.

    Protections:
    - Reject absolute paths and directory traversal (..).
    - Skip symlink entries instead of materialising them.
    - Enforce a hard limit on total uncompressed size to mitigate zip bombs.
    """
    dest_root = Path(dest_path).resolve()
    total_size = 0

    for info in zip_ref.infolist():
        # Reject absolute paths or any path that attempts directory traversal.
        if _is_unsafe_zip_member(info):
            raise HTTPException(
                status_code=400,
                detail=f"Archive contains unsafe member path: {info.filename!r}",
            )

        # Skip any symlink entries instead of materialising them on disk.
        if _is_symlink_member(info):
            logger.warning("Skipping symlink entry in skill archive: %s", info.filename)
            continue

        # Basic unzip-bomb defence: bound the total uncompressed size we will write.
        total_size += max(info.file_size, 0)
        if total_size > max_total_size:
            raise HTTPException(
                status_code=400,
                detail="Skill archive is too large or appears highly compressed.",
            )

        member_path = dest_root / info.filename
        member_path_parent = member_path.parent
        member_path_parent.mkdir(parents=True, exist_ok=True)

        if info.is_dir():
            member_path.mkdir(parents=True, exist_ok=True)
            continue

        with zip_ref.open(info) as src, open(member_path, "wb") as dst:
            shutil.copyfileobj(src, dst)
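As a quick illustration of the guards above, the sketch below builds a throwaway archive in memory and runs it through `_safe_extract_skill_archive`; it assumes it is executed in this module (or imports the helper and FastAPI's `HTTPException` from it) and is not part of the router itself.

```python
# Illustrative only: exercise the extraction guards with an in-memory archive.
import io
import zipfile
from pathlib import Path
from tempfile import TemporaryDirectory

buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as zf:
    zf.writestr("my-skill/SKILL.md", "---\nname: my-skill\n---\n")
    zf.writestr("../escape.txt", "blocked")  # traversal entry, rejected below

with TemporaryDirectory() as tmp, zipfile.ZipFile(io.BytesIO(buf.getvalue())) as zf:
    try:
        _safe_extract_skill_archive(zf, Path(tmp))
    except HTTPException as exc:  # the ".." member is reported as a 400
        print(exc.status_code, exc.detail)
```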
router = APIRouter(prefix="/api", tags=["skills"])


class SkillResponse(BaseModel):
    """Response model for skill information."""

    name: str = Field(..., description="Name of the skill")
    description: str = Field(..., description="Description of what the skill does")
    license: str | None = Field(None, description="License information")
    category: str = Field(..., description="Category of the skill (public or custom)")
    enabled: bool = Field(default=True, description="Whether this skill is enabled")


class SkillsListResponse(BaseModel):
    """Response model for listing all skills."""

    skills: list[SkillResponse]


class SkillUpdateRequest(BaseModel):
    """Request model for updating a skill."""

    enabled: bool = Field(..., description="Whether to enable or disable the skill")


class SkillInstallRequest(BaseModel):
    """Request model for installing a skill from a .skill file."""

    thread_id: str = Field(..., description="The thread ID where the .skill file is located")
    path: str = Field(..., description="Virtual path to the .skill file (e.g., mnt/user-data/outputs/my-skill.skill)")


class SkillInstallResponse(BaseModel):
    """Response model for skill installation."""

    success: bool = Field(..., description="Whether the installation was successful")
    skill_name: str = Field(..., description="Name of the installed skill")
    message: str = Field(..., description="Installation result message")


def _should_ignore_archive_entry(path: Path) -> bool:
    return path.name.startswith(".") or path.name == "__MACOSX"


def _resolve_skill_dir_from_archive_root(temp_path: Path) -> Path:
    extracted_items = [item for item in temp_path.iterdir() if not _should_ignore_archive_entry(item)]
    if len(extracted_items) == 0:
        raise HTTPException(status_code=400, detail="Skill archive is empty")
    if len(extracted_items) == 1 and extracted_items[0].is_dir():
        return extracted_items[0]
    return temp_path


def _skill_to_response(skill: Skill) -> SkillResponse:
    """Convert a Skill object to a SkillResponse."""
    return SkillResponse(
        name=skill.name,
        description=skill.description,
        license=skill.license,
        category=skill.category,
        enabled=skill.enabled,
    )


@router.get(
    "/skills",
    response_model=SkillsListResponse,
    summary="List All Skills",
    description="Retrieve a list of all available skills from both public and custom directories.",
)
async def list_skills() -> SkillsListResponse:
    """List all available skills.

    Returns all skills regardless of their enabled status.

    Returns:
        A list of all skills with their metadata.

    Example Response:
        ```json
        {
            "skills": [
                {
                    "name": "PDF Processing",
                    "description": "Extract and analyze PDF content",
                    "license": "MIT",
                    "category": "public",
                    "enabled": true
                },
                {
                    "name": "Frontend Design",
                    "description": "Generate frontend designs and components",
                    "license": null,
                    "category": "custom",
                    "enabled": false
                }
            ]
        }
        ```
    """
    try:
        # Load all skills (including disabled ones)
        skills = load_skills(enabled_only=False)
        return SkillsListResponse(skills=[_skill_to_response(skill) for skill in skills])
    except Exception as e:
        logger.error(f"Failed to load skills: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to load skills: {str(e)}")


@router.get(
    "/skills/{skill_name}",
    response_model=SkillResponse,
    summary="Get Skill Details",
    description="Retrieve detailed information about a specific skill by its name.",
)
async def get_skill(skill_name: str) -> SkillResponse:
    """Get a specific skill by name.

    Args:
        skill_name: The name of the skill to retrieve.

    Returns:
        Skill information if found.

    Raises:
        HTTPException: 404 if skill not found.

    Example Response:
        ```json
        {
            "name": "PDF Processing",
            "description": "Extract and analyze PDF content",
            "license": "MIT",
            "category": "public",
            "enabled": true
        }
        ```
    """
    try:
        skills = load_skills(enabled_only=False)
        skill = next((s for s in skills if s.name == skill_name), None)

        if skill is None:
            raise HTTPException(status_code=404, detail=f"Skill '{skill_name}' not found")

        return _skill_to_response(skill)
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to get skill {skill_name}: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to get skill: {str(e)}")


@router.put(
    "/skills/{skill_name}",
    response_model=SkillResponse,
    summary="Update Skill",
    description="Update a skill's enabled status by modifying the extensions_config.json file.",
)
async def update_skill(skill_name: str, request: SkillUpdateRequest) -> SkillResponse:
    """Update a skill's enabled status.

    This will modify the extensions_config.json file to update the enabled state.
    The SKILL.md file itself is not modified.

    Args:
        skill_name: The name of the skill to update.
        request: The update request containing the new enabled status.

    Returns:
        The updated skill information.

    Raises:
        HTTPException: 404 if skill not found, 500 if update fails.

    Example Request:
        ```json
        {
            "enabled": false
        }
        ```

    Example Response:
        ```json
        {
            "name": "PDF Processing",
            "description": "Extract and analyze PDF content",
            "license": "MIT",
            "category": "public",
            "enabled": false
        }
        ```
    """
    try:
        # Find the skill to verify it exists
        skills = load_skills(enabled_only=False)
        skill = next((s for s in skills if s.name == skill_name), None)

        if skill is None:
            raise HTTPException(status_code=404, detail=f"Skill '{skill_name}' not found")

        # Get or create config path
        config_path = ExtensionsConfig.resolve_config_path()
        if config_path is None:
            # Create new config file in parent directory (project root)
            config_path = Path.cwd().parent / "extensions_config.json"
            logger.info(f"No existing extensions config found. Creating new config at: {config_path}")

        # Load current configuration
        extensions_config = get_extensions_config()

        # Update the skill's enabled status
        extensions_config.skills[skill_name] = SkillStateConfig(enabled=request.enabled)

        # Convert to JSON format (preserve MCP servers config)
        config_data = {
            "mcpServers": {name: server.model_dump() for name, server in extensions_config.mcp_servers.items()},
            "skills": {name: {"enabled": skill_config.enabled} for name, skill_config in extensions_config.skills.items()},
        }

        # Write the configuration to file
        with open(config_path, "w") as f:
            json.dump(config_data, f, indent=2)

        logger.info(f"Skills configuration updated and saved to: {config_path}")

        # Reload the extensions config to update the global cache
        reload_extensions_config()

        # Reload the skills to get the updated status (for API response)
        skills = load_skills(enabled_only=False)
        updated_skill = next((s for s in skills if s.name == skill_name), None)

        if updated_skill is None:
            raise HTTPException(status_code=500, detail=f"Failed to reload skill '{skill_name}' after update")

        logger.info(f"Skill '{skill_name}' enabled status updated to {request.enabled}")
        return _skill_to_response(updated_skill)

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to update skill {skill_name}: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to update skill: {str(e)}")
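The list/get/update flow above can be exercised from a client roughly as follows; the base URL is a placeholder, not something defined in this diff.

```python
# Hypothetical client usage of the skills endpoints; the base URL is an assumption.
import requests

BASE = "http://localhost:8000/api"

# GET /api/skills returns every skill, enabled or not.
skills = requests.get(f"{BASE}/skills", timeout=10).json()["skills"]

# PUT /api/skills/{skill_name} persists the flag to extensions_config.json and reloads it.
if skills:
    name = skills[0]["name"]
    updated = requests.put(f"{BASE}/skills/{name}", json={"enabled": False}, timeout=10)
    print(updated.json())  # reflects the reloaded skill state
```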
@router.post(
    "/skills/install",
    response_model=SkillInstallResponse,
    summary="Install Skill",
    description="Install a skill from a .skill file (ZIP archive) located in the thread's user-data directory.",
)
async def install_skill(request: SkillInstallRequest) -> SkillInstallResponse:
    """Install a skill from a .skill file.

    The .skill file is a ZIP archive containing a skill directory with SKILL.md
    and optional resources (scripts, references, assets).

    Args:
        request: The install request containing thread_id and virtual path to .skill file.

    Returns:
        Installation result with skill name and status message.

    Raises:
        HTTPException:
            - 400 if path is invalid or file is not a valid .skill file
            - 403 if access denied (path traversal detected)
            - 404 if file not found
            - 409 if skill already exists
            - 500 if installation fails

    Example Request:
        ```json
        {
            "thread_id": "abc123-def456",
            "path": "/mnt/user-data/outputs/my-skill.skill"
        }
        ```

    Example Response:
        ```json
        {
            "success": true,
            "skill_name": "my-skill",
            "message": "Skill 'my-skill' installed successfully"
        }
        ```
    """
    try:
        # Resolve the virtual path to actual file path
        skill_file_path = resolve_thread_virtual_path(request.thread_id, request.path)

        # Check if file exists
        if not skill_file_path.exists():
            raise HTTPException(status_code=404, detail=f"Skill file not found: {request.path}")

        # Check if it's a file
        if not skill_file_path.is_file():
            raise HTTPException(status_code=400, detail=f"Path is not a file: {request.path}")

        # Check file extension
        if not skill_file_path.suffix == ".skill":
            raise HTTPException(status_code=400, detail="File must have .skill extension")

        # Verify it's a valid ZIP file
        if not zipfile.is_zipfile(skill_file_path):
            raise HTTPException(status_code=400, detail="File is not a valid ZIP archive")

        # Get the custom skills directory
        skills_root = get_skills_root_path()
        custom_skills_dir = skills_root / "custom"

        # Create custom directory if it doesn't exist
        custom_skills_dir.mkdir(parents=True, exist_ok=True)

        # Extract to a temporary directory first for validation
        with tempfile.TemporaryDirectory() as temp_dir:
            temp_path = Path(temp_dir)

            # Extract the .skill file with validation and protections.
            with zipfile.ZipFile(skill_file_path, "r") as zip_ref:
                _safe_extract_skill_archive(zip_ref, temp_path)

            skill_dir = _resolve_skill_dir_from_archive_root(temp_path)

            # Validate the skill
            is_valid, message, skill_name = _validate_skill_frontmatter(skill_dir)
            if not is_valid:
                raise HTTPException(status_code=400, detail=f"Invalid skill: {message}")

            if not skill_name:
                raise HTTPException(status_code=400, detail="Could not determine skill name")

            # Check if skill already exists
            target_dir = custom_skills_dir / skill_name
            if target_dir.exists():
                raise HTTPException(status_code=409, detail=f"Skill '{skill_name}' already exists. Please remove it first or use a different name.")

            # Move the skill directory to the custom skills directory
            shutil.copytree(skill_dir, target_dir)

            logger.info(f"Skill '{skill_name}' installed successfully to {target_dir}")
            return SkillInstallResponse(success=True, skill_name=skill_name, message=f"Skill '{skill_name}' installed successfully")

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to install skill: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to install skill: {str(e)}")
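And the matching install call, assuming a `.skill` archive has already been written into the thread's outputs directory; the thread ID and path below are placeholders.

```python
# Hypothetical install request; thread_id and path are placeholders.
import requests

resp = requests.post(
    "http://localhost:8000/api/skills/install",
    json={
        "thread_id": "abc123-def456",
        "path": "/mnt/user-data/outputs/my-skill.skill",
    },
    timeout=30,
)
print(resp.status_code, resp.json())  # 409 if the skill already exists
```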
132
backend/app/gateway/routers/suggestions.py
Normal file
@@ -0,0 +1,132 @@
import json
import logging

from fastapi import APIRouter
from pydantic import BaseModel, Field

from deerflow.models import create_chat_model

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/api", tags=["suggestions"])


class SuggestionMessage(BaseModel):
    role: str = Field(..., description="Message role: user|assistant")
    content: str = Field(..., description="Message content as plain text")


class SuggestionsRequest(BaseModel):
    messages: list[SuggestionMessage] = Field(..., description="Recent conversation messages")
    n: int = Field(default=3, ge=1, le=5, description="Number of suggestions to generate")
    model_name: str | None = Field(default=None, description="Optional model override")


class SuggestionsResponse(BaseModel):
    suggestions: list[str] = Field(default_factory=list, description="Suggested follow-up questions")


def _strip_markdown_code_fence(text: str) -> str:
    stripped = text.strip()
    if not stripped.startswith("```"):
        return stripped
    lines = stripped.splitlines()
    if len(lines) >= 3 and lines[0].startswith("```") and lines[-1].startswith("```"):
        return "\n".join(lines[1:-1]).strip()
    return stripped


def _parse_json_string_list(text: str) -> list[str] | None:
    candidate = _strip_markdown_code_fence(text)
    start = candidate.find("[")
    end = candidate.rfind("]")
    if start == -1 or end == -1 or end <= start:
        return None
    candidate = candidate[start : end + 1]
    try:
        data = json.loads(candidate)
    except Exception:
        return None
    if not isinstance(data, list):
        return None
    out: list[str] = []
    for item in data:
        if not isinstance(item, str):
            continue
        s = item.strip()
        if not s:
            continue
        out.append(s)
    return out
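A small sanity check of the two parsers above (meant to run in this module); it shows why both the code-fence stripping and the bracket slicing matter for typical model output.

```python
# Illustrative: model output wrapped in a markdown code fence still parses.
fence = "`" * 3
raw = f'{fence}json\n["What changed in the harness split?", "How do I upgrade config.yaml?"]\n{fence}'
print(_parse_json_string_list(raw))
# -> ['What changed in the harness split?', 'How do I upgrade config.yaml?']

# Non-JSON output degrades to None, and the endpoint falls back to an empty list.
print(_parse_json_string_list("Sorry, I cannot help with that."))  # -> None
```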
def _extract_response_text(content: object) -> str:
    if isinstance(content, str):
        return content
    if isinstance(content, list):
        parts: list[str] = []
        for block in content:
            if isinstance(block, str):
                parts.append(block)
            elif isinstance(block, dict) and block.get("type") == "text":
                text = block.get("text")
                if isinstance(text, str):
                    parts.append(text)
        return "\n".join(parts) if parts else ""
    if content is None:
        return ""
    return str(content)


def _format_conversation(messages: list[SuggestionMessage]) -> str:
    parts: list[str] = []
    for m in messages:
        role = m.role.strip().lower()
        if role in ("user", "human"):
            parts.append(f"User: {m.content.strip()}")
        elif role in ("assistant", "ai"):
            parts.append(f"Assistant: {m.content.strip()}")
        else:
            parts.append(f"{m.role}: {m.content.strip()}")
    return "\n".join(parts).strip()


@router.post(
    "/threads/{thread_id}/suggestions",
    response_model=SuggestionsResponse,
    summary="Generate Follow-up Questions",
    description="Generate short follow-up questions a user might ask next, based on recent conversation context.",
)
async def generate_suggestions(thread_id: str, request: SuggestionsRequest) -> SuggestionsResponse:
    if not request.messages:
        return SuggestionsResponse(suggestions=[])

    n = request.n
    conversation = _format_conversation(request.messages)
    if not conversation:
        return SuggestionsResponse(suggestions=[])

    prompt = (
        "You are generating follow-up questions to help the user continue the conversation.\n"
        f"Based on the conversation below, produce EXACTLY {n} short questions the user might ask next.\n"
        "Requirements:\n"
        "- Questions must be relevant to the conversation.\n"
        "- Questions must be written in the same language as the user.\n"
        "- Keep each question concise (ideally <= 20 words / <= 40 Chinese characters).\n"
        "- Do NOT include numbering, markdown, or any extra text.\n"
        "- Output MUST be a JSON array of strings only.\n\n"
        "Conversation:\n"
        f"{conversation}\n"
    )

    try:
        model = create_chat_model(name=request.model_name, thinking_enabled=False)
        response = model.invoke(prompt)
        raw = _extract_response_text(response.content)
        suggestions = _parse_json_string_list(raw) or []
        cleaned = [s.replace("\n", " ").strip() for s in suggestions if s.strip()]
        cleaned = cleaned[:n]
        return SuggestionsResponse(suggestions=cleaned)
    except Exception as exc:
        logger.exception("Failed to generate suggestions: thread_id=%s err=%s", thread_id, exc)
        return SuggestionsResponse(suggestions=[])
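Client-side, the endpoint can be called like this; the base URL and thread ID are placeholders, and note that `thread_id` is currently only used in the error log above.

```python
# Hypothetical request to the suggestions endpoint; values are placeholders.
import requests

payload = {
    "messages": [
        {"role": "user", "content": "Explain the harness/app split."},
        {"role": "assistant", "content": "The backend now ships as deerflow.* plus an app.* layer."},
    ],
    "n": 3,
}
resp = requests.post("http://localhost:8000/api/threads/abc123/suggestions", json=payload, timeout=30)
print(resp.json()["suggestions"])  # [] whenever the model call or parsing fails
```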
195
backend/app/gateway/routers/uploads.py
Normal file
@@ -0,0 +1,195 @@
"""Upload router for handling file uploads."""

import logging
from pathlib import Path

from fastapi import APIRouter, File, HTTPException, UploadFile
from pydantic import BaseModel

from deerflow.config.paths import VIRTUAL_PATH_PREFIX, get_paths
from deerflow.sandbox.sandbox_provider import get_sandbox_provider
from deerflow.utils.file_conversion import CONVERTIBLE_EXTENSIONS, convert_file_to_markdown

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/api/threads/{thread_id}/uploads", tags=["uploads"])


class UploadResponse(BaseModel):
    """Response model for file upload."""

    success: bool
    files: list[dict[str, str]]
    message: str


def get_uploads_dir(thread_id: str) -> Path:
    """Get the uploads directory for a thread.

    Args:
        thread_id: The thread ID.

    Returns:
        Path to the uploads directory.
    """
    base_dir = get_paths().sandbox_uploads_dir(thread_id)
    base_dir.mkdir(parents=True, exist_ok=True)
    return base_dir


@router.post("", response_model=UploadResponse)
async def upload_files(
    thread_id: str,
    files: list[UploadFile] = File(...),
) -> UploadResponse:
    """Upload multiple files to a thread's uploads directory.

    For PDF, PPT, Excel, and Word files, they will be converted to markdown using markitdown.
    All files (original and converted) are saved to /mnt/user-data/uploads.

    Args:
        thread_id: The thread ID to upload files to.
        files: List of files to upload.

    Returns:
        Upload response with success status and file information.
    """
    if not files:
        raise HTTPException(status_code=400, detail="No files provided")

    uploads_dir = get_uploads_dir(thread_id)
    paths = get_paths()
    uploaded_files = []

    sandbox_provider = get_sandbox_provider()
    sandbox_id = sandbox_provider.acquire(thread_id)
    sandbox = sandbox_provider.get(sandbox_id)

    for file in files:
        if not file.filename:
            continue

        try:
            # Normalize filename to prevent path traversal
            safe_filename = Path(file.filename).name
            if not safe_filename or safe_filename in {".", ".."} or "/" in safe_filename or "\\" in safe_filename:
                logger.warning(f"Skipping file with unsafe filename: {file.filename!r}")
                continue

            content = await file.read()
            file_path = uploads_dir / safe_filename
            file_path.write_bytes(content)

            # Build relative path from backend root
            relative_path = str(paths.sandbox_uploads_dir(thread_id) / safe_filename)
            virtual_path = f"{VIRTUAL_PATH_PREFIX}/uploads/{safe_filename}"

            # Keep local sandbox source of truth in thread-scoped host storage.
            # For non-local sandboxes, also sync to virtual path for runtime visibility.
            if sandbox_id != "local":
                sandbox.update_file(virtual_path, content)

            file_info = {
                "filename": safe_filename,
                "size": str(len(content)),
                "path": relative_path,  # Actual filesystem path (relative to backend/)
                "virtual_path": virtual_path,  # Path for Agent in sandbox
                "artifact_url": f"/api/threads/{thread_id}/artifacts/mnt/user-data/uploads/{safe_filename}",  # HTTP URL
            }

            logger.info(f"Saved file: {safe_filename} ({len(content)} bytes) to {relative_path}")

            # Check if file should be converted to markdown
            file_ext = file_path.suffix.lower()
            if file_ext in CONVERTIBLE_EXTENSIONS:
                md_path = await convert_file_to_markdown(file_path)
                if md_path:
                    md_relative_path = str(paths.sandbox_uploads_dir(thread_id) / md_path.name)
                    md_virtual_path = f"{VIRTUAL_PATH_PREFIX}/uploads/{md_path.name}"

                    if sandbox_id != "local":
                        sandbox.update_file(md_virtual_path, md_path.read_bytes())

                    file_info["markdown_file"] = md_path.name
                    file_info["markdown_path"] = md_relative_path
                    file_info["markdown_virtual_path"] = md_virtual_path
                    file_info["markdown_artifact_url"] = f"/api/threads/{thread_id}/artifacts/mnt/user-data/uploads/{md_path.name}"

            uploaded_files.append(file_info)

        except Exception as e:
            logger.error(f"Failed to upload {file.filename}: {e}")
            raise HTTPException(status_code=500, detail=f"Failed to upload {file.filename}: {str(e)}")

    return UploadResponse(
        success=True,
        files=uploaded_files,
        message=f"Successfully uploaded {len(uploaded_files)} file(s)",
    )
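A minimal multipart upload against this route might look like the sketch below (local gateway assumed); the `markdown_*` fields only appear when the extension is in `CONVERTIBLE_EXTENSIONS`.

```python
# Hypothetical multipart upload; base URL, thread ID, and file are placeholders.
import requests

url = "http://localhost:8000/api/threads/abc123/uploads"
with open("report.pdf", "rb") as fh:
    resp = requests.post(url, files={"files": ("report.pdf", fh, "application/pdf")}, timeout=60)

for info in resp.json()["files"]:
    print(info["virtual_path"], info.get("markdown_virtual_path"))
```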
@router.get("/list", response_model=dict)
|
||||
async def list_uploaded_files(thread_id: str) -> dict:
|
||||
"""List all files in a thread's uploads directory.
|
||||
|
||||
Args:
|
||||
thread_id: The thread ID to list files for.
|
||||
|
||||
Returns:
|
||||
Dictionary containing list of files with their metadata.
|
||||
"""
|
||||
uploads_dir = get_uploads_dir(thread_id)
|
||||
|
||||
if not uploads_dir.exists():
|
||||
return {"files": [], "count": 0}
|
||||
|
||||
files = []
|
||||
for file_path in sorted(uploads_dir.iterdir()):
|
||||
if file_path.is_file():
|
||||
stat = file_path.stat()
|
||||
relative_path = str(get_paths().sandbox_uploads_dir(thread_id) / file_path.name)
|
||||
files.append(
|
||||
{
|
||||
"filename": file_path.name,
|
||||
"size": stat.st_size,
|
||||
"path": relative_path, # Actual filesystem path
|
||||
"virtual_path": f"{VIRTUAL_PATH_PREFIX}/uploads/{file_path.name}", # Path for Agent in sandbox
|
||||
"artifact_url": f"/api/threads/{thread_id}/artifacts/mnt/user-data/uploads/{file_path.name}", # HTTP URL
|
||||
"extension": file_path.suffix,
|
||||
"modified": stat.st_mtime,
|
||||
}
|
||||
)
|
||||
|
||||
return {"files": files, "count": len(files)}
|
||||
|
||||
|
||||
@router.delete("/{filename}")
|
||||
async def delete_uploaded_file(thread_id: str, filename: str) -> dict:
|
||||
"""Delete a file from a thread's uploads directory.
|
||||
|
||||
Args:
|
||||
thread_id: The thread ID.
|
||||
filename: The filename to delete.
|
||||
|
||||
Returns:
|
||||
Success message.
|
||||
"""
|
||||
uploads_dir = get_uploads_dir(thread_id)
|
||||
file_path = uploads_dir / filename
|
||||
|
||||
if not file_path.exists():
|
||||
raise HTTPException(status_code=404, detail=f"File not found: {filename}")
|
||||
|
||||
# Security check: ensure the path is within the uploads directory
|
||||
try:
|
||||
file_path.resolve().relative_to(uploads_dir.resolve())
|
||||
except ValueError:
|
||||
raise HTTPException(status_code=403, detail="Access denied")
|
||||
|
||||
try:
|
||||
file_path.unlink()
|
||||
logger.info(f"Deleted file: {filename}")
|
||||
return {"success": True, "message": f"Deleted {filename}"}
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to delete {filename}: {e}")
|
||||
raise HTTPException(status_code=500, detail=f"Failed to delete {filename}: {str(e)}")
|
||||
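The companion list and delete calls, with the same placeholder base URL and thread ID as above.

```python
# Hypothetical list/delete calls; values are placeholders.
import requests

base = "http://localhost:8000/api/threads/abc123/uploads"

listing = requests.get(f"{base}/list", timeout=10).json()
print(listing["count"], [f["filename"] for f in listing["files"]])

# Delete one uploaded file by name; the route returns 404 if it does not exist.
print(requests.delete(f"{base}/report.pdf", timeout=10).json())
```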