Mirror of https://gitee.com/wanwujie/deer-flow (synced 2026-05-04 02:50:45 +08:00)
refactor: split backend into harness (deerflow.*) and app (app.*) (#1131)
* refactor: extract shared utils to break harness→app cross-layer imports

  Move _validate_skill_frontmatter to src/skills/validation.py and CONVERTIBLE_EXTENSIONS + convert_file_to_markdown to src/utils/file_conversion.py. This eliminates the two reverse dependencies from client.py (harness layer) into gateway/routers/ (app layer), preparing for the harness/app package split.

  Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* refactor: split backend/src into harness (deerflow.*) and app (app.*)

  Physically split the monolithic backend/src/ package into two layers:

  - **Harness** (`packages/harness/deerflow/`): publishable agent framework package with import prefix `deerflow.*`. Contains agents, sandbox, tools, models, MCP, skills, config, and all core infrastructure.
  - **App** (`app/`): unpublished application code with import prefix `app.*`. Contains gateway (FastAPI REST API) and channels (IM integrations).

  Key changes:
  - Move 13 harness modules to packages/harness/deerflow/ via git mv
  - Move gateway + channels to app/ via git mv
  - Rename all imports: src.* → deerflow.* (harness) / app.* (app layer)
  - Set up uv workspace with deerflow-harness as workspace member
  - Update langgraph.json, config.example.yaml, all scripts, Docker files
  - Add build-system (hatchling) to harness pyproject.toml
  - Add PYTHONPATH=. to gateway startup commands for app.* resolution
  - Update ruff.toml with known-first-party for import sorting
  - Update all documentation to reflect new directory structure

  Boundary rule enforced: harness code never imports from app. All 429 tests pass. Lint clean.

  Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* chore: add harness→app boundary check test and update docs

  Add test_harness_boundary.py that scans all Python files in packages/harness/deerflow/ and fails if any `from app.*` or `import app.*` statement is found. This enforces the architectural rule that the harness layer never depends on the app layer (a minimal sketch of such a check follows this message).

  Update CLAUDE.md to document the harness/app split architecture, import conventions, and the boundary enforcement test.

  Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* feat: add config versioning with auto-upgrade on startup

  When the config.example.yaml schema changes, developers' local config.yaml files can silently become outdated. This adds a config_version field and an auto-upgrade mechanism so breaking changes (like the src.* → deerflow.* renames) are applied automatically before services start.

  - Add config_version: 1 to config.example.yaml
  - Add startup version check warning in AppConfig.from_file()
  - Add scripts/config-upgrade.sh with migration registry for value replacements
  - Add `make config-upgrade` target
  - Auto-run config-upgrade in serve.sh and start-daemon.sh before starting services
  - Add config error hints in service failure messages

  Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix comments

* fix: update src.* import in test_sandbox_tools_security to deerflow.*

  Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: handle empty config and search parent dirs for config.example.yaml

  Address Copilot review comments on PR #1131:
  - Guard against yaml.safe_load() returning None for empty config files
  - Search parent directories for config.example.yaml instead of only looking next to config.yaml, fixing detection in common setups

  Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: correct skills root path depth and config_version type coercion

  - loader.py: fix get_skills_root_path() to use 5 parent levels (was 3); after the harness split the file lives at packages/harness/deerflow/skills/, so parent×3 resolved to backend/packages/harness/ instead of backend/
  - app_config.py: coerce config_version to int() before comparison in _check_config_version() to prevent a TypeError when YAML stores the value as a string (e.g. config_version: "1"); a sketch of this coercion also follows this message
  - tests: add regression tests for both fixes

  Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>

* fix: update test imports from src.* to deerflow.*/app.* after harness refactor

  Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
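For reference, here is a minimal, hypothetical sketch of the boundary check described in the `chore:` entry above. The actual test_harness_boundary.py in this commit may differ; the harness root path, regex, and test name here are illustrative assumptions, not copied from the repository.

```python
# Hypothetical sketch of the harness->app boundary check; paths and names
# are illustrative assumptions, not the actual test_harness_boundary.py.
import re
from pathlib import Path

HARNESS_ROOT = Path("packages/harness/deerflow")  # assumed harness location
# Matches "from app. ...", "from app import ...", "import app", "import app.x"
APP_IMPORT = re.compile(r"^\s*(?:from\s+app[.\s]|import\s+app(?:[.\s]|$))")


def test_harness_never_imports_app() -> None:
    offenders: list[str] = []
    for py_file in HARNESS_ROOT.rglob("*.py"):
        for lineno, line in enumerate(py_file.read_text().splitlines(), start=1):
            if APP_IMPORT.match(line):
                offenders.append(f"{py_file}:{lineno}: {line.strip()}")
    assert not offenders, "harness must not import from app:\n" + "\n".join(offenders)
```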
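Likewise, a minimal sketch of the config_version startup check described in the `feat:` and `fix:` entries above, combining the empty-file guard with the int() coercion. The function name and constant are assumptions; the real check lives in AppConfig/_check_config_version in app_config.py and may differ.

```python
# Hypothetical sketch of the startup config-version check; names are
# illustrative assumptions, not the actual app_config.py API.
import logging

logger = logging.getLogger(__name__)

CURRENT_CONFIG_VERSION = 1  # assumed to track config.example.yaml


def check_config_version(raw_config: dict | None) -> None:
    # yaml.safe_load() returns None for an empty config.yaml, so guard it.
    raw_config = raw_config or {}
    # Coerce to int so a YAML string like config_version: "1" compares cleanly.
    version = int(raw_config.get("config_version", 0))
    if version < CURRENT_CONFIG_VERSION:
        logger.warning(
            "config.yaml is at version %s but version %s is expected; "
            "run `make config-upgrade` to migrate.",
            version,
            CURRENT_CONFIG_VERSION,
        )
```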
@@ -0,0 +1,15 @@
from .aio_sandbox import AioSandbox
from .aio_sandbox_provider import AioSandboxProvider
from .backend import SandboxBackend
from .local_backend import LocalContainerBackend
from .remote_backend import RemoteSandboxBackend
from .sandbox_info import SandboxInfo

__all__ = [
    "AioSandbox",
    "AioSandboxProvider",
    "LocalContainerBackend",
    "RemoteSandboxBackend",
    "SandboxBackend",
    "SandboxInfo",
]
@@ -0,0 +1,128 @@
import base64
import logging

from agent_sandbox import Sandbox as AioSandboxClient

from deerflow.sandbox.sandbox import Sandbox

logger = logging.getLogger(__name__)


class AioSandbox(Sandbox):
    """Sandbox implementation using the agent-infra/sandbox Docker container.

    This sandbox connects to a running AIO sandbox container via HTTP API.
    """

    def __init__(self, id: str, base_url: str, home_dir: str | None = None):
        """Initialize the AIO sandbox.

        Args:
            id: Unique identifier for this sandbox instance.
            base_url: URL of the sandbox API (e.g., http://localhost:8080).
            home_dir: Home directory inside the sandbox. If None, will be fetched from the sandbox.
        """
        super().__init__(id)
        self._base_url = base_url
        self._client = AioSandboxClient(base_url=base_url, timeout=600)
        self._home_dir = home_dir

    @property
    def base_url(self) -> str:
        return self._base_url

    @property
    def home_dir(self) -> str:
        """Get the home directory inside the sandbox."""
        if self._home_dir is None:
            context = self._client.sandbox.get_context()
            self._home_dir = context.home_dir
        return self._home_dir

    def execute_command(self, command: str) -> str:
        """Execute a shell command in the sandbox.

        Args:
            command: The command to execute.

        Returns:
            The output of the command.
        """
        try:
            result = self._client.shell.exec_command(command=command)
            output = result.data.output if result.data else ""
            return output if output else "(no output)"
        except Exception as e:
            logger.error(f"Failed to execute command in sandbox: {e}")
            return f"Error: {e}"

    def read_file(self, path: str) -> str:
        """Read the content of a file in the sandbox.

        Args:
            path: The absolute path of the file to read.

        Returns:
            The content of the file.
        """
        try:
            result = self._client.file.read_file(file=path)
            return result.data.content if result.data else ""
        except Exception as e:
            logger.error(f"Failed to read file in sandbox: {e}")
            return f"Error: {e}"

    def list_dir(self, path: str, max_depth: int = 2) -> list[str]:
        """List the contents of a directory in the sandbox.

        Args:
            path: The absolute path of the directory to list.
            max_depth: The maximum depth to traverse. Default is 2.

        Returns:
            The contents of the directory.
        """
        try:
            # Use a find command to list files and directories up to max_depth,
            # capping the output at 500 entries.
            result = self._client.shell.exec_command(command=f"find {path} -maxdepth {max_depth} -type f -o -type d 2>/dev/null | head -500")
            output = result.data.output if result.data else ""
            if output:
                return [line.strip() for line in output.strip().split("\n") if line.strip()]
            return []
        except Exception as e:
            logger.error(f"Failed to list directory in sandbox: {e}")
            return []

    def write_file(self, path: str, content: str, append: bool = False) -> None:
        """Write content to a file in the sandbox.

        Args:
            path: The absolute path of the file to write to.
            content: The text content to write to the file.
            append: Whether to append the content to the file.
        """
        try:
            if append:
                # Read existing content first and append
                existing = self.read_file(path)
                if not existing.startswith("Error:"):
                    content = existing + content
            self._client.file.write_file(file=path, content=content)
        except Exception as e:
            logger.error(f"Failed to write file in sandbox: {e}")
            raise

    def update_file(self, path: str, content: bytes) -> None:
        """Update a file with binary content in the sandbox.

        Args:
            path: The absolute path of the file to update.
            content: The binary content to write to the file.
        """
        try:
            base64_content = base64.b64encode(content).decode("utf-8")
            self._client.file.write_file(file=path, content=base64_content, encoding="base64")
        except Exception as e:
            logger.error(f"Failed to update file in sandbox: {e}")
            raise
@@ -0,0 +1,609 @@
"""AIO Sandbox Provider — orchestrates sandbox lifecycle with pluggable backends.

This provider composes:
- SandboxBackend: how sandboxes are provisioned (local container vs remote/K8s)

The provider itself handles:
- In-process caching for fast repeated access
- Idle timeout management
- Graceful shutdown with signal handling
- Mount computation (thread-specific, skills)
"""

import atexit
import fcntl
import hashlib
import logging
import os
import signal
import threading
import time
import uuid

from deerflow.config import get_app_config
from deerflow.config.paths import VIRTUAL_PATH_PREFIX, Paths, get_paths
from deerflow.sandbox.sandbox import Sandbox
from deerflow.sandbox.sandbox_provider import SandboxProvider

from .aio_sandbox import AioSandbox
from .backend import SandboxBackend, wait_for_sandbox_ready
from .local_backend import LocalContainerBackend
from .remote_backend import RemoteSandboxBackend
from .sandbox_info import SandboxInfo

logger = logging.getLogger(__name__)

# Default configuration
DEFAULT_IMAGE = "enterprise-public-cn-beijing.cr.volces.com/vefaas-public/all-in-one-sandbox:latest"
DEFAULT_PORT = 8080
DEFAULT_CONTAINER_PREFIX = "deer-flow-sandbox"
DEFAULT_IDLE_TIMEOUT = 600  # 10 minutes in seconds
DEFAULT_REPLICAS = 3  # Maximum concurrent sandbox containers
IDLE_CHECK_INTERVAL = 60  # Check every 60 seconds


class AioSandboxProvider(SandboxProvider):
    """Sandbox provider that manages containers running the AIO sandbox.

    Architecture:
        This provider composes a SandboxBackend (how to provision), enabling:
        - Local Docker/Apple Container mode (auto-start containers)
        - Remote/K8s mode (connect to pre-existing sandbox URL)

    Configuration options in config.yaml under sandbox:
        use: deerflow.community.aio_sandbox:AioSandboxProvider
        image: <container image>
        port: 8080  # Base port for local containers
        container_prefix: deer-flow-sandbox
        idle_timeout: 600  # Idle timeout in seconds (0 to disable)
        replicas: 3  # Max concurrent sandbox containers (LRU eviction when exceeded)
        mounts:  # Volume mounts for local containers
          - host_path: /path/on/host
            container_path: /path/in/container
            read_only: false
        environment:  # Environment variables for containers
          NODE_ENV: production
          API_KEY: $MY_API_KEY
    """

    def __init__(self):
        self._lock = threading.Lock()
        self._sandboxes: dict[str, AioSandbox] = {}  # sandbox_id -> AioSandbox instance
        self._sandbox_infos: dict[str, SandboxInfo] = {}  # sandbox_id -> SandboxInfo (for destroy)
        self._thread_sandboxes: dict[str, str] = {}  # thread_id -> sandbox_id
        self._thread_locks: dict[str, threading.Lock] = {}  # thread_id -> in-process lock
        self._last_activity: dict[str, float] = {}  # sandbox_id -> last activity timestamp
        # Warm pool: released sandboxes whose containers are still running.
        # Maps sandbox_id -> (SandboxInfo, release_timestamp).
        # Containers here can be reclaimed quickly (no cold-start) or destroyed
        # when replicas capacity is exhausted.
        self._warm_pool: dict[str, tuple[SandboxInfo, float]] = {}
        self._shutdown_called = False
        self._idle_checker_stop = threading.Event()
        self._idle_checker_thread: threading.Thread | None = None

        self._config = self._load_config()
        self._backend: SandboxBackend = self._create_backend()

        # Register shutdown handler
        atexit.register(self.shutdown)
        self._register_signal_handlers()

        # Start idle checker if enabled
        if self._config.get("idle_timeout", DEFAULT_IDLE_TIMEOUT) > 0:
            self._start_idle_checker()

    # ── Factory methods ──────────────────────────────────────────────────

    def _create_backend(self) -> SandboxBackend:
        """Create the appropriate backend based on configuration.

        Selection logic (checked in order):
        1. ``provisioner_url`` set → RemoteSandboxBackend (provisioner mode)
           Provisioner dynamically creates Pods + Services in k3s.
        2. Default → LocalContainerBackend (local mode)
           Local provider manages container lifecycle directly (start/stop).
        """
        provisioner_url = self._config.get("provisioner_url")
        if provisioner_url:
            logger.info(f"Using remote sandbox backend with provisioner at {provisioner_url}")
            return RemoteSandboxBackend(provisioner_url=provisioner_url)

        logger.info("Using local container sandbox backend")
        return LocalContainerBackend(
            image=self._config["image"],
            base_port=self._config["port"],
            container_prefix=self._config["container_prefix"],
            config_mounts=self._config["mounts"],
            environment=self._config["environment"],
        )

    # ── Configuration ────────────────────────────────────────────────────

    def _load_config(self) -> dict:
        """Load sandbox configuration from app config."""
        config = get_app_config()
        sandbox_config = config.sandbox

        idle_timeout = getattr(sandbox_config, "idle_timeout", None)
        replicas = getattr(sandbox_config, "replicas", None)

        return {
            "image": sandbox_config.image or DEFAULT_IMAGE,
            "port": sandbox_config.port or DEFAULT_PORT,
            "container_prefix": sandbox_config.container_prefix or DEFAULT_CONTAINER_PREFIX,
            "idle_timeout": idle_timeout if idle_timeout is not None else DEFAULT_IDLE_TIMEOUT,
            "replicas": replicas if replicas is not None else DEFAULT_REPLICAS,
            "mounts": sandbox_config.mounts or [],
            "environment": self._resolve_env_vars(sandbox_config.environment or {}),
            # provisioner URL for dynamic pod management (e.g. http://provisioner:8002)
            "provisioner_url": getattr(sandbox_config, "provisioner_url", None) or "",
        }

    @staticmethod
    def _resolve_env_vars(env_config: dict[str, str]) -> dict[str, str]:
        """Resolve environment variable references (values starting with $)."""
        resolved = {}
        for key, value in env_config.items():
            if isinstance(value, str) and value.startswith("$"):
                env_name = value[1:]
                resolved[key] = os.environ.get(env_name, "")
            else:
                resolved[key] = str(value)
        return resolved

    # ── Deterministic ID ─────────────────────────────────────────────────

    @staticmethod
    def _deterministic_sandbox_id(thread_id: str) -> str:
        """Generate a deterministic sandbox ID from a thread ID.

        Ensures all processes derive the same sandbox_id for a given thread,
        enabling cross-process sandbox discovery without shared memory.
        """
        return hashlib.sha256(thread_id.encode()).hexdigest()[:8]

    # ── Mount helpers ────────────────────────────────────────────────────

    def _get_extra_mounts(self, thread_id: str | None) -> list[tuple[str, str, bool]]:
        """Collect all extra mounts for a sandbox (thread-specific + skills)."""
        mounts: list[tuple[str, str, bool]] = []

        if thread_id:
            mounts.extend(self._get_thread_mounts(thread_id))
            logger.info(f"Adding thread mounts for thread {thread_id}: {mounts}")

        skills_mount = self._get_skills_mount()
        if skills_mount:
            mounts.append(skills_mount)
            logger.info(f"Adding skills mount: {skills_mount}")

        return mounts

    @staticmethod
    def _get_thread_mounts(thread_id: str) -> list[tuple[str, str, bool]]:
        """Get volume mounts for a thread's data directories.

        Creates directories if they don't exist (lazy initialization).
        Mount sources use host_base_dir so that when running inside Docker with a
        mounted Docker socket (DooD), the host Docker daemon can resolve the paths.
        """
        paths = get_paths()
        paths.ensure_thread_dirs(thread_id)

        # host_paths resolves to the host-side base dir when DEER_FLOW_HOST_BASE_DIR
        # is set, otherwise falls back to the container's own base dir (native mode).
        host_paths = Paths(base_dir=paths.host_base_dir)

        return [
            (str(host_paths.sandbox_work_dir(thread_id)), f"{VIRTUAL_PATH_PREFIX}/workspace", False),
            (str(host_paths.sandbox_uploads_dir(thread_id)), f"{VIRTUAL_PATH_PREFIX}/uploads", False),
            (str(host_paths.sandbox_outputs_dir(thread_id)), f"{VIRTUAL_PATH_PREFIX}/outputs", False),
        ]

    @staticmethod
    def _get_skills_mount() -> tuple[str, str, bool] | None:
        """Get the skills directory mount configuration.

        Mount source uses DEER_FLOW_HOST_SKILLS_PATH when running inside Docker (DooD)
        so the host Docker daemon can resolve the path.
        """
        try:
            config = get_app_config()
            skills_path = config.skills.get_skills_path()
            container_path = config.skills.container_path

            if skills_path.exists():
                # When running inside Docker with DooD, use host-side skills path.
                host_skills = os.environ.get("DEER_FLOW_HOST_SKILLS_PATH") or str(skills_path)
                return (host_skills, container_path, True)  # Read-only for security
        except Exception as e:
            logger.warning(f"Could not setup skills mount: {e}")
        return None

    # ── Idle timeout management ──────────────────────────────────────────

    def _start_idle_checker(self) -> None:
        """Start the background thread that checks for idle sandboxes."""
        self._idle_checker_thread = threading.Thread(
            target=self._idle_checker_loop,
            name="sandbox-idle-checker",
            daemon=True,
        )
        self._idle_checker_thread.start()
        logger.info(f"Started idle checker thread (timeout: {self._config.get('idle_timeout', DEFAULT_IDLE_TIMEOUT)}s)")

    def _idle_checker_loop(self) -> None:
        idle_timeout = self._config.get("idle_timeout", DEFAULT_IDLE_TIMEOUT)
        while not self._idle_checker_stop.wait(timeout=IDLE_CHECK_INTERVAL):
            try:
                self._cleanup_idle_sandboxes(idle_timeout)
            except Exception as e:
                logger.error(f"Error in idle checker loop: {e}")

    def _cleanup_idle_sandboxes(self, idle_timeout: float) -> None:
        current_time = time.time()
        active_to_destroy = []
        warm_to_destroy: list[tuple[str, SandboxInfo]] = []

        with self._lock:
            # Active sandboxes: tracked via _last_activity
            for sandbox_id, last_activity in self._last_activity.items():
                idle_duration = current_time - last_activity
                if idle_duration > idle_timeout:
                    active_to_destroy.append(sandbox_id)
                    logger.info(f"Sandbox {sandbox_id} idle for {idle_duration:.1f}s, marking for destroy")

            # Warm pool: tracked via release_timestamp stored in _warm_pool
            for sandbox_id, (info, release_ts) in list(self._warm_pool.items()):
                warm_duration = current_time - release_ts
                if warm_duration > idle_timeout:
                    warm_to_destroy.append((sandbox_id, info))
                    del self._warm_pool[sandbox_id]
                    logger.info(f"Warm-pool sandbox {sandbox_id} idle for {warm_duration:.1f}s, marking for destroy")

        # Destroy active sandboxes (re-verify still idle before acting)
        for sandbox_id in active_to_destroy:
            try:
                # Re-verify the sandbox is still idle under the lock before destroying.
                # Between the snapshot above and here, the sandbox may have been
                # re-acquired (last_activity updated) or already released/destroyed.
                with self._lock:
                    last_activity = self._last_activity.get(sandbox_id)
                    if last_activity is None:
                        # Already released or destroyed by another path — skip.
                        logger.info(f"Sandbox {sandbox_id} already gone before idle destroy, skipping")
                        continue
                    if (time.time() - last_activity) < idle_timeout:
                        # Re-acquired (activity updated) since the snapshot — skip.
                        logger.info(f"Sandbox {sandbox_id} was re-acquired before idle destroy, skipping")
                        continue
                logger.info(f"Destroying idle sandbox {sandbox_id}")
                self.destroy(sandbox_id)
            except Exception as e:
                logger.error(f"Failed to destroy idle sandbox {sandbox_id}: {e}")

        # Destroy warm-pool sandboxes (already removed from _warm_pool under lock above)
        for sandbox_id, info in warm_to_destroy:
            try:
                self._backend.destroy(info)
                logger.info(f"Destroyed idle warm-pool sandbox {sandbox_id}")
            except Exception as e:
                logger.error(f"Failed to destroy idle warm-pool sandbox {sandbox_id}: {e}")

    # ── Signal handling ──────────────────────────────────────────────────

    def _register_signal_handlers(self) -> None:
        """Register signal handlers for graceful shutdown."""
        self._original_sigterm = signal.getsignal(signal.SIGTERM)
        self._original_sigint = signal.getsignal(signal.SIGINT)

        def signal_handler(signum, frame):
            self.shutdown()
            original = self._original_sigterm if signum == signal.SIGTERM else self._original_sigint
            if callable(original):
                original(signum, frame)
            elif original == signal.SIG_DFL:
                signal.signal(signum, signal.SIG_DFL)
                signal.raise_signal(signum)

        try:
            signal.signal(signal.SIGTERM, signal_handler)
            signal.signal(signal.SIGINT, signal_handler)
        except ValueError:
            logger.debug("Could not register signal handlers (not main thread)")

    # ── Thread locking (in-process) ──────────────────────────────────────

    def _get_thread_lock(self, thread_id: str) -> threading.Lock:
        """Get or create an in-process lock for a specific thread_id."""
        with self._lock:
            if thread_id not in self._thread_locks:
                self._thread_locks[thread_id] = threading.Lock()
            return self._thread_locks[thread_id]

    # ── Core: acquire / get / release / shutdown ─────────────────────────

    def acquire(self, thread_id: str | None = None) -> str:
        """Acquire a sandbox environment and return its ID.

        For the same thread_id, this method will return the same sandbox_id
        across multiple turns, multiple processes, and (with shared storage)
        multiple pods.

        Thread-safe with both in-process and cross-process locking.

        Args:
            thread_id: Optional thread ID for thread-specific configurations.

        Returns:
            The ID of the acquired sandbox environment.
        """
        if thread_id:
            thread_lock = self._get_thread_lock(thread_id)
            with thread_lock:
                return self._acquire_internal(thread_id)
        else:
            return self._acquire_internal(thread_id)

    def _acquire_internal(self, thread_id: str | None) -> str:
        """Internal sandbox acquisition with two-layer consistency.

        Layer 1: In-process cache (fastest, covers same-process repeated access)
        Layer 2: Backend discovery (covers containers started by other processes;
            sandbox_id is deterministic from thread_id so no shared state file
            is needed — any process can derive the same container name)
        """
        # ── Layer 1: In-process cache (fast path) ──
        if thread_id:
            with self._lock:
                if thread_id in self._thread_sandboxes:
                    existing_id = self._thread_sandboxes[thread_id]
                    if existing_id in self._sandboxes:
                        logger.info(f"Reusing in-process sandbox {existing_id} for thread {thread_id}")
                        self._last_activity[existing_id] = time.time()
                        return existing_id
                    else:
                        del self._thread_sandboxes[thread_id]

        # Deterministic ID for thread-specific, random for anonymous
        sandbox_id = self._deterministic_sandbox_id(thread_id) if thread_id else str(uuid.uuid4())[:8]

        # ── Layer 1.5: Warm pool (container still running, no cold-start) ──
        if thread_id:
            with self._lock:
                if sandbox_id in self._warm_pool:
                    info, _ = self._warm_pool.pop(sandbox_id)
                    sandbox = AioSandbox(id=sandbox_id, base_url=info.sandbox_url)
                    self._sandboxes[sandbox_id] = sandbox
                    self._sandbox_infos[sandbox_id] = info
                    self._last_activity[sandbox_id] = time.time()
                    self._thread_sandboxes[thread_id] = sandbox_id
                    logger.info(f"Reclaimed warm-pool sandbox {sandbox_id} for thread {thread_id} at {info.sandbox_url}")
                    return sandbox_id

        # ── Layer 2: Backend discovery + create (protected by cross-process lock) ──
        # Use a file lock so that two processes racing to create the same sandbox
        # for the same thread_id serialize here: the second process will discover
        # the container started by the first instead of hitting a name-conflict.
        if thread_id:
            return self._discover_or_create_with_lock(thread_id, sandbox_id)

        return self._create_sandbox(thread_id, sandbox_id)

    def _discover_or_create_with_lock(self, thread_id: str, sandbox_id: str) -> str:
        """Discover an existing sandbox or create a new one under a cross-process file lock.

        The file lock serializes concurrent sandbox creation for the same thread_id
        across multiple processes, preventing container-name conflicts.
        """
        paths = get_paths()
        paths.ensure_thread_dirs(thread_id)
        lock_path = paths.thread_dir(thread_id) / f"{sandbox_id}.lock"

        with open(lock_path, "a") as lock_file:
            try:
                fcntl.flock(lock_file, fcntl.LOCK_EX)
                # Re-check in-process caches under the file lock in case another
                # thread in this process won the race while we were waiting.
                with self._lock:
                    if thread_id in self._thread_sandboxes:
                        existing_id = self._thread_sandboxes[thread_id]
                        if existing_id in self._sandboxes:
                            logger.info(f"Reusing in-process sandbox {existing_id} for thread {thread_id} (post-lock check)")
                            self._last_activity[existing_id] = time.time()
                            return existing_id
                    if sandbox_id in self._warm_pool:
                        info, _ = self._warm_pool.pop(sandbox_id)
                        sandbox = AioSandbox(id=sandbox_id, base_url=info.sandbox_url)
                        self._sandboxes[sandbox_id] = sandbox
                        self._sandbox_infos[sandbox_id] = info
                        self._last_activity[sandbox_id] = time.time()
                        self._thread_sandboxes[thread_id] = sandbox_id
                        logger.info(f"Reclaimed warm-pool sandbox {sandbox_id} for thread {thread_id} (post-lock check)")
                        return sandbox_id

                # Backend discovery: another process may have created the container.
                discovered = self._backend.discover(sandbox_id)
                if discovered is not None:
                    sandbox = AioSandbox(id=discovered.sandbox_id, base_url=discovered.sandbox_url)
                    with self._lock:
                        self._sandboxes[discovered.sandbox_id] = sandbox
                        self._sandbox_infos[discovered.sandbox_id] = discovered
                        self._last_activity[discovered.sandbox_id] = time.time()
                        self._thread_sandboxes[thread_id] = discovered.sandbox_id
                    logger.info(f"Discovered existing sandbox {discovered.sandbox_id} for thread {thread_id} at {discovered.sandbox_url}")
                    return discovered.sandbox_id

                return self._create_sandbox(thread_id, sandbox_id)
            finally:
                fcntl.flock(lock_file, fcntl.LOCK_UN)

    def _evict_oldest_warm(self) -> str | None:
        """Destroy the oldest container in the warm pool to free capacity.

        Returns:
            The evicted sandbox_id, or None if warm pool is empty.
        """
        with self._lock:
            if not self._warm_pool:
                return None
            oldest_id = min(self._warm_pool, key=lambda sid: self._warm_pool[sid][1])
            info, _ = self._warm_pool.pop(oldest_id)

        try:
            self._backend.destroy(info)
            logger.info(f"Destroyed warm-pool sandbox {oldest_id}")
        except Exception as e:
            logger.error(f"Failed to destroy warm-pool sandbox {oldest_id}: {e}")
            return None
        return oldest_id

    def _create_sandbox(self, thread_id: str | None, sandbox_id: str) -> str:
        """Create a new sandbox via the backend.

        Args:
            thread_id: Optional thread ID.
            sandbox_id: The sandbox ID to use.

        Returns:
            The sandbox_id.

        Raises:
            RuntimeError: If sandbox creation or readiness check fails.
        """
        extra_mounts = self._get_extra_mounts(thread_id)

        # Enforce replicas: only warm-pool containers count toward eviction budget.
        # Active sandboxes are in use by live threads and must not be forcibly stopped.
        replicas = self._config.get("replicas", DEFAULT_REPLICAS)
        with self._lock:
            total = len(self._sandboxes) + len(self._warm_pool)
        if total >= replicas:
            evicted = self._evict_oldest_warm()
            if evicted:
                logger.info(f"Evicted warm-pool sandbox {evicted} to stay within replicas={replicas}")
            else:
                # All slots are occupied by active sandboxes — proceed anyway and log.
                # The replicas limit is a soft cap; we never forcibly stop a container
                # that is actively serving a thread.
                logger.warning(f"All {replicas} replica slots are in active use; creating sandbox {sandbox_id} beyond the soft limit")

        info = self._backend.create(thread_id, sandbox_id, extra_mounts=extra_mounts or None)

        # Wait for sandbox to be ready
        if not wait_for_sandbox_ready(info.sandbox_url, timeout=60):
            self._backend.destroy(info)
            raise RuntimeError(f"Sandbox {sandbox_id} failed to become ready within timeout at {info.sandbox_url}")

        sandbox = AioSandbox(id=sandbox_id, base_url=info.sandbox_url)
        with self._lock:
            self._sandboxes[sandbox_id] = sandbox
            self._sandbox_infos[sandbox_id] = info
            self._last_activity[sandbox_id] = time.time()
            if thread_id:
                self._thread_sandboxes[thread_id] = sandbox_id

        logger.info(f"Created sandbox {sandbox_id} for thread {thread_id} at {info.sandbox_url}")
        return sandbox_id

    def get(self, sandbox_id: str) -> Sandbox | None:
        """Get a sandbox by ID. Updates last activity timestamp.

        Args:
            sandbox_id: The ID of the sandbox.

        Returns:
            The sandbox instance if found, None otherwise.
        """
        with self._lock:
            sandbox = self._sandboxes.get(sandbox_id)
            if sandbox is not None:
                self._last_activity[sandbox_id] = time.time()
            return sandbox

    def release(self, sandbox_id: str) -> None:
        """Release a sandbox from active use into the warm pool.

        The container is kept running so it can be reclaimed quickly by the same
        thread on its next turn without a cold-start. The container will only be
        stopped when the replicas limit forces eviction or during shutdown.

        Args:
            sandbox_id: The ID of the sandbox to release.
        """
        info = None
        thread_ids_to_remove: list[str] = []

        with self._lock:
            self._sandboxes.pop(sandbox_id, None)
            info = self._sandbox_infos.pop(sandbox_id, None)
            thread_ids_to_remove = [tid for tid, sid in self._thread_sandboxes.items() if sid == sandbox_id]
            for tid in thread_ids_to_remove:
                del self._thread_sandboxes[tid]
            self._last_activity.pop(sandbox_id, None)
            # Park in warm pool — container keeps running
            if info and sandbox_id not in self._warm_pool:
                self._warm_pool[sandbox_id] = (info, time.time())

        logger.info(f"Released sandbox {sandbox_id} to warm pool (container still running)")

    def destroy(self, sandbox_id: str) -> None:
        """Destroy a sandbox: stop the container and free all resources.

        Unlike release(), this actually stops the container. Use this for
        explicit cleanup, capacity-driven eviction, or shutdown.

        Args:
            sandbox_id: The ID of the sandbox to destroy.
        """
        info = None
        thread_ids_to_remove: list[str] = []

        with self._lock:
            self._sandboxes.pop(sandbox_id, None)
            info = self._sandbox_infos.pop(sandbox_id, None)
            thread_ids_to_remove = [tid for tid, sid in self._thread_sandboxes.items() if sid == sandbox_id]
            for tid in thread_ids_to_remove:
                del self._thread_sandboxes[tid]
            self._last_activity.pop(sandbox_id, None)
            # Also pull from warm pool if it was parked there
            if info is None and sandbox_id in self._warm_pool:
                info, _ = self._warm_pool.pop(sandbox_id)
            else:
                self._warm_pool.pop(sandbox_id, None)

        if info:
            self._backend.destroy(info)
            logger.info(f"Destroyed sandbox {sandbox_id}")

    def shutdown(self) -> None:
        """Shutdown all sandboxes. Thread-safe and idempotent."""
        with self._lock:
            if self._shutdown_called:
                return
            self._shutdown_called = True
            sandbox_ids = list(self._sandboxes.keys())
            warm_items = list(self._warm_pool.items())
            self._warm_pool.clear()

        # Stop idle checker
        self._idle_checker_stop.set()
        if self._idle_checker_thread is not None and self._idle_checker_thread.is_alive():
            self._idle_checker_thread.join(timeout=5)
            logger.info("Stopped idle checker thread")

        logger.info(f"Shutting down {len(sandbox_ids)} active + {len(warm_items)} warm-pool sandbox(es)")

        for sandbox_id in sandbox_ids:
            try:
                self.destroy(sandbox_id)
            except Exception as e:
                logger.error(f"Failed to destroy sandbox {sandbox_id} during shutdown: {e}")

        for sandbox_id, (info, _) in warm_items:
            try:
                self._backend.destroy(info)
                logger.info(f"Destroyed warm-pool sandbox {sandbox_id} during shutdown")
            except Exception as e:
                logger.error(f"Failed to destroy warm-pool sandbox {sandbox_id} during shutdown: {e}")
@@ -0,0 +1,98 @@
"""Abstract base class for sandbox provisioning backends."""

from __future__ import annotations

import logging
import time
from abc import ABC, abstractmethod

import requests

from .sandbox_info import SandboxInfo

logger = logging.getLogger(__name__)


def wait_for_sandbox_ready(sandbox_url: str, timeout: int = 30) -> bool:
    """Poll sandbox health endpoint until ready or timeout.

    Args:
        sandbox_url: URL of the sandbox (e.g. http://k3s:30001).
        timeout: Maximum time to wait in seconds.

    Returns:
        True if sandbox is ready, False otherwise.
    """
    start_time = time.time()
    while time.time() - start_time < timeout:
        try:
            response = requests.get(f"{sandbox_url}/v1/sandbox", timeout=5)
            if response.status_code == 200:
                return True
        except requests.exceptions.RequestException:
            pass
        time.sleep(1)
    return False


class SandboxBackend(ABC):
    """Abstract base for sandbox provisioning backends.

    Two implementations:
    - LocalContainerBackend: starts Docker/Apple Container locally, manages ports
    - RemoteSandboxBackend: connects to a pre-existing URL (K8s service, external)
    """

    @abstractmethod
    def create(self, thread_id: str, sandbox_id: str, extra_mounts: list[tuple[str, str, bool]] | None = None) -> SandboxInfo:
        """Create/provision a new sandbox.

        Args:
            thread_id: Thread ID for which the sandbox is being created. Useful for backends that want to organize sandboxes by thread.
            sandbox_id: Deterministic sandbox identifier.
            extra_mounts: Additional volume mounts as (host_path, container_path, read_only) tuples.
                Ignored by backends that don't manage containers (e.g., remote).

        Returns:
            SandboxInfo with connection details.
        """
        ...

    @abstractmethod
    def destroy(self, info: SandboxInfo) -> None:
        """Destroy/cleanup a sandbox and release its resources.

        Args:
            info: The sandbox metadata to destroy.
        """
        ...

    @abstractmethod
    def is_alive(self, info: SandboxInfo) -> bool:
        """Quick check whether a sandbox is still alive.

        This should be a lightweight check (e.g., container inspect)
        rather than a full health check.

        Args:
            info: The sandbox metadata to check.

        Returns:
            True if the sandbox appears to be alive.
        """
        ...

    @abstractmethod
    def discover(self, sandbox_id: str) -> SandboxInfo | None:
        """Try to discover an existing sandbox by its deterministic ID.

        Used for cross-process recovery: when another process started a sandbox,
        this process can discover it by the deterministic container name or URL.

        Args:
            sandbox_id: The deterministic sandbox ID to look for.

        Returns:
            SandboxInfo if found and healthy, None otherwise.
        """
        ...
@@ -0,0 +1,327 @@
"""Local container backend for sandbox provisioning.

Manages sandbox containers using Docker or Apple Container on the local machine.
Handles container lifecycle, port allocation, and cross-process container discovery.
"""

from __future__ import annotations

import logging
import os
import subprocess

from deerflow.utils.network import get_free_port, release_port

from .backend import SandboxBackend, wait_for_sandbox_ready
from .sandbox_info import SandboxInfo

logger = logging.getLogger(__name__)


class LocalContainerBackend(SandboxBackend):
    """Backend that manages sandbox containers locally using Docker or Apple Container.

    On macOS, automatically prefers Apple Container if available, otherwise falls back to Docker.
    On other platforms, uses Docker.

    Features:
    - Deterministic container naming for cross-process discovery
    - Port allocation with thread-safe utilities
    - Container lifecycle management (start/stop with --rm)
    - Support for volume mounts and environment variables
    """

    def __init__(
        self,
        *,
        image: str,
        base_port: int,
        container_prefix: str,
        config_mounts: list,
        environment: dict[str, str],
    ):
        """Initialize the local container backend.

        Args:
            image: Container image to use.
            base_port: Base port number to start searching for free ports.
            container_prefix: Prefix for container names (e.g., "deer-flow-sandbox").
            config_mounts: Volume mount configurations from config (list of VolumeMountConfig).
            environment: Environment variables to inject into containers.
        """
        self._image = image
        self._base_port = base_port
        self._container_prefix = container_prefix
        self._config_mounts = config_mounts
        self._environment = environment
        self._runtime = self._detect_runtime()

    @property
    def runtime(self) -> str:
        """The detected container runtime ("docker" or "container")."""
        return self._runtime

    def _detect_runtime(self) -> str:
        """Detect which container runtime to use.

        On macOS, prefer Apple Container if available, otherwise fall back to Docker.
        On other platforms, use Docker.

        Returns:
            "container" for Apple Container, "docker" for Docker.
        """
        import platform

        if platform.system() == "Darwin":
            try:
                result = subprocess.run(
                    ["container", "--version"],
                    capture_output=True,
                    text=True,
                    check=True,
                    timeout=5,
                )
                logger.info(f"Detected Apple Container: {result.stdout.strip()}")
                return "container"
            except (FileNotFoundError, subprocess.CalledProcessError, subprocess.TimeoutExpired):
                logger.info("Apple Container not available, falling back to Docker")

        return "docker"

    # ── SandboxBackend interface ──────────────────────────────────────────

    def create(self, thread_id: str, sandbox_id: str, extra_mounts: list[tuple[str, str, bool]] | None = None) -> SandboxInfo:
        """Start a new container and return its connection info.

        Args:
            thread_id: Thread ID for which the sandbox is being created. Useful for backends that want to organize sandboxes by thread.
            sandbox_id: Deterministic sandbox identifier (used in container name).
            extra_mounts: Additional volume mounts as (host_path, container_path, read_only) tuples.

        Returns:
            SandboxInfo with container details.

        Raises:
            RuntimeError: If the container fails to start.
        """
        container_name = f"{self._container_prefix}-{sandbox_id}"

        # Retry loop: if Docker rejects the port (e.g. a stale container still
        # holds the binding after a process restart), skip that port and try the
        # next one. The socket-bind check in get_free_port mirrors Docker's
        # 0.0.0.0 bind, but Docker's port-release can be slightly asynchronous,
        # so a reactive fallback here ensures we always make progress.
        _next_start = self._base_port
        container_id: str | None = None
        port: int = 0
        for _attempt in range(10):
            port = get_free_port(start_port=_next_start)
            try:
                container_id = self._start_container(container_name, port, extra_mounts)
                break
            except RuntimeError as exc:
                release_port(port)
                err = str(exc)
                err_lower = err.lower()
                # Port already bound: skip this port and retry with the next one.
                if "port is already allocated" in err or "address already in use" in err_lower:
                    logger.warning(f"Port {port} rejected by Docker (already allocated), retrying with next port")
                    _next_start = port + 1
                    continue
                # Container-name conflict: another process may have already started
                # the deterministic sandbox container for this sandbox_id. Try to
                # discover and adopt the existing container instead of failing.
                if "is already in use by container" in err_lower or "conflict. the container name" in err_lower:
                    logger.warning(f"Container name {container_name} already in use, attempting to discover existing sandbox instance")
                    existing = self.discover(sandbox_id)
                    if existing is not None:
                        return existing
                raise
        else:
            raise RuntimeError("Could not start sandbox container: all candidate ports are already allocated by Docker")

        # When running inside Docker (DooD), sandbox containers are reachable via
        # host.docker.internal rather than localhost (they run on the host daemon).
        sandbox_host = os.environ.get("DEER_FLOW_SANDBOX_HOST", "localhost")
        return SandboxInfo(
            sandbox_id=sandbox_id,
            sandbox_url=f"http://{sandbox_host}:{port}",
            container_name=container_name,
            container_id=container_id,
        )

    def destroy(self, info: SandboxInfo) -> None:
        """Stop the container and release its port."""
        if info.container_id:
            self._stop_container(info.container_id)
        # Extract port from sandbox_url for release
        try:
            from urllib.parse import urlparse

            port = urlparse(info.sandbox_url).port
            if port:
                release_port(port)
        except Exception:
            pass

    def is_alive(self, info: SandboxInfo) -> bool:
        """Check if the container is still running (lightweight, no HTTP)."""
        if info.container_name:
            return self._is_container_running(info.container_name)
        return False

    def discover(self, sandbox_id: str) -> SandboxInfo | None:
        """Discover an existing container by its deterministic name.

        Checks if a container with the expected name is running, retrieves its
        port, and verifies it responds to health checks.

        Args:
            sandbox_id: The deterministic sandbox ID (determines container name).

        Returns:
            SandboxInfo if container found and healthy, None otherwise.
        """
        container_name = f"{self._container_prefix}-{sandbox_id}"

        if not self._is_container_running(container_name):
            return None

        port = self._get_container_port(container_name)
        if port is None:
            return None

        sandbox_host = os.environ.get("DEER_FLOW_SANDBOX_HOST", "localhost")
        sandbox_url = f"http://{sandbox_host}:{port}"
        if not wait_for_sandbox_ready(sandbox_url, timeout=5):
            return None

        return SandboxInfo(
            sandbox_id=sandbox_id,
            sandbox_url=sandbox_url,
            container_name=container_name,
        )

    # ── Container operations ─────────────────────────────────────────────

    def _start_container(
        self,
        container_name: str,
        port: int,
        extra_mounts: list[tuple[str, str, bool]] | None = None,
    ) -> str:
        """Start a new container.

        Args:
            container_name: Name for the container.
            port: Host port to map to container port 8080.
            extra_mounts: Additional volume mounts.

        Returns:
            The container ID.

        Raises:
            RuntimeError: If container fails to start.
        """
        cmd = [self._runtime, "run"]

        # Docker-specific security options
        if self._runtime == "docker":
            cmd.extend(["--security-opt", "seccomp=unconfined"])

        cmd.extend(
            [
                "--rm",
                "-d",
                "-p",
                f"{port}:8080",
                "--name",
                container_name,
            ]
        )

        # Environment variables
        for key, value in self._environment.items():
            cmd.extend(["-e", f"{key}={value}"])

        # Config-level volume mounts
        for mount in self._config_mounts:
            mount_spec = f"{mount.host_path}:{mount.container_path}"
            if mount.read_only:
                mount_spec += ":ro"
            cmd.extend(["-v", mount_spec])

        # Extra mounts (thread-specific, skills, etc.)
        if extra_mounts:
            for host_path, container_path, read_only in extra_mounts:
                mount_spec = f"{host_path}:{container_path}"
                if read_only:
                    mount_spec += ":ro"
                cmd.extend(["-v", mount_spec])

        cmd.append(self._image)

        logger.info(f"Starting container using {self._runtime}: {' '.join(cmd)}")

        try:
            result = subprocess.run(cmd, capture_output=True, text=True, check=True)
            container_id = result.stdout.strip()
            logger.info(f"Started container {container_name} (ID: {container_id}) using {self._runtime}")
            return container_id
        except subprocess.CalledProcessError as e:
            logger.error(f"Failed to start container using {self._runtime}: {e.stderr}")
            raise RuntimeError(f"Failed to start sandbox container: {e.stderr}")

    def _stop_container(self, container_id: str) -> None:
        """Stop a container (--rm ensures automatic removal)."""
        try:
            subprocess.run(
                [self._runtime, "stop", container_id],
                capture_output=True,
                text=True,
                check=True,
            )
            logger.info(f"Stopped container {container_id} using {self._runtime}")
        except subprocess.CalledProcessError as e:
            logger.warning(f"Failed to stop container {container_id}: {e.stderr}")

    def _is_container_running(self, container_name: str) -> bool:
        """Check if a named container is currently running.

        This enables cross-process container discovery — any process can detect
        containers started by another process via the deterministic container name.
        """
        try:
            result = subprocess.run(
                [self._runtime, "inspect", "-f", "{{.State.Running}}", container_name],
                capture_output=True,
                text=True,
                timeout=5,
            )
            return result.returncode == 0 and result.stdout.strip().lower() == "true"
        except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
            return False

    def _get_container_port(self, container_name: str) -> int | None:
        """Get the host port of a running container.

        Args:
            container_name: The container name to inspect.

        Returns:
            The host port mapped to container port 8080, or None if not found.
        """
        try:
            result = subprocess.run(
                [self._runtime, "port", container_name, "8080"],
                capture_output=True,
                text=True,
                timeout=5,
            )
            if result.returncode == 0 and result.stdout.strip():
                # Output format: "0.0.0.0:PORT" or ":::PORT"
                port_str = result.stdout.strip().split(":")[-1]
                return int(port_str)
        except (subprocess.CalledProcessError, subprocess.TimeoutExpired, ValueError):
            pass
        return None
@@ -0,0 +1,156 @@
|
||||
"""Remote sandbox backend — delegates Pod lifecycle to the provisioner service.
|
||||
|
||||
The provisioner dynamically creates per-sandbox-id Pods + NodePort Services
|
||||
in k3s. The backend accesses sandbox pods directly via ``k3s:{NodePort}``.
|
||||
|
||||
Architecture:
|
||||
┌────────────┐ HTTP ┌─────────────┐ K8s API ┌──────────┐
|
||||
│ this file │ ──────▸ │ provisioner │ ────────▸ │ k3s │
|
||||
│ (backend) │ │ :8002 │ │ :6443 │
|
||||
└────────────┘ └─────────────┘ └─────┬────┘
|
||||
│ creates
|
||||
┌─────────────┐ ┌─────▼──────┐
|
||||
│ backend │ ────────▸ │ sandbox │
|
||||
│ │ direct │ Pod(s) │
|
||||
└─────────────┘ k3s:NPort └────────────┘
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
|
||||
import requests
|
||||
|
||||
from .backend import SandboxBackend
|
||||
from .sandbox_info import SandboxInfo
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RemoteSandboxBackend(SandboxBackend):
|
||||
"""Backend that delegates sandbox lifecycle to the provisioner service.
|
||||
|
||||
All Pod creation, destruction, and discovery are handled by the
|
||||
provisioner. This backend is a thin HTTP client.
|
||||
|
||||
Typical config.yaml::
|
||||
|
||||
sandbox:
|
||||
use: deerflow.community.aio_sandbox:AioSandboxProvider
|
||||
provisioner_url: http://provisioner:8002
|
||||
"""
|
||||
|
||||
def __init__(self, provisioner_url: str):
|
||||
"""Initialize with the provisioner service URL.
|
||||
|
||||
Args:
|
||||
provisioner_url: URL of the provisioner service
|
||||
(e.g., ``http://provisioner:8002``).
|
||||
"""
|
||||
self._provisioner_url = provisioner_url.rstrip("/")
|
||||
|
||||
@property
|
||||
def provisioner_url(self) -> str:
|
||||
return self._provisioner_url
|
||||
|
||||
# ── SandboxBackend interface ──────────────────────────────────────────
|
    def create(
        self,
        thread_id: str,
        sandbox_id: str,
        extra_mounts: list[tuple[str, str, bool]] | None = None,
    ) -> SandboxInfo:
        """Create a sandbox Pod + Service via the provisioner.

        Calls ``POST /api/sandboxes``, which creates a dedicated Pod +
        NodePort Service in k3s.
        """
        return self._provisioner_create(thread_id, sandbox_id, extra_mounts)

    def destroy(self, info: SandboxInfo) -> None:
        """Destroy a sandbox Pod + Service via the provisioner."""
        self._provisioner_destroy(info.sandbox_id)

    def is_alive(self, info: SandboxInfo) -> bool:
        """Check whether the sandbox Pod is running."""
        return self._provisioner_is_alive(info.sandbox_id)

    def discover(self, sandbox_id: str) -> SandboxInfo | None:
        """Discover an existing sandbox via the provisioner.

        Calls ``GET /api/sandboxes/{sandbox_id}`` and returns info if
        the Pod exists.
        """
        return self._provisioner_discover(sandbox_id)

    # ── Provisioner API calls ─────────────────────────────────────────────

    def _provisioner_create(
        self,
        thread_id: str,
        sandbox_id: str,
        extra_mounts: list[tuple[str, str, bool]] | None = None,
    ) -> SandboxInfo:
        """POST /api/sandboxes → create Pod + Service.

        NOTE: extra_mounts is accepted for interface parity but is not
        forwarded to the provisioner in the request payload below.
        """
        try:
            resp = requests.post(
                f"{self._provisioner_url}/api/sandboxes",
                json={
                    "sandbox_id": sandbox_id,
                    "thread_id": thread_id,
                },
                timeout=30,
            )
            resp.raise_for_status()
            data = resp.json()
            logger.info(f"Provisioner created sandbox {sandbox_id}: sandbox_url={data['sandbox_url']}")
            return SandboxInfo(
                sandbox_id=sandbox_id,
                sandbox_url=data["sandbox_url"],
            )
        except requests.RequestException as exc:
            logger.error(f"Provisioner create failed for {sandbox_id}: {exc}")
            raise RuntimeError(f"Provisioner create failed: {exc}") from exc

    def _provisioner_destroy(self, sandbox_id: str) -> None:
        """DELETE /api/sandboxes/{sandbox_id} → destroy Pod + Service."""
        try:
            resp = requests.delete(
                f"{self._provisioner_url}/api/sandboxes/{sandbox_id}",
                timeout=15,
            )
            if resp.ok:
                logger.info(f"Provisioner destroyed sandbox {sandbox_id}")
            else:
                logger.warning(f"Provisioner destroy returned {resp.status_code}: {resp.text}")
        except requests.RequestException as exc:
            logger.warning(f"Provisioner destroy failed for {sandbox_id}: {exc}")

    def _provisioner_is_alive(self, sandbox_id: str) -> bool:
        """GET /api/sandboxes/{sandbox_id} → check Pod phase."""
        try:
            resp = requests.get(
                f"{self._provisioner_url}/api/sandboxes/{sandbox_id}",
                timeout=10,
            )
            if resp.ok:
                data = resp.json()
                return data.get("status") == "Running"
            return False
        except requests.RequestException:
            return False

    def _provisioner_discover(self, sandbox_id: str) -> SandboxInfo | None:
        """GET /api/sandboxes/{sandbox_id} → discover existing sandbox."""
        try:
            resp = requests.get(
                f"{self._provisioner_url}/api/sandboxes/{sandbox_id}",
                timeout=10,
            )
            if resp.status_code == 404:
                return None
            resp.raise_for_status()
            data = resp.json()
            return SandboxInfo(
                sandbox_id=sandbox_id,
                sandbox_url=data["sandbox_url"],
            )
        except requests.RequestException as exc:
            logger.debug(f"Provisioner discover failed for {sandbox_id}: {exc}")
            return None
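For orientation, a minimal lifecycle sketch of the four public methods above (the backend construction and IDs are illustrative; the class name is not shown in this hunk):

backend = ...  # an instance of the provisioner-backed sandbox backend defined in this file

info = backend.create(thread_id="thread-1", sandbox_id="sb-1")
assert backend.is_alive(info)

# A second process can reattach by ID instead of creating a new Pod.
rediscovered = backend.discover("sb-1")
if rediscovered is not None:
    backend.destroy(rediscovered)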
@@ -0,0 +1,41 @@
"""Sandbox metadata for cross-process discovery and state persistence."""

from __future__ import annotations

import time
from dataclasses import dataclass, field


@dataclass
class SandboxInfo:
    """Persisted sandbox metadata that enables cross-process discovery.

    This dataclass holds all the information needed to reconnect to an
    existing sandbox from a different process (e.g., gateway vs. langgraph,
    multiple workers, or across K8s pods with shared storage).
    """

    sandbox_id: str
    sandbox_url: str  # e.g. http://localhost:8080 or http://k3s:30001
    container_name: str | None = None  # Only for local container backend
    container_id: str | None = None  # Only for local container backend
    created_at: float = field(default_factory=time.time)

    def to_dict(self) -> dict:
        return {
            "sandbox_id": self.sandbox_id,
            "sandbox_url": self.sandbox_url,
            "container_name": self.container_name,
            "container_id": self.container_id,
            "created_at": self.created_at,
        }

    @classmethod
    def from_dict(cls, data: dict) -> SandboxInfo:
        return cls(
            sandbox_id=data["sandbox_id"],
            # Fall back to the older "base_url" key if "sandbox_url" is absent.
            sandbox_url=data.get("sandbox_url", data.get("base_url", "")),
            container_name=data.get("container_name"),
            container_id=data.get("container_id"),
            created_at=data.get("created_at", time.time()),
        )
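A minimal round-trip of the serialization helpers above, assuming SandboxInfo is imported from this module (the file path is illustrative):

import json

info = SandboxInfo(sandbox_id="sb-123", sandbox_url="http://k3s:30001")

# Persist to shared storage so another process can rediscover the sandbox.
with open("/tmp/sb-123.json", "w") as f:
    json.dump(info.to_dict(), f)

# ...later, possibly in a different process:
with open("/tmp/sb-123.json") as f:
    restored = SandboxInfo.from_dict(json.load(f))

assert restored.sandbox_url == "http://k3s:30001"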
@@ -0,0 +1,73 @@
import json

from firecrawl import FirecrawlApp
from langchain.tools import tool

from deerflow.config import get_app_config


def _get_firecrawl_client() -> FirecrawlApp:
    config = get_app_config().get_tool_config("web_search")
    api_key = None
    if config is not None:
        api_key = config.model_extra.get("api_key")
    return FirecrawlApp(api_key=api_key)  # type: ignore[arg-type]


@tool("web_search", parse_docstring=True)
def web_search_tool(query: str) -> str:
    """Search the web.

    Args:
        query: The query to search for.
    """
    try:
        config = get_app_config().get_tool_config("web_search")
        max_results = 5
        if config is not None:
            max_results = config.model_extra.get("max_results", max_results)

        client = _get_firecrawl_client()
        result = client.search(query, limit=max_results)

        # result.web contains a list of SearchResultWeb objects.
        web_results = result.web or []
        normalized_results = [
            {
                "title": getattr(item, "title", "") or "",
                "url": getattr(item, "url", "") or "",
                "snippet": getattr(item, "description", "") or "",
            }
            for item in web_results
        ]
        json_results = json.dumps(normalized_results, indent=2, ensure_ascii=False)
        return json_results
    except Exception as e:
        return f"Error: {str(e)}"


@tool("web_fetch", parse_docstring=True)
def web_fetch_tool(url: str) -> str:
    """Fetch the contents of a web page at a given URL.
    Only fetch EXACT URLs that have been provided directly by the user or have been returned in results from the web_search and web_fetch tools.
    This tool can NOT access content that requires authentication, such as private Google Docs or pages behind login walls.
    Do NOT add www. to URLs that do NOT have them.
    URLs must include the scheme: https://example.com is a valid URL while example.com is an invalid URL.

    Args:
        url: The URL to fetch the contents of.
    """
    try:
        client = _get_firecrawl_client()
        result = client.scrape(url, formats=["markdown"])

        markdown_content = result.markdown or ""
        metadata = result.metadata
        title = metadata.title if metadata and metadata.title else "Untitled"

        if not markdown_content:
            return "Error: No content found"
    except Exception as e:
        return f"Error: {str(e)}"

    return f"# {title}\n\n{markdown_content[:4096]}"
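Both tools follow the standard LangChain tool interface, so a quick smoke test looks like this (assumes a Firecrawl API key is available via the web_search tool config or the FIRECRAWL_API_KEY environment variable; inputs are illustrative):

print(web_search_tool.invoke({"query": "deer-flow agent framework"}))
print(web_fetch_tool.invoke({"url": "https://example.com"}))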
@@ -0,0 +1,3 @@
from .tools import image_search_tool

__all__ = ["image_search_tool"]
@@ -0,0 +1,135 @@
"""
Image Search Tool - Search images using DuckDuckGo for reference in image generation.
"""

import json
import logging

from langchain.tools import tool

from deerflow.config import get_app_config

logger = logging.getLogger(__name__)


def _search_images(
    query: str,
    max_results: int = 5,
    region: str = "wt-wt",
    safesearch: str = "moderate",
    size: str | None = None,
    color: str | None = None,
    type_image: str | None = None,
    layout: str | None = None,
    license_image: str | None = None,
) -> list[dict]:
    """
    Execute an image search using DuckDuckGo.

    Args:
        query: Search keywords
        max_results: Maximum number of results
        region: Search region
        safesearch: Safe search level
        size: Image size (Small/Medium/Large/Wallpaper)
        color: Color filter
        type_image: Image type (photo/clipart/gif/transparent/line)
        layout: Layout (Square/Tall/Wide)
        license_image: License filter

    Returns:
        List of search results
    """
    try:
        from ddgs import DDGS
    except ImportError:
        logger.error("ddgs library not installed. Run: pip install ddgs")
        return []

    ddgs = DDGS(timeout=30)

    try:
        kwargs = {
            "region": region,
            "safesearch": safesearch,
            "max_results": max_results,
        }

        if size:
            kwargs["size"] = size
        if color:
            kwargs["color"] = color
        if type_image:
            kwargs["type_image"] = type_image
        if layout:
            kwargs["layout"] = layout
        if license_image:
            kwargs["license_image"] = license_image

        results = ddgs.images(query, **kwargs)
        return list(results) if results else []

    except Exception as e:
        logger.error(f"Failed to search images: {e}")
        return []


@tool("image_search", parse_docstring=True)
def image_search_tool(
    query: str,
    max_results: int = 5,
    size: str | None = None,
    type_image: str | None = None,
    layout: str | None = None,
) -> str:
    """Search for images online. Use this tool BEFORE image generation to find reference images for characters, portraits, objects, scenes, or any content requiring visual accuracy.

    **When to use:**
    - Before generating character/portrait images: search for similar poses, expressions, styles
    - Before generating specific objects/products: search for accurate visual references
    - Before generating scenes/locations: search for architectural or environmental references
    - Before generating fashion/clothing: search for style and detail references

    The returned image URLs can be used as reference images in image generation to significantly improve quality.

    Args:
        query: Search keywords describing the images you want to find. Be specific for better results (e.g., "Japanese woman street photography 1990s" instead of just "woman").
        max_results: Maximum number of images to return. Default is 5.
        size: Image size filter. Options: "Small", "Medium", "Large", "Wallpaper". Use "Large" for reference images.
        type_image: Image type filter. Options: "photo", "clipart", "gif", "transparent", "line". Use "photo" for realistic references.
        layout: Layout filter. Options: "Square", "Tall", "Wide". Choose based on your generation needs.
    """
    config = get_app_config().get_tool_config("image_search")

    # Override max_results from config if set
    if config is not None and "max_results" in config.model_extra:
        max_results = config.model_extra.get("max_results", max_results)

    results = _search_images(
        query=query,
        max_results=max_results,
        size=size,
        type_image=type_image,
        layout=layout,
    )

    if not results:
        return json.dumps({"error": "No images found", "query": query}, ensure_ascii=False)

    # Note: both fields currently use the thumbnail URL; DDGS results also
    # carry a full-size "image" key if originals are preferred.
    normalized_results = [
        {
            "title": r.get("title", ""),
            "image_url": r.get("thumbnail", ""),
            "thumbnail_url": r.get("thumbnail", ""),
        }
        for r in results
    ]

    output = {
        "query": query,
        "total_results": len(normalized_results),
        "results": normalized_results,
        "usage_hint": "Use the 'image_url' values as reference images in image generation. Download them first if needed.",
    }

    return json.dumps(output, indent=2, ensure_ascii=False)
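A hedged invocation sketch (requires the ddgs package; the query and filters are illustrative):

refs = image_search_tool.invoke({
    "query": "art deco cinema facade at night",
    "max_results": 3,
    "size": "Large",
    "type_image": "photo",
})
print(refs)  # JSON string with title/image_url/thumbnail_url entries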
@@ -0,0 +1,311 @@
"""Utility for calling the InfoQuest Search and Fetch API.

In order to set this up, follow the instructions at:
https://docs.byteplus.com/en/docs/InfoQuest/What_is_Info_Quest
"""

import json
import logging
import os
from typing import Any

import requests

logger = logging.getLogger(__name__)


class InfoQuestClient:
    """Client for interacting with the InfoQuest web search and fetch API."""

    def __init__(self, fetch_time: int = -1, fetch_timeout: int = -1, fetch_navigation_timeout: int = -1, search_time_range: int = -1):
        logger.info("\n============================================\n🚀 BytePlus InfoQuest Client Initialization 🚀\n============================================")

        self.fetch_time = fetch_time
        self.fetch_timeout = fetch_timeout
        self.fetch_navigation_timeout = fetch_navigation_timeout
        self.search_time_range = search_time_range
        self.api_key_set = bool(os.getenv("INFOQUEST_API_KEY"))
        if logger.isEnabledFor(logging.DEBUG):
            config_details = (
                f"\n📋 Configuration Details:\n"
                f"├── Fetch Time: {fetch_time} {'(Default: No fetch time)' if fetch_time == -1 else '(Custom)'}\n"
                f"├── Fetch Timeout: {fetch_timeout} {'(Default: No fetch timeout)' if fetch_timeout == -1 else '(Custom)'}\n"
                f"├── Navigation Timeout: {fetch_navigation_timeout} {'(Default: No navigation timeout)' if fetch_navigation_timeout == -1 else '(Custom)'}\n"
                f"├── Search Time Range: {search_time_range} {'(Default: No search time range)' if search_time_range == -1 else '(Custom)'}\n"
                f"└── API Key: {'✅ Configured' if self.api_key_set else '❌ Not set'}"
            )

            logger.debug(config_details)
            logger.debug("\n" + "*" * 70 + "\n")

    def fetch(self, url: str, return_format: str = "html") -> str:
        if logger.isEnabledFor(logging.DEBUG):
            url_truncated = url[:50] + "..." if len(url) > 50 else url
            logger.debug(
                f"InfoQuest - Fetch API request initiated | "
                f"operation=crawl url | "
                f"url_truncated={url_truncated} | "
                f"has_timeout_filter={self.fetch_timeout > 0} | timeout_filter={self.fetch_timeout} | "
                f"has_fetch_time_filter={self.fetch_time > 0} | fetch_time_filter={self.fetch_time} | "
                f"has_navigation_timeout_filter={self.fetch_navigation_timeout > 0} | navi_timeout_filter={self.fetch_navigation_timeout} | "
                f"request_type=sync"
            )

        # Prepare headers
        headers = self._prepare_headers()

        # Prepare request data
        data = self._prepare_crawl_request_data(url, return_format)

        logger.debug("Sending crawl request to InfoQuest API")
        try:
            response = requests.post("https://reader.infoquest.bytepluses.com", headers=headers, json=data)

            # Check if the status code is not 200
            if response.status_code != 200:
                error_message = f"fetch API returned status {response.status_code}: {response.text}"
                logger.debug("InfoQuest Crawler fetch API returned status %d: %s for URL: %s", response.status_code, response.text, url)
                return f"Error: {error_message}"

            # Check for an empty response
            if not response.text or not response.text.strip():
                error_message = "no result found"
                logger.debug("InfoQuest Crawler returned empty response for URL: %s", url)
                return f"Error: {error_message}"

            # Try to parse the response as JSON and extract reader_result
            try:
                response_data = json.loads(response.text)
                if "reader_result" in response_data:
                    logger.debug("Successfully extracted reader_result from JSON response")
                    return response_data["reader_result"]
                elif "content" in response_data:
                    # Fall back to the content field if reader_result is not available
                    logger.debug("reader_result missing in JSON response, falling back to content field: %s", response_data["content"])
                    return response_data["content"]
                else:
                    # If neither field exists, fall through and return the original response
                    logger.warning("Neither reader_result nor content field found in JSON response")
            except json.JSONDecodeError:
                # If the response is not JSON, return the original text
                logger.debug("Response is not in JSON format, returning as-is")
                return response.text

            # Log a partial response for debugging
            if logger.isEnabledFor(logging.DEBUG):
                response_sample = response.text[:200] + ("..." if len(response.text) > 200 else "")
                logger.debug("Successfully received response, content length: %d bytes, first 200 chars: %s", len(response.text), response_sample)
            return response.text
        except Exception as e:
            error_message = f"fetch API failed: {str(e)}"
            logger.error(error_message)
            return f"Error: {error_message}"

    @staticmethod
    def _prepare_headers() -> dict[str, str]:
        """Prepare request headers."""
        headers = {
            "Content-Type": "application/json",
        }

        # Add the API key if available
        if os.getenv("INFOQUEST_API_KEY"):
            headers["Authorization"] = f"Bearer {os.getenv('INFOQUEST_API_KEY')}"
            logger.debug("API key added to request headers")
        else:
            logger.warning("InfoQuest API key is not set. Provide your own key for authentication.")

        return headers

    def _prepare_crawl_request_data(self, url: str, return_format: str) -> dict[str, Any]:
        """Prepare request data with formatted parameters."""
        # Normalize return_format
        if return_format and return_format.lower() == "html":
            normalized_format = "HTML"
        else:
            normalized_format = return_format

        data = {"url": url, "format": normalized_format}

        # Add timeout parameters if set to positive values
        timeout_params = {}
        if self.fetch_time > 0:
            timeout_params["fetch_time"] = self.fetch_time
        if self.fetch_timeout > 0:
            timeout_params["timeout"] = self.fetch_timeout
        if self.fetch_navigation_timeout > 0:
            timeout_params["navi_timeout"] = self.fetch_navigation_timeout

        # Log applied timeout parameters
        if timeout_params:
            logger.debug("Applying timeout parameters: %s", timeout_params)
            data.update(timeout_params)

        return data

    def web_search_raw_results(
        self,
        query: str,
        site: str,
        output_format: str = "JSON",
    ) -> dict:
        """Get results from the InfoQuest Web-Search API synchronously."""
        headers = self._prepare_headers()

        params = {"format": output_format, "query": query}
        if self.search_time_range > 0:
            params["time_range"] = self.search_time_range

        if site != "":
            params["site"] = site

        response = requests.post("https://search.infoquest.bytepluses.com", headers=headers, json=params)
        response.raise_for_status()

        # Log a partial response for debugging
        response_json = response.json()
        if logger.isEnabledFor(logging.DEBUG):
            response_sample = json.dumps(response_json)[:200] + ("..." if len(json.dumps(response_json)) > 200 else "")
            logger.debug(f"Search API request completed successfully | service=InfoQuest | status=success | response_sample={response_sample}")

        return response_json

    @staticmethod
    def clean_results(raw_results: list[dict[str, dict[str, dict[str, Any]]]]) -> list[dict]:
        """Clean page and news results from the InfoQuest Web-Search API."""
        logger.debug("Processing web-search results")

        seen_urls = set()
        clean_results = []
        counts = {"pages": 0, "news": 0}

        for content_list in raw_results:
            content = content_list["content"]
            results = content["results"]

            if results.get("organic"):
                organic_results = results["organic"]
                for result in organic_results:
                    clean_result = {
                        "type": "page",
                    }
                    if "title" in result:
                        clean_result["title"] = result["title"]
                    if "desc" in result:
                        clean_result["desc"] = result["desc"]
                        clean_result["snippet"] = result["desc"]
                    if "url" in result:
                        clean_result["url"] = result["url"]
                        url = clean_result["url"]
                        if isinstance(url, str) and url and url not in seen_urls:
                            seen_urls.add(url)
                            clean_results.append(clean_result)
                            counts["pages"] += 1

            if results.get("top_stories"):
                news = results["top_stories"]
                for obj in news["items"]:
                    clean_result = {
                        "type": "news",
                    }
                    if "time_frame" in obj:
                        clean_result["time_frame"] = obj["time_frame"]
                    if "source" in obj:
                        clean_result["source"] = obj["source"]
                    title = obj.get("title")
                    url = obj.get("url")
                    if title:
                        clean_result["title"] = title
                    if url:
                        clean_result["url"] = url
                    if title and isinstance(url, str) and url and url not in seen_urls:
                        seen_urls.add(url)
                        clean_results.append(clean_result)
                        counts["news"] += 1
        logger.debug(f"Results processing completed | total_results={len(clean_results)} | pages={counts['pages']} | news_items={counts['news']} | unique_urls={len(seen_urls)}")

        return clean_results

    def web_search(
        self,
        query: str,
        site: str = "",
        output_format: str = "JSON",
    ) -> str:
        if logger.isEnabledFor(logging.DEBUG):
            query_truncated = query[:50] + "..." if len(query) > 50 else query
            logger.debug(
                f"InfoQuest - Search API request initiated | "
                f"operation=search webs | "
                f"query_truncated={query_truncated} | "
                f"has_time_filter={self.search_time_range > 0} | time_filter={self.search_time_range} | "
                f"has_site_filter={bool(site)} | site={site} | "
                f"request_type=sync"
            )

        try:
            logger.debug("InfoQuest Web-Search - Executing search with parameters")
            raw_results = self.web_search_raw_results(
                query,
                site,
                output_format,
            )
            if "search_result" in raw_results:
                logger.debug("InfoQuest Web-Search - Successfully extracted search_result from JSON response")
                results = raw_results["search_result"]

                logger.debug("InfoQuest Web-Search - Processing raw search results")
                cleaned_results = self.clean_results(results["results"])

                result_json = json.dumps(cleaned_results, indent=2, ensure_ascii=False)

                logger.debug(f"InfoQuest Web-Search - Search tool execution completed | mode=synchronous | results_count={len(cleaned_results)}")
                return result_json

            elif "content" in raw_results:
                # A bare content field means search_result is missing; treat it as an error.
                error_message = "web search API returned an unexpected format"
                logger.error("web search API returned an unexpected format: no search_result field in JSON response, content: %s", raw_results["content"])
                return f"Error: {error_message}"
            else:
                # If neither field exists, return the original response
                logger.warning("InfoQuest Web-Search - Neither search_result nor content field found in JSON response")
                return json.dumps(raw_results, indent=2, ensure_ascii=False)

        except Exception as e:
            error_message = f"InfoQuest Web-Search - Search tool execution failed | mode=synchronous | error={str(e)}"
            logger.error(error_message)
            return f"Error: {error_message}"

    @staticmethod
    def clean_results_with_image_search(raw_results: list[dict[str, dict[str, dict[str, Any]]]]) -> list[dict]:
        """Clean image results from the InfoQuest Web-Search API."""
        logger.debug("Processing web-search results")

        seen_urls = set()
        clean_results = []
        counts = {"images": 0}

        for content_list in raw_results:
            content = content_list["content"]
            results = content["results"]

            if results.get("images_results"):
                images_results = results["images_results"]
                for result in images_results:
                    clean_result = {}
                    if "image_url" in result:
                        clean_result["image_url"] = result["image_url"]
                        url = clean_result["image_url"]
                        if isinstance(url, str) and url and url not in seen_urls:
                            seen_urls.add(url)
                            clean_results.append(clean_result)
                            counts["images"] += 1
                    if "thumbnail_url" in result:
                        clean_result["thumbnail_url"] = result["thumbnail_url"]
                    if "url" in result:
                        clean_result["url"] = result["url"]
        logger.debug(f"Results processing completed | total_results={len(clean_results)} | images={counts['images']} | unique_urls={len(seen_urls)}")

        return clean_results
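A usage sketch for the client above (assumes INFOQUEST_API_KEY is exported; the query and URL are illustrative):

client = InfoQuestClient(search_time_range=7)

# web_search returns a JSON string of deduplicated page/news results,
# or an "Error: ..." string on failure.
print(client.web_search("k3s NodePort service"))

# fetch returns the page body (reader_result when the API provides one).
print(client.fetch("https://example.com")[:500])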
@@ -0,0 +1,63 @@
from langchain.tools import tool

from deerflow.config import get_app_config
from deerflow.utils.readability import ReadabilityExtractor

from .infoquest_client import InfoQuestClient

readability_extractor = ReadabilityExtractor()


def _get_infoquest_client() -> InfoQuestClient:
    search_config = get_app_config().get_tool_config("web_search")
    search_time_range = -1
    if search_config is not None and "search_time_range" in search_config.model_extra:
        search_time_range = search_config.model_extra.get("search_time_range")
    fetch_config = get_app_config().get_tool_config("web_fetch")
    fetch_time = -1
    if fetch_config is not None and "fetch_time" in fetch_config.model_extra:
        fetch_time = fetch_config.model_extra.get("fetch_time")
    fetch_timeout = -1
    if fetch_config is not None and "timeout" in fetch_config.model_extra:
        fetch_timeout = fetch_config.model_extra.get("timeout")
    navigation_timeout = -1
    if fetch_config is not None and "navigation_timeout" in fetch_config.model_extra:
        navigation_timeout = fetch_config.model_extra.get("navigation_timeout")

    return InfoQuestClient(
        search_time_range=search_time_range,
        fetch_timeout=fetch_timeout,
        fetch_navigation_timeout=navigation_timeout,
        fetch_time=fetch_time,
    )


@tool("web_search", parse_docstring=True)
def web_search_tool(query: str) -> str:
    """Search the web.

    Args:
        query: The query to search for.
    """
    client = _get_infoquest_client()
    return client.web_search(query)


@tool("web_fetch", parse_docstring=True)
def web_fetch_tool(url: str) -> str:
    """Fetch the contents of a web page at a given URL.
    Only fetch EXACT URLs that have been provided directly by the user or have been returned in results from the web_search and web_fetch tools.
    This tool can NOT access content that requires authentication, such as private Google Docs or pages behind login walls.
    Do NOT add www. to URLs that do NOT have them.
    URLs must include the scheme: https://example.com is a valid URL while example.com is an invalid URL.

    Args:
        url: The URL to fetch the contents of.
    """
    client = _get_infoquest_client()
    result = client.fetch(url)
    if result.startswith("Error: "):
        return result
    article = readability_extractor.extract_article(result)
    return article.to_markdown()[:4096]
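Note that failures propagate in-band as "Error: ..." strings rather than exceptions, so callers can branch on the prefix (URL illustrative):

result = web_fetch_tool.invoke({"url": "https://example.com"})
if result.startswith("Error: "):
    print("fetch failed:", result)
else:
    print(result[:200])  # markdown extracted via ReadabilityExtractor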
@@ -0,0 +1,38 @@
import logging
import os

import requests

logger = logging.getLogger(__name__)


class JinaClient:
    def crawl(self, url: str, return_format: str = "html", timeout: int = 10) -> str:
        headers = {
            "Content-Type": "application/json",
            "X-Return-Format": return_format,
            "X-Timeout": str(timeout),
        }
        if os.getenv("JINA_API_KEY"):
            headers["Authorization"] = f"Bearer {os.getenv('JINA_API_KEY')}"
        else:
            logger.warning("Jina API key is not set. Provide your own key to access a higher rate limit. See https://jina.ai/reader for more information.")
        data = {"url": url}
        try:
            response = requests.post("https://r.jina.ai/", headers=headers, json=data)

            if response.status_code != 200:
                error_message = f"Jina API returned status {response.status_code}: {response.text}"
                logger.error(error_message)
                return f"Error: {error_message}"

            if not response.text or not response.text.strip():
                error_message = "Jina API returned empty response"
                logger.error(error_message)
                return f"Error: {error_message}"

            return response.text
        except Exception as e:
            error_message = f"Request to Jina API failed: {str(e)}"
            logger.error(error_message)
            return f"Error: {error_message}"
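A direct-call sketch of the client (the Jina reader also accepts other X-Return-Format values such as markdown; the URL is illustrative):

client = JinaClient()
html = client.crawl("https://example.com", return_format="html", timeout=20)
if html.startswith("Error: "):
    print(html)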
backend/packages/harness/deerflow/community/jina_ai/tools.py
@@ -0,0 +1,28 @@
from langchain.tools import tool

from deerflow.community.jina_ai.jina_client import JinaClient
from deerflow.config import get_app_config
from deerflow.utils.readability import ReadabilityExtractor

readability_extractor = ReadabilityExtractor()


@tool("web_fetch", parse_docstring=True)
def web_fetch_tool(url: str) -> str:
    """Fetch the contents of a web page at a given URL.
    Only fetch EXACT URLs that have been provided directly by the user or have been returned in results from the web_search and web_fetch tools.
    This tool can NOT access content that requires authentication, such as private Google Docs or pages behind login walls.
    Do NOT add www. to URLs that do NOT have them.
    URLs must include the scheme: https://example.com is a valid URL while example.com is an invalid URL.

    Args:
        url: The URL to fetch the contents of.
    """
    jina_client = JinaClient()
    timeout = 10
    config = get_app_config().get_tool_config("web_fetch")
    if config is not None and "timeout" in config.model_extra:
        timeout = config.model_extra.get("timeout")
    html_content = jina_client.crawl(url, return_format="html", timeout=timeout)
    # JinaClient reports failures as "Error: ..." strings; pass them through
    # instead of running readability extraction on the error message.
    if html_content.startswith("Error: "):
        return html_content
    article = readability_extractor.extract_article(html_content)
    return article.to_markdown()[:4096]
backend/packages/harness/deerflow/community/tavily/tools.py
@@ -0,0 +1,62 @@
import json

from langchain.tools import tool
from tavily import TavilyClient

from deerflow.config import get_app_config


def _get_tavily_client() -> TavilyClient:
    config = get_app_config().get_tool_config("web_search")
    api_key = None
    if config is not None and "api_key" in config.model_extra:
        api_key = config.model_extra.get("api_key")
    return TavilyClient(api_key=api_key)


@tool("web_search", parse_docstring=True)
def web_search_tool(query: str) -> str:
    """Search the web.

    Args:
        query: The query to search for.
    """
    config = get_app_config().get_tool_config("web_search")
    max_results = 5
    if config is not None and "max_results" in config.model_extra:
        max_results = config.model_extra.get("max_results")

    client = _get_tavily_client()
    res = client.search(query, max_results=max_results)
    normalized_results = [
        {
            "title": result["title"],
            "url": result["url"],
            "snippet": result["content"],
        }
        for result in res["results"]
    ]
    json_results = json.dumps(normalized_results, indent=2, ensure_ascii=False)
    return json_results


@tool("web_fetch", parse_docstring=True)
def web_fetch_tool(url: str) -> str:
    """Fetch the contents of a web page at a given URL.
    Only fetch EXACT URLs that have been provided directly by the user or have been returned in results from the web_search and web_fetch tools.
    This tool can NOT access content that requires authentication, such as private Google Docs or pages behind login walls.
    Do NOT add www. to URLs that do NOT have them.
    URLs must include the scheme: https://example.com is a valid URL while example.com is an invalid URL.

    Args:
        url: The URL to fetch the contents of.
    """
    client = _get_tavily_client()
    res = client.extract([url])
    if "failed_results" in res and len(res["failed_results"]) > 0:
        return f"Error: {res['failed_results'][0]['error']}"
    elif "results" in res and len(res["results"]) > 0:
        result = res["results"][0]
        return f"# {result['title']}\n\n{result['raw_content'][:4096]}"
    else:
        return "Error: No results found"
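As with the other providers, a smoke test goes through the LangChain tool interface (assumes a Tavily API key via the tool config or the TAVILY_API_KEY environment variable; inputs are illustrative):

print(web_search_tool.invoke({"query": "uv workspace members"}))
print(web_fetch_tool.invoke({"url": "https://example.com"}))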