"""Remote sandbox backend — delegates Pod lifecycle to the provisioner service.
|
|
|
|
|
|
|
|
|
|
The provisioner dynamically creates per-sandbox-id Pods + NodePort Services
|
|
|
|
|
in k3s. The backend accesses sandbox pods directly via ``k3s:{NodePort}``.
|
|
|
|
|
|
|
|
|
|
Architecture:
|
|
|
|
|
┌────────────┐ HTTP ┌─────────────┐ K8s API ┌──────────┐
|
|
|
|
|
│ this file │ ──────▸ │ provisioner │ ────────▸ │ k3s │
|
|
|
|
|
│ (backend) │ │ :8002 │ │ :6443 │
|
|
|
|
|
└────────────┘ └─────────────┘ └─────┬────┘
|
|
|
|
|
│ creates
|
|
|
|
|
┌─────────────┐ ┌─────▼──────┐
|
|
|
|
|
│ backend │ ────────▸ │ sandbox │
|
|
|
|
|
│ │ direct │ Pod(s) │
|
|
|
|
|
└─────────────┘ k3s:NPort └────────────┘
|
|
|
|
|
"""

from __future__ import annotations

import logging

import requests

from .backend import SandboxBackend
from .sandbox_info import SandboxInfo

logger = logging.getLogger(__name__)


class RemoteSandboxBackend(SandboxBackend):
    """Backend that delegates sandbox lifecycle to the provisioner service.

    All Pod creation, destruction, and discovery are handled by the
    provisioner. This backend is a thin HTTP client.

    Typical config.yaml::

        sandbox:
          use: deerflow.community.aio_sandbox:AioSandboxProvider
          provisioner_url: http://provisioner:8002
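
    Example (a minimal usage sketch; the URL and IDs are placeholders)::

        backend = RemoteSandboxBackend("http://provisioner:8002")
        info = backend.create(thread_id="thread-1", sandbox_id="sb-1")
        if backend.is_alive(info):
            print(info.sandbox_url)
        backend.destroy(info)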
    """

    def __init__(self, provisioner_url: str):
        """Initialize with the provisioner service URL.

        Args:
            provisioner_url: URL of the provisioner service
                (e.g., ``http://provisioner:8002``).
        """
        self._provisioner_url = provisioner_url.rstrip("/")

    @property
    def provisioner_url(self) -> str:
        return self._provisioner_url

    # ── SandboxBackend interface ──────────────────────────────────────────

    def create(
        self,
        thread_id: str,
        sandbox_id: str,
        extra_mounts: list[tuple[str, str, bool]] | None = None,
    ) -> SandboxInfo:
        """Create a sandbox Pod + Service via the provisioner.

        Calls ``POST /api/sandboxes``, which creates a dedicated Pod +
        NodePort Service in k3s.
        """
        return self._provisioner_create(thread_id, sandbox_id, extra_mounts)

    def destroy(self, info: SandboxInfo) -> None:
        """Destroy a sandbox Pod + Service via the provisioner."""
        self._provisioner_destroy(info.sandbox_id)

    def is_alive(self, info: SandboxInfo) -> bool:
        """Check whether the sandbox Pod is running."""
        return self._provisioner_is_alive(info.sandbox_id)

    def discover(self, sandbox_id: str) -> SandboxInfo | None:
        """Discover an existing sandbox via the provisioner.

        Calls ``GET /api/sandboxes/{sandbox_id}`` and returns info if
        the Pod exists.
        """
        return self._provisioner_discover(sandbox_id)

    # ── Provisioner API calls ─────────────────────────────────────────────

    def _provisioner_create(
        self,
        thread_id: str,
        sandbox_id: str,
        extra_mounts: list[tuple[str, str, bool]] | None = None,
    ) -> SandboxInfo:
        """POST /api/sandboxes → create Pod + Service."""
        try:
            resp = requests.post(
                f"{self._provisioner_url}/api/sandboxes",
                json={
                    "sandbox_id": sandbox_id,
                    "thread_id": thread_id,
                },
                timeout=30,
            )
            resp.raise_for_status()
            data = resp.json()
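            # Expected response shape (inferred from the field read below; the
            # exact contract is defined by the provisioner API), e.g.:
            #   {"sandbox_url": "http://k3s:<NodePort>", ...}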
            logger.info(f"Provisioner created sandbox {sandbox_id}: sandbox_url={data['sandbox_url']}")
            return SandboxInfo(
                sandbox_id=sandbox_id,
                sandbox_url=data["sandbox_url"],
            )
        except requests.RequestException as exc:
            logger.error(f"Provisioner create failed for {sandbox_id}: {exc}")
            raise RuntimeError(f"Provisioner create failed: {exc}") from exc

    def _provisioner_destroy(self, sandbox_id: str) -> None:
        """DELETE /api/sandboxes/{sandbox_id} → destroy Pod + Service."""
        try:
            resp = requests.delete(
                f"{self._provisioner_url}/api/sandboxes/{sandbox_id}",
                timeout=15,
            )
            if resp.ok:
                logger.info(f"Provisioner destroyed sandbox {sandbox_id}")
            else:
                logger.warning(f"Provisioner destroy returned {resp.status_code}: {resp.text}")
        except requests.RequestException as exc:
            logger.warning(f"Provisioner destroy failed for {sandbox_id}: {exc}")

    def _provisioner_is_alive(self, sandbox_id: str) -> bool:
        """GET /api/sandboxes/{sandbox_id} → check Pod phase."""
        try:
            resp = requests.get(
                f"{self._provisioner_url}/api/sandboxes/{sandbox_id}",
                timeout=10,
            )
            if resp.ok:
                data = resp.json()
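                # The provisioner reports the Pod phase in "status"; anything
                # other than "Running" (or a missing key) counts as not alive.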
                return data.get("status") == "Running"
            return False
        except requests.RequestException:
            return False

    def _provisioner_discover(self, sandbox_id: str) -> SandboxInfo | None:
        """GET /api/sandboxes/{sandbox_id} → discover existing sandbox."""
        try:
            resp = requests.get(
                f"{self._provisioner_url}/api/sandboxes/{sandbox_id}",
                timeout=10,
            )
            if resp.status_code == 404:
                return None
            resp.raise_for_status()
            data = resp.json()
            return SandboxInfo(
                sandbox_id=sandbox_id,
                sandbox_url=data["sandbox_url"],
            )
        except requests.RequestException as exc:
            logger.debug(f"Provisioner discover failed for {sandbox_id}: {exc}")
            return None