mirror of
https://gitee.com/wanwujie/deer-flow
synced 2026-04-17 11:44:44 +08:00
* refactor: extract shared utils to break harness→app cross-layer imports Move _validate_skill_frontmatter to src/skills/validation.py and CONVERTIBLE_EXTENSIONS + convert_file_to_markdown to src/utils/file_conversion.py. This eliminates the two reverse dependencies from client.py (harness layer) into gateway/routers/ (app layer), preparing for the harness/app package split. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * refactor: split backend/src into harness (deerflow.*) and app (app.*) Physically split the monolithic backend/src/ package into two layers: - **Harness** (`packages/harness/deerflow/`): publishable agent framework package with import prefix `deerflow.*`. Contains agents, sandbox, tools, models, MCP, skills, config, and all core infrastructure. - **App** (`app/`): unpublished application code with import prefix `app.*`. Contains gateway (FastAPI REST API) and channels (IM integrations). Key changes: - Move 13 harness modules to packages/harness/deerflow/ via git mv - Move gateway + channels to app/ via git mv - Rename all imports: src.* → deerflow.* (harness) / app.* (app layer) - Set up uv workspace with deerflow-harness as workspace member - Update langgraph.json, config.example.yaml, all scripts, Docker files - Add build-system (hatchling) to harness pyproject.toml - Add PYTHONPATH=. to gateway startup commands for app.* resolution - Update ruff.toml with known-first-party for import sorting - Update all documentation to reflect new directory structure Boundary rule enforced: harness code never imports from app. All 429 tests pass. Lint clean. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * chore: add harness→app boundary check test and update docs Add test_harness_boundary.py that scans all Python files in packages/harness/deerflow/ and fails if any `from app.*` or `import app.*` statement is found. This enforces the architectural rule that the harness layer never depends on the app layer. 
Update CLAUDE.md to document the harness/app split architecture, import conventions, and the boundary enforcement test. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * feat: add config versioning with auto-upgrade on startup When config.example.yaml schema changes, developers' local config.yaml files can silently become outdated. This adds a config_version field and auto-upgrade mechanism so breaking changes (like src.* → deerflow.* renames) are applied automatically before services start. - Add config_version: 1 to config.example.yaml - Add startup version check warning in AppConfig.from_file() - Add scripts/config-upgrade.sh with migration registry for value replacements - Add `make config-upgrade` target - Auto-run config-upgrade in serve.sh and start-daemon.sh before starting services - Add config error hints in service failure messages Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * fix comments * fix: update src.* import in test_sandbox_tools_security to deerflow.* Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * fix: handle empty config and search parent dirs for config.example.yaml Address Copilot review comments on PR #1131: - Guard against yaml.safe_load() returning None for empty config files - Search parent directories for config.example.yaml instead of only looking next to config.yaml, fixing detection in common setups Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * fix: correct skills root path depth and config_version type coercion - loader.py: fix get_skills_root_path() to use 5 parent levels (was 3) after harness split, file lives at packages/harness/deerflow/skills/ so parent×3 resolved to backend/packages/harness/ instead of backend/ - app_config.py: coerce config_version to int() before comparison in _check_config_version() to prevent TypeError when YAML stores value as string (e.g. 
config_version: "1") - tests: add regression tests for both fixes Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com> * fix: update test imports from src.* to deerflow.*/app.* after harness refactor Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com> --------- Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
101 lines
3.7 KiB
Python
101 lines
3.7 KiB
Python
from pathlib import Path
|
|
from typing import Annotated
|
|
|
|
from langchain.tools import InjectedToolCallId, ToolRuntime, tool
|
|
from langchain_core.messages import ToolMessage
|
|
from langgraph.types import Command
|
|
from langgraph.typing import ContextT
|
|
|
|
from deerflow.agents.thread_state import ThreadState
|
|
from deerflow.config.paths import VIRTUAL_PATH_PREFIX, get_paths
|
|
|
|
# Virtual (sandbox-facing) directory under which presented files must live,
# e.g. "/mnt/user-data/outputs". Built from the shared VIRTUAL_PATH_PREFIX so
# it stays in sync with the rest of the path contract.
OUTPUTS_VIRTUAL_PREFIX = f"{VIRTUAL_PATH_PREFIX}/outputs"
|
def _normalize_presented_filepath(
    runtime: ToolRuntime[ContextT, ThreadState],
    filepath: str,
) -> str:
    """Map a presented file path onto the `/mnt/user-data/outputs/*` contract.

    Two input forms are accepted:
    - a virtual sandbox path (e.g. `/mnt/user-data/outputs/report.md`), or
    - a host-side thread outputs path (e.g.
      `/app/backend/.deer-flow/threads/<thread>/user-data/outputs/report.md`).

    Returns:
        The path rewritten as a virtual `/mnt/user-data/outputs/...` path.

    Raises:
        ValueError: When runtime state/context metadata is missing, or when
            the resolved path falls outside this thread's outputs directory.
    """
    state = runtime.state
    if state is None:
        raise ValueError("Thread runtime state is not available")

    thread_id = runtime.context.get("thread_id")
    if not thread_id:
        raise ValueError("Thread ID is not available in runtime context")

    outputs_path = (state.get("thread_data") or {}).get("outputs_path")
    if not outputs_path:
        raise ValueError("Thread outputs path is not available in runtime state")

    outputs_root = Path(outputs_path).resolve()

    # Decide which namespace the caller used: strip leading slashes on both
    # sides so `/mnt/...` and `mnt/...` compare the same, then treat the path
    # as virtual when it equals the prefix or is nested underneath it.
    bare_path = filepath.lstrip("/")
    bare_prefix = VIRTUAL_PATH_PREFIX.lstrip("/")
    is_virtual = bare_path == bare_prefix or bare_path.startswith(f"{bare_prefix}/")

    if is_virtual:
        resolved = get_paths().resolve_virtual_path(thread_id, filepath)
    else:
        resolved = Path(filepath).expanduser().resolve()

    # relative_to() doubles as the containment check: it raises ValueError for
    # any path that is not inside the thread's outputs directory.
    try:
        rel = resolved.relative_to(outputs_root)
    except ValueError as exc:
        raise ValueError(f"Only files in {OUTPUTS_VIRTUAL_PREFIX} can be presented: {filepath}") from exc

    return f"{OUTPUTS_VIRTUAL_PREFIX}/{rel.as_posix()}"
|
|
|
|
|
|
@tool("present_files", parse_docstring=True)
def present_file_tool(
    runtime: ToolRuntime[ContextT, ThreadState],
    filepaths: list[str],
    tool_call_id: Annotated[str, InjectedToolCallId],
) -> Command:
    """Make files visible to the user for viewing and rendering in the client interface.

    When to use the present_files tool:

    - Making any file available for the user to view, download, or interact with
    - Presenting multiple related files at once
    - After creating files that should be presented to the user

    When NOT to use the present_files tool:
    - When you only need to read file contents for your own processing
    - For temporary or intermediate files not meant for user viewing

    Notes:
    - You should call this tool after creating files and moving them to the `/mnt/user-data/outputs` directory.
    - This tool can be safely called in parallel with other tools. State updates are handled by a reducer to prevent conflicts.

    Args:
        filepaths: List of absolute file paths to present to the user. **Only** files in `/mnt/user-data/outputs` can be presented.
    """
    # NOTE: the docstring above is runtime-behavioral — parse_docstring=True
    # turns it into the tool description/arg schema shown to the model.
    normalized: list[str] = []
    try:
        for path in filepaths:
            # First invalid path aborts the whole call with an error message.
            normalized.append(_normalize_presented_filepath(runtime, path))
    except ValueError as exc:
        return Command(
            update={"messages": [ToolMessage(f"Error: {exc}", tool_call_id=tool_call_id)]},
        )

    # The merge_artifacts reducer will handle merging and deduplication
    return Command(
        update={
            "artifacts": normalized,
            "messages": [ToolMessage("Successfully presented files", tool_call_id=tool_call_id)],
        },
    )
|