import logging
from langchain.chat_models import BaseChatModel
from deerflow.config import get_app_config, get_tracing_config, is_tracing_enabled
from deerflow.reflection import resolve_class
logger = logging.getLogger(__name__)


def create_chat_model(name: str | None = None, thinking_enabled: bool = False, **kwargs) -> BaseChatModel:
    """Create a chat model instance from the config.

    Args:
        name: The name of the model to create. If None, the first model in the
            config is used.
        thinking_enabled: Whether to enable the model's thinking settings
            (`when_thinking_enabled` / `thinking`). The model entry must set
            `supports_thinking: true` for this to be allowed.
        **kwargs: Extra keyword arguments forwarded to the model constructor.

    Returns:
        A chat model instance.

    Raises:
        ValueError: If the named model is not in the config, or if thinking is
            requested for a model that does not support it.
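
    Example (hypothetical model name; assumes a matching entry in config.yaml):
        model = create_chat_model("my-model", thinking_enabled=True)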
    """
    config = get_app_config()
    if name is None:
        name = config.models[0].name
    model_config = config.get_model_config(name)
    if model_config is None:
        raise ValueError(f"Model {name} not found in config") from None
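
    # `model_config.use` names the chat-model class to load (for example
    # "langchain_openai.ChatOpenAI" -- an illustrative value; the actual class
    # path comes from config.yaml). resolve_class imports it and returns it as
    # a BaseChatModel subclass.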
    model_class = resolve_class(model_config.use, BaseChatModel)
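
    # Everything in the model's config entry becomes constructor kwargs except
    # these deerflow-specific metadata fields.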
    model_settings_from_config = model_config.model_dump(
        exclude_none=True,
        exclude={
            "use",
            "name",
            "display_name",
            "description",
            "supports_thinking",
            "supports_reasoning_effort",
            "when_thinking_enabled",
            "thinking",
            "supports_vision",
        },
    )

    # Compute the effective when_thinking_enabled by merging in the `thinking`
    # shortcut field. The `thinking` shortcut is equivalent to setting
    # when_thinking_enabled["thinking"].
    has_thinking_settings = (model_config.when_thinking_enabled is not None) or (model_config.thinking is not None)
    effective_wte: dict = dict(model_config.when_thinking_enabled) if model_config.when_thinking_enabled else {}
    if model_config.thinking is not None:
        merged_thinking = {**(effective_wte.get("thinking") or {}), **model_config.thinking}
        effective_wte = {**effective_wte, "thinking": merged_thinking}
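    # For illustration (hypothetical config.yaml values), these two entries are
    # equivalent after the merge above:
    #     thinking: {type: enabled, budget_tokens: 4096}
    #     when_thinking_enabled: {thinking: {type: enabled, budget_tokens: 4096}}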

    if thinking_enabled and has_thinking_settings:
        if not model_config.supports_thinking:
            raise ValueError(
                f"Model {name} does not support thinking. "
                "Set `supports_thinking` to true in `config.yaml` to enable thinking."
            ) from None
        if effective_wte:
            model_settings_from_config.update(effective_wte)

    if not thinking_enabled and has_thinking_settings:
        if effective_wte.get("extra_body", {}).get("thinking", {}).get("type"):
            # OpenAI-compatible gateway: thinking is nested under extra_body.
            kwargs.update({"extra_body": {"thinking": {"type": "disabled"}}})
            kwargs.update({"reasoning_effort": "minimal"})
        elif effective_wte.get("thinking", {}).get("type"):
            # Native langchain_anthropic: thinking is a direct constructor parameter.
            kwargs.update({"thinking": {"type": "disabled"}})

    if not model_config.supports_reasoning_effort and "reasoning_effort" in kwargs:
        del kwargs["reasoning_effort"]
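
    # (Providers may reject unknown parameters, hence reasoning_effort is only
    # forwarded when the model config opts in via supports_reasoning_effort.)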

    # For Codex Responses API models: map thinking mode to reasoning_effort.
    from deerflow.models.openai_codex_provider import CodexChatModel

    if issubclass(model_class, CodexChatModel):
        # The ChatGPT Codex endpoint currently rejects max_tokens/max_output_tokens.
        model_settings_from_config.pop("max_tokens", None)
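
        # Codex reasoning models express "how hard to think" as an effort level
        # rather than a thinking-token budget, so thinking mode is translated to
        # reasoning_effort below.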

        # Use the explicit reasoning_effort from the frontend if provided
        # (low/medium/high/xhigh).
        explicit_effort = kwargs.pop("reasoning_effort", None)
        if not thinking_enabled:
            model_settings_from_config["reasoning_effort"] = "none"
        elif explicit_effort in ("low", "medium", "high", "xhigh"):
            model_settings_from_config["reasoning_effort"] = explicit_effort
        elif "reasoning_effort" not in model_settings_from_config:
            model_settings_from_config["reasoning_effort"] = "medium"
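        # Net effect: thinking off -> "none"; thinking on -> the caller's effort
        # if valid, else whatever config.yaml sets, else "medium".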

    model_instance = model_class(**kwargs, **model_settings_from_config)
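    # Note: Python raises TypeError if kwargs and model_settings_from_config
    # ever share a key ("got multiple values for keyword argument ..."); the
    # pops above keep the two dicts disjoint for the known overlapping keys.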

    if is_tracing_enabled():
        try:
            from langchain_core.tracers.langchain import LangChainTracer

            tracing_config = get_tracing_config()
            tracer = LangChainTracer(project_name=tracing_config.project)
            existing_callbacks = model_instance.callbacks or []
            model_instance.callbacks = [*existing_callbacks, tracer]
            logger.debug(f"LangSmith tracing attached to model '{name}' (project='{tracing_config.project}')")
        except Exception as e:
            logger.warning(f"Failed to attach LangSmith tracing to model '{name}': {e}")

    return model_instance
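

# A minimal usage sketch (hypothetical model names; assumes config.yaml lists
# matching entries under `models:`):
#
#     default_model = create_chat_model()  # first model in config
#     reasoner = create_chat_model("my-reasoner", thinking_enabled=True)
#     print(reasoner.invoke("hello").content)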