mirror of
https://gitee.com/wanwujie/deer-flow
synced 2026-04-02 22:02:13 +08:00
* refactor: extract shared utils to break harness→app cross-layer imports

  Move _validate_skill_frontmatter to src/skills/validation.py and CONVERTIBLE_EXTENSIONS + convert_file_to_markdown to src/utils/file_conversion.py. This eliminates the two reverse dependencies from client.py (harness layer) into gateway/routers/ (app layer), preparing for the harness/app package split.

  Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* refactor: split backend/src into harness (deerflow.*) and app (app.*)

  Physically split the monolithic backend/src/ package into two layers:

  - **Harness** (`packages/harness/deerflow/`): publishable agent framework package with import prefix `deerflow.*`. Contains agents, sandbox, tools, models, MCP, skills, config, and all core infrastructure.
  - **App** (`app/`): unpublished application code with import prefix `app.*`. Contains gateway (FastAPI REST API) and channels (IM integrations).

  Key changes:
  - Move 13 harness modules to packages/harness/deerflow/ via git mv
  - Move gateway + channels to app/ via git mv
  - Rename all imports: src.* → deerflow.* (harness) / app.* (app layer)
  - Set up uv workspace with deerflow-harness as workspace member
  - Update langgraph.json, config.example.yaml, all scripts, Docker files
  - Add build-system (hatchling) to harness pyproject.toml
  - Add PYTHONPATH=. to gateway startup commands for app.* resolution
  - Update ruff.toml with known-first-party for import sorting
  - Update all documentation to reflect the new directory structure

  Boundary rule enforced: harness code never imports from app. All 429 tests pass. Lint clean.

  Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* chore: add harness→app boundary check test and update docs

  Add test_harness_boundary.py that scans all Python files in packages/harness/deerflow/ and fails if any `from app.*` or `import app.*` statement is found. This enforces the architectural rule that the harness layer never depends on the app layer.

  Update CLAUDE.md to document the harness/app split architecture, import conventions, and the boundary enforcement test.

  Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* feat: add config versioning with auto-upgrade on startup

  When the config.example.yaml schema changes, developers' local config.yaml files can silently become outdated. This adds a config_version field and an auto-upgrade mechanism so breaking changes (like the src.* → deerflow.* renames) are applied automatically before services start.

  - Add config_version: 1 to config.example.yaml
  - Add startup version check warning in AppConfig.from_file()
  - Add scripts/config-upgrade.sh with a migration registry for value replacements
  - Add `make config-upgrade` target
  - Auto-run config-upgrade in serve.sh and start-daemon.sh before starting services
  - Add config error hints in service failure messages

  Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix comments

* fix: update src.* import in test_sandbox_tools_security to deerflow.*

  Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: handle empty config and search parent dirs for config.example.yaml

  Address Copilot review comments on PR #1131:
  - Guard against yaml.safe_load() returning None for empty config files
  - Search parent directories for config.example.yaml instead of only looking next to config.yaml, fixing detection in common setups

  Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: correct skills root path depth and config_version type coercion

  - loader.py: fix get_skills_root_path() to use 5 parent levels (was 3); after the harness split the file lives at packages/harness/deerflow/skills/, so parent×3 resolved to backend/packages/harness/ instead of backend/
  - app_config.py: coerce config_version to int() before comparison in _check_config_version() to prevent a TypeError when YAML stores the value as a string (e.g. config_version: "1")
  - tests: add regression tests for both fixes

  Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>

* fix: update test imports from src.* to deerflow.*/app.* after harness refactor

  Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
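The boundary test described in the chore commit is small enough to sketch. This is a minimal version assuming pytest discovery and the repository layout named in the commit; the real test_harness_boundary.py may differ (a naive regex like this also flags matches inside strings and comments, which is usually acceptable for a guardrail test):

```python
# Sketch of test_harness_boundary.py; names and layout follow the commit message.
import re
from pathlib import Path

HARNESS_ROOT = Path("packages/harness/deerflow")

# Flags `from app.<x> import ...`, `import app`, and `import app.<x>` statements.
_APP_IMPORT_RE = re.compile(r"^\s*(?:from\s+app[.\s]|import\s+app(?:[.\s]|$))", re.MULTILINE)


def test_harness_never_imports_app():
    offenders = [
        str(path)
        for path in HARNESS_ROOT.rglob("*.py")
        if _APP_IMPORT_RE.search(path.read_text(encoding="utf-8"))
    ]
    assert not offenders, f"harness files import from the app layer: {offenders}"
```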
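The config-version check, including the two review fixes (empty-file guard, string coercion), also reduces to a few lines. A hedged sketch, using names taken from the commit messages (_check_config_version, config_version); the real code in app_config.py may be structured differently:

```python
# Sketch of the startup version check described in the commits above.
from pathlib import Path

import yaml

CURRENT_CONFIG_VERSION = 1


def _check_config_version(config_path: Path) -> None:
    # yaml.safe_load() returns None for an empty file, so guard with `or {}`.
    data = yaml.safe_load(config_path.read_text(encoding="utf-8")) or {}
    # Coerce to int so a YAML string value (config_version: "1") still compares.
    found = int(data.get("config_version", 0))
    if found < CURRENT_CONFIG_VERSION:
        print(
            f"config.yaml is at version {found}, expected {CURRENT_CONFIG_VERSION}; "
            "run `make config-upgrade` to apply migrations."
        )
```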
150 lines
5.5 KiB
Python
"""Middleware for memory mechanism."""
|
|
|
|
import re
|
|
from typing import Any, override
|
|
|
|
from langchain.agents import AgentState
|
|
from langchain.agents.middleware import AgentMiddleware
|
|
from langgraph.runtime import Runtime
|
|
|
|
from deerflow.agents.memory.queue import get_memory_queue
|
|
from deerflow.config.memory_config import get_memory_config
|
|
|
|
|
|
class MemoryMiddlewareState(AgentState):
|
|
"""Compatible with the `ThreadState` schema."""
|
|
|
|
pass
|
|
|
|
|
|
def _filter_messages_for_memory(messages: list[Any]) -> list[Any]:
|
|
"""Filter messages to keep only user inputs and final assistant responses.
|
|
|
|
This filters out:
|
|
- Tool messages (intermediate tool call results)
|
|
- AI messages with tool_calls (intermediate steps, not final responses)
|
|
- The <uploaded_files> block injected by UploadsMiddleware into human messages
|
|
(file paths are session-scoped and must not persist in long-term memory).
|
|
The user's actual question is preserved; only turns whose content is entirely
|
|
the upload block (nothing remains after stripping) are dropped along with
|
|
their paired assistant response.
|
|
|
|
Only keeps:
|
|
- Human messages (with the ephemeral upload block removed)
|
|
- AI messages without tool_calls (final assistant responses), unless the
|
|
paired human turn was upload-only and had no real user text.
|
|
|
|
Args:
|
|
messages: List of all conversation messages.
|
|
|
|
Returns:
|
|
Filtered list containing only user inputs and final assistant responses.
|
|
"""
|
|
_UPLOAD_BLOCK_RE = re.compile(r"<uploaded_files>[\s\S]*?</uploaded_files>\n*", re.IGNORECASE)
|
|
|
|
filtered = []
|
|
skip_next_ai = False
|
|
for msg in messages:
|
|
msg_type = getattr(msg, "type", None)
|
|
|
|
if msg_type == "human":
|
|
content = getattr(msg, "content", "")
|
|
if isinstance(content, list):
|
|
content = " ".join(p.get("text", "") for p in content if isinstance(p, dict))
|
|
content_str = str(content)
|
|
if "<uploaded_files>" in content_str:
|
|
# Strip the ephemeral upload block; keep the user's real question.
|
|
stripped = _UPLOAD_BLOCK_RE.sub("", content_str).strip()
|
|
if not stripped:
|
|
# Nothing left — the entire turn was upload bookkeeping;
|
|
# skip it and the paired assistant response.
|
|
skip_next_ai = True
|
|
continue
|
|
# Rebuild the message with cleaned content so the user's question
|
|
# is still available for memory summarisation.
|
|
from copy import copy
|
|
|
|
clean_msg = copy(msg)
|
|
clean_msg.content = stripped
|
|
filtered.append(clean_msg)
|
|
skip_next_ai = False
|
|
else:
|
|
filtered.append(msg)
|
|
skip_next_ai = False
|
|
elif msg_type == "ai":
|
|
tool_calls = getattr(msg, "tool_calls", None)
|
|
if not tool_calls:
|
|
if skip_next_ai:
|
|
skip_next_ai = False
|
|
continue
|
|
filtered.append(msg)
|
|
# Skip tool messages and AI messages with tool_calls
|
|
|
|
return filtered
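
# Illustrative note (not part of the original module): given a turn like
#   [HumanMessage("Summarise the report"),
#    AIMessage(content="", tool_calls=[{...}]),  # intermediate step, dropped
#    ToolMessage(...),                           # tool output, dropped
#    AIMessage("Here is the summary: ...")]      # final answer, kept
# _filter_messages_for_memory returns only the human message and the final
# AI message, which is what gets summarised into long-term memory.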


class MemoryMiddleware(AgentMiddleware[MemoryMiddlewareState]):
    """Middleware that queues conversation for memory update after agent execution.

    This middleware:
    1. After each agent execution, queues the conversation for memory update
    2. Only includes user inputs and final assistant responses (ignores tool calls)
    3. The queue uses debouncing to batch multiple updates together
    4. Memory is updated asynchronously via LLM summarization
    """

    state_schema = MemoryMiddlewareState

    def __init__(self, agent_name: str | None = None):
        """Initialize the MemoryMiddleware.

        Args:
            agent_name: If provided, memory is stored per-agent. If None, uses global memory.
        """
        super().__init__()
        self._agent_name = agent_name

    @override
    def after_agent(self, state: MemoryMiddlewareState, runtime: Runtime) -> dict | None:
        """Queue conversation for memory update after agent completes.

        Args:
            state: The current agent state.
            runtime: The runtime context.

        Returns:
            None (no state changes needed from this middleware).
        """
        config = get_memory_config()
        if not config.enabled:
            return None

        # Get thread ID from runtime context
        thread_id = runtime.context.get("thread_id")
        if not thread_id:
            print("MemoryMiddleware: No thread_id in context, skipping memory update")
            return None

        # Get messages from state
        messages = state.get("messages", [])
        if not messages:
            print("MemoryMiddleware: No messages in state, skipping memory update")
            return None

        # Filter to only keep user inputs and final assistant responses
        filtered_messages = _filter_messages_for_memory(messages)

        # Only queue if there's meaningful conversation
        # At minimum need one user message and one assistant response
        user_messages = [m for m in filtered_messages if getattr(m, "type", None) == "human"]
        assistant_messages = [m for m in filtered_messages if getattr(m, "type", None) == "ai"]

        if not user_messages or not assistant_messages:
            return None

        # Queue the filtered conversation for memory update
        queue = get_memory_queue()
        queue.add(thread_id=thread_id, messages=filtered_messages, agent_name=self._agent_name)

        return None
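

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file). It assumes
# langchain's create_agent accepts a `middleware` list, consistent with the
# AgentMiddleware import above; the model string and empty tool list are
# placeholders.
#
#   from langchain.agents import create_agent
#
#   agent = create_agent(
#       model="openai:gpt-4o",
#       tools=[],
#       middleware=[MemoryMiddleware(agent_name="researcher")],
#   )
#
# After each run, after_agent() reads `thread_id` from the runtime context,
# filters the transcript with _filter_messages_for_memory(), and hands the
# result to the debounced memory queue for asynchronous LLM summarization.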