2026-02-03 13:31:05 +08:00
|
|
|
"""Memory updater for reading, writing, and updating memory data."""
|
|
|
|
|
|
|
|
|
|
import json
import logging
import re
import uuid
from datetime import datetime, timezone
from pathlib import Path
from typing import Any
|
|
|
|
|
|
refactor: split backend into harness (deerflow.*) and app (app.*) (#1131)
* refactor: extract shared utils to break harness→app cross-layer imports
Move _validate_skill_frontmatter to src/skills/validation.py and
CONVERTIBLE_EXTENSIONS + convert_file_to_markdown to src/utils/file_conversion.py.
This eliminates the two reverse dependencies from client.py (harness layer)
into gateway/routers/ (app layer), preparing for the harness/app package split.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
* refactor: split backend/src into harness (deerflow.*) and app (app.*)
Physically split the monolithic backend/src/ package into two layers:
- **Harness** (`packages/harness/deerflow/`): publishable agent framework
package with import prefix `deerflow.*`. Contains agents, sandbox, tools,
models, MCP, skills, config, and all core infrastructure.
- **App** (`app/`): unpublished application code with import prefix `app.*`.
Contains gateway (FastAPI REST API) and channels (IM integrations).
Key changes:
- Move 13 harness modules to packages/harness/deerflow/ via git mv
- Move gateway + channels to app/ via git mv
- Rename all imports: src.* → deerflow.* (harness) / app.* (app layer)
- Set up uv workspace with deerflow-harness as workspace member
- Update langgraph.json, config.example.yaml, all scripts, Docker files
- Add build-system (hatchling) to harness pyproject.toml
- Add PYTHONPATH=. to gateway startup commands for app.* resolution
- Update ruff.toml with known-first-party for import sorting
- Update all documentation to reflect new directory structure
Boundary rule enforced: harness code never imports from app.
All 429 tests pass. Lint clean.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
* chore: add harness→app boundary check test and update docs
Add test_harness_boundary.py that scans all Python files in
packages/harness/deerflow/ and fails if any `from app.*` or
`import app.*` statement is found. This enforces the architectural
rule that the harness layer never depends on the app layer.
Update CLAUDE.md to document the harness/app split architecture,
import conventions, and the boundary enforcement test.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
* feat: add config versioning with auto-upgrade on startup
When config.example.yaml schema changes, developers' local config.yaml
files can silently become outdated. This adds a config_version field and
auto-upgrade mechanism so breaking changes (like src.* → deerflow.*
renames) are applied automatically before services start.
- Add config_version: 1 to config.example.yaml
- Add startup version check warning in AppConfig.from_file()
- Add scripts/config-upgrade.sh with migration registry for value replacements
- Add `make config-upgrade` target
- Auto-run config-upgrade in serve.sh and start-daemon.sh before starting services
- Add config error hints in service failure messages
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
* fix comments
* fix: update src.* import in test_sandbox_tools_security to deerflow.*
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
* fix: handle empty config and search parent dirs for config.example.yaml
Address Copilot review comments on PR #1131:
- Guard against yaml.safe_load() returning None for empty config files
- Search parent directories for config.example.yaml instead of only
looking next to config.yaml, fixing detection in common setups
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
* fix: correct skills root path depth and config_version type coercion
- loader.py: fix get_skills_root_path() to use 5 parent levels (was 3)
after harness split, file lives at packages/harness/deerflow/skills/
so parent×3 resolved to backend/packages/harness/ instead of backend/
- app_config.py: coerce config_version to int() before comparison in
_check_config_version() to prevent TypeError when YAML stores value
as string (e.g. config_version: "1")
- tests: add regression tests for both fixes
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
* fix: update test imports from src.* to deerflow.*/app.* after harness refactor
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
---------
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-14 22:55:52 +08:00
|
|
|
from deerflow.agents.memory.prompt import (
|
2026-02-03 13:31:05 +08:00
|
|
|
MEMORY_UPDATE_PROMPT,
|
|
|
|
|
format_conversation_for_update,
|
|
|
|
|
)
|
refactor: split backend into harness (deerflow.*) and app (app.*) (#1131)
* refactor: extract shared utils to break harness→app cross-layer imports
Move _validate_skill_frontmatter to src/skills/validation.py and
CONVERTIBLE_EXTENSIONS + convert_file_to_markdown to src/utils/file_conversion.py.
This eliminates the two reverse dependencies from client.py (harness layer)
into gateway/routers/ (app layer), preparing for the harness/app package split.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
* refactor: split backend/src into harness (deerflow.*) and app (app.*)
Physically split the monolithic backend/src/ package into two layers:
- **Harness** (`packages/harness/deerflow/`): publishable agent framework
package with import prefix `deerflow.*`. Contains agents, sandbox, tools,
models, MCP, skills, config, and all core infrastructure.
- **App** (`app/`): unpublished application code with import prefix `app.*`.
Contains gateway (FastAPI REST API) and channels (IM integrations).
Key changes:
- Move 13 harness modules to packages/harness/deerflow/ via git mv
- Move gateway + channels to app/ via git mv
- Rename all imports: src.* → deerflow.* (harness) / app.* (app layer)
- Set up uv workspace with deerflow-harness as workspace member
- Update langgraph.json, config.example.yaml, all scripts, Docker files
- Add build-system (hatchling) to harness pyproject.toml
- Add PYTHONPATH=. to gateway startup commands for app.* resolution
- Update ruff.toml with known-first-party for import sorting
- Update all documentation to reflect new directory structure
Boundary rule enforced: harness code never imports from app.
All 429 tests pass. Lint clean.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
* chore: add harness→app boundary check test and update docs
Add test_harness_boundary.py that scans all Python files in
packages/harness/deerflow/ and fails if any `from app.*` or
`import app.*` statement is found. This enforces the architectural
rule that the harness layer never depends on the app layer.
Update CLAUDE.md to document the harness/app split architecture,
import conventions, and the boundary enforcement test.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
* feat: add config versioning with auto-upgrade on startup
When config.example.yaml schema changes, developers' local config.yaml
files can silently become outdated. This adds a config_version field and
auto-upgrade mechanism so breaking changes (like src.* → deerflow.*
renames) are applied automatically before services start.
- Add config_version: 1 to config.example.yaml
- Add startup version check warning in AppConfig.from_file()
- Add scripts/config-upgrade.sh with migration registry for value replacements
- Add `make config-upgrade` target
- Auto-run config-upgrade in serve.sh and start-daemon.sh before starting services
- Add config error hints in service failure messages
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
* fix comments
* fix: update src.* import in test_sandbox_tools_security to deerflow.*
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
* fix: handle empty config and search parent dirs for config.example.yaml
Address Copilot review comments on PR #1131:
- Guard against yaml.safe_load() returning None for empty config files
- Search parent directories for config.example.yaml instead of only
looking next to config.yaml, fixing detection in common setups
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
* fix: correct skills root path depth and config_version type coercion
- loader.py: fix get_skills_root_path() to use 5 parent levels (was 3)
after harness split, file lives at packages/harness/deerflow/skills/
so parent×3 resolved to backend/packages/harness/ instead of backend/
- app_config.py: coerce config_version to int() before comparison in
_check_config_version() to prevent TypeError when YAML stores value
as string (e.g. config_version: "1")
- tests: add regression tests for both fixes
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
* fix: update test imports from src.* to deerflow.*/app.* after harness refactor
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
---------
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-14 22:55:52 +08:00
|
|
|
from deerflow.config.memory_config import get_memory_config
|
|
|
|
|
from deerflow.config.paths import get_paths
|
|
|
|
|
from deerflow.models import create_chat_model
|
2026-02-03 13:31:05 +08:00
|
|
|
|
fix: normalize structured LLM content in serialization and memory updater (#1215)
* fix: normalize ToolMessage structured content in serialization
When models return ToolMessage content as a list of content blocks
(e.g. [{"type": "text", "text": "..."}]), the UI previously displayed
the raw Python repr string instead of the extracted text.
Replace str(msg.content) with the existing _extract_text() helper in
both _serialize_message() and stream() to properly normalize
list-of-blocks content to plain text.
Fixes #1149
Also fixes the same root cause as #1188 (characters displayed one per
line when tool response content is returned as structured blocks).
Added 11 regression tests covering string, list-of-blocks, mixed,
empty, and fallback content types.
* fix(memory): extract text from structured LLM responses in memory updater
When LLMs return response content as list of content blocks
(e.g. [{"type": "text", "text": "..."}]) instead of plain strings,
str() produces Python repr which breaks JSON parsing in the memory
updater. This caused memory updates to silently fail.
Changes:
- Add _extract_text() helper in updater.py for safe content normalization
- Use _extract_text() instead of str(response.content) in update_memory()
- Fix format_conversation_for_update() to handle plain strings in list content
- Fix subagent executor fallback path to extract text from list content
- Replace print() with structured logging (logger.info/warning/error)
- Add 13 regression tests covering _extract_text, format_conversation,
and update_memory with structured LLM responses
* fix: address Copilot review - defensive text extraction + logger.exception
- client.py _extract_text: use block.get('text') + isinstance check (prevent KeyError/TypeError)
- prompt.py format_conversation_for_update: same defensive check for dict text blocks
- executor.py: type-safe text extraction in both code paths, fallback to placeholder instead of str(raw_content)
- updater.py: use logger.exception() instead of logger.error() for traceback preservation
* Apply suggestions from code review
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
* fix: preserve chunked structured content without spurious newlines
* fix: restore backend unit test compatibility
---------
Co-authored-by: Exploreunive <Exploreunive@users.noreply.github.com>
Co-authored-by: Willem Jiang <willem.jiang@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-03-22 17:29:29 +08:00
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
2026-02-03 13:31:05 +08:00
|
|
|
|
2026-03-03 21:32:01 +08:00
|
|
|
def _get_memory_file_path(agent_name: str | None = None) -> Path:
    """Resolve the path of the memory file to read from / write to.

    Args:
        agent_name: If provided, returns the per-agent memory file path.
            If None, returns the global memory file path.

    Returns:
        Path to the memory file.
    """
    # Per-agent memory lives in a dedicated per-agent location.
    if agent_name is not None:
        return get_paths().agent_memory_file(agent_name)

    configured = get_memory_config().storage_path
    if configured:
        candidate = Path(configured)
        # Absolute path: use as-is; relative path: resolve against base_dir
        if candidate.is_absolute():
            return candidate
        return get_paths().base_dir / candidate

    # No override configured: fall back to the default global memory file.
    return get_paths().memory_file
|
2026-02-03 13:31:05 +08:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def _create_empty_memory() -> dict[str, Any]:
|
|
|
|
|
"""Create an empty memory structure."""
|
|
|
|
|
return {
|
|
|
|
|
"version": "1.0",
|
|
|
|
|
"lastUpdated": datetime.utcnow().isoformat() + "Z",
|
|
|
|
|
"user": {
|
|
|
|
|
"workContext": {"summary": "", "updatedAt": ""},
|
|
|
|
|
"personalContext": {"summary": "", "updatedAt": ""},
|
|
|
|
|
"topOfMind": {"summary": "", "updatedAt": ""},
|
|
|
|
|
},
|
|
|
|
|
"history": {
|
|
|
|
|
"recentMonths": {"summary": "", "updatedAt": ""},
|
|
|
|
|
"earlierContext": {"summary": "", "updatedAt": ""},
|
|
|
|
|
"longTermBackground": {"summary": "", "updatedAt": ""},
|
|
|
|
|
},
|
|
|
|
|
"facts": [],
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2026-03-03 21:32:01 +08:00
|
|
|
# Per-agent memory cache: keyed by agent_name (None = global)
# Value: (memory_data, file_mtime)
# file_mtime is None when the file was missing or unreadable at load time;
# get_memory_data() compares it to the file's current mtime to decide whether
# the cached entry is still fresh, and save/reload refresh it.
_memory_cache: dict[str | None, tuple[dict[str, Any], float | None]] = {}
|
2026-02-03 13:31:05 +08:00
|
|
|
|
|
|
|
|
|
2026-03-03 21:32:01 +08:00
|
|
|
def get_memory_data(agent_name: str | None = None) -> dict[str, Any]:
    """Return memory data, served from an mtime-validated cache.

    A cached entry is reused only while the backing file's modification time
    is unchanged since the last load; otherwise the file is re-read, so fresh
    data is always returned.

    Args:
        agent_name: If provided, loads per-agent memory. If None, loads global memory.

    Returns:
        The memory data dictionary.
    """
    file_path = _get_memory_file_path(agent_name)

    # Snapshot the file's current mtime (None when missing or unreadable).
    mtime: float | None
    try:
        mtime = file_path.stat().st_mtime if file_path.exists() else None
    except OSError:
        mtime = None

    entry = _memory_cache.get(agent_name)
    if entry is not None and entry[1] == mtime:
        # Cache hit and the file has not changed underneath us.
        return entry[0]

    # Cache miss or stale entry: reload from disk and refresh the cache.
    data = _load_memory_from_file(agent_name)
    _memory_cache[agent_name] = (data, mtime)
    return data
|
2026-02-03 13:31:05 +08:00
|
|
|
|
|
|
|
|
|
2026-03-03 21:32:01 +08:00
|
|
|
def reload_memory_data(agent_name: str | None = None) -> dict[str, Any]:
    """Reload memory data from file, forcing cache invalidation.

    Args:
        agent_name: If provided, reloads per-agent memory. If None, reloads global memory.

    Returns:
        The reloaded memory data dictionary.
    """
    path = _get_memory_file_path(agent_name)
    data = _load_memory_from_file(agent_name)

    # Record the mtime alongside the data so later reads can detect staleness.
    try:
        mtime = path.stat().st_mtime if path.exists() else None
    except OSError:
        mtime = None

    _memory_cache[agent_name] = (data, mtime)
    return data
|
2026-02-03 13:31:05 +08:00
|
|
|
|
|
|
|
|
|
fix: normalize structured LLM content in serialization and memory updater (#1215)
* fix: normalize ToolMessage structured content in serialization
When models return ToolMessage content as a list of content blocks
(e.g. [{"type": "text", "text": "..."}]), the UI previously displayed
the raw Python repr string instead of the extracted text.
Replace str(msg.content) with the existing _extract_text() helper in
both _serialize_message() and stream() to properly normalize
list-of-blocks content to plain text.
Fixes #1149
Also fixes the same root cause as #1188 (characters displayed one per
line when tool response content is returned as structured blocks).
Added 11 regression tests covering string, list-of-blocks, mixed,
empty, and fallback content types.
* fix(memory): extract text from structured LLM responses in memory updater
When LLMs return response content as list of content blocks
(e.g. [{"type": "text", "text": "..."}]) instead of plain strings,
str() produces Python repr which breaks JSON parsing in the memory
updater. This caused memory updates to silently fail.
Changes:
- Add _extract_text() helper in updater.py for safe content normalization
- Use _extract_text() instead of str(response.content) in update_memory()
- Fix format_conversation_for_update() to handle plain strings in list content
- Fix subagent executor fallback path to extract text from list content
- Replace print() with structured logging (logger.info/warning/error)
- Add 13 regression tests covering _extract_text, format_conversation,
and update_memory with structured LLM responses
* fix: address Copilot review - defensive text extraction + logger.exception
- client.py _extract_text: use block.get('text') + isinstance check (prevent KeyError/TypeError)
- prompt.py format_conversation_for_update: same defensive check for dict text blocks
- executor.py: type-safe text extraction in both code paths, fallback to placeholder instead of str(raw_content)
- updater.py: use logger.exception() instead of logger.error() for traceback preservation
* Apply suggestions from code review
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
* fix: preserve chunked structured content without spurious newlines
* fix: restore backend unit test compatibility
---------
Co-authored-by: Exploreunive <Exploreunive@users.noreply.github.com>
Co-authored-by: Willem Jiang <willem.jiang@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-03-22 17:29:29 +08:00
|
|
|
def _extract_text(content: Any) -> str:
|
|
|
|
|
"""Extract plain text from LLM response content (str or list of content blocks).
|
|
|
|
|
|
|
|
|
|
Modern LLMs may return structured content as a list of blocks instead of a
|
|
|
|
|
plain string, e.g. [{"type": "text", "text": "..."}]. Using str() on such
|
|
|
|
|
content produces Python repr instead of the actual text, breaking JSON
|
|
|
|
|
parsing downstream.
|
|
|
|
|
|
|
|
|
|
String chunks are concatenated without separators to avoid corrupting
|
|
|
|
|
chunked JSON/text payloads. Dict-based text blocks are treated as full text
|
|
|
|
|
blocks and joined with newlines for readability.
|
|
|
|
|
"""
|
|
|
|
|
if isinstance(content, str):
|
|
|
|
|
return content
|
|
|
|
|
if isinstance(content, list):
|
|
|
|
|
pieces: list[str] = []
|
|
|
|
|
pending_str_parts: list[str] = []
|
|
|
|
|
|
|
|
|
|
def flush_pending_str_parts() -> None:
|
|
|
|
|
if pending_str_parts:
|
|
|
|
|
pieces.append("".join(pending_str_parts))
|
|
|
|
|
pending_str_parts.clear()
|
|
|
|
|
|
|
|
|
|
for block in content:
|
|
|
|
|
if isinstance(block, str):
|
|
|
|
|
pending_str_parts.append(block)
|
|
|
|
|
elif isinstance(block, dict):
|
|
|
|
|
flush_pending_str_parts()
|
|
|
|
|
text_val = block.get("text")
|
|
|
|
|
if isinstance(text_val, str):
|
|
|
|
|
pieces.append(text_val)
|
|
|
|
|
|
|
|
|
|
flush_pending_str_parts()
|
|
|
|
|
return "\n".join(pieces)
|
|
|
|
|
return str(content)
|
|
|
|
|
|
|
|
|
|
|
2026-03-03 21:32:01 +08:00
|
|
|
def _load_memory_from_file(agent_name: str | None = None) -> dict[str, Any]:
    """Read memory data from disk, falling back to an empty structure.

    Args:
        agent_name: If provided, loads per-agent memory file. If None, loads global.

    Returns:
        The memory data dictionary.
    """
    file_path = _get_memory_file_path(agent_name)

    if not file_path.exists():
        return _create_empty_memory()

    try:
        with open(file_path, encoding="utf-8") as fh:
            return json.load(fh)
    except (json.JSONDecodeError, OSError) as e:
        # Corrupt or unreadable file: log and start fresh rather than crash.
        logger.warning("Failed to load memory file: %s", e)
        return _create_empty_memory()
|
|
|
|
|
|
|
|
|
|
|
2026-03-05 11:14:34 +08:00
|
|
|
# Matches sentences that describe a file-upload *event* rather than general
|
|
|
|
|
# file-related work. Deliberately narrow to avoid removing legitimate facts
|
|
|
|
|
# such as "User works with CSV files" or "prefers PDF export".
|
|
|
|
|
_UPLOAD_SENTENCE_RE = re.compile(
|
|
|
|
|
r"[^.!?]*\b(?:"
|
|
|
|
|
r"upload(?:ed|ing)?(?:\s+\w+){0,3}\s+(?:file|files?|document|documents?|attachment|attachments?)"
|
|
|
|
|
r"|file\s+upload"
|
|
|
|
|
r"|/mnt/user-data/uploads/"
|
|
|
|
|
r"|<uploaded_files>"
|
|
|
|
|
r")[^.!?]*[.!?]?\s*",
|
|
|
|
|
re.IGNORECASE,
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def _strip_upload_mentions_from_memory(memory_data: dict[str, Any]) -> dict[str, Any]:
|
|
|
|
|
"""Remove sentences about file uploads from all memory summaries and facts.
|
|
|
|
|
|
|
|
|
|
Uploaded files are session-scoped; persisting upload events in long-term
|
|
|
|
|
memory causes the agent to search for non-existent files in future sessions.
|
|
|
|
|
"""
|
|
|
|
|
# Scrub summaries in user/history sections
|
|
|
|
|
for section in ("user", "history"):
|
|
|
|
|
section_data = memory_data.get(section, {})
|
|
|
|
|
for _key, val in section_data.items():
|
|
|
|
|
if isinstance(val, dict) and "summary" in val:
|
|
|
|
|
cleaned = _UPLOAD_SENTENCE_RE.sub("", val["summary"]).strip()
|
|
|
|
|
cleaned = re.sub(r" +", " ", cleaned)
|
|
|
|
|
val["summary"] = cleaned
|
|
|
|
|
|
|
|
|
|
# Also remove any facts that describe upload events
|
|
|
|
|
facts = memory_data.get("facts", [])
|
|
|
|
|
if facts:
|
feat: add IM channels for Feishu, Slack, and Telegram (#1010)
* feat: add IM channels system for Feishu, Slack, and Telegram integration
Bridge external messaging platforms to DeerFlow via LangGraph Server with
async message bus, thread management, and per-channel configuration.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
* fix: address review comments on IM channels system
Fix topic_id handling in store remove/list_entries and manager commands,
correct Telegram reply threading, remove unused imports/variables, update
docstrings and docs to match implementation, and prevent config mutation.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
* update skill creator
* fix im reply text
* fix comments
---------
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-08 15:21:18 +08:00
|
|
|
memory_data["facts"] = [f for f in facts if not _UPLOAD_SENTENCE_RE.search(f.get("content", ""))]
|
2026-03-05 11:14:34 +08:00
|
|
|
|
|
|
|
|
return memory_data
|
|
|
|
|
|
|
|
|
|
|
2026-03-18 22:41:13 +08:00
|
|
|
def _fact_content_key(content: Any) -> str | None:
|
|
|
|
|
if not isinstance(content, str):
|
|
|
|
|
return None
|
|
|
|
|
stripped = content.strip()
|
|
|
|
|
if not stripped:
|
|
|
|
|
return None
|
|
|
|
|
return stripped
|
|
|
|
|
|
|
|
|
|
|
2026-03-03 21:32:01 +08:00
|
|
|
def _save_memory_to_file(memory_data: dict[str, Any], agent_name: str | None = None) -> bool:
    """Save memory data to file and update cache.

    Writes atomically via a temp file, stamps ``lastUpdated`` with a
    "Z"-suffixed UTC timestamp, and refreshes the in-process cache entry.

    Args:
        memory_data: The memory data to save.
        agent_name: If provided, saves to per-agent memory file. If None, saves to global.

    Returns:
        True if successful, False otherwise.
    """
    file_path = _get_memory_file_path(agent_name)
    temp_path = file_path.with_suffix(".tmp")

    try:
        # Ensure directory exists
        file_path.parent.mkdir(parents=True, exist_ok=True)

        # Update lastUpdated timestamp. datetime.utcnow() is deprecated since
        # Python 3.12; produce the identical naive-UTC ISO string instead.
        now_utc = datetime.now(timezone.utc).replace(tzinfo=None)
        memory_data["lastUpdated"] = now_utc.isoformat() + "Z"

        # Write atomically using temp file
        with open(temp_path, "w", encoding="utf-8") as f:
            json.dump(memory_data, f, indent=2, ensure_ascii=False)

        # Rename temp file to actual file (atomic on most systems)
        temp_path.replace(file_path)

        # Update cache and file modification time
        try:
            mtime = file_path.stat().st_mtime
        except OSError:
            mtime = None

        _memory_cache[agent_name] = (memory_data, mtime)

        logger.info("Memory saved to %s", file_path)
        return True
    except (OSError, TypeError, ValueError) as e:
        # TypeError/ValueError cover json.dump failures on non-serializable
        # data, which previously escaped despite the documented bool contract.
        # logger.exception preserves the traceback (file convention).
        logger.exception("Failed to save memory file: %s", e)
        # Best-effort removal of a partially written temp file.
        try:
            temp_path.unlink(missing_ok=True)
        except OSError:
            pass
        return False
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class MemoryUpdater:
    """Updates memory using LLM based on conversation context."""

    def __init__(self, model_name: str | None = None):
        """Initialize the memory updater.

        Args:
            model_name: Optional model name to use. If None, uses config or default.
        """
        self._model_name = model_name

    def _get_model(self):
        """Get the chat model used for memory updates.

        Thinking mode is disabled because the update prompt expects a plain
        JSON response rather than a reasoning trace.
        """
        config = get_memory_config()
        model_name = self._model_name or config.model_name
        return create_chat_model(name=model_name, thinking_enabled=False)

    def update_memory(self, messages: list[Any], thread_id: str | None = None, agent_name: str | None = None) -> bool:
        """Update memory based on conversation messages.

        Args:
            messages: List of conversation messages.
            thread_id: Optional thread ID for tracking source.
            agent_name: If provided, updates per-agent memory. If None, updates global memory.

        Returns:
            True if update was successful, False otherwise.
        """
        config = get_memory_config()
        if not config.enabled:
            return False

        if not messages:
            return False

        try:
            # Get current memory
            current_memory = get_memory_data(agent_name)

            # Format conversation for prompt
            conversation_text = format_conversation_for_update(messages)

            if not conversation_text.strip():
                return False

            # Build prompt
            prompt = MEMORY_UPDATE_PROMPT.format(
                current_memory=json.dumps(current_memory, indent=2),
                conversation=conversation_text,
            )

            # Call LLM
            model = self._get_model()
            response = model.invoke(prompt)
            # LLMs may return structured content blocks instead of a plain
            # string; _extract_text normalizes so json.loads below doesn't
            # choke on a Python repr of a list.
            response_text = _extract_text(response.content).strip()

            # Parse response.
            # Remove markdown code fences if present (``` or ```json wrappers).
            if response_text.startswith("```"):
                lines = response_text.split("\n")
                response_text = "\n".join(lines[1:-1] if lines[-1] == "```" else lines[1:])

            update_data = json.loads(response_text)

            # Apply updates
            updated_memory = self._apply_updates(current_memory, update_data, thread_id)

            # Strip file-upload mentions from all summaries before saving.
            # Uploaded files are session-scoped and won't exist in future sessions,
            # so recording upload events in long-term memory causes the agent to
            # try (and fail) to locate those files in subsequent conversations.
            updated_memory = _strip_upload_mentions_from_memory(updated_memory)

            # Save
            return _save_memory_to_file(updated_memory, agent_name)

        except json.JSONDecodeError as e:
            logger.warning("Failed to parse LLM response for memory update: %s", e)
            return False

        except Exception as e:
            logger.exception("Memory update failed: %s", e)
            return False

    def _apply_updates(
        self,
        current_memory: dict[str, Any],
        update_data: dict[str, Any],
        thread_id: str | None = None,
    ) -> dict[str, Any]:
        """Apply LLM-generated updates to memory.

        Args:
            current_memory: Current memory data (mutated in place and returned).
            update_data: Updates from LLM.
            thread_id: Optional thread ID for tracking.

        Returns:
            Updated memory data.
        """
        # Local import keeps the module-level import block untouched.
        from datetime import timezone

        config = get_memory_config()
        # Timezone-aware replacement for the deprecated datetime.utcnow();
        # .replace(tzinfo=None) preserves the exact same naive "...Z" ISO format.
        now = datetime.now(timezone.utc).replace(tzinfo=None).isoformat() + "Z"

        # Update user sections
        user_updates = update_data.get("user", {})
        for section in ["workContext", "personalContext", "topOfMind"]:
            section_data = user_updates.get(section, {})
            if section_data.get("shouldUpdate") and section_data.get("summary"):
                # setdefault guards against a memory blob missing the "user" key.
                current_memory.setdefault("user", {})[section] = {
                    "summary": section_data["summary"],
                    "updatedAt": now,
                }

        # Update history sections
        history_updates = update_data.get("history", {})
        for section in ["recentMonths", "earlierContext", "longTermBackground"]:
            section_data = history_updates.get(section, {})
            if section_data.get("shouldUpdate") and section_data.get("summary"):
                current_memory.setdefault("history", {})[section] = {
                    "summary": section_data["summary"],
                    "updatedAt": now,
                }

        # Remove facts
        facts_to_remove = set(update_data.get("factsToRemove", []))
        if facts_to_remove:
            current_memory["facts"] = [f for f in current_memory.get("facts", []) if f.get("id") not in facts_to_remove]

        # Add new facts, deduplicating by normalized content key.
        existing_fact_keys = {
            fact_key
            for fact_key in (_fact_content_key(fact.get("content")) for fact in current_memory.get("facts", []))
            if fact_key is not None
        }
        # setdefault: the add path previously assumed "facts" existed while the
        # removal path above used .get() defensively — keep both paths safe.
        facts = current_memory.setdefault("facts", [])
        new_facts = update_data.get("newFacts", [])
        for fact in new_facts:
            confidence = fact.get("confidence", 0.5)
            if confidence >= config.fact_confidence_threshold:
                raw_content = fact.get("content", "")
                normalized_content = raw_content.strip()
                fact_key = _fact_content_key(normalized_content)
                if fact_key is not None and fact_key in existing_fact_keys:
                    continue

                fact_entry = {
                    "id": f"fact_{uuid.uuid4().hex[:8]}",
                    "content": normalized_content,
                    "category": fact.get("category", "context"),
                    "confidence": confidence,
                    "createdAt": now,
                    "source": thread_id or "unknown",
                }
                facts.append(fact_entry)
                if fact_key is not None:
                    existing_fact_keys.add(fact_key)

        # Enforce max facts limit
        if len(current_memory["facts"]) > config.max_facts:
            # Sort by confidence and keep top ones
            current_memory["facts"] = sorted(
                current_memory["facts"],
                key=lambda f: f.get("confidence", 0),
                reverse=True,
            )[: config.max_facts]

        return current_memory
|
|
|
|
|
|
|
|
|
|
|
2026-03-03 21:32:01 +08:00
|
|
|
def update_memory_from_conversation(messages: list[Any], thread_id: str | None = None, agent_name: str | None = None) -> bool:
    """Convenience wrapper that runs a memory update over a conversation.

    Instantiates a default :class:`MemoryUpdater` and delegates to its
    :meth:`~MemoryUpdater.update_memory` method.

    Args:
        messages: List of conversation messages.
        thread_id: Optional thread ID.
        agent_name: If provided, updates per-agent memory. If None, updates global memory.

    Returns:
        True if successful, False otherwise.
    """
    return MemoryUpdater().update_memory(messages, thread_id, agent_name)
|