feat: add IM channels for Feishu, Slack, and Telegram (#1010)

* feat: add IM channels system for Feishu, Slack, and Telegram integration

Bridge external messaging platforms to DeerFlow via LangGraph Server with
async message bus, thread management, and per-channel configuration.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* fix: address review comments on IM channels system

Fix topic_id handling in store remove/list_entries and manager commands,
correct Telegram reply threading, remove unused imports/variables, update
docstrings and docs to match implementation, and prevent config mutation.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* update skill creator

* fix IM reply text

* fix comments

---------

Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
DanielWalnut
2026-03-08 15:21:18 +08:00
committed by GitHub
parent d664ae5a4b
commit 75b7302000
49 changed files with 8354 additions and 367 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -20,6 +20,7 @@ from src.gateway.routers.uploads import UploadResponse
# Fixtures
# ---------------------------------------------------------------------------
@pytest.fixture
def mock_app_config():
"""Provide a minimal AppConfig mock."""
@@ -45,6 +46,7 @@ def client(mock_app_config):
# __init__
# ---------------------------------------------------------------------------
class TestClientInit:
def test_default_params(self, client):
assert client._model_name is None
@@ -86,6 +88,7 @@ class TestClientInit:
# list_models / list_skills / get_memory
# ---------------------------------------------------------------------------
class TestConfigQueries:
def test_list_models(self, client):
result = client.list_models()
@@ -135,6 +138,7 @@ class TestConfigQueries:
# stream / chat
# ---------------------------------------------------------------------------
def _make_agent_mock(chunks: list[dict]):
"""Create a mock agent whose .stream() yields the given chunks."""
agent = MagicMock()
@@ -314,6 +318,7 @@ class TestChat:
# _extract_text
# ---------------------------------------------------------------------------
class TestExtractText:
def test_string(self):
assert DeerFlowClient._extract_text("hello") == "hello"
@@ -340,6 +345,7 @@ class TestExtractText:
# _ensure_agent
# ---------------------------------------------------------------------------
class TestEnsureAgent:
def test_creates_agent(self, client):
"""_ensure_agent creates an agent on first call."""
@@ -374,6 +380,7 @@ class TestEnsureAgent:
# get_model
# ---------------------------------------------------------------------------
class TestGetModel:
def test_found(self, client):
model_cfg = MagicMock()
@@ -402,6 +409,7 @@ class TestGetModel:
# MCP config
# ---------------------------------------------------------------------------
class TestMcpConfig:
def test_get_mcp_config(self, client):
server = MagicMock()
@@ -457,6 +465,7 @@ class TestMcpConfig:
# Skills management
# ---------------------------------------------------------------------------
class TestSkillsManagement:
def _make_skill(self, name="test-skill", enabled=True):
s = MagicMock()
@@ -556,6 +565,7 @@ class TestSkillsManagement:
# Memory management
# ---------------------------------------------------------------------------
class TestMemoryManagement:
def test_reload_memory(self, client):
data = {"version": "1.0", "facts": []}
@@ -605,6 +615,7 @@ class TestMemoryManagement:
# Uploads
# ---------------------------------------------------------------------------
class TestUploads:
def test_upload_files(self, client):
with tempfile.TemporaryDirectory() as tmp:
@@ -678,6 +689,7 @@ class TestUploads:
# Artifacts
# ---------------------------------------------------------------------------
class TestArtifacts:
def test_get_artifact(self, client):
with tempfile.TemporaryDirectory() as tmp:
@@ -759,9 +771,13 @@ class TestScenarioMultiTurnConversation:
def test_stream_collects_all_event_types_across_turns(self, client):
"""A full turn emits messages-tuple (tool_call, tool_result, ai text) + values + end."""
ai_tc = AIMessage(content="", id="ai-1", tool_calls=[
{"name": "web_search", "args": {"query": "LangGraph"}, "id": "tc-1"},
])
ai_tc = AIMessage(
content="",
id="ai-1",
tool_calls=[
{"name": "web_search", "args": {"query": "LangGraph"}, "id": "tc-1"},
],
)
tool_r = ToolMessage(content="LangGraph is a framework...", id="tm-1", tool_call_id="tc-1", name="web_search")
ai_final = AIMessage(content="LangGraph is a framework for building agents.", id="ai-2")
@@ -809,13 +825,21 @@ class TestScenarioToolChain:
def test_multi_tool_chain(self, client):
"""Agent calls bash → reads output → calls write_file → responds."""
ai_bash = AIMessage(content="", id="ai-1", tool_calls=[
{"name": "bash", "args": {"cmd": "ls /mnt/user-data/workspace"}, "id": "tc-1"},
])
ai_bash = AIMessage(
content="",
id="ai-1",
tool_calls=[
{"name": "bash", "args": {"cmd": "ls /mnt/user-data/workspace"}, "id": "tc-1"},
],
)
bash_result = ToolMessage(content="README.md\nsrc/", id="tm-1", tool_call_id="tc-1", name="bash")
ai_write = AIMessage(content="", id="ai-2", tool_calls=[
{"name": "write_file", "args": {"path": "/mnt/user-data/outputs/listing.txt", "content": "README.md\nsrc/"}, "id": "tc-2"},
])
ai_write = AIMessage(
content="",
id="ai-2",
tool_calls=[
{"name": "write_file", "args": {"path": "/mnt/user-data/outputs/listing.txt", "content": "README.md\nsrc/"}, "id": "tc-2"},
],
)
write_result = ToolMessage(content="File written successfully.", id="tm-2", tool_call_id="tc-2", name="write_file")
ai_final = AIMessage(content="I listed the workspace and saved the output.", id="ai-3")
@@ -862,10 +886,13 @@ class TestScenarioFileLifecycle:
with patch.object(DeerFlowClient, "_get_uploads_dir", return_value=uploads_dir):
# Step 1: Upload
result = client.upload_files("t-lifecycle", [
tmp_path / "report.txt",
tmp_path / "data.csv",
])
result = client.upload_files(
"t-lifecycle",
[
tmp_path / "report.txt",
tmp_path / "data.csv",
],
)
assert result["success"] is True
assert len(result["files"]) == 2
assert {f["filename"] for f in result["files"]} == {"report.txt", "data.csv"}
@@ -1166,10 +1193,13 @@ class TestScenarioMemoryWorkflow:
def test_memory_full_lifecycle(self, client):
"""get_memory → reload → get_status covers the full memory API."""
initial_data = {"version": "1.0", "facts": [{"id": "f1", "content": "User likes Python"}]}
updated_data = {"version": "1.0", "facts": [
{"id": "f1", "content": "User likes Python"},
{"id": "f2", "content": "User prefers dark mode"},
]}
updated_data = {
"version": "1.0",
"facts": [
{"id": "f1", "content": "User likes Python"},
{"id": "f2", "content": "User prefers dark mode"},
],
}
config = MagicMock()
config.enabled = True
@@ -1208,9 +1238,7 @@ class TestScenarioSkillInstallAndUse:
# Create .skill archive
skill_src = tmp_path / "my-analyzer"
skill_src.mkdir()
(skill_src / "SKILL.md").write_text(
"---\nname: my-analyzer\ndescription: Analyze code\nlicense: MIT\n---\nAnalysis skill"
)
(skill_src / "SKILL.md").write_text("---\nname: my-analyzer\ndescription: Analyze code\nlicense: MIT\n---\nAnalysis skill")
archive = tmp_path / "my-analyzer.skill"
with zipfile.ZipFile(archive, "w") as zf:
zf.write(skill_src / "SKILL.md", "my-analyzer/SKILL.md")
@@ -1319,11 +1347,15 @@ class TestScenarioEdgeCases:
def test_concurrent_tool_calls_in_single_message(self, client):
"""Agent produces multiple tool_calls in one AIMessage — emitted as single messages-tuple."""
ai = AIMessage(content="", id="ai-1", tool_calls=[
{"name": "web_search", "args": {"q": "a"}, "id": "tc-1"},
{"name": "web_search", "args": {"q": "b"}, "id": "tc-2"},
{"name": "bash", "args": {"cmd": "echo hi"}, "id": "tc-3"},
])
ai = AIMessage(
content="",
id="ai-1",
tool_calls=[
{"name": "web_search", "args": {"q": "a"}, "id": "tc-1"},
{"name": "web_search", "args": {"q": "b"}, "id": "tc-2"},
{"name": "bash", "args": {"cmd": "echo hi"}, "id": "tc-3"},
],
)
chunks = [{"messages": [ai]}]
agent = _make_agent_mock(chunks)
@@ -1367,6 +1399,7 @@ class TestScenarioEdgeCases:
# Gateway conformance — validate client output against Gateway Pydantic models
# ---------------------------------------------------------------------------
class TestGatewayConformance:
"""Validate that DeerFlowClient return dicts conform to Gateway Pydantic response models.
@@ -1441,9 +1474,7 @@ class TestGatewayConformance:
def test_install_skill(self, client, tmp_path):
skill_dir = tmp_path / "my-skill"
skill_dir.mkdir()
(skill_dir / "SKILL.md").write_text(
"---\nname: my-skill\ndescription: A test skill\n---\nBody\n"
)
(skill_dir / "SKILL.md").write_text("---\nname: my-skill\ndescription: A test skill\n---\nBody\n")
archive = tmp_path / "my-skill.skill"
with zipfile.ZipFile(archive, "w") as zf:

View File

@@ -125,7 +125,7 @@ class TestInfoQuestClient:
def test_clean_results_with_image_search(self):
"""Test clean_results_with_image_search method with sample raw results."""
raw_results = [{"content": {"results": {"images_results": [{"image_url": "https://example.com/image1.jpg", "thumbnail_url": "https://example.com/thumb1.jpg","url": "https://example.com/page1"}]}}}]
raw_results = [{"content": {"results": {"images_results": [{"image_url": "https://example.com/image1.jpg", "thumbnail_url": "https://example.com/thumb1.jpg", "url": "https://example.com/page1"}]}}}]
cleaned = InfoQuestClient.clean_results_with_image_search(raw_results)
assert len(cleaned) == 1
@@ -181,4 +181,4 @@ class TestInfoQuestClient:
client = InfoQuestClient()
result = client.web_search("test query")
assert "Error" in result
assert "Error" in result

View File

@@ -16,14 +16,7 @@ from src.agents.middlewares.memory_middleware import _filter_messages_for_memory
# Helpers
# ---------------------------------------------------------------------------
_UPLOAD_BLOCK = (
"<uploaded_files>\n"
"The following files have been uploaded and are available for use:\n\n"
"- filename: secret.txt\n"
" path: /mnt/user-data/uploads/abc123/secret.txt\n"
" size: 42 bytes\n"
"</uploaded_files>"
)
_UPLOAD_BLOCK = "<uploaded_files>\nThe following files have been uploaded and are available for use:\n\n- filename: secret.txt\n path: /mnt/user-data/uploads/abc123/secret.txt\n size: 42 bytes\n</uploaded_files>"
def _human(text: str) -> HumanMessage:
@@ -103,7 +96,7 @@ class TestFilterMessagesForMemory:
msgs = [
_human("Hello, how are you?"),
_ai("I'm doing well, thank you!"),
_human(_UPLOAD_BLOCK), # upload-only → dropped
_human(_UPLOAD_BLOCK), # upload-only → dropped
_ai("I read the uploaded file."), # paired AI → dropped
_human("What is 2 + 2?"),
_ai("4"),
@@ -122,9 +115,11 @@ class TestFilterMessagesForMemory:
def test_multimodal_content_list_handled(self):
"""Human messages with list-style content (multimodal) are handled."""
msg = HumanMessage(content=[
{"type": "text", "text": _UPLOAD_BLOCK},
])
msg = HumanMessage(
content=[
{"type": "text", "text": _UPLOAD_BLOCK},
]
)
msgs = [msg, _ai("Done.")]
result = _filter_messages_for_memory(msgs)
assert result == []
@@ -134,9 +129,7 @@ class TestFilterMessagesForMemory:
combined = _UPLOAD_BLOCK + "\n\nSummarise the file please."
msgs = [_human(combined), _ai("It says hello.")]
result = _filter_messages_for_memory(msgs)
all_content = " ".join(
m.content for m in result if isinstance(m.content, str)
)
all_content = " ".join(m.content for m in result if isinstance(m.content, str))
assert "/mnt/user-data/uploads/" not in all_content
assert "<uploaded_files>" not in all_content
@@ -157,11 +150,7 @@ class TestStripUploadMentionsFromMemory:
# --- summaries ---
def test_upload_event_sentence_removed_from_summary(self):
mem = self._make_memory(
"User is interested in AI. "
"User uploaded a test file for verification purposes. "
"User prefers concise answers."
)
mem = self._make_memory("User is interested in AI. User uploaded a test file for verification purposes. User prefers concise answers.")
result = _strip_upload_mentions_from_memory(mem)
summary = result["user"]["topOfMind"]["summary"]
assert "uploaded a test file" not in summary
@@ -169,11 +158,7 @@ class TestStripUploadMentionsFromMemory:
assert "User prefers concise answers" in summary
def test_upload_path_sentence_removed_from_summary(self):
mem = self._make_memory(
"User uses Python. "
"User uploaded file to /mnt/user-data/uploads/tid/data.csv. "
"User likes clean code."
)
mem = self._make_memory("User uses Python. User uploaded file to /mnt/user-data/uploads/tid/data.csv. User likes clean code.")
result = _strip_upload_mentions_from_memory(mem)
summary = result["user"]["topOfMind"]["summary"]
assert "/mnt/user-data/uploads/" not in summary
@@ -193,10 +178,7 @@ class TestStripUploadMentionsFromMemory:
def test_uploading_a_test_file_removed(self):
"""'uploading a test file' (with intervening words) must be caught."""
mem = self._make_memory(
"User conducted a hands-on test by uploading a test file titled "
"'test_deerflow_memory_bug.txt'. User is also learning Python."
)
mem = self._make_memory("User conducted a hands-on test by uploading a test file titled 'test_deerflow_memory_bug.txt'. User is also learning Python.")
result = _strip_upload_mentions_from_memory(mem)
summary = result["user"]["topOfMind"]["summary"]
assert "test_deerflow_memory_bug.txt" not in summary

View File

@@ -3,9 +3,7 @@
import importlib
from types import SimpleNamespace
present_file_tool_module = importlib.import_module(
"src.tools.builtins.present_file_tool"
)
present_file_tool_module = importlib.import_module("src.tools.builtins.present_file_tool")
def _make_runtime(outputs_path: str) -> SimpleNamespace:
@@ -40,9 +38,7 @@ def test_present_files_keeps_virtual_outputs_path(tmp_path, monkeypatch):
monkeypatch.setattr(
present_file_tool_module,
"get_paths",
lambda: SimpleNamespace(
resolve_virtual_path=lambda thread_id, path: artifact_path
),
lambda: SimpleNamespace(resolve_virtual_path=lambda thread_id, path: artifact_path),
)
result = present_file_tool_module.present_file_tool.func(
@@ -69,7 +65,4 @@ def test_present_files_rejects_paths_outside_outputs(tmp_path):
)
assert "artifacts" not in result.update
assert (
result.update["messages"][0].content
== f"Error: Only files in /mnt/user-data/outputs can be presented: {leaked_path}"
)
assert result.update["messages"][0].content == f"Error: Only files in /mnt/user-data/outputs can be presented: {leaked_path}"

View File

@@ -8,6 +8,7 @@ from src.reflection.resolvers import resolve_variable
def test_resolve_variable_reports_install_hint_for_missing_google_provider(monkeypatch: pytest.MonkeyPatch):
"""Missing google provider should return actionable install guidance."""
def fake_import_module(module_path: str):
raise ModuleNotFoundError(f"No module named '{module_path}'", name=module_path)
@@ -38,6 +39,8 @@ def test_resolve_variable_reports_install_hint_for_missing_google_transitive_dep
message = str(exc_info.value)
# Even when a transitive dependency is missing, the hint should still point to the provider package.
assert "uv add langchain-google-genai" in message
def test_resolve_variable_invalid_path_format():
"""Invalid variable path should fail with format guidance."""
with pytest.raises(ImportError) as exc_info:

View File

@@ -5,22 +5,22 @@ from src.gateway.routers import suggestions
def test_strip_markdown_code_fence_removes_wrapping():
text = "```json\n[\"a\"]\n```"
assert suggestions._strip_markdown_code_fence(text) == "[\"a\"]"
text = '```json\n["a"]\n```'
assert suggestions._strip_markdown_code_fence(text) == '["a"]'
def test_strip_markdown_code_fence_no_fence_keeps_content():
text = " [\"a\"] "
assert suggestions._strip_markdown_code_fence(text) == "[\"a\"]"
text = ' ["a"] '
assert suggestions._strip_markdown_code_fence(text) == '["a"]'
def test_parse_json_string_list_filters_invalid_items():
text = "```json\n[\"a\", \" \", 1, \"b\"]\n```"
text = '```json\n["a", " ", 1, "b"]\n```'
assert suggestions._parse_json_string_list(text) == ["a", "b"]
def test_parse_json_string_list_rejects_non_list():
text = "{\"a\": 1}"
text = '{"a": 1}'
assert suggestions._parse_json_string_list(text) is None
@@ -43,7 +43,7 @@ def test_generate_suggestions_parses_and_limits(monkeypatch):
model_name=None,
)
fake_model = MagicMock()
fake_model.invoke.return_value = MagicMock(content="```json\n[\"Q1\", \"Q2\", \"Q3\", \"Q4\"]\n```")
fake_model.invoke.return_value = MagicMock(content='```json\n["Q1", "Q2", "Q3", "Q4"]\n```')
monkeypatch.setattr(suggestions, "create_chat_model", lambda **kwargs: fake_model)
result = asyncio.run(suggestions.generate_suggestions("t1", req))
@@ -63,4 +63,4 @@ def test_generate_suggestions_returns_empty_on_model_error(monkeypatch):
result = asyncio.run(suggestions.generate_suggestions("t1", req))
assert result.suggestions == []
assert result.suggestions == []

View File

@@ -21,7 +21,6 @@ def test_upload_files_writes_thread_storage_and_skips_local_sandbox_sync(tmp_pat
patch.object(uploads, "get_uploads_dir", return_value=thread_uploads_dir),
patch.object(uploads, "get_sandbox_provider", return_value=provider),
):
file = UploadFile(filename="notes.txt", file=BytesIO(b"hello uploads"))
result = asyncio.run(uploads.upload_files("thread-local", files=[file]))
@@ -52,7 +51,6 @@ def test_upload_files_syncs_non_local_sandbox_and_marks_markdown_file(tmp_path):
patch.object(uploads, "get_sandbox_provider", return_value=provider),
patch.object(uploads, "convert_file_to_markdown", AsyncMock(side_effect=fake_convert)),
):
file = UploadFile(filename="report.pdf", file=BytesIO(b"pdf-bytes"))
result = asyncio.run(uploads.upload_files("thread-aio", files=[file]))