mirror of
https://gitee.com/wanwujie/deer-flow
synced 2026-05-02 10:10:44 +08:00
refactor: split backend into harness (deerflow.*) and app (app.*) (#1131)
* refactor: extract shared utils to break harness→app cross-layer imports Move _validate_skill_frontmatter to src/skills/validation.py and CONVERTIBLE_EXTENSIONS + convert_file_to_markdown to src/utils/file_conversion.py. This eliminates the two reverse dependencies from client.py (harness layer) into gateway/routers/ (app layer), preparing for the harness/app package split. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * refactor: split backend/src into harness (deerflow.*) and app (app.*) Physically split the monolithic backend/src/ package into two layers: - **Harness** (`packages/harness/deerflow/`): publishable agent framework package with import prefix `deerflow.*`. Contains agents, sandbox, tools, models, MCP, skills, config, and all core infrastructure. - **App** (`app/`): unpublished application code with import prefix `app.*`. Contains gateway (FastAPI REST API) and channels (IM integrations). Key changes: - Move 13 harness modules to packages/harness/deerflow/ via git mv - Move gateway + channels to app/ via git mv - Rename all imports: src.* → deerflow.* (harness) / app.* (app layer) - Set up uv workspace with deerflow-harness as workspace member - Update langgraph.json, config.example.yaml, all scripts, Docker files - Add build-system (hatchling) to harness pyproject.toml - Add PYTHONPATH=. to gateway startup commands for app.* resolution - Update ruff.toml with known-first-party for import sorting - Update all documentation to reflect new directory structure Boundary rule enforced: harness code never imports from app. All 429 tests pass. Lint clean. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * chore: add harness→app boundary check test and update docs Add test_harness_boundary.py that scans all Python files in packages/harness/deerflow/ and fails if any `from app.*` or `import app.*` statement is found. This enforces the architectural rule that the harness layer never depends on the app layer. 
Update CLAUDE.md to document the harness/app split architecture, import conventions, and the boundary enforcement test. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * feat: add config versioning with auto-upgrade on startup When config.example.yaml schema changes, developers' local config.yaml files can silently become outdated. This adds a config_version field and auto-upgrade mechanism so breaking changes (like src.* → deerflow.* renames) are applied automatically before services start. - Add config_version: 1 to config.example.yaml - Add startup version check warning in AppConfig.from_file() - Add scripts/config-upgrade.sh with migration registry for value replacements - Add `make config-upgrade` target - Auto-run config-upgrade in serve.sh and start-daemon.sh before starting services - Add config error hints in service failure messages Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * fix comments * fix: update src.* import in test_sandbox_tools_security to deerflow.* Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * fix: handle empty config and search parent dirs for config.example.yaml Address Copilot review comments on PR #1131: - Guard against yaml.safe_load() returning None for empty config files - Search parent directories for config.example.yaml instead of only looking next to config.yaml, fixing detection in common setups Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * fix: correct skills root path depth and config_version type coercion - loader.py: fix get_skills_root_path() to use 5 parent levels (was 3) after harness split, file lives at packages/harness/deerflow/skills/ so parent×3 resolved to backend/packages/harness/ instead of backend/ - app_config.py: coerce config_version to int() before comparison in _check_config_version() to prevent TypeError when YAML stores value as string (e.g. 
config_version: "1") - tests: add regression tests for both fixes Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com> * fix: update test imports from src.* to deerflow.*/app.* after harness refactor Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com> --------- Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
16
backend/app/channels/__init__.py
Normal file
16
backend/app/channels/__init__.py
Normal file
@@ -0,0 +1,16 @@
|
||||
"""IM Channel integration for DeerFlow.
|
||||
|
||||
Provides a pluggable channel system that connects external messaging platforms
|
||||
(Feishu/Lark, Slack, Telegram) to the DeerFlow agent via the ChannelManager,
|
||||
which uses ``langgraph-sdk`` to communicate with the underlying LangGraph Server.
|
||||
"""
|
||||
|
||||
from app.channels.base import Channel
|
||||
from app.channels.message_bus import InboundMessage, MessageBus, OutboundMessage
|
||||
|
||||
__all__ = [
|
||||
"Channel",
|
||||
"InboundMessage",
|
||||
"MessageBus",
|
||||
"OutboundMessage",
|
||||
]
|
||||
108
backend/app/channels/base.py
Normal file
108
backend/app/channels/base.py
Normal file
@@ -0,0 +1,108 @@
|
||||
"""Abstract base class for IM channels."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Any
|
||||
|
||||
from app.channels.message_bus import InboundMessage, InboundMessageType, MessageBus, OutboundMessage, ResolvedAttachment
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Channel(ABC):
    """Abstract base for IM channel implementations.

    A concrete channel bridges one external messaging platform and the
    shared message bus:

    1. Platform events are wrapped as :class:`InboundMessage` objects and
       published to the bus.
    2. Outbound messages from the bus are forwarded back to the platform.

    Subclasses must implement ``start``, ``stop``, and ``send``.
    """

    def __init__(self, name: str, bus: MessageBus, config: dict[str, Any]) -> None:
        # ``name`` doubles as the routing key for outbound messages
        # (see ``_on_outbound``).
        self.name = name
        self.bus = bus
        self.config = config
        self._running = False

    @property
    def is_running(self) -> bool:
        """Whether the channel has been started and not yet stopped."""
        return self._running

    # -- lifecycle ---------------------------------------------------------

    @abstractmethod
    async def start(self) -> None:
        """Start listening for messages from the external platform."""

    @abstractmethod
    async def stop(self) -> None:
        """Gracefully stop the channel."""

    # -- outbound ----------------------------------------------------------

    @abstractmethod
    async def send(self, msg: OutboundMessage) -> None:
        """Send a message back to the external platform.

        Implementations route the reply to the right conversation/thread
        using ``msg.chat_id`` and ``msg.thread_ts``.
        """

    async def send_file(self, msg: OutboundMessage, attachment: ResolvedAttachment) -> bool:
        """Upload a single file attachment to the platform.

        Returns True when the upload succeeded.  The base implementation
        advertises no file-upload support and always returns False.
        """
        return False

    # -- helpers -----------------------------------------------------------

    def _make_inbound(
        self,
        chat_id: str,
        user_id: str,
        text: str,
        *,
        msg_type: InboundMessageType = InboundMessageType.CHAT,
        thread_ts: str | None = None,
        files: list[dict[str, Any]] | None = None,
        metadata: dict[str, Any] | None = None,
    ) -> InboundMessage:
        """Build an :class:`InboundMessage` tagged with this channel's name."""
        resolved_files = files or []
        resolved_metadata = metadata or {}
        return InboundMessage(
            channel_name=self.name,
            chat_id=chat_id,
            user_id=user_id,
            text=text,
            msg_type=msg_type,
            thread_ts=thread_ts,
            files=resolved_files,
            metadata=resolved_metadata,
        )

    async def _on_outbound(self, msg: OutboundMessage) -> None:
        """Bus callback: deliver *msg* when it targets this channel.

        The text message goes out first and any file attachments follow.
        When the text send fails, attachments are skipped entirely so the
        platform never receives files without their accompanying text.
        """
        if msg.channel_name != self.name:
            return

        try:
            await self.send(msg)
        except Exception:
            logger.exception("Failed to send outbound message on channel %s", self.name)
            return  # Do not attempt file uploads when the text message failed

        for attachment in msg.attachments:
            try:
                if not await self.send_file(msg, attachment):
                    logger.warning("[%s] file upload skipped for %s", self.name, attachment.filename)
            except Exception:
                logger.exception("[%s] failed to upload file %s", self.name, attachment.filename)
|
||||
510
backend/app/channels/feishu.py
Normal file
510
backend/app/channels/feishu.py
Normal file
@@ -0,0 +1,510 @@
|
||||
"""Feishu/Lark channel — connects to Feishu via WebSocket (no public IP needed)."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import threading
|
||||
from typing import Any
|
||||
|
||||
from app.channels.base import Channel
|
||||
from app.channels.message_bus import InboundMessageType, MessageBus, OutboundMessage, ResolvedAttachment
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class FeishuChannel(Channel):
    """Feishu/Lark IM channel using the ``lark-oapi`` WebSocket client.

    Configuration keys (in ``config.yaml`` under ``channels.feishu``):
    - ``app_id``: Feishu app ID.
    - ``app_secret``: Feishu app secret.
    - ``verification_token``: (optional) Event verification token.

    The channel uses WebSocket long-connection mode so no public IP is required.

    Message flow:
    1. User sends a message → bot adds "OK" emoji reaction
    2. Bot replies in thread: "Working on it......"
    3. Agent processes the message and returns a result
    4. Bot replies in thread with the result
    5. Bot adds "DONE" emoji reaction to the original message
    """

    def __init__(self, bus: MessageBus, config: dict[str, Any]) -> None:
        super().__init__(name="feishu", bus=bus, config=config)
        # Dedicated thread running the lark-oapi WebSocket client (see _run_ws).
        self._thread: threading.Thread | None = None
        # Main-thread event loop captured in start(); _on_message uses it to
        # schedule coroutines from the lark thread via run_coroutine_threadsafe.
        self._main_loop: asyncio.AbstractEventLoop | None = None
        # lark REST client; stays None until start() succeeds.
        self._api_client = None
        # lark-oapi request classes are imported lazily in start() and cached
        # on the instance so the SDK remains an optional dependency.
        self._CreateMessageReactionRequest = None
        self._CreateMessageReactionRequestBody = None
        self._Emoji = None
        self._PatchMessageRequest = None
        self._PatchMessageRequestBody = None
        # Strong references to fire-and-forget tasks so they are not GC'd.
        self._background_tasks: set[asyncio.Task] = set()
        # source message_id -> message_id of the in-thread "running" card.
        self._running_card_ids: dict[str, str] = {}
        # source message_id -> in-flight task that is creating the running card.
        self._running_card_tasks: dict[str, asyncio.Task] = {}
        # More lazily bound lark-oapi request classes (file/image uploads).
        self._CreateFileRequest = None
        self._CreateFileRequestBody = None
        self._CreateImageRequest = None
        self._CreateImageRequestBody = None
|
||||
|
||||
async def start(self) -> None:
    """Start the Feishu channel.

    Imports the optional ``lark-oapi`` SDK, builds the REST client,
    subscribes to the outbound bus, and launches the WebSocket listener
    in a dedicated thread.  Returns early (leaving the channel stopped)
    when already running, when the SDK is missing, or when credentials
    are not configured.
    """
    if self._running:
        return

    # The SDK is imported lazily so the rest of the app works without it.
    try:
        import lark_oapi as lark
        from lark_oapi.api.im.v1 import (
            CreateFileRequest,
            CreateFileRequestBody,
            CreateImageRequest,
            CreateImageRequestBody,
            CreateMessageReactionRequest,
            CreateMessageReactionRequestBody,
            CreateMessageRequest,
            CreateMessageRequestBody,
            Emoji,
            PatchMessageRequest,
            PatchMessageRequestBody,
            ReplyMessageRequest,
            ReplyMessageRequestBody,
        )
    except ImportError:
        logger.error("lark-oapi is not installed. Install it with: uv add lark-oapi")
        return

    # Cache the SDK classes on the instance for use by the helper methods.
    self._lark = lark
    self._CreateMessageRequest = CreateMessageRequest
    self._CreateMessageRequestBody = CreateMessageRequestBody
    self._ReplyMessageRequest = ReplyMessageRequest
    self._ReplyMessageRequestBody = ReplyMessageRequestBody
    self._CreateMessageReactionRequest = CreateMessageReactionRequest
    self._CreateMessageReactionRequestBody = CreateMessageReactionRequestBody
    self._Emoji = Emoji
    self._PatchMessageRequest = PatchMessageRequest
    self._PatchMessageRequestBody = PatchMessageRequestBody
    self._CreateFileRequest = CreateFileRequest
    self._CreateFileRequestBody = CreateFileRequestBody
    self._CreateImageRequest = CreateImageRequest
    self._CreateImageRequestBody = CreateImageRequestBody

    app_id = self.config.get("app_id", "")
    app_secret = self.config.get("app_secret", "")

    if not app_id or not app_secret:
        logger.error("Feishu channel requires app_id and app_secret")
        return

    self._api_client = lark.Client.builder().app_id(app_id).app_secret(app_secret).build()
    # NOTE(review): asyncio.get_running_loop() is the modern spelling inside
    # a coroutine; get_event_loop() is kept as written — confirm before changing.
    self._main_loop = asyncio.get_event_loop()

    self._running = True
    self.bus.subscribe_outbound(self._on_outbound)

    # Both ws.Client construction and start() must happen in a dedicated
    # thread with its own event loop. lark-oapi caches the running loop
    # at construction time and later calls loop.run_until_complete(),
    # which conflicts with an already-running uvloop.
    self._thread = threading.Thread(
        target=self._run_ws,
        args=(app_id, app_secret),
        daemon=True,
    )
    self._thread.start()
    logger.info("Feishu channel started")
|
||||
|
||||
def _run_ws(self, app_id: str, app_secret: str) -> None:
    """Construct and run the lark WS client in a thread with a fresh event loop.

    The lark-oapi SDK captures a module-level event loop at import time
    (``lark_oapi.ws.client.loop``). When uvicorn uses uvloop, that
    captured loop is the *main* thread's uvloop — which is already
    running, so ``loop.run_until_complete()`` inside ``Client.start()``
    raises ``RuntimeError``.

    We work around this by creating a plain asyncio event loop for this
    thread and patching the SDK's module-level reference before calling
    ``start()``.
    """
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        import lark_oapi as lark
        import lark_oapi.ws.client as _ws_client_mod

        # Replace the SDK's module-level loop so Client.start() uses
        # this thread's (non-running) event loop instead of the main
        # thread's uvloop.
        _ws_client_mod.loop = loop

        # Inbound platform events are dispatched to _on_message (still on
        # this thread; _on_message hops back to the main loop itself).
        event_handler = lark.EventDispatcherHandler.builder("", "").register_p2_im_message_receive_v1(self._on_message).build()
        ws_client = lark.ws.Client(
            app_id=app_id,
            app_secret=app_secret,
            event_handler=event_handler,
            log_level=lark.LogLevel.INFO,
        )
        ws_client.start()
    except Exception:
        # After stop() flips _running to False, shutdown-time errors from the
        # WS client are expected and intentionally not logged.
        if self._running:
            logger.exception("Feishu WebSocket error")
|
||||
|
||||
async def stop(self) -> None:
    """Stop the channel: unsubscribe from the bus, cancel all pending
    tasks, and join the WebSocket thread (bounded wait)."""
    self._running = False
    self.bus.unsubscribe_outbound(self._on_outbound)

    # Cancel every fire-and-forget task and every in-flight card creation.
    pending = list(self._background_tasks) + list(self._running_card_tasks.values())
    for pending_task in pending:
        pending_task.cancel()
    self._background_tasks.clear()
    self._running_card_tasks.clear()

    worker = self._thread
    if worker:
        worker.join(timeout=5)
        self._thread = None
    logger.info("Feishu channel stopped")
|
||||
|
||||
async def send(self, msg: OutboundMessage, *, _max_retries: int = 3) -> None:
    """Deliver *msg* as a Feishu card, retrying with exponential backoff.

    Raises the last exception when every attempt fails.
    """
    if not self._api_client:
        logger.warning("[Feishu] send called but no api_client available")
        return

    logger.info(
        "[Feishu] sending reply: chat_id=%s, thread_ts=%s, text_len=%d",
        msg.chat_id,
        msg.thread_ts,
        len(msg.text),
    )

    last_exc: Exception | None = None
    attempt = 0
    while attempt < _max_retries:
        try:
            await self._send_card_message(msg)
            return  # success
        except Exception as exc:
            last_exc = exc
            if attempt < _max_retries - 1:
                delay = 2**attempt  # 1s, 2s backoff before the next attempt
                logger.warning(
                    "[Feishu] send failed (attempt %d/%d), retrying in %ds: %s",
                    attempt + 1,
                    _max_retries,
                    delay,
                    exc,
                )
                await asyncio.sleep(delay)
        attempt += 1

    logger.error("[Feishu] send failed after %d attempts: %s", _max_retries, last_exc)
    raise last_exc  # type: ignore[misc]
|
||||
|
||||
async def send_file(self, msg: OutboundMessage, attachment: ResolvedAttachment) -> bool:
    """Upload one attachment and send it as an image or file message.

    Returns True on success, False on oversize input or any failure.
    """
    if not self._api_client:
        return False

    # Platform size limits: images 10MB, other files 30MB.
    if attachment.is_image and attachment.size > 10 * 1024 * 1024:
        logger.warning("[Feishu] image too large (%d bytes), skipping: %s", attachment.size, attachment.filename)
        return False
    if not attachment.is_image and attachment.size > 30 * 1024 * 1024:
        logger.warning("[Feishu] file too large (%d bytes), skipping: %s", attachment.size, attachment.filename)
        return False

    try:
        if attachment.is_image:
            upload_key = await self._upload_image(attachment.actual_path)
            msg_type, content = "image", json.dumps({"image_key": upload_key})
        else:
            upload_key = await self._upload_file(attachment.actual_path, attachment.filename)
            msg_type, content = "file", json.dumps({"file_key": upload_key})

        if msg.thread_ts:
            # In-thread reply to the originating message.
            body = self._ReplyMessageRequestBody.builder().msg_type(msg_type).content(content).reply_in_thread(True).build()
            request = self._ReplyMessageRequest.builder().message_id(msg.thread_ts).request_body(body).build()
            await asyncio.to_thread(self._api_client.im.v1.message.reply, request)
        else:
            # Fresh message straight into the chat.
            body = self._CreateMessageRequestBody.builder().receive_id(msg.chat_id).msg_type(msg_type).content(content).build()
            request = self._CreateMessageRequest.builder().receive_id_type("chat_id").request_body(body).build()
            await asyncio.to_thread(self._api_client.im.v1.message.create, request)

        logger.info("[Feishu] file sent: %s (type=%s)", attachment.filename, msg_type)
        return True
    except Exception:
        logger.exception("[Feishu] failed to upload/send file: %s", attachment.filename)
        return False
|
||||
|
||||
async def _upload_image(self, path) -> str:
|
||||
"""Upload an image to Feishu and return the image_key."""
|
||||
with open(str(path), "rb") as f:
|
||||
request = self._CreateImageRequest.builder().request_body(self._CreateImageRequestBody.builder().image_type("message").image(f).build()).build()
|
||||
response = await asyncio.to_thread(self._api_client.im.v1.image.create, request)
|
||||
if not response.success():
|
||||
raise RuntimeError(f"Feishu image upload failed: code={response.code}, msg={response.msg}")
|
||||
return response.data.image_key
|
||||
|
||||
async def _upload_file(self, path, filename: str) -> str:
|
||||
"""Upload a file to Feishu and return the file_key."""
|
||||
suffix = path.suffix.lower() if hasattr(path, "suffix") else ""
|
||||
if suffix in (".xls", ".xlsx", ".csv"):
|
||||
file_type = "xls"
|
||||
elif suffix in (".ppt", ".pptx"):
|
||||
file_type = "ppt"
|
||||
elif suffix == ".pdf":
|
||||
file_type = "pdf"
|
||||
elif suffix in (".doc", ".docx"):
|
||||
file_type = "doc"
|
||||
else:
|
||||
file_type = "stream"
|
||||
|
||||
with open(str(path), "rb") as f:
|
||||
request = self._CreateFileRequest.builder().request_body(self._CreateFileRequestBody.builder().file_type(file_type).file_name(filename).file(f).build()).build()
|
||||
response = await asyncio.to_thread(self._api_client.im.v1.file.create, request)
|
||||
if not response.success():
|
||||
raise RuntimeError(f"Feishu file upload failed: code={response.code}, msg={response.msg}")
|
||||
return response.data.file_key
|
||||
|
||||
# -- message formatting ------------------------------------------------
|
||||
|
||||
@staticmethod
|
||||
def _build_card_content(text: str) -> str:
|
||||
"""Build a Feishu interactive card with markdown content.
|
||||
|
||||
Feishu's interactive card format natively renders markdown, including
|
||||
headers, bold/italic, code blocks, lists, and links.
|
||||
"""
|
||||
card = {
|
||||
"config": {"wide_screen_mode": True, "update_multi": True},
|
||||
"elements": [{"tag": "markdown", "content": text}],
|
||||
}
|
||||
return json.dumps(card)
|
||||
|
||||
# -- reaction helpers --------------------------------------------------
|
||||
|
||||
async def _add_reaction(self, message_id: str, emoji_type: str = "THUMBSUP") -> None:
    """Attach an emoji reaction to a message; failures are logged, never raised."""
    if not self._api_client or not self._CreateMessageReactionRequest:
        return
    try:
        emoji = self._Emoji.builder().emoji_type(emoji_type).build()
        body = self._CreateMessageReactionRequestBody.builder().reaction_type(emoji).build()
        request = self._CreateMessageReactionRequest.builder().message_id(message_id).request_body(body).build()
        await asyncio.to_thread(self._api_client.im.v1.message_reaction.create, request)
        logger.info("[Feishu] reaction '%s' added to message %s", emoji_type, message_id)
    except Exception:
        logger.exception("[Feishu] failed to add reaction '%s' to message %s", emoji_type, message_id)
|
||||
|
||||
async def _reply_card(self, message_id: str, text: str) -> str | None:
|
||||
"""Reply with an interactive card and return the created card message ID."""
|
||||
if not self._api_client:
|
||||
return None
|
||||
|
||||
content = self._build_card_content(text)
|
||||
request = self._ReplyMessageRequest.builder().message_id(message_id).request_body(self._ReplyMessageRequestBody.builder().msg_type("interactive").content(content).reply_in_thread(True).build()).build()
|
||||
response = await asyncio.to_thread(self._api_client.im.v1.message.reply, request)
|
||||
response_data = getattr(response, "data", None)
|
||||
return getattr(response_data, "message_id", None)
|
||||
|
||||
async def _create_card(self, chat_id: str, text: str) -> None:
|
||||
"""Create a new card message in the target chat."""
|
||||
if not self._api_client:
|
||||
return
|
||||
|
||||
content = self._build_card_content(text)
|
||||
request = self._CreateMessageRequest.builder().receive_id_type("chat_id").request_body(self._CreateMessageRequestBody.builder().receive_id(chat_id).msg_type("interactive").content(content).build()).build()
|
||||
await asyncio.to_thread(self._api_client.im.v1.message.create, request)
|
||||
|
||||
async def _update_card(self, message_id: str, text: str) -> None:
|
||||
"""Patch an existing card message in place."""
|
||||
if not self._api_client or not self._PatchMessageRequest:
|
||||
return
|
||||
|
||||
content = self._build_card_content(text)
|
||||
request = self._PatchMessageRequest.builder().message_id(message_id).request_body(self._PatchMessageRequestBody.builder().content(content).build()).build()
|
||||
await asyncio.to_thread(self._api_client.im.v1.message.patch, request)
|
||||
|
||||
def _track_background_task(self, task: asyncio.Task, *, name: str, msg_id: str) -> None:
|
||||
"""Keep a strong reference to fire-and-forget tasks and surface errors."""
|
||||
self._background_tasks.add(task)
|
||||
task.add_done_callback(lambda done_task, task_name=name, mid=msg_id: self._finalize_background_task(done_task, task_name, mid))
|
||||
|
||||
def _finalize_background_task(self, task: asyncio.Task, name: str, msg_id: str) -> None:
|
||||
self._background_tasks.discard(task)
|
||||
self._log_task_error(task, name, msg_id)
|
||||
|
||||
async def _create_running_card(self, source_message_id: str, text: str) -> str | None:
    """Create the in-thread running card; cache and return its message ID.

    Returns whatever _reply_card produced (possibly None) so awaiters can
    distinguish "card exists" from "creation yielded no ID".
    """
    card_id = await self._reply_card(source_message_id, text)
    if not card_id:
        logger.warning("[Feishu] running card creation returned no message_id for source=%s, subsequent updates will fall back to new replies", source_message_id)
        return card_id
    self._running_card_ids[source_message_id] = card_id
    logger.info("[Feishu] running card created: source=%s card=%s", source_message_id, card_id)
    return card_id
|
||||
|
||||
def _ensure_running_card_started(self, source_message_id: str, text: str = "Working on it...") -> asyncio.Task | None:
|
||||
"""Start running-card creation once per source message."""
|
||||
running_card_id = self._running_card_ids.get(source_message_id)
|
||||
if running_card_id:
|
||||
return None
|
||||
|
||||
running_card_task = self._running_card_tasks.get(source_message_id)
|
||||
if running_card_task:
|
||||
return running_card_task
|
||||
|
||||
running_card_task = asyncio.create_task(self._create_running_card(source_message_id, text))
|
||||
self._running_card_tasks[source_message_id] = running_card_task
|
||||
running_card_task.add_done_callback(lambda done_task, mid=source_message_id: self._finalize_running_card_task(mid, done_task))
|
||||
return running_card_task
|
||||
|
||||
def _finalize_running_card_task(self, source_message_id: str, task: asyncio.Task) -> None:
|
||||
if self._running_card_tasks.get(source_message_id) is task:
|
||||
self._running_card_tasks.pop(source_message_id, None)
|
||||
self._log_task_error(task, "create_running_card", source_message_id)
|
||||
|
||||
async def _ensure_running_card(self, source_message_id: str, text: str = "Working on it...") -> str | None:
|
||||
"""Ensure the in-thread running card exists and track its message ID."""
|
||||
running_card_id = self._running_card_ids.get(source_message_id)
|
||||
if running_card_id:
|
||||
return running_card_id
|
||||
|
||||
running_card_task = self._ensure_running_card_started(source_message_id, text)
|
||||
if running_card_task is None:
|
||||
return self._running_card_ids.get(source_message_id)
|
||||
return await running_card_task
|
||||
|
||||
async def _send_running_reply(self, message_id: str) -> None:
    """Reply to a message in-thread with a running card.

    Best-effort: any failure is logged and swallowed so inbound handling
    is never interrupted by a missing status card.
    """
    try:
        await self._ensure_running_card(message_id)
    except Exception:
        logger.exception("[Feishu] failed to send running reply for message %s", message_id)
|
||||
|
||||
async def _send_card_message(self, msg: OutboundMessage) -> None:
    """Send or update the Feishu card tied to the current request.

    When ``msg.thread_ts`` (the source message ID) is set, the existing
    in-thread running card is patched in place; otherwise a new card is
    created directly in the chat.  Final messages additionally clear the
    running-card bookkeeping and add a "DONE" reaction to the source.
    """
    source_message_id = msg.thread_ts
    if source_message_id:
        running_card_id = self._running_card_ids.get(source_message_id)
        awaited_running_card_task = False

        # No cached card ID yet — a creation task may still be in flight;
        # wait for it rather than racing it with a duplicate card.
        if not running_card_id:
            running_card_task = self._running_card_tasks.get(source_message_id)
            if running_card_task:
                awaited_running_card_task = True
                running_card_id = await running_card_task

        if running_card_id:
            try:
                await self._update_card(running_card_id, msg.text)
            except Exception:
                # Non-final updates propagate the failure (send() retries);
                # final messages fall back to a fresh in-thread reply so the
                # result is never lost.
                if not msg.is_final:
                    raise
                logger.exception(
                    "[Feishu] failed to patch running card %s, falling back to final reply",
                    running_card_id,
                )
                await self._reply_card(source_message_id, msg.text)
            else:
                logger.info("[Feishu] running card updated: source=%s card=%s", source_message_id, running_card_id)
        elif msg.is_final:
            # No card exists at all — deliver the final result as a reply.
            await self._reply_card(source_message_id, msg.text)
        elif awaited_running_card_task:
            # Creation finished but yielded no ID; creating another non-final
            # card here would duplicate output, so skip.
            logger.warning(
                "[Feishu] running card task finished without message_id for source=%s, skipping duplicate non-final creation",
                source_message_id,
            )
        else:
            await self._ensure_running_card(source_message_id, msg.text)

        if msg.is_final:
            # Request complete: forget the card and mark the source message.
            self._running_card_ids.pop(source_message_id, None)
            await self._add_reaction(source_message_id, "DONE")
        return

    # No source message to thread under — post a standalone card.
    await self._create_card(msg.chat_id, msg.text)
|
||||
|
||||
# -- internal ----------------------------------------------------------
|
||||
|
||||
@staticmethod
|
||||
def _log_future_error(fut, name: str, msg_id: str) -> None:
|
||||
"""Callback for run_coroutine_threadsafe futures to surface errors."""
|
||||
try:
|
||||
exc = fut.exception()
|
||||
if exc:
|
||||
logger.error("[Feishu] %s failed for msg_id=%s: %s", name, msg_id, exc)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
def _log_task_error(task: asyncio.Task, name: str, msg_id: str) -> None:
|
||||
"""Callback for background asyncio tasks to surface errors."""
|
||||
try:
|
||||
exc = task.exception()
|
||||
if exc:
|
||||
logger.error("[Feishu] %s failed for msg_id=%s: %s", name, msg_id, exc)
|
||||
except asyncio.CancelledError:
|
||||
logger.info("[Feishu] %s cancelled for msg_id=%s", name, msg_id)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
async def _prepare_inbound(self, msg_id: str, inbound) -> None:
|
||||
"""Kick off Feishu side effects without delaying inbound dispatch."""
|
||||
reaction_task = asyncio.create_task(self._add_reaction(msg_id, "OK"))
|
||||
self._track_background_task(reaction_task, name="add_reaction", msg_id=msg_id)
|
||||
self._ensure_running_card_started(msg_id)
|
||||
await self.bus.publish_inbound(inbound)
|
||||
|
||||
def _on_message(self, event) -> None:
    """Called by lark-oapi when a message is received (runs in lark thread).

    Parses the event into an InboundMessage and hands it to the main
    event loop via ``run_coroutine_threadsafe`` — this method itself must
    never touch the loop directly since it runs on the WS client thread.
    Any parsing error is logged and swallowed so one bad event cannot
    kill the listener.
    """
    try:
        logger.info("[Feishu] raw event received: type=%s", type(event).__name__)
        message = event.event.message
        chat_id = message.chat_id
        msg_id = message.message_id
        sender_id = event.event.sender.sender_id.open_id

        # root_id is set when the message is a reply within a Feishu thread.
        # Use it as topic_id so all replies share the same DeerFlow thread.
        root_id = getattr(message, "root_id", None) or None

        # Parse message content
        content = json.loads(message.content)
        text = content.get("text", "").strip()
        logger.info(
            "[Feishu] parsed message: chat_id=%s, msg_id=%s, root_id=%s, sender=%s, text=%r",
            chat_id,
            msg_id,
            root_id,
            sender_id,
            text[:100] if text else "",
        )

        if not text:
            logger.info("[Feishu] empty text, ignoring message")
            return

        # Check if it's a command
        if text.startswith("/"):
            msg_type = InboundMessageType.COMMAND
        else:
            msg_type = InboundMessageType.CHAT

        # topic_id: use root_id for replies (same topic), msg_id for new messages (new topic)
        topic_id = root_id or msg_id

        inbound = self._make_inbound(
            chat_id=chat_id,
            user_id=sender_id,
            text=text,
            msg_type=msg_type,
            thread_ts=msg_id,
            metadata={"message_id": msg_id, "root_id": root_id},
        )
        inbound.topic_id = topic_id

        # Schedule on the async event loop
        if self._main_loop and self._main_loop.is_running():
            logger.info("[Feishu] publishing inbound message to bus (type=%s, msg_id=%s)", msg_type.value, msg_id)
            fut = asyncio.run_coroutine_threadsafe(self._prepare_inbound(msg_id, inbound), self._main_loop)
            # Surface failures of the cross-thread dispatch; default-bind
            # msg_id so the lambda does not capture a mutating local.
            fut.add_done_callback(lambda f, mid=msg_id: self._log_future_error(f, "prepare_inbound", mid))
        else:
            logger.warning("[Feishu] main loop not running, cannot publish inbound message")
    except Exception:
        logger.exception("[Feishu] error processing message")
|
||||
703
backend/app/channels/manager.py
Normal file
703
backend/app/channels/manager.py
Normal file
@@ -0,0 +1,703 @@
|
||||
"""ChannelManager — consumes inbound messages and dispatches them to the DeerFlow agent via LangGraph Server."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import mimetypes
|
||||
import time
|
||||
from collections.abc import Mapping
|
||||
from typing import Any
|
||||
|
||||
from app.channels.message_bus import InboundMessage, InboundMessageType, MessageBus, OutboundMessage, ResolvedAttachment
|
||||
from app.channels.store import ChannelStore
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Default service endpoints used when no explicit configuration is supplied.
DEFAULT_LANGGRAPH_URL = "http://localhost:2024"
DEFAULT_GATEWAY_URL = "http://localhost:8001"
DEFAULT_ASSISTANT_ID = "lead_agent"

# Lowest-precedence run parameters; session/channel/user layers are merged on top.
DEFAULT_RUN_CONFIG: dict[str, Any] = {"recursion_limit": 100}
DEFAULT_RUN_CONTEXT: dict[str, Any] = {
    "thinking_enabled": True,
    "is_plan_mode": False,
    "subagent_enabled": False,
}
# Minimum delay between intermediate streaming updates published to a channel,
# so chat platforms are not flooded with message edits.
STREAM_UPDATE_MIN_INTERVAL_SECONDS = 0.35
|
||||
|
||||
|
||||
def _as_dict(value: Any) -> dict[str, Any]:
|
||||
return dict(value) if isinstance(value, Mapping) else {}
|
||||
|
||||
|
||||
def _merge_dicts(*layers: Any) -> dict[str, Any]:
|
||||
merged: dict[str, Any] = {}
|
||||
for layer in layers:
|
||||
if isinstance(layer, Mapping):
|
||||
merged.update(layer)
|
||||
return merged
|
||||
|
||||
|
||||
def _extract_response_text(result: dict | list) -> str:
|
||||
"""Extract the last AI message text from a LangGraph runs.wait result.
|
||||
|
||||
``runs.wait`` returns the final state dict which contains a ``messages``
|
||||
list. Each message is a dict with at least ``type`` and ``content``.
|
||||
|
||||
Handles special cases:
|
||||
- Regular AI text responses
|
||||
- Clarification interrupts (``ask_clarification`` tool messages)
|
||||
- AI messages with tool_calls but no text content
|
||||
"""
|
||||
if isinstance(result, list):
|
||||
messages = result
|
||||
elif isinstance(result, dict):
|
||||
messages = result.get("messages", [])
|
||||
else:
|
||||
return ""
|
||||
|
||||
# Walk backwards to find usable response text, but stop at the last
|
||||
# human message to avoid returning text from a previous turn.
|
||||
for msg in reversed(messages):
|
||||
if not isinstance(msg, dict):
|
||||
continue
|
||||
|
||||
msg_type = msg.get("type")
|
||||
|
||||
# Stop at the last human message — anything before it is a previous turn
|
||||
if msg_type == "human":
|
||||
break
|
||||
|
||||
# Check for tool messages from ask_clarification (interrupt case)
|
||||
if msg_type == "tool" and msg.get("name") == "ask_clarification":
|
||||
content = msg.get("content", "")
|
||||
if isinstance(content, str) and content:
|
||||
return content
|
||||
|
||||
# Regular AI message with text content
|
||||
if msg_type == "ai":
|
||||
content = msg.get("content", "")
|
||||
if isinstance(content, str) and content:
|
||||
return content
|
||||
# content can be a list of content blocks
|
||||
if isinstance(content, list):
|
||||
parts = []
|
||||
for block in content:
|
||||
if isinstance(block, dict) and block.get("type") == "text":
|
||||
parts.append(block.get("text", ""))
|
||||
elif isinstance(block, str):
|
||||
parts.append(block)
|
||||
text = "".join(parts)
|
||||
if text:
|
||||
return text
|
||||
return ""
|
||||
|
||||
|
||||
def _extract_text_content(content: Any) -> str:
|
||||
"""Extract text from a streaming payload content field."""
|
||||
if isinstance(content, str):
|
||||
return content
|
||||
if isinstance(content, list):
|
||||
parts: list[str] = []
|
||||
for block in content:
|
||||
if isinstance(block, str):
|
||||
parts.append(block)
|
||||
elif isinstance(block, Mapping):
|
||||
text = block.get("text")
|
||||
if isinstance(text, str):
|
||||
parts.append(text)
|
||||
else:
|
||||
nested = block.get("content")
|
||||
if isinstance(nested, str):
|
||||
parts.append(nested)
|
||||
return "".join(parts)
|
||||
if isinstance(content, Mapping):
|
||||
for key in ("text", "content"):
|
||||
value = content.get(key)
|
||||
if isinstance(value, str):
|
||||
return value
|
||||
return ""
|
||||
|
||||
|
||||
def _merge_stream_text(existing: str, chunk: str) -> str:
|
||||
"""Merge either delta text or cumulative text into a single snapshot."""
|
||||
if not chunk:
|
||||
return existing
|
||||
if not existing or chunk == existing:
|
||||
return chunk or existing
|
||||
if chunk.startswith(existing):
|
||||
return chunk
|
||||
if existing.endswith(chunk):
|
||||
return existing
|
||||
return existing + chunk
|
||||
|
||||
|
||||
def _extract_stream_message_id(payload: Any, metadata: Any) -> str | None:
|
||||
"""Best-effort extraction of the streamed AI message identifier."""
|
||||
candidates = [payload, metadata]
|
||||
if isinstance(payload, Mapping):
|
||||
candidates.append(payload.get("kwargs"))
|
||||
|
||||
for candidate in candidates:
|
||||
if not isinstance(candidate, Mapping):
|
||||
continue
|
||||
for key in ("id", "message_id"):
|
||||
value = candidate.get(key)
|
||||
if isinstance(value, str) and value:
|
||||
return value
|
||||
return None
|
||||
|
||||
|
||||
def _accumulate_stream_text(
    buffers: dict[str, str],
    current_message_id: str | None,
    event_data: Any,
) -> tuple[str | None, str | None]:
    """Turn one ``messages-tuple`` event into the latest displayable AI text.

    Returns ``(snapshot_text, message_id)``; the snapshot is ``None`` when the
    event carried nothing displayable (tool messages, empty content).
    Accumulation state lives in *buffers*, keyed per streamed message id.
    """
    # A messages-tuple event usually arrives as ``(message, metadata)``.
    payload: Any = event_data
    metadata: Any = None
    if isinstance(event_data, (list, tuple)) and event_data:
        payload = event_data[0]
        metadata = event_data[1] if len(event_data) > 1 else None

    def _store(key: str, chunk: str) -> tuple[str, str]:
        buffers[key] = _merge_stream_text(buffers.get(key, ""), chunk)
        return buffers[key], key

    if isinstance(payload, str):
        return _store(current_message_id or "__default__", payload)

    if not isinstance(payload, Mapping):
        return None, current_message_id

    # Tool messages never contribute displayable assistant text.
    if "tool" in str(payload.get("type", "")).lower():
        return None, current_message_id

    text = _extract_text_content(payload.get("content"))
    if not text:
        kwargs = payload.get("kwargs")
        if isinstance(kwargs, Mapping):
            text = _extract_text_content(kwargs.get("content"))
    if not text:
        return None, current_message_id

    key = _extract_stream_message_id(payload, metadata) or current_message_id or "__default__"
    return _store(key, text)
|
||||
|
||||
|
||||
def _extract_artifacts(result: dict | list) -> list[str]:
|
||||
"""Extract artifact paths from the last AI response cycle only.
|
||||
|
||||
Instead of reading the full accumulated ``artifacts`` state (which contains
|
||||
all artifacts ever produced in the thread), this inspects the messages after
|
||||
the last human message and collects file paths from ``present_files`` tool
|
||||
calls. This ensures only newly-produced artifacts are returned.
|
||||
"""
|
||||
if isinstance(result, list):
|
||||
messages = result
|
||||
elif isinstance(result, dict):
|
||||
messages = result.get("messages", [])
|
||||
else:
|
||||
return []
|
||||
|
||||
artifacts: list[str] = []
|
||||
for msg in reversed(messages):
|
||||
if not isinstance(msg, dict):
|
||||
continue
|
||||
# Stop at the last human message — anything before it is a previous turn
|
||||
if msg.get("type") == "human":
|
||||
break
|
||||
# Look for AI messages with present_files tool calls
|
||||
if msg.get("type") == "ai":
|
||||
for tc in msg.get("tool_calls", []):
|
||||
if isinstance(tc, dict) and tc.get("name") == "present_files":
|
||||
args = tc.get("args", {})
|
||||
paths = args.get("filepaths", [])
|
||||
if isinstance(paths, list):
|
||||
artifacts.extend(p for p in paths if isinstance(p, str))
|
||||
return artifacts
|
||||
|
||||
|
||||
def _format_artifact_text(artifacts: list[str]) -> str:
|
||||
"""Format artifact paths into a human-readable text block listing filenames."""
|
||||
import posixpath
|
||||
|
||||
filenames = [posixpath.basename(p) for p in artifacts]
|
||||
if len(filenames) == 1:
|
||||
return f"Created File: 📎 {filenames[0]}"
|
||||
return "Created Files: 📎 " + "、".join(filenames)
|
||||
|
||||
|
||||
# Only artifacts under this virtual path may be delivered to IM channels
# (see _resolve_attachments, which rejects everything else).
_OUTPUTS_VIRTUAL_PREFIX = "/mnt/user-data/outputs/"
|
||||
|
||||
|
||||
def _resolve_attachments(thread_id: str, artifacts: list[str]) -> list[ResolvedAttachment]:
    """Resolve virtual artifact paths to host filesystem paths with metadata.

    Only paths under ``/mnt/user-data/outputs/`` are accepted; any other
    virtual path is rejected with a warning to prevent exfiltrating uploads
    or workspace files via IM channels.

    Skips artifacts that cannot be resolved (missing files, invalid paths)
    and logs warnings for them.

    Args:
        thread_id: DeerFlow thread whose sandbox outputs directory is searched.
        artifacts: Virtual paths (as reported by the agent) to resolve.

    Returns:
        One ``ResolvedAttachment`` per artifact that passed all checks.
    """
    # Imported lazily to keep harness config out of module import time.
    from deerflow.config.paths import get_paths

    attachments: list[ResolvedAttachment] = []
    paths = get_paths()
    outputs_dir = paths.sandbox_outputs_dir(thread_id).resolve()
    for virtual_path in artifacts:
        # Security: only allow files from the agent outputs directory
        if not virtual_path.startswith(_OUTPUTS_VIRTUAL_PREFIX):
            logger.warning("[Manager] rejected non-outputs artifact path: %s", virtual_path)
            continue
        try:
            actual = paths.resolve_virtual_path(thread_id, virtual_path)
            # Verify the resolved path is actually under the outputs directory
            # (guards against path-traversal even after prefix check)
            try:
                actual.resolve().relative_to(outputs_dir)
            except ValueError:
                logger.warning("[Manager] artifact path escapes outputs dir: %s -> %s", virtual_path, actual)
                continue
            if not actual.is_file():
                logger.warning("[Manager] artifact not found on disk: %s -> %s", virtual_path, actual)
                continue
            # MIME guess falls back to a generic binary type.
            mime, _ = mimetypes.guess_type(str(actual))
            mime = mime or "application/octet-stream"
            attachments.append(
                ResolvedAttachment(
                    virtual_path=virtual_path,
                    actual_path=actual,
                    filename=actual.name,
                    mime_type=mime,
                    size=actual.stat().st_size,
                    is_image=mime.startswith("image/"),
                )
            )
        except (ValueError, OSError) as exc:
            # Best-effort: an unresolvable artifact is skipped, not fatal.
            logger.warning("[Manager] failed to resolve artifact %s: %s", virtual_path, exc)
    return attachments
|
||||
|
||||
|
||||
def _prepare_artifact_delivery(
    thread_id: str,
    response_text: str,
    artifacts: list[str],
) -> tuple[str, list[ResolvedAttachment]]:
    """Resolve attachments and append filename fallbacks to the text response.

    Unresolved artifacts are listed by name so the user still learns about
    them; resolved attachment names are also appended so files remain
    discoverable even when an upload is skipped or fails.
    """
    if not artifacts:
        return response_text, []

    attachments = _resolve_attachments(thread_id, artifacts)
    resolved = {attachment.virtual_path for attachment in attachments}
    unresolved = [path for path in artifacts if path not in resolved]

    def _append(base: str, extra: str) -> str:
        return f"{base}\n\n{extra}" if base else extra

    if unresolved:
        response_text = _append(response_text, _format_artifact_text(unresolved))

    # Text fallback for resolved files too — uploads can be skipped or fail.
    if attachments:
        names = [attachment.virtual_path for attachment in attachments]
        response_text = _append(response_text, _format_artifact_text(names))

    return response_text, attachments
|
||||
|
||||
|
||||
class ChannelManager:
    """Core dispatcher that bridges IM channels to the DeerFlow agent.

    It reads from the MessageBus inbound queue, creates/reuses threads on
    the LangGraph Server, sends messages via ``runs.wait``, and publishes
    outbound responses back through the bus.
    """

    def __init__(
        self,
        bus: MessageBus,
        store: ChannelStore,
        *,
        max_concurrency: int = 5,
        langgraph_url: str = DEFAULT_LANGGRAPH_URL,
        gateway_url: str = DEFAULT_GATEWAY_URL,
        assistant_id: str = DEFAULT_ASSISTANT_ID,
        default_session: dict[str, Any] | None = None,
        channel_sessions: dict[str, Any] | None = None,
    ) -> None:
        """Initialize the dispatcher.

        Args:
            bus: Message bus carrying inbound/outbound traffic.
            store: Mapping of (channel, chat, topic) to DeerFlow thread IDs.
            max_concurrency: Max messages handled concurrently.
            langgraph_url: Base URL of the LangGraph Server.
            gateway_url: Base URL of the Gateway API (used by commands).
            assistant_id: Fallback assistant when no session override applies.
            default_session: Lowest-precedence session overrides.
            channel_sessions: Per-channel (and nested per-user) overrides.
        """
        self.bus = bus
        self.store = store
        self._max_concurrency = max_concurrency
        self._langgraph_url = langgraph_url
        self._gateway_url = gateway_url
        self._assistant_id = assistant_id
        self._default_session = _as_dict(default_session)
        self._channel_sessions = dict(channel_sessions or {})
        self._client = None  # lazy init — langgraph_sdk async client
        self._semaphore: asyncio.Semaphore | None = None
        self._running = False
        self._task: asyncio.Task | None = None

    def _resolve_session_layer(self, msg: InboundMessage) -> tuple[dict[str, Any], dict[str, Any]]:
        """Return the (channel-level, user-level) session override layers for *msg*."""
        channel_layer = _as_dict(self._channel_sessions.get(msg.channel_name))
        users_layer = _as_dict(channel_layer.get("users"))
        user_layer = _as_dict(users_layer.get(msg.user_id))
        return channel_layer, user_layer

    def _resolve_run_params(self, msg: InboundMessage, thread_id: str) -> tuple[str, dict[str, Any], dict[str, Any]]:
        """Merge session layers into ``(assistant_id, run_config, run_context)``.

        Precedence (low → high): defaults, default_session, channel layer,
        user layer; the thread_id is always injected into the context.
        """
        channel_layer, user_layer = self._resolve_session_layer(msg)

        assistant_id = user_layer.get("assistant_id") or channel_layer.get("assistant_id") or self._default_session.get("assistant_id") or self._assistant_id
        # Guard against non-string or blank overrides from config.
        if not isinstance(assistant_id, str) or not assistant_id.strip():
            assistant_id = self._assistant_id

        run_config = _merge_dicts(
            DEFAULT_RUN_CONFIG,
            self._default_session.get("config"),
            channel_layer.get("config"),
            user_layer.get("config"),
        )

        run_context = _merge_dicts(
            DEFAULT_RUN_CONTEXT,
            self._default_session.get("context"),
            channel_layer.get("context"),
            user_layer.get("context"),
            {"thread_id": thread_id},
        )

        return assistant_id, run_config, run_context

    # -- LangGraph SDK client (lazy) ----------------------------------------

    def _get_client(self):
        """Return the ``langgraph_sdk`` async client, creating it on first use."""
        if self._client is None:
            from langgraph_sdk import get_client

            self._client = get_client(url=self._langgraph_url)
        return self._client

    # -- lifecycle ---------------------------------------------------------

    async def start(self) -> None:
        """Start the dispatch loop."""
        if self._running:
            return
        self._running = True
        self._semaphore = asyncio.Semaphore(self._max_concurrency)
        self._task = asyncio.create_task(self._dispatch_loop())
        logger.info("ChannelManager started (max_concurrency=%d)", self._max_concurrency)

    async def stop(self) -> None:
        """Stop the dispatch loop."""
        self._running = False
        if self._task:
            self._task.cancel()
            try:
                await self._task
            except asyncio.CancelledError:
                pass
            self._task = None
        logger.info("ChannelManager stopped")

    # -- dispatch loop -----------------------------------------------------

    async def _dispatch_loop(self) -> None:
        """Pull inbound messages off the bus and spawn a handler task per message."""
        logger.info("[Manager] dispatch loop started, waiting for inbound messages")
        while self._running:
            try:
                # 1s timeout so the loop re-checks self._running periodically.
                msg = await asyncio.wait_for(self.bus.get_inbound(), timeout=1.0)
            except TimeoutError:
                continue
            except asyncio.CancelledError:
                break

            logger.info(
                "[Manager] received inbound: channel=%s, chat_id=%s, type=%s, text=%r",
                msg.channel_name,
                msg.chat_id,
                msg.msg_type.value,
                msg.text[:100] if msg.text else "",
            )
            task = asyncio.create_task(self._handle_message(msg))
            task.add_done_callback(self._log_task_error)

    @staticmethod
    def _log_task_error(task: asyncio.Task) -> None:
        """Surface unhandled exceptions from background tasks."""
        if task.cancelled():
            return
        exc = task.exception()
        if exc:
            logger.error("[Manager] unhandled error in message task: %s", exc, exc_info=exc)

    async def _handle_message(self, msg: InboundMessage) -> None:
        """Route one inbound message under the concurrency semaphore.

        NOTE(review): assumes ``start()`` has run — ``self._semaphore`` is
        ``None`` before that, and ``async with None`` would raise.
        """
        async with self._semaphore:
            try:
                if msg.msg_type == InboundMessageType.COMMAND:
                    await self._handle_command(msg)
                else:
                    await self._handle_chat(msg)
            except Exception:
                logger.exception(
                    "Error handling message from %s (chat=%s)",
                    msg.channel_name,
                    msg.chat_id,
                )
                await self._send_error(msg, "An internal error occurred. Please try again.")

    # -- chat handling -----------------------------------------------------

    async def _create_thread(self, client, msg: InboundMessage) -> str:
        """Create a new thread on the LangGraph Server and store the mapping."""
        thread = await client.threads.create()
        thread_id = thread["thread_id"]
        self.store.set_thread_id(
            msg.channel_name,
            msg.chat_id,
            thread_id,
            topic_id=msg.topic_id,
            user_id=msg.user_id,
        )
        logger.info("[Manager] new thread created on LangGraph Server: thread_id=%s for chat_id=%s topic_id=%s", thread_id, msg.chat_id, msg.topic_id)
        return thread_id

    async def _handle_chat(self, msg: InboundMessage) -> None:
        """Run one chat turn: resolve/create the thread, invoke the agent, reply."""
        client = self._get_client()

        # Look up existing DeerFlow thread.
        # topic_id may be None (e.g. Telegram private chats) — the store
        # handles this by using the "channel:chat_id" key without a topic suffix.
        thread_id = self.store.get_thread_id(msg.channel_name, msg.chat_id, topic_id=msg.topic_id)
        if thread_id:
            logger.info("[Manager] reusing thread: thread_id=%s for topic_id=%s", thread_id, msg.topic_id)

        # No existing thread found — create a new one
        if thread_id is None:
            thread_id = await self._create_thread(client, msg)

        assistant_id, run_config, run_context = self._resolve_run_params(msg, thread_id)
        # Feishu supports incremental card updates, so it gets the streaming path.
        if msg.channel_name == "feishu":
            await self._handle_streaming_chat(
                client,
                msg,
                thread_id,
                assistant_id,
                run_config,
                run_context,
            )
            return

        logger.info("[Manager] invoking runs.wait(thread_id=%s, text=%r)", thread_id, msg.text[:100])
        result = await client.runs.wait(
            thread_id,
            assistant_id,
            input={"messages": [{"role": "human", "content": msg.text}]},
            config=run_config,
            context=run_context,
        )

        response_text = _extract_response_text(result)
        artifacts = _extract_artifacts(result)

        logger.info(
            "[Manager] agent response received: thread_id=%s, response_len=%d, artifacts=%d",
            thread_id,
            len(response_text) if response_text else 0,
            len(artifacts),
        )

        response_text, attachments = _prepare_artifact_delivery(thread_id, response_text, artifacts)

        if not response_text:
            if attachments:
                response_text = _format_artifact_text([a.virtual_path for a in attachments])
            else:
                response_text = "(No response from agent)"

        outbound = OutboundMessage(
            channel_name=msg.channel_name,
            chat_id=msg.chat_id,
            thread_id=thread_id,
            text=response_text,
            artifacts=artifacts,
            attachments=attachments,
            thread_ts=msg.thread_ts,
        )
        logger.info("[Manager] publishing outbound message to bus: channel=%s, chat_id=%s", msg.channel_name, msg.chat_id)
        await self.bus.publish_outbound(outbound)

    async def _handle_streaming_chat(
        self,
        client,
        msg: InboundMessage,
        thread_id: str,
        assistant_id: str,
        run_config: dict[str, Any],
        run_context: dict[str, Any],
    ) -> None:
        """Stream the run and publish throttled partial updates, then a final message.

        Intermediate snapshots are rate-limited by
        ``STREAM_UPDATE_MIN_INTERVAL_SECONDS``; the final outbound (with
        artifacts/attachments) is always published in ``finally``.
        """
        logger.info("[Manager] invoking runs.stream(thread_id=%s, text=%r)", thread_id, msg.text[:100])

        last_values: dict[str, Any] | list | None = None
        streamed_buffers: dict[str, str] = {}
        current_message_id: str | None = None
        latest_text = ""
        last_published_text = ""
        last_publish_at = 0.0
        stream_error: BaseException | None = None

        try:
            async for chunk in client.runs.stream(
                thread_id,
                assistant_id,
                input={"messages": [{"role": "human", "content": msg.text}]},
                config=run_config,
                context=run_context,
                stream_mode=["messages-tuple", "values"],
            ):
                event = getattr(chunk, "event", "")
                data = getattr(chunk, "data", None)

                if event == "messages-tuple":
                    # Token-level updates — merge into the per-message buffers.
                    accumulated_text, current_message_id = _accumulate_stream_text(streamed_buffers, current_message_id, data)
                    if accumulated_text:
                        latest_text = accumulated_text
                elif event == "values" and isinstance(data, (dict, list)):
                    # Full state snapshot — remember it for final extraction.
                    last_values = data
                    snapshot_text = _extract_response_text(data)
                    if snapshot_text:
                        latest_text = snapshot_text

                if not latest_text or latest_text == last_published_text:
                    continue

                # Throttle intermediate updates (first publish is immediate).
                now = time.monotonic()
                if last_published_text and now - last_publish_at < STREAM_UPDATE_MIN_INTERVAL_SECONDS:
                    continue

                await self.bus.publish_outbound(
                    OutboundMessage(
                        channel_name=msg.channel_name,
                        chat_id=msg.chat_id,
                        thread_id=thread_id,
                        text=latest_text,
                        is_final=False,
                        thread_ts=msg.thread_ts,
                    )
                )
                last_published_text = latest_text
                last_publish_at = now
        except Exception as exc:
            stream_error = exc
            logger.exception("[Manager] streaming error: thread_id=%s", thread_id)
        finally:
            # Fall back to the streamed text when no values snapshot arrived.
            result = last_values if last_values is not None else {"messages": [{"type": "ai", "content": latest_text}]}
            response_text = _extract_response_text(result)
            artifacts = _extract_artifacts(result)
            response_text, attachments = _prepare_artifact_delivery(thread_id, response_text, artifacts)

            if not response_text:
                if attachments:
                    response_text = _format_artifact_text([attachment.virtual_path for attachment in attachments])
                elif stream_error:
                    response_text = "An error occurred while processing your request. Please try again."
                else:
                    response_text = latest_text or "(No response from agent)"

            logger.info(
                "[Manager] streaming response completed: thread_id=%s, response_len=%d, artifacts=%d, error=%s",
                thread_id,
                len(response_text),
                len(artifacts),
                stream_error,
            )
            await self.bus.publish_outbound(
                OutboundMessage(
                    channel_name=msg.channel_name,
                    chat_id=msg.chat_id,
                    thread_id=thread_id,
                    text=response_text,
                    artifacts=artifacts,
                    attachments=attachments,
                    is_final=True,
                    thread_ts=msg.thread_ts,
                )
            )

    # -- command handling --------------------------------------------------

    async def _handle_command(self, msg: InboundMessage) -> None:
        """Handle a slash command (/new, /status, /models, /memory, /help)."""
        text = msg.text.strip()
        parts = text.split(maxsplit=1)
        command = parts[0].lower().lstrip("/")

        if command == "new":
            # Create a new thread on the LangGraph Server
            client = self._get_client()
            thread = await client.threads.create()
            new_thread_id = thread["thread_id"]
            self.store.set_thread_id(
                msg.channel_name,
                msg.chat_id,
                new_thread_id,
                topic_id=msg.topic_id,
                user_id=msg.user_id,
            )
            reply = "New conversation started."
        elif command == "status":
            thread_id = self.store.get_thread_id(msg.channel_name, msg.chat_id, topic_id=msg.topic_id)
            reply = f"Active thread: {thread_id}" if thread_id else "No active conversation."
        elif command == "models":
            reply = await self._fetch_gateway("/api/models", "models")
        elif command == "memory":
            reply = await self._fetch_gateway("/api/memory", "memory")
        elif command == "help":
            reply = "Available commands:\n/new — Start a new conversation\n/status — Show current thread info\n/models — List available models\n/memory — Show memory status\n/help — Show this help"
        else:
            reply = f"Unknown command: /{command}. Type /help for available commands."

        # NOTE(review): this lookup omits topic_id (unlike /status above) —
        # confirm whether the topic-less thread is intended here.
        outbound = OutboundMessage(
            channel_name=msg.channel_name,
            chat_id=msg.chat_id,
            thread_id=self.store.get_thread_id(msg.channel_name, msg.chat_id) or "",
            text=reply,
            thread_ts=msg.thread_ts,
        )
        await self.bus.publish_outbound(outbound)

    async def _fetch_gateway(self, path: str, kind: str) -> str:
        """Fetch data from the Gateway API for command responses."""
        import httpx

        try:
            async with httpx.AsyncClient() as http:
                resp = await http.get(f"{self._gateway_url}{path}", timeout=10)
                resp.raise_for_status()
                data = resp.json()
        except Exception:
            logger.exception("Failed to fetch %s from gateway", kind)
            return f"Failed to fetch {kind} information."

        if kind == "models":
            names = [m["name"] for m in data.get("models", [])]
            return ("Available models:\n" + "\n".join(f"• {n}" for n in names)) if names else "No models configured."
        elif kind == "memory":
            facts = data.get("facts", [])
            return f"Memory contains {len(facts)} fact(s)."
        return str(data)

    # -- error helper ------------------------------------------------------

    async def _send_error(self, msg: InboundMessage, error_text: str) -> None:
        """Publish a plain error reply back to the originating chat."""
        outbound = OutboundMessage(
            channel_name=msg.channel_name,
            chat_id=msg.chat_id,
            thread_id=self.store.get_thread_id(msg.channel_name, msg.chat_id) or "",
            text=error_text,
            thread_ts=msg.thread_ts,
        )
        await self.bus.publish_outbound(outbound)
|
||||
173
backend/app/channels/message_bus.py
Normal file
173
backend/app/channels/message_bus.py
Normal file
@@ -0,0 +1,173 @@
|
||||
"""MessageBus — async pub/sub hub that decouples channels from the agent dispatcher."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import time
|
||||
from collections.abc import Callable, Coroutine
|
||||
from dataclasses import dataclass, field
|
||||
from enum import StrEnum
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Message types
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class InboundMessageType(StrEnum):
|
||||
"""Types of messages arriving from IM channels."""
|
||||
|
||||
CHAT = "chat"
|
||||
COMMAND = "command"
|
||||
|
||||
|
||||
@dataclass
class InboundMessage:
    """A message flowing from an IM channel into the agent dispatcher.

    Attributes:
        channel_name: Name of the source channel (e.g. "feishu", "slack").
        chat_id: Platform-specific chat/conversation identifier.
        user_id: Platform-specific user identifier.
        text: The message text.
        msg_type: Whether this is a regular chat message or a command.
        thread_ts: Optional platform thread identifier (for threaded replies).
        topic_id: Conversation topic identifier used to map to a DeerFlow
            thread. Messages sharing the same ``topic_id`` within a
            ``chat_id`` reuse the same DeerFlow thread; ``None`` means each
            message gets its own thread (one-shot Q&A).
        files: Optional list of file attachments (platform-specific dicts).
        metadata: Arbitrary extra data from the channel.
        created_at: Unix timestamp when the message was created.
    """

    channel_name: str
    chat_id: str
    user_id: str
    text: str
    msg_type: InboundMessageType = InboundMessageType.CHAT
    thread_ts: str | None = None
    topic_id: str | None = None
    files: list[dict[str, Any]] = field(default_factory=list)
    metadata: dict[str, Any] = field(default_factory=dict)
    created_at: float = field(default_factory=time.time)
||||
|
||||
|
||||
@dataclass
class ResolvedAttachment:
    """A file attachment resolved to a host filesystem path, ready for upload.

    Attributes:
        virtual_path: Original virtual path (e.g. /mnt/user-data/outputs/report.pdf).
        actual_path: Resolved host filesystem path.
        filename: Basename of the file.
        mime_type: MIME type (e.g. "application/pdf").
        size: File size in bytes.
        is_image: True for image/* MIME types (platforms may handle images differently).
    """

    virtual_path: str
    actual_path: Path
    filename: str
    mime_type: str
    size: int
    is_image: bool
|
||||
|
||||
|
||||
@dataclass
class OutboundMessage:
    """A message from the agent dispatcher back to a channel.

    Attributes:
        channel_name: Target channel name (used for routing).
        chat_id: Target chat/conversation identifier.
        thread_id: DeerFlow thread ID that produced this response.
        text: The response text.
        artifacts: List of artifact paths produced by the agent.
        attachments: Resolved files ready for upload to the platform.
        is_final: Whether this is the final message in the response stream.
        thread_ts: Optional platform thread identifier for threaded replies.
        metadata: Arbitrary extra data.
        created_at: Unix timestamp.
    """

    channel_name: str
    chat_id: str
    thread_id: str
    text: str
    artifacts: list[str] = field(default_factory=list)
    attachments: list[ResolvedAttachment] = field(default_factory=list)
    is_final: bool = True
    thread_ts: str | None = None
    metadata: dict[str, Any] = field(default_factory=dict)
    created_at: float = field(default_factory=time.time)
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# MessageBus
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Async callback signature that channels register to receive OutboundMessage dispatches.
OutboundCallback = Callable[[OutboundMessage], Coroutine[Any, Any, None]]
|
||||
|
||||
|
||||
class MessageBus:
    """Async pub/sub hub connecting channels and the agent dispatcher.

    Inbound: channels enqueue messages that the dispatcher pulls off an
    asyncio queue. Outbound: the dispatcher fans responses out to every
    callback a channel has registered.
    """

    def __init__(self) -> None:
        self._inbound_queue: asyncio.Queue[InboundMessage] = asyncio.Queue()
        self._outbound_listeners: list[OutboundCallback] = []

    # -- inbound -----------------------------------------------------------

    async def publish_inbound(self, msg: InboundMessage) -> None:
        """Enqueue an inbound message from a channel."""
        await self._inbound_queue.put(msg)
        logger.info(
            "[Bus] inbound enqueued: channel=%s, chat_id=%s, type=%s, queue_size=%d",
            msg.channel_name,
            msg.chat_id,
            msg.msg_type.value,
            self._inbound_queue.qsize(),
        )

    async def get_inbound(self) -> InboundMessage:
        """Block until the next inbound message is available."""
        return await self._inbound_queue.get()

    @property
    def inbound_queue(self) -> asyncio.Queue[InboundMessage]:
        """Direct access to the underlying inbound queue."""
        return self._inbound_queue

    # -- outbound ----------------------------------------------------------

    def subscribe_outbound(self, callback: OutboundCallback) -> None:
        """Register an async callback invoked for every outbound message."""
        self._outbound_listeners.append(callback)

    def unsubscribe_outbound(self, callback: OutboundCallback) -> None:
        """Drop every registration of *callback*, matched by identity."""
        remaining = [listener for listener in self._outbound_listeners if listener is not callback]
        self._outbound_listeners = remaining

    async def publish_outbound(self, msg: OutboundMessage) -> None:
        """Dispatch an outbound message to all registered listeners.

        A failing listener is logged and skipped so one broken channel
        cannot block delivery to the others.
        """
        logger.info(
            "[Bus] outbound dispatching: channel=%s, chat_id=%s, listeners=%d, text_len=%d",
            msg.channel_name,
            msg.chat_id,
            len(self._outbound_listeners),
            len(msg.text),
        )
        for listener in self._outbound_listeners:
            try:
                await listener(msg)
            except Exception:
                logger.exception("Error in outbound callback for channel=%s", msg.channel_name)
|
||||
178
backend/app/channels/service.py
Normal file
178
backend/app/channels/service.py
Normal file
@@ -0,0 +1,178 @@
|
||||
"""ChannelService — manages the lifecycle of all IM channels."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from app.channels.manager import ChannelManager
|
||||
from app.channels.message_bus import MessageBus
|
||||
from app.channels.store import ChannelStore
|
||||
|
||||
logger = logging.getLogger(__name__)

# Channel name → import path for lazy loading.
# Values use the "module.path:ClassName" form understood by
# deerflow.reflection.resolve_class; a channel's module is imported only
# when that channel is actually enabled and started (see _start_channel),
# so optional SDK dependencies are not required for disabled channels.
_CHANNEL_REGISTRY: dict[str, str] = {
    "feishu": "app.channels.feishu:FeishuChannel",
    "slack": "app.channels.slack:SlackChannel",
    "telegram": "app.channels.telegram:TelegramChannel",
}
|
||||
|
||||
|
||||
class ChannelService:
    """Manages the lifecycle of all configured IM channels.

    Reads configuration from ``config.yaml`` under the ``channels`` key,
    instantiates enabled channels, and starts the ChannelManager dispatcher.
    """

    def __init__(self, channels_config: dict[str, Any] | None = None) -> None:
        """Wire up the bus, store, and dispatcher from raw channel config.

        Args:
            channels_config: Contents of the ``channels`` config section.
                Reserved keys (``langgraph_url``, ``gateway_url``,
                ``session``) are popped out; every remaining key is treated
                as a channel name mapped to that channel's config dict.
        """
        self.bus = MessageBus()
        self.store = ChannelStore()
        # Copy so the pop() calls below do not mutate the caller's dict.
        config = dict(channels_config or {})
        # "or" also replaces explicit empty-string/None values with defaults.
        langgraph_url = config.pop("langgraph_url", None) or "http://localhost:2024"
        gateway_url = config.pop("gateway_url", None) or "http://localhost:8001"
        default_session = config.pop("session", None)
        # Per-channel session overrides; the value is None for channels that
        # define no "session" key of their own.
        channel_sessions = {name: channel_config.get("session") for name, channel_config in config.items() if isinstance(channel_config, dict)}
        self.manager = ChannelManager(
            bus=self.bus,
            store=self.store,
            langgraph_url=langgraph_url,
            gateway_url=gateway_url,
            default_session=default_session if isinstance(default_session, dict) else None,
            channel_sessions=channel_sessions,
        )
        self._channels: dict[str, Any] = {}  # name -> Channel instance
        self._config = config
        self._running = False

    @classmethod
    def from_app_config(cls) -> ChannelService:
        """Create a ChannelService from the application config.

        Imported lazily so this module can be used without the full
        deerflow config stack loaded at import time.
        """
        from deerflow.config.app_config import get_app_config

        config = get_app_config()
        channels_config = {}
        # extra fields are allowed by AppConfig (extra="allow")
        extra = config.model_extra or {}
        if "channels" in extra:
            channels_config = extra["channels"]
        return cls(channels_config=channels_config)

    async def start(self) -> None:
        """Start the manager and all enabled channels.

        Idempotent: a second call while running is a no-op. A channel that
        fails to start is logged and skipped — the service still comes up
        with whatever channels succeeded.
        """
        if self._running:
            return

        # The dispatcher must be running before channels begin publishing.
        await self.manager.start()

        for name, channel_config in self._config.items():
            if not isinstance(channel_config, dict):
                continue
            # Channels are opt-in: "enabled: true" is required per channel.
            if not channel_config.get("enabled", False):
                logger.info("Channel %s is disabled, skipping", name)
                continue

            await self._start_channel(name, channel_config)

        self._running = True
        logger.info("ChannelService started with channels: %s", list(self._channels.keys()))

    async def stop(self) -> None:
        """Stop all channels and the manager.

        Errors while stopping one channel are logged and do not prevent
        the remaining channels (or the manager) from being stopped.
        """
        for name, channel in list(self._channels.items()):
            try:
                await channel.stop()
                logger.info("Channel %s stopped", name)
            except Exception:
                logger.exception("Error stopping channel %s", name)
        self._channels.clear()

        await self.manager.stop()
        self._running = False
        logger.info("ChannelService stopped")

    async def restart_channel(self, name: str) -> bool:
        """Restart a specific channel. Returns True if successful."""
        if name in self._channels:
            try:
                await self._channels[name].stop()
            except Exception:
                # Best effort: a channel that fails to stop is still dropped
                # so a fresh instance can be started below.
                logger.exception("Error stopping channel %s for restart", name)
            del self._channels[name]

        config = self._config.get(name)
        if not config or not isinstance(config, dict):
            logger.warning("No config for channel %s", name)
            return False

        return await self._start_channel(name, config)

    async def _start_channel(self, name: str, config: dict[str, Any]) -> bool:
        """Instantiate and start a single channel.

        Returns:
            True when the channel was imported, constructed and started;
            False on any failure (which is logged, never raised).
        """
        import_path = _CHANNEL_REGISTRY.get(name)
        if not import_path:
            logger.warning("Unknown channel type: %s", name)
            return False

        try:
            # Lazy import: channel SDKs are only pulled in when used.
            from deerflow.reflection import resolve_class

            channel_cls = resolve_class(import_path, base_class=None)
        except Exception:
            logger.exception("Failed to import channel class for %s", name)
            return False

        try:
            channel = channel_cls(bus=self.bus, config=config)
            await channel.start()
            self._channels[name] = channel
            logger.info("Channel %s started", name)
            return True
        except Exception:
            logger.exception("Failed to start channel %s", name)
            return False

    def get_status(self) -> dict[str, Any]:
        """Return status information for all channels.

        Reports every registry-known channel (not just started ones) with
        its configured/enabled state and whether it is currently running.
        """
        channels_status = {}
        for name in _CHANNEL_REGISTRY:
            config = self._config.get(name, {})
            enabled = isinstance(config, dict) and config.get("enabled", False)
            running = name in self._channels and self._channels[name].is_running
            channels_status[name] = {
                "enabled": enabled,
                "running": running,
            }
        return {
            "service_running": self._running,
            "channels": channels_status,
        }
|
||||
|
||||
|
||||
# -- singleton access -------------------------------------------------------
|
||||
|
||||
# Process-wide singleton; populated by start_channel_service() and cleared
# by stop_channel_service().
_channel_service: ChannelService | None = None


def get_channel_service() -> ChannelService | None:
    """Get the singleton ChannelService instance (if started).

    Returns None when the service has never been started or has been
    stopped.
    """
    return _channel_service
|
||||
|
||||
|
||||
async def start_channel_service() -> ChannelService:
    """Start (or return the already-created) global ChannelService.

    Idempotent: a second call returns the existing singleton without
    starting anything new. The singleton is built from the application
    config via ``ChannelService.from_app_config``.
    """
    global _channel_service
    if _channel_service is None:
        _channel_service = ChannelService.from_app_config()
        await _channel_service.start()
    return _channel_service
|
||||
|
||||
|
||||
async def stop_channel_service() -> None:
    """Stop and discard the global ChannelService, if one exists.

    Safe to call when the service was never started (no-op in that case).
    """
    global _channel_service
    service = _channel_service
    if service is None:
        return
    await service.stop()
    _channel_service = None
|
||||
244
backend/app/channels/slack.py
Normal file
244
backend/app/channels/slack.py
Normal file
@@ -0,0 +1,244 @@
|
||||
"""Slack channel — connects via Socket Mode (no public IP needed)."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from markdown_to_mrkdwn import SlackMarkdownConverter
|
||||
|
||||
from app.channels.base import Channel
|
||||
from app.channels.message_bus import InboundMessageType, MessageBus, OutboundMessage, ResolvedAttachment
|
||||
|
||||
logger = logging.getLogger(__name__)

# Shared converter instance: translates standard Markdown into Slack's
# "mrkdwn" flavor before messages are posted.
_slack_md_converter = SlackMarkdownConverter()


class SlackChannel(Channel):
    """Slack IM channel using Socket Mode (WebSocket, no public IP).

    Configuration keys (in ``config.yaml`` under ``channels.slack``):
    - ``bot_token``: Slack Bot User OAuth Token (xoxb-...).
    - ``app_token``: Slack App-Level Token (xapp-...) for Socket Mode.
    - ``allowed_users``: (optional) List of allowed Slack user IDs. Empty = allow all.
    """

    def __init__(self, bus: MessageBus, config: dict[str, Any]) -> None:
        super().__init__(name="slack", bus=bus, config=config)
        # slack_sdk clients, created in start() (None until then).
        self._socket_client = None
        self._web_client = None
        # Event loop of the service thread; SDK callbacks run on a different
        # thread and hand coroutines back to this loop.
        self._loop: asyncio.AbstractEventLoop | None = None
        self._allowed_users: set[str] = set(config.get("allowed_users", []))

    async def start(self) -> None:
        if self._running:
            return

        try:
            from slack_sdk import WebClient
            from slack_sdk.socket_mode import SocketModeClient
            from slack_sdk.socket_mode.response import SocketModeResponse
        except ImportError:
            logger.error("slack-sdk is not installed. Install it with: uv add slack-sdk")
            return

        # Kept on the instance so _on_socket_event can build ack responses
        # without re-importing inside the hot callback path.
        self._SocketModeResponse = SocketModeResponse

        bot_token = self.config.get("bot_token", "")
        app_token = self.config.get("app_token", "")

        if not bot_token or not app_token:
            logger.error("Slack channel requires bot_token and app_token")
            return

        self._web_client = WebClient(token=bot_token)
        self._socket_client = SocketModeClient(
            app_token=app_token,
            web_client=self._web_client,
        )
        # NOTE(review): asyncio.get_event_loop() is deprecated outside a
        # running loop; presumably start() always runs inside the service's
        # loop, where it returns the running loop — confirm.
        self._loop = asyncio.get_event_loop()

        self._socket_client.socket_mode_request_listeners.append(self._on_socket_event)

        self._running = True
        self.bus.subscribe_outbound(self._on_outbound)

        # Start socket mode in background thread
        asyncio.get_event_loop().run_in_executor(None, self._socket_client.connect)
        logger.info("Slack channel started")

    async def stop(self) -> None:
        self._running = False
        self.bus.unsubscribe_outbound(self._on_outbound)
        if self._socket_client:
            self._socket_client.close()
            self._socket_client = None
        logger.info("Slack channel stopped")

    async def send(self, msg: OutboundMessage, *, _max_retries: int = 3) -> None:
        """Post an outbound message, retrying with exponential backoff.

        On success in a thread, a white_check_mark reaction is added to the
        thread root; after exhausting retries an "x" reaction is added and
        the last exception is re-raised. No-op if start() never completed.
        """
        if not self._web_client:
            return

        kwargs: dict[str, Any] = {
            "channel": msg.chat_id,
            "text": _slack_md_converter.convert(msg.text),
        }
        if msg.thread_ts:
            kwargs["thread_ts"] = msg.thread_ts

        last_exc: Exception | None = None
        for attempt in range(_max_retries):
            try:
                # WebClient is synchronous; run it off the event loop.
                await asyncio.to_thread(self._web_client.chat_postMessage, **kwargs)
                # Add a completion reaction to the thread root
                if msg.thread_ts:
                    await asyncio.to_thread(
                        self._add_reaction,
                        msg.chat_id,
                        msg.thread_ts,
                        "white_check_mark",
                    )
                return
            except Exception as exc:
                last_exc = exc
                if attempt < _max_retries - 1:
                    delay = 2**attempt  # 1s, 2s
                    logger.warning(
                        "[Slack] send failed (attempt %d/%d), retrying in %ds: %s",
                        attempt + 1,
                        _max_retries,
                        delay,
                        exc,
                    )
                    await asyncio.sleep(delay)

        logger.error("[Slack] send failed after %d attempts: %s", _max_retries, last_exc)
        # Add failure reaction on error
        if msg.thread_ts:
            try:
                await asyncio.to_thread(
                    self._add_reaction,
                    msg.chat_id,
                    msg.thread_ts,
                    "x",
                )
            except Exception:
                pass
        raise last_exc  # type: ignore[misc]

    async def send_file(self, msg: OutboundMessage, attachment: ResolvedAttachment) -> bool:
        """Upload one attachment to the chat (threaded when thread_ts is set).

        Returns True on success; failures are logged and reported as False.
        """
        if not self._web_client:
            return False

        try:
            kwargs: dict[str, Any] = {
                "channel": msg.chat_id,
                "file": str(attachment.actual_path),
                "filename": attachment.filename,
                "title": attachment.filename,
            }
            if msg.thread_ts:
                kwargs["thread_ts"] = msg.thread_ts

            await asyncio.to_thread(self._web_client.files_upload_v2, **kwargs)
            logger.info("[Slack] file uploaded: %s to channel=%s", attachment.filename, msg.chat_id)
            return True
        except Exception:
            logger.exception("[Slack] failed to upload file: %s", attachment.filename)
            return False

    # -- internal ----------------------------------------------------------

    def _add_reaction(self, channel_id: str, timestamp: str, emoji: str) -> None:
        """Add an emoji reaction to a message (best-effort, non-blocking)."""
        if not self._web_client:
            return
        try:
            self._web_client.reactions_add(
                channel=channel_id,
                timestamp=timestamp,
                name=emoji,
            )
        except Exception as exc:
            # "already_reacted" is expected on retries and not worth logging.
            if "already_reacted" not in str(exc):
                logger.warning("[Slack] failed to add reaction %s: %s", emoji, exc)

    def _send_running_reply(self, channel_id: str, thread_ts: str) -> None:
        """Send a 'Working on it......' reply in the thread (called from SDK thread)."""
        if not self._web_client:
            return
        try:
            self._web_client.chat_postMessage(
                channel=channel_id,
                text=":hourglass_flowing_sand: Working on it...",
                thread_ts=thread_ts,
            )
            logger.info("[Slack] 'Working on it...' reply sent in channel=%s, thread_ts=%s", channel_id, thread_ts)
        except Exception:
            logger.exception("[Slack] failed to send running reply in channel=%s", channel_id)

    def _on_socket_event(self, client, req) -> None:
        """Called by slack-sdk for each Socket Mode event."""
        try:
            # Acknowledge the event
            response = self._SocketModeResponse(envelope_id=req.envelope_id)
            client.send_socket_mode_response(response)

            event_type = req.type
            if event_type != "events_api":
                return

            event = req.payload.get("event", {})
            etype = event.get("type", "")

            # Handle message events (DM or @mention)
            # NOTE(review): a channel @mention may be delivered as both a
            # "message" and an "app_mention" event depending on the app's
            # event subscriptions, which could double-process — confirm.
            if etype in ("message", "app_mention"):
                self._handle_message_event(event)

        except Exception:
            logger.exception("Error processing Slack event")

    def _handle_message_event(self, event: dict) -> None:
        """Filter, classify, and forward one message event to the bus.

        Runs on the slack-sdk worker thread; hands the inbound message to
        the service event loop via run_coroutine_threadsafe.
        """
        # Ignore bot messages
        if event.get("bot_id") or event.get("subtype"):
            return

        user_id = event.get("user", "")

        # Check allowed users
        if self._allowed_users and user_id not in self._allowed_users:
            logger.debug("Ignoring message from non-allowed user: %s", user_id)
            return

        text = event.get("text", "").strip()
        if not text:
            return

        channel_id = event.get("channel", "")
        thread_ts = event.get("thread_ts") or event.get("ts", "")

        # Leading "/" marks a bot command; everything else is chat.
        if text.startswith("/"):
            msg_type = InboundMessageType.COMMAND
        else:
            msg_type = InboundMessageType.CHAT

        # topic_id: use thread_ts as the topic identifier.
        # For threaded messages, thread_ts is the root message ts (shared topic).
        # For non-threaded messages, thread_ts is the message's own ts (new topic).
        inbound = self._make_inbound(
            chat_id=channel_id,
            user_id=user_id,
            text=text,
            msg_type=msg_type,
            thread_ts=thread_ts,
        )
        inbound.topic_id = thread_ts

        if self._loop and self._loop.is_running():
            # Acknowledge with an eyes reaction
            self._add_reaction(channel_id, event.get("ts", thread_ts), "eyes")
            # Send "running" reply first (fire-and-forget from SDK thread)
            self._send_running_reply(channel_id, thread_ts)
            asyncio.run_coroutine_threadsafe(self.bus.publish_inbound(inbound), self._loop)
|
||||
153
backend/app/channels/store.py
Normal file
153
backend/app/channels/store.py
Normal file
@@ -0,0 +1,153 @@
|
||||
"""ChannelStore — persists IM chat-to-DeerFlow thread mappings."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import tempfile
|
||||
import threading
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)


class ChannelStore:
    """JSON-file-backed store that maps IM conversations to DeerFlow threads.

    Data layout (on disk)::

        {
            "<channel_name>:<chat_id>": {
                "thread_id": "<uuid>",
                "user_id": "<platform_user>",
                "created_at": 1700000000.0,
                "updated_at": 1700000000.0
            },
            ...
        }

    Topic-scoped conversations use a three-segment key
    ``"<channel_name>:<chat_id>:<topic_id>"``.

    The store is intentionally simple — a single JSON file that is atomically
    rewritten on every mutation. For production workloads with high concurrency,
    this can be swapped for a proper database backend.
    """

    def __init__(self, path: str | Path | None = None) -> None:
        """Open (or create) the store.

        Args:
            path: Location of the backing JSON file. Defaults to
                ``<base_dir>/channels/store.json`` from the app paths config.
        """
        if path is None:
            from deerflow.config.paths import get_paths

            path = Path(get_paths().base_dir) / "channels" / "store.json"
        self._path = Path(path)
        self._path.parent.mkdir(parents=True, exist_ok=True)
        # Create the lock before the initial load so every access to
        # self._data — reads included — is guarded consistently.
        self._lock = threading.Lock()
        self._data: dict[str, dict[str, Any]] = self._load()

    # -- persistence -------------------------------------------------------

    def _load(self) -> dict[str, dict[str, Any]]:
        """Read the backing file; fall back to an empty store on any damage."""
        if self._path.exists():
            try:
                data = json.loads(self._path.read_text(encoding="utf-8"))
                # Valid JSON that is not an object (e.g. a list) would break
                # every .get() below — treat it the same as corruption.
                if isinstance(data, dict):
                    return data
                logger.warning("Corrupt channel store at %s, starting fresh", self._path)
            except (json.JSONDecodeError, OSError):
                logger.warning("Corrupt channel store at %s, starting fresh", self._path)
        return {}

    def _save(self) -> None:
        """Atomically rewrite the backing file (temp file + rename)."""
        fd = tempfile.NamedTemporaryFile(
            mode="w",
            # Match the UTF-8 used by _load(); without this the temp file
            # would use the locale-default encoding.
            encoding="utf-8",
            dir=self._path.parent,
            suffix=".tmp",
            delete=False,
        )
        try:
            json.dump(self._data, fd, indent=2)
            fd.close()
            # Same-directory rename is atomic on POSIX filesystems.
            Path(fd.name).replace(self._path)
        except BaseException:
            # Clean up the temp file on any failure, including
            # KeyboardInterrupt, then re-raise.
            fd.close()
            Path(fd.name).unlink(missing_ok=True)
            raise

    # -- key helpers -------------------------------------------------------

    @staticmethod
    def _key(channel_name: str, chat_id: str, topic_id: str | None = None) -> str:
        """Build the store key; topic-scoped keys carry a third segment."""
        if topic_id:
            return f"{channel_name}:{chat_id}:{topic_id}"
        return f"{channel_name}:{chat_id}"

    # -- public API --------------------------------------------------------

    def get_thread_id(self, channel_name: str, chat_id: str, topic_id: str | None = None) -> str | None:
        """Look up the DeerFlow thread_id for a given IM conversation/topic."""
        with self._lock:
            entry = self._data.get(self._key(channel_name, chat_id, topic_id))
            return entry["thread_id"] if entry else None

    def set_thread_id(
        self,
        channel_name: str,
        chat_id: str,
        thread_id: str,
        *,
        topic_id: str | None = None,
        user_id: str = "",
    ) -> None:
        """Create or update the mapping for an IM conversation/topic.

        Preserves the original ``created_at`` on update and always refreshes
        ``updated_at``.
        """
        with self._lock:
            key = self._key(channel_name, chat_id, topic_id)
            now = time.time()
            existing = self._data.get(key)
            self._data[key] = {
                "thread_id": thread_id,
                "user_id": user_id,
                "created_at": existing["created_at"] if existing else now,
                "updated_at": now,
            }
            self._save()

    def remove(self, channel_name: str, chat_id: str, topic_id: str | None = None) -> bool:
        """Remove a mapping.

        If ``topic_id`` is provided, only that specific conversation/topic mapping is removed.
        If ``topic_id`` is omitted, all mappings whose key starts with
        ``"<channel_name>:<chat_id>"`` (including topic-specific ones) are removed.

        Returns True if at least one mapping was removed.
        """
        with self._lock:
            # Remove a specific conversation/topic mapping.
            if topic_id is not None:
                key = self._key(channel_name, chat_id, topic_id)
                if key in self._data:
                    del self._data[key]
                    self._save()
                    return True
                return False

            # Remove all mappings for this channel/chat_id (base and any
            # topic-specific keys). The ":" suffix prevents "tg:1" from
            # also matching "tg:12".
            prefix = self._key(channel_name, chat_id)
            keys_to_delete = [k for k in self._data if k == prefix or k.startswith(prefix + ":")]
            if not keys_to_delete:
                return False

            for k in keys_to_delete:
                del self._data[k]
            self._save()
            return True

    def list_entries(self, channel_name: str | None = None) -> list[dict[str, Any]]:
        """List all stored mappings, optionally filtered by channel.

        Each item carries ``channel_name``, ``chat_id``, the stored entry
        fields, and ``topic_id`` when the key is topic-scoped.
        """
        with self._lock:
            results = []
            for key, entry in self._data.items():
                # Keys have 2 or 3 colon-separated segments; split at most
                # twice so a topic_id containing ":" stays intact.
                parts = key.split(":", 2)
                ch = parts[0]
                chat = parts[1] if len(parts) > 1 else ""
                topic = parts[2] if len(parts) > 2 else None
                if channel_name and ch != channel_name:
                    continue
                item: dict[str, Any] = {"channel_name": ch, "chat_id": chat, **entry}
                if topic is not None:
                    item["topic_id"] = topic
                results.append(item)
            return results
|
||||
299
backend/app/channels/telegram.py
Normal file
299
backend/app/channels/telegram.py
Normal file
@@ -0,0 +1,299 @@
|
||||
"""Telegram channel — connects via long-polling (no public IP needed)."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import threading
|
||||
from typing import Any
|
||||
|
||||
from app.channels.base import Channel
|
||||
from app.channels.message_bus import InboundMessageType, MessageBus, OutboundMessage, ResolvedAttachment
|
||||
|
||||
logger = logging.getLogger(__name__)


class TelegramChannel(Channel):
    """Telegram bot channel using long-polling.

    Configuration keys (in ``config.yaml`` under ``channels.telegram``):
    - ``bot_token``: Telegram Bot API token (from @BotFather).
    - ``allowed_users``: (optional) List of allowed Telegram user IDs. Empty = allow all.
    """

    def __init__(self, bus: MessageBus, config: dict[str, Any]) -> None:
        super().__init__(name="telegram", bus=bus, config=config)
        # python-telegram-bot Application, created in start() (None until then).
        self._application = None
        # Polling runs on a dedicated thread with its own event loop
        # (_tg_loop); inbound messages are handed back to _main_loop.
        self._thread: threading.Thread | None = None
        self._tg_loop: asyncio.AbstractEventLoop | None = None
        self._main_loop: asyncio.AbstractEventLoop | None = None
        self._allowed_users: set[int] = set()
        # Config values may be strings or ints; coerce, skipping junk.
        for uid in config.get("allowed_users", []):
            try:
                self._allowed_users.add(int(uid))
            except (ValueError, TypeError):
                pass
        # chat_id -> last sent message_id for threaded replies
        self._last_bot_message: dict[str, int] = {}

    async def start(self) -> None:
        if self._running:
            return

        try:
            from telegram.ext import ApplicationBuilder, CommandHandler, MessageHandler, filters
        except ImportError:
            logger.error("python-telegram-bot is not installed. Install it with: uv add python-telegram-bot")
            return

        bot_token = self.config.get("bot_token", "")
        if not bot_token:
            logger.error("Telegram channel requires bot_token")
            return

        # NOTE(review): asyncio.get_event_loop() is deprecated outside a
        # running loop; presumably start() runs inside the service's loop —
        # confirm.
        self._main_loop = asyncio.get_event_loop()
        self._running = True
        self.bus.subscribe_outbound(self._on_outbound)

        # Build the application
        app = ApplicationBuilder().token(bot_token).build()

        # Command handlers
        app.add_handler(CommandHandler("start", self._cmd_start))
        app.add_handler(CommandHandler("new", self._cmd_generic))
        app.add_handler(CommandHandler("status", self._cmd_generic))
        app.add_handler(CommandHandler("models", self._cmd_generic))
        app.add_handler(CommandHandler("memory", self._cmd_generic))
        app.add_handler(CommandHandler("help", self._cmd_generic))

        # General message handler
        app.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, self._on_text))

        self._application = app

        # Run polling in a dedicated thread with its own event loop
        self._thread = threading.Thread(target=self._run_polling, daemon=True)
        self._thread.start()
        logger.info("Telegram channel started")

    async def stop(self) -> None:
        self._running = False
        self.bus.unsubscribe_outbound(self._on_outbound)
        # Stopping the polling loop lets _run_polling fall through to its
        # shutdown sequence in the finally block.
        if self._tg_loop and self._tg_loop.is_running():
            self._tg_loop.call_soon_threadsafe(self._tg_loop.stop)
        if self._thread:
            self._thread.join(timeout=10)
            self._thread = None
        self._application = None
        logger.info("Telegram channel stopped")

    async def send(self, msg: OutboundMessage, *, _max_retries: int = 3) -> None:
        """Send a text message, retrying with exponential backoff.

        Replies to the bot's last message in the chat (when known) so the
        conversation reads as a thread. Re-raises the last exception after
        exhausting retries; no-op if start() never completed.
        """
        if not self._application:
            return

        try:
            chat_id = int(msg.chat_id)
        except (ValueError, TypeError):
            logger.error("Invalid Telegram chat_id: %s", msg.chat_id)
            return

        kwargs: dict[str, Any] = {"chat_id": chat_id, "text": msg.text}

        # Reply to the last bot message in this chat for threading
        reply_to = self._last_bot_message.get(msg.chat_id)
        if reply_to:
            kwargs["reply_to_message_id"] = reply_to

        bot = self._application.bot
        last_exc: Exception | None = None
        for attempt in range(_max_retries):
            try:
                sent = await bot.send_message(**kwargs)
                # Remember our own message id so the next send can thread.
                self._last_bot_message[msg.chat_id] = sent.message_id
                return
            except Exception as exc:
                last_exc = exc
                if attempt < _max_retries - 1:
                    delay = 2**attempt  # 1s, 2s
                    logger.warning(
                        "[Telegram] send failed (attempt %d/%d), retrying in %ds: %s",
                        attempt + 1,
                        _max_retries,
                        delay,
                        exc,
                    )
                    await asyncio.sleep(delay)

        logger.error("[Telegram] send failed after %d attempts: %s", _max_retries, last_exc)
        raise last_exc  # type: ignore[misc]

    async def send_file(self, msg: OutboundMessage, attachment: ResolvedAttachment) -> bool:
        """Send one attachment as a photo (small images) or document.

        Returns True on success; all failures are logged and return False.
        """
        if not self._application:
            return False

        try:
            chat_id = int(msg.chat_id)
        except (ValueError, TypeError):
            logger.error("[Telegram] Invalid chat_id: %s", msg.chat_id)
            return False

        # Telegram limits: 10MB for photos, 50MB for documents
        if attachment.size > 50 * 1024 * 1024:
            logger.warning("[Telegram] file too large (%d bytes), skipping: %s", attachment.size, attachment.filename)
            return False

        bot = self._application.bot
        reply_to = self._last_bot_message.get(msg.chat_id)

        try:
            if attachment.is_image and attachment.size <= 10 * 1024 * 1024:
                with open(attachment.actual_path, "rb") as f:
                    kwargs: dict[str, Any] = {"chat_id": chat_id, "photo": f}
                    if reply_to:
                        kwargs["reply_to_message_id"] = reply_to
                    sent = await bot.send_photo(**kwargs)
            else:
                from telegram import InputFile

                with open(attachment.actual_path, "rb") as f:
                    input_file = InputFile(f, filename=attachment.filename)
                    kwargs = {"chat_id": chat_id, "document": input_file}
                    if reply_to:
                        kwargs["reply_to_message_id"] = reply_to
                    sent = await bot.send_document(**kwargs)

            self._last_bot_message[msg.chat_id] = sent.message_id
            logger.info("[Telegram] file sent: %s to chat=%s", attachment.filename, msg.chat_id)
            return True
        except Exception:
            logger.exception("[Telegram] failed to send file: %s", attachment.filename)
            return False

    # -- helpers -----------------------------------------------------------

    async def _send_running_reply(self, chat_id: str, reply_to_message_id: int) -> None:
        """Send a 'Working on it...' reply to the user's message."""
        if not self._application:
            return
        try:
            bot = self._application.bot
            await bot.send_message(
                chat_id=int(chat_id),
                text="Working on it...",
                reply_to_message_id=reply_to_message_id,
            )
            logger.info("[Telegram] 'Working on it...' reply sent in chat=%s", chat_id)
        except Exception:
            logger.exception("[Telegram] failed to send running reply in chat=%s", chat_id)

    # -- internal ----------------------------------------------------------

    def _run_polling(self) -> None:
        """Run telegram polling in a dedicated thread."""
        self._tg_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self._tg_loop)
        try:
            # Cannot use run_polling() because it calls add_signal_handler(),
            # which only works in the main thread. Instead, manually
            # initialize the application and start the updater.
            self._tg_loop.run_until_complete(self._application.initialize())
            self._tg_loop.run_until_complete(self._application.start())
            self._tg_loop.run_until_complete(self._application.updater.start_polling())
            self._tg_loop.run_forever()
        except Exception:
            # Suppress the error if stop() intentionally tore the loop down.
            if self._running:
                logger.exception("Telegram polling error")
        finally:
            # Graceful shutdown
            try:
                if self._application.updater.running:
                    self._tg_loop.run_until_complete(self._application.updater.stop())
                self._tg_loop.run_until_complete(self._application.stop())
                self._tg_loop.run_until_complete(self._application.shutdown())
            except Exception:
                logger.exception("Error during Telegram shutdown")

    def _check_user(self, user_id: int) -> bool:
        """True when the user may talk to the bot (empty allow-list = everyone)."""
        if not self._allowed_users:
            return True
        return user_id in self._allowed_users

    async def _cmd_start(self, update, context) -> None:
        """Handle /start command."""
        if not self._check_user(update.effective_user.id):
            return
        await update.message.reply_text("Welcome to DeerFlow! Send me a message to start a conversation.\nType /help for available commands.")

    async def _cmd_generic(self, update, context) -> None:
        """Forward slash commands to the channel manager."""
        if not self._check_user(update.effective_user.id):
            return

        text = update.message.text
        chat_id = str(update.effective_chat.id)
        user_id = str(update.effective_user.id)
        msg_id = str(update.message.message_id)

        # Use the same topic_id logic as _on_text so that commands
        # like /new target the correct thread mapping.
        if update.effective_chat.type == "private":
            topic_id = None
        else:
            reply_to = update.message.reply_to_message
            if reply_to:
                topic_id = str(reply_to.message_id)
            else:
                topic_id = msg_id

        inbound = self._make_inbound(
            chat_id=chat_id,
            user_id=user_id,
            text=text,
            msg_type=InboundMessageType.COMMAND,
            thread_ts=msg_id,
        )
        inbound.topic_id = topic_id

        # Handlers run on the polling thread's loop; hop back to the
        # service loop to acknowledge and publish.
        if self._main_loop and self._main_loop.is_running():
            asyncio.run_coroutine_threadsafe(self._send_running_reply(chat_id, update.message.message_id), self._main_loop)
            asyncio.run_coroutine_threadsafe(self.bus.publish_inbound(inbound), self._main_loop)

    async def _on_text(self, update, context) -> None:
        """Handle regular text messages."""
        if not self._check_user(update.effective_user.id):
            return

        text = update.message.text.strip()
        if not text:
            return

        chat_id = str(update.effective_chat.id)
        user_id = str(update.effective_user.id)
        msg_id = str(update.message.message_id)

        # topic_id determines which DeerFlow thread the message maps to.
        # In private chats, use None so that all messages share a single
        # thread (the store key becomes "channel:chat_id").
        # In group chats, use the reply-to message id or the current
        # message id to keep separate conversation threads.
        if update.effective_chat.type == "private":
            topic_id = None
        else:
            reply_to = update.message.reply_to_message
            if reply_to:
                topic_id = str(reply_to.message_id)
            else:
                topic_id = msg_id

        inbound = self._make_inbound(
            chat_id=chat_id,
            user_id=user_id,
            text=text,
            msg_type=InboundMessageType.CHAT,
            thread_ts=msg_id,
        )
        inbound.topic_id = topic_id

        # Handlers run on the polling thread's loop; hop back to the
        # service loop to acknowledge and publish.
        if self._main_loop and self._main_loop.is_running():
            asyncio.run_coroutine_threadsafe(self._send_running_reply(chat_id, update.message.message_id), self._main_loop)
            asyncio.run_coroutine_threadsafe(self.bus.publish_inbound(inbound), self._main_loop)
|
||||
Reference in New Issue
Block a user