mirror of
https://gitee.com/wanwujie/deer-flow
synced 2026-04-13 18:24:45 +08:00
feat: add IM channels for Feishu, Slack, and Telegram (#1010)
* feat: add IM channels system for Feishu, Slack, and Telegram integration Bridge external messaging platforms to DeerFlow via LangGraph Server with async message bus, thread management, and per-channel configuration. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * fix: address review comments on IM channels system Fix topic_id handling in store remove/list_entries and manager commands, correct Telegram reply threading, remove unused imports/variables, update docstrings and docs to match implementation, and prevent config mutation. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * update skill creator * fix im reply text * fix comments --------- Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -243,6 +243,32 @@ Proxied through nginx: `/api/langgraph/*` → LangGraph, all other `/api/*` →
|
||||
- Config values starting with `$` resolved as environment variables
|
||||
- Missing provider modules surface actionable install hints from reflection resolvers (for example `uv add langchain-google-genai`)
|
||||
|
||||
### IM Channels System (`src/channels/`)
|
||||
|
||||
Bridges external messaging platforms (Feishu, Slack, Telegram) to the DeerFlow agent via the LangGraph Server.
|
||||
|
||||
**Architecture**: Channels communicate with the LangGraph Server through `langgraph-sdk` HTTP client (same as the frontend), ensuring threads are created and managed server-side.
|
||||
|
||||
**Components**:
|
||||
- `message_bus.py` - Async pub/sub hub (`InboundMessage` → queue → dispatcher; `OutboundMessage` → callbacks → channels)
|
||||
- `store.py` - JSON-file persistence mapping `channel_name:chat_id[:topic_id]` → `thread_id` (keys are `channel:chat` for root conversations and `channel:chat:topic` for threaded conversations)
|
||||
- `manager.py` - Core dispatcher: creates threads via `client.threads.create()`, sends messages via `client.runs.wait()`, routes commands
|
||||
- `base.py` - Abstract `Channel` base class (start/stop/send lifecycle)
|
||||
- `service.py` - Manages lifecycle of all configured channels from `config.yaml`
|
||||
- `slack.py` / `feishu.py` / `telegram.py` - Platform-specific implementations
|
||||
|
||||
**Message Flow**:
|
||||
1. External platform → Channel impl → `MessageBus.publish_inbound()`
|
||||
2. `ChannelManager._dispatch_loop()` consumes from queue
|
||||
3. For chat: look up/create thread on LangGraph Server → `runs.wait()` → extract response → publish outbound
|
||||
4. For commands (`/new`, `/status`, `/models`, `/memory`, `/help`): handle locally or query Gateway API
|
||||
5. Outbound → channel callbacks → platform reply
|
||||
|
||||
**Configuration** (`config.yaml` → `channels`):
|
||||
- `langgraph_url` - LangGraph Server URL (default: `http://localhost:2024`)
|
||||
- `gateway_url` - Gateway API URL for auxiliary commands (default: `http://localhost:8001`)
|
||||
- Per-channel configs: `feishu` (app_id, app_secret), `slack` (bot_token, app_token), `telegram` (bot_token)
|
||||
|
||||
### Memory System (`src/agents/memory/`)
|
||||
|
||||
**Components**:
|
||||
|
||||
@@ -20,6 +20,13 @@
|
||||
- [ ] Add metrics and monitoring
|
||||
- [ ] Support for more document formats in upload
|
||||
- [ ] Skill marketplace / remote skill installation
|
||||
- [ ] Optimize async concurrency in agent hot path (IM channels multi-task scenario)
|
||||
- Replace `time.sleep(5)` with `asyncio.sleep()` in `src/tools/builtins/task_tool.py` (subagent polling)
|
||||
- Replace `subprocess.run()` with `asyncio.create_subprocess_shell()` in `src/sandbox/local/local_sandbox.py`
|
||||
- Replace sync `requests` with `httpx.AsyncClient` in community tools (tavily, jina_ai, firecrawl, infoquest, image_search)
|
||||
- Replace sync `model.invoke()` with async `model.ainvoke()` in title_middleware and memory updater
|
||||
- Consider `asyncio.to_thread()` wrapper for remaining blocking file I/O
|
||||
- For production: use `langgraph up` (multi-worker) instead of `langgraph dev` (single-worker)
|
||||
|
||||
## Resolved Issues
|
||||
|
||||
|
||||
@@ -34,6 +34,11 @@ dependencies = [
|
||||
"duckdb>=1.4.4",
|
||||
"langchain-google-genai>=4.2.1",
|
||||
"langgraph-checkpoint-sqlite>=3.0.3",
|
||||
"lark-oapi>=1.4.0",
|
||||
"slack-sdk>=3.33.0",
|
||||
"python-telegram-bot>=21.0",
|
||||
"langgraph-sdk>=0.1.51",
|
||||
"markdown-to-mrkdwn>=0.3.1",
|
||||
]
|
||||
|
||||
[dependency-groups]
|
||||
|
||||
@@ -257,9 +257,7 @@ def format_conversation_for_update(messages: list[Any]) -> str:
|
||||
# ephemeral file path info into long-term memory. Skip the turn entirely
|
||||
# when nothing remains after stripping (upload-only message).
|
||||
if role == "human":
|
||||
content = re.sub(
|
||||
r"<uploaded_files>[\s\S]*?</uploaded_files>\n*", "", str(content)
|
||||
).strip()
|
||||
content = re.sub(r"<uploaded_files>[\s\S]*?</uploaded_files>\n*", "", str(content)).strip()
|
||||
if not content:
|
||||
continue
|
||||
|
||||
|
||||
@@ -168,11 +168,7 @@ def _strip_upload_mentions_from_memory(memory_data: dict[str, Any]) -> dict[str,
|
||||
# Also remove any facts that describe upload events
|
||||
facts = memory_data.get("facts", [])
|
||||
if facts:
|
||||
memory_data["facts"] = [
|
||||
f
|
||||
for f in facts
|
||||
if not _UPLOAD_SENTENCE_RE.search(f.get("content", ""))
|
||||
]
|
||||
memory_data["facts"] = [f for f in facts if not _UPLOAD_SENTENCE_RE.search(f.get("content", ""))]
|
||||
|
||||
return memory_data
|
||||
|
||||
|
||||
@@ -40,9 +40,7 @@ def _filter_messages_for_memory(messages: list[Any]) -> list[Any]:
|
||||
Returns:
|
||||
Filtered list containing only user inputs and final assistant responses.
|
||||
"""
|
||||
_UPLOAD_BLOCK_RE = re.compile(
|
||||
r"<uploaded_files>[\s\S]*?</uploaded_files>\n*", re.IGNORECASE
|
||||
)
|
||||
_UPLOAD_BLOCK_RE = re.compile(r"<uploaded_files>[\s\S]*?</uploaded_files>\n*", re.IGNORECASE)
|
||||
|
||||
filtered = []
|
||||
skip_next_ai = False
|
||||
@@ -52,9 +50,7 @@ def _filter_messages_for_memory(messages: list[Any]) -> list[Any]:
|
||||
if msg_type == "human":
|
||||
content = getattr(msg, "content", "")
|
||||
if isinstance(content, list):
|
||||
content = " ".join(
|
||||
p.get("text", "") for p in content if isinstance(p, dict)
|
||||
)
|
||||
content = " ".join(p.get("text", "") for p in content if isinstance(p, dict))
|
||||
content_str = str(content)
|
||||
if "<uploaded_files>" in content_str:
|
||||
# Strip the ephemeral upload block; keep the user's real question.
|
||||
|
||||
16
backend/src/channels/__init__.py
Normal file
16
backend/src/channels/__init__.py
Normal file
@@ -0,0 +1,16 @@
|
||||
"""IM Channel integration for DeerFlow.
|
||||
|
||||
Provides a pluggable channel system that connects external messaging platforms
|
||||
(Feishu/Lark, Slack, Telegram) to the DeerFlow agent via the ChannelManager,
|
||||
which uses ``langgraph-sdk`` to communicate with the underlying LangGraph Server.
|
||||
"""
|
||||
|
||||
from src.channels.base import Channel
|
||||
from src.channels.message_bus import InboundMessage, MessageBus, OutboundMessage
|
||||
|
||||
__all__ = [
|
||||
"Channel",
|
||||
"InboundMessage",
|
||||
"MessageBus",
|
||||
"OutboundMessage",
|
||||
]
|
||||
88
backend/src/channels/base.py
Normal file
88
backend/src/channels/base.py
Normal file
@@ -0,0 +1,88 @@
|
||||
"""Abstract base class for IM channels."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Any
|
||||
|
||||
from src.channels.message_bus import InboundMessage, InboundMessageType, MessageBus, OutboundMessage
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Channel(ABC):
    """Abstract contract shared by every IM channel implementation.

    A concrete channel bridges one external messaging platform to the
    message bus:

    1. Inbound: platform events are wrapped as ``InboundMessage`` and
       published on the bus.
    2. Outbound: the channel's ``_on_outbound`` callback is registered
       with the bus and relays replies addressed to it back to the
       platform.

    Subclasses must implement ``start``, ``stop``, and ``send``.
    """

    def __init__(self, name: str, bus: MessageBus, config: dict[str, Any]) -> None:
        self.name = name        # unique channel identifier, e.g. "feishu"
        self.bus = bus          # shared pub/sub hub
        self.config = config    # per-channel settings from config.yaml
        self._running = False   # toggled by subclass start()/stop()

    @property
    def is_running(self) -> bool:
        """Whether the channel is currently active."""
        return self._running

    # -- lifecycle ---------------------------------------------------------

    @abstractmethod
    async def start(self) -> None:
        """Start listening for messages from the external platform."""

    @abstractmethod
    async def stop(self) -> None:
        """Gracefully stop the channel."""

    # -- outbound ----------------------------------------------------------

    @abstractmethod
    async def send(self, msg: OutboundMessage) -> None:
        """Deliver *msg* to the external platform.

        Implementations should route the reply with ``msg.chat_id`` and
        ``msg.thread_ts`` so it lands in the correct conversation/thread.
        """

    # -- helpers -----------------------------------------------------------

    def _make_inbound(
        self,
        chat_id: str,
        user_id: str,
        text: str,
        *,
        msg_type: InboundMessageType = InboundMessageType.CHAT,
        thread_ts: str | None = None,
        files: list[dict[str, Any]] | None = None,
        metadata: dict[str, Any] | None = None,
    ) -> InboundMessage:
        """Build an InboundMessage pre-filled with this channel's name."""
        return InboundMessage(
            channel_name=self.name,
            chat_id=chat_id,
            user_id=user_id,
            text=text,
            msg_type=msg_type,
            thread_ts=thread_ts,
            files=files or [],
            metadata=metadata or {},
        )

    async def _on_outbound(self, msg: OutboundMessage) -> None:
        """Bus callback: forward *msg* via ``send`` when it targets this channel.

        Messages addressed to other channels are ignored; send failures are
        logged rather than propagated so one bad reply cannot break the bus.
        """
        if msg.channel_name != self.name:
            return
        try:
            await self.send(msg)
        except Exception:
            logger.exception("Failed to send outbound message on channel %s", self.name)
|
||||
301
backend/src/channels/feishu.py
Normal file
301
backend/src/channels/feishu.py
Normal file
@@ -0,0 +1,301 @@
|
||||
"""Feishu/Lark channel — connects to Feishu via WebSocket (no public IP needed)."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import threading
|
||||
from typing import Any
|
||||
|
||||
from src.channels.base import Channel
|
||||
from src.channels.message_bus import InboundMessageType, MessageBus, OutboundMessage
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class FeishuChannel(Channel):
    """Feishu/Lark IM channel using the ``lark-oapi`` WebSocket client.

    Configuration keys (in ``config.yaml`` under ``channels.feishu``):
    - ``app_id``: Feishu app ID.
    - ``app_secret``: Feishu app secret.
    - ``verification_token``: (optional) Event verification token.

    The channel uses WebSocket long-connection mode so no public IP is required.

    Message flow:
    1. User sends a message → bot adds "OK" emoji reaction
    2. Bot replies in thread: "Working on it..."
    3. Agent processes the message and returns a result
    4. Bot replies in thread with the result
    5. Bot adds "DONE" emoji reaction to the original message
    """

    def __init__(self, bus: MessageBus, config: dict[str, Any]) -> None:
        super().__init__(name="feishu", bus=bus, config=config)
        # Dedicated thread running the lark WS client (see _run_ws).
        self._thread: threading.Thread | None = None
        # Main-thread event loop, used by run_coroutine_threadsafe from
        # the lark callback thread.
        self._main_loop: asyncio.AbstractEventLoop | None = None
        self._api_client = None
        # lark request classes are resolved lazily in start() so importing
        # this module does not require lark-oapi to be installed.
        self._CreateMessageReactionRequest = None
        self._CreateMessageReactionRequestBody = None
        self._Emoji = None

    async def start(self) -> None:
        """Connect to Feishu: import the SDK, build the API client, and
        spawn the WebSocket listener thread. No-op if already running."""
        if self._running:
            return

        try:
            import lark_oapi as lark
            from lark_oapi.api.im.v1 import (
                CreateMessageReactionRequest,
                CreateMessageReactionRequestBody,
                CreateMessageRequest,
                CreateMessageRequestBody,
                Emoji,
                ReplyMessageRequest,
                ReplyMessageRequestBody,
            )
        except ImportError:
            logger.error("lark-oapi is not installed. Install it with: uv add lark-oapi")
            return

        # Keep SDK classes on the instance so send()/_add_reaction() can
        # use them without re-importing.
        self._lark = lark
        self._CreateMessageRequest = CreateMessageRequest
        self._CreateMessageRequestBody = CreateMessageRequestBody
        self._ReplyMessageRequest = ReplyMessageRequest
        self._ReplyMessageRequestBody = ReplyMessageRequestBody
        self._CreateMessageReactionRequest = CreateMessageReactionRequest
        self._CreateMessageReactionRequestBody = CreateMessageReactionRequestBody
        self._Emoji = Emoji

        app_id = self.config.get("app_id", "")
        app_secret = self.config.get("app_secret", "")

        if not app_id or not app_secret:
            logger.error("Feishu channel requires app_id and app_secret")
            return

        self._api_client = lark.Client.builder().app_id(app_id).app_secret(app_secret).build()
        # start() is a coroutine, so a loop is guaranteed to be running;
        # get_running_loop() is the non-deprecated way to capture it
        # (get_event_loop() inside a coroutine is deprecated).
        self._main_loop = asyncio.get_running_loop()

        self._running = True
        self.bus.subscribe_outbound(self._on_outbound)

        # Both ws.Client construction and start() must happen in a dedicated
        # thread with its own event loop. lark-oapi caches the running loop
        # at construction time and later calls loop.run_until_complete(),
        # which conflicts with an already-running uvloop.
        self._thread = threading.Thread(
            target=self._run_ws,
            args=(app_id, app_secret),
            daemon=True,
        )
        self._thread.start()
        logger.info("Feishu channel started")

    def _run_ws(self, app_id: str, app_secret: str) -> None:
        """Construct and run the lark WS client in a thread with a fresh event loop.

        The lark-oapi SDK captures a module-level event loop at import time
        (``lark_oapi.ws.client.loop``). When uvicorn uses uvloop, that
        captured loop is the *main* thread's uvloop — which is already
        running, so ``loop.run_until_complete()`` inside ``Client.start()``
        raises ``RuntimeError``.

        We work around this by creating a plain asyncio event loop for this
        thread and patching the SDK's module-level reference before calling
        ``start()``.
        """
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            import lark_oapi as lark
            import lark_oapi.ws.client as _ws_client_mod

            # Replace the SDK's module-level loop so Client.start() uses
            # this thread's (non-running) event loop instead of the main
            # thread's uvloop.
            _ws_client_mod.loop = loop

            event_handler = (
                lark.EventDispatcherHandler.builder("", "")
                .register_p2_im_message_receive_v1(self._on_message)
                .build()
            )
            ws_client = lark.ws.Client(
                app_id=app_id,
                app_secret=app_secret,
                event_handler=event_handler,
                log_level=lark.LogLevel.INFO,
            )
            ws_client.start()
        except Exception:
            # Suppress the (expected) error raised when stop() tears the
            # connection down; anything while running is a real failure.
            if self._running:
                logger.exception("Feishu WebSocket error")

    async def stop(self) -> None:
        """Stop the channel: unsubscribe from the bus and join the WS thread."""
        self._running = False
        self.bus.unsubscribe_outbound(self._on_outbound)
        if self._thread:
            # Daemon thread; bounded join so shutdown cannot hang.
            self._thread.join(timeout=5)
            self._thread = None
        logger.info("Feishu channel stopped")

    async def send(self, msg: OutboundMessage, *, _max_retries: int = 3) -> None:
        """Send *msg* to Feishu as an interactive (markdown) card.

        Replies in-thread when ``msg.thread_ts`` (the originating message
        id) is set, otherwise posts a new message to ``msg.chat_id``.
        Retries with exponential backoff and re-raises the last error after
        ``_max_retries`` failed attempts.
        """
        if not self._api_client:
            logger.warning("[Feishu] send called but no api_client available")
            return

        logger.info(
            "[Feishu] sending reply: chat_id=%s, thread_ts=%s, text_len=%d",
            msg.chat_id,
            msg.thread_ts,
            len(msg.text),
        )
        content = self._build_card_content(msg.text)

        last_exc: Exception | None = None
        for attempt in range(_max_retries):
            try:
                if msg.thread_ts:
                    # Reply in thread (话题)
                    request = (
                        self._ReplyMessageRequest.builder()
                        .message_id(msg.thread_ts)
                        .request_body(
                            self._ReplyMessageRequestBody.builder()
                            .msg_type("interactive")
                            .content(content)
                            .reply_in_thread(True)
                            .build()
                        )
                        .build()
                    )
                    # SDK call is blocking — run it off the event loop.
                    await asyncio.to_thread(self._api_client.im.v1.message.reply, request)
                else:
                    # Send new message
                    request = (
                        self._CreateMessageRequest.builder()
                        .receive_id_type("chat_id")
                        .request_body(
                            self._CreateMessageRequestBody.builder()
                            .receive_id(msg.chat_id)
                            .msg_type("interactive")
                            .content(content)
                            .build()
                        )
                        .build()
                    )
                    await asyncio.to_thread(self._api_client.im.v1.message.create, request)

                # Add "DONE" reaction to the original message on final reply
                if msg.is_final and msg.thread_ts:
                    await self._add_reaction(msg.thread_ts, "DONE")

                return  # success
            except Exception as exc:
                last_exc = exc
                if attempt < _max_retries - 1:
                    delay = 2**attempt  # 1s, 2s
                    logger.warning(
                        "[Feishu] send failed (attempt %d/%d), retrying in %ds: %s",
                        attempt + 1,
                        _max_retries,
                        delay,
                        exc,
                    )
                    await asyncio.sleep(delay)

        logger.error("[Feishu] send failed after %d attempts: %s", _max_retries, last_exc)
        raise last_exc  # type: ignore[misc]

    # -- message formatting ------------------------------------------------

    @staticmethod
    def _build_card_content(text: str) -> str:
        """Build a Feishu interactive card with markdown content.

        Feishu's interactive card format natively renders markdown, including
        headers, bold/italic, code blocks, lists, and links.
        """
        card = {
            "config": {"wide_screen_mode": True},
            "elements": [{"tag": "markdown", "content": text}],
        }
        return json.dumps(card)

    # -- reaction helpers --------------------------------------------------

    async def _add_reaction(self, message_id: str, emoji_type: str = "THUMBSUP") -> None:
        """Add an emoji reaction to a message (best-effort; errors are logged)."""
        if not self._api_client or not self._CreateMessageReactionRequest:
            return
        try:
            request = (
                self._CreateMessageReactionRequest.builder()
                .message_id(message_id)
                .request_body(
                    self._CreateMessageReactionRequestBody.builder()
                    .reaction_type(self._Emoji.builder().emoji_type(emoji_type).build())
                    .build()
                )
                .build()
            )
            await asyncio.to_thread(self._api_client.im.v1.message_reaction.create, request)
            logger.info("[Feishu] reaction '%s' added to message %s", emoji_type, message_id)
        except Exception:
            logger.exception("[Feishu] failed to add reaction '%s' to message %s", emoji_type, message_id)

    async def _send_running_reply(self, message_id: str) -> None:
        """Reply to a message in-thread with a 'Working on it...' hint."""
        if not self._api_client:
            return
        try:
            content = self._build_card_content("Working on it...")
            request = (
                self._ReplyMessageRequest.builder()
                .message_id(message_id)
                .request_body(
                    self._ReplyMessageRequestBody.builder()
                    .msg_type("interactive")
                    .content(content)
                    .reply_in_thread(True)
                    .build()
                )
                .build()
            )
            await asyncio.to_thread(self._api_client.im.v1.message.reply, request)
            # Log text matches the actual reply text above (was "......").
            logger.info("[Feishu] 'Working on it...' reply sent for message %s", message_id)
        except Exception:
            logger.exception("[Feishu] failed to send running reply for message %s", message_id)

    # -- internal ----------------------------------------------------------

    @staticmethod
    def _log_future_error(fut, name: str, msg_id: str) -> None:
        """Callback for run_coroutine_threadsafe futures to surface errors."""
        try:
            exc = fut.exception()
            if exc:
                logger.error("[Feishu] %s failed for msg_id=%s: %s", name, msg_id, exc)
        except Exception:
            # fut.exception() itself can raise (e.g. CancelledError); a
            # logging callback must never propagate.
            pass

    def _on_message(self, event) -> None:
        """Called by lark-oapi when a message is received (runs in lark thread)."""
        try:
            logger.info("[Feishu] raw event received: type=%s", type(event).__name__)
            message = event.event.message
            chat_id = message.chat_id
            msg_id = message.message_id
            sender_id = event.event.sender.sender_id.open_id

            # root_id is set when the message is a reply within a Feishu thread.
            # Use it as topic_id so all replies share the same DeerFlow thread.
            root_id = getattr(message, "root_id", None) or None

            # Parse message content
            content = json.loads(message.content)
            text = content.get("text", "").strip()
            logger.info(
                "[Feishu] parsed message: chat_id=%s, msg_id=%s, root_id=%s, sender=%s, text=%r",
                chat_id,
                msg_id,
                root_id,
                sender_id,
                text[:100] if text else "",
            )

            if not text:
                logger.info("[Feishu] empty text, ignoring message")
                return

            # Check if it's a command
            if text.startswith("/"):
                msg_type = InboundMessageType.COMMAND
            else:
                msg_type = InboundMessageType.CHAT

            # topic_id: use root_id for replies (same topic), msg_id for new messages (new topic)
            topic_id = root_id or msg_id

            inbound = self._make_inbound(
                chat_id=chat_id,
                user_id=sender_id,
                text=text,
                msg_type=msg_type,
                thread_ts=msg_id,
                metadata={"message_id": msg_id, "root_id": root_id},
            )
            inbound.topic_id = topic_id

            # Schedule on the async event loop — we are in the lark thread,
            # so run_coroutine_threadsafe is required.
            if self._main_loop and self._main_loop.is_running():
                logger.info("[Feishu] publishing inbound message to bus (type=%s, msg_id=%s)", msg_type.value, msg_id)
                # Schedule all coroutines and attach error logging to futures
                for name, coro in [
                    ("add_reaction", self._add_reaction(msg_id, "OK")),
                    ("send_running_reply", self._send_running_reply(msg_id)),
                    ("publish_inbound", self.bus.publish_inbound(inbound)),
                ]:
                    fut = asyncio.run_coroutine_threadsafe(coro, self._main_loop)
                    fut.add_done_callback(lambda f, n=name, mid=msg_id: self._log_future_error(f, n, mid))
            else:
                logger.warning("[Feishu] main loop not running, cannot publish inbound message")
        except Exception:
            logger.exception("[Feishu] error processing message")
|
||||
367
backend/src/channels/manager.py
Normal file
367
backend/src/channels/manager.py
Normal file
@@ -0,0 +1,367 @@
|
||||
"""ChannelManager — consumes inbound messages and dispatches them to the DeerFlow agent via LangGraph Server."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
|
||||
from src.channels.message_bus import InboundMessage, InboundMessageType, MessageBus, OutboundMessage
|
||||
from src.channels.store import ChannelStore
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
DEFAULT_LANGGRAPH_URL = "http://localhost:2024"
|
||||
DEFAULT_GATEWAY_URL = "http://localhost:8001"
|
||||
DEFAULT_ASSISTANT_ID = "lead_agent"
|
||||
|
||||
|
||||
def _extract_response_text(result: dict | list) -> str:
|
||||
"""Extract the last AI message text from a LangGraph runs.wait result.
|
||||
|
||||
``runs.wait`` returns the final state dict which contains a ``messages``
|
||||
list. Each message is a dict with at least ``type`` and ``content``.
|
||||
|
||||
Handles special cases:
|
||||
- Regular AI text responses
|
||||
- Clarification interrupts (``ask_clarification`` tool messages)
|
||||
- AI messages with tool_calls but no text content
|
||||
"""
|
||||
if isinstance(result, list):
|
||||
messages = result
|
||||
elif isinstance(result, dict):
|
||||
messages = result.get("messages", [])
|
||||
else:
|
||||
return ""
|
||||
|
||||
# Walk backwards to find usable response text
|
||||
for msg in reversed(messages):
|
||||
if not isinstance(msg, dict):
|
||||
continue
|
||||
|
||||
msg_type = msg.get("type")
|
||||
|
||||
# Check for tool messages from ask_clarification (interrupt case)
|
||||
if msg_type == "tool" and msg.get("name") == "ask_clarification":
|
||||
content = msg.get("content", "")
|
||||
if isinstance(content, str) and content:
|
||||
return content
|
||||
|
||||
# Regular AI message with text content
|
||||
if msg_type == "ai":
|
||||
content = msg.get("content", "")
|
||||
if isinstance(content, str) and content:
|
||||
return content
|
||||
# content can be a list of content blocks
|
||||
if isinstance(content, list):
|
||||
parts = []
|
||||
for block in content:
|
||||
if isinstance(block, dict) and block.get("type") == "text":
|
||||
parts.append(block.get("text", ""))
|
||||
elif isinstance(block, str):
|
||||
parts.append(block)
|
||||
text = "".join(parts)
|
||||
if text:
|
||||
return text
|
||||
return ""
|
||||
|
||||
|
||||
def _extract_artifacts(result: dict | list) -> list[str]:
|
||||
"""Extract artifact paths from the last AI response cycle only.
|
||||
|
||||
Instead of reading the full accumulated ``artifacts`` state (which contains
|
||||
all artifacts ever produced in the thread), this inspects the messages after
|
||||
the last human message and collects file paths from ``present_files`` tool
|
||||
calls. This ensures only newly-produced artifacts are returned.
|
||||
"""
|
||||
if isinstance(result, list):
|
||||
messages = result
|
||||
elif isinstance(result, dict):
|
||||
messages = result.get("messages", [])
|
||||
else:
|
||||
return []
|
||||
|
||||
artifacts: list[str] = []
|
||||
for msg in reversed(messages):
|
||||
if not isinstance(msg, dict):
|
||||
continue
|
||||
# Stop at the last human message — anything before it is a previous turn
|
||||
if msg.get("type") == "human":
|
||||
break
|
||||
# Look for AI messages with present_files tool calls
|
||||
if msg.get("type") == "ai":
|
||||
for tc in msg.get("tool_calls", []):
|
||||
if isinstance(tc, dict) and tc.get("name") == "present_files":
|
||||
args = tc.get("args", {})
|
||||
paths = args.get("filepaths", [])
|
||||
if isinstance(paths, list):
|
||||
artifacts.extend(p for p in paths if isinstance(p, str))
|
||||
return artifacts
|
||||
|
||||
|
||||
def _format_artifact_text(artifacts: list[str]) -> str:
|
||||
"""Format artifact paths into a human-readable text block listing filenames."""
|
||||
import posixpath
|
||||
|
||||
filenames = [posixpath.basename(p) for p in artifacts]
|
||||
if len(filenames) == 1:
|
||||
return f"Created File: 📎 {filenames[0]}"
|
||||
return "Created Files: 📎 " + "、".join(filenames)
|
||||
|
||||
|
||||
class ChannelManager:
|
||||
"""Core dispatcher that bridges IM channels to the DeerFlow agent.
|
||||
|
||||
It reads from the MessageBus inbound queue, creates/reuses threads on
|
||||
the LangGraph Server, sends messages via ``runs.wait``, and publishes
|
||||
outbound responses back through the bus.
|
||||
"""
|
||||
|
||||
def __init__(
    self,
    bus: MessageBus,
    store: ChannelStore,
    *,
    max_concurrency: int = 5,
    langgraph_url: str = DEFAULT_LANGGRAPH_URL,
    gateway_url: str = DEFAULT_GATEWAY_URL,
    assistant_id: str = DEFAULT_ASSISTANT_ID,
) -> None:
    """Initialize the manager (no I/O happens until ``start``).

    Args:
        bus: Shared message bus supplying inbound messages and accepting
            outbound replies.
        store: Persistence layer mapping channel/chat/topic keys to
            LangGraph thread ids.
        max_concurrency: Upper bound on messages processed concurrently,
            enforced by a semaphore created in ``start``.
        langgraph_url: Base URL of the LangGraph Server.
        gateway_url: Gateway API URL used for auxiliary commands.
        assistant_id: Assistant/graph id passed to ``runs.wait``.
    """
    self.bus = bus
    self.store = store
    self._max_concurrency = max_concurrency
    self._langgraph_url = langgraph_url
    self._gateway_url = gateway_url
    self._assistant_id = assistant_id
    self._client = None  # lazy init — langgraph_sdk async client
    # Created in start() so it binds to the running event loop.
    self._semaphore: asyncio.Semaphore | None = None
    self._running = False
    # Background task running _dispatch_loop(); set by start(), cleared by stop().
    self._task: asyncio.Task | None = None
|
||||
|
||||
# -- LangGraph SDK client (lazy) ----------------------------------------
|
||||
|
||||
def _get_client(self):
    """Return the ``langgraph_sdk`` async client, creating it on first use."""
    if self._client is not None:
        return self._client
    # Imported lazily so the SDK is only required when channels are active.
    from langgraph_sdk import get_client

    self._client = get_client(url=self._langgraph_url)
    return self._client
|
||||
|
||||
# -- lifecycle ---------------------------------------------------------
|
||||
|
||||
async def start(self) -> None:
    """Spawn the background dispatch loop (no-op if already running)."""
    if self._running:
        return
    self._running = True
    # The semaphore caps how many inbound messages are handled at once.
    self._semaphore = asyncio.Semaphore(self._max_concurrency)
    self._task = asyncio.create_task(self._dispatch_loop())
    logger.info("ChannelManager started (max_concurrency=%d)", self._max_concurrency)
|
||||
|
||||
async def stop(self) -> None:
    """Cancel the dispatch loop and wait for it to wind down."""
    self._running = False
    if self._task is not None:
        self._task.cancel()
        try:
            # Await so the loop finishes unwinding before we return.
            await self._task
        except asyncio.CancelledError:
            pass
        self._task = None
    logger.info("ChannelManager stopped")
|
||||
|
||||
# -- dispatch loop -----------------------------------------------------
|
||||
|
||||
async def _dispatch_loop(self) -> None:
    """Consume inbound messages and fan each out to a handler task.

    Polls the bus with a 1-second timeout so ``self._running`` is
    re-checked regularly, letting ``stop()`` end the loop even when no
    messages arrive.
    """
    logger.info("[Manager] dispatch loop started, waiting for inbound messages")
    while self._running:
        try:
            msg = await asyncio.wait_for(self.bus.get_inbound(), timeout=1.0)
        except asyncio.TimeoutError:
            # Poll window elapsed with no message — loop to re-check _running.
            # NOTE: catch asyncio.TimeoutError, not the builtin: on
            # Python < 3.11 wait_for raises asyncio.TimeoutError, which is
            # NOT the builtin TimeoutError there (they are aliases only
            # from 3.11 on), so catching the builtin would crash the loop.
            continue
        except asyncio.CancelledError:
            break

        logger.info(
            "[Manager] received inbound: channel=%s, chat_id=%s, type=%s, text=%r",
            msg.channel_name,
            msg.chat_id,
            msg.msg_type.value,
            msg.text[:100] if msg.text else "",
        )
        # Handle concurrently; _handle_message gates on the semaphore.
        task = asyncio.create_task(self._handle_message(msg))
        # Done-callback surfaces exceptions that would otherwise be lost.
        task.add_done_callback(self._log_task_error)
|
||||
|
||||
@staticmethod
def _log_task_error(task: asyncio.Task) -> None:
    """Done-callback that logs exceptions escaping a background task."""
    if task.cancelled():
        # Cancellation is expected during shutdown — not an error.
        return
    err = task.exception()
    if err:
        logger.error("[Manager] unhandled error in message task: %s", err, exc_info=err)
|
||||
|
||||
async def _handle_message(self, msg: InboundMessage) -> None:
    """Route one inbound message while holding a concurrency slot.

    Commands and chat messages are dispatched to their respective
    handlers; any failure is logged and reported back to the user
    instead of propagating.
    """
    async with self._semaphore:
        try:
            is_command = msg.msg_type == InboundMessageType.COMMAND
            if is_command:
                await self._handle_command(msg)
            else:
                await self._handle_chat(msg)
        except Exception:
            logger.exception(
                "Error handling message from %s (chat=%s)",
                msg.channel_name,
                msg.chat_id,
            )
            await self._send_error(msg, "An internal error occurred. Please try again.")
|
||||
|
||||
# -- chat handling -----------------------------------------------------
|
||||
|
||||
async def _create_thread(self, client, msg: InboundMessage) -> str:
    """Create a new thread on the LangGraph Server and store the mapping."""
    created = await client.threads.create()
    new_thread_id = created["thread_id"]
    # Persist channel/chat(/topic) → thread mapping for later reuse.
    self.store.set_thread_id(
        msg.channel_name,
        msg.chat_id,
        new_thread_id,
        topic_id=msg.topic_id,
        user_id=msg.user_id,
    )
    logger.info(
        "[Manager] new thread created on LangGraph Server: thread_id=%s for chat_id=%s topic_id=%s",
        new_thread_id,
        msg.chat_id,
        msg.topic_id,
    )
    return new_thread_id
|
||||
|
||||
async def _handle_chat(self, msg: InboundMessage) -> None:
    """Handle a regular chat message end-to-end.

    Flow:
      1. Reuse the DeerFlow thread mapped to this chat/topic, if any.
      2. Otherwise create a new thread on the LangGraph Server.
      3. Run the agent synchronously via ``runs.wait`` and publish the
         response (plus any artifact listing) back onto the bus.
    """
    client = self._get_client()

    # Look up existing DeerFlow thread by topic_id (if present)
    thread_id = None
    if msg.topic_id:
        thread_id = self.store.get_thread_id(msg.channel_name, msg.chat_id, topic_id=msg.topic_id)
        if thread_id:
            logger.info("[Manager] reusing thread: thread_id=%s for topic_id=%s", thread_id, msg.topic_id)

    # No existing thread found — create a new one
    if thread_id is None:
        thread_id = await self._create_thread(client, msg)

    # Fix: guard the slice — the dispatcher loop already treats msg.text as
    # possibly falsy ("msg.text[:100] if msg.text else ''"), so logging here
    # must not raise on None/empty text.
    logger.info(
        "[Manager] invoking runs.wait(thread_id=%s, text=%r)",
        thread_id,
        msg.text[:100] if msg.text else "",
    )
    result = await client.runs.wait(
        thread_id,
        self._assistant_id,
        input={"messages": [{"role": "human", "content": msg.text}]},
        config={"recursion_limit": 100},
        context={
            "thread_id": thread_id,
            "thinking_enabled": True,
            "is_plan_mode": False,
            "subagent_enabled": False,
        },
    )

    response_text = _extract_response_text(result)
    artifacts = _extract_artifacts(result)

    logger.info(
        "[Manager] agent response received: thread_id=%s, response_len=%d, artifacts=%d",
        thread_id,
        len(response_text) if response_text else 0,
        len(artifacts),
    )

    # Append artifact filenames when present
    if artifacts:
        artifact_text = _format_artifact_text(artifacts)
        if response_text:
            response_text = response_text + "\n\n" + artifact_text
        else:
            response_text = artifact_text

    # Never send an empty reply back to the channel.
    if not response_text:
        response_text = "(No response from agent)"

    outbound = OutboundMessage(
        channel_name=msg.channel_name,
        chat_id=msg.chat_id,
        thread_id=thread_id,
        text=response_text,
        artifacts=artifacts,
        thread_ts=msg.thread_ts,
    )
    logger.info("[Manager] publishing outbound message to bus: channel=%s, chat_id=%s", msg.channel_name, msg.chat_id)
    await self.bus.publish_outbound(outbound)
|
||||
|
||||
# -- command handling --------------------------------------------------
|
||||
|
||||
async def _handle_command(self, msg: InboundMessage) -> None:
    """Dispatch a slash command (/new, /status, /models, /memory, /help).

    The reply is published on the bus as an OutboundMessage; unknown
    commands get a hint pointing at /help.
    """
    text = msg.text.strip()
    parts = text.split(maxsplit=1)
    command = parts[0].lower().lstrip("/")

    if command == "new":
        # Create a new thread on the LangGraph Server
        client = self._get_client()
        thread = await client.threads.create()
        new_thread_id = thread["thread_id"]
        self.store.set_thread_id(
            msg.channel_name,
            msg.chat_id,
            new_thread_id,
            topic_id=msg.topic_id,
            user_id=msg.user_id,
        )
        reply = "New conversation started."
    elif command == "status":
        thread_id = self.store.get_thread_id(msg.channel_name, msg.chat_id, topic_id=msg.topic_id)
        reply = f"Active thread: {thread_id}" if thread_id else "No active conversation."
    elif command == "models":
        reply = await self._fetch_gateway("/api/models", "models")
    elif command == "memory":
        reply = await self._fetch_gateway("/api/memory", "memory")
    elif command == "help":
        reply = "Available commands:\n/new — Start a new conversation\n/status — Show current thread info\n/models — List available models\n/memory — Show memory status\n/help — Show this help"
    else:
        reply = f"Unknown command: /{command}. Type /help for available commands."

    outbound = OutboundMessage(
        channel_name=msg.channel_name,
        chat_id=msg.chat_id,
        # Fix: include topic_id so topic-scoped conversations report the thread
        # they actually use (/new and /status above are already topic-aware;
        # previously only the base channel:chat mapping was consulted here).
        thread_id=self.store.get_thread_id(msg.channel_name, msg.chat_id, topic_id=msg.topic_id) or "",
        text=reply,
        thread_ts=msg.thread_ts,
    )
    await self.bus.publish_outbound(outbound)
|
||||
|
||||
async def _fetch_gateway(self, path: str, kind: str) -> str:
    """Fetch data from the Gateway API for command responses."""
    import httpx

    url = f"{self._gateway_url}{path}"
    try:
        async with httpx.AsyncClient() as session:
            response = await session.get(url, timeout=10)
            response.raise_for_status()
            payload = response.json()
    except Exception:
        logger.exception("Failed to fetch %s from gateway", kind)
        return f"Failed to fetch {kind} information."

    if kind == "models":
        names = [entry["name"] for entry in payload.get("models", [])]
        if not names:
            return "No models configured."
        return "Available models:\n" + "\n".join(f"• {n}" for n in names)
    if kind == "memory":
        facts = payload.get("facts", [])
        return f"Memory contains {len(facts)} fact(s)."
    # Fallback for unrecognized kinds: raw stringified payload.
    return str(payload)
|
||||
|
||||
# -- error helper ------------------------------------------------------
|
||||
|
||||
async def _send_error(self, msg: InboundMessage, error_text: str) -> None:
    """Publish a user-facing error reply for a failed inbound message.

    Best-effort: the error text is routed back to the originating
    channel/chat, threaded when the platform supports it.
    """
    outbound = OutboundMessage(
        channel_name=msg.channel_name,
        chat_id=msg.chat_id,
        # Fix: look up the thread with the message's topic_id so topic-scoped
        # conversations attribute the error to the correct thread (consistent
        # with _handle_chat's topic-aware lookup).
        thread_id=self.store.get_thread_id(msg.channel_name, msg.chat_id, topic_id=msg.topic_id) or "",
        text=error_text,
        thread_ts=msg.thread_ts,
    )
    await self.bus.publish_outbound(outbound)
|
||||
150
backend/src/channels/message_bus.py
Normal file
150
backend/src/channels/message_bus.py
Normal file
@@ -0,0 +1,150 @@
|
||||
"""MessageBus — async pub/sub hub that decouples channels from the agent dispatcher."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import time
|
||||
from collections.abc import Callable, Coroutine
|
||||
from dataclasses import dataclass, field
|
||||
from enum import StrEnum
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Message types
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class InboundMessageType(StrEnum):
|
||||
"""Types of messages arriving from IM channels."""
|
||||
|
||||
CHAT = "chat"
|
||||
COMMAND = "command"
|
||||
|
||||
|
||||
@dataclass
class InboundMessage:
    """A message arriving from an IM channel toward the agent dispatcher.

    Attributes:
        channel_name: Name of the source channel (e.g. "feishu", "slack").
        chat_id: Platform-specific chat/conversation identifier.
        user_id: Platform-specific user identifier.
        text: The message text.
        msg_type: Whether this is a regular chat message or a command.
        thread_ts: Optional platform thread identifier (for threaded replies).
        topic_id: Conversation topic identifier used to map to a DeerFlow thread.
            Messages sharing the same ``topic_id`` within a ``chat_id`` will
            reuse the same DeerFlow thread. When ``None``, each message
            creates a new thread (one-shot Q&A).
        files: Optional list of file attachments (platform-specific dicts).
        metadata: Arbitrary extra data from the channel.
        created_at: Unix timestamp when the message was created.
    """

    # NOTE: field order is part of the positional-constructor interface —
    # do not reorder fields.
    channel_name: str
    chat_id: str
    user_id: str
    text: str
    msg_type: InboundMessageType = InboundMessageType.CHAT
    thread_ts: str | None = None
    topic_id: str | None = None
    # default_factory keeps each instance's mutable containers independent.
    files: list[dict[str, Any]] = field(default_factory=list)
    metadata: dict[str, Any] = field(default_factory=dict)
    # Stamped at construction time, not at enqueue/dispatch time.
    created_at: float = field(default_factory=time.time)
|
||||
|
||||
|
||||
@dataclass
class OutboundMessage:
    """A message from the agent dispatcher back to a channel.

    Attributes:
        channel_name: Target channel name (used for routing).
        chat_id: Target chat/conversation identifier.
        thread_id: DeerFlow thread ID that produced this response.
        text: The response text.
        artifacts: List of artifact paths produced by the agent.
        is_final: Whether this is the final message in the response stream.
        thread_ts: Optional platform thread identifier for threaded replies.
        metadata: Arbitrary extra data.
        created_at: Unix timestamp.
    """

    # NOTE: field order is part of the positional-constructor interface —
    # do not reorder fields.
    channel_name: str
    chat_id: str
    thread_id: str
    text: str
    # default_factory keeps each instance's mutable containers independent.
    artifacts: list[str] = field(default_factory=list)
    is_final: bool = True
    thread_ts: str | None = None
    metadata: dict[str, Any] = field(default_factory=dict)
    # Stamped at construction time, not at dispatch time.
    created_at: float = field(default_factory=time.time)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# MessageBus
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Signature for async callbacks that receive outbound messages from the bus.
OutboundCallback = Callable[[OutboundMessage], Coroutine[Any, Any, None]]
|
||||
|
||||
|
||||
class MessageBus:
    """Async pub/sub hub connecting channels and the agent dispatcher.

    Inbound direction: channels call :meth:`publish_inbound` and the
    dispatcher drains the queue via :meth:`get_inbound`. Outbound
    direction: the dispatcher calls :meth:`publish_outbound`, which fans
    out to every callback registered with :meth:`subscribe_outbound`.
    """

    def __init__(self) -> None:
        self._inbound_queue: asyncio.Queue[InboundMessage] = asyncio.Queue()
        self._outbound_listeners: list[OutboundCallback] = []

    # -- inbound -----------------------------------------------------------

    async def publish_inbound(self, msg: InboundMessage) -> None:
        """Enqueue an inbound message from a channel."""
        await self._inbound_queue.put(msg)
        # Log after the put so the reported queue size includes this message.
        logger.info(
            "[Bus] inbound enqueued: channel=%s, chat_id=%s, type=%s, queue_size=%d",
            msg.channel_name,
            msg.chat_id,
            msg.msg_type.value,
            self._inbound_queue.qsize(),
        )

    async def get_inbound(self) -> InboundMessage:
        """Block until the next inbound message is available."""
        return await self._inbound_queue.get()

    @property
    def inbound_queue(self) -> asyncio.Queue[InboundMessage]:
        """Direct access to the underlying inbound queue."""
        return self._inbound_queue

    # -- outbound ----------------------------------------------------------

    def subscribe_outbound(self, callback: OutboundCallback) -> None:
        """Register an async callback for outbound messages."""
        self._outbound_listeners.append(callback)

    def unsubscribe_outbound(self, callback: OutboundCallback) -> None:
        """Remove a previously registered outbound callback (identity match)."""
        remaining = [listener for listener in self._outbound_listeners if listener is not callback]
        self._outbound_listeners = remaining

    async def publish_outbound(self, msg: OutboundMessage) -> None:
        """Dispatch an outbound message to all registered listeners."""
        logger.info(
            "[Bus] outbound dispatching: channel=%s, chat_id=%s, listeners=%d, text_len=%d",
            msg.channel_name,
            msg.chat_id,
            len(self._outbound_listeners),
            len(msg.text),
        )
        # A failing listener must not prevent delivery to the others.
        for listener in self._outbound_listeners:
            try:
                await listener(msg)
            except Exception:
                logger.exception("Error in outbound callback for channel=%s", msg.channel_name)
|
||||
174
backend/src/channels/service.py
Normal file
174
backend/src/channels/service.py
Normal file
@@ -0,0 +1,174 @@
|
||||
"""ChannelService — manages the lifecycle of all IM channels."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from src.channels.manager import ChannelManager
|
||||
from src.channels.message_bus import MessageBus
|
||||
from src.channels.store import ChannelStore
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Channel name → import path for lazy loading.
# Values use the "module:ClassName" form consumed by src.reflection.resolve_class
# in _start_channel; a channel module is only imported when that channel is
# enabled in config, so optional SDK dependencies stay optional.
_CHANNEL_REGISTRY: dict[str, str] = {
    "feishu": "src.channels.feishu:FeishuChannel",
    "slack": "src.channels.slack:SlackChannel",
    "telegram": "src.channels.telegram:TelegramChannel",
}
|
||||
|
||||
|
||||
class ChannelService:
    """Manages the lifecycle of all configured IM channels.

    Reads configuration from ``config.yaml`` under the ``channels`` key,
    instantiates enabled channels, and starts the ChannelManager dispatcher.
    """

    def __init__(self, channels_config: dict[str, Any] | None = None) -> None:
        """Build the bus, store, and manager; channels start later via start().

        Args:
            channels_config: Mapping of channel name -> channel config dict,
                optionally containing top-level ``langgraph_url`` and
                ``gateway_url`` keys (popped out before channel iteration).
        """
        self.bus = MessageBus()
        self.store = ChannelStore()
        # Copy so the caller's dict is never mutated by the pops below.
        config = dict(channels_config or {})
        langgraph_url = config.pop("langgraph_url", None) or "http://localhost:2024"
        gateway_url = config.pop("gateway_url", None) or "http://localhost:8001"
        self.manager = ChannelManager(
            bus=self.bus,
            store=self.store,
            langgraph_url=langgraph_url,
            gateway_url=gateway_url,
        )
        self._channels: dict[str, Any] = {}  # name -> Channel instance
        self._config = config
        self._running = False

    @classmethod
    def from_app_config(cls) -> ChannelService:
        """Create a ChannelService from the application config."""
        from src.config.app_config import get_app_config

        config = get_app_config()
        channels_config = {}
        # extra fields are allowed by AppConfig (extra="allow")
        extra = config.model_extra or {}
        if "channels" in extra:
            channels_config = extra["channels"]
        return cls(channels_config=channels_config)

    async def start(self) -> None:
        """Start the manager and all enabled channels.

        Idempotent: a second call while running is a no-op. The manager is
        started before any channel so inbound messages are never dropped.
        """
        if self._running:
            return

        await self.manager.start()

        for name, channel_config in self._config.items():
            # Skip malformed entries (non-dict values under "channels").
            if not isinstance(channel_config, dict):
                continue
            if not channel_config.get("enabled", False):
                logger.info("Channel %s is disabled, skipping", name)
                continue

            await self._start_channel(name, channel_config)

        self._running = True
        logger.info("ChannelService started with channels: %s", list(self._channels.keys()))

    async def stop(self) -> None:
        """Stop all channels and the manager.

        Channels are stopped first (best-effort, errors logged) so no new
        messages arrive while the manager shuts down.
        """
        for name, channel in list(self._channels.items()):
            try:
                await channel.stop()
                logger.info("Channel %s stopped", name)
            except Exception:
                logger.exception("Error stopping channel %s", name)
        self._channels.clear()

        await self.manager.stop()
        self._running = False
        logger.info("ChannelService stopped")

    async def restart_channel(self, name: str) -> bool:
        """Restart a specific channel. Returns True if successful."""
        if name in self._channels:
            try:
                await self._channels[name].stop()
            except Exception:
                # Even if stop fails, drop the instance so a fresh one is built.
                logger.exception("Error stopping channel %s for restart", name)
            del self._channels[name]

        config = self._config.get(name)
        if not config or not isinstance(config, dict):
            logger.warning("No config for channel %s", name)
            return False

        return await self._start_channel(name, config)

    async def _start_channel(self, name: str, config: dict[str, Any]) -> bool:
        """Instantiate and start a single channel. Returns True on success."""
        import_path = _CHANNEL_REGISTRY.get(name)
        if not import_path:
            logger.warning("Unknown channel type: %s", name)
            return False

        # Lazy import: only enabled channels pull in their SDK dependencies.
        try:
            from src.reflection import resolve_class

            channel_cls = resolve_class(import_path, base_class=None)
        except Exception:
            logger.exception("Failed to import channel class for %s", name)
            return False

        try:
            channel = channel_cls(bus=self.bus, config=config)
            await channel.start()
            self._channels[name] = channel
            logger.info("Channel %s started", name)
            return True
        except Exception:
            logger.exception("Failed to start channel %s", name)
            return False

    def get_status(self) -> dict[str, Any]:
        """Return status information for all channels.

        Every registered channel type is reported, even when not configured,
        with its ``enabled`` (from config) and ``running`` (live instance)
        flags.
        """
        channels_status = {}
        for name in _CHANNEL_REGISTRY:
            config = self._config.get(name, {})
            enabled = isinstance(config, dict) and config.get("enabled", False)
            running = name in self._channels and self._channels[name].is_running
            channels_status[name] = {
                "enabled": enabled,
                "running": running,
            }
        return {
            "service_running": self._running,
            "channels": channels_status,
        }
|
||||
|
||||
|
||||
# -- singleton access -------------------------------------------------------
|
||||
|
||||
# Module-level singleton, populated by start_channel_service().
_channel_service: ChannelService | None = None


def get_channel_service() -> ChannelService | None:
    """Get the singleton ChannelService instance (if started).

    Returns None when start_channel_service() has not run yet or after
    stop_channel_service() tore the service down.
    """
    return _channel_service
|
||||
|
||||
|
||||
async def start_channel_service() -> ChannelService:
    """Create and start the global ChannelService from app config.

    Idempotent: if a service instance already exists, it is returned
    without starting a second one.
    """
    global _channel_service
    existing = _channel_service
    if existing is not None:
        return existing
    # Assign before start() so a concurrent caller sees the instance;
    # matches the original error-path behavior if start() raises.
    _channel_service = ChannelService.from_app_config()
    await _channel_service.start()
    return _channel_service
|
||||
|
||||
|
||||
async def stop_channel_service() -> None:
    """Stop the global ChannelService and clear the singleton.

    No-op when the service was never started.
    """
    global _channel_service
    service = _channel_service
    if service is None:
        return
    # Stop first; the singleton is only cleared on a clean shutdown
    # (same ordering as before).
    await service.stop()
    _channel_service = None
|
||||
223
backend/src/channels/slack.py
Normal file
223
backend/src/channels/slack.py
Normal file
@@ -0,0 +1,223 @@
|
||||
"""Slack channel — connects via Socket Mode (no public IP needed)."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from markdown_to_mrkdwn import SlackMarkdownConverter
|
||||
|
||||
from src.channels.base import Channel
|
||||
from src.channels.message_bus import InboundMessageType, MessageBus, OutboundMessage
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Module-level Markdown → Slack mrkdwn converter, reused for every outbound message.
_slack_md_converter = SlackMarkdownConverter()
|
||||
|
||||
|
||||
class SlackChannel(Channel):
    """Slack IM channel using Socket Mode (WebSocket, no public IP).

    Configuration keys (in ``config.yaml`` under ``channels.slack``):
    - ``bot_token``: Slack Bot User OAuth Token (xoxb-...).
    - ``app_token``: Slack App-Level Token (xapp-...) for Socket Mode.
    - ``allowed_users``: (optional) List of allowed Slack user IDs. Empty = allow all.
    """

    def __init__(self, bus: MessageBus, config: dict[str, Any]) -> None:
        super().__init__(name="slack", bus=bus, config=config)
        self._socket_client = None  # slack_sdk SocketModeClient, set in start()
        self._web_client = None  # slack_sdk WebClient for Web API calls
        # Main event loop, captured in start() so SDK-thread callbacks can
        # schedule coroutines onto it.
        self._loop: asyncio.AbstractEventLoop | None = None
        self._allowed_users: set[str] = set(config.get("allowed_users", []))

    async def start(self) -> None:
        """Connect to Slack via Socket Mode and begin receiving events.

        Best-effort: missing SDK or missing tokens are logged and leave the
        channel stopped rather than raising.
        """
        if self._running:
            return

        # Imported lazily so slack-sdk stays an optional dependency.
        try:
            from slack_sdk import WebClient
            from slack_sdk.socket_mode import SocketModeClient
            from slack_sdk.socket_mode.response import SocketModeResponse
        except ImportError:
            logger.error("slack-sdk is not installed. Install it with: uv add slack-sdk")
            return

        # Kept on the instance so the SDK-thread event handler can build acks.
        self._SocketModeResponse = SocketModeResponse

        bot_token = self.config.get("bot_token", "")
        app_token = self.config.get("app_token", "")

        if not bot_token or not app_token:
            logger.error("Slack channel requires bot_token and app_token")
            return

        self._web_client = WebClient(token=bot_token)
        self._socket_client = SocketModeClient(
            app_token=app_token,
            web_client=self._web_client,
        )
        self._loop = asyncio.get_event_loop()

        self._socket_client.socket_mode_request_listeners.append(self._on_socket_event)

        self._running = True
        self.bus.subscribe_outbound(self._on_outbound)

        # Start socket mode in background thread
        # NOTE(review): fire-and-forget — the executor future is not awaited,
        # so connect() failures surface only via SDK logging.
        asyncio.get_event_loop().run_in_executor(None, self._socket_client.connect)
        logger.info("Slack channel started")

    async def stop(self) -> None:
        """Disconnect from Slack and stop receiving outbound messages."""
        self._running = False
        self.bus.unsubscribe_outbound(self._on_outbound)
        if self._socket_client:
            self._socket_client.close()
            self._socket_client = None
        logger.info("Slack channel stopped")

    async def send(self, msg: OutboundMessage, *, _max_retries: int = 3) -> None:
        """Post an outbound message to Slack, retrying with backoff.

        The text is converted to Slack mrkdwn and threaded when
        ``msg.thread_ts`` is set. On success a white_check_mark reaction is
        added to the thread root; after exhausting retries an "x" reaction
        is added (best-effort) and the last exception is re-raised.
        """
        if not self._web_client:
            return

        kwargs: dict[str, Any] = {
            "channel": msg.chat_id,
            "text": _slack_md_converter.convert(msg.text),
        }
        if msg.thread_ts:
            kwargs["thread_ts"] = msg.thread_ts

        last_exc: Exception | None = None
        for attempt in range(_max_retries):
            try:
                # chat_postMessage is blocking; run it off the event loop.
                await asyncio.to_thread(self._web_client.chat_postMessage, **kwargs)
                # Add a completion reaction to the thread root
                if msg.thread_ts:
                    await asyncio.to_thread(
                        self._add_reaction,
                        msg.chat_id,
                        msg.thread_ts,
                        "white_check_mark",
                    )
                return
            except Exception as exc:
                last_exc = exc
                if attempt < _max_retries - 1:
                    delay = 2**attempt  # 1s, 2s
                    logger.warning(
                        "[Slack] send failed (attempt %d/%d), retrying in %ds: %s",
                        attempt + 1,
                        _max_retries,
                        delay,
                        exc,
                    )
                    await asyncio.sleep(delay)

        logger.error("[Slack] send failed after %d attempts: %s", _max_retries, last_exc)
        # Add failure reaction on error
        if msg.thread_ts:
            try:
                await asyncio.to_thread(
                    self._add_reaction,
                    msg.chat_id,
                    msg.thread_ts,
                    "x",
                )
            except Exception:
                pass
        raise last_exc  # type: ignore[misc]

    # -- internal ----------------------------------------------------------

    def _add_reaction(self, channel_id: str, timestamp: str, emoji: str) -> None:
        """Add an emoji reaction to a message (best-effort, non-blocking)."""
        if not self._web_client:
            return
        try:
            self._web_client.reactions_add(
                channel=channel_id,
                timestamp=timestamp,
                name=emoji,
            )
        except Exception as exc:
            # "already_reacted" is expected on retries; anything else is logged.
            if "already_reacted" not in str(exc):
                logger.warning("[Slack] failed to add reaction %s: %s", emoji, exc)

    def _send_running_reply(self, channel_id: str, thread_ts: str) -> None:
        """Send a 'Working on it......' reply in the thread (called from SDK thread)."""
        if not self._web_client:
            return
        try:
            self._web_client.chat_postMessage(
                channel=channel_id,
                text=":hourglass_flowing_sand: Working on it...",
                thread_ts=thread_ts,
            )
            logger.info("[Slack] 'Working on it...' reply sent in channel=%s, thread_ts=%s", channel_id, thread_ts)
        except Exception:
            logger.exception("[Slack] failed to send running reply in channel=%s", channel_id)

    def _on_socket_event(self, client, req) -> None:
        """Called by slack-sdk for each Socket Mode event.

        Runs on the SDK's own thread, not the asyncio loop. Always acks the
        envelope first so Slack does not redeliver the event.
        """
        try:
            # Acknowledge the event
            response = self._SocketModeResponse(envelope_id=req.envelope_id)
            client.send_socket_mode_response(response)

            event_type = req.type
            if event_type != "events_api":
                return

            event = req.payload.get("event", {})
            etype = event.get("type", "")

            # Handle message events (DM or @mention)
            if etype in ("message", "app_mention"):
                self._handle_message_event(event)

        except Exception:
            logger.exception("Error processing Slack event")

    def _handle_message_event(self, event: dict) -> None:
        """Filter a raw message event and forward it to the bus.

        Runs on the SDK thread; publishing crosses into the main loop via
        run_coroutine_threadsafe.
        """
        # Ignore bot messages
        # NOTE(review): any subtype (edits, joins, etc.) is dropped, not only bots.
        if event.get("bot_id") or event.get("subtype"):
            return

        user_id = event.get("user", "")

        # Check allowed users
        if self._allowed_users and user_id not in self._allowed_users:
            logger.debug("Ignoring message from non-allowed user: %s", user_id)
            return

        text = event.get("text", "").strip()
        if not text:
            return

        channel_id = event.get("channel", "")
        thread_ts = event.get("thread_ts") or event.get("ts", "")

        # Leading slash marks a dispatcher command rather than a chat turn.
        if text.startswith("/"):
            msg_type = InboundMessageType.COMMAND
        else:
            msg_type = InboundMessageType.CHAT

        # topic_id: use thread_ts as the topic identifier.
        # For threaded messages, thread_ts is the root message ts (shared topic).
        # For non-threaded messages, thread_ts is the message's own ts (new topic).
        inbound = self._make_inbound(
            chat_id=channel_id,
            user_id=user_id,
            text=text,
            msg_type=msg_type,
            thread_ts=thread_ts,
        )
        inbound.topic_id = thread_ts

        if self._loop and self._loop.is_running():
            # Acknowledge with an eyes reaction
            self._add_reaction(channel_id, event.get("ts", thread_ts), "eyes")
            # Send "running" reply first (fire-and-forget from SDK thread)
            self._send_running_reply(channel_id, thread_ts)
            asyncio.run_coroutine_threadsafe(self.bus.publish_inbound(inbound), self._loop)
|
||||
153
backend/src/channels/store.py
Normal file
153
backend/src/channels/store.py
Normal file
@@ -0,0 +1,153 @@
|
||||
"""ChannelStore — persists IM chat-to-DeerFlow thread mappings."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import tempfile
|
||||
import threading
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ChannelStore:
    """JSON-file-backed store that maps IM conversations to DeerFlow threads.

    Data layout (on disk)::

        {
            "<channel_name>:<chat_id>": {
                "thread_id": "<uuid>",
                "user_id": "<platform_user>",
                "created_at": 1700000000.0,
                "updated_at": 1700000000.0
            },
            ...
        }

    Keys are ``channel:chat`` for root conversations and
    ``channel:chat:topic`` for threaded conversations.

    The store is intentionally simple — a single JSON file that is atomically
    rewritten on every mutation. For production workloads with high concurrency,
    this can be swapped for a proper database backend.
    """

    def __init__(self, path: str | Path | None = None) -> None:
        """Open (or create) the store file.

        Args:
            path: Store file location; defaults to
                ``<base_dir>/channels/store.json`` from the app paths config.
        """
        if path is None:
            from src.config.paths import get_paths

            path = Path(get_paths().base_dir) / "channels" / "store.json"
        self._path = Path(path)
        self._path.parent.mkdir(parents=True, exist_ok=True)
        # Fix: create the lock before any data access so readers can use it too.
        self._lock = threading.Lock()
        self._data: dict[str, dict[str, Any]] = self._load()

    # -- persistence -------------------------------------------------------

    def _load(self) -> dict[str, dict[str, Any]]:
        """Read the JSON file; a missing or corrupt file yields an empty store."""
        if self._path.exists():
            try:
                return json.loads(self._path.read_text(encoding="utf-8"))
            except (json.JSONDecodeError, OSError):
                logger.warning("Corrupt channel store at %s, starting fresh", self._path)
        return {}

    def _save(self) -> None:
        """Atomically rewrite the store file (write temp file, then replace)."""
        fd = tempfile.NamedTemporaryFile(
            mode="w",
            dir=self._path.parent,
            suffix=".tmp",
            delete=False,
        )
        try:
            json.dump(self._data, fd, indent=2)
            fd.close()
            Path(fd.name).replace(self._path)
        except BaseException:
            # Clean up the temp file on any failure (including cancellation).
            fd.close()
            Path(fd.name).unlink(missing_ok=True)
            raise

    # -- key helpers -------------------------------------------------------

    @staticmethod
    def _key(channel_name: str, chat_id: str, topic_id: str | None = None) -> str:
        """Build the storage key: channel:chat or channel:chat:topic."""
        if topic_id:
            return f"{channel_name}:{chat_id}:{topic_id}"
        return f"{channel_name}:{chat_id}"

    # -- public API --------------------------------------------------------

    def get_thread_id(self, channel_name: str, chat_id: str, topic_id: str | None = None) -> str | None:
        """Look up the DeerFlow thread_id for a given IM conversation/topic."""
        # Fix: readers now hold the lock so they cannot observe a dict that a
        # concurrent writer is mutating.
        with self._lock:
            entry = self._data.get(self._key(channel_name, chat_id, topic_id))
            # Fix: tolerate hand-edited entries missing "thread_id".
            return entry.get("thread_id") if entry else None

    def set_thread_id(
        self,
        channel_name: str,
        chat_id: str,
        thread_id: str,
        *,
        topic_id: str | None = None,
        user_id: str = "",
    ) -> None:
        """Create or update the mapping for an IM conversation/topic."""
        with self._lock:
            key = self._key(channel_name, chat_id, topic_id)
            now = time.time()
            existing = self._data.get(key)
            self._data[key] = {
                "thread_id": thread_id,
                "user_id": user_id,
                # Preserve the original creation time on update.
                "created_at": existing["created_at"] if existing else now,
                "updated_at": now,
            }
            self._save()

    def remove(self, channel_name: str, chat_id: str, topic_id: str | None = None) -> bool:
        """Remove a mapping.

        If ``topic_id`` is provided, only that specific conversation/topic mapping is removed.
        If ``topic_id`` is omitted, all mappings whose key starts with
        ``"<channel_name>:<chat_id>"`` (including topic-specific ones) are removed.

        Returns True if at least one mapping was removed.
        """
        with self._lock:
            # Remove a specific conversation/topic mapping.
            if topic_id is not None:
                key = self._key(channel_name, chat_id, topic_id)
                if key in self._data:
                    del self._data[key]
                    self._save()
                    return True
                return False

            # Remove all mappings for this channel/chat_id (base and any topic-specific keys).
            prefix = self._key(channel_name, chat_id)
            keys_to_delete = [k for k in self._data if k == prefix or k.startswith(prefix + ":")]
            if not keys_to_delete:
                return False

            for k in keys_to_delete:
                del self._data[k]
            self._save()
            return True

    def list_entries(self, channel_name: str | None = None) -> list[dict[str, Any]]:
        """List all stored mappings, optionally filtered by channel."""
        # Fix: snapshot under the lock so iteration never races a writer.
        with self._lock:
            snapshot = dict(self._data)
        results = []
        for key, entry in snapshot.items():
            # Keys are channel:chat or channel:chat:topic; maxsplit keeps any
            # ":" inside the topic part intact.
            parts = key.split(":", 2)
            ch = parts[0]
            chat = parts[1] if len(parts) > 1 else ""
            topic = parts[2] if len(parts) > 2 else None
            if channel_name and ch != channel_name:
                continue
            item: dict[str, Any] = {"channel_name": ch, "chat_id": chat, **entry}
            if topic is not None:
                item["topic_id"] = topic
            results.append(item)
        return results
|
||||
225
backend/src/channels/telegram.py
Normal file
225
backend/src/channels/telegram.py
Normal file
@@ -0,0 +1,225 @@
|
||||
"""Telegram channel — connects via long-polling (no public IP needed)."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import threading
|
||||
from typing import Any
|
||||
|
||||
from src.channels.base import Channel
|
||||
from src.channels.message_bus import InboundMessageType, MessageBus, OutboundMessage
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TelegramChannel(Channel):
    """Telegram bot channel using long-polling.

    Runs python-telegram-bot's polling loop in a dedicated daemon thread with
    its own event loop, and hops back to the gateway's main loop (captured in
    :meth:`start`) to publish inbound messages on the bus.

    Configuration keys (in ``config.yaml`` under ``channels.telegram``):
    - ``bot_token``: Telegram Bot API token (from @BotFather).
    - ``allowed_users``: (optional) List of allowed Telegram user IDs. Empty = allow all.
    """

    def __init__(self, bus: MessageBus, config: dict[str, Any]) -> None:
        super().__init__(name="telegram", bus=bus, config=config)
        self._application = None  # telegram.ext.Application, built in start()
        self._thread: threading.Thread | None = None  # dedicated polling thread
        self._tg_loop: asyncio.AbstractEventLoop | None = None  # loop owned by the polling thread
        self._main_loop: asyncio.AbstractEventLoop | None = None  # gateway loop, for thread-safe hops
        self._allowed_users: set[int] = set()
        for uid in config.get("allowed_users", []):
            try:
                self._allowed_users.add(int(uid))
            except (ValueError, TypeError):
                # Silently drop entries that are not valid integer user IDs.
                pass
        # chat_id -> last sent message_id for threaded replies
        self._last_bot_message: dict[str, int] = {}

    async def start(self) -> None:
        """Build the bot application and start polling in a background thread.

        No-op when already running. Logs and returns (rather than raising)
        when python-telegram-bot is missing or ``bot_token`` is unset, so a
        misconfigured channel does not crash the gateway.
        """
        if self._running:
            return

        try:
            from telegram.ext import ApplicationBuilder, CommandHandler, MessageHandler, filters
        except ImportError:
            logger.error("python-telegram-bot is not installed. Install it with: uv add python-telegram-bot")
            return

        bot_token = self.config.get("bot_token", "")
        if not bot_token:
            logger.error("Telegram channel requires bot_token")
            return

        # We are inside a coroutine, so a loop is guaranteed to be running;
        # get_running_loop() replaces the deprecated get_event_loop() here.
        self._main_loop = asyncio.get_running_loop()
        self._running = True
        self.bus.subscribe_outbound(self._on_outbound)

        # Build the application
        app = ApplicationBuilder().token(bot_token).build()

        # Command handlers
        app.add_handler(CommandHandler("start", self._cmd_start))
        app.add_handler(CommandHandler("new", self._cmd_generic))
        app.add_handler(CommandHandler("status", self._cmd_generic))
        app.add_handler(CommandHandler("models", self._cmd_generic))
        app.add_handler(CommandHandler("memory", self._cmd_generic))
        app.add_handler(CommandHandler("help", self._cmd_generic))

        # General message handler (plain text, excluding slash commands)
        app.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, self._on_text))

        self._application = app

        # Run polling in a dedicated thread with its own event loop
        self._thread = threading.Thread(target=self._run_polling, daemon=True)
        self._thread.start()
        logger.info("Telegram channel started")

    async def stop(self) -> None:
        """Stop polling, unsubscribe from the bus, and join the polling thread."""
        self._running = False
        self.bus.unsubscribe_outbound(self._on_outbound)
        if self._application and self._tg_loop:
            # Ask the polling thread's loop to stop from this (main) thread.
            self._tg_loop.call_soon_threadsafe(self._tg_loop.stop)
        if self._thread:
            self._thread.join(timeout=5)
            self._thread = None
        self._application = None
        logger.info("Telegram channel stopped")

    async def send(self, msg: OutboundMessage, *, _max_retries: int = 3) -> None:
        """Deliver an outbound message to Telegram with retry/backoff.

        Replies to the last bot message in the chat (when known) so responses
        thread naturally. Re-raises the last exception after ``_max_retries``
        failed attempts; silently returns when the channel is not started or
        the chat id is not a valid integer.
        """
        if not self._application:
            return

        try:
            chat_id = int(msg.chat_id)
        except (ValueError, TypeError):
            logger.error("Invalid Telegram chat_id: %s", msg.chat_id)
            return

        kwargs: dict[str, Any] = {"chat_id": chat_id, "text": msg.text}

        # Reply to the last bot message in this chat for threading
        reply_to = self._last_bot_message.get(msg.chat_id)
        if reply_to:
            kwargs["reply_to_message_id"] = reply_to

        bot = self._application.bot
        last_exc: Exception | None = None
        for attempt in range(_max_retries):
            try:
                sent = await bot.send_message(**kwargs)
                self._last_bot_message[msg.chat_id] = sent.message_id
                return
            except Exception as exc:
                last_exc = exc
                if attempt < _max_retries - 1:
                    delay = 2**attempt  # exponential backoff: 1s, 2s, ...
                    logger.warning(
                        "[Telegram] send failed (attempt %d/%d), retrying in %ds: %s",
                        attempt + 1,
                        _max_retries,
                        delay,
                        exc,
                    )
                    await asyncio.sleep(delay)

        logger.error("[Telegram] send failed after %d attempts: %s", _max_retries, last_exc)
        # Guard against _max_retries <= 0, where the loop never ran and
        # last_exc is still None (raising None would be a TypeError).
        if last_exc is not None:
            raise last_exc

    # -- helpers -----------------------------------------------------------

    async def _send_running_reply(self, chat_id: str, reply_to_message_id: int) -> None:
        """Send a 'Working on it...' reply to the user's message."""
        if not self._application:
            return
        try:
            bot = self._application.bot
            await bot.send_message(
                chat_id=int(chat_id),
                text="Working on it...",
                reply_to_message_id=reply_to_message_id,
            )
            logger.info("[Telegram] 'Working on it...' reply sent in chat=%s", chat_id)
        except Exception:
            # Best-effort acknowledgement: never let it break message handling.
            logger.exception("[Telegram] failed to send running reply in chat=%s", chat_id)

    # -- internal ----------------------------------------------------------

    def _run_polling(self) -> None:
        """Run telegram polling in a dedicated thread with its own event loop."""
        self._tg_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self._tg_loop)
        try:
            # NOTE(review): PTB v20's Application.run_polling manages its own
            # loop and blocks until stopped; confirm the run_until_complete
            # wrapper against the installed python-telegram-bot version.
            self._tg_loop.run_until_complete(self._application.run_polling(close_loop=False))
        except Exception:
            # During stop() the loop is torn down deliberately; only log
            # errors that happen while we are still supposed to be running.
            if self._running:
                logger.exception("Telegram polling error")

    def _check_user(self, user_id: int) -> bool:
        """Return True when the user is allowed (empty allowlist = allow all)."""
        if not self._allowed_users:
            return True
        return user_id in self._allowed_users

    async def _cmd_start(self, update, context) -> None:
        """Handle /start command."""
        if not self._check_user(update.effective_user.id):
            return
        await update.message.reply_text("Welcome to DeerFlow! Send me a message to start a conversation.\nType /help for available commands.")

    async def _cmd_generic(self, update, context) -> None:
        """Forward slash commands to the channel manager."""
        if not self._check_user(update.effective_user.id):
            return

        text = update.message.text
        chat_id = str(update.effective_chat.id)
        user_id = str(update.effective_user.id)
        msg_id = str(update.message.message_id)

        inbound = self._make_inbound(
            chat_id=chat_id,
            user_id=user_id,
            text=text,
            msg_type=InboundMessageType.COMMAND,
            thread_ts=msg_id,
        )

        # Handlers execute on the polling thread's loop; hop to the main loop
        # to acknowledge the user and to publish onto the bus.
        if self._main_loop and self._main_loop.is_running():
            asyncio.run_coroutine_threadsafe(self._send_running_reply(chat_id, update.message.message_id), self._main_loop)
            asyncio.run_coroutine_threadsafe(self.bus.publish_inbound(inbound), self._main_loop)

    async def _on_text(self, update, context) -> None:
        """Handle regular text messages."""
        if not self._check_user(update.effective_user.id):
            return

        text = update.message.text.strip()
        if not text:
            return

        chat_id = str(update.effective_chat.id)
        user_id = str(update.effective_user.id)
        msg_id = str(update.message.message_id)

        # topic_id: if the user is replying to a bot message, look up
        # the original topic_id stored for that reply chain. Otherwise
        # the current message starts a new topic.
        reply_to = update.message.reply_to_message
        if reply_to:
            topic_id = str(reply_to.message_id)
        else:
            topic_id = msg_id

        inbound = self._make_inbound(
            chat_id=chat_id,
            user_id=user_id,
            text=text,
            msg_type=InboundMessageType.CHAT,
            thread_ts=msg_id,
        )
        inbound.topic_id = topic_id

        # Handlers execute on the polling thread's loop; hop to the main loop
        # to acknowledge the user and to publish onto the bus.
        if self._main_loop and self._main_loop.is_running():
            asyncio.run_coroutine_threadsafe(self._send_running_reply(chat_id, update.message.message_id), self._main_loop)
            asyncio.run_coroutine_threadsafe(self.bus.publish_inbound(inbound), self._main_loop)
|
||||
@@ -82,8 +82,7 @@ class InfoQuestClient:
|
||||
return response_data["reader_result"]
|
||||
elif "content" in response_data:
|
||||
# Fallback to content field if reader_result is not available
|
||||
logger.debug("reader_result missing in JSON response, falling back to content field: %s",
|
||||
response_data["content"])
|
||||
logger.debug("reader_result missing in JSON response, falling back to content field: %s", response_data["content"])
|
||||
return response_data["content"]
|
||||
else:
|
||||
# If neither field exists, return the original response
|
||||
|
||||
@@ -10,6 +10,7 @@ from src.gateway.config import get_gateway_config
|
||||
from src.gateway.routers import (
|
||||
agents,
|
||||
artifacts,
|
||||
channels,
|
||||
mcp,
|
||||
memory,
|
||||
models,
|
||||
@@ -47,7 +48,24 @@ async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
|
||||
# 2. Gateway and LangGraph Server are separate processes with independent caches
|
||||
# MCP tools are lazily initialized in LangGraph Server when first needed
|
||||
|
||||
# Start IM channel service if any channels are configured
|
||||
try:
|
||||
from src.channels.service import start_channel_service
|
||||
|
||||
channel_service = await start_channel_service()
|
||||
logger.info("Channel service started: %s", channel_service.get_status())
|
||||
except Exception:
|
||||
logger.exception("No IM channels configured or channel service failed to start")
|
||||
|
||||
yield
|
||||
|
||||
# Stop channel service on shutdown
|
||||
try:
|
||||
from src.channels.service import stop_channel_service
|
||||
|
||||
await stop_channel_service()
|
||||
except Exception:
|
||||
logger.exception("Failed to stop channel service")
|
||||
logger.info("Shutting down API Gateway")
|
||||
|
||||
|
||||
@@ -117,6 +135,10 @@ This gateway provides custom endpoints for models, MCP configuration, skills, an
|
||||
"name": "suggestions",
|
||||
"description": "Generate follow-up question suggestions for conversations",
|
||||
},
|
||||
{
|
||||
"name": "channels",
|
||||
"description": "Manage IM channel integrations (Feishu, Slack, Telegram)",
|
||||
},
|
||||
{
|
||||
"name": "health",
|
||||
"description": "Health check and system status endpoints",
|
||||
@@ -151,6 +173,9 @@ This gateway provides custom endpoints for models, MCP configuration, skills, an
|
||||
# Suggestions API is mounted at /api/threads/{thread_id}/suggestions
|
||||
app.include_router(suggestions.router)
|
||||
|
||||
# Channels API is mounted at /api/channels
|
||||
app.include_router(channels.router)
|
||||
|
||||
@app.get("/health", tags=["health"])
|
||||
async def health_check() -> dict:
|
||||
"""Health check endpoint.
|
||||
|
||||
52
backend/src/gateway/routers/channels.py
Normal file
52
backend/src/gateway/routers/channels.py
Normal file
@@ -0,0 +1,52 @@
|
||||
"""Gateway router for IM channel management."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
|
||||
from fastapi import APIRouter, HTTPException
|
||||
from pydantic import BaseModel
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter(prefix="/api/channels", tags=["channels"])
|
||||
|
||||
|
||||
class ChannelStatusResponse(BaseModel):
    """Aggregated status of the IM channel service returned by GET /api/channels/."""

    # Whether the channel service singleton is currently running.
    service_running: bool
    # Per-channel status dicts — presumably keyed by channel name
    # (e.g. "telegram", "slack"); verify against ChannelService.get_status().
    channels: dict[str, dict]
|
||||
|
||||
|
||||
class ChannelRestartResponse(BaseModel):
|
||||
success: bool
|
||||
message: str
|
||||
|
||||
|
||||
@router.get("/", response_model=ChannelStatusResponse)
|
||||
async def get_channels_status() -> ChannelStatusResponse:
|
||||
"""Get the status of all IM channels."""
|
||||
from src.channels.service import get_channel_service
|
||||
|
||||
service = get_channel_service()
|
||||
if service is None:
|
||||
return ChannelStatusResponse(service_running=False, channels={})
|
||||
status = service.get_status()
|
||||
return ChannelStatusResponse(**status)
|
||||
|
||||
|
||||
@router.post("/{name}/restart", response_model=ChannelRestartResponse)
|
||||
async def restart_channel(name: str) -> ChannelRestartResponse:
|
||||
"""Restart a specific IM channel."""
|
||||
from src.channels.service import get_channel_service
|
||||
|
||||
service = get_channel_service()
|
||||
if service is None:
|
||||
raise HTTPException(status_code=503, detail="Channel service is not running")
|
||||
|
||||
success = await service.restart_channel(name)
|
||||
if success:
|
||||
logger.info("Channel %s restarted successfully", name)
|
||||
return ChannelRestartResponse(success=True, message=f"Channel {name} restarted successfully")
|
||||
else:
|
||||
logger.warning("Failed to restart channel %s", name)
|
||||
return ChannelRestartResponse(success=False, message=f"Failed to restart channel {name}")
|
||||
@@ -19,10 +19,7 @@ def _build_missing_dependency_hint(module_path: str, err: ImportError) -> str:
|
||||
if package_name is None:
|
||||
package_name = MODULE_TO_PACKAGE_HINTS.get(missing_module, missing_module.replace("_", "-"))
|
||||
|
||||
return (
|
||||
f"Missing dependency '{missing_module}'. "
|
||||
f"Install it with `uv add {package_name}` (or `pip install {package_name}`), then restart DeerFlow."
|
||||
)
|
||||
return f"Missing dependency '{missing_module}'. Install it with `uv add {package_name}` (or `pip install {package_name}`), then restart DeerFlow."
|
||||
|
||||
|
||||
def resolve_variable[T](
|
||||
|
||||
@@ -147,10 +147,7 @@ class LocalSandbox(Sandbox):
|
||||
shell_from_path = shutil.which("sh")
|
||||
if shell_from_path is not None:
|
||||
return shell_from_path
|
||||
raise RuntimeError(
|
||||
"No suitable shell executable found. Tried /bin/zsh, /bin/bash, "
|
||||
"/bin/sh, and `sh` on PATH."
|
||||
)
|
||||
raise RuntimeError("No suitable shell executable found. Tried /bin/zsh, /bin/bash, /bin/sh, and `sh` on PATH.")
|
||||
|
||||
def execute_command(self, command: str) -> str:
|
||||
# Resolve container paths in command before execution
|
||||
|
||||
@@ -54,9 +54,7 @@ def _normalize_presented_filepath(
|
||||
try:
|
||||
relative_path = actual_path.relative_to(outputs_dir)
|
||||
except ValueError as exc:
|
||||
raise ValueError(
|
||||
f"Only files in {OUTPUTS_VIRTUAL_PREFIX} can be presented: {filepath}"
|
||||
) from exc
|
||||
raise ValueError(f"Only files in {OUTPUTS_VIRTUAL_PREFIX} can be presented: {filepath}") from exc
|
||||
|
||||
return f"{OUTPUTS_VIRTUAL_PREFIX}/{relative_path.as_posix()}"
|
||||
|
||||
@@ -87,22 +85,16 @@ def present_file_tool(
|
||||
filepaths: List of absolute file paths to present to the user. **Only** files in `/mnt/user-data/outputs` can be presented.
|
||||
"""
|
||||
try:
|
||||
normalized_paths = [
|
||||
_normalize_presented_filepath(runtime, filepath) for filepath in filepaths
|
||||
]
|
||||
normalized_paths = [_normalize_presented_filepath(runtime, filepath) for filepath in filepaths]
|
||||
except ValueError as exc:
|
||||
return Command(
|
||||
update={
|
||||
"messages": [ToolMessage(f"Error: {exc}", tool_call_id=tool_call_id)]
|
||||
},
|
||||
update={"messages": [ToolMessage(f"Error: {exc}", tool_call_id=tool_call_id)]},
|
||||
)
|
||||
|
||||
# The merge_artifacts reducer will handle merging and deduplication
|
||||
return Command(
|
||||
update={
|
||||
"artifacts": normalized_paths,
|
||||
"messages": [
|
||||
ToolMessage("Successfully presented files", tool_call_id=tool_call_id)
|
||||
],
|
||||
"messages": [ToolMessage("Successfully presented files", tool_call_id=tool_call_id)],
|
||||
},
|
||||
)
|
||||
|
||||
1094
backend/tests/test_channels.py
Normal file
1094
backend/tests/test_channels.py
Normal file
File diff suppressed because it is too large
Load Diff
@@ -20,6 +20,7 @@ from src.gateway.routers.uploads import UploadResponse
|
||||
# Fixtures
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_app_config():
|
||||
"""Provide a minimal AppConfig mock."""
|
||||
@@ -45,6 +46,7 @@ def client(mock_app_config):
|
||||
# __init__
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestClientInit:
|
||||
def test_default_params(self, client):
|
||||
assert client._model_name is None
|
||||
@@ -86,6 +88,7 @@ class TestClientInit:
|
||||
# list_models / list_skills / get_memory
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestConfigQueries:
|
||||
def test_list_models(self, client):
|
||||
result = client.list_models()
|
||||
@@ -135,6 +138,7 @@ class TestConfigQueries:
|
||||
# stream / chat
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _make_agent_mock(chunks: list[dict]):
|
||||
"""Create a mock agent whose .stream() yields the given chunks."""
|
||||
agent = MagicMock()
|
||||
@@ -314,6 +318,7 @@ class TestChat:
|
||||
# _extract_text
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestExtractText:
|
||||
def test_string(self):
|
||||
assert DeerFlowClient._extract_text("hello") == "hello"
|
||||
@@ -340,6 +345,7 @@ class TestExtractText:
|
||||
# _ensure_agent
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestEnsureAgent:
|
||||
def test_creates_agent(self, client):
|
||||
"""_ensure_agent creates an agent on first call."""
|
||||
@@ -374,6 +380,7 @@ class TestEnsureAgent:
|
||||
# get_model
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestGetModel:
|
||||
def test_found(self, client):
|
||||
model_cfg = MagicMock()
|
||||
@@ -402,6 +409,7 @@ class TestGetModel:
|
||||
# MCP config
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestMcpConfig:
|
||||
def test_get_mcp_config(self, client):
|
||||
server = MagicMock()
|
||||
@@ -457,6 +465,7 @@ class TestMcpConfig:
|
||||
# Skills management
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestSkillsManagement:
|
||||
def _make_skill(self, name="test-skill", enabled=True):
|
||||
s = MagicMock()
|
||||
@@ -556,6 +565,7 @@ class TestSkillsManagement:
|
||||
# Memory management
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestMemoryManagement:
|
||||
def test_reload_memory(self, client):
|
||||
data = {"version": "1.0", "facts": []}
|
||||
@@ -605,6 +615,7 @@ class TestMemoryManagement:
|
||||
# Uploads
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestUploads:
|
||||
def test_upload_files(self, client):
|
||||
with tempfile.TemporaryDirectory() as tmp:
|
||||
@@ -678,6 +689,7 @@ class TestUploads:
|
||||
# Artifacts
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestArtifacts:
|
||||
def test_get_artifact(self, client):
|
||||
with tempfile.TemporaryDirectory() as tmp:
|
||||
@@ -759,9 +771,13 @@ class TestScenarioMultiTurnConversation:
|
||||
|
||||
def test_stream_collects_all_event_types_across_turns(self, client):
|
||||
"""A full turn emits messages-tuple (tool_call, tool_result, ai text) + values + end."""
|
||||
ai_tc = AIMessage(content="", id="ai-1", tool_calls=[
|
||||
{"name": "web_search", "args": {"query": "LangGraph"}, "id": "tc-1"},
|
||||
])
|
||||
ai_tc = AIMessage(
|
||||
content="",
|
||||
id="ai-1",
|
||||
tool_calls=[
|
||||
{"name": "web_search", "args": {"query": "LangGraph"}, "id": "tc-1"},
|
||||
],
|
||||
)
|
||||
tool_r = ToolMessage(content="LangGraph is a framework...", id="tm-1", tool_call_id="tc-1", name="web_search")
|
||||
ai_final = AIMessage(content="LangGraph is a framework for building agents.", id="ai-2")
|
||||
|
||||
@@ -809,13 +825,21 @@ class TestScenarioToolChain:
|
||||
|
||||
def test_multi_tool_chain(self, client):
|
||||
"""Agent calls bash → reads output → calls write_file → responds."""
|
||||
ai_bash = AIMessage(content="", id="ai-1", tool_calls=[
|
||||
{"name": "bash", "args": {"cmd": "ls /mnt/user-data/workspace"}, "id": "tc-1"},
|
||||
])
|
||||
ai_bash = AIMessage(
|
||||
content="",
|
||||
id="ai-1",
|
||||
tool_calls=[
|
||||
{"name": "bash", "args": {"cmd": "ls /mnt/user-data/workspace"}, "id": "tc-1"},
|
||||
],
|
||||
)
|
||||
bash_result = ToolMessage(content="README.md\nsrc/", id="tm-1", tool_call_id="tc-1", name="bash")
|
||||
ai_write = AIMessage(content="", id="ai-2", tool_calls=[
|
||||
{"name": "write_file", "args": {"path": "/mnt/user-data/outputs/listing.txt", "content": "README.md\nsrc/"}, "id": "tc-2"},
|
||||
])
|
||||
ai_write = AIMessage(
|
||||
content="",
|
||||
id="ai-2",
|
||||
tool_calls=[
|
||||
{"name": "write_file", "args": {"path": "/mnt/user-data/outputs/listing.txt", "content": "README.md\nsrc/"}, "id": "tc-2"},
|
||||
],
|
||||
)
|
||||
write_result = ToolMessage(content="File written successfully.", id="tm-2", tool_call_id="tc-2", name="write_file")
|
||||
ai_final = AIMessage(content="I listed the workspace and saved the output.", id="ai-3")
|
||||
|
||||
@@ -862,10 +886,13 @@ class TestScenarioFileLifecycle:
|
||||
|
||||
with patch.object(DeerFlowClient, "_get_uploads_dir", return_value=uploads_dir):
|
||||
# Step 1: Upload
|
||||
result = client.upload_files("t-lifecycle", [
|
||||
tmp_path / "report.txt",
|
||||
tmp_path / "data.csv",
|
||||
])
|
||||
result = client.upload_files(
|
||||
"t-lifecycle",
|
||||
[
|
||||
tmp_path / "report.txt",
|
||||
tmp_path / "data.csv",
|
||||
],
|
||||
)
|
||||
assert result["success"] is True
|
||||
assert len(result["files"]) == 2
|
||||
assert {f["filename"] for f in result["files"]} == {"report.txt", "data.csv"}
|
||||
@@ -1166,10 +1193,13 @@ class TestScenarioMemoryWorkflow:
|
||||
def test_memory_full_lifecycle(self, client):
|
||||
"""get_memory → reload → get_status covers the full memory API."""
|
||||
initial_data = {"version": "1.0", "facts": [{"id": "f1", "content": "User likes Python"}]}
|
||||
updated_data = {"version": "1.0", "facts": [
|
||||
{"id": "f1", "content": "User likes Python"},
|
||||
{"id": "f2", "content": "User prefers dark mode"},
|
||||
]}
|
||||
updated_data = {
|
||||
"version": "1.0",
|
||||
"facts": [
|
||||
{"id": "f1", "content": "User likes Python"},
|
||||
{"id": "f2", "content": "User prefers dark mode"},
|
||||
],
|
||||
}
|
||||
|
||||
config = MagicMock()
|
||||
config.enabled = True
|
||||
@@ -1208,9 +1238,7 @@ class TestScenarioSkillInstallAndUse:
|
||||
# Create .skill archive
|
||||
skill_src = tmp_path / "my-analyzer"
|
||||
skill_src.mkdir()
|
||||
(skill_src / "SKILL.md").write_text(
|
||||
"---\nname: my-analyzer\ndescription: Analyze code\nlicense: MIT\n---\nAnalysis skill"
|
||||
)
|
||||
(skill_src / "SKILL.md").write_text("---\nname: my-analyzer\ndescription: Analyze code\nlicense: MIT\n---\nAnalysis skill")
|
||||
archive = tmp_path / "my-analyzer.skill"
|
||||
with zipfile.ZipFile(archive, "w") as zf:
|
||||
zf.write(skill_src / "SKILL.md", "my-analyzer/SKILL.md")
|
||||
@@ -1319,11 +1347,15 @@ class TestScenarioEdgeCases:
|
||||
|
||||
def test_concurrent_tool_calls_in_single_message(self, client):
|
||||
"""Agent produces multiple tool_calls in one AIMessage — emitted as single messages-tuple."""
|
||||
ai = AIMessage(content="", id="ai-1", tool_calls=[
|
||||
{"name": "web_search", "args": {"q": "a"}, "id": "tc-1"},
|
||||
{"name": "web_search", "args": {"q": "b"}, "id": "tc-2"},
|
||||
{"name": "bash", "args": {"cmd": "echo hi"}, "id": "tc-3"},
|
||||
])
|
||||
ai = AIMessage(
|
||||
content="",
|
||||
id="ai-1",
|
||||
tool_calls=[
|
||||
{"name": "web_search", "args": {"q": "a"}, "id": "tc-1"},
|
||||
{"name": "web_search", "args": {"q": "b"}, "id": "tc-2"},
|
||||
{"name": "bash", "args": {"cmd": "echo hi"}, "id": "tc-3"},
|
||||
],
|
||||
)
|
||||
chunks = [{"messages": [ai]}]
|
||||
agent = _make_agent_mock(chunks)
|
||||
|
||||
@@ -1367,6 +1399,7 @@ class TestScenarioEdgeCases:
|
||||
# Gateway conformance — validate client output against Gateway Pydantic models
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestGatewayConformance:
|
||||
"""Validate that DeerFlowClient return dicts conform to Gateway Pydantic response models.
|
||||
|
||||
@@ -1441,9 +1474,7 @@ class TestGatewayConformance:
|
||||
def test_install_skill(self, client, tmp_path):
|
||||
skill_dir = tmp_path / "my-skill"
|
||||
skill_dir.mkdir()
|
||||
(skill_dir / "SKILL.md").write_text(
|
||||
"---\nname: my-skill\ndescription: A test skill\n---\nBody\n"
|
||||
)
|
||||
(skill_dir / "SKILL.md").write_text("---\nname: my-skill\ndescription: A test skill\n---\nBody\n")
|
||||
|
||||
archive = tmp_path / "my-skill.skill"
|
||||
with zipfile.ZipFile(archive, "w") as zf:
|
||||
|
||||
@@ -125,7 +125,7 @@ class TestInfoQuestClient:
|
||||
|
||||
def test_clean_results_with_image_search(self):
|
||||
"""Test clean_results_with_image_search method with sample raw results."""
|
||||
raw_results = [{"content": {"results": {"images_results": [{"image_url": "https://example.com/image1.jpg", "thumbnail_url": "https://example.com/thumb1.jpg","url": "https://example.com/page1"}]}}}]
|
||||
raw_results = [{"content": {"results": {"images_results": [{"image_url": "https://example.com/image1.jpg", "thumbnail_url": "https://example.com/thumb1.jpg", "url": "https://example.com/page1"}]}}}]
|
||||
cleaned = InfoQuestClient.clean_results_with_image_search(raw_results)
|
||||
|
||||
assert len(cleaned) == 1
|
||||
@@ -181,4 +181,4 @@ class TestInfoQuestClient:
|
||||
client = InfoQuestClient()
|
||||
result = client.web_search("test query")
|
||||
|
||||
assert "Error" in result
|
||||
assert "Error" in result
|
||||
|
||||
@@ -16,14 +16,7 @@ from src.agents.middlewares.memory_middleware import _filter_messages_for_memory
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_UPLOAD_BLOCK = (
|
||||
"<uploaded_files>\n"
|
||||
"The following files have been uploaded and are available for use:\n\n"
|
||||
"- filename: secret.txt\n"
|
||||
" path: /mnt/user-data/uploads/abc123/secret.txt\n"
|
||||
" size: 42 bytes\n"
|
||||
"</uploaded_files>"
|
||||
)
|
||||
_UPLOAD_BLOCK = "<uploaded_files>\nThe following files have been uploaded and are available for use:\n\n- filename: secret.txt\n path: /mnt/user-data/uploads/abc123/secret.txt\n size: 42 bytes\n</uploaded_files>"
|
||||
|
||||
|
||||
def _human(text: str) -> HumanMessage:
|
||||
@@ -103,7 +96,7 @@ class TestFilterMessagesForMemory:
|
||||
msgs = [
|
||||
_human("Hello, how are you?"),
|
||||
_ai("I'm doing well, thank you!"),
|
||||
_human(_UPLOAD_BLOCK), # upload-only → dropped
|
||||
_human(_UPLOAD_BLOCK), # upload-only → dropped
|
||||
_ai("I read the uploaded file."), # paired AI → dropped
|
||||
_human("What is 2 + 2?"),
|
||||
_ai("4"),
|
||||
@@ -122,9 +115,11 @@ class TestFilterMessagesForMemory:
|
||||
|
||||
def test_multimodal_content_list_handled(self):
|
||||
"""Human messages with list-style content (multimodal) are handled."""
|
||||
msg = HumanMessage(content=[
|
||||
{"type": "text", "text": _UPLOAD_BLOCK},
|
||||
])
|
||||
msg = HumanMessage(
|
||||
content=[
|
||||
{"type": "text", "text": _UPLOAD_BLOCK},
|
||||
]
|
||||
)
|
||||
msgs = [msg, _ai("Done.")]
|
||||
result = _filter_messages_for_memory(msgs)
|
||||
assert result == []
|
||||
@@ -134,9 +129,7 @@ class TestFilterMessagesForMemory:
|
||||
combined = _UPLOAD_BLOCK + "\n\nSummarise the file please."
|
||||
msgs = [_human(combined), _ai("It says hello.")]
|
||||
result = _filter_messages_for_memory(msgs)
|
||||
all_content = " ".join(
|
||||
m.content for m in result if isinstance(m.content, str)
|
||||
)
|
||||
all_content = " ".join(m.content for m in result if isinstance(m.content, str))
|
||||
assert "/mnt/user-data/uploads/" not in all_content
|
||||
assert "<uploaded_files>" not in all_content
|
||||
|
||||
@@ -157,11 +150,7 @@ class TestStripUploadMentionsFromMemory:
|
||||
# --- summaries ---
|
||||
|
||||
def test_upload_event_sentence_removed_from_summary(self):
|
||||
mem = self._make_memory(
|
||||
"User is interested in AI. "
|
||||
"User uploaded a test file for verification purposes. "
|
||||
"User prefers concise answers."
|
||||
)
|
||||
mem = self._make_memory("User is interested in AI. User uploaded a test file for verification purposes. User prefers concise answers.")
|
||||
result = _strip_upload_mentions_from_memory(mem)
|
||||
summary = result["user"]["topOfMind"]["summary"]
|
||||
assert "uploaded a test file" not in summary
|
||||
@@ -169,11 +158,7 @@ class TestStripUploadMentionsFromMemory:
|
||||
assert "User prefers concise answers" in summary
|
||||
|
||||
def test_upload_path_sentence_removed_from_summary(self):
|
||||
mem = self._make_memory(
|
||||
"User uses Python. "
|
||||
"User uploaded file to /mnt/user-data/uploads/tid/data.csv. "
|
||||
"User likes clean code."
|
||||
)
|
||||
mem = self._make_memory("User uses Python. User uploaded file to /mnt/user-data/uploads/tid/data.csv. User likes clean code.")
|
||||
result = _strip_upload_mentions_from_memory(mem)
|
||||
summary = result["user"]["topOfMind"]["summary"]
|
||||
assert "/mnt/user-data/uploads/" not in summary
|
||||
@@ -193,10 +178,7 @@ class TestStripUploadMentionsFromMemory:
|
||||
|
||||
def test_uploading_a_test_file_removed(self):
|
||||
"""'uploading a test file' (with intervening words) must be caught."""
|
||||
mem = self._make_memory(
|
||||
"User conducted a hands-on test by uploading a test file titled "
|
||||
"'test_deerflow_memory_bug.txt'. User is also learning Python."
|
||||
)
|
||||
mem = self._make_memory("User conducted a hands-on test by uploading a test file titled 'test_deerflow_memory_bug.txt'. User is also learning Python.")
|
||||
result = _strip_upload_mentions_from_memory(mem)
|
||||
summary = result["user"]["topOfMind"]["summary"]
|
||||
assert "test_deerflow_memory_bug.txt" not in summary
|
||||
|
||||
@@ -3,9 +3,7 @@
|
||||
import importlib
|
||||
from types import SimpleNamespace
|
||||
|
||||
present_file_tool_module = importlib.import_module(
|
||||
"src.tools.builtins.present_file_tool"
|
||||
)
|
||||
present_file_tool_module = importlib.import_module("src.tools.builtins.present_file_tool")
|
||||
|
||||
|
||||
def _make_runtime(outputs_path: str) -> SimpleNamespace:
|
||||
@@ -40,9 +38,7 @@ def test_present_files_keeps_virtual_outputs_path(tmp_path, monkeypatch):
|
||||
monkeypatch.setattr(
|
||||
present_file_tool_module,
|
||||
"get_paths",
|
||||
lambda: SimpleNamespace(
|
||||
resolve_virtual_path=lambda thread_id, path: artifact_path
|
||||
),
|
||||
lambda: SimpleNamespace(resolve_virtual_path=lambda thread_id, path: artifact_path),
|
||||
)
|
||||
|
||||
result = present_file_tool_module.present_file_tool.func(
|
||||
@@ -69,7 +65,4 @@ def test_present_files_rejects_paths_outside_outputs(tmp_path):
|
||||
)
|
||||
|
||||
assert "artifacts" not in result.update
|
||||
assert (
|
||||
result.update["messages"][0].content
|
||||
== f"Error: Only files in /mnt/user-data/outputs can be presented: {leaked_path}"
|
||||
)
|
||||
assert result.update["messages"][0].content == f"Error: Only files in /mnt/user-data/outputs can be presented: {leaked_path}"
|
||||
|
||||
@@ -8,6 +8,7 @@ from src.reflection.resolvers import resolve_variable
|
||||
|
||||
def test_resolve_variable_reports_install_hint_for_missing_google_provider(monkeypatch: pytest.MonkeyPatch):
|
||||
"""Missing google provider should return actionable install guidance."""
|
||||
|
||||
def fake_import_module(module_path: str):
|
||||
raise ModuleNotFoundError(f"No module named '{module_path}'", name=module_path)
|
||||
|
||||
@@ -38,6 +39,8 @@ def test_resolve_variable_reports_install_hint_for_missing_google_transitive_dep
|
||||
message = str(exc_info.value)
|
||||
# Even when a transitive dependency is missing, the hint should still point to the provider package.
|
||||
assert "uv add langchain-google-genai" in message
|
||||
|
||||
|
||||
def test_resolve_variable_invalid_path_format():
|
||||
"""Invalid variable path should fail with format guidance."""
|
||||
with pytest.raises(ImportError) as exc_info:
|
||||
|
||||
@@ -5,22 +5,22 @@ from src.gateway.routers import suggestions
|
||||
|
||||
|
||||
def test_strip_markdown_code_fence_removes_wrapping():
|
||||
text = "```json\n[\"a\"]\n```"
|
||||
assert suggestions._strip_markdown_code_fence(text) == "[\"a\"]"
|
||||
text = '```json\n["a"]\n```'
|
||||
assert suggestions._strip_markdown_code_fence(text) == '["a"]'
|
||||
|
||||
|
||||
def test_strip_markdown_code_fence_no_fence_keeps_content():
|
||||
text = " [\"a\"] "
|
||||
assert suggestions._strip_markdown_code_fence(text) == "[\"a\"]"
|
||||
text = ' ["a"] '
|
||||
assert suggestions._strip_markdown_code_fence(text) == '["a"]'
|
||||
|
||||
|
||||
def test_parse_json_string_list_filters_invalid_items():
|
||||
text = "```json\n[\"a\", \" \", 1, \"b\"]\n```"
|
||||
text = '```json\n["a", " ", 1, "b"]\n```'
|
||||
assert suggestions._parse_json_string_list(text) == ["a", "b"]
|
||||
|
||||
|
||||
def test_parse_json_string_list_rejects_non_list():
|
||||
text = "{\"a\": 1}"
|
||||
text = '{"a": 1}'
|
||||
assert suggestions._parse_json_string_list(text) is None
|
||||
|
||||
|
||||
@@ -43,7 +43,7 @@ def test_generate_suggestions_parses_and_limits(monkeypatch):
|
||||
model_name=None,
|
||||
)
|
||||
fake_model = MagicMock()
|
||||
fake_model.invoke.return_value = MagicMock(content="```json\n[\"Q1\", \"Q2\", \"Q3\", \"Q4\"]\n```")
|
||||
fake_model.invoke.return_value = MagicMock(content='```json\n["Q1", "Q2", "Q3", "Q4"]\n```')
|
||||
monkeypatch.setattr(suggestions, "create_chat_model", lambda **kwargs: fake_model)
|
||||
|
||||
result = asyncio.run(suggestions.generate_suggestions("t1", req))
|
||||
@@ -63,4 +63,4 @@ def test_generate_suggestions_returns_empty_on_model_error(monkeypatch):
|
||||
|
||||
result = asyncio.run(suggestions.generate_suggestions("t1", req))
|
||||
|
||||
assert result.suggestions == []
|
||||
assert result.suggestions == []
|
||||
|
||||
@@ -21,7 +21,6 @@ def test_upload_files_writes_thread_storage_and_skips_local_sandbox_sync(tmp_pat
|
||||
patch.object(uploads, "get_uploads_dir", return_value=thread_uploads_dir),
|
||||
patch.object(uploads, "get_sandbox_provider", return_value=provider),
|
||||
):
|
||||
|
||||
file = UploadFile(filename="notes.txt", file=BytesIO(b"hello uploads"))
|
||||
result = asyncio.run(uploads.upload_files("thread-local", files=[file]))
|
||||
|
||||
@@ -52,7 +51,6 @@ def test_upload_files_syncs_non_local_sandbox_and_marks_markdown_file(tmp_path):
|
||||
patch.object(uploads, "get_sandbox_provider", return_value=provider),
|
||||
patch.object(uploads, "convert_file_to_markdown", AsyncMock(side_effect=fake_convert)),
|
||||
):
|
||||
|
||||
file = UploadFile(filename="report.pdf", file=BytesIO(b"pdf-bytes"))
|
||||
result = asyncio.run(uploads.upload_files("thread-aio", files=[file]))
|
||||
|
||||
|
||||
86
backend/uv.lock
generated
86
backend/uv.lock
generated
@@ -662,12 +662,17 @@ dependencies = [
|
||||
{ name = "langgraph-checkpoint-sqlite" },
|
||||
{ name = "langgraph-cli" },
|
||||
{ name = "langgraph-runtime-inmem" },
|
||||
{ name = "langgraph-sdk" },
|
||||
{ name = "lark-oapi" },
|
||||
{ name = "markdown-to-mrkdwn" },
|
||||
{ name = "markdownify" },
|
||||
{ name = "markitdown", extra = ["all", "xlsx"] },
|
||||
{ name = "pydantic" },
|
||||
{ name = "python-multipart" },
|
||||
{ name = "python-telegram-bot" },
|
||||
{ name = "pyyaml" },
|
||||
{ name = "readabilipy" },
|
||||
{ name = "slack-sdk" },
|
||||
{ name = "sse-starlette" },
|
||||
{ name = "tavily-python" },
|
||||
{ name = "tiktoken" },
|
||||
@@ -701,12 +706,17 @@ requires-dist = [
|
||||
{ name = "langgraph-checkpoint-sqlite", specifier = ">=3.0.3" },
|
||||
{ name = "langgraph-cli", specifier = ">=0.4.14" },
|
||||
{ name = "langgraph-runtime-inmem", specifier = ">=0.22.1" },
|
||||
{ name = "langgraph-sdk", specifier = ">=0.1.51" },
|
||||
{ name = "lark-oapi", specifier = ">=1.4.0" },
|
||||
{ name = "markdown-to-mrkdwn", specifier = ">=0.3.1" },
|
||||
{ name = "markdownify", specifier = ">=1.2.2" },
|
||||
{ name = "markitdown", extras = ["all", "xlsx"], specifier = ">=0.0.1a2" },
|
||||
{ name = "pydantic", specifier = ">=2.12.5" },
|
||||
{ name = "python-multipart", specifier = ">=0.0.20" },
|
||||
{ name = "python-telegram-bot", specifier = ">=21.0" },
|
||||
{ name = "pyyaml", specifier = ">=6.0.3" },
|
||||
{ name = "readabilipy", specifier = ">=0.3.0" },
|
||||
{ name = "slack-sdk", specifier = ">=3.33.0" },
|
||||
{ name = "sse-starlette", specifier = ">=2.1.0" },
|
||||
{ name = "tavily-python", specifier = ">=0.7.17" },
|
||||
{ name = "tiktoken", specifier = ">=0.8.0" },
|
||||
@@ -1715,6 +1725,21 @@ otel = [
|
||||
{ name = "opentelemetry-sdk" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lark-oapi"
|
||||
version = "1.5.3"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "httpx" },
|
||||
{ name = "pycryptodome" },
|
||||
{ name = "requests" },
|
||||
{ name = "requests-toolbelt" },
|
||||
{ name = "websockets" },
|
||||
]
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/bf/ff/2ece5d735ebfa2af600a53176f2636ae47af2bf934e08effab64f0d1e047/lark_oapi-1.5.3-py3-none-any.whl", hash = "sha256:fda6b32bb38d21b6bdaae94979c600b94c7c521e985adade63a54e4b3e20cc36", size = 6993016, upload-time = "2026-01-27T08:21:49.307Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lxml"
|
||||
version = "6.0.2"
|
||||
@@ -1825,6 +1850,15 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/ca/54/2e39566a131b13f6d8d193f974cb6a34e81bb7cc2fa6f7e03de067b36588/mammoth-1.11.0-py2.py3-none-any.whl", hash = "sha256:c077ab0d450bd7c0c6ecd529a23bf7e0fa8190c929e28998308ff4eada3f063b", size = 54752, upload-time = "2025-09-19T10:35:18.699Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "markdown-to-mrkdwn"
|
||||
version = "0.3.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/17/8e/f2c62a88097425b0dba3a8699d13154b4c5888b989ffaf6419c10058b338/markdown_to_mrkdwn-0.3.1.tar.gz", hash = "sha256:25f5c095516f8ad956c88c5dab75493aadfaa02e51e3c84459490058a8ca840b", size = 14191, upload-time = "2026-01-05T14:37:29.276Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/52/92/ce0a08fb9769a13be550a7079c3409300ca6eb14ccc9038f67ac44deeef4/markdown_to_mrkdwn-0.3.1-py3-none-any.whl", hash = "sha256:5a6d08f1eaa08aea66953ef0eba206e0bb244d5c62880c76d1e3a11ee46cd3f0", size = 13592, upload-time = "2026-01-05T14:37:28.21Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "markdownify"
|
||||
version = "1.2.2"
|
||||
@@ -2666,6 +2700,36 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pycryptodome"
|
||||
version = "3.23.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/8e/a6/8452177684d5e906854776276ddd34eca30d1b1e15aa1ee9cefc289a33f5/pycryptodome-3.23.0.tar.gz", hash = "sha256:447700a657182d60338bab09fdb27518f8856aecd80ae4c6bdddb67ff5da44ef", size = 4921276, upload-time = "2025-05-17T17:21:45.242Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/04/5d/bdb09489b63cd34a976cc9e2a8d938114f7a53a74d3dd4f125ffa49dce82/pycryptodome-3.23.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:0011f7f00cdb74879142011f95133274741778abba114ceca229adbf8e62c3e4", size = 2495152, upload-time = "2025-05-17T17:20:20.833Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a7/ce/7840250ed4cc0039c433cd41715536f926d6e86ce84e904068eb3244b6a6/pycryptodome-3.23.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:90460fc9e088ce095f9ee8356722d4f10f86e5be06e2354230a9880b9c549aae", size = 1639348, upload-time = "2025-05-17T17:20:23.171Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ee/f0/991da24c55c1f688d6a3b5a11940567353f74590734ee4a64294834ae472/pycryptodome-3.23.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4764e64b269fc83b00f682c47443c2e6e85b18273712b98aa43bcb77f8570477", size = 2184033, upload-time = "2025-05-17T17:20:25.424Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/54/16/0e11882deddf00f68b68dd4e8e442ddc30641f31afeb2bc25588124ac8de/pycryptodome-3.23.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb8f24adb74984aa0e5d07a2368ad95276cf38051fe2dc6605cbcf482e04f2a7", size = 2270142, upload-time = "2025-05-17T17:20:27.808Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d5/fc/4347fea23a3f95ffb931f383ff28b3f7b1fe868739182cb76718c0da86a1/pycryptodome-3.23.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d97618c9c6684a97ef7637ba43bdf6663a2e2e77efe0f863cce97a76af396446", size = 2309384, upload-time = "2025-05-17T17:20:30.765Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6e/d9/c5261780b69ce66d8cfab25d2797bd6e82ba0241804694cd48be41add5eb/pycryptodome-3.23.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9a53a4fe5cb075075d515797d6ce2f56772ea7e6a1e5e4b96cf78a14bac3d265", size = 2183237, upload-time = "2025-05-17T17:20:33.736Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/6f/3af2ffedd5cfa08c631f89452c6648c4d779e7772dfc388c77c920ca6bbf/pycryptodome-3.23.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:763d1d74f56f031788e5d307029caef067febf890cd1f8bf61183ae142f1a77b", size = 2343898, upload-time = "2025-05-17T17:20:36.086Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/9a/dc/9060d807039ee5de6e2f260f72f3d70ac213993a804f5e67e0a73a56dd2f/pycryptodome-3.23.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:954af0e2bd7cea83ce72243b14e4fb518b18f0c1649b576d114973e2073b273d", size = 2269197, upload-time = "2025-05-17T17:20:38.414Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f9/34/e6c8ca177cb29dcc4967fef73f5de445912f93bd0343c9c33c8e5bf8cde8/pycryptodome-3.23.0-cp313-cp313t-win32.whl", hash = "sha256:257bb3572c63ad8ba40b89f6fc9d63a2a628e9f9708d31ee26560925ebe0210a", size = 1768600, upload-time = "2025-05-17T17:20:40.688Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/e4/1d/89756b8d7ff623ad0160f4539da571d1f594d21ee6d68be130a6eccb39a4/pycryptodome-3.23.0-cp313-cp313t-win_amd64.whl", hash = "sha256:6501790c5b62a29fcb227bd6b62012181d886a767ce9ed03b303d1f22eb5c625", size = 1799740, upload-time = "2025-05-17T17:20:42.413Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5d/61/35a64f0feaea9fd07f0d91209e7be91726eb48c0f1bfc6720647194071e4/pycryptodome-3.23.0-cp313-cp313t-win_arm64.whl", hash = "sha256:9a77627a330ab23ca43b48b130e202582e91cc69619947840ea4d2d1be21eb39", size = 1703685, upload-time = "2025-05-17T17:20:44.388Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/db/6c/a1f71542c969912bb0e106f64f60a56cc1f0fabecf9396f45accbe63fa68/pycryptodome-3.23.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:187058ab80b3281b1de11c2e6842a357a1f71b42cb1e15bce373f3d238135c27", size = 2495627, upload-time = "2025-05-17T17:20:47.139Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6e/4e/a066527e079fc5002390c8acdd3aca431e6ea0a50ffd7201551175b47323/pycryptodome-3.23.0-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:cfb5cd445280c5b0a4e6187a7ce8de5a07b5f3f897f235caa11f1f435f182843", size = 1640362, upload-time = "2025-05-17T17:20:50.392Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/50/52/adaf4c8c100a8c49d2bd058e5b551f73dfd8cb89eb4911e25a0c469b6b4e/pycryptodome-3.23.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67bd81fcbe34f43ad9422ee8fd4843c8e7198dd88dd3d40e6de42ee65fbe1490", size = 2182625, upload-time = "2025-05-17T17:20:52.866Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5f/e9/a09476d436d0ff1402ac3867d933c61805ec2326c6ea557aeeac3825604e/pycryptodome-3.23.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8987bd3307a39bc03df5c8e0e3d8be0c4c3518b7f044b0f4c15d1aa78f52575", size = 2268954, upload-time = "2025-05-17T17:20:55.027Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f9/c5/ffe6474e0c551d54cab931918127c46d70cab8f114e0c2b5a3c071c2f484/pycryptodome-3.23.0-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aa0698f65e5b570426fc31b8162ed4603b0c2841cbb9088e2b01641e3065915b", size = 2308534, upload-time = "2025-05-17T17:20:57.279Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/18/28/e199677fc15ecf43010f2463fde4c1a53015d1fe95fb03bca2890836603a/pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:53ecbafc2b55353edcebd64bf5da94a2a2cdf5090a6915bcca6eca6cc452585a", size = 2181853, upload-time = "2025-05-17T17:20:59.322Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ce/ea/4fdb09f2165ce1365c9eaefef36625583371ee514db58dc9b65d3a255c4c/pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:156df9667ad9f2ad26255926524e1c136d6664b741547deb0a86a9acf5ea631f", size = 2342465, upload-time = "2025-05-17T17:21:03.83Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/22/82/6edc3fc42fe9284aead511394bac167693fb2b0e0395b28b8bedaa07ef04/pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:dea827b4d55ee390dc89b2afe5927d4308a8b538ae91d9c6f7a5090f397af1aa", size = 2267414, upload-time = "2025-05-17T17:21:06.72Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/59/fe/aae679b64363eb78326c7fdc9d06ec3de18bac68be4b612fc1fe8902693c/pycryptodome-3.23.0-cp37-abi3-win32.whl", hash = "sha256:507dbead45474b62b2bbe318eb1c4c8ee641077532067fec9c1aa82c31f84886", size = 1768484, upload-time = "2025-05-17T17:21:08.535Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/54/2f/e97a1b8294db0daaa87012c24a7bb714147c7ade7656973fd6c736b484ff/pycryptodome-3.23.0-cp37-abi3-win_amd64.whl", hash = "sha256:c75b52aacc6c0c260f204cbdd834f76edc9fb0d8e0da9fbf8352ef58202564e2", size = 1799636, upload-time = "2025-05-17T17:21:10.393Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/18/3d/f9441a0d798bf2b1e645adc3265e55706aead1255ccdad3856dbdcffec14/pycryptodome-3.23.0-cp37-abi3-win_arm64.whl", hash = "sha256:11eeeb6917903876f134b56ba11abe95c0b0fd5e3330def218083c7d98bbcb3c", size = 1703675, upload-time = "2025-05-17T17:21:13.146Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pydantic"
|
||||
version = "2.12.5"
|
||||
@@ -2897,6 +2961,19 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/d9/4f/00be2196329ebbff56ce564aa94efb0fbc828d00de250b1980de1a34ab49/python_pptx-1.0.2-py3-none-any.whl", hash = "sha256:160838e0b8565a8b1f67947675886e9fea18aa5e795db7ae531606d68e785cba", size = 472788, upload-time = "2024-08-07T17:33:28.192Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "python-telegram-bot"
|
||||
version = "22.6"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "httpcore", marker = "python_full_version >= '3.14'" },
|
||||
{ name = "httpx" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/cd/9b/8df90c85404166a6631e857027866263adb27440d8af1dbeffbdc4f0166c/python_telegram_bot-22.6.tar.gz", hash = "sha256:50ae8cc10f8dff01445628687951020721f37956966b92a91df4c1bf2d113742", size = 1503761, upload-time = "2026-01-24T13:57:00.269Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/13/97/7298f0e1afe3a1ae52ff4c5af5087ed4de319ea73eb3b5c8c4dd4e76e708/python_telegram_bot-22.6-py3-none-any.whl", hash = "sha256:e598fe171c3dde2dfd0f001619ee9110eece66761a677b34719fb18934935ce0", size = 737267, upload-time = "2026-01-24T13:56:58.06Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pytz"
|
||||
version = "2026.1.post1"
|
||||
@@ -3252,6 +3329,15 @@ wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "slack-sdk"
|
||||
version = "3.40.1"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/3a/18/784859b33a3f9c8cdaa1eda4115eb9fe72a0a37304718887d12991eeb2fd/slack_sdk-3.40.1.tar.gz", hash = "sha256:a215333bc251bc90abf5f5110899497bf61a3b5184b6d9ee35d73ebf09ec3fd0", size = 250379, upload-time = "2026-02-18T22:11:01.819Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/6e/e1/bb81f93c9f403e3b573c429dd4838ec9b44e4ef35f3b0759eb49557ab6e3/slack_sdk-3.40.1-py2.py3-none-any.whl", hash = "sha256:cd8902252979aa248092b0d77f3a9ea3cc605bc5d53663ad728e892e26e14a65", size = 313687, upload-time = "2026-02-18T22:11:00.027Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sniffio"
|
||||
version = "1.3.1"
|
||||
|
||||
Reference in New Issue
Block a user