When using thinking-enabled models (like Kimi K2.5 or DeepSeek), the API expects reasoning_content on all assistant messages. The original ChatDeepSeek stores reasoning_content in additional_kwargs but doesn't include it when making subsequent API calls, causing "reasoning_content is missing" errors.

This adds PatchedChatDeepSeek, which overrides _get_request_payload to restore reasoning_content from additional_kwargs into the payload.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
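For illustration, a hedged sketch of the assistant entry inside the request payload before and after the patch. The field names match what the code below actually sets; the message text and reasoning trace are made up, and the exact wire format is the provider's, not taken from this source:

# What the base ChatDeepSeek would send for a prior assistant turn
# (the reasoning_content stored in additional_kwargs is dropped):
assistant_entry = {"role": "assistant", "content": "The answer is 408."}

# What PatchedChatDeepSeek sends after restoring the field:
assistant_entry = {
    "role": "assistant",
    "content": "The answer is 408.",
    "reasoning_content": "17 * 24 = 17 * 20 + 17 * 4 = 340 + 68 = 408.",
}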
"""Patched ChatDeepSeek that preserves reasoning_content in multi-turn conversations.
|
|
|
|
This module provides a patched version of ChatDeepSeek that properly handles
|
|
reasoning_content when sending messages back to the API. The original implementation
|
|
stores reasoning_content in additional_kwargs but doesn't include it when making
|
|
subsequent API calls, which causes errors with APIs that require reasoning_content
|
|
on all assistant messages when thinking mode is enabled.
|
|
"""
|
|
|
|
from typing import Any
|
|
|
|
from langchain_core.language_models import LanguageModelInput
|
|
from langchain_core.messages import AIMessage
|
|
from langchain_deepseek import ChatDeepSeek
|
|
|
|
|
|
class PatchedChatDeepSeek(ChatDeepSeek):
|
|
"""ChatDeepSeek with proper reasoning_content preservation.
|
|
|
|
When using thinking/reasoning enabled models, the API expects reasoning_content
|
|
to be present on ALL assistant messages in multi-turn conversations. This patched
|
|
version ensures reasoning_content from additional_kwargs is included in the
|
|
request payload.
|
|
"""
|
|
|
|
def _get_request_payload(
|
|
self,
|
|
input_: LanguageModelInput,
|
|
*,
|
|
stop: list[str] | None = None,
|
|
**kwargs: Any,
|
|
) -> dict:
|
|
"""Get request payload with reasoning_content preserved.
|
|
|
|
Overrides the parent method to inject reasoning_content from
|
|
additional_kwargs into assistant messages in the payload.
|
|
"""
|
|
# Get the original messages before conversion
|
|
original_messages = self._convert_input(input_).to_messages()
|
|
|
|
# Call parent to get the base payload
|
|
payload = super()._get_request_payload(input_, stop=stop, **kwargs)
|
|
|
|
# Match payload messages with original messages to restore reasoning_content
|
|
payload_messages = payload.get("messages", [])
|
|
|
|
# The payload messages and original messages should be in the same order
|
|
# Iterate through both and match by position
|
|
if len(payload_messages) == len(original_messages):
|
|
for payload_msg, orig_msg in zip(payload_messages, original_messages):
|
|
if (
|
|
payload_msg.get("role") == "assistant"
|
|
and isinstance(orig_msg, AIMessage)
|
|
):
|
|
reasoning_content = orig_msg.additional_kwargs.get("reasoning_content")
|
|
if reasoning_content is not None:
|
|
payload_msg["reasoning_content"] = reasoning_content
|
|
else:
|
|
# Fallback: match by counting assistant messages
|
|
ai_messages = [m for m in original_messages if isinstance(m, AIMessage)]
|
|
assistant_payloads = [
|
|
(i, m) for i, m in enumerate(payload_messages)
|
|
if m.get("role") == "assistant"
|
|
]
|
|
|
|
for (idx, payload_msg), ai_msg in zip(assistant_payloads, ai_messages):
|
|
reasoning_content = ai_msg.additional_kwargs.get("reasoning_content")
|
|
if reasoning_content is not None:
|
|
payload_messages[idx]["reasoning_content"] = reasoning_content
|
|
|
|
return payload
|
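A minimal usage sketch, assuming the class above lives in a module named patched_chat_deepseek (hypothetical path) and that the configured model has thinking mode enabled; the model name below is illustrative:

from patched_chat_deepseek import PatchedChatDeepSeek  # hypothetical module path

llm = PatchedChatDeepSeek(
    model="deepseek-reasoner",  # illustrative thinking-enabled model
    api_key="sk-...",           # your provider API key
)

# First turn: the reply carries its reasoning trace in
# additional_kwargs["reasoning_content"].
first = llm.invoke("What is 17 * 24?")

# Second turn: feeding the prior AIMessage back would normally drop
# reasoning_content from the request; the override above restores it,
# avoiding the "reasoning_content is missing" error on thinking models.
followup = llm.invoke(
    [
        ("human", "What is 17 * 24?"),
        first,
        ("human", "Now divide that by 8."),
    ]
)
print(followup.content)

The drop-in subclass is the point of the design: call sites only change the constructor, and every code path that builds a request payload picks up the fix.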