feat(context): decrease token in web_search AIMessage (#827)
This PR addresses token-limit issues that arise when web_search is enabled with include_raw_content. It takes a two-pronged approach: the default is changed so raw content is excluded, and compression logic is added for runs where raw content is explicitly enabled.
@@ -37,13 +37,13 @@ def crawl_tool(
             "error": "PDF files cannot be crawled directly. Please download and view the PDF manually.",
             "crawled_content": None,
             "is_pdf": True
-        })
+        }, ensure_ascii=False)
         return pdf_message
 
     try:
         crawler = Crawler()
         article = crawler.crawl(url)
-        return json.dumps({"url": url, "crawled_content": article.to_markdown()[:1000]})
+        return json.dumps({"url": url, "crawled_content": article.to_markdown()[:1000]}, ensure_ascii=False)
     except BaseException as e:
         error_msg = f"Failed to crawl. Error: {repr(e)}"
         logger.error(error_msg)
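A quick aside on why `ensure_ascii=False` helps with the token budget: by default `json.dumps` escapes every non-ASCII character as a six-character `\uXXXX` sequence, which inflates serialized CJK content several-fold before it ever reaches the tokenizer. A minimal, self-contained illustration (the sample payload is invented for demonstration):

```python
import json

payload = {"url": "https://example.com", "crawled_content": "深度研究框架"}

escaped = json.dumps(payload)                       # non-ASCII becomes \uXXXX
readable = json.dumps(payload, ensure_ascii=False)  # keeps the characters as-is

# Each CJK character costs six characters when escaped versus one when kept
# raw, so the escaped form is markedly longer.
print(len(escaped), len(readable))
```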
@@ -57,7 +57,7 @@ def get_web_search_tool(max_search_results: int):
     exclude_domains: Optional[List[str]] = search_config.get("exclude_domains", [])
     include_answer: bool = search_config.get("include_answer", False)
     search_depth: str = search_config.get("search_depth", "advanced")
-    include_raw_content: bool = search_config.get("include_raw_content", True)
+    include_raw_content: bool = search_config.get("include_raw_content", False)
     include_images: bool = search_config.get("include_images", True)
     include_image_descriptions: bool = include_images and search_config.get(
         "include_image_descriptions", True
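With the default flipped to `False`, deployments that still want full page text per result must opt in through the search tool configuration. A minimal sketch of that opt-in, assuming `search_config` is the same dict the function above reads from (the surrounding config format is not shown in this diff):

```python
# Assumption: search_config is loaded from the project's search settings;
# only keys visible in the diff above are used here.
search_config = {
    "include_raw_content": True,  # opt back into raw page content per result
}

# Mirrors the new default in get_web_search_tool: absent the key, raw
# content is excluded, which keeps web_search ToolMessages small.
include_raw_content: bool = search_config.get("include_raw_content", False)
```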
@@ -188,77 +188,86 @@ class ContextManager:
 
     def _compress_messages(self, messages: List[BaseMessage]) -> List[BaseMessage]:
         """
-        Compress compressible messages
+        Compress messages to fit within token limit through two strategies:
+        1. First, compress web_search ToolMessage raw_content by truncating to 1024 chars
+        2. If still over limit, drop oldest messages while preserving prefix messages and system messages
 
         Args:
             messages: List of messages to compress
 
         Returns:
-            Compressed message list
+            List of messages with compressed content and/or dropped messages
         """
-        available_token = self.token_limit
-        prefix_messages = []
-        # 1. Preserve head messages of specified length to retain system prompts and user input
-        for i in range(min(self.preserve_prefix_message_count, len(messages))):
-            cur_token_cnt = self._count_message_tokens(messages[i])
-            if available_token > 0 and available_token >= cur_token_cnt:
-                prefix_messages.append(messages[i])
-                available_token -= cur_token_cnt
-            elif available_token > 0:
-                # Truncate content to fit available tokens
-                truncated_message = self._truncate_message_content(
-                    messages[i], available_token
-                )
-                prefix_messages.append(truncated_message)
-                return prefix_messages
-            else:
-                break
-
-        # 2. Compress subsequent messages from the tail, some messages may be discarded
-        messages = messages[len(prefix_messages) :]
-        suffix_messages = []
-        for i in range(len(messages) - 1, -1, -1):
-            cur_token_cnt = self._count_message_tokens(messages[i])
-            if cur_token_cnt > 0 and available_token >= cur_token_cnt:
-                suffix_messages = [messages[i]] + suffix_messages
-                available_token -= cur_token_cnt
-            elif available_token > 0:
-                # Truncate content to fit available tokens
-                truncated_message = self._truncate_message_content(
-                    messages[i], available_token
-                )
-                suffix_messages = [truncated_message] + suffix_messages
-                return prefix_messages + suffix_messages
-            else:
-                break
-
-        return prefix_messages + suffix_messages
-
-    def _truncate_message_content(
-        self, message: BaseMessage, max_tokens: int
-    ) -> BaseMessage:
-        """
-        Truncate message content while preserving all other attributes by copying the original message
-        and only modifying its content attribute.
-
-        Args:
-            message: The message to truncate
-            max_tokens: Maximum number of tokens to keep
-
-        Returns:
-            New message instance with truncated content
-        """
-
-        # Create a deep copy of the original message to preserve all attributes
-        truncated_message = copy.deepcopy(message)
-
-        # Truncate only the content attribute
-        truncated_message.content = message.content[:max_tokens]
-
-        return truncated_message
+        # Create a deep copy to avoid mutating original messages
+        compressed = copy.deepcopy(messages)
+
+        # Step 1: Compress raw_content in web_search ToolMessages
+        for msg in compressed:
+            # Only compress ToolMessage with name 'web_search'
+            if isinstance(msg, ToolMessage) and getattr(msg, "name", None) == "web_search":
+                try:
+                    # Determine content type and check if compression is needed
+                    if isinstance(msg.content, str):
+                        # Early exit if content is small enough (avoid JSON parsing overhead)
+                        # A heuristic: if string is less than 2KB, raw_content likely doesn't need truncation
+                        if len(msg.content) < 2048:
+                            continue
+                        try:
+                            content_data = json.loads(msg.content)
+                        except json.JSONDecodeError as e:
+                            logger.error(f"Failed to parse JSON content in web_search ToolMessage: {e}. Content: {msg.content[:200]}")
+                            continue
+                    elif isinstance(msg.content, list):
+                        content_data = copy.deepcopy(msg.content)
+                    else:
+                        continue
+
+                    # Compress raw_content in the content (item by item processing)
+                    # Track if any modifications were made
+                    modified = False
+                    if isinstance(content_data, list):
+                        for item in content_data:
+                            if isinstance(item, dict) and "raw_content" in item:
+                                raw_content = item.get("raw_content")
+                                if raw_content and isinstance(raw_content, str) and len(raw_content) > 1024:
+                                    item["raw_content"] = raw_content[:1024]
+                                    modified = True
+
+                    # Update message content with modified data only if changes were made
+                    if modified:
+                        msg.content = json.dumps(content_data, ensure_ascii=False)
+                except Exception as e:
+                    logger.error(f"Unexpected error during message compression: {e}")
+                    continue
+
+        # Step 2: If still over limit after raw_content compression, drop oldest messages
+        # while preserving prefix messages (e.g., system message) and recent messages
+        if self.is_over_limit(compressed):
+            # Identify messages to preserve at the beginning
+            preserved_count = self.preserve_prefix_message_count
+            preserved_messages = compressed[:preserved_count]
+            remaining_messages = compressed[preserved_count:]
+
+            # Drop messages from the middle, keeping the most recent ones
+            result_messages = preserved_messages
+            for msg in reversed(remaining_messages):
+                result_messages.insert(len(preserved_messages), msg)
+                if not self.is_over_limit(result_messages):
+                    break
+
+            compressed = result_messages
+
+        # Step 3: Verify that compression was successful and log warning if needed
+        if self.is_over_limit(compressed):
+            current_tokens = self.count_tokens(compressed)
+            logger.warning(
+                f"Message compression failed to bring tokens below limit: "
+                f"{current_tokens} > {self.token_limit} tokens. "
+                f"Total messages: {len(compressed)}. "
+                f"Consider increasing token_limit or preserve_prefix_message_count."
+            )
+
+        return compressed
 
     def _create_summary_message(self, messages: List[BaseMessage]) -> BaseMessage:
         """
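To make the two-step strategy concrete, here is a minimal standalone sketch of Step 1 (the raw_content truncation) operating on a serialized web_search result. The function and variable names are illustrative, not taken from the repository; only the 1024-character cap and the `ensure_ascii=False` round-trip mirror the diff above:

```python
import json

RAW_CONTENT_LIMIT = 1024  # mirrors the truncation cap used in _compress_messages

def truncate_raw_content(serialized: str) -> str:
    """Hypothetical helper: cap oversized raw_content fields in a
    JSON-serialized list of web_search results."""
    results = json.loads(serialized)
    for item in results:
        raw = item.get("raw_content")
        if isinstance(raw, str) and len(raw) > RAW_CONTENT_LIMIT:
            item["raw_content"] = raw[:RAW_CONTENT_LIMIT]
    return json.dumps(results, ensure_ascii=False)

# A fabricated oversized result: 5000 chars of raw page text.
sample = json.dumps([{"title": "Example", "raw_content": "x" * 5000}])
compressed = truncate_raw_content(sample)
assert len(json.loads(compressed)[0]["raw_content"]) == RAW_CONTENT_LIMIT
```

Step 2 then reinserts messages from the most recent backwards, stopping as soon as the running list fits the budget, so the newest context survives while older middle messages are dropped.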