Mirror of https://gitee.com/wanwujie/deer-flow, synced 2026-04-02 22:02:13 +08:00
feat: add configurable log level and token usage tracking (#1301)
* feat: add configurable log level and token usage tracking
  - Add `log_level` config to control the deerflow module log level, synced to LangGraph Server via serve.sh `--server-log-level`
  - Add `token_usage.enabled` config with TokenUsageMiddleware that logs input/output/total tokens per LLM call from usage_metadata
  - Add .omc/ to .gitignore
* fix: use info level for token usage logs since the feature has its own toggle
* fix: sort imports to pass lint check

---------

Co-authored-by: greatmengqi <chenmengqi.0376@bytedance.com>
Co-authored-by: Willem Jiang <willem.jiang@gmail.com>
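The commit's token reporting reads LangChain's per-message usage metadata. As a point of reference (illustrative only, not part of this commit; the token counts are made up), this is the shape of the usage_metadata dict that chat models attach to an AIMessage and that TokenUsageMiddleware reads:

from langchain_core.messages import AIMessage

# usage_metadata is normally filled in by the chat model integration on real
# responses; it is set by hand here only to show the keys the middleware expects.
response = AIMessage(
    content="...",
    usage_metadata={"input_tokens": 1200, "output_tokens": 350, "total_tokens": 1550},
)
print(response.usage_metadata["total_tokens"])  # 1550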
.gitignore (vendored): 2 additions
@@ -1,5 +1,7 @@
 # DeerFlow docker image cache
 docker/.cache/
+# oh-my-claudecode state
+.omc/
 # OS generated files
 .DS_Store
 *.local

@@ -11,6 +11,7 @@ from deerflow.agents.middlewares.memory_middleware import MemoryMiddleware
 from deerflow.agents.middlewares.subagent_limit_middleware import SubagentLimitMiddleware
 from deerflow.agents.middlewares.title_middleware import TitleMiddleware
 from deerflow.agents.middlewares.todo_middleware import TodoMiddleware
+from deerflow.agents.middlewares.token_usage_middleware import TokenUsageMiddleware
 from deerflow.agents.middlewares.tool_error_handling_middleware import build_lead_runtime_middlewares
 from deerflow.agents.middlewares.view_image_middleware import ViewImageMiddleware
 from deerflow.agents.thread_state import ThreadState

@@ -227,6 +228,10 @@ def _build_middlewares(config: RunnableConfig, model_name: str | None, agent_nam
     if todo_list_middleware is not None:
         middlewares.append(todo_list_middleware)

+    # Add TokenUsageMiddleware when token_usage tracking is enabled
+    if get_app_config().token_usage.enabled:
+        middlewares.append(TokenUsageMiddleware())
+
     # Add TitleMiddleware
     middlewares.append(TitleMiddleware())

@@ -243,6 +248,7 @@ def _build_middlewares(config: RunnableConfig, model_name: str | None, agent_nam
     # Add DeferredToolFilterMiddleware to hide deferred tool schemas from model binding
     if app_config.tool_search.enabled:
         from deerflow.agents.middlewares.deferred_tool_filter_middleware import DeferredToolFilterMiddleware

         middlewares.append(DeferredToolFilterMiddleware())

     # Add SubagentLimitMiddleware to truncate excess parallel task calls

@@ -0,0 +1,37 @@
+"""Middleware for logging LLM token usage."""
+
+import logging
+from typing import override
+
+from langchain.agents import AgentState
+from langchain.agents.middleware import AgentMiddleware
+from langgraph.runtime import Runtime
+
+logger = logging.getLogger(__name__)
+
+
+class TokenUsageMiddleware(AgentMiddleware):
+    """Logs token usage from model response usage_metadata."""
+
+    @override
+    def after_model(self, state: AgentState, runtime: Runtime) -> dict | None:
+        return self._log_usage(state)
+
+    @override
+    async def aafter_model(self, state: AgentState, runtime: Runtime) -> dict | None:
+        return self._log_usage(state)
+
+    def _log_usage(self, state: AgentState) -> None:
+        messages = state.get("messages", [])
+        if not messages:
+            return None
+        last = messages[-1]
+        usage = getattr(last, "usage_metadata", None)
+        if usage:
+            logger.info(
+                "LLM token usage: input=%s output=%s total=%s",
+                usage.get("input_tokens", "?"),
+                usage.get("output_tokens", "?"),
+                usage.get("total_tokens", "?"),
+            )
+        return None

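A quick sanity check of the middleware above (a sketch, not part of the commit): build a state whose last message carries usage_metadata and call _log_usage directly, which is what after_model/aafter_model do after each model call once token_usage.enabled is true. The token counts are invented.

import logging

from langchain_core.messages import AIMessage

from deerflow.agents.middlewares.token_usage_middleware import TokenUsageMiddleware

logging.basicConfig(level=logging.INFO)

# A plain dict stands in for AgentState here; _log_usage only calls state.get("messages").
state = {
    "messages": [
        AIMessage(
            content="done",
            usage_metadata={"input_tokens": 1200, "output_tokens": 350, "total_tokens": 1550},
        )
    ]
}
TokenUsageMiddleware()._log_usage(state)
# Expected log line: LLM token usage: input=1200 output=350 total=1550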
@@ -17,6 +17,7 @@ from deerflow.config.skills_config import SkillsConfig
 from deerflow.config.subagents_config import load_subagents_config_from_dict
 from deerflow.config.summarization_config import load_summarization_config_from_dict
 from deerflow.config.title_config import load_title_config_from_dict
+from deerflow.config.token_usage_config import TokenUsageConfig
 from deerflow.config.tool_config import ToolConfig, ToolGroupConfig
 from deerflow.config.tool_search_config import ToolSearchConfig, load_tool_search_config_from_dict

@@ -28,6 +29,8 @@ logger = logging.getLogger(__name__)
 class AppConfig(BaseModel):
     """Config for the DeerFlow application"""

+    log_level: str = Field(default="info", description="Logging level for deerflow modules (debug/info/warning/error)")
+    token_usage: TokenUsageConfig = Field(default_factory=TokenUsageConfig, description="Token usage tracking configuration")
     models: list[ModelConfig] = Field(default_factory=list, description="Available models")
     sandbox: SandboxConfig = Field(description="Sandbox configuration")
     tools: list[ToolConfig] = Field(default_factory=list, description="Available tools")

@@ -163,8 +166,7 @@ class AppConfig(BaseModel):

         if user_version < example_version:
             logger.warning(
-                "Your config.yaml (version %d) is outdated — the latest version is %d. "
-                "Run `make config-upgrade` to merge new fields into your config.",
+                "Your config.yaml (version %d) is outdated — the latest version is %d. Run `make config-upgrade` to merge new fields into your config.",
                 user_version,
                 example_version,
             )

@@ -0,0 +1,7 @@
+from pydantic import BaseModel, Field
+
+
+class TokenUsageConfig(BaseModel):
+    """Configuration for token usage tracking."""
+
+    enabled: bool = Field(default=False, description="Enable token usage tracking middleware")

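A small illustration of the default (assumes standard Pydantic v2 behavior; not taken from the commit): tracking stays off unless it is explicitly enabled.

from deerflow.config.token_usage_config import TokenUsageConfig

assert TokenUsageConfig().enabled is False                    # default: tracking off
assert TokenUsageConfig(enabled=True).enabled is True         # opted in directly
assert TokenUsageConfig.model_validate({"enabled": True}).enabled is True  # e.g. parsed from config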
@@ -14,6 +14,20 @@
 # Run `make config-upgrade` to merge new fields into your local config.yaml.
 config_version: 3

+# ============================================================================
+# Logging
+# ============================================================================
+# Log level for deerflow modules (debug/info/warning/error)
+log_level: info
+
+# ============================================================================
+# Token Usage Tracking
+# ============================================================================
+# Track LLM token usage per model call (input/output/total tokens)
+# Logs at info level via TokenUsageMiddleware
+token_usage:
+  enabled: false
+
 # ============================================================================
 # Models Configuration
 # ============================================================================

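A minimal sketch (assumes PyYAML is installed; not part of the commit) of how the new config.yaml keys map onto the config code added above. Only the token_usage block is validated directly here; full AppConfig loading happens elsewhere in deerflow.

import textwrap

import yaml

from deerflow.config.token_usage_config import TokenUsageConfig

snippet = textwrap.dedent("""\
    log_level: info
    token_usage:
      enabled: false
    """)
data = yaml.safe_load(snippet)
print(data["log_level"])                                     # info
print(TokenUsageConfig.model_validate(data["token_usage"]))  # enabled=False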
@@ -122,7 +122,10 @@ else
 fi

 echo "Starting LangGraph server..."
-(cd backend && NO_COLOR=1 uv run langgraph dev --no-browser --allow-blocking $LANGGRAPH_EXTRA_FLAGS > ../logs/langgraph.log 2>&1) &
+# Pick the server log level: an explicit LANGGRAPH_LOG_LEVEL env var wins, then log_level from config.yaml, then "info"
+CONFIG_LOG_LEVEL=$(grep -m1 '^log_level:' config.yaml 2>/dev/null | awk '{print $2}' | tr -d ' ')
+LANGGRAPH_LOG_LEVEL="${LANGGRAPH_LOG_LEVEL:-${CONFIG_LOG_LEVEL:-info}}"
+(cd backend && NO_COLOR=1 uv run langgraph dev --no-browser --allow-blocking --server-log-level $LANGGRAPH_LOG_LEVEL $LANGGRAPH_EXTRA_FLAGS > ../logs/langgraph.log 2>&1) &
 ./scripts/wait-for-port.sh 2024 60 "LangGraph" || {
   echo "  See logs/langgraph.log for details"
   tail -20 logs/langgraph.log