Mirror of https://gitee.com/wanwujie/deer-flow, synced 2026-04-02 22:02:13 +08:00
* feat: add configurable log level and token usage tracking
  - Add `log_level` config to control the deerflow module log level, synced to LangGraph Server via serve.sh `--server-log-level`
  - Add `token_usage.enabled` config with TokenUsageMiddleware that logs input/output/total tokens per LLM call from usage_metadata
  - Add .omc/ to .gitignore
* fix: use info level for token usage logs since the feature has its own toggle
* fix: sort imports to pass lint check

Co-authored-by: greatmengqi <chenmengqi.0376@bytedance.com>
Co-authored-by: Willem Jiang <willem.jiang@gmail.com>
38 lines
1.1 KiB
Python
"""Middleware for logging LLM token usage."""
|
|
|
|
import logging
|
|
from typing import override
|
|
|
|
from langchain.agents import AgentState
|
|
from langchain.agents.middleware import AgentMiddleware
|
|
from langgraph.runtime import Runtime
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
class TokenUsageMiddleware(AgentMiddleware):
|
|
"""Logs token usage from model response usage_metadata."""
|
|
|
|
@override
|
|
def after_model(self, state: AgentState, runtime: Runtime) -> dict | None:
|
|
return self._log_usage(state)
|
|
|
|
@override
|
|
async def aafter_model(self, state: AgentState, runtime: Runtime) -> dict | None:
|
|
return self._log_usage(state)
|
|
|
|
def _log_usage(self, state: AgentState) -> None:
|
|
messages = state.get("messages", [])
|
|
if not messages:
|
|
return None
|
|
last = messages[-1]
|
|
usage = getattr(last, "usage_metadata", None)
|
|
if usage:
|
|
logger.info(
|
|
"LLM token usage: input=%s output=%s total=%s",
|
|
usage.get("input_tokens", "?"),
|
|
usage.get("output_tokens", "?"),
|
|
usage.get("total_tokens", "?"),
|
|
)
|
|
return None
|
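For context, here is a minimal usage sketch showing how this middleware could be wired into a LangChain agent, gated by a boolean standing in for the `token_usage.enabled` config mentioned in the commit message. The `create_agent` call, the model string, the import path, and the `token_usage_enabled` variable are illustrative assumptions, not part of this file or repository.

```python
# Illustrative sketch (not part of this file): attach TokenUsageMiddleware to an
# agent so its after_model hook runs after every LLM call.
from langchain.agents import create_agent

from src.middleware.token_usage import TokenUsageMiddleware  # hypothetical module path

# Stand-in for the `token_usage.enabled` config toggle from the commit message.
token_usage_enabled = True

middleware = [TokenUsageMiddleware()] if token_usage_enabled else []

agent = create_agent(
    model="openai:gpt-4o-mini",  # any chat model whose responses carry usage_metadata
    tools=[],
    middleware=middleware,
)

# With the toggle on, each model call emits a log line such as:
#   LLM token usage: input=123 output=45 total=168
```

Because the middleware only reads `usage_metadata` from the last message and returns None, it never mutates agent state; enabling or disabling it is purely a logging concern.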