fix: replace print() with logging across harness package (#1282)

Replace all bare print() calls with proper logging using Python's
standard logging module across the deerflow harness package.

Changes across 8 files (16 print statements replaced):

- agents/middlewares/clarification_middleware.py: use logger.info/debug
- agents/middlewares/memory_middleware.py: use logger.debug
- agents/middlewares/thread_data_middleware.py: use logger.debug
- agents/middlewares/view_image_middleware.py: use logger.debug
- agents/memory/queue.py: use logger.info/debug/warning/error
- agents/lead_agent/prompt.py: use logger.error
- skills/loader.py: use logger.warning
- skills/parser.py: use logger.error

Each file follows the established codebase convention:
  import logging
  logger = logging.getLogger(__name__)

Log levels chosen based on message semantics:
- debug: routine operational details (directory creation, timer resets)
- info: significant state changes (memory queued, updates processed)
- warning: recoverable issues (config load failures, skipped updates)
- error: unexpected failures (parsing errors, memory update errors)

Note: client.py is intentionally excluded as it uses print() for
CLI output, which is the correct behavior for a command-line client.

Co-authored-by: moose-lab <moose-lab@users.noreply.github.com>
This commit is contained in:
moose-lab
2026-03-27 23:15:35 +08:00
committed by GitHub
parent 9a4e8f438a
commit 03b144f9c9
8 changed files with 40 additions and 16 deletions

View File

@@ -1,8 +1,11 @@
+import logging
 from datetime import datetime

 from deerflow.config.agents_config import load_agent_soul
 from deerflow.skills import load_skills

+logger = logging.getLogger(__name__)
+

 def _build_subagent_section(max_concurrent: int) -> str:
     """Build the subagent system prompt section with dynamic concurrency limit.
@@ -364,7 +367,7 @@ def _get_memory_context(agent_name: str | None = None) -> str:
 </memory>
 """
     except Exception as e:
-        print(f"Failed to load memory context: {e}")
+        logger.error("Failed to load memory context: %s", e)
         return ""

View File

@@ -1,5 +1,6 @@
 """Memory update queue with debounce mechanism."""

+import logging
 import threading
 import time
 from dataclasses import dataclass, field
@@ -8,6 +9,8 @@ from typing import Any

 from deerflow.config.memory_config import get_memory_config

+logger = logging.getLogger(__name__)
+

 @dataclass
 class ConversationContext:
@@ -61,7 +64,7 @@ class MemoryUpdateQueue:

             # Reset or start the debounce timer
             self._reset_timer()
-            print(f"Memory update queued for thread {thread_id}, queue size: {len(self._queue)}")
+            logger.info("Memory update queued for thread %s, queue size: %d", thread_id, len(self._queue))

     def _reset_timer(self) -> None:
         """Reset the debounce timer."""
@@ -79,7 +82,7 @@ class MemoryUpdateQueue:
         self._timer.daemon = True
         self._timer.start()
-        print(f"Memory update timer set for {config.debounce_seconds}s")
+        logger.debug("Memory update timer set for %ss", config.debounce_seconds)

     def _process_queue(self) -> None:
         """Process all queued conversation contexts."""
@@ -100,25 +103,25 @@
             self._queue.clear()
             self._timer = None

-        print(f"Processing {len(contexts_to_process)} queued memory updates")
+        logger.info("Processing %d queued memory updates", len(contexts_to_process))

         try:
             updater = MemoryUpdater()
             for context in contexts_to_process:
                 try:
-                    print(f"Updating memory for thread {context.thread_id}")
+                    logger.info("Updating memory for thread %s", context.thread_id)
                     success = updater.update_memory(
                         messages=context.messages,
                         thread_id=context.thread_id,
                         agent_name=context.agent_name,
                     )
                     if success:
-                        print(f"Memory updated successfully for thread {context.thread_id}")
+                        logger.info("Memory updated successfully for thread %s", context.thread_id)
                     else:
-                        print(f"Memory update skipped/failed for thread {context.thread_id}")
+                        logger.warning("Memory update skipped/failed for thread %s", context.thread_id)
                 except Exception as e:
-                    print(f"Error updating memory for thread {context.thread_id}: {e}")
+                    logger.error("Error updating memory for thread %s: %s", context.thread_id, e)

                 # Small delay between updates to avoid rate limiting
                 if len(contexts_to_process) > 1:

View File

@@ -1,5 +1,6 @@
 """Middleware for intercepting clarification requests and presenting them to the user."""

+import logging
 from collections.abc import Callable
 from typing import override
@@ -10,6 +11,8 @@ from langgraph.graph import END
 from langgraph.prebuilt.tool_node import ToolCallRequest
 from langgraph.types import Command

+logger = logging.getLogger(__name__)
+

 class ClarificationMiddlewareState(AgentState):
     """Compatible with the `ThreadState` schema."""
@@ -101,8 +104,8 @@ class ClarificationMiddleware(AgentMiddleware[ClarificationMiddlewareState]):
         args = request.tool_call.get("args", {})
         question = args.get("question", "")

-        print("[ClarificationMiddleware] Intercepted clarification request")
-        print(f"[ClarificationMiddleware] Question: {question}")
+        logger.info("Intercepted clarification request")
+        logger.debug("Clarification question: %s", question)

         # Format the clarification message
         formatted_message = self._format_clarification_message(args)

View File

@@ -1,5 +1,6 @@
 """Middleware for memory mechanism."""

+import logging
 import re
 from typing import Any, override
@@ -10,6 +11,8 @@ from langgraph.runtime import Runtime
 from deerflow.agents.memory.queue import get_memory_queue
 from deerflow.config.memory_config import get_memory_config

+logger = logging.getLogger(__name__)
+

 class MemoryMiddlewareState(AgentState):
     """Compatible with the `ThreadState` schema."""
@@ -122,13 +125,13 @@ class MemoryMiddleware(AgentMiddleware[MemoryMiddlewareState]):
         # Get thread ID from runtime context
         thread_id = runtime.context.get("thread_id") if runtime.context else None
         if not thread_id:
-            print("MemoryMiddleware: No thread_id in context, skipping memory update")
+            logger.debug("No thread_id in context, skipping memory update")
             return None

         # Get messages from state
         messages = state.get("messages", [])
         if not messages:
-            print("MemoryMiddleware: No messages in state, skipping memory update")
+            logger.debug("No messages in state, skipping memory update")
             return None

         # Filter to only keep user inputs and final assistant responses

View File

@@ -1,3 +1,4 @@
+import logging
 from typing import NotRequired, override

 from langchain.agents import AgentState
@@ -8,6 +9,8 @@ from langgraph.runtime import Runtime
 from deerflow.agents.thread_state import ThreadDataState
 from deerflow.config.paths import Paths, get_paths

+logger = logging.getLogger(__name__)
+

 class ThreadDataMiddlewareState(AgentState):
     """Compatible with the `ThreadState` schema."""
@@ -87,7 +90,7 @@ class ThreadDataMiddleware(AgentMiddleware[ThreadDataMiddlewareState]):
         else:
             # Eager initialization: create directories immediately
             paths = self._create_thread_directories(thread_id)
-            print(f"Created thread data directories for thread {thread_id}")
+            logger.debug("Created thread data directories for thread %s", thread_id)

         return {
             "thread_data": {

View File

@@ -1,5 +1,6 @@
 """Middleware for injecting image details into conversation before LLM call."""

+import logging
 from typing import NotRequired, override

 from langchain.agents import AgentState
@@ -9,6 +10,8 @@ from langgraph.runtime import Runtime
 from deerflow.agents.thread_state import ViewedImageData

+logger = logging.getLogger(__name__)
+

 class ViewImageMiddlewareState(AgentState):
     """Compatible with the `ThreadState` schema."""
@@ -182,7 +185,7 @@ class ViewImageMiddleware(AgentMiddleware[ViewImageMiddlewareState]):
         # Create a new human message with mixed content (text + images)
         human_msg = HumanMessage(content=image_content)

-        print("[ViewImageMiddleware] Injecting image details message with images before LLM call")
+        logger.debug("Injecting image details message with images before LLM call")

         # Return state update with the new message
         return {"messages": [human_msg]}

View File

@@ -1,9 +1,12 @@
+import logging
 import os
 from pathlib import Path

 from .parser import parse_skill_file
 from .types import Skill

+logger = logging.getLogger(__name__)
+

 def get_skills_root_path() -> Path:
     """
@@ -86,7 +89,7 @@ def load_skills(skills_path: Path | None = None, use_config: bool = True, enable
                 skill.enabled = extensions_config.is_skill_enabled(skill.name, skill.category)
         except Exception as e:
             # If config loading fails, default to all enabled
-            print(f"Warning: Failed to load extensions config: {e}")
+            logger.warning("Failed to load extensions config: %s", e)

     # Filter by enabled status if requested
     if enabled_only:

View File

@@ -1,8 +1,11 @@
+import logging
 import re
 from pathlib import Path

 from .types import Skill

+logger = logging.getLogger(__name__)
+

 def parse_skill_file(skill_file: Path, category: str, relative_path: Path | None = None) -> Skill | None:
     """
@@ -61,5 +64,5 @@ def parse_skill_file(skill_file: Path, category: str, relative_path: Path | None
         )
     except Exception as e:
-        print(f"Error parsing skill file {skill_file}: {e}")
+        logger.error("Error parsing skill file %s: %s", skill_file, e)
         return None