mirror of
https://gitee.com/wanwujie/deer-flow
synced 2026-04-03 22:32:12 +08:00
* fix: support local models by making thought field optional in Plan model - Make thought field optional in Plan model to fix Pydantic validation errors with local models - Add Ollama configuration example to conf.yaml.example - Update documentation to include local model support - Improve planner prompt with better JSON format requirements Fixes local model integration issues where models like qwen3:14b would fail due to missing thought field in JSON output. * feat: Add intelligent clarification feature for research queries - Add multi-turn clarification process to refine vague research questions - Implement three-dimension clarification standard (Tech/App, Focus, Scope) - Add clarification state management in coordinator node - Update coordinator prompt with detailed clarification guidelines - Add UI settings to enable/disable clarification feature (disabled by default) - Update workflow to handle clarification rounds recursively - Add comprehensive test coverage for clarification functionality - Update documentation with clarification feature usage guide Key components: - src/graph/nodes.py: Core clarification logic and state management - src/prompts/coordinator.md: Detailed clarification guidelines - src/workflow.py: Recursive clarification handling - web/: UI settings integration - tests/: Comprehensive test coverage - docs/: Updated configuration guide * fix: Improve clarification conversation continuity - Add comprehensive conversation history to clarification context - Include previous exchanges summary in system messages - Add explicit guidelines for continuing rounds in coordinator prompt - Prevent LLM from starting new topics during clarification - Ensure topic continuity across clarification rounds Fixes issue where LLM would restart clarification instead of building upon previous exchanges. 
* fix: Add conversation history to clarification context * fix: resolve clarification feature message to planer, prompt, test issues - Optimize coordinator.md prompt template for better clarification flow - Simplify final message sent to planner after clarification - Fix API key assertion issues in test_search.py * fix: Add configurable max_clarification_rounds and comprehensive tests - Add max_clarification_rounds parameter for external configuration - Add comprehensive test cases for clarification feature in test_app.py - Fixes issues found during interactive mode testing where: - Recursive call failed due to missing initial_state parameter - Clarification exited prematurely at max rounds - Incorrect logging of max rounds reached * Move clarification tests to test_nodes.py and add max_clarification_rounds to zh.json
135 lines
4.4 KiB
Python
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
|
|
# SPDX-License-Identifier: MIT
|
|
|
|
import importlib
|
|
import sys
|
|
from unittest.mock import MagicMock, patch
|
|
|
|
import pytest
|
|
|
|
import src.graph.builder as builder_mod
|
|
|
|
|
|
@pytest.fixture
def mock_state():
    """Provide lightweight stand-ins for the planner's Step and Plan models.

    Returns a dict mapping the names "Step" and "Plan" to minimal classes
    so tests can build plan states without importing the real models.
    """

    class Step:
        # Mimics a plan step: the result of executing it plus its type.
        def __init__(self, execution_res=None, step_type=None):
            self.execution_res = execution_res
            self.step_type = step_type

    class Plan:
        # Mimics a plan: simply an ordered collection of steps.
        def __init__(self, steps):
            self.steps = steps

    return {"Step": Step, "Plan": Plan}
|
|
|
|
|
|
def test_continue_to_running_research_team_no_plan(mock_state):
    """Without a current plan the router must fall back to the planner."""
    result = builder_mod.continue_to_running_research_team({"current_plan": None})
    assert result == "planner"
|
|
|
|
|
|
def test_continue_to_running_research_team_no_steps(mock_state):
    """A plan with an empty step list routes back to the planner."""
    empty_plan = mock_state["Plan"](steps=[])
    result = builder_mod.continue_to_running_research_team({"current_plan": empty_plan})
    assert result == "planner"
|
|
|
|
|
|
def test_continue_to_running_research_team_all_executed(mock_state):
    """When every step already has a result, control returns to the planner."""
    Step, Plan = mock_state["Step"], mock_state["Plan"]
    plan = Plan(steps=[Step(execution_res=True), Step(execution_res=True)])
    result = builder_mod.continue_to_running_research_team({"current_plan": plan})
    assert result == "planner"
|
|
|
|
|
|
def test_continue_to_running_research_team_next_researcher(mock_state):
    """The first unexecuted RESEARCH step routes to the researcher node."""
    Step, Plan = mock_state["Step"], mock_state["Plan"]
    plan = Plan(
        steps=[
            Step(execution_res=True),
            Step(execution_res=None, step_type=builder_mod.StepType.RESEARCH),
        ]
    )
    result = builder_mod.continue_to_running_research_team({"current_plan": plan})
    assert result == "researcher"
|
|
|
|
|
|
def test_continue_to_running_research_team_next_coder(mock_state):
    """The first unexecuted PROCESSING step routes to the coder node."""
    Step, Plan = mock_state["Step"], mock_state["Plan"]
    plan = Plan(
        steps=[
            Step(execution_res=True),
            Step(execution_res=None, step_type=builder_mod.StepType.PROCESSING),
        ]
    )
    result = builder_mod.continue_to_running_research_team({"current_plan": plan})
    assert result == "coder"
|
|
|
|
|
|
def test_continue_to_running_research_team_next_coder_withresult(mock_state):
    """A PROCESSING step that already has a result is treated as done,
    so the router falls back to the planner instead of the coder."""
    Step, Plan = mock_state["Step"], mock_state["Plan"]
    plan = Plan(
        steps=[
            Step(execution_res=True),
            Step(execution_res=True, step_type=builder_mod.StepType.PROCESSING),
        ]
    )
    result = builder_mod.continue_to_running_research_team({"current_plan": plan})
    assert result == "planner"
|
|
|
|
|
|
def test_continue_to_running_research_team_default_planner(mock_state):
    """An unexecuted step with an unknown step_type defaults to the planner."""
    Step, Plan = mock_state["Step"], mock_state["Plan"]
    plan = Plan(
        steps=[
            Step(execution_res=True),
            Step(execution_res=None, step_type=None),
        ]
    )
    result = builder_mod.continue_to_running_research_team({"current_plan": plan})
    assert result == "planner"
|
|
|
|
|
|
@patch("src.graph.builder.StateGraph")
def test_build_base_graph_adds_nodes_and_edges(mock_state_graph):
    """_build_base_graph wires up all nodes, edges, and both conditional edges."""
    graph_builder = MagicMock()
    mock_state_graph.return_value = graph_builder

    builder_mod._build_base_graph()

    # The base graph registers at least the known node/edge counts.
    assert graph_builder.add_node.call_count >= 8
    assert graph_builder.add_edge.call_count >= 2
    # research_team and coordinator each contribute one conditional edge.
    assert graph_builder.add_conditional_edges.call_count == 2
|
|
|
|
|
|
@patch("src.graph.builder._build_base_graph")
@patch("src.graph.builder.MemorySaver")
def test_build_graph_with_memory_uses_memory(mock_memory_saver, mock_build_base_graph):
    """build_graph_with_memory compiles with a MemorySaver checkpointer."""
    graph_builder = MagicMock()
    checkpointer = MagicMock()
    mock_build_base_graph.return_value = graph_builder
    mock_memory_saver.return_value = checkpointer

    builder_mod.build_graph_with_memory()

    graph_builder.compile.assert_called_once_with(checkpointer=checkpointer)
|
|
|
|
|
|
@patch("src.graph.builder._build_base_graph")
def test_build_graph_without_memory(mock_build_base_graph):
    """build_graph compiles the base graph without any checkpointer."""
    graph_builder = MagicMock()
    mock_build_base_graph.return_value = graph_builder

    builder_mod.build_graph()

    graph_builder.compile.assert_called_once_with()
|
|
|
|
|
|
def test_graph_is_compiled():
    """Reloading the module rebuilds the module-level ``graph`` object.

    NOTE(review): the previous version patched ``_build_base_graph`` and set
    ``compile.return_value = "compiled_graph"`` before the reload, but
    ``importlib.reload`` re-executes the module body, which rebinds the real
    ``_build_base_graph`` and runs ``graph = build_graph()`` with the real
    implementation — the mock never took effect (which is why only
    ``is not None`` could be asserted). The ineffective mocking is removed;
    the reload plus non-None check is the behavior this test actually pins.
    """
    importlib.reload(sys.modules["src.graph.builder"])
    # The module-level assignment must have produced a compiled graph.
    assert builder_mod.graph is not None
|