mirror of
https://gitee.com/wanwujie/deer-flow
synced 2026-04-20 12:54:45 +08:00
fix: recover from stale model context when configured models change (#898)
* fix: recover from stale model context after config model changes
* fix: fail fast on missing model config and expand model resolution tests
* fix: remove duplicate get_app_config imports
* fix: align model resolution tests with runtime imports
* Apply suggestions from code review

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* fix: remove duplicate model resolution test case

---------

Co-authored-by: Willem Jiang <willem.jiang@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
This commit is contained in:
136
backend/tests/test_lead_agent_model_resolution.py
Normal file
136
backend/tests/test_lead_agent_model_resolution.py
Normal file
@@ -0,0 +1,136 @@
|
||||
"""Tests for lead agent runtime model resolution behavior."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import pytest
|
||||
|
||||
from src.agents.lead_agent import agent as lead_agent_module
|
||||
from src.config.app_config import AppConfig
|
||||
from src.config.model_config import ModelConfig
|
||||
from src.config.sandbox_config import SandboxConfig
|
||||
|
||||
|
||||
def _make_app_config(models: list[ModelConfig]) -> AppConfig:
    """Build an AppConfig wrapping *models* with a local sandbox provider."""
    sandbox = SandboxConfig(use="src.sandbox.local:LocalSandboxProvider")
    return AppConfig(models=models, sandbox=sandbox)
|
||||
|
||||
|
||||
def _make_model(name: str, *, supports_thinking: bool) -> ModelConfig:
    """Create a minimal non-vision ModelConfig whose identifiers all equal *name*."""
    config_kwargs = {
        "name": name,
        "display_name": name,
        "description": None,
        "use": "langchain_openai:ChatOpenAI",
        "model": name,
        "supports_thinking": supports_thinking,
        "supports_vision": False,
    }
    return ModelConfig(**config_kwargs)
|
||||
|
||||
|
||||
def test_resolve_model_name_falls_back_to_default(monkeypatch, caplog):
    """A model name absent from config resolves to the first configured model and warns."""
    models = [
        _make_model("default-model", supports_thinking=False),
        _make_model("other-model", supports_thinking=True),
    ]
    stub_config = _make_app_config(models)
    monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: stub_config)

    with caplog.at_level("WARNING"):
        resolved = lead_agent_module._resolve_model_name("missing-model")

    assert resolved == "default-model"
    assert "fallback to default model 'default-model'" in caplog.text
|
||||
|
||||
|
||||
def test_resolve_model_name_uses_default_when_none(monkeypatch):
    """Passing None as the model name yields the first configured model."""
    configured_models = [
        _make_model("default-model", supports_thinking=False),
        _make_model("other-model", supports_thinking=True),
    ]
    stub_config = _make_app_config(configured_models)
    monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: stub_config)

    assert lead_agent_module._resolve_model_name(None) == "default-model"
|
||||
|
||||
|
||||
def test_resolve_model_name_raises_when_no_models_configured(monkeypatch):
    """Resolution fails fast with ValueError when the config lists no models."""
    empty_config = _make_app_config([])
    monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: empty_config)

    with pytest.raises(ValueError, match="No chat models are configured"):
        lead_agent_module._resolve_model_name("missing-model")
|
||||
|
||||
|
||||
def test_make_lead_agent_disables_thinking_when_model_does_not_support_it(monkeypatch):
    """thinking_enabled=True is overridden to False for a model lacking thinking support."""
    import src.tools as tools_module

    stub_config = _make_app_config([_make_model("safe-model", supports_thinking=False)])
    monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: stub_config)
    monkeypatch.setattr(tools_module, "get_available_tools", lambda **kwargs: [])
    monkeypatch.setattr(lead_agent_module, "_build_middlewares", lambda config, model_name: [])

    recorded: dict[str, object] = {}

    def _spy_create_chat_model(*, name, thinking_enabled):
        # Record the arguments the agent factory forwards to model creation.
        recorded["name"] = name
        recorded["thinking_enabled"] = thinking_enabled
        return object()

    monkeypatch.setattr(lead_agent_module, "create_chat_model", _spy_create_chat_model)
    monkeypatch.setattr(lead_agent_module, "create_agent", lambda **kwargs: kwargs)

    runnable_config = {
        "configurable": {
            "model_name": "safe-model",
            "thinking_enabled": True,
            "is_plan_mode": False,
            "subagent_enabled": False,
        }
    }
    result = lead_agent_module.make_lead_agent(runnable_config)

    assert recorded["name"] == "safe-model"
    assert recorded["thinking_enabled"] is False
    assert result["model"] is not None
|
||||
|
||||
|
||||
def test_build_middlewares_uses_resolved_model_name_for_vision(monkeypatch):
    """Vision middleware is chosen from the resolved model, not the stale configurable name."""
    vision_model = ModelConfig(
        name="vision-model",
        display_name="vision-model",
        description=None,
        use="langchain_openai:ChatOpenAI",
        model="vision-model",
        supports_thinking=False,
        supports_vision=True,
    )
    stub_config = _make_app_config(
        [_make_model("stale-model", supports_thinking=False), vision_model]
    )

    monkeypatch.setattr(lead_agent_module, "get_app_config", lambda: stub_config)
    monkeypatch.setattr(lead_agent_module, "_create_summarization_middleware", lambda: None)
    monkeypatch.setattr(lead_agent_module, "_create_todo_list_middleware", lambda is_plan_mode: None)

    configurable = {"model_name": "stale-model", "is_plan_mode": False, "subagent_enabled": False}
    middlewares = lead_agent_module._build_middlewares(
        {"configurable": configurable},
        model_name="vision-model",
    )

    has_view_image = any(
        isinstance(m, lead_agent_module.ViewImageMiddleware) for m in middlewares
    )
    assert has_view_image
|
||||
Reference in New Issue
Block a user