mirror of
https://gitee.com/wanwujie/deer-flow
synced 2026-04-02 22:02:13 +08:00
* feat: add Claude Code OAuth and Codex CLI providers Port of bytedance/deer-flow#1136 from @solanian's feat/cli-oauth-providers branch. Carries the feature forward on top of current main without the original CLA-blocked commit metadata, while preserving attribution in the commit message for review. * fix: harden CLI credential loading Align Codex auth loading with the current ~/.codex/auth.json shape, make Docker credential mounts directory-based to avoid broken file binds on hosts without exported credential files, and add focused loader tests. * refactor: tighten codex auth typing Replace the temporary Any return type in CodexChatModel._load_codex_auth with the concrete CodexCliCredential type after the credential loader was stabilized. * fix: load Claude Code OAuth from Keychain Match Claude Code's macOS storage strategy more closely by checking the Keychain-backed credentials store before falling back to ~/.claude/.credentials.json. Keep explicit file overrides and add focused tests for the Keychain path. * fix: require explicit Claude OAuth handoff * style: format thread hooks reasoning request * docs: document CLI-backed auth providers * fix: address provider review feedback * fix: harden provider edge cases * Fix deferred tools, Codex message normalization, and local sandbox paths * chore: narrow PR scope to OAuth providers * chore: remove unrelated frontend changes * chore: reapply OAuth branch frontend scope cleanup * fix: preserve upload guards with reasoning effort wiring --------- Co-authored-by: Willem Jiang <willem.jiang@gmail.com>
152 lines
4.7 KiB
Python
152 lines
4.7 KiB
Python
from __future__ import annotations
|
|
|
|
import json
|
|
|
|
import pytest
|
|
from langchain_core.messages import HumanMessage, SystemMessage
|
|
|
|
from deerflow.models.claude_provider import ClaudeChatModel
|
|
from deerflow.models.credential_loader import CodexCliCredential
|
|
from deerflow.models.openai_codex_provider import CodexChatModel
|
|
|
|
|
|
def test_codex_provider_rejects_non_positive_retry_attempts():
    """A retry budget below one attempt must be rejected at construction time."""
    expected_message = "retry_max_attempts must be >= 1"
    with pytest.raises(ValueError, match=expected_message):
        CodexChatModel(retry_max_attempts=0)
|
|
|
|
|
|
def test_codex_provider_requires_credentials(monkeypatch):
    """Constructing the provider with no discoverable Codex CLI credential fails."""

    def _missing_auth(self):
        # Simulate an environment where no credential source yields anything.
        return None

    monkeypatch.setattr(CodexChatModel, "_load_codex_auth", _missing_auth)

    with pytest.raises(ValueError, match="Codex CLI credential not found"):
        CodexChatModel()
|
|
|
|
|
|
def test_codex_provider_concatenates_multiple_system_messages(monkeypatch):
    """Several system prompts are joined with a blank line into one instruction."""

    def _stub_auth(self):
        return CodexCliCredential(access_token="token", account_id="acct")

    monkeypatch.setattr(CodexChatModel, "_load_codex_auth", _stub_auth)

    conversation = [
        SystemMessage(content="First system prompt."),
        SystemMessage(content="Second system prompt."),
        HumanMessage(content="Hello"),
    ]
    instructions, input_items = CodexChatModel()._convert_messages(conversation)

    assert instructions == "First system prompt.\n\nSecond system prompt."
    assert input_items == [{"role": "user", "content": "Hello"}]
|
|
|
|
|
|
def test_codex_provider_flattens_structured_text_blocks(monkeypatch):
    """Structured text content blocks collapse to a plain string in input items."""

    def _stub_auth(self):
        return CodexCliCredential(access_token="token", account_id="acct")

    monkeypatch.setattr(CodexChatModel, "_load_codex_auth", _stub_auth)

    structured_content = [{"type": "text", "text": "Hello from blocks"}]
    instructions, input_items = CodexChatModel()._convert_messages(
        [HumanMessage(content=structured_content)]
    )

    # With no system message present, the provider's default instructions apply.
    assert instructions == "You are a helpful assistant."
    assert input_items == [{"role": "user", "content": "Hello from blocks"}]
|
|
|
|
|
|
def test_claude_provider_rejects_non_positive_retry_attempts():
    """The Claude provider enforces the same minimum retry budget as Codex."""
    bad_settings = {"model": "claude-sonnet-4-6", "retry_max_attempts": 0}
    with pytest.raises(ValueError, match="retry_max_attempts must be >= 1"):
        ClaudeChatModel(**bad_settings)
|
|
|
|
|
|
def test_codex_provider_skips_terminal_sse_markers(monkeypatch):
    """Terminal SSE frames (the [DONE] sentinel and event lines) parse to None."""

    def _stub_auth(self):
        return CodexCliCredential(access_token="token", account_id="acct")

    monkeypatch.setattr(CodexChatModel, "_load_codex_auth", _stub_auth)
    model = CodexChatModel()

    for terminal_frame in ("data: [DONE]", "event: response.completed"):
        assert model._parse_sse_data_line(terminal_frame) is None
|
|
|
|
|
|
def test_codex_provider_skips_non_json_sse_frames(monkeypatch):
    """A data frame whose payload is not valid JSON is silently dropped."""

    def _stub_auth(self):
        return CodexCliCredential(access_token="token", account_id="acct")

    monkeypatch.setattr(CodexChatModel, "_load_codex_auth", _stub_auth)

    parsed = CodexChatModel()._parse_sse_data_line("data: not-json")
    assert parsed is None
|
|
|
|
|
|
def test_codex_provider_marks_invalid_tool_call_arguments(monkeypatch):
    """Unparseable tool-call arguments surface as invalid_tool_calls, not crashes."""

    def _stub_auth(self):
        return CodexCliCredential(access_token="token", account_id="acct")

    monkeypatch.setattr(CodexChatModel, "_load_codex_auth", _stub_auth)

    malformed_call = {
        "type": "function_call",
        "name": "bash",
        "arguments": "{invalid",
        "call_id": "tc-1",
    }
    response_payload = {
        "model": "gpt-5.4",
        "output": [malformed_call],
        "usage": {},
    }

    result = CodexChatModel()._parse_response(response_payload)
    message = result.generations[0].message

    # The broken call must not appear among valid tool calls ...
    assert message.tool_calls == []
    # ... and must appear exactly once in the invalid bucket, with the raw
    # argument string and a parse-failure explanation preserved.
    assert len(message.invalid_tool_calls) == 1
    invalid = message.invalid_tool_calls[0]
    assert invalid["type"] == "invalid_tool_call"
    assert invalid["name"] == "bash"
    assert invalid["args"] == "{invalid"
    assert invalid["id"] == "tc-1"
    assert "Failed to parse tool arguments" in invalid["error"]
|
|
|
|
|
|
def test_codex_provider_parses_valid_tool_arguments(monkeypatch):
    """Well-formed JSON arguments become structured tool_calls on the message."""

    def _stub_auth(self):
        return CodexCliCredential(access_token="token", account_id="acct")

    monkeypatch.setattr(CodexChatModel, "_load_codex_auth", _stub_auth)

    function_call = {
        "type": "function_call",
        "name": "bash",
        "arguments": json.dumps({"cmd": "pwd"}),
        "call_id": "tc-1",
    }
    result = CodexChatModel()._parse_response(
        {"model": "gpt-5.4", "output": [function_call], "usage": {}}
    )

    expected_call = {
        "name": "bash",
        "args": {"cmd": "pwd"},
        "id": "tc-1",
        "type": "tool_call",
    }
    assert result.generations[0].message.tool_calls == [expected_call]
|