mirror of
https://gitee.com/wanwujie/deer-flow
synced 2026-04-03 06:12:14 +08:00
feat(codex): support explicit OpenAI Responses API config (#1235)
* feat: support explicit OpenAI Responses API config Co-authored-by: Codex <noreply@openai.com> * Update backend/packages/harness/deerflow/config/model_config.py Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --------- Co-authored-by: Codex <noreply@openai.com> Co-authored-by: Willem Jiang <willem.jiang@gmail.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
This commit is contained in:
10
README.md
10
README.md
@@ -115,10 +115,20 @@ DeerFlow has newly integrated the intelligent search and crawling toolset indepe
|
||||
model: google/gemini-2.5-flash-preview
|
||||
api_key: $OPENAI_API_KEY # OpenRouter still uses the OpenAI-compatible field name here
|
||||
base_url: https://openrouter.ai/api/v1
|
||||
|
||||
- name: gpt-5-responses
|
||||
display_name: GPT-5 (Responses API)
|
||||
use: langchain_openai:ChatOpenAI
|
||||
model: gpt-5
|
||||
api_key: $OPENAI_API_KEY
|
||||
use_responses_api: true
|
||||
output_version: responses/v1
|
||||
```
|
||||
|
||||
OpenRouter and similar OpenAI-compatible gateways should be configured with `langchain_openai:ChatOpenAI` plus `base_url`. If you prefer a provider-specific environment variable name, point `api_key` at that variable explicitly (for example `api_key: $OPENROUTER_API_KEY`).
|
||||
|
||||
To route OpenAI models through `/v1/responses`, keep using `langchain_openai:ChatOpenAI` and set `use_responses_api: true` with `output_version: responses/v1`.
|
||||
|
||||
4. **Set API keys for your configured model(s)**
|
||||
|
||||
Choose one of the following methods:
|
||||
|
||||
@@ -181,6 +181,7 @@ Configuration priority:
|
||||
4. `config.yaml` in parent directory (project root - **recommended location**)
|
||||
|
||||
Config values starting with `$` are resolved as environment variables (e.g., `$OPENAI_API_KEY`).
|
||||
`ModelConfig` also declares `use_responses_api` and `output_version` so OpenAI `/v1/responses` can be enabled explicitly while still using `langchain_openai:ChatOpenAI`.
|
||||
|
||||
**Extensions Configuration** (`extensions_config.json`):
|
||||
|
||||
|
||||
@@ -169,6 +169,15 @@ models:
|
||||
api_key: $OPENAI_API_KEY
|
||||
supports_thinking: false
|
||||
supports_vision: true
|
||||
|
||||
- name: gpt-5-responses
|
||||
display_name: GPT-5 (Responses API)
|
||||
use: langchain_openai:ChatOpenAI
|
||||
model: gpt-5
|
||||
api_key: $OPENAI_API_KEY
|
||||
use_responses_api: true
|
||||
output_version: responses/v1
|
||||
supports_vision: true
|
||||
```
|
||||
|
||||
Set your API keys:
|
||||
|
||||
@@ -38,6 +38,19 @@ models:
|
||||
- DeepSeek (`langchain_deepseek:ChatDeepSeek`)
|
||||
- Any LangChain-compatible provider
|
||||
|
||||
To use OpenAI's `/v1/responses` endpoint with LangChain, keep using `langchain_openai:ChatOpenAI` and set:
|
||||
|
||||
```yaml
|
||||
models:
|
||||
- name: gpt-5-responses
|
||||
display_name: GPT-5 (Responses API)
|
||||
use: langchain_openai:ChatOpenAI
|
||||
model: gpt-5
|
||||
api_key: $OPENAI_API_KEY
|
||||
use_responses_api: true
|
||||
output_version: responses/v1
|
||||
```
|
||||
|
||||
For OpenAI-compatible gateways (for example Novita or OpenRouter), keep using `langchain_openai:ChatOpenAI` and set `base_url`:
|
||||
|
||||
```yaml
|
||||
|
||||
@@ -13,6 +13,14 @@ class ModelConfig(BaseModel):
|
||||
)
|
||||
model: str = Field(..., description="Model name")
|
||||
model_config = ConfigDict(extra="allow")
|
||||
use_responses_api: bool | None = Field(
|
||||
default=None,
|
||||
description="Whether to route OpenAI ChatOpenAI calls through the /v1/responses API",
|
||||
)
|
||||
output_version: str | None = Field(
|
||||
default=None,
|
||||
description="Structured output version for OpenAI responses content, e.g. responses/v1",
|
||||
)
|
||||
supports_thinking: bool = Field(default_factory=lambda: False, description="Whether the model supports thinking")
|
||||
supports_reasoning_effort: bool = Field(default_factory=lambda: False, description="Whether the model supports reasoning effort")
|
||||
when_thinking_enabled: dict | None = Field(
|
||||
|
||||
30
backend/tests/test_model_config.py
Normal file
30
backend/tests/test_model_config.py
Normal file
@@ -0,0 +1,30 @@
|
||||
from deerflow.config.model_config import ModelConfig
|
||||
|
||||
|
||||
def _make_model(**overrides) -> ModelConfig:
    """Build a baseline ChatOpenAI-backed ModelConfig fixture for tests.

    Any keyword argument in *overrides* is merged on top of the defaults,
    so tests may both add extra fields (e.g. ``api_key``) and replace the
    baseline ones (e.g. ``name``). The merge avoids the ``TypeError``
    ("got multiple values for keyword argument") that a plain
    ``ModelConfig(name=..., **overrides)`` call would raise when a caller
    overrides a default — a backward-compatible generalization.
    """
    defaults = {
        "name": "openai-responses",
        "display_name": "OpenAI Responses",
        "description": None,
        "use": "langchain_openai:ChatOpenAI",
        "model": "gpt-5",
    }
    # overrides win over defaults; previously-valid calls are unchanged.
    return ModelConfig(**{**defaults, **overrides})
|
||||
|
||||
|
||||
def test_responses_api_fields_are_declared_in_model_schema():
    """The Responses API toggles must be first-class fields on ModelConfig,
    not values absorbed by the model's ``extra="allow"`` behavior."""
    declared_fields = ModelConfig.model_fields
    for field_name in ("use_responses_api", "output_version"):
        assert field_name in declared_fields
|
||||
|
||||
|
||||
def test_responses_api_fields_round_trip_in_model_dump():
    """Values assigned to the Responses API fields survive ``model_dump``."""
    overrides = {
        "api_key": "$OPENAI_API_KEY",
        "use_responses_api": True,
        "output_version": "responses/v1",
    }
    serialized = _make_model(**overrides).model_dump(exclude_none=True)

    assert serialized["use_responses_api"] is True
    assert serialized["output_version"] == "responses/v1"
|
||||
@@ -498,3 +498,34 @@ def test_openai_compatible_provider_multiple_models(monkeypatch):
|
||||
# Create second model
|
||||
factory_module.create_chat_model(name="minimax-m2.5-highspeed")
|
||||
assert captured.get("model") == "MiniMax-M2.5-highspeed"
|
||||
|
||||
|
||||
def test_openai_responses_api_settings_are_passed_to_chatopenai(monkeypatch):
    """create_chat_model must forward use_responses_api / output_version
    as constructor kwargs to the resolved chat-model class."""
    responses_model = ModelConfig(
        name="gpt-5-responses",
        display_name="GPT-5 Responses",
        description=None,
        use="langchain_openai:ChatOpenAI",
        model="gpt-5",
        api_key="test-key",
        use_responses_api=True,
        output_version="responses/v1",
        supports_thinking=False,
        supports_vision=True,
    )
    _patch_factory(monkeypatch, _make_app_config([responses_model]))

    # Record every kwarg the factory hands to the chat-model constructor.
    seen_kwargs: dict = {}

    class RecordingModel(FakeChatModel):
        def __init__(self, **kwargs):
            seen_kwargs.update(kwargs)
            BaseChatModel.__init__(self, **kwargs)

    def _fake_resolve(path, base):
        return RecordingModel

    monkeypatch.setattr(factory_module, "resolve_class", _fake_resolve)

    factory_module.create_chat_model(name="gpt-5-responses")

    assert seen_kwargs.get("use_responses_api") is True
    assert seen_kwargs.get("output_version") == "responses/v1"
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
# ============================================================================
|
||||
# Bump this number when the config schema changes.
|
||||
# Run `make config-upgrade` to merge new fields into your local config.yaml.
|
||||
config_version: 2
|
||||
config_version: 3
|
||||
|
||||
# ============================================================================
|
||||
# Models Configuration
|
||||
@@ -45,6 +45,16 @@ models:
|
||||
# temperature: 0.7
|
||||
# supports_vision: true # Enable vision support for view_image tool
|
||||
|
||||
# Example: OpenAI Responses API model
|
||||
# - name: gpt-5-responses
|
||||
# display_name: GPT-5 (Responses API)
|
||||
# use: langchain_openai:ChatOpenAI
|
||||
# model: gpt-5
|
||||
# api_key: $OPENAI_API_KEY
|
||||
# use_responses_api: true
|
||||
# output_version: responses/v1
|
||||
# supports_vision: true
|
||||
|
||||
# Example: Anthropic Claude model
|
||||
# - name: claude-3-5-sonnet
|
||||
# display_name: Claude 3.5 Sonnet
|
||||
|
||||
Reference in New Issue
Block a user