Mirror of https://gitee.com/wanwujie/deer-flow
synced 2026-04-02 22:02:13 +08:00
Add MiniMax as an OpenAI-compatible model provider (#1120)
* Add MiniMax as an OpenAI-compatible model provider

MiniMax offers high-performance LLMs (M2.5, M2.5-highspeed) with 204K context windows. This commit adds MiniMax as a selectable provider in the configuration system.

Changes:
- Add MiniMax to SUPPORTED_MODELS with model definitions
- Add MiniMax provider configuration in conf/config.yaml
- Update documentation with MiniMax setup instructions

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

* Update README to remove MiniMax API details

Removed mention of MiniMax API usage and configuration examples.

---------

Co-authored-by: octo-patch <octo-patch@users.noreply.github.com>
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
Co-authored-by: Willem Jiang <willem.jiang@gmail.com>
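The new entries drive MiniMax through langchain_openai's ChatOpenAI pointed at MiniMax's OpenAI-compatible endpoint. Below is a minimal standalone sketch of that wiring, using only values visible in this diff; deer-flow builds the model through its own factory rather than instantiating it directly like this.

# Sketch only: the ChatOpenAI wiring that the new config entries describe.
# Values mirror conf/config.yaml below; this is not deer-flow's factory code.
import os

from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model="MiniMax-M2.5",
    api_key=os.environ["MINIMAX_API_KEY"],
    base_url="https://api.minimax.io/v1",
    max_tokens=4096,
    temperature=1.0,  # MiniMax requires temperature in (0.0, 1.0]
)
# llm.invoke("Hello") would send the request to MiniMax's OpenAI-compatible API.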
@@ -16,6 +16,7 @@ INFOQUEST_API_KEY=your-infoquest-api-key
# GEMINI_API_KEY=your-gemini-api-key
# DEEPSEEK_API_KEY=your-deepseek-api-key
# NOVITA_API_KEY=your-novita-api-key # OpenAI-compatible, see https://novita.ai
# MINIMAX_API_KEY=your-minimax-api-key # OpenAI-compatible, see https://platform.minimax.io
# FEISHU_APP_ID=your-feishu-app-id
# FEISHU_APP_SECRET=your-feishu-app-secret
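The new MINIMAX_API_KEY line follows the same pattern as the other provider keys: it stays commented out until a real key is filled in. A minimal sketch of how such a .env entry reaches the process environment, assuming a python-dotenv-style loader (deer-flow's actual startup code may load environment variables differently):

# Sketch only: pick up MINIMAX_API_KEY from .env.
# Assumes python-dotenv is installed; deer-flow may handle this differently.
import os

from dotenv import load_dotenv

load_dotenv()  # merges key=value pairs from .env into os.environ

api_key = os.environ.get("MINIMAX_API_KEY")
if not api_key:
    raise RuntimeError("MINIMAX_API_KEY is not set; uncomment it in .env or export it")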
@@ -41,6 +41,25 @@ models:
    thinking:
      type: enabled

  - name: minimax-m2.5
    display_name: MiniMax M2.5
    use: langchain_openai:ChatOpenAI
    model: MiniMax-M2.5
    api_key: $MINIMAX_API_KEY
    base_url: https://api.minimax.io/v1
    max_tokens: 4096
    temperature: 1.0 # MiniMax requires temperature in (0.0, 1.0]
    supports_vision: true

  - name: minimax-m2.5-highspeed
    display_name: MiniMax M2.5 Highspeed
    use: langchain_openai:ChatOpenAI
    model: MiniMax-M2.5-highspeed
    api_key: $MINIMAX_API_KEY
    base_url: https://api.minimax.io/v1
    max_tokens: 4096
    temperature: 1.0 # MiniMax requires temperature in (0.0, 1.0]
    supports_vision: true

  - name: openrouter-gemini-2.5-flash
    display_name: Gemini 2.5 Flash (OpenRouter)
    use: langchain_openai:ChatOpenAI
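The api_key field references an environment variable rather than embedding the key. How deer-flow expands $MINIMAX_API_KEY is not shown in this diff; a minimal sketch of the usual approach to that kind of substitution, purely for illustration:

# Sketch only: expanding $VAR / ${VAR} references in config values.
# An assumption about what a config loader typically does, not deer-flow's actual code.
import os


def expand_env(value: str) -> str:
    # os.path.expandvars leaves the string unchanged if the variable is unset.
    return os.path.expandvars(value)


# expand_env("$MINIMAX_API_KEY") -> the key loaded from .env, if it is set.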
@@ -410,3 +410,91 @@ def test_thinking_shortcut_not_leaked_into_model_when_disabled(monkeypatch):
    # The disable path should have set thinking to disabled (not the raw enabled shortcut)
    assert captured.get("thinking") == {"type": "disabled"}


# ---------------------------------------------------------------------------
# OpenAI-compatible providers (MiniMax, Novita, etc.)
# ---------------------------------------------------------------------------


def test_openai_compatible_provider_passes_base_url(monkeypatch):
    """OpenAI-compatible providers like MiniMax should pass base_url through to the model."""
    model = ModelConfig(
        name="minimax-m2.5",
        display_name="MiniMax M2.5",
        description=None,
        use="langchain_openai:ChatOpenAI",
        model="MiniMax-M2.5",
        base_url="https://api.minimax.io/v1",
        api_key="test-key",
        max_tokens=4096,
        temperature=1.0,
        supports_vision=True,
        supports_thinking=False,
    )
    cfg = _make_app_config([model])
    _patch_factory(monkeypatch, cfg)

    captured: dict = {}

    class CapturingModel(FakeChatModel):
        def __init__(self, **kwargs):
            captured.update(kwargs)
            BaseChatModel.__init__(self, **kwargs)

    monkeypatch.setattr(factory_module, "resolve_class", lambda path, base: CapturingModel)

    factory_module.create_chat_model(name="minimax-m2.5")

    assert captured.get("model") == "MiniMax-M2.5"
    assert captured.get("base_url") == "https://api.minimax.io/v1"
    assert captured.get("api_key") == "test-key"
    assert captured.get("temperature") == 1.0
    assert captured.get("max_tokens") == 4096


def test_openai_compatible_provider_multiple_models(monkeypatch):
    """Multiple models from the same OpenAI-compatible provider should coexist."""
    m1 = ModelConfig(
        name="minimax-m2.5",
        display_name="MiniMax M2.5",
        description=None,
        use="langchain_openai:ChatOpenAI",
        model="MiniMax-M2.5",
        base_url="https://api.minimax.io/v1",
        api_key="test-key",
        temperature=1.0,
        supports_vision=True,
        supports_thinking=False,
    )
    m2 = ModelConfig(
        name="minimax-m2.5-highspeed",
        display_name="MiniMax M2.5 Highspeed",
        description=None,
        use="langchain_openai:ChatOpenAI",
        model="MiniMax-M2.5-highspeed",
        base_url="https://api.minimax.io/v1",
        api_key="test-key",
        temperature=1.0,
        supports_vision=True,
        supports_thinking=False,
    )
    cfg = _make_app_config([m1, m2])
    _patch_factory(monkeypatch, cfg)

    captured: dict = {}

    class CapturingModel(FakeChatModel):
        def __init__(self, **kwargs):
            captured.update(kwargs)
            BaseChatModel.__init__(self, **kwargs)

    monkeypatch.setattr(factory_module, "resolve_class", lambda path, base: CapturingModel)

    # Create first model
    factory_module.create_chat_model(name="minimax-m2.5")
    assert captured.get("model") == "MiniMax-M2.5"

    # Create second model
    factory_module.create_chat_model(name="minimax-m2.5-highspeed")
    assert captured.get("model") == "MiniMax-M2.5-highspeed"
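Both tests stub out factory_module.resolve_class, which receives the "module:Class" string from the use field plus a base class. Its real implementation is not part of this diff; a minimal sketch of what such a resolver usually looks like, stated as an assumption rather than deer-flow's actual code:

# Sketch only: an importlib-based resolver for "module:ClassName" paths such as
# "langchain_openai:ChatOpenAI". Inferred from how the tests stub it; not the
# actual deer-flow implementation.
from importlib import import_module


def resolve_class(path: str, base: type) -> type:
    module_name, _, class_name = path.partition(":")
    cls = getattr(import_module(module_name), class_name)
    if not issubclass(cls, base):
        raise TypeError(f"{path} does not resolve to a subclass of {base.__name__}")
    return cls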
@@ -106,6 +106,29 @@ models:
  #   thinking:
  #     type: enabled

  # Example: MiniMax (OpenAI-compatible)
  # MiniMax provides high-performance models with 204K context window
  # Docs: https://platform.minimax.io/docs/api-reference/text-openai-api
  # - name: minimax-m2.5
  #   display_name: MiniMax M2.5
  #   use: langchain_openai:ChatOpenAI
  #   model: MiniMax-M2.5
  #   api_key: $MINIMAX_API_KEY
  #   base_url: https://api.minimax.io/v1
  #   max_tokens: 4096
  #   temperature: 1.0 # MiniMax requires temperature in (0.0, 1.0]
  #   supports_vision: true

  # - name: minimax-m2.5-highspeed
  #   display_name: MiniMax M2.5 Highspeed
  #   use: langchain_openai:ChatOpenAI
  #   model: MiniMax-M2.5-highspeed
  #   api_key: $MINIMAX_API_KEY
  #   base_url: https://api.minimax.io/v1
  #   max_tokens: 4096
  #   temperature: 1.0 # MiniMax requires temperature in (0.0, 1.0]
  #   supports_vision: true

  # Example: OpenRouter (OpenAI-compatible)
  # OpenRouter models use the same ChatOpenAI + base_url pattern as other OpenAI-compatible gateways.
  # - name: openrouter-gemini-2.5-flash
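Because MiniMax exposes an OpenAI-compatible endpoint, the key and base URL can be sanity-checked outside deer-flow with the stock OpenAI Python client. A minimal sketch, assuming the openai package is installed and MINIMAX_API_KEY is exported:

# Sketch only: confirm the MiniMax endpoint answers the OpenAI chat-completions API.
import os

from openai import OpenAI

client = OpenAI(
    api_key=os.environ["MINIMAX_API_KEY"],
    base_url="https://api.minimax.io/v1",
)

response = client.chat.completions.create(
    model="MiniMax-M2.5",
    messages=[{"role": "user", "content": "Reply with the single word: ok"}],
    max_tokens=16,
    temperature=1.0,  # the config comments note MiniMax requires temperature in (0.0, 1.0]
)
print(response.choices[0].message.content)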