feat: add reasoning_effort configuration support for Doubao/GPT-5 models (#947)

* feat: Add reasoning effort configuration support

* Add `reasoning_effort` parameter to model config and agent initialization
* Support reasoning effort levels (minimal/low/medium/high) for Doubao/GPT-5 models
* Add UI controls in input box for reasoning effort selection
* Update doubao-seed-1.8 example config with reasoning effort support

Fixes & Cleanup:
* Ensure UTF-8 encoding for file operations
* Remove unused imports

* fix: set reasoning_effort to None for unsupported models

* fix: unit test error

* Update frontend/src/components/workspace/input-box.tsx

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: Willem Jiang <willem.jiang@gmail.com>
Commit: a138d5388a (parent e399d09e8f)
Author: Zhiyunyao
Date: 2026-03-02 20:49:41 +08:00
Committed by: GitHub
21 changed files with 212 additions and 33 deletions
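
Before the file-by-file diff, a minimal sketch of how a caller exercises the new knob. Only the `configurable` keys are taken from this commit (they mirror what the lead agent reads below); the `lead_agent` handle and the invoke payload are hypothetical stand-ins for however the graph is driven in a given deployment.

    # Hypothetical caller-side sketch; the configurable keys come from this
    # commit, the agent handle and message shape are illustrative only.
    config = {
        "configurable": {
            "model_name": "doubao-seed-1.8",
            "thinking_enabled": True,
            "reasoning_effort": "high",  # one of: minimal / low / medium / high
        }
    }
    result = lead_agent.invoke({"messages": [("user", "Plan a release")]}, config)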

.gitignore

@@ -28,6 +28,7 @@ extensions_config.json
 # IDE
 .idea/
+.vscode/
 
 # Coverage report
 coverage.xml

@@ -256,6 +256,7 @@ def make_lead_agent(config: RunnableConfig):
     from src.tools import get_available_tools
 
     thinking_enabled = config.get("configurable", {}).get("thinking_enabled", True)
+    reasoning_effort = config.get("configurable", {}).get("reasoning_effort", None)
     requested_model_name = config.get("configurable", {}).get("model_name") or config.get("configurable", {}).get("model")
     model_name = _resolve_model_name(requested_model_name)
     if model_name is None:
@@ -274,8 +275,9 @@ def make_lead_agent(config: RunnableConfig):
         thinking_enabled = False
     logger.info(
-        "thinking_enabled: %s, model_name: %s, is_plan_mode: %s, subagent_enabled: %s, max_concurrent_subagents: %s",
+        "thinking_enabled: %s, reasoning_effort: %s, model_name: %s, is_plan_mode: %s, subagent_enabled: %s, max_concurrent_subagents: %s",
         thinking_enabled,
+        reasoning_effort,
         model_name,
         is_plan_mode,
         subagent_enabled,
@@ -289,13 +291,14 @@ def make_lead_agent(config: RunnableConfig):
         {
             "model_name": model_name or "default",
             "thinking_enabled": thinking_enabled,
+            "reasoning_effort": reasoning_effort,
             "is_plan_mode": is_plan_mode,
             "subagent_enabled": subagent_enabled,
         }
     )
     return create_agent(
-        model=create_chat_model(name=model_name, thinking_enabled=thinking_enabled),
+        model=create_chat_model(name=model_name, thinking_enabled=thinking_enabled, reasoning_effort=reasoning_effort),
         tools=get_available_tools(model_name=model_name, subagent_enabled=subagent_enabled),
         middleware=_build_middlewares(config, model_name=model_name),
         system_prompt=apply_prompt_template(subagent_enabled=subagent_enabled, max_concurrent_subagents=max_concurrent_subagents),

@@ -400,6 +400,7 @@ class DeerFlowClient:
                 "display_name": getattr(model, "display_name", None),
                 "description": getattr(model, "description", None),
                 "supports_thinking": getattr(model, "supports_thinking", False),
+                "supports_reasoning_effort": getattr(model, "supports_reasoning_effort", False),
             }
             for model in self._app_config.models
         ]
@@ -458,6 +459,7 @@ class DeerFlowClient:
             "display_name": getattr(model, "display_name", None),
             "description": getattr(model, "description", None),
             "supports_thinking": getattr(model, "supports_thinking", False),
+            "supports_reasoning_effort": getattr(model, "supports_reasoning_effort", False),
         }
 
     # ------------------------------------------------------------------

@@ -72,7 +72,7 @@ class AppConfig(BaseModel):
             AppConfig: The loaded config.
         """
         resolved_path = cls.resolve_config_path(config_path)
-        with open(resolved_path) as f:
+        with open(resolved_path, encoding="utf-8") as f:
             config_data = yaml.safe_load(f)
 
         config_data = cls.resolve_env_variables(config_data)
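
This one-argument change is load-bearing: without an explicit encoding, `open()` falls back to the locale's preferred encoding, which is not guaranteed to be UTF-8 (legacy code pages such as cp936 or cp1252 on some Windows locales). A config containing non-ASCII text, such as the zh-CN strings added later in this commit, would then fail to load. A quick way to check the default on any machine:

    import locale

    # open() without an encoding uses this value, which varies by platform
    # and user locale; pinning encoding="utf-8" removes the dependence on it.
    print(locale.getpreferredencoding(False))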

@@ -133,7 +133,7 @@ class ExtensionsConfig(BaseModel):
             # Return empty config if extensions config file is not found
             return cls(mcp_servers={}, skills={})
 
-        with open(resolved_path) as f:
+        with open(resolved_path, encoding="utf-8") as f:
             config_data = json.load(f)
 
         cls.resolve_env_variables(config_data)

@@ -14,6 +14,7 @@ class ModelConfig(BaseModel):
     model: str = Field(..., description="Model name")
     model_config = ConfigDict(extra="allow")
     supports_thinking: bool = Field(default_factory=lambda: False, description="Whether the model supports thinking")
+    supports_reasoning_effort: bool = Field(default_factory=lambda: False, description="Whether the model supports reasoning effort")
     when_thinking_enabled: dict | None = Field(
         default_factory=lambda: None,
         description="Extra settings to be passed to the model when thinking is enabled",

@@ -13,6 +13,7 @@ class ModelResponse(BaseModel):
     display_name: str | None = Field(None, description="Human-readable name")
     description: str | None = Field(None, description="Model description")
     supports_thinking: bool = Field(default=False, description="Whether model supports thinking mode")
+    supports_reasoning_effort: bool = Field(default=False, description="Whether model supports reasoning effort")
 
 
 class ModelsListResponse(BaseModel):
@@ -63,6 +64,7 @@ async def list_models() -> ModelsListResponse:
             display_name=model.display_name,
             description=model.description,
             supports_thinking=model.supports_thinking,
+            supports_reasoning_effort=model.supports_reasoning_effort,
         )
         for model in config.models
     ]
@@ -107,4 +109,5 @@ async def get_model(model_name: str) -> ModelResponse:
         display_name=model.display_name,
         description=model.description,
         supports_thinking=model.supports_thinking,
+        supports_reasoning_effort=model.supports_reasoning_effort,
     )

@@ -32,6 +32,7 @@ def create_chat_model(name: str | None = None, thinking_enabled: bool = False, *
             "display_name",
             "description",
             "supports_thinking",
+            "supports_reasoning_effort",
             "when_thinking_enabled",
             "supports_vision",
         },
@@ -40,6 +41,11 @@ def create_chat_model(name: str | None = None, thinking_enabled: bool = False, *
         if not model_config.supports_thinking:
             raise ValueError(f"Model {name} does not support thinking. Set `supports_thinking` to true in the `config.yaml` to enable thinking.") from None
         model_settings_from_config.update(model_config.when_thinking_enabled)
+    if not thinking_enabled and model_config.when_thinking_enabled and model_config.when_thinking_enabled.get("extra_body", {}).get("thinking", {}).get("type"):
+        kwargs.update({"extra_body": {"thinking": {"type": "disabled"}}})
+        kwargs.update({"reasoning_effort": "minimal"})
+    if not model_config.supports_reasoning_effort:
+        kwargs.update({"reasoning_effort": None})
     model_instance = model_class(**kwargs, **model_settings_from_config)
 
     if is_tracing_enabled():
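
The two new branches compose in a deliberate order: disabling thinking on a thinking-configured model forces the cheapest effort, and the `supports_reasoning_effort` guard then clears the parameter entirely for models that cannot accept it (the "set reasoning_effort to None for unsupported models" fix from the commit message). Restated as a standalone function, assuming these are the only two overrides:

    def resolve_reasoning_effort(
        requested: str | None,
        thinking_enabled: bool,
        thinking_configured: bool,
        supports_reasoning_effort: bool,
    ) -> str | None:
        """Paraphrase of the kwargs logic above; not the repo's actual code."""
        effort = requested
        if not thinking_enabled and thinking_configured:
            effort = "minimal"  # thinking off -> cheapest effort
        if not supports_reasoning_effort:
            effort = None  # never send the parameter to models that reject it
        return effort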

@@ -1,7 +1,4 @@
 from importlib import import_module
-from typing import TypeVar
-
-T = TypeVar("T")
 
 
 def resolve_variable[T](

@@ -25,6 +25,8 @@ def mock_app_config():
     """Provide a minimal AppConfig mock."""
     model = MagicMock()
     model.name = "test-model"
+    model.supports_thinking = False
+    model.supports_reasoning_effort = False
     model.model_dump.return_value = {"name": "test-model", "use": "langchain_openai:ChatOpenAI"}
 
     config = MagicMock()
@@ -379,6 +381,7 @@ class TestGetModel:
         model_cfg.display_name = "Test Model"
         model_cfg.description = "A test model"
         model_cfg.supports_thinking = True
+        model_cfg.supports_reasoning_effort = True
         client._app_config.get_model_config.return_value = model_cfg
 
         result = client.get_model("test-model")
@@ -387,6 +390,7 @@
             "display_name": "Test Model",
             "description": "A test model",
             "supports_thinking": True,
+            "supports_reasoning_effort": True,
         }
 
     def test_not_found(self, client):
@@ -928,6 +932,7 @@ class TestScenarioConfigManagement:
         model_cfg.display_name = None
         model_cfg.description = None
         model_cfg.supports_thinking = False
+        model_cfg.supports_reasoning_effort = False
         client._app_config.get_model_config.return_value = model_cfg
 
         detail = client.get_model(model_name)
         assert detail["name"] == model_name

@@ -84,9 +84,10 @@ def test_make_lead_agent_disables_thinking_when_model_does_not_support_it(monkeypatch):
     captured: dict[str, object] = {}
 
-    def _fake_create_chat_model(*, name, thinking_enabled):
+    def _fake_create_chat_model(*, name, thinking_enabled, reasoning_effort=None):
         captured["name"] = name
         captured["thinking_enabled"] = thinking_enabled
+        captured["reasoning_effort"] = reasoning_effort
         return object()
 
     monkeypatch.setattr(lead_agent_module, "create_chat_model", _fake_create_chat_model)

@@ -65,18 +65,19 @@ models:
 #       type: enabled
 
 # Example: Volcengine (Doubao) model
-# - name: doubao-seed-1.8
-#   display_name: Doubao 1.8 (Thinking)
-#   use: langchain_deepseek:ChatDeepSeek
-#   model: ep-m-20260106111913-xxxxx
-#   api_base: https://ark.cn-beijing.volces.com/api/v3
-#   api_key: $VOLCENGINE_API_KEY
-#   supports_thinking: true
-#   supports_vision: false # Check your specific model's capabilities
-#   when_thinking_enabled:
-#     extra_body:
-#       thinking:
-#         type: enabled
+- name: doubao-seed-1.8
+  display_name: Doubao-Seed-1.8
+  use: src.models.patched_deepseek:PatchedChatDeepSeek
+  model: doubao-seed-1-8-251228
+  api_base: https://ark.cn-beijing.volces.com/api/v3
+  api_key: $VOLCENGINE_API_KEY
+  supports_thinking: true
+  supports_vision: true
+  supports_reasoning_effort: true
+  when_thinking_enabled:
+    extra_body:
+      thinking:
+        type: enabled
 
 # Example: Kimi K2.5 model
 # - name: kimi-k2.5
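
The now-active entry above round-trips through `ModelConfig`. A self-contained check against a trimmed mirror of that class (the real one allows extra fields and defines more of them):

    import yaml
    from pydantic import BaseModel

    # Trimmed, illustrative mirror of ModelConfig; just enough to show the
    # new flag being parsed from YAML.
    class ModelConfigSketch(BaseModel):
        name: str
        model: str
        supports_thinking: bool = False
        supports_reasoning_effort: bool = False

    entry = yaml.safe_load("""
    name: doubao-seed-1.8
    model: doubao-seed-1-8-251228
    supports_thinking: true
    supports_reasoning_effort: true
    """)
    print(ModelConfigSketch(**entry).supports_reasoning_effort)  # True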

@@ -177,6 +177,7 @@ export default function ChatPage() {
       is_plan_mode:
         settings.context.mode === "pro" || settings.context.mode === "ultra",
       subagent_enabled: settings.context.mode === "ultra",
+      reasoning_effort: settings.context.reasoning_effort,
     },
     afterSubmit() {
       router.push(pathOfThread(threadId!));
@@ -236,10 +237,9 @@ export default function ChatPage() {
           className={cn("size-full", !isNewThread && "pt-10")}
           threadId={threadId}
           thread={thread}
-          messagesOverride={
-            !thread.isLoading && finalState?.messages
-              ? (finalState.messages as Message[])
-              : undefined
+          messages={
+            (finalState?.messages as Message[]) ??
+              thread.messages
           }
           paddingBottom={todoListCollapsed ? 160 : 280}
         />

@@ -106,6 +106,7 @@ export function InputBox({
     "thread_id" | "is_plan_mode" | "thinking_enabled" | "subagent_enabled"
   > & {
     mode: "flash" | "thinking" | "pro" | "ultra" | undefined;
+    reasoning_effort?: "minimal" | "low" | "medium" | "high";
   };
   extraHeader?: React.ReactNode;
   isNewThread?: boolean;
@@ -116,6 +117,7 @@ export function InputBox({
       "thread_id" | "is_plan_mode" | "thinking_enabled" | "subagent_enabled"
     > & {
       mode: "flash" | "thinking" | "pro" | "ultra" | undefined;
+      reasoning_effort?: "minimal" | "low" | "medium" | "high";
     },
   ) => void;
   onSubmit?: (message: PromptInputMessage) => void;
@@ -159,6 +161,11 @@ export function InputBox({
     [selectedModel],
   );
 
+  const supportReasoningEffort = useMemo(
+    () => selectedModel?.supports_reasoning_effort ?? false,
+    [selectedModel],
+  );
+
   const handleModelSelect = useCallback(
     (model_name: string) => {
       const model = models.find((m) => m.name === model_name);
@@ -169,6 +176,7 @@ export function InputBox({
         ...context,
         model_name,
         mode: getResolvedMode(context.mode, model.supports_thinking ?? false),
+        reasoning_effort: context.reasoning_effort,
       });
       setModelDialogOpen(false);
     },
@@ -180,10 +188,22 @@ export function InputBox({
       onContextChange?.({
         ...context,
         mode: getResolvedMode(mode, supportThinking),
+        reasoning_effort: mode === "ultra" ? "high" : mode === "pro" ? "medium" : mode === "thinking" ? "low" : "minimal",
       });
     },
     [onContextChange, context, supportThinking],
   );
 
+  const handleReasoningEffortSelect = useCallback(
+    (effort: "minimal" | "low" | "medium" | "high") => {
+      onContextChange?.({
+        ...context,
+        reasoning_effort: effort,
+      });
+    },
+    [onContextChange, context],
+  );
+
   const handleSubmit = useCallback(
     async (message: PromptInputMessage) => {
       if (status === "streaming") {
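
The ternary added to handleModeSelect encodes a mode-to-default-effort table. Spelled out as data (in Python, the language used for the other sketches here; per the ternary, any unmatched mode falls through to "minimal"):

    # Default reasoning effort applied when the user switches modes,
    # restated from the ternary in handleModeSelect above.
    DEFAULT_EFFORT_BY_MODE = {
        "ultra": "high",
        "pro": "medium",
        "thinking": "low",
        "flash": "minimal",  # also the fallback for any other value
    }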
@@ -244,9 +264,9 @@ export function InputBox({
           <ModeHoverGuide
             mode={
               context.mode === "flash" ||
               context.mode === "thinking" ||
               context.mode === "pro" ||
               context.mode === "ultra"
                 ? context.mode
                 : "flash"
             }
@@ -297,7 +317,7 @@ export function InputBox({
               className={cn(
                 "mr-2 size-4",
                 context.mode === "flash" &&
                   "text-accent-foreground",
               )}
             />
             {t.inputBox.flashMode}
@@ -327,7 +347,7 @@ export function InputBox({
               className={cn(
                 "mr-2 size-4",
                 context.mode === "thinking" &&
                   "text-accent-foreground",
               )}
             />
             {t.inputBox.reasoningMode}
@@ -409,6 +429,116 @@ export function InputBox({
           </DropdownMenuGroup>
         </PromptInputActionMenuContent>
       </PromptInputActionMenu>
+      {supportReasoningEffort && context.mode !== "flash" && (
+        <PromptInputActionMenu>
+          <PromptInputActionMenuTrigger className="gap-1! px-2!">
+            <div className="text-xs font-normal">
+              {t.inputBox.reasoningEffort}:
+              {context.reasoning_effort === "minimal" && " " + t.inputBox.reasoningEffortMinimal}
+              {context.reasoning_effort === "low" && " " + t.inputBox.reasoningEffortLow}
+              {context.reasoning_effort === "medium" && " " + t.inputBox.reasoningEffortMedium}
+              {context.reasoning_effort === "high" && " " + t.inputBox.reasoningEffortHigh}
+            </div>
+          </PromptInputActionMenuTrigger>
+          <PromptInputActionMenuContent className="w-70">
+            <DropdownMenuGroup>
+              <DropdownMenuLabel className="text-muted-foreground text-xs">
+                {t.inputBox.reasoningEffort}
+              </DropdownMenuLabel>
+              <PromptInputActionMenu>
+                <PromptInputActionMenuItem
+                  className={cn(
+                    context.reasoning_effort === "minimal"
+                      ? "text-accent-foreground"
+                      : "text-muted-foreground/65",
+                  )}
+                  onSelect={() => handleReasoningEffortSelect("minimal")}
+                >
+                  <div className="flex flex-col gap-2">
+                    <div className="flex items-center gap-1 font-bold">
+                      {t.inputBox.reasoningEffortMinimal}
+                    </div>
+                    <div className="pl-2 text-xs">
+                      {t.inputBox.reasoningEffortMinimalDescription}
+                    </div>
+                  </div>
+                  {context.reasoning_effort === "minimal" ? (
+                    <CheckIcon className="ml-auto size-4" />
+                  ) : (
+                    <div className="ml-auto size-4" />
+                  )}
+                </PromptInputActionMenuItem>
+                <PromptInputActionMenuItem
+                  className={cn(
+                    context.reasoning_effort === "low"
+                      ? "text-accent-foreground"
+                      : "text-muted-foreground/65",
+                  )}
+                  onSelect={() => handleReasoningEffortSelect("low")}
+                >
+                  <div className="flex flex-col gap-2">
+                    <div className="flex items-center gap-1 font-bold">
+                      {t.inputBox.reasoningEffortLow}
+                    </div>
+                    <div className="pl-2 text-xs">
+                      {t.inputBox.reasoningEffortLowDescription}
+                    </div>
+                  </div>
+                  {context.reasoning_effort === "low" ? (
+                    <CheckIcon className="ml-auto size-4" />
+                  ) : (
+                    <div className="ml-auto size-4" />
+                  )}
+                </PromptInputActionMenuItem>
+                <PromptInputActionMenuItem
+                  className={cn(
+                    context.reasoning_effort === "medium" || !context.reasoning_effort
+                      ? "text-accent-foreground"
+                      : "text-muted-foreground/65",
+                  )}
+                  onSelect={() => handleReasoningEffortSelect("medium")}
+                >
+                  <div className="flex flex-col gap-2">
+                    <div className="flex items-center gap-1 font-bold">
+                      {t.inputBox.reasoningEffortMedium}
+                    </div>
+                    <div className="pl-2 text-xs">
+                      {t.inputBox.reasoningEffortMediumDescription}
+                    </div>
+                  </div>
+                  {context.reasoning_effort === "medium" || !context.reasoning_effort ? (
+                    <CheckIcon className="ml-auto size-4" />
+                  ) : (
+                    <div className="ml-auto size-4" />
+                  )}
+                </PromptInputActionMenuItem>
+                <PromptInputActionMenuItem
+                  className={cn(
+                    context.reasoning_effort === "high"
+                      ? "text-accent-foreground"
+                      : "text-muted-foreground/65",
+                  )}
+                  onSelect={() => handleReasoningEffortSelect("high")}
+                >
+                  <div className="flex flex-col gap-2">
+                    <div className="flex items-center gap-1 font-bold">
+                      {t.inputBox.reasoningEffortHigh}
+                    </div>
+                    <div className="pl-2 text-xs">
+                      {t.inputBox.reasoningEffortHighDescription}
+                    </div>
+                  </div>
+                  {context.reasoning_effort === "high" ? (
+                    <CheckIcon className="ml-auto size-4" />
+                  ) : (
+                    <div className="ml-auto size-4" />
+                  )}
+                </PromptInputActionMenuItem>
+              </PromptInputActionMenu>
+            </DropdownMenuGroup>
+          </PromptInputActionMenuContent>
+        </PromptInputActionMenu>
+      )}
       </PromptInputTools>
       <PromptInputTools>
         <ModelSelector

@@ -34,20 +34,18 @@ export function MessageList({
   className,
   threadId,
   thread,
-  messagesOverride,
+  messages,
   paddingBottom = 160,
 }: {
   className?: string;
   threadId: string;
   thread: UseStream<AgentThreadState>;
-  /** When set (e.g. from onFinish), use instead of thread.messages so SSE end shows complete state. */
-  messagesOverride?: Message[];
+  messages: Message[];
   paddingBottom?: number;
 }) {
   const { t } = useI18n();
   const rehypePlugins = useRehypeSplitWordsIntoSpans(thread.isLoading);
   const updateSubtask = useUpdateSubtask();
-  const messages = messagesOverride ?? thread.messages;
 
   if (thread.isThreadLoading) {
     return <MessageListSkeleton />;
   }

@@ -82,6 +82,15 @@ export const enUS: Translations = {
     ultraMode: "Ultra",
     ultraModeDescription:
       "Pro mode with subagents to divide work; best for complex multi-step tasks",
+    reasoningEffort: "Reasoning Effort",
+    reasoningEffortMinimal: "Minimal",
+    reasoningEffortMinimalDescription: "Retrieval + Direct Output",
+    reasoningEffortLow: "Low",
+    reasoningEffortLowDescription: "Simple Logic Check + Shallow Deduction",
+    reasoningEffortMedium: "Medium",
+    reasoningEffortMediumDescription: "Multi-layer Logic Analysis + Basic Verification",
+    reasoningEffortHigh: "High",
+    reasoningEffortHighDescription: "Full-dimensional Logic Deduction + Multi-path Verification + Backward Check",
     searchModels: "Search models...",
     surpriseMe: "Surprise",
     surpriseMePrompt: "Surprise me",

@@ -64,6 +64,15 @@ export interface Translations {
   proModeDescription: string;
   ultraMode: string;
   ultraModeDescription: string;
+  reasoningEffort: string;
+  reasoningEffortMinimal: string;
+  reasoningEffortMinimalDescription: string;
+  reasoningEffortLow: string;
+  reasoningEffortLowDescription: string;
+  reasoningEffortMedium: string;
+  reasoningEffortMediumDescription: string;
+  reasoningEffortHigh: string;
+  reasoningEffortHighDescription: string;
   searchModels: string;
   surpriseMe: string;
   surpriseMePrompt: string;

@@ -80,6 +80,15 @@ export const zhCN: Translations = {
     ultraMode: "Ultra",
     ultraModeDescription:
      "继承自 Pro 模式,可调用子代理分工协作,适合复杂多步骤任务,能力最强",
+    reasoningEffort: "推理深度",
+    reasoningEffortMinimal: "最低",
+    reasoningEffortMinimalDescription: "检索 + 直接输出",
+    reasoningEffortLow: "低",
+    reasoningEffortLowDescription: "简单逻辑校验 + 浅层推演",
+    reasoningEffortMedium: "中",
+    reasoningEffortMediumDescription: "多层逻辑分析 + 基础验证",
+    reasoningEffortHigh: "高",
+    reasoningEffortHighDescription: "全维度逻辑推演 + 多路径验证 + 反推校验",
     searchModels: "搜索模型...",
     surpriseMe: "小惊喜",
     surpriseMePrompt: "给我一个小惊喜吧",

@@ -4,4 +4,5 @@ export interface Model {
   display_name: string;
   description?: string | null;
   supports_thinking?: boolean;
+  supports_reasoning_effort?: boolean;
 }

@@ -7,6 +7,7 @@ export const DEFAULT_LOCAL_SETTINGS: LocalSettings = {
   context: {
     model_name: undefined,
     mode: undefined,
+    reasoning_effort: undefined,
   },
   layout: {
     sidebar_collapsed: false,
@@ -24,6 +25,7 @@ export interface LocalSettings {
     "thread_id" | "is_plan_mode" | "thinking_enabled" | "subagent_enabled"
   > & {
     mode: "flash" | "thinking" | "pro" | "ultra" | undefined;
+    reasoning_effort?: "minimal" | "low" | "medium" | "high";
   };
   layout: {
     sidebar_collapsed: boolean;

@@ -18,4 +18,5 @@ export interface AgentThreadContext extends Record<string, unknown> {
   thinking_enabled: boolean;
   is_plan_mode: boolean;
   subagent_enabled: boolean;
+  reasoning_effort?: "minimal" | "low" | "medium" | "high";
 }
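
For readers following the thread context from the Python side, the final shape, including the optional effort field, can be mirrored as below. This is a reference sketch only; the canonical definition is the TypeScript interface above.

    from typing import Literal, NotRequired, TypedDict  # NotRequired: Python 3.11+

    # Illustrative Python mirror of AgentThreadContext after this commit.
    class AgentThreadContextSketch(TypedDict):
        thinking_enabled: bool
        is_plan_mode: bool
        subagent_enabled: bool
        reasoning_effort: NotRequired[Literal["minimal", "low", "medium", "high"]]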