feat: add reasoning_effort configuration support for Doubao/GPT-5 models (#947)

* feat: Add reasoning effort configuration support

* Add `reasoning_effort` parameter to model config and agent initialization
* Support reasoning effort levels (minimal/low/medium/high) for Doubao/GPT-5 models
* Add UI controls in input box for reasoning effort selection
* Update doubao-seed-1.8 example config with reasoning effort support

Fixes & Cleanup:
* Ensure UTF-8 encoding for file operations
* Remove unused imports

* fix: set reasoning_effort to None for unsupported models

* fix: unit test error

* Update frontend/src/components/workspace/input-box.tsx

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: Willem Jiang <willem.jiang@gmail.com>
This commit is contained in:
Zhiyunyao
2026-03-02 20:49:41 +08:00
committed by GitHub
parent e399d09e8f
commit a138d5388a
21 changed files with 212 additions and 33 deletions

View File

@@ -177,6 +177,7 @@ export default function ChatPage() {
is_plan_mode:
settings.context.mode === "pro" || settings.context.mode === "ultra",
subagent_enabled: settings.context.mode === "ultra",
reasoning_effort: settings.context.reasoning_effort,
},
afterSubmit() {
router.push(pathOfThread(threadId!));
@@ -236,10 +237,9 @@ export default function ChatPage() {
className={cn("size-full", !isNewThread && "pt-10")}
threadId={threadId}
thread={thread}
messagesOverride={
!thread.isLoading && finalState?.messages
? (finalState.messages as Message[])
: undefined
messages={
(finalState?.messages as Message[])
?? thread.messages
}
paddingBottom={todoListCollapsed ? 160 : 280}
/>

View File

@@ -106,6 +106,7 @@ export function InputBox({
"thread_id" | "is_plan_mode" | "thinking_enabled" | "subagent_enabled"
> & {
mode: "flash" | "thinking" | "pro" | "ultra" | undefined;
reasoning_effort?: "minimal" | "low" | "medium" | "high";
};
extraHeader?: React.ReactNode;
isNewThread?: boolean;
@@ -116,6 +117,7 @@ export function InputBox({
"thread_id" | "is_plan_mode" | "thinking_enabled" | "subagent_enabled"
> & {
mode: "flash" | "thinking" | "pro" | "ultra" | undefined;
reasoning_effort?: "minimal" | "low" | "medium" | "high";
},
) => void;
onSubmit?: (message: PromptInputMessage) => void;
@@ -159,6 +161,11 @@ export function InputBox({
[selectedModel],
);
// True when the currently selected model advertises support for the
// reasoning_effort parameter; recomputed only when the selection changes.
const supportReasoningEffort = useMemo(
  () => Boolean(selectedModel?.supports_reasoning_effort),
  [selectedModel],
);
const handleModelSelect = useCallback(
(model_name: string) => {
const model = models.find((m) => m.name === model_name);
@@ -169,6 +176,7 @@ export function InputBox({
...context,
model_name,
mode: getResolvedMode(context.mode, model.supports_thinking ?? false),
reasoning_effort: context.reasoning_effort,
});
setModelDialogOpen(false);
},
@@ -180,10 +188,22 @@ export function InputBox({
onContextChange?.({
...context,
mode: getResolvedMode(mode, supportThinking),
reasoning_effort: mode === "ultra" ? "high" : mode === "pro" ? "medium" : mode === "thinking" ? "low" : "minimal",
});
},
[onContextChange, context, supportThinking],
);
// Persist the reasoning-effort level the user picked from the dropdown
// into the thread context, leaving all other context fields untouched.
const handleReasoningEffortSelect = useCallback(
  (effort: "minimal" | "low" | "medium" | "high") => {
    onContextChange?.({ ...context, reasoning_effort: effort });
  },
  [onContextChange, context],
);
const handleSubmit = useCallback(
async (message: PromptInputMessage) => {
if (status === "streaming") {
@@ -244,9 +264,9 @@ export function InputBox({
<ModeHoverGuide
mode={
context.mode === "flash" ||
context.mode === "thinking" ||
context.mode === "pro" ||
context.mode === "ultra"
context.mode === "thinking" ||
context.mode === "pro" ||
context.mode === "ultra"
? context.mode
: "flash"
}
@@ -297,7 +317,7 @@ export function InputBox({
className={cn(
"mr-2 size-4",
context.mode === "flash" &&
"text-accent-foreground",
"text-accent-foreground",
)}
/>
{t.inputBox.flashMode}
@@ -327,7 +347,7 @@ export function InputBox({
className={cn(
"mr-2 size-4",
context.mode === "thinking" &&
"text-accent-foreground",
"text-accent-foreground",
)}
/>
{t.inputBox.reasoningMode}
@@ -409,6 +429,116 @@ export function InputBox({
</DropdownMenuGroup>
</PromptInputActionMenuContent>
</PromptInputActionMenu>
{supportReasoningEffort && context.mode !== "flash" && (
<PromptInputActionMenu>
<PromptInputActionMenuTrigger className="gap-1! px-2!">
<div className="text-xs font-normal">
{t.inputBox.reasoningEffort}:
{context.reasoning_effort === "minimal" && " " + t.inputBox.reasoningEffortMinimal}
{context.reasoning_effort === "low" && " " + t.inputBox.reasoningEffortLow}
{context.reasoning_effort === "medium" && " " + t.inputBox.reasoningEffortMedium}
{context.reasoning_effort === "high" && " " + t.inputBox.reasoningEffortHigh}
</div>
</PromptInputActionMenuTrigger>
<PromptInputActionMenuContent className="w-70">
<DropdownMenuGroup>
<DropdownMenuLabel className="text-muted-foreground text-xs">
{t.inputBox.reasoningEffort}
</DropdownMenuLabel>
<PromptInputActionMenu>
<PromptInputActionMenuItem
className={cn(
context.reasoning_effort === "minimal"
? "text-accent-foreground"
: "text-muted-foreground/65",
)}
onSelect={() => handleReasoningEffortSelect("minimal")}
>
<div className="flex flex-col gap-2">
<div className="flex items-center gap-1 font-bold">
{t.inputBox.reasoningEffortMinimal}
</div>
<div className="pl-2 text-xs">
{t.inputBox.reasoningEffortMinimalDescription}
</div>
</div>
{context.reasoning_effort === "minimal" ? (
<CheckIcon className="ml-auto size-4" />
) : (
<div className="ml-auto size-4" />
)}
</PromptInputActionMenuItem>
<PromptInputActionMenuItem
className={cn(
context.reasoning_effort === "low"
? "text-accent-foreground"
: "text-muted-foreground/65",
)}
onSelect={() => handleReasoningEffortSelect("low")}
>
<div className="flex flex-col gap-2">
<div className="flex items-center gap-1 font-bold">
{t.inputBox.reasoningEffortLow}
</div>
<div className="pl-2 text-xs">
{t.inputBox.reasoningEffortLowDescription}
</div>
</div>
{context.reasoning_effort === "low" ? (
<CheckIcon className="ml-auto size-4" />
) : (
<div className="ml-auto size-4" />
)}
</PromptInputActionMenuItem>
<PromptInputActionMenuItem
className={cn(
context.reasoning_effort === "medium" || !context.reasoning_effort
? "text-accent-foreground"
: "text-muted-foreground/65",
)}
onSelect={() => handleReasoningEffortSelect("medium")}
>
<div className="flex flex-col gap-2">
<div className="flex items-center gap-1 font-bold">
{t.inputBox.reasoningEffortMedium}
</div>
<div className="pl-2 text-xs">
{t.inputBox.reasoningEffortMediumDescription}
</div>
</div>
{context.reasoning_effort === "medium" || !context.reasoning_effort ? (
<CheckIcon className="ml-auto size-4" />
) : (
<div className="ml-auto size-4" />
)}
</PromptInputActionMenuItem>
<PromptInputActionMenuItem
className={cn(
context.reasoning_effort === "high"
? "text-accent-foreground"
: "text-muted-foreground/65",
)}
onSelect={() => handleReasoningEffortSelect("high")}
>
<div className="flex flex-col gap-2">
<div className="flex items-center gap-1 font-bold">
{t.inputBox.reasoningEffortHigh}
</div>
<div className="pl-2 text-xs">
{t.inputBox.reasoningEffortHighDescription}
</div>
</div>
{context.reasoning_effort === "high" ? (
<CheckIcon className="ml-auto size-4" />
) : (
<div className="ml-auto size-4" />
)}
</PromptInputActionMenuItem>
</PromptInputActionMenu>
</DropdownMenuGroup>
</PromptInputActionMenuContent>
</PromptInputActionMenu>
)}
</PromptInputTools>
<PromptInputTools>
<ModelSelector

View File

@@ -34,20 +34,18 @@ export function MessageList({
className,
threadId,
thread,
messagesOverride,
messages,
paddingBottom = 160,
}: {
className?: string;
threadId: string;
thread: UseStream<AgentThreadState>;
/** When set (e.g. from onFinish), use instead of thread.messages so SSE end shows complete state. */
messagesOverride?: Message[];
messages: Message[];
paddingBottom?: number;
}) {
const { t } = useI18n();
const rehypePlugins = useRehypeSplitWordsIntoSpans(thread.isLoading);
const updateSubtask = useUpdateSubtask();
const messages = messagesOverride ?? thread.messages;
if (thread.isThreadLoading) {
return <MessageListSkeleton />;
}

View File

@@ -82,6 +82,15 @@ export const enUS: Translations = {
ultraMode: "Ultra",
ultraModeDescription:
"Pro mode with subagents to divide work; best for complex multi-step tasks",
reasoningEffort: "Reasoning Effort",
reasoningEffortMinimal: "Minimal",
reasoningEffortMinimalDescription: "Retrieval + Direct Output",
reasoningEffortLow: "Low",
reasoningEffortLowDescription: "Simple Logic Check + Shallow Deduction",
reasoningEffortMedium: "Medium",
reasoningEffortMediumDescription: "Multi-layer Logic Analysis + Basic Verification",
reasoningEffortHigh: "High",
reasoningEffortHighDescription: "Full-dimensional Logic Deduction + Multi-path Verification + Backward Check",
searchModels: "Search models...",
surpriseMe: "Surprise",
surpriseMePrompt: "Surprise me",

View File

@@ -64,6 +64,15 @@ export interface Translations {
proModeDescription: string;
ultraMode: string;
ultraModeDescription: string;
reasoningEffort: string;
reasoningEffortMinimal: string;
reasoningEffortMinimalDescription: string;
reasoningEffortLow: string;
reasoningEffortLowDescription: string;
reasoningEffortMedium: string;
reasoningEffortMediumDescription: string;
reasoningEffortHigh: string;
reasoningEffortHighDescription: string;
searchModels: string;
surpriseMe: string;
surpriseMePrompt: string;

View File

@@ -80,6 +80,15 @@ export const zhCN: Translations = {
ultraMode: "Ultra",
ultraModeDescription:
"继承自 Pro 模式,可调用子代理分工协作,适合复杂多步骤任务,能力最强",
reasoningEffort: "推理深度",
reasoningEffortMinimal: "最低",
reasoningEffortMinimalDescription: "检索 + 直接输出",
reasoningEffortLow: "低",
reasoningEffortLowDescription: "简单逻辑校验 + 浅层推演",
reasoningEffortMedium: "中",
reasoningEffortMediumDescription: "多层逻辑分析 + 基础验证",
reasoningEffortHigh: "高",
reasoningEffortHighDescription: "全维度逻辑推演 + 多路径验证 + 反推校验",
searchModels: "搜索模型...",
surpriseMe: "小惊喜",
surpriseMePrompt: "给我一个小惊喜吧",

View File

@@ -4,4 +4,5 @@ export interface Model {
display_name: string;
description?: string | null;
supports_thinking?: boolean;
supports_reasoning_effort?: boolean;
}

View File

@@ -7,6 +7,7 @@ export const DEFAULT_LOCAL_SETTINGS: LocalSettings = {
context: {
model_name: undefined,
mode: undefined,
reasoning_effort: undefined,
},
layout: {
sidebar_collapsed: false,
@@ -24,6 +25,7 @@ export interface LocalSettings {
"thread_id" | "is_plan_mode" | "thinking_enabled" | "subagent_enabled"
> & {
mode: "flash" | "thinking" | "pro" | "ultra" | undefined;
reasoning_effort?: "minimal" | "low" | "medium" | "high";
};
layout: {
sidebar_collapsed: boolean;

View File

@@ -18,4 +18,5 @@ export interface AgentThreadContext extends Record<string, unknown> {
thinking_enabled: boolean;
is_plan_mode: boolean;
subagent_enabled: boolean;
reasoning_effort?: "minimal" | "low" | "medium" | "high";
}