fix: recover from stale model context when configured models change (#898)

* fix: recover from stale model context after config model changes

* fix: fail fast on missing model config and expand model resolution tests

* fix: remove duplicate get_app_config imports

* fix: align model resolution tests with runtime imports

* Apply suggestions from code review

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* fix: remove duplicate model resolution test case

---------

Co-authored-by: Willem Jiang <willem.jiang@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
This commit is contained in:
Xinmin Zeng
2026-02-26 13:54:29 +08:00
committed by GitHub
parent 3e6e4b0b5f
commit 6a55860a15
3 changed files with 243 additions and 28 deletions

View File

@@ -12,7 +12,13 @@ import {
ZapIcon,
} from "lucide-react";
import { useSearchParams } from "next/navigation";
import { useCallback, useMemo, useState, type ComponentProps } from "react";
import {
useCallback,
useEffect,
useMemo,
useState,
type ComponentProps,
} from "react";
import {
PromptInput,
@@ -63,6 +69,21 @@ import {
import { ModeHoverGuide } from "./mode-hover-guide";
import { Tooltip } from "./tooltip";
type InputMode = "flash" | "thinking" | "pro" | "ultra";

/**
 * Resolve the effective input mode for the selected model.
 *
 * @param mode - The mode currently stored in context, if any.
 * @param supportsThinking - Whether the selected model supports thinking modes.
 * @returns A mode that is valid for the model: models without thinking
 *   support are pinned to "flash"; otherwise the explicit choice is kept,
 *   defaulting to "pro" when no mode has been chosen yet.
 */
function getResolvedMode(
  mode: InputMode | undefined,
  supportsThinking: boolean,
): InputMode {
  // A model without thinking support can only ever run in "flash".
  if (!supportsThinking) {
    return "flash";
  }
  // Thinking is supported: honor the explicit selection, default to "pro".
  return mode ?? "pro";
}
export function InputBox({
className,
disabled,
@@ -104,42 +125,64 @@ export function InputBox({
const searchParams = useSearchParams();
const [modelDialogOpen, setModelDialogOpen] = useState(false);
const { models } = useModels();
const selectedModel = useMemo(() => {
if (!context.model_name && models.length > 0) {
const model = models[0]!;
setTimeout(() => {
onContextChange?.({
...context,
model_name: model.name,
mode: model.supports_thinking ? "pro" : "flash",
});
}, 0);
return model;
useEffect(() => {
if (models.length === 0) {
return;
}
return models.find((m) => m.name === context.model_name);
const currentModel = models.find((m) => m.name === context.model_name);
const fallbackModel = currentModel ?? models[0]!;
const supportsThinking = fallbackModel.supports_thinking ?? false;
const nextModelName = fallbackModel.name;
const nextMode = getResolvedMode(context.mode, supportsThinking);
if (context.model_name === nextModelName && context.mode === nextMode) {
return;
}
onContextChange?.({
...context,
model_name: nextModelName,
mode: nextMode,
});
}, [context, models, onContextChange]);
const selectedModel = useMemo(() => {
if (models.length === 0) {
return undefined;
}
return models.find((m) => m.name === context.model_name) ?? models[0];
}, [context.model_name, models]);
const supportThinking = useMemo(
() => selectedModel?.supports_thinking ?? false,
[selectedModel],
);
const handleModelSelect = useCallback(
(model_name: string) => {
const model = models.find((m) => m.name === model_name);
if (!model) {
return;
}
onContextChange?.({
...context,
model_name,
mode: getResolvedMode(context.mode, model.supports_thinking ?? false),
});
setModelDialogOpen(false);
},
[onContextChange, context],
[onContextChange, context, models],
);
const handleModeSelect = useCallback(
(mode: "flash" | "thinking" | "pro" | "ultra") => {
(mode: InputMode) => {
onContextChange?.({
...context,
mode,
mode: getResolvedMode(mode, supportThinking),
});
},
[onContextChange, context],
[onContextChange, context, supportThinking],
);
const handleSubmit = useCallback(
async (message: PromptInputMessage) => {