Implement optimistic UI for file uploads and enhance message handling (#967)

* feat(upload): implement optimistic UI for file uploads and enhance message handling

* feat(middleware): enhance file handling by collecting historical uploads from directory

* feat(thread-title): update page title handling for new threads and improve loading state

* feat(uploads-middleware): enhance file extraction by verifying file existence in uploads directory

* feat(thread-stream): update file path reference to use virtual_path for uploads

* feat(tests): add core behaviour tests for UploadsMiddleware

* feat(tests): remove unused pytest import from test_uploads_middleware_core_logic.py

* feat: enhance file upload handling and localization support

- Update UploadsMiddleware to validate filenames more robustly.
- Modify MessageListItem to parse uploaded files from raw content for backward compatibility.
- Add localization for uploading messages in English and Chinese.
- Introduce parseUploadedFiles utility to extract uploaded files from message content.
This commit is contained in:
JeffJiang
2026-03-05 11:16:34 +08:00
committed by GitHub
parent 3ada4f98b1
commit b17c087174
9 changed files with 790 additions and 258 deletions

View File

@@ -1,22 +1,31 @@
import type { Message } from "@langchain/langgraph-sdk";
import { FileIcon } from "lucide-react";
import { FileIcon, Loader2Icon } from "lucide-react";
import { useParams } from "next/navigation";
import { memo, useMemo, type ImgHTMLAttributes } from "react";
import rehypeKatex from "rehype-katex";
import { Loader } from "@/components/ai-elements/loader";
import {
Message as AIElementMessage,
MessageContent as AIElementMessageContent,
MessageResponse as AIElementMessageResponse,
MessageToolbar,
} from "@/components/ai-elements/message";
import {
Reasoning,
ReasoningContent,
ReasoningTrigger,
} from "@/components/ai-elements/reasoning";
import { Task, TaskTrigger } from "@/components/ai-elements/task";
import { Badge } from "@/components/ui/badge";
import { resolveArtifactURL } from "@/core/artifacts/utils";
import { useI18n } from "@/core/i18n/hooks";
import {
extractContentFromMessage,
extractReasoningContentFromMessage,
parseUploadedFiles,
type UploadedFile,
stripUploadedFilesTag,
type FileInMessage,
} from "@/core/messages/utils";
import { useRehypeSplitWordsIntoSpans } from "@/core/rehype";
import { humanMessagePlugins } from "@/core/streamdown";
@@ -121,37 +130,67 @@ function MessageContent_({
const rawContent = extractContentFromMessage(message);
const reasoningContent = extractReasoningContentFromMessage(message);
const { contentToParse, uploadedFiles } = useMemo(() => {
if (!isLoading && reasoningContent && !rawContent) {
return {
contentToParse: reasoningContent,
uploadedFiles: [] as UploadedFile[],
};
const files = useMemo(() => {
const files = message.additional_kwargs?.files;
if (!Array.isArray(files) || files.length === 0) {
if (rawContent.includes("<uploaded_files>")) {
// If the content contains the <uploaded_files> tag, we return the parsed files from the content for backward compatibility.
return parseUploadedFiles(rawContent);
}
return null;
}
if (isHuman && rawContent) {
const { files, cleanContent: contentWithoutFiles } =
parseUploadedFiles(rawContent);
return { contentToParse: contentWithoutFiles, uploadedFiles: files };
return files as FileInMessage[];
}, [message.additional_kwargs?.files, rawContent]);
const contentToDisplay = useMemo(() => {
if (isHuman) {
return rawContent ? stripUploadedFilesTag(rawContent) : "";
}
return {
contentToParse: rawContent ?? "",
uploadedFiles: [] as UploadedFile[],
};
}, [isLoading, rawContent, reasoningContent, isHuman]);
return rawContent ?? "";
}, [rawContent, isHuman]);
const filesList =
uploadedFiles.length > 0 && thread_id ? (
<UploadedFilesList files={uploadedFiles} threadId={thread_id} />
files && files.length > 0 && thread_id ? (
<RichFilesList files={files} threadId={thread_id} />
) : null;
// Uploading state: mock AI message shown while files upload
if (message.additional_kwargs?.element === "task") {
return (
<AIElementMessageContent className={className}>
<Task defaultOpen={false}>
<TaskTrigger title="">
<div className="text-muted-foreground flex w-full cursor-default items-center gap-2 text-sm select-none">
<Loader className="size-4" />
<span>{contentToDisplay}</span>
</div>
</TaskTrigger>
</Task>
</AIElementMessageContent>
);
}
// Reasoning-only AI message (no main response content yet)
if (!isHuman && reasoningContent && !rawContent) {
return (
<AIElementMessageContent className={className}>
<Reasoning isStreaming={isLoading}>
<ReasoningTrigger />
<ReasoningContent>{reasoningContent}</ReasoningContent>
</Reasoning>
</AIElementMessageContent>
);
}
if (isHuman) {
const messageResponse = contentToParse ? (
const messageResponse = contentToDisplay ? (
<AIElementMessageResponse
remarkPlugins={humanMessagePlugins.remarkPlugins}
rehypePlugins={humanMessagePlugins.rehypePlugins}
components={components}
>
{contentToParse}
{contentToDisplay}
</AIElementMessageResponse>
) : null;
return (
@@ -170,7 +209,7 @@ function MessageContent_({
<AIElementMessageContent className={className}>
{filesList}
<MarkdownContent
content={contentToParse}
content={contentToDisplay}
isLoading={isLoading}
rehypePlugins={[...rehypePlugins, [rehypeKatex, { output: "html" }]]}
className="my-3"
@@ -224,22 +263,31 @@ function isImageFile(filename: string): boolean {
}
/**
* Uploaded files list component
* Format bytes to human-readable size string
*/
function UploadedFilesList({
/**
 * Format a byte count as a human-readable size string.
 *
 * @param bytes - File size in bytes.
 * @returns "—" for 0 bytes, whole bytes below 1 KB (e.g. "512 B"),
 *          otherwise one-decimal "X.X KB" / "X.X MB".
 */
function formatBytes(bytes: number): string {
  if (bytes === 0) return "—";
  // Fix: sizes under 1 KB previously rendered as "0.0 KB"; show whole bytes instead.
  if (bytes < 1024) return `${bytes} B`;
  const kb = bytes / 1024;
  if (kb < 1024) return `${kb.toFixed(1)} KB`;
  return `${(kb / 1024).toFixed(1)} MB`;
}
/**
* List of files from additional_kwargs.files (with optional upload status)
*/
function RichFilesList({
files,
threadId,
}: {
files: UploadedFile[];
files: FileInMessage[];
threadId: string;
}) {
if (files.length === 0) return null;
return (
<div className="mb-2 flex flex-wrap justify-end gap-2">
{files.map((file, index) => (
<UploadedFileCard
key={`${file.path}-${index}`}
<RichFileCard
key={`${file.filename}-${index}`}
file={file}
threadId={threadId}
/>
@@ -249,18 +297,48 @@ function UploadedFilesList({
}
/**
* Single uploaded file card component
* Single file card that handles FileInMessage (supports uploading state)
*/
function UploadedFileCard({
function RichFileCard({
file,
threadId,
}: {
file: UploadedFile;
file: FileInMessage;
threadId: string;
}) {
if (!threadId) return null;
const { t } = useI18n();
const isUploading = file.status === "uploading";
const isImage = isImageFile(file.filename);
if (isUploading) {
return (
<div className="bg-background border-border/40 flex max-w-50 min-w-30 flex-col gap-1 rounded-lg border p-3 opacity-60 shadow-sm">
<div className="flex items-start gap-2">
<Loader2Icon className="text-muted-foreground mt-0.5 size-4 shrink-0 animate-spin" />
<span
className="text-foreground truncate text-sm font-medium"
title={file.filename}
>
{file.filename}
</span>
</div>
<div className="flex items-center justify-between gap-2">
<Badge
variant="secondary"
className="rounded px-1.5 py-0.5 text-[10px] font-normal"
>
{getFileTypeLabel(file.filename)}
</Badge>
<span className="text-muted-foreground text-[10px]">
{t.uploads.uploading}
</span>
</div>
</div>
);
}
if (!file.path) return null;
const fileUrl = resolveArtifactURL(file.path, threadId);
if (isImage) {
@@ -274,14 +352,14 @@ function UploadedFileCard({
<img
src={fileUrl}
alt={file.filename}
className="h-32 w-auto max-w-[240px] object-cover transition-transform group-hover:scale-105"
className="h-32 w-auto max-w-60 object-cover transition-transform group-hover:scale-105"
/>
</a>
);
}
return (
<div className="bg-background border-border/40 flex max-w-[200px] min-w-[120px] flex-col gap-1 rounded-lg border p-3 shadow-sm">
<div className="bg-background border-border/40 flex max-w-50 min-w-30 flex-col gap-1 rounded-lg border p-3 shadow-sm">
<div className="flex items-start gap-2">
<FileIcon className="text-muted-foreground mt-0.5 size-4 shrink-0" />
<span
@@ -298,7 +376,9 @@ function UploadedFileCard({
>
{getFileTypeLabel(file.filename)}
</Badge>
<span className="text-muted-foreground text-[10px]">{file.size}</span>
<span className="text-muted-foreground text-[10px]">
{formatBytes(file.size)}
</span>
</div>
</div>
);

View File

@@ -4,6 +4,7 @@ import { useEffect } from "react";
import { useI18n } from "@/core/i18n/hooks";
import type { AgentThreadState } from "@/core/threads";
import { useThreadChat } from "./chats";
import { FlipDisplay } from "./flip-display";
export function ThreadTitle({
@@ -15,8 +16,9 @@ export function ThreadTitle({
thread: BaseStream<AgentThreadState>;
}) {
const { t } = useI18n();
const { isNewThread } = useThreadChat();
useEffect(() => {
const pageTitle = !thread.values
const pageTitle = isNewThread
? t.pages.newChat
: thread.values?.title && thread.values.title !== "Untitled"
? thread.values.title
@@ -27,6 +29,7 @@ export function ThreadTitle({
document.title = `${pageTitle} - ${t.pages.appName}`;
}
}, [
isNewThread,
t.pages.newChat,
t.pages.untitled,
t.pages.appName,

View File

@@ -88,9 +88,11 @@ export const enUS: Translations = {
reasoningEffortLow: "Low",
reasoningEffortLowDescription: "Simple Logic Check + Shallow Deduction",
reasoningEffortMedium: "Medium",
reasoningEffortMediumDescription: "Multi-layer Logic Analysis + Basic Verification",
reasoningEffortMediumDescription:
"Multi-layer Logic Analysis + Basic Verification",
reasoningEffortHigh: "High",
reasoningEffortHighDescription: "Full-dimensional Logic Deduction + Multi-path Verification + Backward Check",
reasoningEffortHighDescription:
"Full-dimensional Logic Deduction + Multi-path Verification + Backward Check",
searchModels: "Search models...",
surpriseMe: "Surprise",
surpriseMePrompt: "Surprise me",
@@ -248,6 +250,11 @@ export const enUS: Translations = {
},
// Subtasks
uploads: {
uploading: "Uploading...",
uploadingFiles: "Uploading files, please wait...",
},
subtasks: {
subtask: "Subtask",
executing: (count: number) =>

View File

@@ -187,6 +187,12 @@ export interface Translations {
skillInstallTooltip: string;
};
// Uploads
uploads: {
uploading: string;
uploadingFiles: string;
};
// Subtasks
subtasks: {
subtask: string;

View File

@@ -238,6 +238,11 @@ export const zhCN: Translations = {
skillInstallTooltip: "安装技能并使其可在 DeerFlow 中使用",
},
uploads: {
uploading: "上传中...",
uploadingFiles: "文件上传中,请稍候...",
},
subtasks: {
subtask: "子任务",
executing: (count: number) =>

View File

@@ -263,57 +263,56 @@ export function findToolCallResult(toolCallId: string, messages: Message[]) {
}
/**
* Represents an uploaded file parsed from the <uploaded_files> tag
* Represents a file stored in message additional_kwargs.files.
* Used for optimistic UI (uploading state) and structured file metadata.
*/
export interface UploadedFile {
export interface FileInMessage {
filename: string;
size: string;
path: string;
size: number; // bytes
path?: string; // virtual path, may not be set during upload
status?: "uploading" | "uploaded";
}
/**
* Result of parsing uploaded files from message content
* Strip <uploaded_files> tag from message content.
* Returns the content with the tag removed.
*/
export interface ParsedUploadedFiles {
files: UploadedFile[];
cleanContent: string;
/**
 * Remove every <uploaded_files>…</uploaded_files> block from message content.
 *
 * @param content - Raw message content that may embed uploaded-files tags.
 * @returns The content with all tag blocks removed and leading/trailing
 *          whitespace trimmed.
 */
export function stripUploadedFilesTag(content: string): string {
  const tagPattern = /<uploaded_files>[\s\S]*?<\/uploaded_files>/g;
  const withoutTags = content.replace(tagPattern, "");
  return withoutTags.trim();
}
/**
* Parse <uploaded_files> tag from message content and extract file information.
* Returns the list of uploaded files and the content with the tag removed.
*/
export function parseUploadedFiles(content: string): ParsedUploadedFiles {
export function parseUploadedFiles(content: string): FileInMessage[] {
// Match <uploaded_files>...</uploaded_files> tag
const uploadedFilesRegex = /<uploaded_files>([\s\S]*?)<\/uploaded_files>/;
// eslint-disable-next-line @typescript-eslint/prefer-regexp-exec
const match = content.match(uploadedFilesRegex);
if (!match) {
return { files: [], cleanContent: content };
return [];
}
const uploadedFilesContent = match[1];
const cleanContent = content.replace(uploadedFilesRegex, "").trim();
// Check if it's "No files have been uploaded yet."
if (uploadedFilesContent?.includes("No files have been uploaded yet.")) {
return { files: [], cleanContent };
return [];
}
// Parse file list
// Format: - filename (size)\n Path: /path/to/file
const fileRegex = /- ([^\n(]+)\s*\(([^)]+)\)\s*\n\s*Path:\s*([^\n]+)/g;
const files: UploadedFile[] = [];
const files: FileInMessage[] = [];
let fileMatch;
while ((fileMatch = fileRegex.exec(uploadedFilesContent ?? "")) !== null) {
files.push({
filename: fileMatch[1].trim(),
size: fileMatch[2].trim(),
size: parseInt(fileMatch[2].trim(), 10) ?? 0,
path: fileMatch[3].trim(),
});
}
return { files, cleanContent };
return files;
}

View File

@@ -1,15 +1,18 @@
import type { AIMessage } from "@langchain/langgraph-sdk";
import type { AIMessage, Message } from "@langchain/langgraph-sdk";
import type { ThreadsClient } from "@langchain/langgraph-sdk/client";
import { useStream } from "@langchain/langgraph-sdk/react";
import { useMutation, useQuery, useQueryClient } from "@tanstack/react-query";
import { useCallback, useEffect, useState } from "react";
import { useCallback, useEffect, useRef, useState } from "react";
import { toast } from "sonner";
import type { PromptInputMessage } from "@/components/ai-elements/prompt-input";
import { getAPIClient } from "../api";
import { useI18n } from "../i18n/hooks";
import type { FileInMessage } from "../messages/utils";
import type { LocalSettings } from "../settings";
import { useUpdateSubtask } from "../tasks/context";
import type { UploadedFileInfo } from "../uploads";
import { uploadFiles } from "../uploads";
import type { AgentThread, AgentThreadState } from "./types";
@@ -36,11 +39,14 @@ export function useThreadStream({
onFinish,
onToolEnd,
}: ThreadStreamOptions) {
const { t } = useI18n();
const [_threadId, setThreadId] = useState<string | null>(threadId ?? null);
const startedRef = useRef(false);
useEffect(() => {
if (_threadId && _threadId !== threadId) {
setThreadId(threadId ?? null);
startedRef.current = false; // Reset for new thread
}
}, [threadId, _threadId]);
@@ -54,7 +60,10 @@ export function useThreadStream({
fetchStateHistory: { limit: 1 },
onCreated(meta) {
setThreadId(meta.thread_id);
onStart?.(meta.thread_id);
if (!startedRef.current) {
onStart?.(meta.thread_id);
startedRef.current = true;
}
},
onLangChainEvent(event) {
if (event.event === "on_tool_end") {
@@ -85,6 +94,21 @@ export function useThreadStream({
},
});
// Optimistic messages shown before the server stream responds
const [optimisticMessages, setOptimisticMessages] = useState<Message[]>([]);
// Track message count before sending so we know when server has responded
const prevMsgCountRef = useRef(thread.messages.length);
// Clear optimistic when server messages arrive (count increases)
useEffect(() => {
if (
optimisticMessages.length > 0 &&
thread.messages.length > prevMsgCountRef.current
) {
setOptimisticMessages([]);
}
}, [thread.messages.length, optimisticMessages.length]);
const sendMessage = useCallback(
async (
threadId: string,
@@ -93,98 +117,191 @@ export function useThreadStream({
) => {
const text = message.text.trim();
// Upload files first if any
if (message.files && message.files.length > 0) {
try {
// Convert FileUIPart to File objects by fetching blob URLs
const filePromises = message.files.map(async (fileUIPart) => {
if (fileUIPart.url && fileUIPart.filename) {
try {
// Fetch the blob URL to get the file data
const response = await fetch(fileUIPart.url);
const blob = await response.blob();
// Capture current count before showing optimistic messages
prevMsgCountRef.current = thread.messages.length;
// Create a File object from the blob
return new File([blob], fileUIPart.filename, {
type: fileUIPart.mediaType || blob.type,
});
} catch (error) {
console.error(
`Failed to fetch file ${fileUIPart.filename}:`,
error,
);
return null;
}
}
return null;
});
// Build optimistic files list with uploading status
const optimisticFiles: FileInMessage[] = (message.files ?? []).map(
(f) => ({
filename: f.filename ?? "",
size: 0,
status: "uploading" as const,
}),
);
const conversionResults = await Promise.all(filePromises);
const files = conversionResults.filter(
(file): file is File => file !== null,
);
const failedConversions = conversionResults.length - files.length;
// Create optimistic human message (shown immediately)
const optimisticHumanMsg: Message = {
type: "human",
id: `opt-human-${Date.now()}`,
content: text ? [{ type: "text", text }] : "",
additional_kwargs:
optimisticFiles.length > 0 ? { files: optimisticFiles } : {},
};
if (failedConversions > 0) {
throw new Error(
`Failed to prepare ${failedConversions} attachment(s) for upload. Please retry.`,
);
}
const newOptimistic: Message[] = [optimisticHumanMsg];
if (optimisticFiles.length > 0) {
// Mock AI message while files are being uploaded
newOptimistic.push({
type: "ai",
id: `opt-ai-${Date.now()}`,
content: t.uploads.uploadingFiles,
additional_kwargs: { element: "task" },
});
}
setOptimisticMessages(newOptimistic);
if (!threadId) {
throw new Error("Thread is not ready for file upload.");
}
if (files.length > 0) {
await uploadFiles(threadId, files);
}
} catch (error) {
console.error("Failed to upload files:", error);
const errorMessage =
error instanceof Error ? error.message : "Failed to upload files.";
toast.error(errorMessage);
throw error;
}
if (!startedRef.current) {
onStart?.(threadId);
startedRef.current = true;
}
await thread.submit(
{
messages: [
{
type: "human",
content: [
{
type: "text",
text,
},
],
let uploadedFileInfo: UploadedFileInfo[] = [];
try {
// Upload files first if any
if (message.files && message.files.length > 0) {
try {
// Convert FileUIPart to File objects by fetching blob URLs
const filePromises = message.files.map(async (fileUIPart) => {
if (fileUIPart.url && fileUIPart.filename) {
try {
// Fetch the blob URL to get the file data
const response = await fetch(fileUIPart.url);
const blob = await response.blob();
// Create a File object from the blob
return new File([blob], fileUIPart.filename, {
type: fileUIPart.mediaType || blob.type,
});
} catch (error) {
console.error(
`Failed to fetch file ${fileUIPart.filename}:`,
error,
);
return null;
}
}
return null;
});
const conversionResults = await Promise.all(filePromises);
const files = conversionResults.filter(
(file): file is File => file !== null,
);
const failedConversions = conversionResults.length - files.length;
if (failedConversions > 0) {
throw new Error(
`Failed to prepare ${failedConversions} attachment(s) for upload. Please retry.`,
);
}
if (!threadId) {
throw new Error("Thread is not ready for file upload.");
}
if (files.length > 0) {
const uploadResponse = await uploadFiles(threadId, files);
uploadedFileInfo = uploadResponse.files;
// Update optimistic human message with uploaded status + paths
const uploadedFiles: FileInMessage[] = uploadedFileInfo.map(
(info) => ({
filename: info.filename,
size: info.size,
path: info.virtual_path,
status: "uploaded" as const,
}),
);
setOptimisticMessages((messages) => {
if (messages.length > 1 && messages[0]) {
const humanMessage: Message = messages[0];
return [
{
...humanMessage,
additional_kwargs: { files: uploadedFiles },
},
...messages.slice(1),
];
}
return messages;
});
}
} catch (error) {
console.error("Failed to upload files:", error);
const errorMessage =
error instanceof Error
? error.message
: "Failed to upload files.";
toast.error(errorMessage);
setOptimisticMessages([]);
throw error;
}
}
// Build files metadata for submission (included in additional_kwargs)
const filesForSubmit: FileInMessage[] = uploadedFileInfo.map(
(info) => ({
filename: info.filename,
size: info.size,
path: info.virtual_path,
status: "uploaded" as const,
}),
);
await thread.submit(
{
messages: [
{
type: "human",
content: [
{
type: "text",
text,
},
],
additional_kwargs:
filesForSubmit.length > 0 ? { files: filesForSubmit } : {},
},
],
},
{
threadId: threadId,
streamSubgraphs: true,
streamResumable: true,
streamMode: ["values", "messages-tuple", "custom"],
config: {
recursion_limit: 1000,
},
context: {
...extraContext,
...context,
thinking_enabled: context.mode !== "flash",
is_plan_mode: context.mode === "pro" || context.mode === "ultra",
subagent_enabled: context.mode === "ultra",
thread_id: threadId,
},
],
},
{
threadId: threadId,
streamSubgraphs: true,
streamResumable: true,
streamMode: ["values", "messages-tuple", "custom"],
config: {
recursion_limit: 1000,
},
context: {
...extraContext,
...context,
thinking_enabled: context.mode !== "flash",
is_plan_mode: context.mode === "pro" || context.mode === "ultra",
subagent_enabled: context.mode === "ultra",
thread_id: threadId,
},
},
);
void queryClient.invalidateQueries({ queryKey: ["threads", "search"] });
// afterSubmit?.();
);
void queryClient.invalidateQueries({ queryKey: ["threads", "search"] });
} catch (error) {
setOptimisticMessages([]);
throw error;
}
},
[thread, context, queryClient],
[thread, t.uploads.uploadingFiles, onStart, context, queryClient],
);
return [thread, sendMessage] as const;
// Merge thread with optimistic messages for display
const mergedThread =
optimisticMessages.length > 0
? ({
...thread,
messages: [...thread.messages, ...optimisticMessages],
} as typeof thread)
: thread;
return [mergedThread, sendMessage] as const;
}
export function useThreads(