diff --git a/LICENSE b/LICENSE
index 047f9431e7d..4864ab00d2c 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2023-2024 Zhang Yifei
+Copyright (c) 2023-2025 NextChat
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/app/client/platforms/bytedance.ts b/app/client/platforms/bytedance.ts
index a2f0660d828..c2f128128fe 100644
--- a/app/client/platforms/bytedance.ts
+++ b/app/client/platforms/bytedance.ts
@@ -22,7 +22,7 @@ import {
 } from "@fortaine/fetch-event-source";
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import { preProcessImageContent } from "@/app/utils/chat";
 import { fetch } from "@/app/utils/stream";
 
 export interface OpenAIListModelResponse {
@@ -84,10 +84,11 @@ export class DoubaoApi implements LLMApi {
   }
 
   async chat(options: ChatOptions) {
-    const messages = options.messages.map((v) => ({
-      role: v.role,
-      content: getMessageTextContent(v),
-    }));
+    const messages: ChatOptions["messages"] = [];
+    for (const v of options.messages) {
+      const content = await preProcessImageContent(v.content);
+      messages.push({ role: v.role, content });
+    }
 
     const modelConfig = {
       ...useAppConfig.getState().modelConfig,
diff --git a/app/client/platforms/deepseek.ts b/app/client/platforms/deepseek.ts
index 2bf3b2338ea..c436ae61d01 100644
--- a/app/client/platforms/deepseek.ts
+++ b/app/client/platforms/deepseek.ts
@@ -5,6 +5,7 @@ import {
   DEEPSEEK_BASE_URL,
   DeepSeek,
   REQUEST_TIMEOUT_MS,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
 } from "@/app/constant";
 import {
   useAccessStore,
@@ -117,10 +118,14 @@ export class DeepSeekApi implements LLMApi {
 
     // console.log(chatPayload);
 
+    const isR1 =
+      options.config.model.endsWith("-reasoner") ||
+      options.config.model.endsWith("-r1");
+
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      isR1 ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
     );
 
     if (shouldStream) {
diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts
index 5ca8e1071a7..1e593dd4257 100644
--- a/app/client/platforms/google.ts
+++ b/app/client/platforms/google.ts
@@ -1,4 +1,9 @@
-import { ApiPath, Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
+import {
+  ApiPath,
+  Google,
+  REQUEST_TIMEOUT_MS,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
+} from "@/app/constant";
 import {
   ChatOptions,
   getHeaders,
@@ -69,9 +74,16 @@ export class GeminiProApi implements LLMApi {
         .join("\n\n");
     };
 
+    let content = "";
+    if (Array.isArray(res)) {
+      res.map((item) => {
+        content += getTextFromParts(item?.candidates?.at(0)?.content?.parts);
+      });
+    }
+
     return (
       getTextFromParts(res?.candidates?.at(0)?.content?.parts) ||
-      getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
+      content || //getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
      res?.error?.message ||
       ""
     );
@@ -190,10 +202,11 @@ export class GeminiProApi implements LLMApi {
       headers: getHeaders(),
     };
 
+    const isThinking = options.config.model.includes("-thinking");
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      isThinking ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
     );
 
     if (shouldStream) {
diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index 467bb82e0ac..fbe533cadab 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -8,6 +8,7 @@ import {
   Azure,
   REQUEST_TIMEOUT_MS,
   ServiceProvider,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
 } from "@/app/constant";
 import {
   ChatMessageTool,
@@ -195,7 +196,9 @@ export class ChatGPTApi implements LLMApi {
     let requestPayload: RequestPayload | DalleRequestPayload;
 
     const isDalle3 = _isDalle3(options.config.model);
-    const isO1OrO3 = options.config.model.startsWith("o1") || options.config.model.startsWith("o3");
+    const isO1OrO3 =
+      options.config.model.startsWith("o1") ||
+      options.config.model.startsWith("o3");
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
@@ -359,7 +362,9 @@ export class ChatGPTApi implements LLMApi {
       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
-        isDalle3 || isO1OrO3 ? REQUEST_TIMEOUT_MS * 4 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
+        isDalle3 || isO1OrO3
+          ? REQUEST_TIMEOUT_MS_FOR_THINKING
+          : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
       );
 
       const res = await fetch(chatPath, chatPayload);
diff --git a/app/client/platforms/siliconflow.ts b/app/client/platforms/siliconflow.ts
index e5cbd241fe0..1ad316a6143 100644
--- a/app/client/platforms/siliconflow.ts
+++ b/app/client/platforms/siliconflow.ts
@@ -4,7 +4,7 @@ import {
   ApiPath,
   SILICONFLOW_BASE_URL,
   SiliconFlow,
-  REQUEST_TIMEOUT_MS,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
 } from "@/app/constant";
 import {
   useAccessStore,
@@ -120,11 +120,11 @@ export class SiliconflowApi implements LLMApi {
 
     // console.log(chatPayload);
 
-    // make a fetch request
-    const requestTimeoutId = setTimeout(() => {
-      console.error("[Request] SiliconFlow API timeout");
-      controller.abort();
-    }, 10 * REQUEST_TIMEOUT_MS);
+    // Use extended timeout for thinking models as they typically require more processing time
+    const requestTimeoutId = setTimeout(
+      () => controller.abort(),
+      REQUEST_TIMEOUT_MS_FOR_THINKING,
+    );
 
     if (shouldStream) {
       const [tools, funcs] = usePluginStore
diff --git a/app/client/platforms/xai.ts b/app/client/platforms/xai.ts
index 06dbaaa29ff..8c41c2d988f 100644
--- a/app/client/platforms/xai.ts
+++ b/app/client/platforms/xai.ts
@@ -17,7 +17,7 @@ import {
   SpeechOptions,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import { preProcessImageContent } from "@/app/utils/chat";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
 
@@ -62,7 +62,7 @@ export class XAIApi implements LLMApi {
   async chat(options: ChatOptions) {
     const messages: ChatOptions["messages"] = [];
     for (const v of options.messages) {
-      const content = getMessageTextContent(v);
+      const content = await preProcessImageContent(v.content);
       messages.push({ role: v.role, content });
     }
 
diff --git a/app/components/emoji.tsx b/app/components/emoji.tsx
index 6686d87310d..ecb1c65819e 100644
--- a/app/components/emoji.tsx
+++ b/app/components/emoji.tsx
@@ -54,6 +54,8 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
   if (
     modelName.startsWith("gpt") ||
     modelName.startsWith("chatgpt") ||
+    modelName.startsWith("dall-e") ||
+    modelName.startsWith("dalle") ||
     modelName.startsWith("o1") ||
     modelName.startsWith("o3")
   ) {
@@ -80,9 +82,13 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
     LlmIcon = BotIconGrok;
   } else if (modelName.startsWith("hunyuan")) {
     LlmIcon = BotIconHunyuan;
-  } else if (modelName.startsWith("doubao")) {
+  } else if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) {
     LlmIcon = BotIconDoubao;
-  } else if (modelName.startsWith("glm")) {
+  } else if (
+    modelName.startsWith("glm") ||
+    modelName.startsWith("cogview-") ||
+    modelName.startsWith("cogvideox-")
+  ) {
     LlmIcon = BotIconChatglm;
   }
 
diff --git a/app/constant.ts b/app/constant.ts
index 821e9fe56a1..09eec44b68d 100644
--- a/app/constant.ts
+++ b/app/constant.ts
@@ -110,6 +110,7 @@ export const UNFINISHED_INPUT = (id: string) => "unfinished-input-" + id;
 export const STORAGE_KEY = "chatgpt-next-web";
 
 export const REQUEST_TIMEOUT_MS = 60000;
+export const REQUEST_TIMEOUT_MS_FOR_THINKING = REQUEST_TIMEOUT_MS * 5;
 
 export const EXPORT_MESSAGE_CLASS_NAME = "export-markdown";
 
@@ -589,7 +590,16 @@ const iflytekModels = [
 
 const deepseekModels = ["deepseek-chat", "deepseek-coder", "deepseek-reasoner"];
 
-const xAIModes = ["grok-beta"];
+const xAIModes = [
+  "grok-beta",
+  "grok-2",
+  "grok-2-1212",
+  "grok-2-latest",
+  "grok-vision-beta",
+  "grok-2-vision-1212",
+  "grok-2-vision",
+  "grok-2-vision-latest",
+];
 
 const chatglmModels = [
   "glm-4-plus",