Skip to content

Commit 34bdd4b

Browse files
authored
Merge pull request #6194 from siliconflow/vl-support-on-sf
Support VLM on SiliconFlow
2 parents a029b43 + 86f8696 commit 34bdd4b

File tree

2 files changed

+7
-2
lines changed

2 files changed

+7
-2
lines changed

app/client/platforms/siliconflow.ts

+6-2
Original file line number | Diff line number | Diff line change
@@ -13,7 +13,7 @@ import {
1313
ChatMessageTool,
1414
usePluginStore,
1515
} from "@/app/store";
16-
import { streamWithThink } from "@/app/utils/chat";
16+
import { preProcessImageContent, streamWithThink } from "@/app/utils/chat";
1717
import {
1818
ChatOptions,
1919
getHeaders,
@@ -25,6 +25,7 @@ import { getClientConfig } from "@/app/config/client";
2525
import {
2626
getMessageTextContent,
2727
getMessageTextContentWithoutThinking,
28+
isVisionModel,
2829
} from "@/app/utils";
2930
import { RequestPayload } from "./openai";
3031
import { fetch } from "@/app/utils/stream";
@@ -71,13 +72,16 @@ export class SiliconflowApi implements LLMApi {
7172
}
7273

7374
async chat(options: ChatOptions) {
75+
const visionModel = isVisionModel(options.config.model);
7476
const messages: ChatOptions["messages"] = [];
7577
for (const v of options.messages) {
7678
if (v.role === "assistant") {
7779
const content = getMessageTextContentWithoutThinking(v);
7880
messages.push({ role: v.role, content });
7981
} else {
80-
const content = getMessageTextContent(v);
82+
const content = visionModel
83+
? await preProcessImageContent(v.content)
84+
: getMessageTextContent(v);
8185
messages.push({ role: v.role, content });
8286
}
8387
}

app/constant.ts

+1
Original file line number | Diff line number | Diff line change
@@ -462,6 +462,7 @@ export const VISION_MODEL_REGEXES = [
462462
/gpt-4-turbo(?!.*preview)/, // Matches "gpt-4-turbo" but not "gpt-4-turbo-preview"
463463
/^dall-e-3$/, // Matches exactly "dall-e-3"
464464
/glm-4v/,
465+
/vl/i,
465466
];
466467

467468
export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];

0 commit comments

Comments (0)