2 files changed: +7 −2 lines changed

@@ -13,7 +13,7 @@ import {
   ChatMessageTool,
   usePluginStore,
 } from "@/app/store";
-import { streamWithThink } from "@/app/utils/chat";
+import { preProcessImageContent, streamWithThink } from "@/app/utils/chat";
 import {
   ChatOptions,
   getHeaders,
@@ -25,6 +25,7 @@ import { getClientConfig } from "@/app/config/client";
 import {
   getMessageTextContent,
   getMessageTextContentWithoutThinking,
+  isVisionModel,
 } from "@/app/utils";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
@@ -71,13 +72,16 @@ export class SiliconflowApi implements LLMApi {
   }
 
   async chat(options: ChatOptions) {
+    const visionModel = isVisionModel(options.config.model);
     const messages: ChatOptions["messages"] = [];
     for (const v of options.messages) {
       if (v.role === "assistant") {
         const content = getMessageTextContentWithoutThinking(v);
         messages.push({ role: v.role, content });
       } else {
-        const content = getMessageTextContent(v);
+        const content = visionModel
+          ? await preProcessImageContent(v.content)
+          : getMessageTextContent(v);
         messages.push({ role: v.role, content });
       }
     }
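
The change above only affects how non-assistant messages are assembled: for a vision-capable model the raw message content (which may include image parts) is passed through preProcessImageContent, otherwise the plain text is extracted as before. A rough sketch of the two content shapes the loop can now push follows; the MultimodalContent type is an assumption based on the OpenAI-style message format, not something defined in this diff.

// Sketch only: illustrates the two content shapes chat() can now produce.
// The MultimodalContent type below is assumed from the OpenAI-style API;
// the real return type of preProcessImageContent may differ.
type MultimodalContent =
  | { type: "text"; text: string }
  | { type: "image_url"; image_url: { url: string } };

type OutgoingMessage = {
  role: "system" | "user" | "assistant";
  content: string | MultimodalContent[];
};

// Non-vision model: content stays a plain string.
const textOnly: OutgoingMessage = {
  role: "user",
  content: "Describe this picture",
};

// Vision model: content may mix text and image parts, which is roughly
// what preProcessImageContent is expected to yield for image messages.
const withImage: OutgoingMessage = {
  role: "user",
  content: [
    { type: "text", text: "Describe this picture" },
    { type: "image_url", image_url: { url: "data:image/png;base64,..." } },
  ],
};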
@@ -462,6 +462,7 @@ export const VISION_MODEL_REGEXES = [
   /gpt-4-turbo(?!.*preview)/, // Matches "gpt-4-turbo" but not "gpt-4-turbo-preview"
   /^dall-e-3$/, // Matches exactly "dall-e-3"
   /glm-4v/,
+  /vl/i,
 ];
 
 export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
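
The new /vl/i entry is what lets the SiliconFlow client above treat Qwen-VL-style model IDs (any ID containing "vl", case-insensitively) as vision models. Below is a minimal sketch of checking a model ID against the two lists, assuming isVisionModel reduces to an include/exclude regex test; the actual helper in "@/app/utils" may add more logic.

// Sketch only: assumes the vision check is a plain include/exclude regex test.
function looksLikeVisionModel(model: string): boolean {
  const include = [
    /gpt-4-turbo(?!.*preview)/,
    /^dall-e-3$/,
    /glm-4v/,
    /vl/i, // newly added: matches e.g. "Qwen/Qwen2-VL-72B-Instruct"
  ];
  const exclude = [/claude-3-5-haiku-20241022/];
  return !exclude.some((re) => re.test(model)) && include.some((re) => re.test(model));
}

// looksLikeVisionModel("Qwen/Qwen2-VL-72B-Instruct") -> true  (matches /vl/i)
// looksLikeVisionModel("gpt-4-turbo-preview")        -> false (lookahead excludes it)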