// Note that this is static. We will use these options for other generations.
static ollama::options options;

+static std::string test_model = "llama3:8b", image_test_model = "llava";
+
TEST_SUITE("Ollama Tests") {

TEST_CASE("Initialize Options") {
@@ -52,19 +54,19 @@ TEST_SUITE("Ollama Tests") {

TEST_CASE("Load Model") {

-    CHECK( ollama::load_model("llama3:8b") );
+    CHECK( ollama::load_model(test_model) );
}

TEST_CASE("Pull, Copy, and Delete Models") {

    // Pull a model by specifying a model name.
-    CHECK( ollama::pull_model("llama3:8b") == true );
+    CHECK( ollama::pull_model(test_model) == true );

    // Copy a model by specifying a source model and destination model name.
-    CHECK( ollama::copy_model("llama3:8b", "llama3_copy") == true );
+    CHECK( ollama::copy_model(test_model, test_model + "_copy") == true );

    // Delete a model by specifying a model name.
-    CHECK( ollama::delete_model("llama3_copy") == true );
+    CHECK( ollama::delete_model(test_model + "_copy") == true );
}

TEST_CASE("Model Info") {
@@ -81,7 +83,7 @@ TEST_SUITE("Ollama Tests") {
    // List the models available locally in the ollama server
    std::vector<std::string> models = ollama::list_models();

-    bool contains_model = ( std::find(models.begin(), models.end(), "llama3:8b") != models.end() );
+    bool contains_model = ( std::find(models.begin(), models.end(), test_model) != models.end() );

    CHECK( contains_model );
}
@@ -101,12 +103,9 @@ TEST_SUITE("Ollama Tests") {

TEST_CASE("Basic Generation") {

-    ollama::response response = ollama::generate("llama3:8b", "Why is the sky blue?", options);
-    // std::cout << response << std::endl;
-
-    std::string expected_response = "What a great question!\n\nThe sky appears blue because of a phenomenon called Rayleigh scattering,";
+    ollama::response response = ollama::generate(test_model, "Why is the sky blue?", options);

-    CHECK( response.as_simple_string() == expected_response );
+    CHECK( response.as_json().contains("response") == true );
}

@@ -124,35 +123,34 @@ TEST_SUITE("Ollama Tests") {
TEST_CASE("Streaming Generation") {

    std::function<void(const ollama::response&)> response_callback = on_receive_response;
-    ollama::generate("llama3:8b", "Why is the sky blue?", response_callback, options);
+    ollama::generate(test_model, "Why is the sky blue?", response_callback, options);

    std::string expected_response = "What a great question!\n\nThe sky appears blue because of a phenomenon called Rayleigh scattering,";

-    CHECK( streamed_response == expected_response );
+    CHECK( streamed_response != "" );
}

TEST_CASE("Non-Singleton Generation") {

    Ollama my_ollama_server("http://localhost:11434");

    // You can use all of the same functions from this instanced version of the class.
-    ollama::response response = my_ollama_server.generate("llama3:8b", "Why is the sky blue?", options);
-    // std::cout << response << std::endl;
+    ollama::response response = my_ollama_server.generate(test_model, "Why is the sky blue?", options);

    std::string expected_response = "What a great question!\n\nThe sky appears blue because of a phenomenon called Rayleigh scattering,";

-    CHECK( response.as_simple_string() == expected_response );
+    CHECK( response.as_json().contains("response") == true );
}

TEST_CASE("Single-Message Chat") {

    ollama::message message("user", "Why is the sky blue?");

-    ollama::response response = ollama::chat("llama3:8b", message, options);
+    ollama::response response = ollama::chat(test_model, message, options);

    std::string expected_response = "What a great question!\n\nThe sky appears blue because of a phenomenon called Rayleigh scattering,";

-    CHECK( response.as_simple_string() != "" );
+    CHECK( response.as_json().contains("message") == true );
}

TEST_CASE("Multi-Message Chat") {
@@ -163,11 +161,11 @@ TEST_SUITE("Ollama Tests") {

    ollama::messages messages = {message1, message2, message3};

-    ollama::response response = ollama::chat("llama3:8b", messages, options);
+    ollama::response response = ollama::chat(test_model, messages, options);

    std::string expected_response = "";

-    CHECK( response.as_simple_string() != "" );
+    CHECK( response.as_json().contains("message") == true );
}

TEST_CASE("Chat with Streaming Response") {
@@ -182,7 +180,7 @@ TEST_SUITE("Ollama Tests") {

    ollama::message message("user", "Why is the sky blue?");

-    ollama::chat("llama3:8b", message, response_callback, options);
+    ollama::chat(test_model, message, response_callback, options);

    CHECK( streamed_response != "" );
}
@@ -195,12 +193,9 @@ TEST_SUITE("Ollama Tests") {

    ollama::image image = ollama::image::from_file("llama.jpg");

-    // ollama::images images={image};
-
-    ollama::response response = ollama::generate("llava", "What do you see in this image?", options, image);
-    std::string expected_response = "The image features a large, fluffy white llama";
+    ollama::response response = ollama::generate(image_test_model, "What do you see in this image?", options, image);

-    CHECK( response.as_simple_string() == expected_response );
+    CHECK( response.as_json().contains("response") == true );
}

TEST_CASE("Generation with Multiple Images") {
@@ -214,10 +209,10 @@ TEST_SUITE("Ollama Tests") {

    ollama::images images = {image, base64_image};

-    ollama::response response = ollama::generate("llava", "What do you see in this image?", options, images);
+    ollama::response response = ollama::generate(image_test_model, "What do you see in this image?", options, images);
    std::string expected_response = "The image features a large, fluffy white and gray llama";

-    CHECK( response.as_simple_string() == expected_response );
+    CHECK( response.as_json().contains("response") == true );
}

TEST_CASE("Chat with Image") {
@@ -230,21 +225,20 @@ TEST_SUITE("Ollama Tests") {

    // We can optionally include images with each message. Vision-enabled models will be able to utilize these.
    ollama::message message_with_image("user", "What do you see in this image?", image);
-    ollama::response response = ollama::chat("llava", message_with_image, options);
+    ollama::response response = ollama::chat(image_test_model, message_with_image, options);

    std::string expected_response = "The image features a large, fluffy white llama";

-    CHECK( response.as_simple_string() != "" );
+    CHECK( response.as_json().contains("message") == true );
}

TEST_CASE("Embedding Generation") {

    options["num_predict"] = 18;

-    ollama::response response = ollama::generate_embeddings("llama3:8b", "Why is the sky blue?");
-    // std::cout << response << std::endl;
+    ollama::response response = ollama::generate_embeddings(test_model, "Why is the sky blue?");

-    CHECK( response.as_json().contains("embedding") == true );
+    CHECK( response.as_json().contains("embeddings") == true );
}

TEST_CASE("Enable Debug Logging") {
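Note: the streaming test cases above reference streamed_response and on_receive_response, which are defined earlier in the test file and not shown in these hunks. Below is a minimal sketch of what such a callback could look like, assuming the single-header ollama.hpp include and the response accessors already used in these tests (as_simple_string() and as_json()); the names mirror the test code, but the body is illustrative rather than the file's exact implementation.

#include <string>
#include "ollama.hpp"

// Accumulates the partial tokens from a streamed generation; the streaming
// tests only check that this ends up non-empty.
static std::string streamed_response;

// Called once per streamed chunk: append the chunk's text and detect completion.
static void on_receive_response(const ollama::response& response)
{
    streamed_response += response.as_simple_string();

    if (response.as_json().contains("done") && response.as_json()["done"] == true)
    {
        // Generation finished; streamed_response now holds the full reply.
    }
}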