@@ -35171,6 +35171,13 @@ class Ollama
35171
35171
    // Default-construct a client talking to a local Ollama server on the
    // standard port; delegates to the URL-taking constructor.
    Ollama(): Ollama("http://localhost:11434") {}
35172
35172
    // Releases the owned HTTP client. NOTE(review): `cli` appears to be a raw
    // owning pointer (declaration not visible here); holding it in a
    // std::unique_ptr would make this destructor unnecessary (Rule of Zero)
    // and give the class correct copy/move semantics — confirm against the
    // member declaration and the Rule of Five.
    ~Ollama() { delete this->cli; }
35173
35173
35174
+ ollama::response generate(const std::string& model,const std::string& prompt, const ollama::response& context, const json& options=nullptr, const std::vector<std::string>& images=std::vector<std::string>())
35175
+ {
35176
+ ollama::request request(model, prompt, options, false, images);
35177
+ if ( context.as_json().contains("context") ) request["context"] = context.as_json()["context"];
35178
+ return generate(request);
35179
+ }
35180
+
35174
35181
ollama::response generate(const std::string& model,const std::string& prompt, const json& options=nullptr, const std::vector<std::string>& images=std::vector<std::string>())
35175
35182
{
35176
35183
ollama::request request(model, prompt, options, false, images);
@@ -35201,6 +35208,13 @@ class Ollama
35201
35208
return response;
35202
35209
}
35203
35210
35211
+ bool generate(const std::string& model,const std::string& prompt, ollama::response& context, std::function<void(const ollama::response&)> on_receive_token, const json& options=nullptr, const std::vector<std::string>& images=std::vector<std::string>())
35212
+ {
35213
+ ollama::request request(model, prompt, options, true, images);
35214
+ if ( context.as_json().contains("context") ) request["context"] = context.as_json()["context"];
35215
+ return generate(request, on_receive_token);
35216
+ }
35217
+
35204
35218
bool generate(const std::string& model,const std::string& prompt, std::function<void(const ollama::response&)> on_receive_token, const json& options=nullptr, const std::vector<std::string>& images=std::vector<std::string>())
35205
35219
{
35206
35220
ollama::request request(model, prompt, options, true, images);
@@ -35640,11 +35654,16 @@ namespace ollama
35640
35654
ollama.setServerURL(server_url);
35641
35655
}
35642
35656
35643
    // Free-function convenience wrapper: non-streaming completion routed
    // through the shared `ollama` singleton. inline because it is defined
    // in a header and may be included from multiple translation units.
    inline ollama::response generate(const std::string& model, const std::string& prompt, const json& options=nullptr, const std::vector<std::string>& images=std::vector<std::string>())
    {
        return ollama.generate(model, prompt, options, images);
    }
35647
35661
35662
+ ollama::response generate(const std::string& model,const std::string& prompt, const ollama::response& context, const json& options=nullptr, const std::vector<std::string>& images=std::vector<std::string>())
35663
+ {
35664
+ return ollama.generate(model, prompt, context, options, images);
35665
+ }
35666
+
35648
35667
    // Free-function convenience wrapper: run a pre-built request through
    // the shared `ollama` singleton and return the server's response.
    inline ollama::response generate(const ollama::request& request)
    {
        return ollama.generate(request);
    }
@@ -35655,6 +35674,11 @@ namespace ollama
35655
35674
return ollama.generate(model, prompt, on_receive_response, options, images);
35656
35675
}
35657
35676
35677
    // Free-function convenience wrapper: streaming completion with a
    // prior-response context; each streamed token is delivered to
    // on_receive_response. Routed through the shared `ollama` singleton.
    inline bool generate(const std::string& model,const std::string& prompt, ollama::response& context, std::function<void(const ollama::response&)> on_receive_response, const json& options=nullptr, const std::vector<std::string>& images=std::vector<std::string>())
    {
        return ollama.generate(model, prompt, context, on_receive_response, options, images);
    }
35658
35682
inline bool generate(ollama::request& request, std::function<void(const ollama::response&)> on_receive_response)
35659
35683
{
35660
35684
return ollama.generate(request, on_receive_response);
0 commit comments