Commit b0e8941

Merge pull request #13 from jmont-dev/tool-support
Handle verbose model info calls.
2 parents: 89915d7 + 9304b95

3 files changed (+8, -8 lines)

Diff for: examples/main.cpp (-2 lines)

@@ -44,8 +44,6 @@ int main()
     // Generate embeddings for a model and a prompt.
     std::cout << ollama::generate_embeddings("llama3:8b", "Why is the sky blue?") << std::endl;
 
-    sleep(10);
-
     // Push a model to a model library with the syntax <namespace>/<model>:<tag>. Note that you must have registered on ollama.ai and added a public key to do this.
     try { if ( ollama::push_model("jmont/my_model:latest") ) std::cout << "Model was pushed" << std::endl; } catch(...) { std::cout << "Unable to push model." << std::endl; }

Diff for: include/ollama.hpp (+4, -3 lines)

@@ -608,10 +608,11 @@ class Ollama
 
     }
 
-    json show_model_info(const std::string& model)
+    json show_model_info(const std::string& model, bool verbose=false)
     {
         json request, response;
         request["name"] = model;
+        if (verbose) request["verbose"] = true;
 
         std::string request_string = request.dump();
         if (ollama::log_requests) std::cout << request_string << std::endl;
@@ -859,9 +860,9 @@ namespace ollama
         return ollama.create_blob(digest);
     }
 
-    inline json show_model_info(const std::string& model)
+    inline json show_model_info(const std::string& model, bool verbose=false)
     {
-        return ollama.show_model_info(model);
+        return ollama.show_model_info(model, verbose);
     }
 
     inline bool copy_model(const std::string& source_model, const std::string& dest_model)
Diff for: singleheader/ollama.hpp (+4, -3 lines)

@@ -35398,10 +35398,11 @@ class Ollama
 
     }
 
-    json show_model_info(const std::string& model)
+    json show_model_info(const std::string& model, bool verbose=false)
    {
         json request, response;
         request["name"] = model;
+        if (verbose) request["verbose"] = true;
 
         std::string request_string = request.dump();
         if (ollama::log_requests) std::cout << request_string << std::endl;
@@ -35649,9 +35650,9 @@ namespace ollama
         return ollama.create_blob(digest);
     }
 
-    inline json show_model_info(const std::string& model)
+    inline json show_model_info(const std::string& model, bool verbose=false)
     {
-        return ollama.show_model_info(model);
+        return ollama.show_model_info(model, verbose);
     }
 
     inline bool copy_model(const std::string& source_model, const std::string& dest_model)
