From d18be32252e7f879dd3be22e6fa4425523960186 Mon Sep 17 00:00:00 2001
From: Hemanth HM
Date: Fri, 14 Mar 2025 21:40:10 -0700
Subject: [PATCH] feat: support for Ollama

Draft for ollama support
---
For more details, open the [Copilot Workspace session](https://copilot-workspace.githubnext.com/crmne/ruby_llm?shareId=XXXX-XXXX-XXXX-XXXX).
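
Note for reviewers: this draft only wires up provider registration, model
metadata, and capabilities. ollama.rb extends Ollama::Chat, Embeddings,
Models, Streaming, Tools, Images, and Media, but only capabilities.rb is
created here; the remaining submodules are assumed to follow in later
commits under lib/ruby_llm/providers/ollama/, mirroring the existing
providers. Once the chat plumbing lands, a minimal smoke test could look
like the sketch below (hypothetical usage: it assumes RubyLLM.chat resolves
the provider from the models.json entry added in this patch, and that an
Ollama daemon is listening on the default 127.0.0.1:11434):

  require 'ruby_llm'

  # No API key is needed: Ollama serves its HTTP API locally, and
  # Ollama.api_base points at the daemon's default address.
  chat = RubyLLM.chat(model: 'ollama-chat')
  puts chat.ask('Reply with one short sentence.').content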
---
 lib/ruby_llm.rb                               |  3 +
 lib/ruby_llm/models.json                      | 74 ++++++++++++++++-
 lib/ruby_llm/providers/ollama.rb              | 36 +++++++++
 lib/ruby_llm/providers/ollama/capabilities.rb | 81 +++++++++++++++++++
 4 files changed, 193 insertions(+), 1 deletion(-)
 create mode 100644 lib/ruby_llm/providers/ollama.rb
 create mode 100644 lib/ruby_llm/providers/ollama/capabilities.rb

diff --git a/lib/ruby_llm.rb b/lib/ruby_llm.rb
index 0563e13..c03af48 100644
--- a/lib/ruby_llm.rb
+++ b/lib/ruby_llm.rb
@@ -68,6 +68,9 @@ def logger
 RubyLLM::Provider.register :anthropic, RubyLLM::Providers::Anthropic
 RubyLLM::Provider.register :gemini, RubyLLM::Providers::Gemini
 RubyLLM::Provider.register :deepseek, RubyLLM::Providers::DeepSeek
+
+require 'ruby_llm/providers/ollama'
+RubyLLM::Provider.register :ollama, RubyLLM::Providers::Ollama
 
 if defined?(Rails::Railtie)
   require 'ruby_llm/railtie'
diff --git a/lib/ruby_llm/models.json b/lib/ruby_llm/models.json
index 386e2ad..fedcbed 100644
--- a/lib/ruby_llm/models.json
+++ b/lib/ruby_llm/models.json
@@ -2061,5 +2061,77 @@
       "object": "model",
       "owned_by": "openai-internal"
     }
+  },
+  {
+    "id": "ollama-chat",
+    "created_at": "2025-03-01T00:00:00Z",
+    "display_name": "Ollama Chat",
+    "provider": "ollama",
+    "context_window": 4096,
+    "max_tokens": 2048,
+    "type": "chat",
+    "family": "ollama",
+    "supports_vision": false,
+    "supports_functions": true,
+    "supports_json_mode": true,
+    "input_price_per_million": 0.0,
+    "output_price_per_million": 0.0,
+    "metadata": {
+      "version": "1.0",
+      "description": "Ollama's chat model optimized for conversational tasks.",
+      "input_token_limit": 4096,
+      "output_token_limit": 2048,
+      "supported_generation_methods": [
+        "generateChat"
+      ]
+    }
+  },
+  {
+    "id": "ollama-embedding",
+    "created_at": "2025-03-01T00:00:00Z",
+    "display_name": "Ollama Embedding",
+    "provider": "ollama",
+    "context_window": 2048,
+    "max_tokens": 1,
+    "type": "embedding",
+    "family": "ollama",
+    "supports_vision": false,
+    "supports_functions": false,
+    "supports_json_mode": false,
+    "input_price_per_million": 0.0,
+    "output_price_per_million": 0.0,
+    "metadata": {
+      "version": "1.0",
+      "description": "Ollama's embedding model for generating vector representations of text.",
+      "input_token_limit": 2048,
+      "output_token_limit": 1,
+      "supported_generation_methods": [
+        "generateEmbedding"
+      ]
+    }
+  },
+  {
+    "id": "ollama-image",
+    "created_at": "2025-03-01T00:00:00Z",
+    "display_name": "Ollama Image",
+    "provider": "ollama",
+    "context_window": 4096,
+    "max_tokens": 4096,
+    "type": "image",
+    "family": "ollama",
+    "supports_vision": true,
+    "supports_functions": false,
+    "supports_json_mode": false,
+    "input_price_per_million": 0.0,
+    "output_price_per_million": 0.0,
+    "metadata": {
+      "version": "1.0",
+      "description": "Ollama's image generation model for creating images from text prompts.",
+      "input_token_limit": 4096,
+      "output_token_limit": 4096,
+      "supported_generation_methods": [
+        "generateImage"
+      ]
+    }
+  }
-]
\ No newline at end of file
+]
diff --git a/lib/ruby_llm/providers/ollama.rb b/lib/ruby_llm/providers/ollama.rb
new file mode 100644
index 0000000..8717652
--- /dev/null
+++ b/lib/ruby_llm/providers/ollama.rb
@@ -0,0 +1,36 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    # Ollama API integration. Handles chat completion, embeddings, models, streaming,
+    # tools, images, and media for Ollama's API.
+    module Ollama
+      extend Provider
+      extend Ollama::Chat
+      extend Ollama::Embeddings
+      extend Ollama::Models
+      extend Ollama::Streaming
+      extend Ollama::Tools
+      extend Ollama::Images
+      extend Ollama::Media
+
+      module_function
+
+      def api_base
+        'http://127.0.0.1:11434'
+      end
+
+      def headers
+        {}
+      end
+
+      def capabilities
+        Ollama::Capabilities
+      end
+
+      def slug
+        'ollama'
+      end
+    end
+  end
+end
diff --git a/lib/ruby_llm/providers/ollama/capabilities.rb b/lib/ruby_llm/providers/ollama/capabilities.rb
new file mode 100644
index 0000000..ff0e282
--- /dev/null
+++ b/lib/ruby_llm/providers/ollama/capabilities.rb
@@ -0,0 +1,81 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module Providers
+    module Ollama
+      # Determines capabilities for Ollama models
+      module Capabilities
+        module_function
+
+        # Determines the context window size for a given model
+        # @param model_id [String] the model identifier
+        # @return [Integer] the context window size in tokens
+        def determine_context_window(model_id)
+          case model_id
+          when /ollama-chat/ then 4096
+          when /ollama-embedding/ then 2048
+          when /ollama-image/ then 4096
+          else 4096 # Default context window size
+          end
+        end
+
+        # Determines the maximum output tokens for a given model
+        # @param model_id [String] the model identifier
+        # @return [Integer] the maximum output tokens
+        def determine_max_tokens(model_id)
+          case model_id
+          when /ollama-chat/ then 2048
+          when /ollama-embedding/ then 1
+          when /ollama-image/ then 4096
+          else 2048 # Default max tokens
+          end
+        end
+
+        # Determines if a model supports vision capabilities
+        # @param model_id [String] the model identifier
+        # @return [Boolean] true if the model supports vision
+        def supports_vision?(model_id)
+          model_id.match?(/ollama-image/)
+        end
+
+        # Determines if a model supports function calling
+        # @param model_id [String] the model identifier
+        # @return [Boolean] true if the model supports functions
+        def supports_functions?(model_id)
+          model_id.match?(/ollama-chat/)
+        end
+
+        # Determines if a model supports JSON mode
+        # @param model_id [String] the model identifier
+        # @return [Boolean] true if the model supports JSON mode
+        def supports_json_mode?(model_id)
+          model_id.match?(/ollama-chat/)
+        end
+
+        # Determines the model family for a given model ID
+        # @param model_id [String] the model identifier
+        # @return [Symbol] the model family identifier
+        def model_family(model_id)
+          case model_id
+          when /ollama-chat/ then :ollama_chat
+          when /ollama-embedding/ then :ollama_embedding
+          when /ollama-image/ then :ollama_image
+          else :ollama
+          end
+        end
+
+        # Returns the model type
+        # @param model_id [String] the model identifier
+        # @return [String] the model type
+        def model_type(model_id)
+          case model_id
+          when /ollama-chat/ then 'chat'
+          when /ollama-embedding/ then 'embedding'
+          when /ollama-image/ then 'image'
+          else 'chat' # Default model type
+          end
+        end
+      end
+    end
+  end
+end
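
Capabilities is pure string matching over model ids, so it can be smoke
tested without a running daemon. A minimal check, assuming that requiring
ruby_llm makes the provider files loadable:

  require 'ruby_llm'

  caps = RubyLLM::Providers::Ollama::Capabilities

  # Expected values mirror the models.json entries added above.
  raise 'wrong type'   unless caps.model_type('ollama-embedding') == 'embedding'
  raise 'wrong window' unless caps.determine_context_window('ollama-chat') == 4096
  raise 'wrong family' unless caps.model_family('ollama-image') == :ollama_image
  raise 'vision leak'  if caps.supports_vision?('ollama-chat')
  puts 'ollama capabilities OK'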