diff --git a/README.md b/README.md index 55a4b80..d6f9b3f 100644 --- a/README.md +++ b/README.md @@ -110,6 +110,7 @@ Result: - [Embeddings](#embeddings) - [predict](#predict) - [embed_content](#embed_content) + - [batch_embed_content](#batch_embed_content) - [Modes](#modes) - [Text](#text) - [Image](#image) @@ -624,6 +625,46 @@ Result: -0.014878989] } } ``` +##### batch_embed_content + +The Generative Language API allows you to generate embeddings for multiple strings at the same time using the `batch_embed_content` method ([documentation](https://ai.google.dev/api/embeddings#method:-models.batchembedcontents)). + +```ruby +result = client.batch_embed_content( + { + requests: [ + { + model: 'models/text-embedding-004', + content: { parts: [{ text: 'What is life?' }] }, + output_dimensionality: 64, + task_type: 'CLUSTERING' + }, + { + model: 'models/text-embedding-004', + content: { parts: [{ text: 'What is the meaning of life?' }] } + } + ], + } +) +``` + +Result: +```ruby +{"embeddings" => + [{"values" => + [-0.0065307966, + -0.000163254, + -0.0283708, + ... + -0.02459646]}, + {"values" => + [-0.010632273, + 0.019375853, + -0.006665491, + ... 
+ -0.024252947]}]} +``` + ### Modes #### Text diff --git a/controllers/client.rb b/controllers/client.rb index 96c1c3b..461bf54 100644 --- a/controllers/client.rb +++ b/controllers/client.rb @@ -133,6 +133,17 @@ def embed_content(payload, server_sent_events: nil, &callback) result end + def batch_embed_content(payload, server_sent_events: nil, &callback) + result = request( + "#{@model_address}:batchEmbedContents", payload, + server_sent_events:, &callback + ) + + return result.first if result.is_a?(Array) && result.size == 1 + + result + end + def stream_generate_content(payload, server_sent_events: nil, &callback) request("#{@model_address}:streamGenerateContent", payload, server_sent_events:, &callback) end diff --git a/spec/tasks/run-embed.rb b/spec/tasks/run-embed.rb index e5c6746..d4362ff 100644 --- a/spec/tasks/run-embed.rb +++ b/spec/tasks/run-embed.rb @@ -22,6 +22,29 @@ puts '-' * 20 +result = client.batch_embed_content( + { + requests: [ + { + model: 'models/text-embedding-004', + content: { parts: [ {text: 'What is life?' } ] }, + output_dimensionality: 64, + task_type: "CLUSTERING" + }, + { + model: 'models/text-embedding-004', + content: { parts: [ {text: 'What is the meaning of life?' } ] }, + output_dimensionality: 64, + task_type: "CLUSTERING" + } + ] + } +) + +puts result.keys + +puts '-' * 20 + client = Gemini.new( credentials: { service: 'vertex-ai-api', diff --git a/template.md b/template.md index 88e9da8..f369344 100644 --- a/template.md +++ b/template.md @@ -573,6 +573,46 @@ Result: -0.014878989] } } ``` +##### batch_embed_content + +The Generative Language API allows you to generate embeddings for multiple strings at the same time using the `batch_embed_content` method ([documentation](https://ai.google.dev/api/embeddings#method:-models.batchembedcontents)). + +```ruby +result = client.batch_embed_content( + { + requests: [ + { + model: 'models/text-embedding-004', + content: { parts: [{ text: 'What is life?' 
}] }, + output_dimensionality: 64, + task_type: 'CLUSTERING' + }, + { + model: 'models/text-embedding-004', + content: { parts: [{ text: 'What is the meaning of life?' }] } + } + ], + } +) +``` + +Result: +```ruby +{"embeddings" => + [{"values" => + [-0.0065307966, + -0.000163254, + -0.0283708, + ... + -0.02459646]}, + {"values" => + [-0.010632273, + 0.019375853, + -0.006665491, + ... + -0.024252947]}]} +``` + ### Modes #### Text