branch: externals/llm commit 6a7e1dc5cce8591e2af568b3ee10e441785d28fa Author: Andrew Hyatt <ahy...@gmail.com> Commit: GitHub <nore...@github.com>
Fix ollama batch embeddings (#194). These errored out on ollama calls previously. --- NEWS.org | 3 ++- llm-ollama.el | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/NEWS.org b/NEWS.org index 8c3bab3431..e531e0bc70 100644 --- a/NEWS.org +++ b/NEWS.org @@ -1,5 +1,6 @@ * Version 0.26.0 -- Call tools with =nil= when called with false JSON values +- Call tools with =nil= when called with false JSON values. +- Fix bug in ollama batch embedding generation. - Add Qwen 3 and Gemma 3 to model list. - Fix broken model error message * Version 0.25.0 diff --git a/llm-ollama.el b/llm-ollama.el index a3558a10b4..ddf13f53f6 100644 --- a/llm-ollama.el +++ b/llm-ollama.el @@ -103,7 +103,7 @@ PROVIDER is the llm-ollama provider." :model ,(llm-ollama-embedding-model provider))) (cl-defmethod llm-provider-batch-embeddings-request ((provider llm-ollama) strings) - (llm-provider-embedding-request provider strings)) + (llm-provider-embedding-request provider (apply #'vector strings))) (cl-defmethod llm-provider-embedding-extract-result ((_ llm-ollama) response) "Return the embedding from the server RESPONSE."