branch: externals/llm
commit 7cb15d4c563400fffdd262f8b7bd0b4f6632e85a
Author: Andrew Hyatt <ahy...@gmail.com>
Commit: Andrew Hyatt <ahy...@gmail.com>

    Add non-standard-params, so clients can add any parameter
    
    These non-standard-params are in the prompt, since most of them are expected to
    vary with the task, and are not general properties of the model.
    
    This is a potential fix to https://github.com/ahyatt/llm/issues/43.
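    
    As a rough usage sketch (not from the patch itself; "my-provider" and the
    "top_k" option are illustrative stand-ins for whatever backend and
    provider-specific setting a client actually uses):
    
        ;; Hypothetical example: llm does not interpret "top_k" itself; the
        ;; pair is passed through to the provider's request unchanged.
        (llm-chat my-provider
                  (llm-make-chat-prompt
                   "Summarize the report."
                   :temperature 0.2
                   :non-standard-params '(("top_k" . 40))))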
---
 NEWS.org      |  2 ++
 README.org    |  8 +-------
 llm-claude.el | 20 ++++++++++----------
 llm-ollama.el |  1 +
 llm-openai.el |  2 +-
 llm-vertex.el |  2 +-
 llm.el        | 17 +++++++++++++----
 7 files changed, 29 insertions(+), 23 deletions(-)

diff --git a/NEWS.org b/NEWS.org
index 0f8d6f1f88..22c1a2b647 100644
--- a/NEWS.org
+++ b/NEWS.org
@@ -1,3 +1,5 @@
+* Version 0.16.0
+- Add "non-standard params" to set per-provider options.
 * Version 0.15.0
 - Move to =plz= backend, which uses =curl=.  This helps move this package to a
   stronger foundation backed by parsing to spec.  Thanks to Roman Scherer for
diff --git a/README.org b/README.org
index 989a44689c..e56cf6aa75 100644
--- a/README.org
+++ b/README.org
@@ -102,13 +102,7 @@ For all callbacks, the callback will be executed in the buffer the function was
 - ~llm-chat-token-limit~.  Gets the token limit for the chat model.  This isn't possible for some backends like =llama.cpp=, in which the model isn't selected or known by this library.
 
   And the following helper functions:
-  - ~llm-make-chat-prompt text &keys context examples functions temperature
-    max-tokens~: This is how you make prompts.  ~text~ can be a string (the user
-    input to the llm chatbot), or a list representing a series of back-and-forth
-    exchanges, of odd number, with the last element of the list representing the
-    user's latest input.  This supports inputting context (also commonly called
-    a system prompt, although it isn't guaranteed to replace the actual system
-    prompt), examples, and other important elements, all detailed in the docstring for this function.
+  - ~llm-make-chat-prompt text &keys context examples functions temperature max-tokens~: This is how you make prompts.  ~text~ can be a string (the user input to the llm chatbot), or a list representing a series of back-and-forth exchanges, of odd number, with the last element of the list representing the user's latest input.  This supports inputting context (also commonly called a system prompt, although it isn't guaranteed to replace the actual system prompt), examples, and other impor [...]
   - ~llm-chat-prompt-to-text prompt~: From a prompt, return a string representation.  This is not usually suitable for passing to LLMs, but for debugging purposes.
   - ~llm-chat-streaming-to-point provider prompt buffer point finish-callback~: Same basic arguments as ~llm-chat-streaming~, but will stream to ~point~ in ~buffer~.
   - ~llm-chat-prompt-append-response prompt response role~: Append a new response (from the user, usually) to the prompt.  The ~role~ is optional, and defaults to ~'user~.
diff --git a/llm-claude.el b/llm-claude.el
index 129d6a1baa..9a9c5d07f1 100644
--- a/llm-claude.el
+++ b/llm-claude.el
@@ -59,12 +59,12 @@
                     ,(mapcar (lambda (interaction)
                                (append
                                `(("role" . ,(pcase (llm-chat-prompt-interaction-role interaction)
-                                              ('function 'user)
-                                              ('assistant 'assistant)
-                                              ('user 'user)))
-                                 ("content" . ,(or (llm-chat-prompt-interaction-content interaction)
-                                                   (llm-chat-prompt-function-call-result-result
-                                                    (llm-chat-prompt-interaction-function-call-result interaction)))))
+                                               ('function 'user)
+                                               ('assistant 'assistant)
+                                               ('user 'user)))
+                                  ("content" . ,(or (llm-chat-prompt-interaction-content interaction)
+                                                    (llm-chat-prompt-function-call-result-result
+                                                     (llm-chat-prompt-interaction-function-call-result interaction)))))
                                (when-let ((r (llm-chat-prompt-interaction-function-call-result interaction)))
                                  `(("tool_use_id" . ,(llm-chat-prompt-function-call-result-call-id r))))))
                              (llm-chat-prompt-interactions prompt)))))
@@ -76,16 +76,16 @@
       (push `("system" . ,system) request))
     (when (llm-chat-prompt-temperature prompt)
       (push `("temperature" . ,(llm-chat-prompt-temperature prompt)) request))
-    request))
+    (append request (llm-chat-prompt-non-standard-params prompt))))
 
 (cl-defmethod llm-provider-extract-function-calls ((_ llm-claude) response)
   (let ((content (append (assoc-default 'content response) nil)))
     (cl-loop for item in content
              when (equal "tool_use" (assoc-default 'type item))
              collect (make-llm-provider-utils-function-call
-                     :id (assoc-default 'id item)
-                     :name (assoc-default 'name item)
-                     :args (assoc-default 'input item)))))
+                      :id (assoc-default 'id item)
+                      :name (assoc-default 'name item)
+                      :args (assoc-default 'input item)))))
 
 (cl-defmethod llm-provider-populate-function-calls ((_ llm-claude) _ _)
   ;; Claude does not need to be sent back the function calls it sent in the
diff --git a/llm-ollama.el b/llm-ollama.el
index 5844ef368c..7ca1f89bec 100644
--- a/llm-ollama.el
+++ b/llm-ollama.el
@@ -119,6 +119,7 @@ PROVIDER is the llm-ollama provider."
       (push `("temperature" . ,(llm-chat-prompt-temperature prompt)) options))
     (when (llm-chat-prompt-max-tokens prompt)
       (push `("num_predict" . ,(llm-chat-prompt-max-tokens prompt)) options))
+    (setq options (append options (llm-chat-prompt-non-standard-params prompt)))
     (when options (push `("options" . ,options) request-alist))
     request-alist))
 
diff --git a/llm-openai.el b/llm-openai.el
index 80447092d2..0d3ace7941 100644
--- a/llm-openai.el
+++ b/llm-openai.el
@@ -169,7 +169,7 @@ STREAMING if non-nil, turn on response streaming."
       (push `("tools" . ,(mapcar #'llm-provider-utils-openai-function-spec
                                  (llm-chat-prompt-functions prompt)))
             request-alist))
-    request-alist))
+    (append request-alist (llm-chat-prompt-non-standard-params prompt))))
 
 (cl-defmethod llm-provider-chat-extract-result ((_ llm-openai) response)
   (assoc-default 'content
diff --git a/llm-vertex.el b/llm-vertex.el
index 4a86e064e5..2f0ffa59a6 100644
--- a/llm-vertex.el
+++ b/llm-vertex.el
@@ -221,7 +221,7 @@ the key must be regenerated every hour."
   "From PROMPT, create the parameters section.
 Return value is a cons for adding to an alist, unless there is
 nothing to add, in which case it is nil."
-  (let ((params-alist))
+  (let ((params-alist (llm-chat-prompt-non-standard-params prompt)))
     (when (llm-chat-prompt-temperature prompt)
       (push `(temperature . ,(llm-chat-prompt-temperature prompt))
             params-alist))
diff --git a/llm.el b/llm.el
index a5645217dd..9886d011e8 100644
--- a/llm.el
+++ b/llm.el
@@ -70,7 +70,7 @@ See %s for the details on the restrictions on use." name tos)))
   "This stores all the information needed for a structured chat prompt.
 
 Use of this directly is deprecated, instead use `llm-make-chat-prompt'."
-  context examples interactions functions temperature max-tokens)
+  context examples interactions functions temperature max-tokens non-standard-params)
 
 (cl-defstruct llm-chat-prompt-interaction
   "This defines a single interaction given as part of a chat prompt.
@@ -167,7 +167,8 @@ instead."
   (llm-make-chat-prompt text))
 
 (cl-defun llm-make-chat-prompt (text &key context examples functions
-                                     temperature max-tokens)
+                                     temperature max-tokens
+                                     non-standard-params)
   "Create a `llm-chat-prompt' with TEXT sent to the LLM provider.
 
 This is the most correct and easy way to create an
@@ -214,7 +215,14 @@ MAX-TOKENS is the maximum number of tokens to generate.  This is optional.
 CONTEXT, EXAMPLES, FUNCTIONS, TEMPERATURE, and MAX-TOKENS are
 usually turned into part of the interaction, and if so, they will
 be put in the first interaction of the prompt (before anything in
-PREVIOUS-INTERACTIONS)."
+PREVIOUS-INTERACTIONS).
+
+NON-STANDARD-PARAMS is an alist of other options that the
+provider may or may not know how to handle.  These are expected
+to be provider specific.  Don't use this if you want the prompt
+to be used amongst different providers, because it is likely to
+cause a request error.  The cars of the alist are strings and the
+cdrs can be strings or numbers.  This is optional."
   (unless text
     (error "TEXT is required"))
   (when (and (listp text) (zerop (mod (length text) 2)))
@@ -229,7 +237,8 @@ PREVIOUS-INTERACTIONS)."
                                   (if (listp text) text (list text)))
    :functions functions
    :temperature temperature
-   :max-tokens max-tokens))
+   :max-tokens max-tokens
+   :non-standard-params non-standard-params))
 
 (defun llm-chat-prompt-append-response (prompt response &optional role)
   "Append a new RESPONSE to PROMPT, to continue a conversation.

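A rough sketch of how the new field flows through the providers touched above
(not part of the patch; the "num_ctx" option is just an illustrative
Ollama-specific setting):

    ;; Hypothetical example of an Ollama-only option.
    (llm-make-chat-prompt "Hello"
                          :non-standard-params '(("num_ctx" . 4096)))
    ;; llm-ollama appends the pair to its "options" alist, so the request body
    ;; carries roughly: "options": {"num_ctx": 4096}.
    ;; llm-openai and llm-claude append the pairs at the top level of the
    ;; request alist, and llm-vertex uses them to seed its parameters section.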