branch: externals/llm
commit 94eb0a06892f482f60278db776e851f15da8682a
Author: Andrew Hyatt <ahy...@gmail.com>
Commit: Andrew Hyatt <ahy...@gmail.com>

    Fix rest of lint errors
---
 llm-claude.el         |  4 +++-
 llm-fake.el           | 11 +++++-----
 llm-gemini.el         |  3 ++-
 llm-gpt4all.el        |  2 +-
 llm-llamacpp.el       |  7 +++----
 llm-ollama.el         | 12 +++++------
 llm-openai.el         | 16 ++++++++-------
 llm-provider-utils.el | 57 ++++++++++++++++++++++++++++++++++++---------------
 llm-request-plz.el    | 12 +++++------
 llm-tester.el         | 14 ++++++++-----
 llm-vertex.el         | 23 ++++++++++++---------
 llm.el                |  2 +-
 12 files changed, 99 insertions(+), 64 deletions(-)
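
Most of the changes below are checkdoc fixes: two spaces between sentences in docstrings and comments, every argument mentioned in upper case in its function's docstring, no trailing period in `error' messages, and a `;;; Code:' header in each file. A minimal sketch of the conventions being enforced (a hypothetical function, not part of this patch):

(defun my-greet (provider name)
  "Return a greeting for NAME.

PROVIDER is unused here; checkdoc wants every argument documented
in upper case.  Note the two spaces after each sentence."
  (ignore provider)
  (format "Hello, %s" name))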

diff --git a/llm-claude.el b/llm-claude.el
index dac7e48f5c..129d6a1baa 100644
--- a/llm-claude.el
+++ b/llm-claude.el
@@ -36,10 +36,10 @@
   (chat-model "claude-3-opus-20240229" :read-only t))
 
 (cl-defmethod llm-nonfree-message-info ((_ llm-claude))
+  "Return Claude's nonfree ToS."
   "https://www.anthropic.com/legal/consumer-terms";)
 
 (cl-defmethod llm-provider-prelude ((provider llm-claude))
-  "Check if the API key is valid, error if not."
   (unless (llm-claude-key provider)
     (error "No API key provided for Claude")))
 
@@ -130,6 +130,7 @@
             (assoc-default 'message err))))
 
 (cl-defmethod llm-provider-chat-url ((_ llm-claude))
+  "Return the URL for the Claude API."
   "https://api.anthropic.com/v1/messages";)
 
 ;; See https://docs.anthropic.com/claude/docs/models-overview
@@ -140,6 +141,7 @@
     (_ 200000)))
 
 (cl-defmethod llm-name ((_ llm-claude))
+  "Return the name of the provider."
   "Claude")
 
 (cl-defmethod llm-capabilities ((_ llm-claude))
diff --git a/llm-fake.el b/llm-fake.el
index 69d0cccb48..01ff16b977 100644
--- a/llm-fake.el
+++ b/llm-fake.el
@@ -21,8 +21,8 @@
 
 ;;; Commentary:
 ;; This file implements the llm functionality defined in llm.el, for developers
-;; who want to just understand what llm calls are made, and with what data. Or,
-;; to test out various functionality they have. The functions return something,
+;; who want to just understand what llm calls are made, and with what data.  Or,
+;; to test out various functionality they have.  The functions return something,
 ;; or throw errors, depending on how the `llm-fake' provider is configured.
 
 (require 'cl-lib)
@@ -34,16 +34,16 @@
  "A provider for the fake LLM provider.
 
 OUTPUT-TO-BUFFER can be nil, in which case, nothing will be
-output. If a string or a buffer, it will append the request as
+output.  If a string or a buffer, it will append the request as
 text to that buffer.
 
 CHAT-ACTION-FUNC will be called with no arguments to produce
 either a string response for the chat, or a signal symbol and
-message cons. If nil, the response will be a short text string.
+message cons.  If nil, the response will be a short text string.
 
 EMBEDDING-ACTION-FUNC will be called with no arguments to produce
 either a vector response for the chat, or a signal symbol and
-message cons. If nil, the response will be a simple vector."
+message cons.  If nil, the response will be a simple vector."
  output-to-buffer chat-action-func embedding-action-func)
 
 (cl-defmethod llm-chat-async ((provider llm-fake) prompt response-callback error-callback)
@@ -116,6 +116,7 @@ message cons. If nil, the response will be a simple vector."
   nil)
 
 (cl-defmethod llm-name ((_ llm-fake))
+  "The name of the provider."
   "Fake")
 
 (cl-defmethod llm-capabilities ((_ llm-fake))
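
For context, a hedged sketch of exercising the fake provider documented above; the keyword constructor follows from the `cl-defstruct' slots, and `llm-make-simple-chat-prompt' is assumed from llm.el (it is not shown in this patch):

(require 'llm)
(require 'llm-fake)

;; Chat against a canned response instead of a real model.
(let ((provider (make-llm-fake
                 :chat-action-func (lambda () "canned response"))))
  (llm-chat-async provider
                  (llm-make-simple-chat-prompt "Hello?") ; assumed helper
                  (lambda (response) (message "chat: %s" response))
                  (lambda (type msg) (message "error %s: %s" type msg))))
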
diff --git a/llm-gemini.el b/llm-gemini.el
index d84706e85a..c1ff41a999 100644
--- a/llm-gemini.el
+++ b/llm-gemini.el
@@ -21,7 +21,7 @@
 
 ;;; Commentary:
 ;; This file implements the llm functionality defined in llm.el, for Google's
-;; Gemini AI. The documentation is at
+;; Gemini AI.  The documentation is at
 ;; https://ai.google.dev/tutorials/rest_quickstart.
 
 ;;; Code:
@@ -40,6 +40,7 @@ You can get this at https://makersuite.google.com/app/apikey."
   key (embedding-model "embedding-001") (chat-model "gemini-pro"))
 
 (cl-defmethod llm-nonfree-message-info ((_ llm-gemini))
+  "Return nonfree terms of service for Gemini."
   "https://policies.google.com/terms/generative-ai";)
 
 (cl-defmethod llm-provider-embedding-url ((provider llm-gemini))
diff --git a/llm-gpt4all.el b/llm-gpt4all.el
index 5c6a69b4c6..f1a45d5735 100644
--- a/llm-gpt4all.el
+++ b/llm-gpt4all.el
@@ -55,7 +55,7 @@ default the default GPT4all port."
   (llm-chat-async provider prompt response-callback error-callback))
 
 (cl-defmethod llm-name ((provider llm-gpt4all))
-  "Return the name of the provider."
+  "Return the name of the PROVIDER."
   (llm-gpt4all-chat-model provider))
 
 (cl-defmethod llm-chat-token-limit ((provider llm-gpt4all))
diff --git a/llm-llamacpp.el b/llm-llamacpp.el
index 2a462ff063..c25e9bdb88 100644
--- a/llm-llamacpp.el
+++ b/llm-llamacpp.el
@@ -31,8 +31,6 @@
 (require 'llm-provider-utils)
 (require 'json)
 
-(lwarn 'llm-llamacpp :warning "The LlamaCPP module is deprecated, you should use the Open AI Compatible provider instead")
-
 (defgroup llm-llamacpp nil
   "LLM implementation for llama.cpp."
   :group 'llm)
@@ -55,6 +53,8 @@ This is needed because there is no API support for previous chat conversation."
 (defun llm-llamacpp--url (provider path)
   "From PROVIDER, return the URL for llama.cpp.
 PATH is the path to append to the URL, not prefixed with a slash."
+  (lwarn 'llm-llamacpp :warning
+         "The LlamaCPP module is deprecated, you should use the Open AI 
Compatible provider instead")
   (let ((scheme (llm-llamacpp-scheme provider))
         (host (llm-llamacpp-host provider))
         (port (llm-llamacpp-port provider)))
@@ -67,8 +67,7 @@ PATH is the path to append to the URL, not prefixed with a slash."
   (llm-llamacpp--url provider "chat/completions"))
 
 (cl-defmethod llm-name ((_ llm-llamacpp))
-  ;; We don't actually know the name of the model, so we have to just name Llama
-  ;; CPP itself.
+  "Name of Llama CPP, because we don't know the model name."
   "Llama CPP")
 
 (cl-defmethod llm-capabilities ((_ llm-llamacpp))
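
The llama.cpp change above moves the deprecation `lwarn' from top level, where it fires as soon as the file is loaded, into `llm-llamacpp--url', so it fires only when the provider is actually used. The general pattern, as a sketch (typically with a once-per-session guard, which this patch does not add):

(defvar my-pkg--deprecation-warned nil
  "Non-nil once the deprecation warning has been shown.")

(defun my-pkg--warn-deprecated ()
  "Warn on first use rather than on load."
  (unless my-pkg--deprecation-warned
    (setq my-pkg--deprecation-warned t)
    (lwarn 'my-pkg :warning "my-pkg is deprecated; use other-pkg instead")))
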
diff --git a/llm-ollama.el b/llm-ollama.el
index f267d2e05a..4a95b033b3 100644
--- a/llm-ollama.el
+++ b/llm-ollama.el
@@ -21,7 +21,7 @@
 
 ;;; Commentary:
 ;; This file implements the llm functionality defined in llm.el, for Ollama, an
-;; interface to running LLMs locally. Ollama can be found at https://ollama.ai/.
+;; interface to running LLMs locally.  Ollama can be found at https://ollama.ai/.
 
 ;;; Code:
 
@@ -48,20 +48,20 @@
 (cl-defstruct (llm-ollama (:include llm-standard-full-provider))
   "A structure for holding information needed by Ollama's API.
 
-SCHEME is the http scheme to use, a string. It is optional and
+SCHEME is the http scheme to use, a string.  It is optional and
 default to `http'.
 
-HOST is the host that Ollama is running on. It is optional and
+HOST is the host that Ollama is running on.  It is optional and
 default to localhost.
 
 PORT is the localhost port that Ollama is running on.  It is optional.
 
-CHAT-MODEL is the model to use for chat queries. It is required.
+CHAT-MODEL is the model to use for chat queries.  It is required.
 
 EMBEDDING-MODEL is the model to use for embeddings.  It is required."
   (scheme "http") (host "localhost") (port 11434) chat-model embedding-model)
 
-;; Ollama's models may or may not be free, we have no way of knowing. There's no
+;; Ollama's models may or may not be free, we have no way of knowing.  There's no
 ;; way to tell, and no ToS to point out here.
 (cl-defmethod llm-nonfree-message-info ((provider llm-ollama))
   (ignore provider)
@@ -98,7 +98,7 @@ PROVIDER is the llm-ollama provider."
   (assoc-default 'embedding response))
 
 (cl-defmethod llm-provider-chat-extract-result ((_ llm-ollama) response)
-  "Return the chat response from the server RESPONSE"
+  "Return the chat response from the server RESPONSE."
   (assoc-default 'content (assoc-default 'message response)))
 
 (cl-defmethod llm-provider-chat-request ((provider llm-ollama) prompt streaming)
diff --git a/llm-openai.el b/llm-openai.el
index a15db99d73..c03f13c897 100644
--- a/llm-openai.el
+++ b/llm-openai.el
@@ -45,7 +45,7 @@
 
 KEY is the API key for Open AI, which is required.
 
-CHAT-MODEL is the model to use for chat queries. If unset, it
+CHAT-MODEL is the model to use for chat queries.  If unset, it
 will use a reasonable default.
 
 EMBEDDING-MODEL is the model to use for embeddings.  If unset, it
@@ -55,18 +55,19 @@ will use a reasonable default."
 (cl-defstruct (llm-openai-compatible (:include llm-openai))
   "A structure for other APIs that use the Open AI's API.
 
-URL is the URL to use for the API, up to the command. So, for
+URL is the URL to use for the API, up to the command.  So, for
 example, if the API for chat is at
 https://api.example.com/v1/chat, then URL should be
 \"https://api.example.com/v1/\".";
   url)
 
 (cl-defmethod llm-nonfree-message-info ((_ llm-openai))
+  "Return Open AI's nonfree terms of service."
   "https://openai.com/policies/terms-of-use";)
 
 (cl-defmethod llm-provider-embedding-request ((provider llm-openai) string)
   "Return the request to the server for the embedding of STRING.
-MODEL is the embedding model to use, or nil to use the default.."
+PROVIDER is the Open AI provider struct."
   `(("input" . ,string)
     ("model" . ,(or (llm-openai-embedding-model provider)
                     "text-embedding-3-small"))))
@@ -76,11 +77,11 @@ MODEL is the embedding model to use, or nil to use the default.."
   (assoc-default 'embedding (aref (assoc-default 'data response) 0)))
 
 (cl-defgeneric llm-openai--check-key (provider)
-  "Check that the key is set for the Open AI provider.")
+  "Check that the key is set for the Open AI PROVIDER.")
 
 (cl-defmethod llm-openai--check-key ((provider llm-openai))
   (unless (llm-openai-key provider)
-    (error "To call Open AI API, add a key to the `llm-openai' provider.")))
+    (error "To call Open AI API, add a key to the `llm-openai' provider")))
 
 (cl-defmethod llm-openai--check-key ((_ llm-openai-compatible))
   ;; It isn't always the case that a key is needed for Open AI compatible APIs.
@@ -95,9 +96,9 @@ MODEL is the embedding model to use, or nil to use the default.."
 
 (cl-defmethod llm-openai--headers ((provider llm-openai))
   (when-let ((key (llm-openai-key provider)))
-    ;; Encode the API key to ensure it is unibyte. The request library gets
+    ;; Encode the API key to ensure it is unibyte.  The request library gets
     ;; confused by multibyte headers, which turn the entire body multibyte if
-    ;; there’s a non-ascii character, regardless of encoding. And API keys are
+    ;; there’s a non-ascii character, regardless of encoding.  And API keys are
     ;; likely to be obtained from external sources like shell-command-to-string,
     ;; which always returns multibyte.
     `(("Authorization" . ,(format "Bearer %s" (encode-coding-string key 
'utf-8))))))
@@ -243,6 +244,7 @@ RESPONSE can be nil if the response is complete."
                   (append cvec nil)))))
 
 (cl-defmethod llm-name ((_ llm-openai))
+  "Return the name of the provider."
   "Open AI")
 
 ;; See https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
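
The header comment above is worth spelling out: `shell-command-to-string' returns a multibyte string even for pure ASCII, and one multibyte header can make the request library emit the whole body as multibyte. A sketch of the behavior the comment describes:

(require 'subr-x)

;; A key sourced from a shell command is typically multibyte, even
;; when it contains only ASCII ...
(multibyte-string-p
 (string-trim (shell-command-to-string "echo sk-example-key"))) ; usually t

;; ... while the encoded copy placed in the header is always unibyte.
(multibyte-string-p (encode-coding-string "sk-example-key" 'utf-8)) ; nil
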
diff --git a/llm-provider-utils.el b/llm-provider-utils.el
index c16c7dd236..4efc4aafe0 100644
--- a/llm-provider-utils.el
+++ b/llm-provider-utils.el
@@ -16,7 +16,7 @@
 ;; along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.
 
 ;;; Commentary:
-;; This file provides functions to help build providers. It should only be used
+;; This file provides functions to help build providers.  It should only be used
 ;; by modules implementing an LLM provider.
 
 ;;; Code:
@@ -42,7 +42,8 @@ This should not be used outside of this file.")
 ;; Methods necessary for both embedding and chat requests.
 
 (cl-defgeneric llm-provider-request-prelude (provider)
-  "Execute any prelude code necessary before running a request.")
+  "Execute any prelude code necessary before running a request.
+PROVIDER is the provider that will be used to make the request.")
 
 (cl-defmethod llm-provider-request-prelude ((_ llm-standard-provider))
   "Do nothing for the standard provider."
@@ -85,7 +86,10 @@ Return nil if there is no error.")
   "Return the URL for streaming chat for the PROVIDER.")
 
 (cl-defmethod llm-provider-chat-streaming-url ((provider llm-standard-chat-provider))
-  "By default, use the same URL as normal chat."
+  "By default, use the same URL as normal chat.
+
+PROVIDER is the standard chat provider that is used to make the
+request."
   (llm-provider-chat-url provider))
 
 (cl-defgeneric llm-provider-chat-timeout (provider)
@@ -112,11 +116,14 @@ STREAMING is true if this is a streaming request.")
 
 (cl-defgeneric llm-provider-append-to-prompt (provider prompt result func-results)
   "Append RESULT to PROMPT for the PROVIDER.
+
+PROMPT is the prompt that was already sent to the provider.
+
 FUNC-RESULTS is a list of function results, if any.")
 
 (cl-defmethod llm-provider-append-to-prompt ((_ llm-standard-chat-provider) prompt result
                                              &optional func-results)
-  "By default, the standard provider appends to the prompt."
+  ;; By default, we just append to the prompt.
   (llm-provider-utils-append-to-prompt prompt result func-results))
 
 (cl-defgeneric llm-provider-streaming-media-handler (provider msg-receiver fc-receiver err-receiver)
@@ -138,9 +145,10 @@ ERR-RECEIVER with the error message.")
 ;; Methods for chat function calling
 
 (cl-defgeneric llm-provider-extract-function-calls (provider response)
-  "Return the function calls from RESPONSE for the PROVIDER.
-If there are no function calls, return nil.  If there are
-function calls, return a list of
+  "Return the function call results from RESPONSE for the PROVIDER.
+
+If there are no function call results, return nil.  If there are
+function call results, return a list of
 `llm-provider-utils-function-call'.")
 
 (cl-defmethod llm-provider-extract-function-calls ((_ llm-standard-chat-provider) _)
@@ -148,20 +156,21 @@ function calls, return a list of
   nil)
 
 (cl-defgeneric llm-provider-populate-function-calls (provider prompt calls)
-  "For PROVIDER, in PROMPT, record that function CALLS were received.
+  "For PROVIDER, in PROMPT, record function call execution.
 This is the recording before the calls were executed.
 CALLS are a list of `llm-provider-utils-function-call'.")
 
 (cl-defgeneric llm-provider-collect-streaming-function-data (provider data)
   "Transform a list of streaming function call DATA responses.
 
+PROVIDER is the struct specifying the LLM provider and its configuration.
+
 The DATA responses are a list of whatever is sent to the function
 call handler in `llm-provider-streaming-media-handler'.  This should
 return a list of `llm-chat-function-call' structs.")
 
-(cl-defmethod llm-provider-collect-streaming-function-data ((provider llm-standard-chat-provider) data)
-  "By default, there is no streaming function calling."
-  (ignore provider data)
+(cl-defmethod llm-provider-collect-streaming-function-data ((_ llm-standard-chat-provider) _)
+  ;; by default, there is no function calling
   nil)
 
 ;; Standard provider implementations of llm functionality
@@ -307,10 +316,13 @@ EXAMPLE-PRELUDE is a string to prepend to the examples."
 
 (defun llm-provider-utils-combine-to-system-prompt (prompt &optional example-prelude)
   "Add context and examples to a system prompt in PROMPT.
+
 This should be used for providers that have a notion of a system prompt.
 If there is a system prompt, and no assistant response, add to it.
 If there is no system prompt, create one.
-If there is an assistance response, do nothing."
+If there is an assistance response, do nothing.
+
+EXAMPLE-PRELUDE is the text to introduce any examples with."
   (let ((system-prompt (seq-find
                           (lambda (interaction)
                             (eq (llm-chat-prompt-interaction-role interaction) 'system))
@@ -331,7 +343,9 @@ If there is an assistance response, do nothing."
 
 (defun llm-provider-utils-combine-to-user-prompt (prompt &optional example-prelude)
   "Add context and examples to a user prompt in PROMPT.
-This should be used for providers that do not have a notion of a system prompt."
+This should be used for providers that do not have a notion of a system prompt.
+
+EXAMPLE-PRELUDE is the text to introduce any examples with."
   (when-let ((system-content (llm-provider-utils-get-system-prompt prompt example-prelude)))
       (setf (llm-chat-prompt-interaction-content (car (llm-chat-prompt-interactions prompt)))
             (concat system-content
@@ -341,11 +355,15 @@ This should be used for providers that do not have a notion of a system prompt."
             (llm-chat-prompt-examples prompt) nil)))
 
 (defun llm-provider-utils-collapse-history (prompt &optional history-prelude)
-  "Collapse history to a single prompt.
+  "Collapse history to a single PROMPT.
+
 This is useful for providers that cannot handle conversations.
-Essentially it's a way to fake conversation. Caution: tokens will
+Essentially it's a way to fake conversation.  Caution: tokens will
 eventually run out, though, so this isn't a sustainable way to do
-things.  Providers should probably issue a warning when using this."
+things.  Providers should probably issue a warning when using this.
+
+HISTORY-PRELUDE is the text to use to tell the LLM that
+conversation history will follow."
   (when (> (length (llm-chat-prompt-interactions prompt)) 1)
     (setf (llm-chat-prompt-interactions prompt)
           (list (make-llm-chat-prompt-interaction
@@ -472,6 +490,8 @@ This execute function calls if there are any, does any result
 appending to the prompt, and returns an appropriate response for
 the client.
 
+PROVIDER is the struct that configures the use of the LLM.
+
 FUNCALLS is a list of function calls, if any.
 
 TEXT is the text output from the provider, if any.  There should
@@ -488,7 +508,10 @@ be either FUNCALLS or TEXT."
 
 (defun llm-provider-utils-populate-function-results (provider prompt func result)
   "Append the RESULT of FUNC to PROMPT.
-FUNC is a `llm-provider-utils-function-call' struct."
+
+FUNC is a `llm-provider-utils-function-call' struct.
+
+PROVIDER is the struct that configures the use of the LLM."
   (llm-provider-append-to-prompt
    provider prompt result
    (make-llm-chat-prompt-function-call-result
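
Taken together, the generics above sketch the provider contract. A minimal hypothetical provider, mirroring the method patterns in this patch (illustrative only; a real provider also implements request construction and response extraction):

(require 'cl-lib)
(require 'llm-provider-utils)

;; Hypothetical provider struct, inheriting the standard chat behavior.
(cl-defstruct (my-provider (:include llm-standard-chat-provider)))

(cl-defmethod llm-provider-chat-url ((_ my-provider))
  "Return the chat endpoint of the hypothetical service."
  "https://api.example.com/v1/chat")

(cl-defmethod llm-name ((_ my-provider))
  "Return the name of the provider."
  "My Provider")
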
diff --git a/llm-request-plz.el b/llm-request-plz.el
index 9c55b80f30..e3fbbe5cde 100644
--- a/llm-request-plz.el
+++ b/llm-request-plz.el
@@ -16,8 +16,8 @@
 ;; along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.
 
 ;;; Commentary:
-;; This file provides basic functions for providers who need to request data. It
-;; assumes the server is using json.
+;; This file provides basic functions for providers who need to request data.
+;; It assumes the server is using json.
 
 ;;; Code:
 (require 'cl-lib)
@@ -53,7 +53,7 @@ not very long so that we can end stuck requests."
   "Make a request to URL.  The raw text response will be returned.
 
 HEADERS will be added in the Authorization header, in addition to
-standard json header. This is optional.
+standard json header.  This is optional.
 
 DATA will be jsonified and sent as the request body.
 This is required.
@@ -89,7 +89,7 @@ TIMEOUT is the number of seconds to wait for a response."
   "Make a request to URL.  The parsed response will be returned.
 
 HEADERS will be added in the Authorization header, in addition to
-standard json header. This is optional.
+the standard json header.  This is optional.
 
 DATA will be jsonified and sent as the request body.
 This is required.
@@ -121,13 +121,13 @@ TIMEOUT is the number of seconds to wait for a response."
 Nothing will be returned.
 
 HEADERS will be added in the Authorization header, in addition to
-standard json header. This is optional.
+standard json header.  This is optional.
 
 DATA will be jsonified and sent as the request body.
 This is required.
 
 ON-SUCCESS will be called with the response body as a json
-object. This is optional in the case that ON-SUCCESS-DATA is set,
+object.  This is optional in the case that ON-SUCCESS-DATA is set,
 and required otherwise.
 
 ON-ERROR will be called with the error code and a response-body.
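
As a usage sketch of the arguments these docstrings describe (the async entry point is assumed to be `llm-request-plz-async'; its name is not visible in these hunks):

(require 'llm-request-plz)

;; Hypothetical endpoint and key, for illustration only.
(llm-request-plz-async
 "https://api.example.com/v1/chat"
 :headers '(("Authorization" . "Bearer sk-example-key"))
 :data '(("model" . "example-model"))
 :on-success (lambda (response) (message "parsed response: %S" response))
 :on-error (lambda (code body) (message "HTTP %s: %S" code body)))
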
diff --git a/llm-tester.el b/llm-tester.el
index 4607492c2e..5e56ce29ae 100644
--- a/llm-tester.el
+++ b/llm-tester.el
@@ -19,7 +19,7 @@
 ;; along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.
 
 ;;; Commentary:
-;; This file contains functions to help test the LLM implementation. Because of
+;; This file contains functions to help test the LLM implementation.  Because of
 ;; LLM's inherent randomness, it isn't possible to have normal unit tests.
 ;; Humans should be looking at these results and seeing if they make sense.
 ;; However, we can test a few things automatically, including making sure that
@@ -27,11 +27,13 @@
 ;; should be.
 ;;
 ;; The normal way to use this is to create a provider for each LLM you have
-;; access to, and run `llm-tester-all' on it. Or, you can test individual parts
+;; access to, and run `llm-tester-all' on it.  Or, you can test individual parts
 ;; with their respective functions.'
 ;;
 ;; Both normal output and errors are output to the `*Messages*' buffer.
 
+;;; Code:
+
 (require 'llm)
 
 (defun llm-tester-log (message &rest args)
@@ -141,7 +143,7 @@
        (llm-tester-log "ERROR: Provider %s returned an error of type %s with 
message %s" (type-of provider) type message)))))
 
 (defun llm-tester-verify-prompt (prompt)
-  "Test PROMPT to make sure there are no obvious problems"
+  "Test PROMPT to make sure there are no obvious problems."
   (mapc (lambda (i)
           (when (equal (llm-chat-prompt-interaction-content i) "")
             (llm-tester-log "ERROR: prompt had an empty interaction")))
@@ -329,7 +331,7 @@ of by calling the `describe_function' function."
                 (type-of provider) type message)))))
 
 (defun llm-tester-cancel (provider)
-  "Test that PROVIDER can do async calls which can be cancelled."
+  "Test that PROVIDER can do async which can be cancelled."
   (llm-tester-log "Testing provider %s for cancellation" (type-of provider))
   (let ((embedding-request (llm-embedding-async
                             provider "This is a test."
@@ -349,7 +351,9 @@ of by calling the `describe_function' function."
     (llm-cancel-request chat-async-request)))
 
 (defun llm-tester--bad-provider-callback (provider call)
-  "Return testing error callback for CALL."
+  "Return testing error callback for CALL.
+
+PROVIDER is the provider that is being tested."
   (lambda (type message)
     (cond
      ((not (symbolp type))
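
A quick way to exercise this file, per its commentary (output lands in the *Messages* buffer); using the fake provider avoids needing a real API key:

(require 'llm-tester)
(require 'llm-fake)

;; Run the whole manual test suite against a provider.
(llm-tester-all (make-llm-fake))
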
diff --git a/llm-vertex.el b/llm-vertex.el
index 652858c207..1e433d9b2f 100644
--- a/llm-vertex.el
+++ b/llm-vertex.el
@@ -23,6 +23,8 @@
 ;; This file implements the llm functionality defined in llm.el, for Google
 ;; Cloud Vertex AI.
 
+;;; Code:
+
 (require 'cl-lib)
 (require 'llm)
 (require 'llm-request-plz)
@@ -53,7 +55,7 @@ This is only used for streaming calls."
 (defcustom llm-vertex-default-max-output-tokens 500
   "The default maximum number of tokens to ask for.
 This is only used when setting the maximum tokens is required,
-and there is no default. The maximum value possible here is 2049."
+and there is no default.  The maximum value possible here is 2049."
   :type 'integer
   :group 'llm-vertex)
 
@@ -71,14 +73,17 @@ Gemini.")
 (cl-defstruct (llm-vertex (:include llm-google))
   "A struct representing a Vertex AI client.
 
-KEY is the temporary API key for the Vertex AI. It is required to
+KEY is the temporary API key for the Vertex AI.  It is required to
 be populated before any call.
 
-CHAT-MODEL is the name of the chat model to use. If unset, will use a reasonable default.
+CHAT-MODEL is the name of the chat model to use.  If unset, will
+use a reasonable default.
 
-EMBEDDING-MODEL is the name of the embedding model to use. If unset, will use a reasonable default.
+EMBEDDING-MODEL is the name of the embedding model to use.  If
+unset, will use a reasonable default.
 
-KEY-GENTIME keeps track of when the key was generated, because the key must be regenerated every hour."
+KEY-GENTIME keeps track of when the key was generated, because
+the key must be regenerated every hour."
   key
   project
   embedding-model
@@ -100,9 +105,6 @@ KEY-GENTIME keeps track of when the key was generated, because the key must be r
       (setf (llm-vertex-key provider) (encode-coding-string result 'utf-8)))
     (setf (llm-vertex-key-gentime provider) (current-time))))
 
-(cl-defmethod llm-nonfree-message-info ((_ llm-vertex))
-  "https://policies.google.com/terms/generative-ai";)
-
 (cl-defmethod llm-provider-embedding-url ((provider llm-vertex))
   (format "https://%s-aiplatform.googleapis.com/v1/projects/%s/locations/%s/publishers/google/models/%s:predict"
           llm-vertex-gcloud-region
@@ -281,7 +283,7 @@ If STREAMING is non-nil, use the URL for the streaming API."
                                (not (eq (car c) 'parameters)))) request))
 
 (defun llm-vertex--count-tokens-extract-response (response)
-  "Extract the token count from the response."
+  "Extract the token count from the RESPONSE."
   (assoc-default 'totalTokens response))
 
 (cl-defgeneric llm-google-count-tokens-url (provider)
@@ -309,6 +311,7 @@ If STREAMING is non-nil, use the URL for the streaming API."
     (llm-vertex--count-tokens-extract-response response)))
 
 (cl-defmethod llm-name ((_ llm-vertex))
+  "Return the name of the provider."
   "Gemini")
 
 (defun llm-vertex--chat-token-limit (model)
@@ -316,7 +319,7 @@ If STREAMING is non-nil, use the URL for the streaming API."
   (cond ((equal "gemini-pro" model) 30720)
         ((equal "gemini-pro-vision" model) 12288)
         ;; This shouldn't happen unless there's a new model, which could be a
-        ;; smaller or larger model. We'll play it safe and choose a reasonable
+        ;; smaller or larger model.  We'll play it safe and choose a reasonable
         ;; number.
         (t 4096)))
 
diff --git a/llm.el b/llm.el
index 3819d9d3fa..a2230a9f25 100644
--- a/llm.el
+++ b/llm.el
@@ -1,4 +1,4 @@
-;;; llm.el --- Interface to pluggable llm backends -*- lexical-binding: t -*-
+;;; llm.el --- Interface to pluggable llm backends -*- lexical-binding: t; byte-compile-docstring-max-column: 200 -*-
 
 ;; Copyright (c) 2023  Free Software Foundation, Inc.
 
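
The prop-line change above raises `byte-compile-docstring-max-column' for llm.el, silencing the byte-compiler's wide-docstring warnings for this file (the variable exists in recent Emacs versions). The same effect can be had dynamically, e.g. in a batch build, as a sketch:

;; `byte-compile-docstring-max-column' is the variable named in the
;; prop line above; binding it covers just this compilation.
(let ((byte-compile-docstring-max-column 200))
  (byte-compile-file "llm.el"))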
