branch: externals/llm
commit 0faa9e5cc779c502221900820eb908ed3e21eb1a
Merge: 38a627409c d70ca0b352
Author: Andrew Hyatt <[email protected]>
Commit: Andrew Hyatt <[email protected]>
Merge branch 'main' into streaming
---
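This merge pulls the llm-request refactor from main into the streaming
branch: the providers now make HTTP calls through the new llm-request
helpers, built on url-retrieve, instead of the third-party request package.
As a rough sketch of the new call shape (the URL, payload, and callbacks
below are illustrative placeholders, not part of this patch):

  (llm-request-async "https://api.example.com/v1/endpoint"
                     :headers '(("Authorization" . "Bearer KEY"))
                     :data '(("input" . "hello"))
                     :on-success (lambda (response) (message "%S" response))
                     :on-error (lambda (code err) (message "HTTP %s: %S" code err)))

llm-request-sync takes the same :headers and :data keywords and returns the
parsed JSON response directly.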
NEWS.org | 2 +
llm-openai.el | 141 ++++++++++++++++-----------------
llm-request.el | 97 +++++++++++++++++++++++
llm-vertex.el | 244 +++++++++++++++++++++------------------------------------
llm.el | 4 +-
5 files changed, 258 insertions(+), 230 deletions(-)
diff --git a/NEWS.org b/NEWS.org
new file mode 100644
index 0000000000..484483d2fb
--- /dev/null
+++ b/NEWS.org
@@ -0,0 +1,2 @@
+* Version 0.2
+- Remove the dependency on non-GNU request library.
diff --git a/llm-openai.el b/llm-openai.el
index 11dcf2912c..bd4d8896ac 100644
--- a/llm-openai.el
+++ b/llm-openai.el
@@ -27,7 +27,7 @@
(require 'cl-lib)
(require 'llm)
-(require 'request)
+(require 'llm-request)
(require 'json)
(defgroup llm-openai nil
@@ -55,56 +55,54 @@ will use a reasonable default."
(ignore provider)
(cons "Open AI" "https://openai.com/policies/terms-of-use"))
-(defun llm-openai--embedding-make-request (provider string vector-callback error-callback sync)
- "Make a request to Open AI to get an embedding for STRING.
-PROVIDER, VECTOR-CALLBACK and ERROR-CALLBACK are as in the
-`llm-embedding-async' call. SYNC is non-nil when the request
-should wait until the response is received."
- (unless (llm-openai-key provider)
- (error "To call Open AI API, add a key to the `llm-openai' provider."))
- (request "https://api.openai.com/v1/embeddings"
- :type "POST"
- :sync sync
- :timeout 5
- :headers `(("Authorization" . ,(format "Bearer %s" (llm-openai-key provider)))
- ("Content-Type" . "application/json"))
- :data (json-encode `(("input" . ,string) ("model" . ,(or (llm-openai-embedding-model provider) "text-embedding-ada-002"))))
- :parser 'json-read
- :success (cl-function (lambda (&key data &allow-other-keys)
- (funcall vector-callback
- (cdr (assoc 'embedding (aref (cdr (assoc 'data data)) 0))))))
- :error (cl-function (lambda (&key error-thrown data &allow-other-keys)
- (funcall error-callback 'error
- (format "Problem calling Open AI: %s, type:
%s message: %s"
- (cdr error-thrown)
- (assoc-default 'type (cdar data))
- (assoc-default 'message (cdar data))))))))
+(defun llm-openai--embedding-request (provider string)
+ "Return the request to the server for the embedding of STRING.
+PROVIDER is the llm-openai provider."
+ `(("input" . ,string)
+ ("model" . ,(or (llm-openai-embedding-model provider)
"text-embedding-ada-002"))))
+
+(defun llm-openai--embedding-extract-response (response)
+ "Return the embedding from the server RESPONSE."
+ (cdr (assoc 'embedding (aref (cdr (assoc 'data response)) 0))))
+
+(defun llm-openai--error-message (err-response)
+ "Return a user-visible error message from ERR-RESPONSE."
+ (let ((errdata (cdr (assoc 'error err-response))))
+ (format "Problem calling Open AI: %s message: %s"
+ (cdr (assoc 'type errdata))
+ (cdr (assoc 'message errdata)))))
+
+(defun llm-openai--handle-response (response extractor)
+ "If RESPONSE is an error, throw it, else call EXTRACTOR."
+ (if (cdr (assoc 'error response))
+ (error (llm-openai--error-message response))
+ (funcall extractor response)))
(cl-defmethod llm-embedding-async ((provider llm-openai) string vector-callback error-callback)
- (llm-openai--embedding-make-request provider string vector-callback error-callback nil))
+ (unless (llm-openai-key provider)
+ (error "To call Open AI API, add a key to the `llm-openai' provider."))
+ (llm-request-async "https://api.openai.com/v1/embeddings"
+ :headers `(("Authorization" . ,(format "Bearer %s" (llm-openai-key provider))))
+ :data (llm-openai--embedding-request provider string)
+ :on-success (lambda (data)
+ (funcall vector-callback (llm-openai--embedding-extract-response data)))
+ :on-error (lambda (_ data)
+ (funcall error-callback 'error
+ (llm-openai--error-message data)))))
(cl-defmethod llm-embedding ((provider llm-openai) string)
- (let ((response))
- (llm-openai--embedding-make-request provider string
- (lambda (vector) (setq response vector))
- (lambda (_ error-message) (error error-message)) t)
- response))
-
-(defun llm-openai--chat (provider prompt response-callback error-callback &optional return-json-spec sync)
- "Main method to send a PROMPT as a chat prompt to Open AI.
-RETURN-JSON-SPEC, if specified, is a JSON spec to return from the
-Open AI API.
-
-PROVIDER is a `llm-openai' struct which holds the key and other options.
-
-RESPONSE-CALLBACK is a function to call with the LLM response.
-
-ERROR-CALLBACK is called if there is an error, with the error
-signal and message.
-
-SYNC is non-nil when the request should wait until the response is received."
(unless (llm-openai-key provider)
- (error "To call Open AI API, the key must have been set"))
+ (error "To call Open AI API, add a key to the `llm-openai' provider."))
+ (llm-openai--handle-response
+ (llm-request-sync "https://api.openai.com/v1/embeddings"
+ :headers `(("Authorization" . ,(format "Bearer %s" (llm-openai-key provider))))
+ :data (llm-openai--embedding-request provider string))
+ #'llm-openai--embedding-extract-response))
+
+(defun llm-openai--chat-request (provider prompt &optional return-json-spec)
+ "From PROMPT, create the chat request data to send.
+PROVIDER is the llm-openai provider to use.
+RETURN-JSON-SPEC is the optional specification for the JSON to return."
(let (request-alist system-prompt)
(when (llm-chat-prompt-context prompt)
(setq system-prompt (llm-chat-prompt-context prompt)))
@@ -139,37 +137,36 @@ SYNC is non-nil when the request should wait until the response is received."
("parameters" . ,return-json-spec))))
request-alist)
(push '("function_call" . (("name" . "output"))) request-alist))
+ request-alist))
- (request "https://api.openai.com/v1/chat/completions"
- :type "POST"
- :sync sync
- :headers `(("Authorization" . ,(format "Bearer %s" (llm-openai-key provider)))
- ("Content-Type" . "application/json"))
- :data (json-encode request-alist)
- :parser 'json-read
- :success (cl-function
- (lambda (&key data &allow-other-keys)
- (let ((result (cdr (assoc 'content (cdr (assoc 'message (aref (cdr (assoc 'choices data)) 0))))))
- (func-result (cdr (assoc 'arguments (cdr (assoc 'function_call (cdr (assoc 'message (aref (cdr (assoc 'choices data)) 0)))))))))
- (funcall response-callback (or func-result result)))))
- :error (cl-function (lambda (&key error-thrown data &allow-other-keys)
- (funcall error-callback
- 'error
- (format "Problem calling Open AI: %s,
type: %s message: %s"
- (cdr error-thrown)
- (assoc-default 'type (cdar data))
- (assoc-default 'message (cdar data)))))))))
+(defun llm-openai--extract-chat-response (response)
+ "Return chat response from server RESPONSE."
+ (let ((result (cdr (assoc 'content (cdr (assoc 'message (aref (cdr (assoc 'choices response)) 0))))))
+ (func-result (cdr (assoc 'arguments (cdr (assoc 'function_call (cdr (assoc 'message (aref (cdr (assoc 'choices response)) 0)))))))))
+ (or func-result result)))
(cl-defmethod llm-chat-async ((provider llm-openai) prompt response-callback error-callback)
- (llm-openai--chat provider prompt response-callback error-callback))
+ (unless (llm-openai-key provider)
+ (error "To call Open AI API, the key must have been set"))
+ (llm-request-async "https://api.openai.com/v1/chat/completions"
+ :headers `(("Authorization" . ,(format "Bearer %s" (llm-openai-key provider))))
+ :data (llm-openai--chat-request provider prompt)
+ :on-success (lambda (data) (funcall response-callback (llm-openai--extract-chat-response data)))
+ :on-error (lambda (_ data)
+ (let ((errdata (cdr (assoc 'error data))))
+ (funcall error-callback 'error
+ (format "Problem calling Open AI: %s message: %s"
+ (cdr (assoc 'type errdata))
+ (cdr (assoc 'message errdata))))))))
(cl-defmethod llm-chat ((provider llm-openai) prompt)
- (let ((response))
- (llm-openai--chat provider prompt
- (lambda (result) (setq response result))
- (lambda (_ msg) (error msg))
- nil t)
- response))
+ (unless (llm-openai-key provider)
+ (error "To call Open AI API, the key must have been set"))
+ (llm-openai--handle-response
+ (llm-request-sync "https://api.openai.com/v1/chat/completions"
+ :headers `(("Authorization" . ,(format "Bearer %s" (llm-openai-key provider))))
+ :data (llm-openai--chat-request provider prompt))
+ #'llm-openai--extract-chat-response))
(provide 'llm-openai)
diff --git a/llm-request.el b/llm-request.el
new file mode 100644
index 0000000000..2f57822c78
--- /dev/null
+++ b/llm-request.el
@@ -0,0 +1,97 @@
+;;; llm-request.el --- Request handling code -*- lexical-binding: t -*-
+
+;; Copyright (c) 2023 Free Software Foundation, Inc.
+
+;; This program is free software; you can redistribute it and/or
+;; modify it under the terms of the GNU General Public License as
+;; published by the Free Software Foundation; either version 3 of the
+;; License, or (at your option) any later version.
+;;
+;; This program is distributed in the hope that it will be useful, but
+;; WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;; General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GNU Emacs. If not, see <http://www.gnu.org/licenses/>.
+
+;;; Commentary:
+;; This file provides basic functions for providers who need to request data. It
+;; assumes the server is using json.
+
+;;; Code:
+(require 'json)
+(require 'url-http)
+(require 'rx)
+
+(defun llm-request--content ()
+ "From the current buffer, return the content of the response."
+ (goto-char (point-min))
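+ ;; Skip past the HTTP headers: find the first blank line (which may
+ ;; contain a lone carriage return) and move to the line after it.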
+ (re-search-forward (rx (seq line-start
+ (zero-or-one control)
+ line-end)))
+ (forward-line)
+ (buffer-substring-no-properties (point) (point-max)))
+
+(cl-defun llm-request-sync (url &key headers data timeout)
+ "Make a request to URL. The parsed response will be returned.
+
+HEADERS will be added to the request (e.g. the Authorization header),
+in addition to the standard JSON content-type header. This is optional.
+
+DATA will be jsonified and sent as the request body.
+This is required.
+
+TIMEOUT is the number of seconds to wait for a response."
+ (let ((url-request-method "POST")
+ (url-request-extra-headers
+ (append headers '(("Content-Type" . "application/json"))))
+ (url-request-data (json-encode data)))
+ (let ((buf (url-retrieve-synchronously url t nil (or timeout 5))))
+ (if buf
+ (with-current-buffer buf
+ (json-read-from-string (llm-request--content)))
+ (error "LLM request timed out")))))
+
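+;; A minimal synchronous call might look like the following (the URL and
+;; payload are illustrative placeholders, not a real endpoint):
+;;
+;;   (llm-request-sync "https://api.example.com/v1/echo"
+;;                     :data '(("input" . "hello")))
+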
+(cl-defun llm-request-async (url &key headers data on-success on-error on-chunk)
+ "Make a request to URL.
+Nothing will be returned.
+
+HEADERS will be added to the request (e.g. the Authorization header),
+in addition to the standard JSON content-type header. This is optional.
+
+DATA will be jsonified and sent as the request body.
+This is required.
+
+ON-SUCCESS will be called with the response body as a json object.
+This is required.
+
+ON-ERROR will be called with the error code and the response body.
+This is required.
+
+ON-CHUNK will be called with the potentially incomplete response
+body as a string. This is an optional argument."
+ (let ((url-request-method "POST")
+ (url-request-extra-headers
+ (append headers '(("Content-Type" . "application/json"))))
+ (url-request-data (json-encode data)))
+ (let ((buffer
+ (url-retrieve
+ url
+ ;; For some reason the closure you'd expect did not work here.
+ (lambda (_ on-success on-error)
+ (let ((code (url-http-parse-response)))
+ (if (eq code 200)
+ (funcall on-success (json-read-from-string (llm-request--content)))
+ (funcall on-error code (ignore-errors
+ (json-read-from-string (llm-request--content)))))))
+ (list on-success on-error)
+ t)))
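+ ;; If a chunk callback was supplied, watch the retrieval buffer and hand
+ ;; the caller whatever part of the response body has arrived on each change.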
+ (when on-chunk
+ (with-current-buffer buffer
+ (add-hook 'after-change-functions
+ (lambda (_ _ _)
+ (funcall on-chunk (llm-request--content)))
+ nil t)))))
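+
+;; A streaming consumer might call it like this (placeholder URL and
+;; callbacks; ON-CHUNK receives the partial response body as it grows):
+;;
+;;   (llm-request-async "https://api.example.com/v1/chat"
+;;                      :data '(("prompt" . "hi"))
+;;                      :on-success (lambda (response) (message "done: %S" response))
+;;                      :on-error (lambda (code err) (message "HTTP %s: %S" code err))
+;;                      :on-chunk (lambda (text) (message "partial: %s" text)))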
+
+(provide 'llm-request)
+;;; llm-request.el ends here
diff --git a/llm-vertex.el b/llm-vertex.el
index 4c1c6c134e..b878e92fbb 100644
--- a/llm-vertex.el
+++ b/llm-vertex.el
@@ -25,7 +25,7 @@
(require 'cl-lib)
(require 'llm)
-(require 'request)
+(require 'llm-request)
(require 'json)
(defgroup llm-vertex nil
@@ -81,42 +81,42 @@ KEY-GENTIME keeps track of when the key was generated, because the key must be r
(ignore provider)
(cons "Google Cloud Vertex"
"https://policies.google.com/terms/generative-ai"))
-(defun llm-vertex--embedding (provider string vector-callback error-callback sync)
- "Get the embedding for STRING.
-PROVIDER, VECTOR-CALLBACK, ERROR-CALLBACK are all the same as
-`llm-embedding-async'. SYNC, when non-nil, will wait until the
-response is available to return."
- (llm-vertex-refresh-key provider)
- (request (format "https://%s-aiplatform.googleapis.com/%s/projects/%s/locations/%s/publishers/google/models/%s:predict"
- llm-vertex-gcloud-region
- (llm-vertex-project provider)
- llm-vertex-gcloud-region
- (or (llm-vertex-embedding-model provider) "textembedding-gecko"))
- :sync sync
- :timeout 5
- :type "POST"
- :headers `(("Authorization" . ,(format "Bearer %s" (llm-vertex-key provider)))
- ("Content-Type" . "application/json"))
- :data (json-encode `(("instances" . [(("content" . ,string))])))
- :parser 'json-read
- :success (cl-function
- (lambda (&key data &allow-other-keys)
- (funcall vector-callback
- (cdr (assoc 'values (cdr (assoc 'embeddings (aref (cdr (assoc 'predictions data)) 0))))))))
- :error (cl-function (lambda (&key error-thrown data &allow-other-keys)
- (funcall error-callback
- (error (format "Problem calling GCloud AI: %s (%S)"
- (cdr error-thrown) data)))))))
+(defun llm-vertex--embedding-url (provider)
+ "From the PROVIDER, return the URL to use for embeddings"
+ (format
"https://%s-aiplatform.googleapis.com/v1/projects/%s/locations/%s/publishers/google/models/%s:predict"
+ llm-vertex-gcloud-region
+ (llm-vertex-project provider)
+ llm-vertex-gcloud-region
+ (or (llm-vertex-embedding-model provider) "textembedding-gecko")))
+
+(defun llm-vertex--embedding-extract-response (response)
+ "Return the embedding contained in RESPONSE."
+ (cdr (assoc 'values (cdr (assoc 'embeddings (aref (cdr (assoc 'predictions response)) 0))))))
+
+(defun llm-vertex--error-message (err-response)
+ "Return a user-visible error message from ERR-RESPONSE."
+ (format "Problem calling GCloud Vertex AI: status: %s message: %s (%s)"
+ (assoc-default 'status (assoc-default 'error err-response))
+ (assoc-default 'message (assoc-default 'error err-response))
+ err-response))
+
+(defun llm-vertex--handle-response (response extractor)
+ "If RESPONSE is an error, throw it, else call EXTRACTOR."
+ (if (assoc 'error response)
+ (error (llm-vertex--error-message response))
+ (funcall extractor response)))
(cl-defmethod llm-embedding-async ((provider llm-vertex) string vector-callback error-callback)
- (llm-vertex--embedding provider string vector-callback error-callback nil))
-
-(cl-defmethod llm-embedding ((provider llm-vertex) string)
- (let ((response))
- (llm-vertex--embedding provider string
- (lambda (vector) (setq response vector))
- (lambda (_ error-message) (error error-message)) t)
- response))
+ (llm-vertex-refresh-key provider)
+ (llm-request-async (llm-vertex--embedding-url provider)
+ :headers `(("Authorization" . ,(format "Bearer %s" (llm-vertex-key provider))))
+ :data `(("instances" . [(("content" . ,string))]))
+ :on-success (lambda (data)
+ (funcall vector-callback (llm-vertex--embedding-extract-response data)))
+ :on-error (lambda (_ data)
+ (funcall error-callback
+ 'error
+ (llm-vertex--error-message data)))))
(defun llm-vertex--parameters-ui (prompt)
"Return a alist setting parameters, appropriate for the ui API.
@@ -142,6 +142,14 @@ If nothing needs to be set, return nil."
(if param-struct-alist
`(("parameters" . ,param-struct-alist)))))
+(cl-defmethod llm-embedding ((provider llm-vertex) string)
+ (llm-vertex-refresh-key provider)
+ (llm-vertex--handle-response
+ (llm-request-sync (llm-vertex--embedding-url provider)
+ :headers `(("Authorization" . ,(format "Bearer %s" (llm-vertex-key provider))))
+ :data `(("instances" . [(("content" . ,string))])))
+ #'llm-vertex--embedding-extract-response))
+
(defun llm-vertex--input-ui (prompt)
"Return an alist with chat input, appropriate for ui API.
PROMPT contains the input to the call to the chat API."
@@ -153,7 +161,7 @@ PROMPT contains the input to the call to the chat API."
(mapconcat (lambda (example)
(concat "User:\n" (car example)
"\nAssistant:\n" (cdr example)))
(llm-chat-prompt-examples prompt) "\n"))
- system-prompt))
+ system-prompt)))
`(("inputs" . ((("struct_val" .
(("messages" .
(("list_val" .
@@ -187,131 +195,55 @@ PROMPT contains the input to the call to the chat API."
(push `("examples" . ,(mapcar (lambda (example)
`(("input" . (("content" . ,(car
example))))
("output" . (("content" . ,(cdr
example))))))
- (llm-chat-prompt-examples prompt)))
- param-alist))
- (push `("messages" . ,(mapcar (lambda (interaction)
- `(("author" . ,(pcase
(llm-chat-prompt-interaction-role interaction)
- ('user "user")
- ('system (error "System role not supported"))
- ('assistant "assistant")))
- ("content" .
,(llm-chat-prompt-interaction-content interaction))))
- (llm-chat-prompt-interactions prompt)))
- param-alist)
- `(("instances" . (,param-alist)))))
-
-(defun llm-vertex--request-data-v1 (prompt)
- "Return all request data to be passed to the v1 API.
-PROMPT contains the data that will be transformed into the result."
- (append
- (llm-vertex--input-v1 prompt)
- (llm-vertex--parameters-v1 prompt)))
-
-(defun llm-vertex--request-data-ui (prompt)
- "Return all request data to be passed to the ui API.
-PROMPT contains the data that will be transformed into the result."
- (append
- (llm-vertex--input-ui prompt)
- (llm-vertex--parameters-ui prompt)))
-
-(defun llm-vertex--get-response-v1 (response)
- "Return the actual response from the RESPONSE struct returned."
+ (llm-chat-prompt-examples prompt))))
+ prompt-alist))
+ (push `("messages" . ,(apply #'vector
+ (mapcar (lambda (interaction)
+ `(("author" . (pcase
(llm-chat-prompt-interaction-role interaction)
+ ('user "user")
+ ('system (error "System role not supported"))
+ ('assistant "assistant")))
+ ("content" .
,(llm-chat-prompt-interaction-content interaction))))
+ (llm-chat-prompt-interactions prompt))))
+ prompt-alist)
+ (when (llm-chat-prompt-temperature prompt)
+ (push `("temperature" . ,(llm-chat-prompt-temperature prompt))
+ params-alist))
+ (when (llm-chat-prompt-max-tokens prompt)
+ (push `("max_tokens" . ,(llm-chat-prompt-max-tokens prompt))
params-alist))
+ `(("instances" . [,prompt-alist])
+ ("parameters" . ,params-alist))))
+
+(defun llm-vertex--chat-url (provider)
+ "Return the correct url to use for PROVIDER."
+ (format "https://%s-aiplatform.googleapis.com/v1/projects/%s/locations/%s/publishers/google/models/%s:predict"
+ llm-vertex-gcloud-region
+ (llm-vertex-project provider)
+ llm-vertex-gcloud-region
+ (or (llm-vertex-chat-model provider) "chat-bison")))
+
+(defun llm-vertex--chat-extract-response (response)
+ "Return the chat response contained in the server RESPONSE."
(cdr (assoc 'content (aref (cdr (assoc 'candidates (aref (cdr (assoc 'predictions response)) 0))) 0))))
-(defun llm-vertex--get-response-ui (response)
- "Return the actual response from the RESPONSE struct returned."
- (pcase (type-of response)
- ('vector (mapconcat #'llm-vertex--get-response-ui
- response ""))
- ('cons (let* ((outputs (cdr (assoc 'outputs response)))
- (structVal-list (cdr (assoc 'structVal (aref outputs 0))))
- (candidates (cdr (assoc 'candidates structVal-list)))
- (listVal (cdr (assoc 'listVal candidates)))
- (structVal (cdr (assoc 'structVal (aref listVal 0))))
- (content (cdr (assoc 'content structVal)))
- (stringVal (aref (cdr (assoc 'stringVal content)) 0)))
- stringVal))))
-
-(defun llm-vertex--chat (provider prompt response-callback error-callback mode)
- "Get the chat response for PROMPT.
-PROVIDER, RESPONSE-CALLBACK, ERROR-CALLBACK are all the same as
-`llm-chat-async'.
-
-MODE, is either the symbols sync, async, or streaming. If async or
-streaming, the value will not be returned with the response, but
-sent to RESPONSE-CALLBACK."
- (llm-vertex-refresh-key provider)
- (let ((r (request (format "https://%s-aiplatform.googleapis.com/%s/projects/%s/locations/%s/publishers/google/models/%s:%s"
- llm-vertex-gcloud-region
- (if (eq mode 'streaming) "ui" "v1")
- (llm-vertex-project provider)
- llm-vertex-gcloud-region
- (or (llm-vertex-chat-model provider) "chat-bison")
- (if (eq mode 'streaming) "serverStreamingPredict"
- "predict"))
- :type "POST"
- :sync (eq mode 'sync)
- :headers `(("Authorization" . ,(format "Bearer %s" (llm-vertex-key provider)))
- ("Content-Type" . "application/json"))
- :data (json-encode (if (eq mode 'streaming)
- (llm-vertex--request-data-ui prompt)
- (llm-vertex--request-data-v1 prompt)))
- :parser 'json-read
- :success (cl-function (lambda (&key data &allow-other-keys)
- ;; If it's streaming, pass back nil, since we will have passed
- ;; back everything else.
- (funcall response-callback
- (unless (eq mode 'streaming)
- (llm-vertex--get-response-v1 data)))))
- :error (cl-function (lambda (&key error-thrown data &allow-other-keys)
- (funcall error-callback 'error
- (error (format "Problem calling GCloud AI: %s, status: %s message: %s (%s)"
- (cdr error-thrown)
- (assoc-default 'status (assoc-default 'error data))
- (assoc-default 'message (assoc-default 'error data))
- data))))))))
- (when (eq mode 'streaming)
- (with-current-buffer (request-response--buffer r)
- (add-hook 'after-change-functions
- (lambda (_ _ _)
- (let ((start (save-excursion
- (goto-char (point-min))
- (search-forward-regexp (rx (seq line-start "[")) nil t)
- (beginning-of-line)
- (point)))
- (end-of-valid-chunk
- (save-excursion
- (point-max)
- (search-backward-regexp (rx (seq line-start ",")) nil t)
- (point))))
- (when (and start end-of-valid-chunk)
- ;; It'd be nice if our little algorithm always worked, but doesn't, so let's
- ;; just ignore when it fails. As long as it mostly succeeds, it should be fine.
- (condition-case nil
- (funcall response-callback
- (llm-vertex--get-response-ui (json-read-from-string
- (concat
- (buffer-substring-no-properties start end-of-valid-chunk)
- ;; Close off the json
- "]"))))
- (error (message "Unparseable buffer saved to *llm-vertex-unparseable*")
- (let ((s (buffer-string)))
- (with-current-buffer (get-buffer-create "*llm-vertex-unparseable*")
- (erase-buffer)
- (insert s)))))))) nil t)))))
-
(cl-defmethod llm-chat-async ((provider llm-vertex) prompt response-callback error-callback)
- (llm-vertex--chat provider prompt response-callback error-callback 'async))
-
-(cl-defgeneric llm-chat-streaming (provider prompt response-callback error-callback)
- (llm-vertex--chat provider prompt response-callback error-callback 'streaming))
+ (llm-vertex-refresh-key provider)
+ (llm-request-async (llm-vertex--chat-url provider)
+ :headers `(("Authorization" . ,(format "Bearer %s" (llm-vertex-key provider))))
+ :data (llm-vertex--chat-request prompt)
+ :on-success (lambda (data)
+ (funcall response-callback (llm-vertex--chat-extract-response data)))
+ :on-error (lambda (_ data)
+ (funcall error-callback 'error
+ (llm-vertex--error-message data)))))
(cl-defmethod llm-chat ((provider llm-vertex) prompt)
- (let ((response))
- (llm-vertex--chat provider prompt
- (lambda (result) (setq response result))
- (lambda (_ error-message) (error error-message)) 'sync)
- response))
+ (llm-vertex-refresh-key provider)
+ (llm-vertex--handle-response
+ (llm-request-sync (llm-vertex--chat-url provider)
+ :headers `(("Authorization" . ,(format "Bearer %s" (llm-vertex-key provider))))
+ :data (llm-vertex--chat-request prompt))
+ #'llm-vertex--chat-extract-response))
(provide 'llm-vertex)
diff --git a/llm.el b/llm.el
index 1087ab7f0a..d10222aa1e 100644
--- a/llm.el
+++ b/llm.el
@@ -4,8 +4,8 @@
;; Author: Andrew Hyatt <[email protected]>
;; Homepage: https://github.com/ahyatt/llm
-;; Package-Requires: ((request "0.3.3") (emacs "28.1"))
-;; Package-Version: 0.1.1
+;; Package-Requires: ((emacs "28.1"))
+;; Package-Version: 0.2
;; SPDX-License-Identifier: GPL-3.0-or-later
;;
;; This program is free software; you can redistribute it and/or