branch: elpa/gptel
commit 4f8df08b1287194b78ef0720530ba00d16fe4076
Author: orge-dev <73769047+orge-dev@users.noreply.github.com>
Commit: GitHub <noreply@github.com>

    gptel-openai: Fix max_tokens field for o4-mini (#791)
    
    gptel-openai.el (gptel--request-data): Handle the max_tokens
    field separately for o4-mini, similar to the other OpenAI o-series
    models.
---
 gptel-openai.el | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gptel-openai.el b/gptel-openai.el
index f5ae38c32e..9a49ba5542 100644
--- a/gptel-openai.el
+++ b/gptel-openai.el
@@ -289,7 +289,7 @@ Mutate state INFO with response metadata."
            :messages [,@prompts]
            :stream ,(or gptel-stream :json-false)))
         (reasoning-model-p ; TODO: Embed this capability in the model's properties
-         (memq gptel-model '(o1 o1-preview o1-mini o3-mini o3))))
+         (memq gptel-model '(o1 o1-preview o1-mini o3-mini o3 o4-mini))))
     (when (and gptel-temperature (not reasoning-model-p))
       (plist-put prompts-plist :temperature gptel-temperature))
     (when gptel-use-tools

Reply via email to