This is an automated email from the ASF dual-hosted git repository.

xtsong pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/flink-agents.git


The following commit(s) were added to refs/heads/main by this push:
     new 2a4d808  [integration] Configure the model name uniformly in 
ChatModelSetup. (#224)
2a4d808 is described below

commit 2a4d80867b286efc43d3fb52b7dec3afe3945a14
Author: Wenjin Xie <[email protected]>
AuthorDate: Sat Sep 27 14:46:02 2025 +0800

    [integration] Configure the model name uniformly in ChatModelSetup. (#224)
---
 python/flink_agents/examples/chat_model_example.py | 11 +++++---
 .../integrate_table_with_react_agent_example.py    |  9 ++++---
 .../agents/custom_types_and_resources.py           |  2 +-
 .../quickstart/agents/product_suggestion_agent.py  |  9 +++++--
 .../quickstart/agents/review_analysis_agent.py     |  1 +
 .../examples/quickstart/react_agent_example.py     |  9 ++++---
 .../flink_agents/examples/react_agent_example.py   |  9 ++++---
 .../integrations/chat_models/ollama_chat_model.py  | 14 ++++++-----
 .../chat_models/tests/test_ollama_chat_model.py    | 29 +++++++++++-----------
 .../chat_models/tests/test_tongyi_chat_model.py    | 22 ++++++++++------
 .../integrations/chat_models/tongyi_chat_model.py  | 27 +++++++++++---------
 11 files changed, 85 insertions(+), 57 deletions(-)

diff --git a/python/flink_agents/examples/chat_model_example.py 
b/python/flink_agents/examples/chat_model_example.py
index e657c43..f089f1f 100644
--- a/python/flink_agents/examples/chat_model_example.py
+++ b/python/flink_agents/examples/chat_model_example.py
@@ -58,13 +58,13 @@ class MyAgent(Agent):
         if not os.environ.get("DASHSCOPE_API_KEY"):
             msg = "Please set the 'DASHSCOPE_API_KEY' environment variable."
             raise ValueError(msg)
-        return ResourceDescriptor(clazz=TongyiChatModelConnection, 
model=TONGYI_MODEL)
+        return ResourceDescriptor(clazz=TongyiChatModelConnection)
 
     @chat_model_connection
     @staticmethod
     def ollama_connection() -> ResourceDescriptor:
         """ChatModelConnection responsible for ollama model service 
connection."""
-        return ResourceDescriptor(clazz=OllamaChatModelConnection, 
model=OLLAMA_MODEL)
+        return ResourceDescriptor(clazz=OllamaChatModelConnection)
 
     @chat_model_setup
     @staticmethod
@@ -74,12 +74,14 @@ class MyAgent(Agent):
             return ResourceDescriptor(
                 clazz=TongyiChatModelSetup,
                 connection="tongyi_connection",
+                model=TONGYI_MODEL,
                 tools=["add"],
             )
         else:
             return ResourceDescriptor(
                 clazz=OllamaChatModelSetup,
                 connection="ollama_connection",
+                model=OLLAMA_MODEL,
                 tools=["add"],
                 extract_reasoning=True,
             )
@@ -90,12 +92,15 @@ class MyAgent(Agent):
         """ChatModel which focus on text generate, and reuse 
ChatModelConnection."""
         if CURRENT_BACKEND == "Tongyi":
             return ResourceDescriptor(
-                clazz=TongyiChatModelSetup, connection="tongyi_connection"
+                clazz=TongyiChatModelSetup,
+                connection="tongyi_connection",
+                model=TONGYI_MODEL,
             )
         else:
             return ResourceDescriptor(
                 clazz=TongyiChatModelSetup,
                 connection="ollama_connection",
+                model=OLLAMA_MODEL,
                 extract_reasoning=True,
             )
 
diff --git 
a/python/flink_agents/examples/integrate_table_with_react_agent_example.py 
b/python/flink_agents/examples/integrate_table_with_react_agent_example.py
index ad8ec0a..c767c2d 100644
--- a/python/flink_agents/examples/integrate_table_with_react_agent_example.py
+++ b/python/flink_agents/examples/integrate_table_with_react_agent_example.py
@@ -76,9 +76,7 @@ if __name__ == "__main__":
 
     # register resource to execution environment
     (
-        env.add_resource(
-            "ollama", ResourceDescriptor(clazz=OllamaChatModelConnection, 
model=model)
-        )
+        env.add_resource("ollama", 
ResourceDescriptor(clazz=OllamaChatModelConnection))
         .add_resource("add", Tool.from_callable(add))
         .add_resource("multiply", Tool.from_callable(multiply))
     )
@@ -102,7 +100,10 @@ if __name__ == "__main__":
     # create ReAct agent.
     agent = ReActAgent(
         chat_model=ResourceDescriptor(
-            clazz=OllamaChatModelSetup, connection="ollama", tools=["add", 
"multiply"]
+            clazz=OllamaChatModelSetup,
+            connection="ollama",
+            model=model,
+            tools=["add", "multiply"],
         ),
         prompt=prompt,
         output_schema=output_type_info,
diff --git 
a/python/flink_agents/examples/quickstart/agents/custom_types_and_resources.py 
b/python/flink_agents/examples/quickstart/agents/custom_types_and_resources.py
index 709ae9b..98f1c9f 100644
--- 
a/python/flink_agents/examples/quickstart/agents/custom_types_and_resources.py
+++ 
b/python/flink_agents/examples/quickstart/agents/custom_types_and_resources.py
@@ -201,5 +201,5 @@ class ProductReviewAnalysisRes(BaseModel):
 
 # ollama chat model connection descriptor
 ollama_server_descriptor = ResourceDescriptor(
-    clazz=OllamaChatModelConnection, model="qwen3:8b", request_timeout=120
+    clazz=OllamaChatModelConnection, request_timeout=120
 )
diff --git 
a/python/flink_agents/examples/quickstart/agents/product_suggestion_agent.py 
b/python/flink_agents/examples/quickstart/agents/product_suggestion_agent.py
index 54762fc..c06f39c 100644
--- a/python/flink_agents/examples/quickstart/agents/product_suggestion_agent.py
+++ b/python/flink_agents/examples/quickstart/agents/product_suggestion_agent.py
@@ -67,8 +67,13 @@ class ProductSuggestionAgent(Agent):
     @staticmethod
     def generate_suggestion_model() -> ResourceDescriptor:
         """ChatModel which focus on generating product suggestions."""
-        return ResourceDescriptor(clazz=OllamaChatModelSetup, 
connection="ollama_server",
-                                  prompt="generate_suggestion_prompt", 
extract_reasoning=True)
+        return ResourceDescriptor(
+            clazz=OllamaChatModelSetup,
+            connection="ollama_server",
+            model="qwen3:8b",
+            prompt="generate_suggestion_prompt",
+            extract_reasoning=True,
+        )
 
     @action(InputEvent)
     @staticmethod
diff --git 
a/python/flink_agents/examples/quickstart/agents/review_analysis_agent.py 
b/python/flink_agents/examples/quickstart/agents/review_analysis_agent.py
index fe5917c..ddd620b 100644
--- a/python/flink_agents/examples/quickstart/agents/review_analysis_agent.py
+++ b/python/flink_agents/examples/quickstart/agents/review_analysis_agent.py
@@ -86,6 +86,7 @@ class ReviewAnalysisAgent(Agent):
         return ResourceDescriptor(
             clazz=OllamaChatModelSetup,
             connection="ollama_server",
+            model="qwen3:8b",
             prompt="review_analysis_prompt",
             tools=["notify_shipping_manager"],
             extract_reasoning=True,
diff --git a/python/flink_agents/examples/quickstart/react_agent_example.py 
b/python/flink_agents/examples/quickstart/react_agent_example.py
index 4227b58..2d54603 100644
--- a/python/flink_agents/examples/quickstart/react_agent_example.py
+++ b/python/flink_agents/examples/quickstart/react_agent_example.py
@@ -57,10 +57,10 @@ def main() -> None:
     # by the Agent.
     agents_env.add_resource(
         "ollama_server",
-        ResourceDescriptor(
-            clazz=OllamaChatModelConnection, model="qwen3:8b", 
request_timeout=120
-        ),
-    ).add_resource("notify_shipping_manager", 
Tool.from_callable(notify_shipping_manager))
+        ResourceDescriptor(clazz=OllamaChatModelConnection, 
request_timeout=120),
+    ).add_resource(
+        "notify_shipping_manager", Tool.from_callable(notify_shipping_manager)
+    )
 
     # Read product reviews from a text file as a streaming source.
     # Each line in the file should be a JSON string representing a 
ProductReview.
@@ -84,6 +84,7 @@ def main() -> None:
         chat_model=ResourceDescriptor(
             clazz=OllamaChatModelSetup,
             connection="ollama_server",
+            model="qwen3:8b",
             tools=["notify_shipping_manager"],
         ),
         prompt=review_analysis_react_prompt,
diff --git a/python/flink_agents/examples/react_agent_example.py 
b/python/flink_agents/examples/react_agent_example.py
index 0723480..830bc6a 100644
--- a/python/flink_agents/examples/react_agent_example.py
+++ b/python/flink_agents/examples/react_agent_example.py
@@ -49,9 +49,7 @@ if __name__ == "__main__":
 
     # register resource to execution environment
     (
-        env.add_resource(
-            "ollama", ResourceDescriptor(clazz=OllamaChatModelConnection, 
model=model)
-        )
+        env.add_resource("ollama", 
ResourceDescriptor(clazz=OllamaChatModelConnection))
         .add_resource("add", Tool.from_callable(add))
         .add_resource("multiply", Tool.from_callable(multiply))
     )
@@ -70,7 +68,10 @@ if __name__ == "__main__":
     # create ReAct agent.
     agent = ReActAgent(
         chat_model=ResourceDescriptor(
-            clazz=OllamaChatModelSetup, connection="ollama", tools=["add", 
"multiply"]
+            clazz=OllamaChatModelSetup,
+            connection="ollama",
+            model=model,
+            tools=["add", "multiply"],
         ),
         prompt=prompt,
         output_schema=OutputData,
diff --git a/python/flink_agents/integrations/chat_models/ollama_chat_model.py 
b/python/flink_agents/integrations/chat_models/ollama_chat_model.py
index 0931467..8549ff3 100644
--- a/python/flink_agents/integrations/chat_models/ollama_chat_model.py
+++ b/python/flink_agents/integrations/chat_models/ollama_chat_model.py
@@ -46,8 +46,6 @@ class OllamaChatModelConnection(BaseChatModelConnection):
     ----------
     base_url : str
         Base url the model is hosted under.
-    model : str
-        Model name to use.
     request_timeout : float
         The timeout for making http request to Ollama API server.
     """
@@ -56,7 +54,6 @@ class OllamaChatModelConnection(BaseChatModelConnection):
         default="http://localhost:11434",
         description="Base url the model is hosted under.",
     )
-    model: str = Field(description="Model name to use.")
     request_timeout: float = Field(
         default=DEFAULT_REQUEST_TIMEOUT,
         description="The timeout for making http request to Ollama API 
server.",
@@ -66,14 +63,12 @@ class OllamaChatModelConnection(BaseChatModelConnection):
 
     def __init__(
         self,
-        model: str,
        base_url: str = "http://localhost:11434",
         request_timeout: float | None = DEFAULT_REQUEST_TIMEOUT,
         **kwargs: Any,
     ) -> None:
         """Init method."""
         super().__init__(
-            model=model,
             base_url=base_url,
             request_timeout=request_timeout,
             **kwargs,
@@ -101,7 +96,7 @@ class OllamaChatModelConnection(BaseChatModelConnection):
             ollama_tools = [to_openai_tool(metadata=tool.metadata) for tool in 
tools]
 
         response = self.client.chat(
-            model=self.model,
+            model=kwargs.pop("model"),
             messages=ollama_messages,
             stream=False,
             tools=ollama_tools,
@@ -173,6 +168,8 @@ class OllamaChatModelSetup(BaseChatModelSetup):
         Prompt template or string for the model. (Inherited from 
BaseChatModelSetup)
     tools : Optional[List[str]]
         List of available tools to use in the chat. (Inherited from 
BaseChatModelSetup)
+    model : str
+        Model name to use.
     temperature : float
         The temperature to use for sampling.
     num_ctx : int
@@ -187,6 +184,8 @@ class OllamaChatModelSetup(BaseChatModelSetup):
         stores it in additional_kwargs.
     """
 
+    model: str = Field(description="Model name to use.")
+
     temperature: float = Field(
         default=0.75,
         description="The temperature to use for sampling.",
@@ -217,6 +216,7 @@ class OllamaChatModelSetup(BaseChatModelSetup):
     def __init__(
         self,
         connection: str,
+        model: str,
         temperature: float = 0.75,
         num_ctx: int = DEFAULT_CONTEXT_WINDOW,
         request_timeout: float | None = DEFAULT_REQUEST_TIMEOUT,
@@ -230,6 +230,7 @@ class OllamaChatModelSetup(BaseChatModelSetup):
             additional_kwargs = {}
         super().__init__(
             connection=connection,
+            model=model,
             temperature=temperature,
             num_ctx=num_ctx,
             request_timeout=request_timeout,
@@ -243,6 +244,7 @@ class OllamaChatModelSetup(BaseChatModelSetup):
     def model_kwargs(self) -> Dict[str, Any]:
         """Return ollama model configuration."""
         base_kwargs = {
+            "model": self.model,
             "temperature": self.temperature,
             "num_ctx": self.num_ctx,
             "keep_alive": self.keep_alive,
diff --git 
a/python/flink_agents/integrations/chat_models/tests/test_ollama_chat_model.py 
b/python/flink_agents/integrations/chat_models/tests/test_ollama_chat_model.py
index 763c504..b230a3b 100644
--- 
a/python/flink_agents/integrations/chat_models/tests/test_ollama_chat_model.py
+++ 
b/python/flink_agents/integrations/chat_models/tests/test_ollama_chat_model.py
@@ -60,8 +60,10 @@ except Exception:
     client is None, reason="Ollama client is not available or test model is 
missing"
 )
 def test_ollama_chat() -> None:  # noqa :D103
-    server = OllamaChatModelConnection(name="ollama", model=test_model)
-    response = server.chat([ChatMessage(role=MessageRole.USER, 
content="Hello!")])
+    server = OllamaChatModelConnection(name="ollama")
+    response = server.chat(
+        [ChatMessage(role=MessageRole.USER, content="Hello!")], 
model=test_model
+    )
     assert response is not None
     assert str(response).strip() != ""
 
@@ -92,7 +94,7 @@ def get_tool(name: str, type: ResourceType) -> FunctionTool:  
# noqa :D103
     client is None, reason="Ollama client is not available or test model is 
missing"
 )
 def test_ollama_chat_with_tools() -> None:  # noqa :D103
-    connection = OllamaChatModelConnection(name="ollama", model=test_model)
+    connection = OllamaChatModelConnection(name="ollama")
 
     def get_resource(name: str, type: ResourceType) -> Resource:
         if type == ResourceType.TOOL:
@@ -101,7 +103,11 @@ def test_ollama_chat_with_tools() -> None:  # noqa :D103
             return connection
 
     llm = OllamaChatModelSetup(
-        name="ollama", connection="ollama", tools=["add"], 
get_resource=get_resource
+        name="ollama",
+        connection="ollama",
+        model=test_model,
+        tools=["add"],
+        get_resource=get_resource,
     )
     response = llm.chat(
         [
@@ -122,9 +128,7 @@ def test_extract_think_tags() -> None:
     """Test the static method that extracts content from <think></think> 
tags."""
     # Test with a think tag at the beginning (most common case)
     content = "<think>First, I need to understand the question.\nThen I need 
to formulate an answer.</think>The answer is 42."
-    cleaned, reasoning = (
-        OllamaChatModelConnection._extract_reasoning(content)
-    )
+    cleaned, reasoning = OllamaChatModelConnection._extract_reasoning(content)
     assert cleaned == "The answer is 42."
     assert (
         reasoning
@@ -132,17 +136,13 @@ def test_extract_think_tags() -> None:
     )
     # Test with a think tag only
     content = "<think>This is just my thought process.</think>"
-    cleaned, reasoning = (
-        OllamaChatModelConnection._extract_reasoning(content)
-    )
+    cleaned, reasoning = OllamaChatModelConnection._extract_reasoning(content)
     assert cleaned == ""
     assert reasoning == "This is just my thought process."
 
     # Test with no think tags
     content = "This is a regular response without any thinking tags."
-    cleaned, reasoning = (
-        OllamaChatModelConnection._extract_reasoning(content)
-    )
+    cleaned, reasoning = OllamaChatModelConnection._extract_reasoning(content)
     assert cleaned == content
     assert reasoning is None
 
@@ -160,7 +160,7 @@ def test_ollama_chat_with_extract_reasoning() -> None:
     # Configure mock client to return our mock response
     mock_client.chat.return_value = mock_response
     # Create model with mocked client
-    connection = OllamaChatModelConnection(name="ollama", model=test_model)
+    connection = OllamaChatModelConnection(name="ollama")
 
     def get_resource(name: str, type: ResourceType) -> Resource:
         return connection
@@ -168,6 +168,7 @@ def test_ollama_chat_with_extract_reasoning() -> None:
     llm = OllamaChatModelSetup(
         name="ollama",
         connection="ollama",
+        model=test_model,
         extract_reasoning=True,
         get_resource=get_resource,
     )
diff --git 
a/python/flink_agents/integrations/chat_models/tests/test_tongyi_chat_model.py 
b/python/flink_agents/integrations/chat_models/tests/test_tongyi_chat_model.py
index 7871b5d..3c3f13d 100644
--- 
a/python/flink_agents/integrations/chat_models/tests/test_tongyi_chat_model.py
+++ 
b/python/flink_agents/integrations/chat_models/tests/test_tongyi_chat_model.py
@@ -36,8 +36,10 @@ api_key_available = "DASHSCOPE_API_KEY" in os.environ
 @pytest.mark.skipif(not api_key_available, reason="DashScope API key is not 
set")
 def test_tongyi_chat() -> None:
     """Test basic chat functionality of TongyiChatModelConnection."""
-    connection = TongyiChatModelConnection(name="tongyi", model=test_model)
-    response = connection.chat([ChatMessage(role=MessageRole.USER, 
content="Hello!")])
+    connection = TongyiChatModelConnection(name="tongyi")
+    response = connection.chat(
+        [ChatMessage(role=MessageRole.USER, content="Hello!")], 
model=test_model
+    )
     assert response is not None
     assert response.content is not None
     assert response.content.strip() != ""
@@ -70,7 +72,7 @@ def get_tool(name: str, type: ResourceType) -> FunctionTool:
 @pytest.mark.skipif(not api_key_available, reason="DashScope API key is not 
set")
 def test_tongyi_chat_with_tools() -> None:
     """Test chat functionality with tool calling."""
-    connection = TongyiChatModelConnection(name="tongyi", model=test_model)
+    connection = TongyiChatModelConnection(name="tongyi")
 
     def get_resource(name: str, type: ResourceType) -> Resource:
         if type == ResourceType.TOOL:
@@ -79,7 +81,11 @@ def test_tongyi_chat_with_tools() -> None:
             return connection
 
     llm = TongyiChatModelSetup(
-        name="tongyi", connection="tongyi", tools=["add"], 
get_resource=get_resource,
+        name="tongyi",
+        model=test_model,
+        connection="tongyi",
+        tools=["add"],
+        get_resource=get_resource,
     )
 
     response = llm.chat(
@@ -100,8 +106,10 @@ def test_tongyi_chat_with_tools() -> None:
 def test_tongyi_chat_with_extract_reasoning(monkeypatch: pytest.MonkeyPatch) 
-> None:
     """Test that extract_reasoning functionality works correctly (mock 
DashScope)."""
     content = "The meaning of life is often considered to be 42, according to 
the Hitchhiker's Guide to the Galaxy."
-    reasoning_content = ("To answer what the meaning of life is, I should 
consider philosophical perspectives. "
-                         "The question is often associated with the number 42 
from Hitchhiker's Guide to the Galaxy.")
+    reasoning_content = (
+        "To answer what the meaning of life is, I should consider 
philosophical perspectives. "
+        "The question is often associated with the number 42 from Hitchhiker's 
Guide to the Galaxy."
+    )
 
     mocked_response = SimpleNamespace(
         status_code=200,
@@ -128,7 +136,6 @@ def test_tongyi_chat_with_extract_reasoning(monkeypatch: 
pytest.MonkeyPatch) ->
 
     connection = TongyiChatModelConnection(
         name="tongyi",
-        model=test_model,
         api_key=os.environ.get("DASHSCOPE_API_KEY", "fake-key"),
     )
 
@@ -137,6 +144,7 @@ def test_tongyi_chat_with_extract_reasoning(monkeypatch: 
pytest.MonkeyPatch) ->
 
     llm = TongyiChatModelSetup(
         name="tongyi",
+        model=test_model,
         connection="tongyi",
         extract_reasoning=True,
         get_resource=get_resource,
diff --git a/python/flink_agents/integrations/chat_models/tongyi_chat_model.py 
b/python/flink_agents/integrations/chat_models/tongyi_chat_model.py
index 3eed5a6..0148603 100644
--- a/python/flink_agents/integrations/chat_models/tongyi_chat_model.py
+++ b/python/flink_agents/integrations/chat_models/tongyi_chat_model.py
@@ -35,7 +35,9 @@ DEFAULT_REQUEST_TIMEOUT = 60.0
 DEFAULT_MODEL = "qwen-plus"
 
 
-def to_dashscope_tool(metadata: ToolMetadata, skip_length_check: bool = False) 
-> Dict[str, Any]:  # noqa:FBT001
+def to_dashscope_tool(
+    metadata: ToolMetadata, skip_length_check: bool = False # noqa:FBT001
+) -> Dict[str, Any]:
     """To DashScope tool."""
     if not skip_length_check and len(metadata.description) > 1024:
         msg = (
@@ -60,8 +62,6 @@ class TongyiChatModelConnection(BaseChatModelConnection):
     ----------
     api_key : str
         Your DashScope API key.
-    model : str
-        Model name to use.
     request_timeout : float
         The timeout for making http request to Tongyi API server.
     """
@@ -70,7 +70,6 @@ class TongyiChatModelConnection(BaseChatModelConnection):
         default_factory=lambda: os.environ.get("DASHSCOPE_API_KEY"),
         description="Your DashScope API key.",
     )
-    model: str = Field(default=DEFAULT_MODEL, description="Model name to use.")
     request_timeout: float = Field(
         default=DEFAULT_REQUEST_TIMEOUT,
         description="The timeout for making http request to Tongyi API 
server.",
@@ -78,7 +77,6 @@ class TongyiChatModelConnection(BaseChatModelConnection):
 
     def __init__(
         self,
-        model: str = DEFAULT_MODEL,
         api_key: str | None = None,
         request_timeout: float | None = DEFAULT_REQUEST_TIMEOUT,
         **kwargs: Any,
@@ -92,9 +90,7 @@ class TongyiChatModelConnection(BaseChatModelConnection):
             )
             raise ValueError(msg)
 
-
         super().__init__(
-            model=model,
             api_key=resolved_api_key,
             request_timeout=request_timeout,
             **kwargs,
@@ -118,7 +114,7 @@ class TongyiChatModelConnection(BaseChatModelConnection):
         req_api_key = kwargs.pop("api_key", self.api_key)
 
         response = Generation.call(
-            model=self.model,
+            model=kwargs.pop("model", DEFAULT_MODEL),
             messages=tongyi_messages,
             tools=tongyi_tools,
             result_format="message",
@@ -129,9 +125,7 @@ class TongyiChatModelConnection(BaseChatModelConnection):
 
         if getattr(response, "status_code", 200) != 200:
             msg = f"DashScope call failed: {getattr(response, 'message', 
'unknown error')}"
-            raise RuntimeError(
-                msg
-            )
+            raise RuntimeError(msg)
 
         choice = response.output["choices"][0]
         response_message: Dict[str, Any] = choice["message"]
@@ -146,7 +140,10 @@ class TongyiChatModelConnection(BaseChatModelConnection):
             tool_call_dict = {
                 "id": uuid.uuid4(),
                 "type": "function",
-                "function": {"name": fn.get("name"),"arguments": args,},
+                "function": {
+                    "name": fn.get("name"),
+                    "arguments": args,
+                },
                 "additional_kwargs": {"original_tool_call_id": tc.get("id")},
             }
             tool_calls.append(tool_call_dict)
@@ -217,6 +214,8 @@ class TongyiChatModelSetup(BaseChatModelSetup):
         Prompt template or string for the model. (Inherited from 
BaseChatModelSetup)
     tools : Optional[List[str]]
         List of available tools to use in the chat. (Inherited from 
BaseChatModelSetup)
+    model : str
+        Model name to use.
     temperature : float
         The temperature to use for sampling.
     additional_kwargs : Dict[str, Any]
@@ -226,6 +225,7 @@ class TongyiChatModelSetup(BaseChatModelSetup):
         in additional_kwargs.
     """
 
+    model: str = Field(default=DEFAULT_MODEL, description="Model name to use.")
     temperature: float = Field(
         default=0.7,
         description="The temperature to use for sampling.",
@@ -244,6 +244,7 @@ class TongyiChatModelSetup(BaseChatModelSetup):
     def __init__(
         self,
         connection: str,
+        model: str = DEFAULT_MODEL,
         temperature: float = 0.7,
         additional_kwargs: Dict[str, Any] | None = None,
         extract_reasoning: bool | None = False,
@@ -254,6 +255,7 @@ class TongyiChatModelSetup(BaseChatModelSetup):
             additional_kwargs = {}
         super().__init__(
             connection=connection,
+            model=model,
             temperature=temperature,
             additional_kwargs=additional_kwargs,
             extract_reasoning=extract_reasoning,
@@ -264,6 +266,7 @@ class TongyiChatModelSetup(BaseChatModelSetup):
     def model_kwargs(self) -> Dict[str, Any]:
         """Return Tongyi model configuration."""
         base_kwargs = {
+            "model": self.model,
             "temperature": self.temperature,
             "extract_reasoning": self.extract_reasoning,
         }

Reply via email to