This is an automated email from the ASF dual-hosted git repository.
alexstocks pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/dubbo-go-samples.git
The following commit(s) were added to refs/heads/main by this push:
new a1bb8c98 LLM Context, Centralized Config, Model Selection (#823)
a1bb8c98 is described below
commit a1bb8c983e0a6a7ba78adeb5cf88546bbdd52d59
Author: Gucvii <[email protected]>
AuthorDate: Sun Apr 6 23:39:04 2025 +0800
LLM Context, Centralized Config, Model Selection (#823)
* LLM Context, Centralized Config, Model Selection
Fixes:
LLM conversations (front client) now retain context.
Configuration settings are now centralized in config.go.
Redundant code has been removed.
Features:
Users can now select different models.
* Update llm/go-client/cmd/client.go
remove \n
Co-authored-by: Alan <[email protected]>
* Update llm/go-client/cmd/client.go
remove \n
Co-authored-by: Alan <[email protected]>
* Update llm/go-client/cmd/client.go
remove \n
Co-authored-by: Alan <[email protected]>
* fix: ui (front/client)
---------
Co-authored-by: Alan <[email protected]>
---
llm/.env.example | 2 +-
llm/config/config.go | 103 ++++++++
llm/go-client/cmd/client.go | 76 ++++--
llm/go-client/frontend/handlers/chat.go | 25 +-
llm/go-client/frontend/main.go | 27 +-
llm/go-client/frontend/service/context.go | 3 -
llm/go-client/frontend/static/script.js | 328 ++++++++++--------------
llm/go-client/frontend/static/style.css | 370 +++++++++++++++-------------
llm/go-client/frontend/templates/index.html | 97 ++++----
llm/go-server/cmd/server.go | 94 ++++---
llm/proto/chat.pb.go | 129 +++++-----
llm/proto/chat.proto | 2 +
llm/proto/chat.triple.go | 2 +-
13 files changed, 686 insertions(+), 572 deletions(-)
diff --git a/llm/.env.example b/llm/.env.example
index b0770c54..dbae2ee7 100644
--- a/llm/.env.example
+++ b/llm/.env.example
@@ -16,6 +16,6 @@
#
-OLLAMA_MODEL = llava:7b
+OLLAMA_MODELS = llava:7b, qwen2.5:7b
OLLAMA_URL = http://localhost:11434
TIME_OUT_SECOND = 300
\ No newline at end of file
diff --git a/llm/config/config.go b/llm/config/config.go
new file mode 100644
index 00000000..046a6544
--- /dev/null
+++ b/llm/config/config.go
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package config
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+import (
+ "github.com/joho/godotenv"
+)
+
+type Config struct {
+ OllamaModels []string
+ OllamaURL string
+
+ TimeoutSeconds int
+}
+
+var (
+ config *Config
+ configOnce sync.Once
+ configErr error
+)
+
+func Load(envFile string) (*Config, error) {
+ configOnce.Do(func() {
+ config = &Config{}
+ err := godotenv.Load(envFile)
+ if err != nil {
+ configErr = fmt.Errorf("error loading .env file: %v",
err)
+ return
+ }
+
+ modelsEnv := os.Getenv("OLLAMA_MODELS")
+ if modelsEnv == "" {
+ configErr = fmt.Errorf("error: OLLAMA_MODELS
environment variable is not set")
+ return
+ }
+
+ modelsList := strings.Split(modelsEnv, ",")
+ for i, model := range modelsList {
+ modelsList[i] = strings.TrimSpace(model)
+ }
+ if len(modelsList) == 0 {
+ configErr = fmt.Errorf("error: No models available")
+ return
+ }
+
+ config.OllamaModels = modelsList
+
+ ollamaURL := os.Getenv("OLLAMA_URL")
+ if ollamaURL == "" {
+ configErr = fmt.Errorf("OLLAMA_URL is not set")
+ return
+ }
+ config.OllamaURL = ollamaURL
+
+ timeoutStr := os.Getenv("TIME_OUT_SECOND")
+ if timeoutStr == "" {
+ config.TimeoutSeconds = 300
+ } else {
+ timeout, err := strconv.Atoi(timeoutStr)
+ if err != nil {
+ configErr = fmt.Errorf("invalid TIME_OUT_SECOND
value: %v", err)
+ return
+ }
+ config.TimeoutSeconds = timeout
+ }
+ })
+
+ return config, configErr
+}
+
+func GetConfig() (*Config, error) {
+ return Load(".env")
+}
+
+func (c *Config) DefaultModel() string {
+ if len(c.OllamaModels) > 0 {
+ return c.OllamaModels[0]
+ }
+ return ""
+}
diff --git a/llm/go-client/cmd/client.go b/llm/go-client/cmd/client.go
index d63e141c..d1474a36 100644
--- a/llm/go-client/cmd/client.go
+++ b/llm/go-client/cmd/client.go
@@ -31,6 +31,7 @@ import (
)
import (
+ "github.com/apache/dubbo-go-samples/llm/config"
chat "github.com/apache/dubbo-go-samples/llm/proto"
)
@@ -40,10 +41,12 @@ type ChatContext struct {
}
var (
- contexts = make(map[string]*ChatContext)
- currentCtxID string
- contextOrder []string
- maxID uint8 = 0
+ contexts = make(map[string]*ChatContext)
+ currentCtxID string
+ contextOrder []string
+ maxID uint8 = 0
+ availableModels []string
+ currentModel string
)
func handleCommand(cmd string) (resp string) {
@@ -55,19 +58,22 @@ func handleCommand(cmd string) (resp string) {
resp += "/? help - Show this help\n"
resp += "/list - List all contexts\n"
resp += "/cd <context> - Switch context\n"
- resp += "/new - Create new context"
+ resp += "/new - Create new context\n"
+ resp += "/models - List available models\n"
+ resp += "/model <name> - Switch to specified model"
return resp
case cmd == "/list":
fmt.Println("Stored contexts (max 3):")
for _, ctxID := range contextOrder {
resp += fmt.Sprintf("- %s\n", ctxID)
}
+ resp = strings.TrimSuffix(resp, "\n")
return resp
case strings.HasPrefix(cmd, "/cd "):
target := strings.TrimPrefix(cmd, "/cd ")
if ctx, exists := contexts[target]; exists {
currentCtxID = ctx.ID
- resp += fmt.Sprintf("Switched to context: %s\n", target)
+ resp += fmt.Sprintf("Switched to context: %s", target)
} else {
resp += "Context not found"
}
@@ -75,15 +81,36 @@ func handleCommand(cmd string) (resp string) {
case cmd == "/new":
newID := createContext()
currentCtxID = newID
- resp += fmt.Sprintf("Created new context: %s\n", newID)
+ resp += fmt.Sprintf("Created new context: %s", newID)
return resp
- default:
- resp += "Available commands:\n"
- resp += "/? help - Show this help\n"
- resp += "/list - List all contexts\n"
- resp += "/cd <context> - Switch context\n"
- resp += "/new - Create new context"
+ case cmd == "/models":
+ resp += "Available models:"
+ for _, model := range availableModels {
+ marker := " "
+ if model == currentModel {
+ marker = "*"
+ }
+ resp += fmt.Sprintf("\n%s %s", marker, model)
+ }
+ return resp
+ case strings.HasPrefix(cmd, "/model "):
+ modelName := strings.TrimPrefix(cmd, "/model ")
+ modelFound := false
+ for _, model := range availableModels {
+ if model == modelName {
+ currentModel = model
+ modelFound = true
+ break
+ }
+ }
+ if modelFound {
+ resp += fmt.Sprintf("Switched to model: %s", modelName)
+ } else {
+ resp += fmt.Sprintf("Model '%s' not found. Use /models
to see available models.", modelName)
+ }
return resp
+ default:
+ return "Invalid command, use /? for help"
}
}
@@ -105,6 +132,15 @@ func createContext() string {
}
func main() {
+ cfg, err := config.GetConfig()
+ if err != nil {
+ fmt.Printf("Error loading config: %v\n", err)
+ return
+ }
+
+ availableModels = cfg.OllamaModels
+ currentModel = cfg.DefaultModel()
+
currentCtxID = createContext()
cli, err := client.NewClient(
@@ -120,7 +156,7 @@ func main() {
return
}
- fmt.Print("\nSend a message (/? for help)")
+ fmt.Printf("\nSend a message (/? for help) - Using model: %s\n",
currentModel)
scanner := bufio.NewScanner(os.Stdin)
for {
fmt.Print("\n> ")
@@ -144,19 +180,27 @@ func main() {
stream, err := svc.Chat(context.Background(),
&chat.ChatRequest{
Messages: currentCtx.History,
+ Model: currentModel,
})
if err != nil {
panic(err)
}
- defer stream.Close()
+ defer func(stream chat.ChatService_ChatClient) {
+ err := stream.Close()
+ if err != nil {
+ fmt.Printf("Error closing stream:
%v\n", err)
+ }
+ }(stream)
resp := ""
for stream.Recv() {
- c := stream.Msg().Content
+ msg := stream.Msg()
+ c := msg.Content
resp += c
fmt.Print(c)
}
+ fmt.Print("\n")
if err := stream.Err(); err != nil {
fmt.Printf("Stream error: %v\n", err)
diff --git a/llm/go-client/frontend/handlers/chat.go
b/llm/go-client/frontend/handlers/chat.go
index 5654506e..45c5dfa4 100644
--- a/llm/go-client/frontend/handlers/chat.go
+++ b/llm/go-client/frontend/handlers/chat.go
@@ -19,23 +19,22 @@ package handlers
import (
"context"
+ "fmt"
"io"
"log"
"net/http"
- "os"
"regexp"
"runtime/debug"
- "strconv"
"time"
)
import (
"github.com/gin-contrib/sessions"
-
"github.com/gin-gonic/gin"
)
import (
+ "github.com/apache/dubbo-go-samples/llm/config"
"github.com/apache/dubbo-go-samples/llm/go-client/frontend/service"
chat "github.com/apache/dubbo-go-samples/llm/proto"
)
@@ -84,6 +83,7 @@ func (h *ChatHandler) Chat(c *gin.Context) {
var req struct {
Message string `json:"message"`
Bin string `json:"bin"`
+ Model string `json:"model"`
}
if err := c.BindJSON(&req); err != nil {
@@ -105,15 +105,16 @@ func (h *ChatHandler) Chat(c *gin.Context) {
img = matches[2]
}
- messages := h.ctxManager.GetHistory(ctxID)
- messages = append(messages, &chat.ChatMessage{
+ h.ctxManager.AppendMessage(ctxID, &chat.ChatMessage{
Role: "human",
Content: req.Message,
Bin: []byte(img),
})
+ messages := h.ctxManager.GetHistory(ctxID)
stream, err := h.svc.Chat(context.Background(), &chat.ChatRequest{
Messages: messages,
+ Model: req.Model,
})
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error":
err.Error()})
@@ -139,6 +140,7 @@ func (h *ChatHandler) Chat(c *gin.Context) {
close(responseCh)
}()
+ resp := ""
for {
select {
case <-c.Request.Context().Done(): // client disconnect
@@ -150,19 +152,28 @@ func (h *ChatHandler) Chat(c *gin.Context) {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
log.Printf("Stream receive
error: %v", err)
}
+ h.ctxManager.AppendMessage(ctxID,
&chat.ChatMessage{
+ Role: "ai",
+ Content: resp,
+ Bin: nil,
+ })
return
}
content := stream.Msg().Content
+ resp += content
responseCh <- content
}
}
}()
// SSE stream output
- timeout, err := strconv.Atoi(os.Getenv("TIME_OUT_SECOND"))
+ cfg, err := config.GetConfig()
if err != nil {
- timeout = 300
+ fmt.Printf("Error loading config: %v\n", err)
+ return
}
+ timeout := cfg.TimeoutSeconds
+
c.Stream(func(w io.Writer) bool {
select {
case chunk, ok := <-responseCh:
diff --git a/llm/go-client/frontend/main.go b/llm/go-client/frontend/main.go
index 60146ff2..a9b37ec4 100644
--- a/llm/go-client/frontend/main.go
+++ b/llm/go-client/frontend/main.go
@@ -20,7 +20,6 @@ package main
import (
"fmt"
"net/http"
- "os"
)
import (
@@ -29,35 +28,20 @@ import (
"github.com/gin-contrib/sessions"
"github.com/gin-contrib/sessions/cookie"
-
"github.com/gin-gonic/gin"
-
- "github.com/joho/godotenv"
)
import (
+ "github.com/apache/dubbo-go-samples/llm/config"
"github.com/apache/dubbo-go-samples/llm/go-client/frontend/handlers"
"github.com/apache/dubbo-go-samples/llm/go-client/frontend/service"
chat "github.com/apache/dubbo-go-samples/llm/proto"
)
func main() {
- err := godotenv.Load(".env")
+ cfg, err := config.GetConfig()
if err != nil {
- panic(fmt.Sprintf("Error loading .env file: %v", err))
- }
-
- _, exist := os.LookupEnv("TIME_OUT_SECOND")
-
- if !exist {
- fmt.Println("TIME_OUT_SECOND is not set")
- return
- }
-
- _, exist = os.LookupEnv("OLLAMA_MODEL")
-
- if !exist {
- fmt.Println("OLLAMA_MODEL is not set")
+ fmt.Printf("Error loading config: %v\n", err)
return
}
@@ -92,8 +76,9 @@ func main() {
h := handlers.NewChatHandler(svc, ctxManager)
r.GET("/", func(c *gin.Context) {
c.HTML(http.StatusOK, "index.html", gin.H{
- "TimeoutSecond": os.Getenv("TIME_OUT_SECOND"),
- "OllamaModel": os.Getenv("OLLAMA_MODEL"),
+ "TimeoutSecond": cfg.TimeoutSeconds,
+ "OllamaModels": cfg.OllamaModels,
+ "DefaultModel": cfg.OllamaModels[0],
})
})
r.POST("/api/chat", h.Chat)
diff --git a/llm/go-client/frontend/service/context.go
b/llm/go-client/frontend/service/context.go
index 0241740a..ef296e54 100644
--- a/llm/go-client/frontend/service/context.go
+++ b/llm/go-client/frontend/service/context.go
@@ -57,9 +57,6 @@ func (m *ContextManager) GetHistory(ctxID string)
[]*chat.ChatMessage {
func (m *ContextManager) AppendMessage(ctxID string, msg *chat.ChatMessage) {
m.Mu.Lock()
defer m.Mu.Unlock()
- if len(m.Contexts[ctxID]) >= 3 {
- m.Contexts[ctxID] = m.Contexts[ctxID][1:]
- }
m.Contexts[ctxID] = append(m.Contexts[ctxID], msg)
}
diff --git a/llm/go-client/frontend/static/script.js
b/llm/go-client/frontend/static/script.js
index 22ae1715..34dae5db 100644
--- a/llm/go-client/frontend/static/script.js
+++ b/llm/go-client/frontend/static/script.js
@@ -1,256 +1,184 @@
-// This file originally cloned from
https://github.com/yotam-halperin/chatbot-static-UI
-
-const chatbox = document.querySelector(".chatbox");
-const chatInput = document.querySelector(".chat-input textarea");
-const sendChatBtn = document.querySelector(".chat-input span");
+const chatMessages = document.getElementById('chatMessages');
+const userInput = document.getElementById('userInput');
+const imageUpload = document.getElementById('imageUpload');
+const previewContainer = document.getElementById('previewContainer');
+const modelSelect = document.getElementById('model-select');
+
+let selectedModel = modelSelect.value;
+let imageFile = null;
+let imageBlob = null;
+
+modelSelect.addEventListener("change", (e) => {
+ selectedModel = e.target.value;
+ const modelChangeMsg = document.createElement("div");
+ modelChangeMsg.className = "message ai";
+ modelChangeMsg.innerHTML = `
+ <div class="avatar"><span
class="material-symbols-outlined">smart_toy</span></div>
+ <div class="message-content"><p>Model switched to:
<strong>${selectedModel}</strong></p></div>
+ `;
+ chatMessages.appendChild(modelChangeMsg);
+ chatMessages.scrollTop = chatMessages.scrollHeight;
+});
-let userMessage = null; // Variable to store user's message
-let userBin = null; // Variable to store user's message
-const inputInitHeight = chatInput.scrollHeight;
+// ============ Image Handling =============
+imageUpload.addEventListener('change', async (e) => {
+ const file = e.target.files[0];
+ if (!file || !file.type.startsWith('image/')) {
+ alert('Only image files are supported');
+ return;
+ }
-let fileBlobArr = [];
-let fileArr = [];
+ imageFile = file;
+ await filesToBlob(file);
+});
-const createChatLi = (content, className) => {
- const chatLi = document.createElement("li");
- chatLi.classList.add("chat", `${className}`);
+function filesToBlob(file) {
+ return new Promise((resolve, reject) => {
+ const reader = new FileReader();
+ reader.readAsDataURL(file);
- if (!(className === "outgoing")) {
- let toy = document.createElement('span');
- toy.className = "material-symbols-outlined";
- toy.innerText = "smart_toy"
- chatLi.appendChild(toy);
- }
+ reader.onload = (e) => {
+ imageBlob = e.target.result;
- const contents = Array.isArray(content) ? content : [content];
+ previewContainer.innerHTML = '';
+ const preview = document.createElement('div');
+ preview.className = 'preview';
- contents.forEach(item => {
- if (!item) return;
- if (item.startsWith('data:image')) {
const img = document.createElement('img');
- img.src = item;
- chatLi.appendChild(img);
- } else {
- const p = document.createElement('p');
- p.textContent = item;
- chatLi.appendChild(p);
- }
- });
-
- return chatLi;
-};
+ img.src = imageBlob;
-const handleChat = () => {
- userMessage = chatInput.value.trim();
- userBin = fileBlobArr.length > 0 ? fileBlobArr[0] : null;
+ const deleteBtn = document.createElement('button');
+ deleteBtn.className = 'delete-btn';
+ deleteBtn.textContent = '×';
+ deleteBtn.onclick = clearImage;
- const contents = [];
- if (userMessage) contents.push(userMessage);
- if (userBin) contents.push(userBin);
+ preview.appendChild(img);
+ preview.appendChild(deleteBtn);
+ previewContainer.appendChild(preview);
- if (contents.length === 0) return;
+ resolve();
+ };
- chatInput.value = "";
- chatInput.style.height = `${inputInitHeight}px`;
- clear();
+ reader.onerror = reject;
+ });
+}
- // user's message
- chatbox.appendChild(createChatLi(contents, "outgoing"));
- chatbox.scrollTo(0, chatbox.scrollHeight);
- // "Thinking..."
- const incomingChatLi = createChatLi("Thinking...", "incoming");
- chatbox.appendChild(incomingChatLi);
- chatbox.scrollTo(0, chatbox.scrollHeight);
+function clearImage() {
+ imageFile = null;
+ imageBlob = null;
+ previewContainer.innerHTML = '';
+ imageUpload.value = '';
+}
- // timeout
- const TIMEOUT_MS = CONFIG.TIME_OUT_SECOND;
+// ============ Chat Logic =============
+function sendMessage() {
+ const message = userInput.value.trim();
+ if (!message && !imageBlob) return;
+
+ // Display user message
+ const userMsg = document.createElement('div');
+ userMsg.className = 'message user';
+ userMsg.innerHTML = `
+ <div class="message-content">
+ ${message ? `<p>${message}</p>` : ''}
+ ${imageBlob ? `<img src="${imageBlob}"
style="width:100px;height:100px;margin-top:6px;border-radius:8px;object-fit:cover;"
alt="">` : ''}
+ </div>`;
+ chatMessages.appendChild(userMsg);
+ chatMessages.scrollTop = chatMessages.scrollHeight;
+
+ // Display AI "thinking" message
+ const aiMsg = document.createElement("div");
+ aiMsg.className = "message ai";
+ aiMsg.innerHTML = `
+ <div class="avatar"><span
class="material-symbols-outlined">smart_toy</span></div>
+ <div class="message-content"><p
id="streaming-response">Thinking...</p></div>`;
+ chatMessages.appendChild(aiMsg);
+ chatMessages.scrollTop = chatMessages.scrollHeight;
+
+ let b = imageBlob
+ userInput.value = '';
+ clearImage();
+
+ // Set timeout control
+ const TIMEOUT_MS = 5000; // 5 seconds
let isTimeout = false;
const timeoutId = setTimeout(() => {
isTimeout = true;
- incomingChatLi.querySelector(".content").textContent = "Request timed
out. Please try again.";
- chatbox.scrollTo(0, chatbox.scrollHeight);
+ const p = aiMsg.querySelector("#streaming-response");
+ if (p) p.textContent = "Request timed out, please try again.";
}, TIMEOUT_MS);
- // send request
- generateResponse(incomingChatLi, () => {
+ generateResponse(message, b, selectedModel, aiMsg, () => {
if (!isTimeout) clearTimeout(timeoutId);
});
}
-const generateResponse = (chatElement, callback) => {
+function generateResponse(message, imageBlob, model, containerEl, onFinish) {
const API_URL = "/api/chat";
- const messageElement = chatElement.querySelector("p");
+ const p = containerEl.querySelector("#streaming-response");
+ p.textContent = "";
- // Initialize stream
let accumulatedResponse = "";
- messageElement.textContent = "";
- messageElement.id = "content";
fetch(API_URL, {
method: "POST",
- headers: {
- "Content-Type": "application/json",
- },
- body: JSON.stringify({ message: userMessage, bin: userBin }),
+ headers: { "Content-Type": "application/json" },
+ body: JSON.stringify({
+ message,
+ bin: imageBlob,
+ model
+ })
})
- .then(response => {
- if (!response.ok) {
- throw new Error(`HTTP error! status: ${response.status}`);
- }
- const reader = response.body.getReader();
+ .then(res => {
+ if (!res.ok) throw new Error(`Request failed: ${res.status}`);
+ const reader = res.body.getReader();
const decoder = new TextDecoder();
- // Function to read the stream recursively
- function readStream() {
+ function read() {
return reader.read().then(({ done, value }) => {
if (done) {
- // Stream is complete, invoke the callback
- if (callback) callback();
+ onFinish && onFinish();
return;
}
- // Decode the chunk and process events
const chunk = decoder.decode(value);
const events = chunk.split('\n\n');
events.forEach(event => {
- if (event.startsWith('event:message')) {
- // Extract data from the event
- const dataLine = event.split('\n')[1];
- if (dataLine && dataLine.startsWith('data:')) {
+ if (event.startsWith("event:message")) {
+ const dataLine = event.split('\n').find(line =>
line.startsWith("data:"));
+ if (dataLine) {
try {
- // Parse the JSON data and update the UI
- const data =
JSON.parse(dataLine.replace('data:', ''));
+ const data =
JSON.parse(dataLine.replace("data:", "").trim());
accumulatedResponse += data.content;
- messageElement.textContent =
accumulatedResponse;
- chatbox.scrollTo(0, chatbox.scrollHeight);
- } catch (error) {
- console.error('Failed to parse event
data:', error);
+ p.textContent = accumulatedResponse;
+ chatMessages.scrollTop =
chatMessages.scrollHeight;
+ } catch (err) {
+ console.warn("Parsing failed:", err);
}
}
}
});
- // Continue reading the stream
- return readStream();
+ return read();
});
}
- // Start reading the stream
- return readStream();
+ return read();
})
- .catch(error => {
- console.error('Error:', error);
- messageElement.classList.add("error");
- messageElement.textContent = "Oops! Something went wrong. Please
try again.";
-
- // Invoke the callback in case of error
- if (callback) callback();
+ .catch(err => {
+ p.textContent = "An error occurred, please try again later.";
+ p.style.color = "red";
+ console.error(err);
+ onFinish && onFinish();
});
-};
-
-chatInput.addEventListener("input", () => {
- // Adjust the height of the input textarea based on its content
- chatInput.style.height = `${inputInitHeight}px`;
- chatInput.style.height = `${chatInput.scrollHeight}px`;
-});
-chatInput.addEventListener("keydown", (e) => {
- // If Enter key is pressed without Shift key and the window
- // width is greater than 800px, handle the chat
- if(e.key === "Enter" && !e.shiftKey && window.innerWidth > 800) {
- e.preventDefault();
- handleChat();
- }
-});
-sendChatBtn.addEventListener("click", handleChat);
-
-addBtn = document.getElementById("add-btn");
-fileInput = document.getElementById("input");
-
-// file process
-function filesToBlob(file) {
- let reader = new FileReader();
- reader.readAsDataURL(file);
- reader.onload = e => {
- fileBlobArr.push(e.target.result);
- let fileDiv = document.createElement('div');
- // delete btn
- let removeDiv = document.createElement('div');
- removeDiv.id = 'file' + '-' + fileBlobArr.length;
- removeDiv.innerHTML = '×';
- // file name
- let fileName = document.createElement('p');
- fileName.innerHTML = file.name;
- fileName.title = file.name;
-
- let img = document.createElement('img');
- img.src = e.target.result;
-
-
- fileDiv.appendChild(img);
- fileDiv.appendChild(removeDiv);
- fileDiv.appendChild(fileName);
-
- document.getElementById("drop").appendChild(fileDiv);
- };
-
- reader.onerror = () => {
- switch(reader.error.code) {
- case '1':
- alert('File not found');
- break;
- case '2':
- alert('Security error');
- break;
- case '3':
- alert('Loading interrupted');
- break;
- case '4':
- alert('File is not readable');
- break;
- case '5':
- alert('Encode error');
- break;
- default:
- alert('File read fail');
- }
- };
}
-function handleFileSelect(event) {
- const files = event.target.files;
- if (files.length > 0) {
- const file = files[0];
-
- if (!file.type.startsWith('image/')) {
- alert("Only support image files");
- return;
- }
-
- fileArr.push(file);
- filesToBlob(file);
-
- document.querySelector('.drop-box').style.setProperty('--div-count',
"1");
- document.getElementById("drop").style.display = "flex";
- addBtn.style.display = "none";
+// ============ Input Field Events =============
+userInput.addEventListener('keypress', (e) => {
+ if (e.key === 'Enter' && !e.shiftKey) {
+ e.preventDefault();
+ sendMessage();
}
-}
-
-fileInput.addEventListener('change', handleFileSelect);
-
-addBtn.addEventListener('click', () => {
- fileInput.click();
- document.getElementById("drop").style.display = "flex";
-});
-
-function clear() {
- document.getElementById("drop").innerHTML = '';
- document.getElementById("drop").style.display = "none";
- addBtn.style.display = "flex";
- fileInput.value = "";
- fileBlobArr = [];
- fileArr = [];
-}
-
-document.getElementById("drop").addEventListener('click', clear);
\ No newline at end of file
+});
\ No newline at end of file
diff --git a/llm/go-client/frontend/static/style.css
b/llm/go-client/frontend/static/style.css
index 2af632c5..6e89cec4 100644
--- a/llm/go-client/frontend/static/style.css
+++ b/llm/go-client/frontend/static/style.css
@@ -1,250 +1,268 @@
-/* This file originally cloned from
https://github.com/yotam-halperin/chatbot-static-UI*/
+@import
url('https://fonts.googleapis.com/css2?family=Poppins:wght@400;500;600&display=swap');
+:root {
+ --primary-color: #5f3dc4;
+ --primary-light: #7e57c2;
+ --background: #E3F2FD;
+ --text-primary: #000;
+ --text-secondary: #495057;
+}
-/* Import Google font - Poppins */
-@import
url('https://fonts.googleapis.com/css2?family=Poppins:wght@400;500;600&display=swap');
* {
+ box-sizing: border-box;
margin: 0;
padding: 0;
- box-sizing: border-box;
font-family: "Poppins", sans-serif;
}
+
body {
- background: #E3F2FD;
+ background-color: var(--background);
+ height: 100vh;
+ display: flex;
+ flex-direction: column;
}
-
-.chatbot {
+.chat-container {
+ max-width: 460px;
+ max-height: 620px;
+ margin: 0 auto;
+ width: 100%;
+ height: 100vh;
+ display: flex;
+ flex-direction: column;
position: fixed;
left: 50%;
top: 50%;
- width: 420px;
- background: #fff;
- border-radius: 15px;
+ transform: translate(-50%, -50%);
+ box-shadow: 0 0 128px 0 rgba(0, 0, 0, 0.1),
+ 0 32px 64px -48px rgba(0, 0, 0, 0.5);
overflow: hidden;
- opacity: 1;
- transform-origin: bottom right;
- box-shadow: 0 0 128px 0 rgba(0,0,0,0.1),
- 0 32px 64px -48px rgba(0,0,0,0.5);
- transition: all 0.1s ease;
- pointer-events: auto;
- transform: translate(-50%, -50%) scale(1);
-}
-body.show-chatbot .chatbot {
- opacity: 1;
- pointer-events: auto;
- transform: scale(1);
}
-.chatbot header {
+
+.chat-header {
padding: 16px 0;
position: relative;
text-align: center;
color: #fff;
background: #724ae8;
- box-shadow: 0 2px 10px rgba(0,0,0,0.1);
+ border-radius: 15px 15px 0 0;
+ box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);
+}
+
+.chat-header h2 {
+ font-size: 1.4rem;
+ padding: 6px 0;
}
-.chatbot header span {
+
+.model-selector {
position: absolute;
- right: 15px;
+ left: 15px;
top: 50%;
- display: none;
- cursor: pointer;
transform: translateY(-50%);
}
-header h2 {
- font-size: 1.4rem;
+
+.model-selector select {
+ background-color: #5f3dc4;
+ color: white;
+ border: none;
+ padding: 5px 10px;
+ border-radius: 5px;
+ cursor: pointer;
+ font-size: 0.9rem;
+}
+
+.model-selector select:focus {
+ outline: none;
}
-.chatbot .chatbox {
+
+.chat-messages {
+ flex: 1;
overflow-y: auto;
+ padding: 1.8rem 1rem;
+ background: white;
+ box-shadow: 0 2px 8px rgba(0, 0, 0, 0.05);
height: 510px;
- padding: 30px 20px 100px;
}
-.chatbot :where(.chatbox, textarea)::-webkit-scrollbar {
- width: 6px;
-}
-.chatbot :where(.chatbox, textarea)::-webkit-scrollbar-track {
- background: #fff;
- border-radius: 25px;
+
+.message {
+ display: flex;
+ align-items: flex-end;
+ gap: 1rem;
+ margin-bottom: 1.5rem;
}
-.chatbot :where(.chatbox, textarea)::-webkit-scrollbar-thumb {
- background: #ccc;
- border-radius: 25px;
+
+.message.user {
+ flex-direction: row-reverse;
}
-.chatbox .chat {
+
+.avatar {
+ width: 40px;
+ height: 40px;
+ border-radius: 8px;
+ background: var(--primary-color);
display: flex;
- list-style: none;
-}
-.chatbox .chat img {
- max-width: 250px;
- max-height: 100px;
- height: auto;
- border-radius: 10px;
- margin-top: 5px;
+ align-items: center;
+ justify-content: center;
+ color: white;
+ font-weight: bold;
}
-.chatbox .outgoing {
- margin: 20px 0;
- justify-content: flex-end;
+.message-content {
+ max-width: 70%;
+ padding: 1rem;
+ border-radius: 12px;
+ position: relative;
display: flex;
- list-style: none;
- align-items: flex-end;
flex-direction: column;
+ align-items: flex-end;
}
-.chatbox .incoming span {
- width: 32px;
- height: 32px;
- color: #fff;
- cursor: default;
- text-align: center;
- line-height: 32px;
- align-self: flex-end;
- background: #724ae8;
- border-radius: 4px;
- margin: 0 10px 7px 0;
-}
-.chatbox .chat p {
- white-space: pre-wrap;
+
+.message.user .message-content p {
padding: 12px 16px;
border-radius: 10px 10px 0 10px;
- max-width: 75%;
- color: #fff;
+ color: white;
font-size: 0.95rem;
- background: #724ae8;
+ background: var(--primary-color);
}
-.chatbox .incoming p {
+
+.message.ai .message-content {
border-radius: 10px 10px 10px 0;
-}
-.chatbox .chat p.error {
- color: #721c24;
- background: #f8d7da;
-}
-.chatbox .incoming p {
- color: #000;
background: #f2f2f2;
}
-.chatbot .chat-input {
+
+.message.ai .message-content p {
+ word-break: break-word;
+ white-space: pre-wrap;
+ overflow-wrap: anywhere;
+ margin: 0;
+}
+
+.input-container {
display: flex;
- gap: 5px;
- position: absolute;
- bottom: 0;
- width: 100%;
- background: #fff;
- padding: 3px 20px;
+ flex-direction: column;
+ gap: 0.5rem;
+ padding: 1rem;
+ background: white;
+ border-radius: 0 0 15px 15px;
border-top: 1px solid #ddd;
+ box-shadow: 0 2px 8px rgba(0, 0, 0, 0.05);
+}
+
+.preview-container {
+ display: flex;
+ gap: 10px;
+ flex-wrap: wrap;
}
-.chat-input textarea {
- height: 55px;
+
+.preview {
+ position: relative;
+ width: 80px;
+ height: 80px;
+ border-radius: 8px;
+ overflow: hidden;
+ border: 1px solid #ccc;
+}
+
+.preview img {
width: 100%;
- border: none;
- outline: none;
- resize: none;
- max-height: 180px;
- padding: 15px 15px 15px 0;
- font-size: 0.95rem;
+ height: 100%;
+ object-fit: cover;
}
-.chat-input span {
- align-self: flex-end;
- color: #724ae8;
+
+.delete-btn {
+ position: absolute;
+ top: 4px;
+ right: 4px;
+ background-color: rgba(0, 0, 0, 0.6);
+ color: white;
+ border: none;
+ border-radius: 50%;
+ width: 20px;
+ height: 20px;
+ font-size: 14px;
cursor: pointer;
- height: 55px;
- display: flex;
- align-items: center;
- visibility: hidden;
- font-size: 1.35rem;
+ opacity: 0;
+ transition: opacity 0.2s;
}
-.chat-input span1 {
- align-self: flex-end;
- color: #724ae8;
- cursor: pointer;
- height: 55px;
+
+.preview:hover .delete-btn {
+ opacity: 1;
+}
+
+.input-row {
display: flex;
+ gap: 0.75rem;
align-items: center;
- font-size: 1.35rem;
}
-.chat-input textarea:valid ~ span {
- visibility: visible;
+
+.input-row input[type="text"] {
+ flex: 1;
+ padding: 0.75rem 1rem;
+ border: 1px solid #dee2e6;
+ border-radius: 8px;
+ outline: none;
+ font-size: 1rem;
+ height: 44px;
+}
+
+.input-row input[type="text"]:focus {
+ border-color: var(--primary-light);
+ box-shadow: 0 0 0 3px rgba(95, 61, 196, 0.1);
}
-.drop-box {
+.file-input input {
display: none;
- height: 60px;
- width: 20%;
- border: 1px dashed #a89b9b;
- overflow: hidden;
- overflow-y: auto;
}
-.drop-box > div {
- width: 40px;
- height: 90%;
- margin: 1px 3px;
- display: block;
- position: relative;
+.material-symbols-rounded {
+ font-variation-settings: 'FILL' 1;
+ font-size: 28px;
+ vertical-align: middle;
+ color: var(--primary-color);
+ background-color: #fff;
}
-.drop-box > div > img {
- width: 90%;
- height: 70%;
+.file-input label {
+ padding: 0.5rem;
+ border-radius: 8px;
+ cursor: pointer;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ height: 44px;
+ width: 44px;
+ transition: all 0.2s;
}
-.drop-box > div > div {
- position: absolute;
- right: 5px;
- top: 5px;
- width: 15px;
- height: 15px;
- background: #347aa5;
+
+button {
+ padding: 0 1rem;
+ border: none;
+ border-radius: 8px;
+ background: #fff;
color: white;
- border-radius: 50%;
- text-align: center;
- line-height: 15px;
- /* font-weight: bold; */
- font-size: 15px;
cursor: pointer;
+ transition: all 0.2s;
+ height: 44px;
+ width: 44px;
+ display: flex;
+ align-items: center;
+ justify-content: center;
}
-.drop-box > div > p {
- bottom: 5px;
- text-align: center;
- line-height: 10px;
- font-size: 10px;
- overflow: hidden;
- text-overflow: ellipsis;
- white-space: nowrap;
+.input-row {
+ gap: 0.5rem;
}
-.drop-text {
- position: absolute;
- width: 450px;
- height: 50px;
- top: 200px;
- text-align: center;
- line-height: 50px;
- opacity: 0.3;
+::-webkit-scrollbar {
+ width: 8px;
}
-.drop-text > span {
- color: #347aa5;
- cursor: pointer;
+::-webkit-scrollbar-track {
+ background: #f1f1f1;
}
-
-@media (max-width: 600px), (max-height: 600px) {
- .chatbot {
- right: 0;
- bottom: 0;
- height: 100%;
- border-radius: 0;
- width: 100%;
- }
- .chatbot .chatbox {
- height: 90%;
- padding: 25px 15px 100px;
- }
- .chatbot .chat-input {
- padding: 5px 15px;
- }
- .chatbot header span {
- display: block;
- }
+::-webkit-scrollbar-thumb {
+ background: var(--primary-light);
+ border-radius: 4px;
}
\ No newline at end of file
diff --git a/llm/go-client/frontend/templates/index.html
b/llm/go-client/frontend/templates/index.html
index 44067546..34da4399 100644
--- a/llm/go-client/frontend/templates/index.html
+++ b/llm/go-client/frontend/templates/index.html
@@ -1,56 +1,59 @@
-<!-- This file originally cloned from
https://github.com/yotam-halperin/chatbot-static-UI-->
-
-
<!DOCTYPE html>
-<!-- Coding By CodingNepal - www.codingnepalweb.com -->
<html lang="en" dir="ltr">
- <head>
- <meta charset="utf-8">
- <title>Chatbot | {{ .OllamaModel }}</title>
- <link rel="stylesheet" href="../static/style.css">
- <meta name="viewport" content="width=device-width, initial-scale=1.0">
- <!-- Google Fonts Link For Icons -->
- <link rel="stylesheet"
href="https://fonts.googleapis.com/css2?family=Material+Symbols+Outlined:opsz,wght,FILL,GRAD@48,400,0,0"
/>
- <link rel="stylesheet"
href="https://fonts.googleapis.com/css2?family=Material+Symbols+Rounded:opsz,wght,FILL,GRAD@48,400,1,0"
/>
- <script src="../static/script.js" defer></script>
- </head>
- <body>
-
- <div class="chatbot">
- <header>
- <h2>{{ .OllamaModel }}</h2>
- <span class="close-btn material-symbols-outlined">close</span>
- </header>
- <ul class="chatbox">
- <li class="chat incoming">
- <span class="material-symbols-outlined">smart_toy</span>
- <p class="chat incoming content" id="content">Hi there 👋</p>
- </li>
- </ul>
- <div class="chat-input">
-
- <textarea placeholder="Enter a message..." spellcheck="false"
required></textarea>
-
- <span id="send-btn" class="material-symbols-rounded">send</span>
-
- <div id="drop" class="drop-box"></div>
-
- <span1 id="add-btn" class="material-symbols-rounded">add</span1>
+<head>
+ <meta charset="utf-8">
+ <title>Chatbot</title>
+ <link rel="stylesheet" href="../static/style.css">
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
+ <!-- Google Fonts Link For Icons -->
+ <link rel="stylesheet"
+
href="https://fonts.googleapis.com/css2?family=Material+Symbols+Outlined:opsz,wght,FILL,GRAD@48,400,0,0"
/>
+ <link rel="stylesheet"
+
href="https://fonts.googleapis.com/css2?family=Material+Symbols+Rounded:opsz,wght,FILL,GRAD@48,400,1,0"
/>
+ <script src="../static/script.js" defer></script>
+</head>
+
+<body>
+
+ <div class="chat-container">
+ <div class="chat-header">
+ <h2>LLM Chat</h2>
+ <div class="model-selector">
+ <label for="model-select"></label><select id="model-select">
+ {{range .OllamaModels}}
+ <option value="{{.}}">{{.}}</option>
+ {{end}}
+ </select>
+ </div>
+ </div>
+ <div class="chat-messages" id="chatMessages">
+ <div class="message ai">
+ <div class="avatar">
+ <span class="material-symbols-outlined">smart_toy</span>
+ </div>
+ <div class="message-content">
+ Hi there 👋
+ </div>
+ </div>
</div>
+ <div class="input-container">
+ <div class="preview-container" id="previewContainer"></div>
+ <div class="input-row">
+ <div class="file-input">
+ <input type="file" id="imageUpload" accept="image/*" multiple>
+ <label for="imageUpload">
+ <span class="material-symbols-rounded">photo_camera</span>
+ </label>
+ </div>
+ <label for="userInput" style="display: none"></label><input
type="text" id="userInput" placeholder="Enter your message...">
+ <button id="send-btn" onclick="sendMessage()">
+ <span class="material-symbols-rounded">send</span>
+ </button>
+ </div>
+ </div>
</div>
-
- <input id="input" accept="image/png, image/jpeg, image/gif" type="file"
style="display: none" >
-
</body>
-<script>
- const CONFIG = {
- TIME_OUT_SECOND: {{ .TimeoutSecond }} * 1000
- };
-
-</script>
-
-
</html>
\ No newline at end of file
diff --git a/llm/go-server/cmd/server.go b/llm/go-server/cmd/server.go
index 4437b0c3..f1eb865b 100644
--- a/llm/go-server/cmd/server.go
+++ b/llm/go-server/cmd/server.go
@@ -23,7 +23,6 @@ import (
"fmt"
"log"
"net/http"
- "os"
"runtime/debug"
)
@@ -32,29 +31,44 @@ import (
"dubbo.apache.org/dubbo-go/v3/protocol"
"dubbo.apache.org/dubbo-go/v3/server"
- "github.com/joho/godotenv"
-
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/llms/ollama"
)
import (
+ "github.com/apache/dubbo-go-samples/llm/config"
chat "github.com/apache/dubbo-go-samples/llm/proto"
)
type ChatServer struct {
- llm *ollama.LLM
+ llms map[string]*ollama.LLM
}
func NewChatServer() (*ChatServer, error) {
- llm, err := ollama.New(
- ollama.WithModel(os.Getenv("OLLAMA_MODEL")),
- ollama.WithServerURL(os.Getenv("OLLAMA_URL")),
- )
+ cfg, err := config.GetConfig()
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("Error loading config: %v\n", err)
}
- return &ChatServer{llm: llm}, nil
+
+ llmMap := make(map[string]*ollama.LLM)
+
+ for _, model := range cfg.OllamaModels {
+ if model == "" {
+ continue
+ }
+
+ llm, err := ollama.New(
+ ollama.WithModel(model),
+ ollama.WithServerURL(cfg.OllamaURL),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("failed to initialize model %s:
%v", model, err)
+ }
+ llmMap[model] = llm
+ log.Printf("Initialized model: %s", model)
+ }
+
+ return &ChatServer{llms: llmMap}, nil
}
func (s *ChatServer) Chat(ctx context.Context, req *chat.ChatRequest, stream
chat.ChatService_ChatServer) (err error) {
@@ -65,8 +79,8 @@ func (s *ChatServer) Chat(ctx context.Context, req
*chat.ChatRequest, stream cha
}
}()
- if s.llm == nil {
- return fmt.Errorf("LLM is not initialized")
+ if len(s.llms) == 0 {
+ return fmt.Errorf("no LLM models are initialized")
}
if len(req.Messages) == 0 {
@@ -74,17 +88,40 @@ func (s *ChatServer) Chat(ctx context.Context, req
*chat.ChatRequest, stream cha
return fmt.Errorf("empty messages in request")
}
+ modelName := req.Model
+ var llm *ollama.LLM
+
+ if modelName != "" {
+ var ok bool
+ llm, ok = s.llms[modelName]
+ if !ok {
+ return fmt.Errorf("requested model '%s' is not
available", modelName)
+ }
+ } else {
+ for name, l := range s.llms {
+ modelName = name
+ llm = l
+ break
+ }
+ log.Printf("No model specified, using default model: %s",
modelName)
+ }
+
var messages []llms.MessageContent
for _, msg := range req.Messages {
- msgType := llms.ChatMessageTypeHuman
- if msg.Role == "ai" {
+ var msgType llms.ChatMessageType
+ switch msg.Role {
+ case "human":
+ msgType = llms.ChatMessageTypeHuman
+ case "ai":
msgType = llms.ChatMessageTypeAI
+ case "system":
+ msgType = llms.ChatMessageTypeSystem
}
messageContent := llms.MessageContent{
Role: msgType,
Parts: []llms.ContentPart{
- llms.TextContent{msg.Content},
+ llms.TextContent{Text: msg.Content},
},
}
@@ -101,7 +138,7 @@ func (s *ChatServer) Chat(ctx context.Context, req
*chat.ChatRequest, stream cha
messages = append(messages, messageContent)
}
- _, err = s.llm.GenerateContent(
+ _, err = llm.GenerateContent(
ctx,
messages,
llms.WithStreamingFunc(func(ctx context.Context, chunk []byte)
error {
@@ -110,12 +147,13 @@ func (s *ChatServer) Chat(ctx context.Context, req
*chat.ChatRequest, stream cha
}
return stream.Send(&chat.ChatResponse{
Content: string(chunk),
+ Model: modelName,
})
}),
)
if err != nil {
- log.Printf("GenerateContent failed: %v\n", err)
- return fmt.Errorf("GenerateContent failed: %v", err)
+ log.Printf("GenerateContent failed with model %s: %v\n",
modelName, err)
+ return fmt.Errorf("GenerateContent failed with model %s: %v",
modelName, err)
}
return nil
@@ -123,26 +161,6 @@ func (s *ChatServer) Chat(ctx context.Context, req
*chat.ChatRequest, stream cha
func main() {
- err := godotenv.Load(".env")
- if err != nil {
- fmt.Printf("Error loading .env file: %v\n", err)
- return
- }
-
- _, exist := os.LookupEnv("OLLAMA_MODEL")
-
- if !exist {
- fmt.Println("OLLAMA_MODEL is not set")
- return
- }
-
- _, exist = os.LookupEnv("OLLAMA_URL")
-
- if !exist {
- fmt.Println("OLLAMA_URL is not set")
- return
- }
-
srv, err := server.NewServer(
server.WithServerProtocol(
protocol.WithPort(20000),
diff --git a/llm/proto/chat.pb.go b/llm/proto/chat.pb.go
index b6953d16..6eb946b8 100644
--- a/llm/proto/chat.pb.go
+++ b/llm/proto/chat.pb.go
@@ -16,24 +16,20 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.36.5
-// protoc v3.6.1
-// source: proto/chat.proto
+// protoc-gen-go v1.36.6
+// protoc v5.29.3
+// source: chat.proto
package chat
import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
-
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-)
-
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
@@ -44,13 +40,14 @@ const (
type ChatRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Messages []*ChatMessage
`protobuf:"bytes,1,rep,name=messages,proto3" json:"messages,omitempty"`
+ Model string
`protobuf:"bytes,2,opt,name=model,proto3" json:"model,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ChatRequest) Reset() {
*x = ChatRequest{}
- mi := &file_proto_chat_proto_msgTypes[0]
+ mi := &file_chat_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -62,7 +59,7 @@ func (x *ChatRequest) String() string {
func (*ChatRequest) ProtoMessage() {}
func (x *ChatRequest) ProtoReflect() protoreflect.Message {
- mi := &file_proto_chat_proto_msgTypes[0]
+ mi := &file_chat_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -75,7 +72,7 @@ func (x *ChatRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ChatRequest.ProtoReflect.Descriptor instead.
func (*ChatRequest) Descriptor() ([]byte, []int) {
- return file_proto_chat_proto_rawDescGZIP(), []int{0}
+ return file_chat_proto_rawDescGZIP(), []int{0}
}
func (x *ChatRequest) GetMessages() []*ChatMessage {
@@ -85,6 +82,13 @@ func (x *ChatRequest) GetMessages() []*ChatMessage {
return nil
}
+func (x *ChatRequest) GetModel() string {
+ if x != nil {
+ return x.Model
+ }
+ return ""
+}
+
type ChatMessage struct {
state protoimpl.MessageState `protogen:"open.v1"`
Role string
`protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` // "human" or
"ai"
@@ -96,7 +100,7 @@ type ChatMessage struct {
func (x *ChatMessage) Reset() {
*x = ChatMessage{}
- mi := &file_proto_chat_proto_msgTypes[1]
+ mi := &file_chat_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -108,7 +112,7 @@ func (x *ChatMessage) String() string {
func (*ChatMessage) ProtoMessage() {}
func (x *ChatMessage) ProtoReflect() protoreflect.Message {
- mi := &file_proto_chat_proto_msgTypes[1]
+ mi := &file_chat_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -121,7 +125,7 @@ func (x *ChatMessage) ProtoReflect() protoreflect.Message {
// Deprecated: Use ChatMessage.ProtoReflect.Descriptor instead.
func (*ChatMessage) Descriptor() ([]byte, []int) {
- return file_proto_chat_proto_rawDescGZIP(), []int{1}
+ return file_chat_proto_rawDescGZIP(), []int{1}
}
func (x *ChatMessage) GetRole() string {
@@ -148,13 +152,14 @@ func (x *ChatMessage) GetBin() []byte {
type ChatResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Content string
`protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"`
+ Model string
`protobuf:"bytes,2,opt,name=model,proto3" json:"model,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ChatResponse) Reset() {
*x = ChatResponse{}
- mi := &file_proto_chat_proto_msgTypes[2]
+ mi := &file_chat_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -166,7 +171,7 @@ func (x *ChatResponse) String() string {
func (*ChatResponse) ProtoMessage() {}
func (x *ChatResponse) ProtoReflect() protoreflect.Message {
- mi := &file_proto_chat_proto_msgTypes[2]
+ mi := &file_chat_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -179,7 +184,7 @@ func (x *ChatResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ChatResponse.ProtoReflect.Descriptor instead.
func (*ChatResponse) Descriptor() ([]byte, []int) {
- return file_proto_chat_proto_rawDescGZIP(), []int{2}
+ return file_chat_proto_rawDescGZIP(), []int{2}
}
func (x *ChatResponse) GetContent() string {
@@ -189,51 +194,51 @@ func (x *ChatResponse) GetContent() string {
return ""
}
-var File_proto_chat_proto protoreflect.FileDescriptor
-
-var file_proto_chat_proto_rawDesc = string([]byte{
- 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x68, 0x61, 0x74,
0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x12, 0x04, 0x63, 0x68, 0x61, 0x74, 0x22, 0x3c, 0x0a, 0x0b,
0x43, 0x68, 0x61, 0x74,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x08, 0x6d,
0x65, 0x73, 0x73, 0x61,
- 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e,
0x63, 0x68, 0x61, 0x74,
- 0x2e, 0x43, 0x68, 0x61, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
0x52, 0x08, 0x6d, 0x65,
- 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0x4d, 0x0a, 0x0b, 0x43, 0x68,
0x61, 0x74, 0x4d, 0x65,
- 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c,
0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x18, 0x0a,
0x07, 0x63, 0x6f, 0x6e,
- 0x74, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
0x63, 0x6f, 0x6e, 0x74,
- 0x65, 0x6e, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x62, 0x69, 0x6e, 0x18, 0x03,
0x20, 0x01, 0x28, 0x0c,
- 0x52, 0x03, 0x62, 0x69, 0x6e, 0x22, 0x28, 0x0a, 0x0c, 0x43, 0x68, 0x61,
0x74, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e,
0x74, 0x65, 0x6e, 0x74,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74,
0x65, 0x6e, 0x74, 0x32,
- 0x40, 0x0a, 0x0b, 0x43, 0x68, 0x61, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x12, 0x31,
- 0x0a, 0x04, 0x43, 0x68, 0x61, 0x74, 0x12, 0x11, 0x2e, 0x63, 0x68, 0x61,
0x74, 0x2e, 0x43, 0x68,
- 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e,
0x63, 0x68, 0x61, 0x74,
- 0x2e, 0x43, 0x68, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x22, 0x00, 0x30,
- 0x01, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
0x63, 0x6f, 0x6d, 0x2f,
- 0x61, 0x70, 0x61, 0x63, 0x68, 0x65, 0x2f, 0x64, 0x75, 0x62, 0x62, 0x6f,
0x2d, 0x67, 0x6f, 0x2d,
- 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x2f, 0x6c, 0x6c, 0x6d, 0x2f,
0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x3b, 0x63, 0x68, 0x61, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
-})
+func (x *ChatResponse) GetModel() string {
+ if x != nil {
+ return x.Model
+ }
+ return ""
+}
+
+var File_chat_proto protoreflect.FileDescriptor
+
+const file_chat_proto_rawDesc = "" +
+ "\n" +
+ "\n" +
+ "chat.proto\x12\x04chat\"R\n" +
+ "\vChatRequest\x12-\n" +
+ "\bmessages\x18\x01 \x03(\v2\x11.chat.ChatMessageR\bmessages\x12\x14\n"
+
+ "\x05model\x18\x02 \x01(\tR\x05model\"M\n" +
+ "\vChatMessage\x12\x12\n" +
+ "\x04role\x18\x01 \x01(\tR\x04role\x12\x18\n" +
+ "\acontent\x18\x02 \x01(\tR\acontent\x12\x10\n" +
+ "\x03bin\x18\x03 \x01(\fR\x03bin\">\n" +
+ "\fChatResponse\x12\x18\n" +
+ "\acontent\x18\x01 \x01(\tR\acontent\x12\x14\n" +
+ "\x05model\x18\x02 \x01(\tR\x05model2@\n" +
+ "\vChatService\x121\n" +
+
"\x04Chat\x12\x11.chat.ChatRequest\x1a\x12.chat.ChatResponse\"\x000\x01B3Z1github.com/apache/dubbo-go-samples/llm/proto;chatb\x06proto3"
var (
- file_proto_chat_proto_rawDescOnce sync.Once
- file_proto_chat_proto_rawDescData []byte
+ file_chat_proto_rawDescOnce sync.Once
+ file_chat_proto_rawDescData []byte
)
-func file_proto_chat_proto_rawDescGZIP() []byte {
- file_proto_chat_proto_rawDescOnce.Do(func() {
- file_proto_chat_proto_rawDescData =
protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_proto_chat_proto_rawDesc),
len(file_proto_chat_proto_rawDesc)))
+func file_chat_proto_rawDescGZIP() []byte {
+ file_chat_proto_rawDescOnce.Do(func() {
+ file_chat_proto_rawDescData =
protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_chat_proto_rawDesc),
len(file_chat_proto_rawDesc)))
})
- return file_proto_chat_proto_rawDescData
+ return file_chat_proto_rawDescData
}
-var file_proto_chat_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
-var file_proto_chat_proto_goTypes = []any{
+var file_chat_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_chat_proto_goTypes = []any{
(*ChatRequest)(nil), // 0: chat.ChatRequest
(*ChatMessage)(nil), // 1: chat.ChatMessage
(*ChatResponse)(nil), // 2: chat.ChatResponse
}
-var file_proto_chat_proto_depIdxs = []int32{
+var file_chat_proto_depIdxs = []int32{
1, // 0: chat.ChatRequest.messages:type_name -> chat.ChatMessage
0, // 1: chat.ChatService.Chat:input_type -> chat.ChatRequest
2, // 2: chat.ChatService.Chat:output_type -> chat.ChatResponse
@@ -244,26 +249,26 @@ var file_proto_chat_proto_depIdxs = []int32{
0, // [0:1] is the sub-list for field type_name
}
-func init() { file_proto_chat_proto_init() }
-func file_proto_chat_proto_init() {
- if File_proto_chat_proto != nil {
+func init() { file_chat_proto_init() }
+func file_chat_proto_init() {
+ if File_chat_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor:
unsafe.Slice(unsafe.StringData(file_proto_chat_proto_rawDesc),
len(file_proto_chat_proto_rawDesc)),
+ RawDescriptor:
unsafe.Slice(unsafe.StringData(file_chat_proto_rawDesc),
len(file_chat_proto_rawDesc)),
NumEnums: 0,
NumMessages: 3,
NumExtensions: 0,
NumServices: 1,
},
- GoTypes: file_proto_chat_proto_goTypes,
- DependencyIndexes: file_proto_chat_proto_depIdxs,
- MessageInfos: file_proto_chat_proto_msgTypes,
+ GoTypes: file_chat_proto_goTypes,
+ DependencyIndexes: file_chat_proto_depIdxs,
+ MessageInfos: file_chat_proto_msgTypes,
}.Build()
- File_proto_chat_proto = out.File
- file_proto_chat_proto_goTypes = nil
- file_proto_chat_proto_depIdxs = nil
+ File_chat_proto = out.File
+ file_chat_proto_goTypes = nil
+ file_chat_proto_depIdxs = nil
}
diff --git a/llm/proto/chat.proto b/llm/proto/chat.proto
index 7b363ac2..6e8d7bd4 100644
--- a/llm/proto/chat.proto
+++ b/llm/proto/chat.proto
@@ -23,6 +23,7 @@ option go_package =
"github.com/apache/dubbo-go-samples/llm/proto;chat";
message ChatRequest {
repeated ChatMessage messages = 1;
+ string model = 2;
}
message ChatMessage {
@@ -33,6 +34,7 @@ message ChatMessage {
message ChatResponse {
string content = 1;
+ string model = 2;
}
service ChatService {
diff --git a/llm/proto/chat.triple.go b/llm/proto/chat.triple.go
index 75aaf337..bdb7eb2f 100644
--- a/llm/proto/chat.triple.go
+++ b/llm/proto/chat.triple.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-triple. DO NOT EDIT.
//
-// Source: proto/chat.proto
+// Source: chat.proto
package chat
import (