This is an automated email from the ASF dual-hosted git repository.

shreemaanabhishek pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/apisix.git


The following commit(s) were added to refs/heads/master by this push:
     new 11c9d29b2 feat: ai-rag plugin (#11568)
11c9d29b2 is described below

commit 11c9d29b264fb4502b9848305b6d8c8d77b7be6b
Author: Shreemaan Abhishek <[email protected]>
AuthorDate: Wed Oct 16 18:20:34 2024 +0545

    feat: ai-rag plugin (#11568)
---
 Makefile                                           |   5 +
 apisix/cli/config.lua                              |   1 +
 apisix/plugins/ai-rag.lua                          | 156 ++++++++
 apisix/plugins/ai-rag/embeddings/azure_openai.lua  |  88 +++++
 .../ai-rag/vector-search/azure_ai_search.lua       |  83 +++++
 conf/config.yaml.example                           |   1 +
 docs/en/latest/config.json                         |   3 +-
 docs/en/latest/plugins/ai-rag.md                   | 200 +++++++++++
 t/admin/plugins.t                                  |   1 +
 t/assets/embeddings.json                           |  25 ++
 t/plugin/ai-rag.t                                  | 392 +++++++++++++++++++++
 11 files changed, 954 insertions(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index c72a12423..a24e8f7b8 100644
--- a/Makefile
+++ b/Makefile
@@ -377,6 +377,11 @@ install: runtime
        $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/ai-proxy/drivers
       $(ENV_INSTALL) apisix/plugins/ai-proxy/drivers/*.lua $(ENV_INST_LUADIR)/apisix/plugins/ai-proxy/drivers
 
+       $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/ai-rag/embeddings
+       $(ENV_INSTALL) apisix/plugins/ai-rag/embeddings/*.lua $(ENV_INST_LUADIR)/apisix/plugins/ai-rag/embeddings
+       $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/ai-rag/vector-search
+       $(ENV_INSTALL) apisix/plugins/ai-rag/vector-search/*.lua $(ENV_INST_LUADIR)/apisix/plugins/ai-rag/vector-search
+
        # ai-content-moderation plugin
        $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/ai
        $(ENV_INSTALL) apisix/plugins/ai/*.lua $(ENV_INST_LUADIR)/apisix/plugins/ai
diff --git a/apisix/cli/config.lua b/apisix/cli/config.lua
index 57b4aa9d1..6a05fed5d 100644
--- a/apisix/cli/config.lua
+++ b/apisix/cli/config.lua
@@ -216,6 +216,7 @@ local _M = {
     "body-transformer",
     "ai-prompt-template",
     "ai-prompt-decorator",
+    "ai-rag",
     "ai-content-moderation",
     "proxy-mirror",
     "proxy-rewrite",
diff --git a/apisix/plugins/ai-rag.lua b/apisix/plugins/ai-rag.lua
new file mode 100644
index 000000000..0acd5f1a1
--- /dev/null
+++ b/apisix/plugins/ai-rag.lua
@@ -0,0 +1,156 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local next    = next
+local require = require
+local ngx_req = ngx.req
+
+local http     = require("resty.http")
+local core     = require("apisix.core")
+
+local azure_openai_embeddings = require("apisix.plugins.ai-rag.embeddings.azure_openai").schema
+local azure_ai_search_schema = require("apisix.plugins.ai-rag.vector-search.azure_ai_search").schema
+
+local HTTP_INTERNAL_SERVER_ERROR = ngx.HTTP_INTERNAL_SERVER_ERROR
+local HTTP_BAD_REQUEST = ngx.HTTP_BAD_REQUEST
+
+local schema = {
+    type = "object",
+    properties = {
+        type = "object",
+        embeddings_provider = {
+            type = "object",
+            properties = {
+                azure_openai = azure_openai_embeddings
+            },
+            -- ensure only one provider can be configured while implementing support for
+            -- other providers
+            required = { "azure_openai" },
+            maxProperties = 1,
+        },
+        vector_search_provider = {
+            type = "object",
+            properties = {
+                azure_ai_search = azure_ai_search_schema
+            },
+            -- ensure only one provider can be configured while implementing support for
+            -- other providers
+            required = { "azure_ai_search" },
+            maxProperties = 1
+        },
+    },
+    required = { "embeddings_provider", "vector_search_provider" }
+}
+
+local request_schema = {
+    type = "object",
+    properties = {
+        ai_rag = {
+            type = "object",
+            properties = {
+                vector_search = {},
+                embeddings = {},
+            },
+            required = { "vector_search", "embeddings" }
+        }
+    }
+}
+
+local _M = {
+    version = 0.1,
+    priority = 1060,
+    name = "ai-rag",
+    schema = schema,
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+function _M.access(conf, ctx)
+    local httpc = http.new()
+    local body_tab, err = core.request.get_json_request_body_table()
+    if not body_tab then
+        return HTTP_BAD_REQUEST, err
+    end
+    if not body_tab["ai_rag"] then
+        core.log.error("request body must have \"ai_rag\" field")
+        return HTTP_BAD_REQUEST
+    end
+
+    local embeddings_provider = next(conf.embeddings_provider)
+    local embeddings_provider_conf = conf.embeddings_provider[embeddings_provider]
+    local embeddings_driver = require("apisix.plugins.ai-rag.embeddings." .. embeddings_provider)
+
+    local vector_search_provider = next(conf.vector_search_provider)
+    local vector_search_provider_conf = conf.vector_search_provider[vector_search_provider]
+    local vector_search_driver = require("apisix.plugins.ai-rag.vector-search." ..
+                                        vector_search_provider)
+
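+    -- plug the provider-specific request schemas into the generic request_schema
+    -- before validating the ai_rag field of the client request body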
+    local vs_req_schema = vector_search_driver.request_schema
+    local emb_req_schema = embeddings_driver.request_schema
+
+    request_schema.properties.ai_rag.properties.vector_search = vs_req_schema
+    request_schema.properties.ai_rag.properties.embeddings = emb_req_schema
+
+    local ok, err = core.schema.check(request_schema, body_tab)
+    if not ok then
+        core.log.error("request body fails schema check: ", err)
+        return HTTP_BAD_REQUEST
+    end
+
+    local embeddings, status, err = embeddings_driver.get_embeddings(embeddings_provider_conf,
+                                                        body_tab["ai_rag"].embeddings, httpc)
+    if not embeddings then
+        core.log.error("could not get embeddings: ", err)
+        return status, err
+    end
+
+    local search_body = body_tab["ai_rag"].vector_search
+    search_body.embeddings = embeddings
+    local res, status, err = vector_search_driver.search(vector_search_provider_conf,
+                                                        search_body, httpc)
+    if not res then
+        core.log.error("could not get vector_search result: ", err)
+        return status, err
+    end
+
+    -- remove ai_rag from the request body because its purpose has been served
+    -- also, these values will cause failure when proxying requests to the LLM.
+    body_tab["ai_rag"] = nil
+
+    if not body_tab.messages then
+        body_tab.messages = {}
+    end
+
+    local augment = {
+        role = "user",
+        content = res
+    }
+    core.table.insert_tail(body_tab.messages, augment)
+
+    local req_body_json, err = core.json.encode(body_tab)
+    if not req_body_json then
+        return HTTP_INTERNAL_SERVER_ERROR, err
+    end
+
+    ngx_req.set_body_data(req_body_json)
+end
+
+
+return _M
diff --git a/apisix/plugins/ai-rag/embeddings/azure_openai.lua b/apisix/plugins/ai-rag/embeddings/azure_openai.lua
new file mode 100644
index 000000000..b6bacbf32
--- /dev/null
+++ b/apisix/plugins/ai-rag/embeddings/azure_openai.lua
@@ -0,0 +1,88 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local HTTP_INTERNAL_SERVER_ERROR = ngx.HTTP_INTERNAL_SERVER_ERROR
+local HTTP_OK = ngx.HTTP_OK
+local type = type
+
+local _M = {}
+
+_M.schema = {
+    type = "object",
+    properties = {
+        endpoint = {
+            type = "string",
+        },
+        api_key = {
+            type = "string",
+        },
+    },
+    required = { "endpoint", "api_key" }
+}
+
+function _M.get_embeddings(conf, body, httpc)
+    local body_tab, err = core.json.encode(body)
+    if not body_tab then
+        return nil, HTTP_INTERNAL_SERVER_ERROR, err
+    end
+
+    local res, err = httpc:request_uri(conf.endpoint, {
+        method = "POST",
+        headers = {
+            ["Content-Type"] = "application/json",
+            ["api-key"] = conf.api_key,
+        },
+        body = body_tab
+    })
+
+    if not res or not res.body then
+        return nil, HTTP_INTERNAL_SERVER_ERROR, err
+    end
+
+    if res.status ~= HTTP_OK then
+        return nil, res.status, res.body
+    end
+
+    local res_tab, err = core.json.decode(res.body)
+    if not res_tab then
+        return nil, HTTP_INTERNAL_SERVER_ERROR, err
+    end
+
+    if type(res_tab.data) ~= "table" or core.table.isempty(res_tab.data) then
+        return nil, HTTP_INTERNAL_SERVER_ERROR, res.body
+    end
+
+    local embeddings, err = core.json.encode(res_tab.data[1].embedding)
+    if not embeddings then
+        return nil, HTTP_INTERNAL_SERVER_ERROR, err
+    end
+
+    return res_tab.data[1].embedding
+end
+
+
+_M.request_schema = {
+    type = "object",
+    properties = {
+        input = {
+            type = "string"
+        }
+    },
+    required = { "input" }
+}
+
+return _M
diff --git a/apisix/plugins/ai-rag/vector-search/azure_ai_search.lua b/apisix/plugins/ai-rag/vector-search/azure_ai_search.lua
new file mode 100644
index 000000000..7a0106490
--- /dev/null
+++ b/apisix/plugins/ai-rag/vector-search/azure_ai_search.lua
@@ -0,0 +1,83 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local HTTP_INTERNAL_SERVER_ERROR = ngx.HTTP_INTERNAL_SERVER_ERROR
+local HTTP_OK = ngx.HTTP_OK
+
+local _M = {}
+
+_M.schema = {
+    type = "object",
+    properties = {
+        endpoint = {
+            type = "string",
+        },
+        api_key = {
+            type = "string",
+        },
+    },
+    required = {"endpoint", "api_key"}
+}
+
+
+function _M.search(conf, search_body, httpc)
+    local body = {
+        vectorQueries = {
+            {
+                kind = "vector",
+                vector = search_body.embeddings,
+                fields = search_body.fields
+            }
+        }
+    }
+    local final_body, err = core.json.encode(body)
+    if not final_body then
+        return nil, HTTP_INTERNAL_SERVER_ERROR, err
+    end
+
+    local res, err = httpc:request_uri(conf.endpoint, {
+        method = "POST",
+        headers = {
+            ["Content-Type"] = "application/json",
+            ["api-key"] = conf.api_key,
+        },
+        body = final_body
+    })
+
+    if not res or not res.body then
+        return nil, HTTP_INTERNAL_SERVER_ERROR, err
+    end
+
+    if res.status ~= HTTP_OK then
+        return nil, res.status, res.body
+    end
+
+    return res.body
+end
+
+
+_M.request_schema = {
+    type = "object",
+    properties = {
+        fields = {
+            type = "string"
+        }
+    },
+    required = { "fields" }
+}
+
+return _M
diff --git a/conf/config.yaml.example b/conf/config.yaml.example
index 44005ffd0..eea2335bc 100644
--- a/conf/config.yaml.example
+++ b/conf/config.yaml.example
@@ -479,6 +479,7 @@ plugins:                           # plugin list (sorted by priority)
   - body-transformer               # priority: 1080
   - ai-prompt-template             # priority: 1071
   - ai-prompt-decorator            # priority: 1070
+  - ai-rag                         # priority: 1060
   - ai-content-moderation          # priority: 1040 TODO: compare priority with other ai plugins
   - proxy-mirror                   # priority: 1010
   - proxy-rewrite                  # priority: 1008
diff --git a/docs/en/latest/config.json b/docs/en/latest/config.json
index c2d8996ee..a17a6ae48 100644
--- a/docs/en/latest/config.json
+++ b/docs/en/latest/config.json
@@ -100,7 +100,8 @@
             "plugins/degraphql",
             "plugins/body-transformer",
             "plugins/ai-proxy",
-            "plugins/attach-consumer-label"
+            "plugins/attach-consumer-label",
+            "plugins/ai-rag"
           ]
         },
         {
diff --git a/docs/en/latest/plugins/ai-rag.md b/docs/en/latest/plugins/ai-rag.md
new file mode 100644
index 000000000..813e5fff0
--- /dev/null
+++ b/docs/en/latest/plugins/ai-rag.md
@@ -0,0 +1,200 @@
+---
+title: ai-rag
+keywords:
+  - Apache APISIX
+  - API Gateway
+  - Plugin
+  - ai-rag
+description: This document contains information about the Apache APISIX ai-rag Plugin.
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## Description
+
+The `ai-rag` plugin integrates Retrieval-Augmented Generation (RAG) capabilities with AI models.
+It allows efficient retrieval of relevant documents or information from external data sources and
+augments the LLM responses with that data, improving the accuracy and context of generated outputs.
+
+**_As of now only [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service) and [Azure AI Search](https://azure.microsoft.com/en-us/products/ai-services/ai-search) services are supported for generating embeddings and performing vector search respectively. PRs for introducing support for other service providers are welcomed._**
+
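+To illustrate the flow, here is a rough sketch of the request body that the plugin forwards upstream after retrieval: the `ai_rag` field supplied by the client is consumed and removed, and the raw response from the vector search provider is appended to the `messages` array (created if absent) as an additional `user` entry. The content shown below is only a placeholder; the actual payload depends on the configured provider.
+
+```json
+{
+  "messages": [
+    {
+      "role": "user",
+      "content": "<raw response body returned by the vector search provider>"
+    }
+  ]
+}
+```
+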
+## Plugin Attributes
+
+| **Field**                                       | **Required** | **Type** | **Description**                                                                                                                            |
+| ----------------------------------------------- | ------------ | -------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
+| embeddings_provider                             | Yes          | object   | Configurations of the embedding models provider                                                                                             |
+| embeddings_provider.azure_openai                | Yes          | object   | Configurations of [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service) as the embedding models provider.   |
+| embeddings_provider.azure_openai.endpoint       | Yes          | string   | Azure OpenAI endpoint                                                                                                                        |
+| embeddings_provider.azure_openai.api_key        | Yes          | string   | Azure OpenAI API key                                                                                                                         |
+| vector_search_provider                          | Yes          | object   | Configuration for the vector search provider                                                                                                 |
+| vector_search_provider.azure_ai_search          | Yes          | object   | Configuration for Azure AI Search                                                                                                            |
+| vector_search_provider.azure_ai_search.endpoint | Yes          | string   | Azure AI Search endpoint                                                                                                                     |
+| vector_search_provider.azure_ai_search.api_key  | Yes          | string   | Azure AI Search API key                                                                                                                      |
+
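+Put together, a minimal `ai-rag` plugin configuration that satisfies these attributes looks roughly like the following (the endpoints and keys are placeholders; the full route definition appears under Example usage below):
+
+```json
+{
+  "ai-rag": {
+    "embeddings_provider": {
+      "azure_openai": {
+        "endpoint": "https://name.openai.azure.com/openai/deployments/text-embedding-3-small/embeddings?api-version=2023-05-15",
+        "api_key": "some-azure-openai-embeddings-key"
+      }
+    },
+    "vector_search_provider": {
+      "azure_ai_search": {
+        "endpoint": "https://name.search.windows.net/indexes/indexname/docs/search?api-version=2024-07-01",
+        "api_key": "some-azure-ai-search-key"
+      }
+    }
+  }
+}
+```
+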
+## Request Body Format
+
+The following fields must be present in the request body.
+
+| **Field**            | **Type** | **Description**                                                                                                                  |
+| -------------------- | -------- | -------------------------------------------------------------------------------------------------------------------------------- |
+| ai_rag               | object   | Configuration for AI-RAG (Retrieval Augmented Generation)                                                                          |
+| ai_rag.embeddings    | object   | Request parameters required to generate embeddings. Contents will depend on the API specification of the configured provider.      |
+| ai_rag.vector_search | object   | Request parameters required to perform vector search. Contents will depend on the API specification of the configured provider.    |
+
+- Parameters of `ai_rag.embeddings`
+
+  - Azure OpenAI
+
+  | **Name**        | **Required** | **Type** | **Description**                                                                                                             |
+  | --------------- | ------------ | -------- | --------------------------------------------------------------------------------------------------------------------------- |
+  | input           | Yes          | string   | Input text used to compute embeddings, encoded as a string.                                                                   |
+  | user            | No           | string   | A unique identifier representing your end-user, which can help in monitoring and detecting abuse.                             |
+  | encoding_format | No           | string   | The format to return the embeddings in. Can be either `float` or `base64`. Defaults to `float`.                               |
+  | dimensions      | No           | integer  | The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models.    |
+
+For other parameters please refer to the [Azure OpenAI embeddings documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#embeddings).
+
+- Parameters of `ai_rag.vector_search`
+
+  - Azure AI Search
+
+  | **Field** | **Required** | **Type** | **Description**              |
+  | --------- | ------------ | -------- | ---------------------------- |
+  | fields    | Yes          | String   | Fields for the vector search |
+
+  For other parameters please refer to the [Azure AI Search documentation](https://learn.microsoft.com/en-us/rest/api/searchservice/documents/search-post).
+
+Example request body:
+
+```json
+{
+  "ai_rag": {
+    "vector_search": { "fields": "contentVector" },
+    "embeddings": {
+      "input": "which service is good for devops",
+      "dimensions": 1024
+    }
+  }
+}
+```
+
+## Example usage
+
+First initialise these shell variables:
+
+```shell
+ADMIN_API_KEY=edd1c9f034335f136f87ad84b625c8f1
+AZURE_OPENAI_ENDPOINT=https://name.openai.azure.com/openai/deployments/gpt-4o/chat/completions
+VECTOR_SEARCH_ENDPOINT=https://name.search.windows.net/indexes/indexname/docs/search?api-version=2024-07-01
+EMBEDDINGS_ENDPOINT=https://name.openai.azure.com/openai/deployments/text-embedding-3-small/embeddings?api-version=2023-05-15
+EMBEDDINGS_KEY=secret-azure-openai-embeddings-key
+SEARCH_KEY=secret-azureai-search-key
+AZURE_OPENAI_KEY=secret-azure-openai-key
+```
+
+Create a route with the `ai-rag` and `ai-proxy` plugins like so:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes/1"; -X PUT \
+  -H "X-API-KEY: ${ADMIN_API_KEY}" \
+  -d '{
+  "uri": "/rag",
+  "plugins": {
+    "ai-rag": {
+      "embeddings_provider": {
+        "azure_openai": {
+          "endpoint": "'"$EMBEDDINGS_ENDPOINT"'",
+          "api_key": "'"$EMBEDDINGS_KEY"'"
+        }
+      },
+      "vector_search_provider": {
+        "azure_ai_search": {
+          "endpoint": "'"$VECTOR_SEARCH_ENDPOINT"'",
+          "api_key": "'"$SEARCH_KEY"'"
+        }
+      }
+    },
+    "ai-proxy": {
+      "auth": {
+        "header": {
+          "api-key": "'"$AZURE_OPENAI_KEY"'"
+        },
+        "query": {
+          "api-version": "2023-03-15-preview"
+        }
+      },
+      "model": {
+        "provider": "openai",
+        "name": "gpt-4",
+        "options": {
+          "max_tokens": 512,
+          "temperature": 1.0
+        }
+      },
+      "override": {
+        "endpoint": "'"$AZURE_OPENAI_ENDPOINT"'"
+      }
+    }
+  },
+  "upstream": {
+    "type": "roundrobin",
+    "nodes": {
+      "someupstream.com:443": 1
+    },
+    "scheme": "https",
+    "pass_host": "node"
+  }
+}'
+```
+
+The `ai-proxy` plugin is used here as it simplifies access to LLMs. Alternatively, you may configure the LLM service address in the upstream configuration and update the route URI as well.
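+
+A rough sketch of that alternative is shown below, assuming the same placeholder Azure OpenAI host used earlier: the route URI mirrors the chat completions path, the upstream points directly at `name.openai.azure.com`, and the client must now supply the `api-key` header and the `api-version` query parameter itself since `ai-proxy` is no longer attaching them.
+
+```json
+{
+  "uri": "/openai/deployments/gpt-4o/chat/completions",
+  "plugins": {
+    "ai-rag": {
+      "embeddings_provider": {
+        "azure_openai": {
+          "endpoint": "https://name.openai.azure.com/openai/deployments/text-embedding-3-small/embeddings?api-version=2023-05-15",
+          "api_key": "some-azure-openai-embeddings-key"
+        }
+      },
+      "vector_search_provider": {
+        "azure_ai_search": {
+          "endpoint": "https://name.search.windows.net/indexes/indexname/docs/search?api-version=2024-07-01",
+          "api_key": "some-azure-ai-search-key"
+        }
+      }
+    }
+  },
+  "upstream": {
+    "type": "roundrobin",
+    "nodes": {
+      "name.openai.azure.com:443": 1
+    },
+    "scheme": "https",
+    "pass_host": "node"
+  }
+}
+```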
+
+Now send a request:
+
+```shell
+curl http://127.0.0.1:9080/rag -XPOST  -H 'Content-Type: application/json' -d '{"ai_rag":{"vector_search":{"fields":"contentVector"},"embeddings":{"input":"which service is good for devops","dimensions":1024}}}'
+```
+
+You will receive a response like this:
+
+```json
+{
+  "choices": [
+    {
+      "finish_reason": "length",
+      "index": 0,
+      "message": {
+        "content": "Here are the details for some of the services you inquired 
about from your Azure search context:\n\n ... <rest of the response>",
+        "role": "assistant"
+      }
+    }
+  ],
+  "created": 1727079764,
+  "id": "chatcmpl-AAYdA40YjOaeIHfgFBkaHkUFCWxfc",
+  "model": "gpt-4o-2024-05-13",
+  "object": "chat.completion",
+  "system_fingerprint": "fp_67802d9a6d",
+  "usage": {
+    "completion_tokens": 512,
+    "prompt_tokens": 6560,
+    "total_tokens": 7072
+  }
+}
+```
diff --git a/t/admin/plugins.t b/t/admin/plugins.t
index 6541bf764..713d59d4c 100644
--- a/t/admin/plugins.t
+++ b/t/admin/plugins.t
@@ -96,6 +96,7 @@ proxy-cache
 body-transformer
 ai-prompt-template
 ai-prompt-decorator
+ai-rag
 ai-content-moderation
 proxy-mirror
 proxy-rewrite
diff --git a/t/assets/embeddings.json b/t/assets/embeddings.json
new file mode 100644
index 000000000..2baa33099
--- /dev/null
+++ b/t/assets/embeddings.json
@@ -0,0 +1,25 @@
+{
+    "object": "list",
+    "data": [
+      {
+        "object": "embedding",
+        "index": 0,
+        "embedding": [
+          123456789,
+          0.01902593,
+          0.008967914,
+          -0.013226582,
+          -0.026961878,
+          -0.017892223,
+          -0.0007785152,
+          -0.011031842,
+          0.0068531134
+        ]
+      }
+    ],
+    "model": "text-embedding-3-small",
+    "usage": {
+      "prompt_tokens": 4,
+      "total_tokens": 4
+    }
+  }
diff --git a/t/plugin/ai-rag.t b/t/plugin/ai-rag.t
new file mode 100644
index 000000000..86b0c6440
--- /dev/null
+++ b/t/plugin/ai-rag.t
@@ -0,0 +1,392 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level("info");
+repeat_each(1);
+no_long_string();
+no_root_location();
+
+
+my $resp_file = 't/assets/embeddings.json';
+open(my $fh, '<', $resp_file) or die "Could not open file '$resp_file' $!";
+my $embeddings = do { local $/; <$fh> };
+close($fh);
+
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+    my $http_config = $block->http_config // <<_EOC_;
+        server {
+            listen 3623;
+
+            default_type 'application/json';
+
+            location /embeddings {
+                content_by_lua_block {
+                    local json = require("cjson.safe")
+
+                    if ngx.req.get_method() ~= "POST" then
+                        ngx.status = 400
+                        ngx.say("Unsupported request method: ", ngx.req.get_method())
+                        return
+                    end
+                    ngx.req.read_body()
+                    local body, err = ngx.req.get_body_data()
+                    body, err = json.decode(body)
+
+                    local header_auth = ngx.req.get_headers()["api-key"]
+
+                    if header_auth ~= "key" then
+                        ngx.status = 401
+                        ngx.say("Unauthorized")
+                        return
+                    end
+
+                    ngx.status = 200
+                    ngx.say([[$embeddings]])
+                }
+            }
+
+            location /search {
+                content_by_lua_block {
+                    local json = require("cjson.safe")
+
+                    if ngx.req.get_method() ~= "POST" then
+                        ngx.status = 400
+                        ngx.say("Unsupported request method: ", ngx.req.get_method())
+                    end
+
+                    local header_auth = ngx.req.get_headers()["api-key"]
+                    if header_auth ~= "key" then
+                        ngx.status = 401
+                        ngx.say("Unauthorized")
+                        return
+                    end
+
+                    ngx.req.read_body()
+                    local body, err = ngx.req.get_body_data()
+                    body, err = json.decode(body)
+                    if body.vectorQueries[1].vector[1] ~= 123456789 then
+                        ngx.status = 500
+                        ngx.say({ error = "occurred" })
+                        return
+                    end
+
+                    ngx.status = 200
+                    ngx.print("passed")
+                }
+            }
+        }
+_EOC_
+
+    $block->set_value("http_config", $http_config);
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: minimal viable configuration
+--- config
+    location /t {
+        content_by_lua_block {
+            local plugin = require("apisix.plugins.ai-rag")
+            local ok, err = plugin.check_schema({
+                embeddings_provider = {
+                    azure_openai = {
+                        api_key = "sdfjasdfh",
+                        endpoint = "http://a.b.com"
+                    }
+                },
+                vector_search_provider = {
+                    azure_ai_search = {
+                        api_key = "iuhsdf",
+                        endpoint = "http://a.b.com"
+                    }
+                }
+            })
+
+            if not ok then
+                ngx.say(err)
+            else
+                ngx.say("passed")
+            end
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 2: vector search provider missing
+--- config
+    location /t {
+        content_by_lua_block {
+            local plugin = require("apisix.plugins.ai-rag")
+            local ok, err = plugin.check_schema({
+                embeddings_provider = {
+                    azure_openai = {
+                        api_key = "sdfjasdfh",
+                        endpoint = "http://a.b.com"
+                    }
+                }
+            })
+
+            if not ok then
+                ngx.say(err)
+            else
+                ngx.say("passed")
+            end
+        }
+    }
+--- response_body
+property "vector_search_provider" is required
+
+
+
+=== TEST 3: embeddings provider missing
+--- config
+    location /t {
+        content_by_lua_block {
+            local plugin = require("apisix.plugins.ai-rag")
+            local ok, err = plugin.check_schema({
+                vector_search_provider = {
+                    azure_ai_search = {
+                        api_key = "iuhsdf",
+                        endpoint = "http://a.b.com"
+                    }
+                }
+            })
+
+            if not ok then
+                ngx.say(err)
+            else
+                ngx.say("passed")
+            end
+        }
+    }
+--- response_body
+property "embeddings_provider" is required
+
+
+
+=== TEST 4: wrong auth header for embeddings provider
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1',
+                 ngx.HTTP_PUT,
+                 [[{
+                    "uri": "/echo",
+                    "plugins": {
+                        "ai-rag": {
+                            "embeddings_provider": {
+                                "azure_openai": {
+                                    "endpoint": "http://localhost:3623/embeddings",
+                                    "api_key": "wrongkey"
+                                }
+                            },
+                            "vector_search_provider": {
+                                "azure_ai_search": {
+                                    "endpoint": "http://localhost:3623/search",
+                                    "api_key": "key"
+                                }
+                            }
+                        }
+                    },
+                    "upstream": {
+                        "type": "roundrobin",
+                        "nodes": {
+                            "127.0.0.1:1980": 1
+                        },
+                        "scheme": "http",
+                        "pass_host": "node"
+                    }
+                }]]
+            )
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 5: send request
+--- request
+POST /echo
+{"ai_rag":{"vector_search":{"fields":"contentVector"},"embeddings":{"input":"which
 service is good for devops","dimensions":1024}}}
+--- error_code: 401
+--- response_body
+Unauthorized
+--- error_log
+could not get embeddings: Unauthorized
+
+
+
+=== TEST 6: wrong auth header for search provider
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1',
+                 ngx.HTTP_PUT,
+                 [[{
+                    "uri": "/echo",
+                    "plugins": {
+                        "ai-rag": {
+                            "embeddings_provider": {
+                                "azure_openai": {
+                                    "endpoint": "http://localhost:3623/embeddings",
+                                    "api_key": "key"
+                                }
+                            },
+                            "vector_search_provider": {
+                                "azure_ai_search": {
+                                    "endpoint": "http://localhost:3623/search",
+                                    "api_key": "wrongkey"
+                                }
+                            }
+                        }
+                    },
+                    "upstream": {
+                        "type": "roundrobin",
+                        "nodes": {
+                            "127.0.0.1:1980": 1
+                        },
+                        "scheme": "http",
+                        "pass_host": "node"
+                    }
+                }]]
+            )
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 7: send request
+--- request
+POST /echo
+{"ai_rag":{"vector_search":{"fields":"contentVector"},"embeddings":{"input":"which
 service is good for devops","dimensions":1024}}}
+--- error_code: 401
+--- error_log
+could not get vector_search result: Unauthorized
+
+
+
+=== TEST 8: send request with empty body
+--- request
+POST /echo
+--- error_code: 400
+--- response_body_chomp
+failed to get request body: request body is empty
+
+
+
+=== TEST 9: send request with vector search fields missing
+--- request
+POST /echo
+{"ai_rag":{"vector_search":{"missing-fields":"something"},"embeddings":{"input":"which
 service is good for devops","dimensions":1024}}}
+--- error_code: 400
+--- error_log
+request body fails schema check: property "ai_rag" validation failed: property "vector_search" validation failed: property "fields" is required
+
+
+
+=== TEST 10: send request with embedding input missing
+--- request
+POST /echo
+{"ai_rag":{"vector_search":{"fields":"something"},"embeddings":{"missinginput":"which
 service is good for devops"}}}
+--- error_code: 400
+--- error_log
+request body fails schema check: property "ai_rag" validation failed: property "embeddings" validation failed: property "input" is required
+
+
+
+=== TEST 11: configure plugin with right auth headers
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1',
+                 ngx.HTTP_PUT,
+                 [[{
+                    "uri": "/echo",
+                    "plugins": {
+                        "ai-rag": {
+                            "embeddings_provider": {
+                                "azure_openai": {
+                                    "endpoint": "http://localhost:3623/embeddings",
+                                    "api_key": "key"
+                                }
+                            },
+                            "vector_search_provider": {
+                                "azure_ai_search": {
+                                    "endpoint": "http://localhost:3623/search",
+                                    "api_key": "key"
+                                }
+                            }
+                        }
+                    },
+                    "upstream": {
+                        "type": "roundrobin",
+                        "nodes": {
+                            "127.0.0.1:1980": 1
+                        },
+                        "scheme": "http",
+                        "pass_host": "node"
+                    }
+                }]]
+            )
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 12: send request with correct configuration
+--- request
+POST /echo
+{"ai_rag":{"vector_search":{"fields":"something"},"embeddings":{"input":"which 
service is good for devops"}}}
+--- error_code: 200
+--- response_body eval
+qr/\{"messages":\[\{"content":"passed","role":"user"\}\]\}|\{"messages":\[\{"role":"user","content":"passed"\}\]\}/

