This is an automated email from the ASF dual-hosted git repository.

jin pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-hugegraph-ai.git


The following commit(s) were added to refs/heads/main by this push:
     new 7f7c82c  feat(llm): support intent recognition V1 (#159)
7f7c82c is described below

commit 7f7c82c1c4d7053c1d80cee1f485192228af08cc
Author: HaoJin Yang <1454...@gmail.com>
AuthorDate: Tue Jan 14 19:03:31 2025 +0800

    feat(llm): support intent recognition V1 (#159)
---
 .../src/hugegraph_llm/config/prompt_config.py      | 54 +++++++++++++++-------
 .../src/hugegraph_llm/demo/rag_demo/rag_block.py   |  7 ++-
 2 files changed, 42 insertions(+), 19 deletions(-)

diff --git a/hugegraph-llm/src/hugegraph_llm/config/prompt_config.py b/hugegraph-llm/src/hugegraph_llm/config/prompt_config.py
index 8f55fa6..fee85d2 100644
--- a/hugegraph-llm/src/hugegraph_llm/config/prompt_config.py
+++ b/hugegraph-llm/src/hugegraph_llm/config/prompt_config.py
@@ -179,30 +179,50 @@ MAX_KEYWORDS: {max_keywords}
     # """
 
     gremlin_generate_prompt = """
-You are an expert in graph query language(Gremlin), your role is to understand the schema of the graph and generate 
-accurate Gremlin code based on the given instructions.
-
-# Graph Schema:
-```json
-{schema}
-```
-# Rule:
-1. Could use the vertex ID directly if it's given in the context.
-2. The output format must be like:
+You are an expert in graph query language (Gremlin). Your role is to understand the schema of the graph, recognize the intent behind user queries, and generate accurate Gremlin code based on the given instructions.
+
+### Tasks
+## Complex Query Detection:
+Assess the user’s query to determine its complexity based on the following criteria:
+
+1. Multiple Reasoning Steps: The query requires several logical steps to arrive at the final result.
+2. Conditional Logic: The query includes multiple conditions or filters that depend on each other.
+3. Nested Queries: The query contains sub-queries or nested logical statements.
+4. High-Level Abstractions: The query requests high-level summaries or insights that require intricate data manipulation.
+
+# Examples of Complex Queries:
+“Retrieve all users who have posted more than five articles and have at least two comments with a positive sentiment score.”
+“Calculate the average response time of servers in each data center and identify which data centers are below the required performance threshold after the latest update.”
+
+# Rules
+- **Complex Query Handling**:
+    - **Detection**: If the user's query meets **any** of the complexity criteria listed above, it is considered **complex**.
+    - **Response**: For complex queries, **do not** proceed to Gremlin Query Generation. Instead, directly return the following Gremlin query:
+    ```gremlin
+    g.V().limit(0)
+    ```
+- **Simple Query Handling**:
+    - If the query does **not** meet any of the complexity criteria, it is considered **simple**.
+    - Proceed to the Gremlin Query Generation task as outlined below.
+
+## Gremlin Query Generation (Executed only if the query is not complex):
+# Rules
+You may use the vertex ID directly if it’s provided in the context.
+The output format must be as follows:
 ```gremlin
 g.V().limit(10)
 ```
+Graph Schema:
+{schema}
+Refer Gremlin Example Pair:
+{example}
 
-# Extracted vertex vid:
+Referenced Extracted Vertex IDs Related to the Query:
 {vertices}
 
-# Given the example query-gremlin pairs:
-{example}
-
-# Generate  gremlin from the following user query.
+Generate Gremlin from the Following User Query:
 {query}
-
-The generated gremlin is:
+The generated Gremlin is:
 """
 
     doc_input_text: str = """Meet Sarah, a 30-year-old attorney, and her roommate, James, whom she's shared a home with since 2010.
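For reference, a minimal sketch (not part of this commit) of how the placeholders in the prompt above could be filled before the LLM call. The import path and the plain str.format substitution are assumptions, and the schema, example pair, vertex IDs, and query below are made-up values for illustration only:

```python
# Hypothetical illustration: fill the prompt's {schema}/{example}/{vertices}/{query}
# placeholders with sample values before sending it to the LLM.
from hugegraph_llm.config import prompt  # assumed import path for the prompt object

filled = prompt.gremlin_generate_prompt.format(
    schema='{"vertexlabels": [{"name": "person"}], "edgelabels": [{"name": "knows"}]}',
    example="query: who does Tom know?\ngremlin: g.V().has('person','name','Tom').out('knows')",
    vertices="['1:Tom']",
    query="List the people Tom knows.",
)
print(filled)
```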
diff --git a/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/rag_block.py b/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/rag_block.py
index 822b080..477660b 100644
--- a/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/rag_block.py
+++ b/hugegraph-llm/src/hugegraph_llm/demo/rag_demo/rag_block.py
@@ -42,7 +42,7 @@ def rag_answer(
     answer_prompt: str,
     keywords_extract_prompt: str,
     gremlin_tmpl_num: Optional[int] = 2,
-    gremlin_prompt: Optional[str] = prompt.gremlin_generate_prompt,
+    gremlin_prompt: Optional[str] = None,
 ) -> Tuple:
     """
     Generate an answer using the RAG (Retrieval-Augmented Generation) pipeline.
@@ -52,13 +52,16 @@ def rag_answer(
     4. Synthesize the final answer.
     5. Run the pipeline and return the results.
     """
+
+    gremlin_prompt = gremlin_prompt or prompt.gremlin_generate_prompt
     should_update_prompt = (
         prompt.default_question != text
         or prompt.answer_prompt != answer_prompt
         or prompt.keywords_extract_prompt != keywords_extract_prompt
         or prompt.gremlin_generate_prompt != gremlin_prompt
+        or prompt.custom_rerank_info != custom_related_information
     )
-    if should_update_prompt or prompt.custom_rerank_info != custom_related_information:
+    if should_update_prompt:
         prompt.custom_rerank_info = custom_related_information
         prompt.default_question = text
         prompt.answer_prompt = answer_prompt

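A side note on the rag_block.py change above: moving the default out of the signature means prompt.gremlin_generate_prompt is read at call time rather than being captured once when the module is imported, so later updates to the prompt object are honored. A minimal sketch of the same late-binding pattern, using a hypothetical Settings class in place of the real prompt object:

```python
from typing import Optional


class Settings:
    """Hypothetical stand-in for the shared `prompt` configuration object."""
    gremlin_generate_prompt: str = "v1 prompt"


settings = Settings()


def rag_answer(text: str, gremlin_prompt: Optional[str] = None) -> str:
    # Resolve the default at call time, not at import time, so runtime edits
    # to settings.gremlin_generate_prompt are picked up on every call.
    gremlin_prompt = gremlin_prompt or settings.gremlin_generate_prompt
    return f"{gremlin_prompt} :: {text}"


settings.gremlin_generate_prompt = "v2 prompt"
print(rag_answer("who does Tom know?"))  # prints "v2 prompt :: who does Tom know?"
```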