This is an automated email from the ASF dual-hosted git repository.

maytasm pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/druid.git


The following commit(s) were added to refs/heads/master by this push:
     new 3257913  Improve query error logging (#11519)
3257913 is described below

commit 32579137378a9c2e44784489c81b358eaeb75f29
Author: Maytas Monsereenusorn <[email protected]>
AuthorDate: Thu Aug 5 22:51:09 2021 +0700

    Improve query error logging (#11519)
    
    * Improve query error logging
    
    * add docs
    
    * address comments
    
    * address comments
---
 docs/querying/query-context.md                     |  1 +
 .../java/org/apache/druid/query/QueryContexts.java |  7 +++++++
 .../org/apache/druid/query/QueryContextsTest.java  | 24 ++++++++++++++++++++++
 .../org/apache/druid/server/QueryLifecycle.java    |  7 ++++++-
 4 files changed, 38 insertions(+), 1 deletion(-)

diff --git a/docs/querying/query-context.md b/docs/querying/query-context.md
index a566b8b..b46b40f 100644
--- a/docs/querying/query-context.md
+++ b/docs/querying/query-context.md
@@ -61,6 +61,7 @@ Unless otherwise noted, the following parameters apply to all 
query types.
 |useFilterCNF|`false`| If true, Druid will attempt to convert the query filter 
to Conjunctive Normal Form (CNF). During query processing, columns can be 
pre-filtered by intersecting the bitmap indexes of all values that match the 
eligible filters, often greatly reducing the raw number of rows which need to 
be scanned. But this effect only happens for the top level filter, or 
individual clauses of a top level 'and' filter. As such, filters in CNF 
potentially have a higher chance to utiliz [...]
 |secondaryPartitionPruning|`true`|Enable secondary partition pruning on the 
Broker. The Broker will always prune unnecessary segments from the input scan 
based on a filter on time intervals, but if the data is further partitioned 
with hash or range partitioning, this option will enable additional pruning 
based on a filter on secondary partition dimensions.|
 |enableJoinLeftTableScanDirect|`false`|This flag applies to queries which have 
joins. For joins, where left child is a simple scan with a filter, by default, 
druid will run the scan as a query and then join the results to the right child 
on broker. Setting this flag to true overrides that behavior and druid will 
attempt to push the join to data servers instead. Please note that the flag 
could be applicable to queries even if there is no explicit join, since queries 
can internally transla [...]
+|debug| `false` | Flag indicating whether to enable debugging outputs for the 
query. When set to false, no additional logs will be produced (logs produced 
will be entirely dependent on your logging level). When set to true, the 
following additional logs will be produced:<br />- Log the stack trace of the 
exception (if any) produced by the query |
 
 ## Query-type-specific parameters
 
diff --git a/processing/src/main/java/org/apache/druid/query/QueryContexts.java 
b/processing/src/main/java/org/apache/druid/query/QueryContexts.java
index 4b16ad4..4a293e5 100644
--- a/processing/src/main/java/org/apache/druid/query/QueryContexts.java
+++ b/processing/src/main/java/org/apache/druid/query/QueryContexts.java
@@ -65,6 +65,7 @@ public class QueryContexts
   public static final String RETURN_PARTIAL_RESULTS_KEY = 
"returnPartialResults";
   public static final String USE_CACHE_KEY = "useCache";
   public static final String SECONDARY_PARTITION_PRUNING_KEY = 
"secondaryPartitionPruning";
+  public static final String ENABLE_DEBUG = "debug";
   public static final String BY_SEGMENT_KEY = "bySegment";
   public static final String BROKER_SERVICE_NAME = "brokerService";
 
@@ -88,6 +89,7 @@ public class QueryContexts
   public static final boolean DEFAULT_ENABLE_SQL_JOIN_LEFT_SCAN_DIRECT = false;
   public static final boolean DEFAULT_USE_FILTER_CNF = false;
   public static final boolean DEFAULT_SECONDARY_PARTITION_PRUNING = true;
+  public static final boolean DEFAULT_ENABLE_DEBUG = false;
 
   @SuppressWarnings("unused") // Used by Jackson serialization
   public enum Vectorize
@@ -322,6 +324,11 @@ public class QueryContexts
     return parseBoolean(query, SECONDARY_PARTITION_PRUNING_KEY, 
DEFAULT_SECONDARY_PARTITION_PRUNING);
   }
 
+  public static <T> boolean isDebug(Query<T> query)
+  {
+    return parseBoolean(query, ENABLE_DEBUG, DEFAULT_ENABLE_DEBUG);
+  }
+
   public static <T> Query<T> withMaxScatterGatherBytes(Query<T> query, long 
maxScatterGatherBytesLimit)
   {
     Object obj = query.getContextValue(MAX_SCATTER_GATHER_BYTES_KEY);
diff --git 
a/processing/src/test/java/org/apache/druid/query/QueryContextsTest.java 
b/processing/src/test/java/org/apache/druid/query/QueryContextsTest.java
index 4de31d1..764a81a 100644
--- a/processing/src/test/java/org/apache/druid/query/QueryContextsTest.java
+++ b/processing/src/test/java/org/apache/druid/query/QueryContextsTest.java
@@ -177,4 +177,28 @@ public class QueryContextsTest
     exception.expect(ClassCastException.class);
     QueryContexts.getBrokerServiceName(query);
   }
+
+  @Test
+  public void testDefaultEnableQueryDebugging()
+  {
+    Query<?> query = new TestQuery(
+        new TableDataSource("test"),
+        new 
MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.of("0/100"))),
+        false,
+        ImmutableMap.of()
+    );
+    Assert.assertFalse(QueryContexts.isDebug(query));
+  }
+
+  @Test
+  public void testEnableQueryDebuggingSetToTrue()
+  {
+    Query<?> query = new TestQuery(
+        new TableDataSource("test"),
+        new 
MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.of("0/100"))),
+        false,
+        ImmutableMap.of(QueryContexts.ENABLE_DEBUG, true)
+    );
+    Assert.assertTrue(QueryContexts.isDebug(query));
+  }
 }
diff --git a/server/src/main/java/org/apache/druid/server/QueryLifecycle.java 
b/server/src/main/java/org/apache/druid/server/QueryLifecycle.java
index 5da60d4..23be4e5 100644
--- a/server/src/main/java/org/apache/druid/server/QueryLifecycle.java
+++ b/server/src/main/java/org/apache/druid/server/QueryLifecycle.java
@@ -36,6 +36,7 @@ import org.apache.druid.query.DefaultQueryConfig;
 import org.apache.druid.query.DruidMetrics;
 import org.apache.druid.query.GenericQueryMetricsFactory;
 import org.apache.druid.query.Query;
+import org.apache.druid.query.QueryContexts;
 import org.apache.druid.query.QueryInterruptedException;
 import org.apache.druid.query.QueryMetrics;
 import org.apache.druid.query.QueryPlus;
@@ -316,7 +317,11 @@ public class QueryLifecycle
 
       if (e != null) {
         statsMap.put("exception", e.toString());
-        log.noStackTrace().warn(e, "Exception while processing queryId [%s]", 
baseQuery.getId());
+        if (QueryContexts.isDebug(baseQuery)) {
+          log.error(e, "Exception while processing queryId [%s]", 
baseQuery.getId());
+        } else {
+          log.noStackTrace().error(e, "Exception while processing queryId 
[%s]", baseQuery.getId());
+        }
         if (e instanceof QueryInterruptedException || e instanceof 
QueryTimeoutException) {
           // Mimic behavior from QueryResource, where this code was originally 
taken from.
           statsMap.put("interrupted", true);

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to