This is an automated email from the ASF dual-hosted git repository.

abhishek pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/druid.git


The following commit(s) were added to refs/heads/master by this push:
     new 3c129f6728 Add sql planning time metric (#12923)
3c129f6728 is described below

commit 3c129f672827ce6ca0e3404a51c000e25d158e80
Author: Rohan Garg <[email protected]>
AuthorDate: Mon Aug 22 11:09:44 2022 +0530

    Add sql planning time metric (#12923)
---
 docs/configuration/index.md                        |  2 +-
 docs/operations/metrics.md                         |  2 ++
 docs/operations/request-logging.md                 |  1 +
 .../server/log/DefaultRequestLogEventTest.java     |  9 +++++-
 .../java/org/apache/druid/sql/DirectStatement.java |  2 ++
 .../org/apache/druid/sql/SqlExecutionReporter.java | 13 ++++++++
 .../druid/sql/avatica/DruidAvaticaHandlerTest.java |  2 ++
 .../org/apache/druid/sql/http/SqlResourceTest.java | 36 +++++++++++++++++++---
 8 files changed, 61 insertions(+), 6 deletions(-)

diff --git a/docs/configuration/index.md b/docs/configuration/index.md
index 14f45a1458..5e6f7d7e86 100644
--- a/docs/configuration/index.md
+++ b/docs/configuration/index.md
@@ -278,7 +278,7 @@ For native JSON request, the `sql_query` field is empty. 
Example
 
 For SQL query request, the `native_query` field is empty. Example
 ```
-2019-01-14T10:00:00.000Z        127.0.0.1       
{"sqlQuery/time":100,"sqlQuery/bytes":600,"success":true,"identity":"user1"}  
{"query":"SELECT page, COUNT(*) AS Edits FROM wikiticker WHERE 
TIME_IN_INTERVAL(\"__time\", '2015-09-12/2015-09-13') GROUP BY page ORDER BY 
Edits DESC LIMIT 
10","context":{"sqlQueryId":"c9d035a0-5ffd-4a79-a865-3ffdadbb5fdd","nativeQueryIds":"[490978e4-f5c7-4cf6-b174-346e63cf8863]"}}
+2019-01-14T10:00:00.000Z        127.0.0.1       {"sqlQuery/time":100, 
"sqlQuery/planningTimeMs":10, "sqlQuery/bytes":600, "success":true, 
"identity":"user1"}  {"query":"SELECT page, COUNT(*) AS Edits FROM wikiticker 
WHERE TIME_IN_INTERVAL(\"__time\", '2015-09-12/2015-09-13') GROUP BY page ORDER 
BY Edits DESC LIMIT 
10","context":{"sqlQueryId":"c9d035a0-5ffd-4a79-a865-3ffdadbb5fdd","nativeQueryIds":"[490978e4-f5c7-4cf6-b174-346e63cf8863]"}}
 ```
 
 #### Emitter request logging
diff --git a/docs/operations/metrics.md b/docs/operations/metrics.md
index 5b58aceab4..8bc302e598 100644
--- a/docs/operations/metrics.md
+++ b/docs/operations/metrics.md
@@ -57,6 +57,7 @@ Metrics may have additional dimensions beyond those listed 
above.
 |`query/segments/count`|This metric is not enabled by default. See the 
`QueryMetrics` Interface for reference regarding enabling this metric. Number 
of segments that will be touched by the query. In the broker, it makes a plan 
to distribute the query to realtime tasks and historicals based on a snapshot 
of segment distribution state. If there are some segments moved after this 
snapshot is created, certain historicals and realtime tasks can report those 
segments as missing to the broker.  [...]
 |`query/priority`|Assigned lane and priority, only if Laning strategy is 
enabled. Refer to [Laning 
strategies](../configuration/index.md#laning-strategies)|lane, dataSource, 
type|0|
 |`sqlQuery/time`|Milliseconds taken to complete a SQL query.|id, 
nativeQueryIds, dataSource, remoteAddress, success.|< 1s|
+|`sqlQuery/planningTimeMs`|Milliseconds taken to plan a SQL query into a 
native query.|id, nativeQueryIds, dataSource, remoteAddress, success.| |
 |`sqlQuery/bytes`|Number of bytes returned in the SQL query response.|id, 
nativeQueryIds, dataSource, remoteAddress, success.| |
 
 ### Historical
@@ -140,6 +141,7 @@ If SQL is enabled, the Broker will emit the following 
metrics for SQL.
 |Metric|Description|Dimensions|Normal Value|
 |------|-----------|----------|------------|
 |`sqlQuery/time`|Milliseconds taken to complete a SQL.|id, nativeQueryIds, 
dataSource, remoteAddress, success.|< 1s|
+|`sqlQuery/planningTimeMs`|Milliseconds taken to plan a SQL query into a 
native query.|id, nativeQueryIds, dataSource, remoteAddress, success.| |
 |`sqlQuery/bytes`|number of bytes returned in SQL response.|id, 
nativeQueryIds, dataSource, remoteAddress, success.| |
 
 ## Ingestion metrics
diff --git a/docs/operations/request-logging.md 
b/docs/operations/request-logging.md
index 6565cd5945..3bccfd3ef9 100644
--- a/docs/operations/request-logging.md
+++ b/docs/operations/request-logging.md
@@ -122,6 +122,7 @@ The following shows an example log emitter output:
         "queryStats":
         {
             "sqlQuery/time": 43,
+            "sqlQuery/planningTimeMs": 5,
             "sqlQuery/bytes": 9351,
             "success": true,
             "context":
diff --git 
a/server/src/test/java/org/apache/druid/server/log/DefaultRequestLogEventTest.java
 
b/server/src/test/java/org/apache/druid/server/log/DefaultRequestLogEventTest.java
index 16398d440c..126705ec8b 100644
--- 
a/server/src/test/java/org/apache/druid/server/log/DefaultRequestLogEventTest.java
+++ 
b/server/src/test/java/org/apache/druid/server/log/DefaultRequestLogEventTest.java
@@ -130,7 +130,14 @@ public class DefaultRequestLogEventTest
     final String host = "127.0.0.1";
     final String sql = "select * from 1337";
     final QueryStats queryStats = new QueryStats(
-        ImmutableMap.of("sqlQuery/time", 13L, "sqlQuery/bytes", 10L, 
"success", true, "identity", "allowAll"));
+        ImmutableMap.of(
+            "sqlQuery/time", 13L,
+            "sqlQuery/planningTimeMs", 1L,
+            "sqlQuery/bytes", 10L,
+            "success", true,
+            "identity", "allowAll"
+        )
+    );
 
     RequestLogLine nativeLine = RequestLogLine.forSql(
         sql,
diff --git a/sql/src/main/java/org/apache/druid/sql/DirectStatement.java 
b/sql/src/main/java/org/apache/druid/sql/DirectStatement.java
index 03318ed802..d73761c825 100644
--- a/sql/src/main/java/org/apache/druid/sql/DirectStatement.java
+++ b/sql/src/main/java/org/apache/druid/sql/DirectStatement.java
@@ -202,6 +202,7 @@ public class DirectStatement extends AbstractStatement 
implements Cancelable
     if (state != State.START) {
       throw new ISE("Can plan a query only once.");
     }
+    long planningStartNanos = System.nanoTime();
     try (DruidPlanner planner = sqlToolbox.plannerFactory.createPlanner(
         sqlToolbox.engine,
         queryPlus.sql(),
@@ -218,6 +219,7 @@ public class DirectStatement extends AbstractStatement 
implements Cancelable
       prepareResult = planner.prepareResult();
       // Double check needed by SqlResourceTest
       transition(State.PREPARED);
+      reporter.planningTimeNanos(System.nanoTime() - planningStartNanos);
       return resultSet;
     }
     catch (RuntimeException e) {
diff --git a/sql/src/main/java/org/apache/druid/sql/SqlExecutionReporter.java 
b/sql/src/main/java/org/apache/druid/sql/SqlExecutionReporter.java
index 94e9f93c93..c3280266be 100644
--- a/sql/src/main/java/org/apache/druid/sql/SqlExecutionReporter.java
+++ b/sql/src/main/java/org/apache/druid/sql/SqlExecutionReporter.java
@@ -54,6 +54,7 @@ public class SqlExecutionReporter
   private final long startNs;
   private Throwable e;
   private long bytesWritten;
+  private long planningTimeNanos;
 
   public SqlExecutionReporter(
       final AbstractStatement stmt,
@@ -76,6 +77,11 @@ public class SqlExecutionReporter
     this.bytesWritten = bytesWritten;
   }
 
+  public void planningTimeNanos(final long planningTimeNanos)
+  {
+    this.planningTimeNanos = planningTimeNanos;
+  }
+
   public void emit()
   {
     final boolean success = e == null;
@@ -105,9 +111,16 @@ public class SqlExecutionReporter
       if (bytesWritten >= 0) {
         emitter.emit(metricBuilder.build("sqlQuery/bytes", bytesWritten));
       }
+      if (planningTimeNanos >= 0) {
+        emitter.emit(metricBuilder.build(
+            "sqlQuery/planningTimeMs",
+            TimeUnit.NANOSECONDS.toMillis(planningTimeNanos)
+        ));
+      }
 
       final Map<String, Object> statsMap = new LinkedHashMap<>();
       statsMap.put("sqlQuery/time", 
TimeUnit.NANOSECONDS.toMillis(queryTimeNs));
+      statsMap.put("sqlQuery/planningTimeMs", 
TimeUnit.NANOSECONDS.toMillis(planningTimeNanos));
       statsMap.put("sqlQuery/bytes", bytesWritten);
       statsMap.put("success", success);
       QueryContext queryContext;
diff --git 
a/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java 
b/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java
index bc439b660f..850fa36231 100644
--- 
a/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java
+++ 
b/sql/src/test/java/org/apache/druid/sql/avatica/DruidAvaticaHandlerTest.java
@@ -1097,6 +1097,7 @@ public class DruidAvaticaHandlerTest extends 
CalciteTestBase
       Assert.assertEquals(true, stats.get("success"));
       Assert.assertEquals("regularUser", stats.get("identity"));
       Assert.assertTrue(stats.containsKey("sqlQuery/time"));
+      Assert.assertTrue(stats.containsKey("sqlQuery/planningTimeMs"));
       Assert.assertTrue(stats.containsKey("sqlQuery/bytes"));
     }
 
@@ -1151,6 +1152,7 @@ public class DruidAvaticaHandlerTest extends 
CalciteTestBase
       Assert.assertEquals(true, stats.get("success"));
       Assert.assertEquals("regularUser", stats.get("identity"));
       Assert.assertTrue(stats.containsKey("sqlQuery/time"));
+      Assert.assertTrue(stats.containsKey("sqlQuery/planningTimeMs"));
       Assert.assertTrue(stats.containsKey("sqlQuery/bytes"));
     }
 
diff --git a/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java 
b/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java
index 7fe468a2e7..7ba0fb92a7 100644
--- a/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java
+++ b/sql/src/test/java/org/apache/druid/sql/http/SqlResourceTest.java
@@ -43,7 +43,7 @@ import org.apache.druid.java.util.common.concurrent.Execs;
 import org.apache.druid.java.util.common.guava.LazySequence;
 import org.apache.druid.java.util.common.guava.Sequence;
 import org.apache.druid.java.util.common.io.Closer;
-import org.apache.druid.java.util.emitter.service.ServiceEmitter;
+import org.apache.druid.java.util.metrics.StubServiceEmitter;
 import org.apache.druid.math.expr.ExprMacroTable;
 import org.apache.druid.query.BadQueryContextException;
 import org.apache.druid.query.BaseQuery;
@@ -62,7 +62,6 @@ import org.apache.druid.server.QueryScheduler;
 import org.apache.druid.server.QueryStackTests;
 import org.apache.druid.server.initialization.ServerConfig;
 import org.apache.druid.server.log.TestRequestLogger;
-import org.apache.druid.server.metrics.NoopServiceEmitter;
 import org.apache.druid.server.scheduling.HiLoQueryLaningStrategy;
 import org.apache.druid.server.scheduling.ManualQueryPrioritizationStrategy;
 import org.apache.druid.server.security.Access;
@@ -161,6 +160,7 @@ public class SqlResourceTest extends CalciteTestBase
   private SqlLifecycleManager lifecycleManager;
   private NativeSqlEngine engine;
   private SqlStatementFactory sqlStatementFactory;
+  private StubServiceEmitter stubServiceEmitter;
 
   private CountDownLatch lifecycleAddLatch;
   private final SettableSupplier<NonnullPair<CountDownLatch, Boolean>> 
validateAndAuthorizeLatchSupplier = new SettableSupplier<>();
@@ -252,14 +252,14 @@ public class SqlResourceTest extends CalciteTestBase
         }
       }
     };
-    final ServiceEmitter emitter = new NoopServiceEmitter();
+    stubServiceEmitter = new StubServiceEmitter("test", "test");
     final AuthConfig authConfig = new AuthConfig();
     final DefaultQueryConfig defaultQueryConfig = new 
DefaultQueryConfig(ImmutableMap.of());
     engine = CalciteTests.createMockSqlEngine(walker, conglomerate);
     final SqlToolbox sqlToolbox = new SqlToolbox(
         engine,
         plannerFactory,
-        emitter,
+        stubServiceEmitter,
         testRequestLogger,
         scheduler,
         authConfig,
@@ -359,6 +359,33 @@ public class SqlResourceTest extends CalciteTestBase
     Assert.assertTrue(lifecycleManager.getAll("id").isEmpty());
   }
 
+  @Test
+  public void testSqlLifecycleMetrics() throws Exception
+  {
+    final List<Map<String, Object>> rows = doPost(
+        createSimpleQueryWithId("id", "SELECT COUNT(*) AS cnt, 'foo' AS TheFoo 
FROM druid.foo")
+    ).rhs;
+
+    Assert.assertEquals(
+        ImmutableList.of(
+            ImmutableMap.of("cnt", 6, "TheFoo", "foo")
+        ),
+        rows
+    );
+    checkSqlRequestLog(true);
+    Assert.assertTrue(lifecycleManager.getAll("id").isEmpty());
+    Set<String> metricNames = ImmutableSet.of("sqlQuery/time", 
"sqlQuery/bytes", "sqlQuery/planningTimeMs");
+    Assert.assertEquals(3, stubServiceEmitter.getEvents().size());
+    for (String metricName : metricNames) {
+      Assert.assertTrue(
+          stubServiceEmitter.getEvents()
+                            .stream()
+                            .anyMatch(event -> 
event.toMap().containsValue(metricName))
+      );
+    }
+  }
+
+
   @Test
   public void testCountStarExtendedCharacters() throws Exception
   {
@@ -1712,6 +1739,7 @@ public class SqlResourceTest extends CalciteTestBase
     Assert.assertEquals(success, stats.get("success"));
     Assert.assertEquals(CalciteTests.REGULAR_USER_AUTH_RESULT.getIdentity(), 
stats.get("identity"));
     Assert.assertTrue(stats.containsKey("sqlQuery/time"));
+    Assert.assertTrue(stats.containsKey("sqlQuery/planningTimeMs"));
     
Assert.assertTrue(queryContext.containsKey(PlannerContext.CTX_SQL_QUERY_ID));
     if (success) {
       Assert.assertTrue(stats.containsKey("sqlQuery/bytes"));


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to