This is an automated email from the ASF dual-hosted git repository.

gian pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/druid.git


The following commit(s) were added to refs/heads/master by this push:
     new 2f1bcd67178 Adding `segment/scan/active` metric for processing thread 
pool. (#15060)
2f1bcd67178 is described below

commit 2f1bcd67178b8972c77bebdd526dc0ec4c50d094
Author: Karan Kumar <[email protected]>
AuthorDate: Sat Sep 30 01:04:28 2023 +0530

    Adding `segment/scan/active` metric for processing thread pool. (#15060)
---
 docs/design/extensions-contrib/dropwizard.md                      | 4 ++++
 docs/operations/metrics.md                                        | 2 ++
 .../src/main/resources/defaultWhiteListMap.json                   | 1 +
 .../src/main/resources/defaultMetricDimensions.json               | 4 ++++
 .../graphite-emitter/src/main/resources/defaultWhiteListMap.json  | 1 +
 .../prometheus-emitter/src/main/resources/defaultMetrics.json     | 1 +
 .../src/main/resources/defaultMetricDimensions.json               | 1 +
 .../apache/druid/query/MetricsEmittingQueryProcessingPool.java    | 1 +
 .../java/org/apache/druid/query/PrioritizedExecutorService.java   | 8 ++++++++
 .../druid/query/MetricsEmittingQueryProcessingPoolTest.java       | 5 ++++-
 10 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/docs/design/extensions-contrib/dropwizard.md 
b/docs/design/extensions-contrib/dropwizard.md
index fa1967cf056..50025fca157 100644
--- a/docs/design/extensions-contrib/dropwizard.md
+++ b/docs/design/extensions-contrib/dropwizard.md
@@ -137,6 +137,10 @@ Latest default metrics mapping can be found [here] 
(https://github.com/apache/dr
     "dimensions": [],
     "type": "gauge"
   },
+  "segment/scan/active": {
+    "dimensions": [],
+    "type": "gauge"
+  },
   "query/segmentAndCache/time": {
     "dimensions": [],
     "type": "timer",
diff --git a/docs/operations/metrics.md b/docs/operations/metrics.md
index c2a77a4f3d6..77a79170bec 100644
--- a/docs/operations/metrics.md
+++ b/docs/operations/metrics.md
@@ -89,6 +89,7 @@ Most metric values reset each emission period, as specified 
in `druid.monitoring
 |`query/segment/time`|Milliseconds taken to query individual segment. Includes 
time to page in the segment from disk.|`id`, `status`, `segment`, 
`vectorized`.|several hundred milliseconds|
 |`query/wait/time`|Milliseconds spent waiting for a segment to be 
scanned.|`id`, `segment`|< several hundred milliseconds|
 |`segment/scan/pending`|Number of segments in queue waiting to be 
scanned.||Close to 0|
+|`segment/scan/active`|Number of segments currently being scanned. This metric 
also indicates how many threads from `druid.processing.numThreads` are 
currently being used.||Close to `druid.processing.numThreads`|
 |`query/segmentAndCache/time`|Milliseconds taken to query individual segment 
or hit the cache (if it is enabled on the Historical process).|`id`, 
`segment`|several hundred milliseconds|
 |`query/cpu/time`|Microseconds of CPU time taken to complete a 
query.|<p>Common: `dataSource`, `type`, `interval`, `hasFilters`, `duration`, 
`context`, `remoteAddress`, `id`.</p><p> Aggregation Queries: `numMetrics`, 
`numComplexMetrics`.</p><p> GroupBy: `numDimensions`.</p><p> TopN: `threshold`, 
`dimension`.</p>|Varies|
 |`query/count`|Total number of queries.|This metric is only available if the 
`QueryCountStatsMonitor` module is included.||
@@ -104,6 +105,7 @@ Most metric values reset each emission period, as specified 
in `druid.monitoring
 |`query/time`|Milliseconds taken to complete a query.|<p>Common: `dataSource`, 
`type`, `interval`, `hasFilters`, `duration`, `context`, `remoteAddress`, 
`id`.</p><p> Aggregation Queries: `numMetrics`, `numComplexMetrics`.</p><p> 
GroupBy: `numDimensions`.</p><p> TopN: `threshold`, `dimension`.</p>|< 1s|
 |`query/wait/time`|Milliseconds spent waiting for a segment to be 
scanned.|`id`, `segment`|several hundred milliseconds|
 |`segment/scan/pending`|Number of segments in queue waiting to be 
scanned.||Close to 0|
+|`segment/scan/active`|Number of segments currently being scanned. This metric 
also indicates how many threads from `druid.processing.numThreads` are 
currently being used.||Close to `druid.processing.numThreads`|
 |`query/cpu/time`|Microseconds of CPU time taken to complete a 
query.|<p>Common: `dataSource`, `type`, `interval`, `hasFilters`, `duration`, 
`context`, `remoteAddress`, `id`.</p><p> Aggregation Queries: `numMetrics`, 
`numComplexMetrics`.</p><p> GroupBy: `numDimensions`. </p><p>TopN: `threshold`, 
`dimension`.</p>|Varies|
 |`query/count`|Number of total queries.|This metric is only available if the 
`QueryCountStatsMonitor` module is included.||
 |`query/success/count`|Number of queries successfully processed.|This metric 
is only available if the `QueryCountStatsMonitor` module is included.||
diff --git 
a/extensions-contrib/ambari-metrics-emitter/src/main/resources/defaultWhiteListMap.json
 
b/extensions-contrib/ambari-metrics-emitter/src/main/resources/defaultWhiteListMap.json
index ea31beed2a0..8ac86ed46fc 100644
--- 
a/extensions-contrib/ambari-metrics-emitter/src/main/resources/defaultWhiteListMap.json
+++ 
b/extensions-contrib/ambari-metrics-emitter/src/main/resources/defaultWhiteListMap.json
@@ -58,6 +58,7 @@
   "segment/loadQueue/failed": [],
   "segment/loadQueue/size": [],
   "segment/scan/pending": [],
+  "segment/scan/active": [],
   "segment/size": [
     "dataSource"
   ],
diff --git 
a/extensions-contrib/dropwizard-emitter/src/main/resources/defaultMetricDimensions.json
 
b/extensions-contrib/dropwizard-emitter/src/main/resources/defaultMetricDimensions.json
index 950d2638b00..849162c5ff3 100644
--- 
a/extensions-contrib/dropwizard-emitter/src/main/resources/defaultMetricDimensions.json
+++ 
b/extensions-contrib/dropwizard-emitter/src/main/resources/defaultMetricDimensions.json
@@ -42,6 +42,10 @@
     "dimensions": [],
     "type": "gauge"
   },
+  "segment/scan/active": {
+    "dimensions": [],
+    "type": "gauge"
+  },
   "query/segmentAndCache/time": {
     "dimensions": [],
     "type": "timer",
diff --git 
a/extensions-contrib/graphite-emitter/src/main/resources/defaultWhiteListMap.json
 
b/extensions-contrib/graphite-emitter/src/main/resources/defaultWhiteListMap.json
index 87cbd895165..44bd5ef8db9 100644
--- 
a/extensions-contrib/graphite-emitter/src/main/resources/defaultWhiteListMap.json
+++ 
b/extensions-contrib/graphite-emitter/src/main/resources/defaultWhiteListMap.json
@@ -43,6 +43,7 @@
   "segment/loadQueue/failed": [],
   "segment/loadQueue/size": [],
   "segment/scan/pending": [],
+  "segment/scan/active": [],
   "segment/size": [],
   "segment/usedPercent": []
 }
diff --git 
a/extensions-contrib/prometheus-emitter/src/main/resources/defaultMetrics.json 
b/extensions-contrib/prometheus-emitter/src/main/resources/defaultMetrics.json
index 88fa4347728..b08201c5630 100644
--- 
a/extensions-contrib/prometheus-emitter/src/main/resources/defaultMetrics.json
+++ 
b/extensions-contrib/prometheus-emitter/src/main/resources/defaultMetrics.json
@@ -10,6 +10,7 @@
   "query/segment/time" : { "dimensions" : [], "type" : "timer", 
"conversionFactor": 1000.0, "help": "Seconds taken to query individual segment. 
Includes time to page in the segment from disk."},
   "query/wait/time" : { "dimensions" : [], "type" : "timer", 
"conversionFactor": 1000.0, "help": "Seconds spent waiting for a segment to be 
scanned."},
   "segment/scan/pending" : { "dimensions" : [], "type" : "gauge", "help": 
"Number of segments in queue waiting to be scanned."},
+  "segment/scan/active" : { "dimensions" : [], "type" : "gauge", "help": 
"Number of segments currently being scanned."},
   "query/segmentAndCache/time" : { "dimensions" : [], "type" : "timer", 
"conversionFactor": 1000.0, "help": "Seconds taken to query individual segment 
or hit the cache (if it is enabled on the Historical process)."},
   "query/cpu/time" : { "dimensions" : ["dataSource", "type"], "type" : 
"timer", "conversionFactor": "1000000", "help": "Seconds of CPU time taken to 
complete a query"},
 
diff --git 
a/extensions-contrib/statsd-emitter/src/main/resources/defaultMetricDimensions.json
 
b/extensions-contrib/statsd-emitter/src/main/resources/defaultMetricDimensions.json
index 2e4aca39ab5..3d2422ce235 100644
--- 
a/extensions-contrib/statsd-emitter/src/main/resources/defaultMetricDimensions.json
+++ 
b/extensions-contrib/statsd-emitter/src/main/resources/defaultMetricDimensions.json
@@ -9,6 +9,7 @@
   "query/segment/time" : { "dimensions" : [], "type" : "timer"},
   "query/wait/time" : { "dimensions" : [], "type" : "timer"},
   "segment/scan/pending" : { "dimensions" : [], "type" : "gauge"},
+  "segment/scan/active" : { "dimensions" : [], "type" : "gauge"},
   "query/segmentAndCache/time" : { "dimensions" : [], "type" : "timer" },
   "query/cpu/time" : { "dimensions" : ["dataSource", "type"], "type" : "timer" 
},
 
diff --git 
a/processing/src/main/java/org/apache/druid/query/MetricsEmittingQueryProcessingPool.java
 
b/processing/src/main/java/org/apache/druid/query/MetricsEmittingQueryProcessingPool.java
index 6c3ac7eb9f3..1f831d7953e 100644
--- 
a/processing/src/main/java/org/apache/druid/query/MetricsEmittingQueryProcessingPool.java
+++ 
b/processing/src/main/java/org/apache/druid/query/MetricsEmittingQueryProcessingPool.java
@@ -41,6 +41,7 @@ public class MetricsEmittingQueryProcessingPool extends 
ForwardingQueryProcessin
   {
     if (delegate() instanceof PrioritizedExecutorService) {
       emitter.emit(metricBuilder.setMetric("segment/scan/pending", 
((PrioritizedExecutorService) delegate()).getQueueSize()));
+      emitter.emit(metricBuilder.setMetric("segment/scan/active", 
((PrioritizedExecutorService) delegate()).getActiveTasks()));
     }
   }
 
diff --git 
a/processing/src/main/java/org/apache/druid/query/PrioritizedExecutorService.java
 
b/processing/src/main/java/org/apache/druid/query/PrioritizedExecutorService.java
index 6781df60965..02636df107f 100644
--- 
a/processing/src/main/java/org/apache/druid/query/PrioritizedExecutorService.java
+++ 
b/processing/src/main/java/org/apache/druid/query/PrioritizedExecutorService.java
@@ -203,6 +203,14 @@ public class PrioritizedExecutorService extends 
AbstractExecutorService implemen
   {
     return delegateQueue.size();
   }
+
+  /**
+   * Returns the approximate number of tasks being run by the thread pool 
currently.
+   */
+  public int getActiveTasks()
+  {
+    return threadPoolExecutor.getActiveCount();
+  }
 }
 
 class PrioritizedListenableFutureTask<V> implements RunnableFuture<V>,
diff --git 
a/processing/src/test/java/org/apache/druid/query/MetricsEmittingQueryProcessingPoolTest.java
 
b/processing/src/test/java/org/apache/druid/query/MetricsEmittingQueryProcessingPoolTest.java
index aa5f5473177..feb05913ead 100644
--- 
a/processing/src/test/java/org/apache/druid/query/MetricsEmittingQueryProcessingPoolTest.java
+++ 
b/processing/src/test/java/org/apache/druid/query/MetricsEmittingQueryProcessingPoolTest.java
@@ -39,6 +39,7 @@ public class MetricsEmittingQueryProcessingPoolTest
   {
     PrioritizedExecutorService service = 
Mockito.mock(PrioritizedExecutorService.class);
     Mockito.when(service.getQueueSize()).thenReturn(10);
+    Mockito.when(service.getActiveTasks()).thenReturn(2);
     ExecutorServiceMonitor monitor = new ExecutorServiceMonitor();
     List<Event> events = new ArrayList<>();
     MetricsEmittingQueryProcessingPool processingPool = new 
MetricsEmittingQueryProcessingPool(service, monitor);
@@ -53,9 +54,11 @@ public class MetricsEmittingQueryProcessingPoolTest
       }
     };
     monitor.doMonitor(serviceEmitter);
-    Assert.assertEquals(1, events.size());
+    Assert.assertEquals(2, events.size());
     Assert.assertEquals(((ServiceMetricEvent) (events.get(0))).getMetric(), 
"segment/scan/pending");
     Assert.assertEquals(((ServiceMetricEvent) (events.get(0))).getValue(), 10);
+    Assert.assertEquals(((ServiceMetricEvent) (events.get(1))).getMetric(), 
"segment/scan/active");
+    Assert.assertEquals(((ServiceMetricEvent) (events.get(1))).getValue(), 2);
   }
 
   @Test


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to