This is an automated email from the ASF dual-hosted git repository.

amatya pushed a commit to branch 27.0.0
in repository https://gitbox.apache.org/repos/asf/druid.git


The following commit(s) were added to refs/heads/27.0.0 by this push:
     new 309bbaa970 Rename metadatacache and serverview metrics (#14716) 
(#14722)
309bbaa970 is described below

commit 309bbaa970253a13382dd8f66c46be74057bd8d7
Author: AmatyaAvadhanula <[email protected]>
AuthorDate: Tue Aug 1 17:20:26 2023 +0530

    Rename metadatacache and serverview metrics (#14716) (#14722)
    
    Co-authored-by: Kashif Faraz <[email protected]>
---
 docs/operations/metrics.md                         | 32 +++++++++++-----------
 .../org/apache/druid/client/BrokerServerView.java  |  2 +-
 .../apache/druid/client/CoordinatorServerView.java |  2 +-
 .../druid/client/HttpServerInventoryView.java      |  4 +--
 .../druid/client/HttpServerInventoryViewTest.java  |  4 +--
 .../sql/calcite/schema/SegmentMetadataCache.java   |  6 ++--
 .../calcite/schema/SegmentMetadataCacheTest.java   |  4 +--
 7 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/docs/operations/metrics.md b/docs/operations/metrics.md
index d10bc6fa03..69f20b20e9 100644
--- a/docs/operations/metrics.md
+++ b/docs/operations/metrics.md
@@ -64,12 +64,12 @@ Metrics may have additional dimensions beyond those listed 
above.
 |`sqlQuery/time`|Milliseconds taken to complete a SQL query.|`id`, 
`nativeQueryIds`, `dataSource`, `remoteAddress`, `success`, `engine`|< 1s|
 |`sqlQuery/planningTimeMs`|Milliseconds taken to plan a SQL to native 
query.|`id`, `nativeQueryIds`, `dataSource`, `remoteAddress`, `success`, 
`engine`| |
 |`sqlQuery/bytes`|Number of bytes returned in the SQL query response.|`id`, 
`nativeQueryIds`, `dataSource`, `remoteAddress`, `success`, `engine`| |
-|`init/serverview/time`|Time taken to initialize the broker server view. 
Useful to detect if brokers are taking too long to start.||Depends on the 
number of segments.|
-|`init/metadatacache/time`|Time taken to initialize the broker segment 
metadata cache. Useful to detect if brokers are taking too long to 
start||Depends on the number of segments.|
-|`segment/metadatacache/refresh/count`|Number of segments to refresh in broker 
segment metadata cache.|`dataSource`|
-|`segment/metadatacache/refresh/time`|Time taken to refresh segments in broker 
segment metadata cache.|`dataSource`|
-|`segment/serverview/sync/healthy`|Sync status of the Broker with a 
segment-loading server such as a Historical or Peon. Emitted only when 
[HTTP-based server view](../configuration/index.md#segment-management) is 
enabled. This metric can be used in conjunction with 
`segment/serverview/sync/unstableTime` to debug slow startup of 
Brokers.|`server`, `tier`|1 for fully synced servers, 0 otherwise|
-|`segment/serverview/sync/unstableTime`|Time in milliseconds for which the 
Broker has been failing to sync with a segment-loading server. Emitted only 
when [HTTP-based server view](../configuration/index.md#segment-management) is 
enabled.|`server`, `tier`|Not emitted for synced servers.|
+|`serverview/init/time`|Time taken to initialize the broker server view. 
Useful to detect if brokers are taking too long to start.||Depends on the 
number of segments.|
+|`metadatacache/init/time`|Time taken to initialize the broker segment 
metadata cache. Useful to detect if brokers are taking too long to 
start.||Depends on the number of segments.|
+|`metadatacache/refresh/count`|Number of segments to refresh in broker segment 
metadata cache.|`dataSource`|
+|`metadatacache/refresh/time`|Time taken to refresh segments in broker segment 
metadata cache.|`dataSource`|
+|`serverview/sync/healthy`|Sync status of the Broker with a segment-loading 
server such as a Historical or Peon. Emitted only when [HTTP-based server 
view](../configuration/index.md#segment-management) is enabled. This metric can 
be used in conjunction with `serverview/sync/unstableTime` to debug slow 
startup of Brokers.|`server`, `tier`|1 for fully synced servers, 0 otherwise|
+|`serverview/sync/unstableTime`|Time in milliseconds for which the Broker has 
been failing to sync with a segment-loading server. Emitted only when 
[HTTP-based server view](../configuration/index.md#segment-management) is 
enabled.|`server`, `tier`|Not emitted for synced servers.|
 
 ### Historical
 
@@ -319,16 +319,16 @@ These metrics are for the Druid Coordinator and are reset 
each time the Coordina
 |`segment/skipCompact/bytes`|Total bytes of this datasource that are skipped 
(not eligible for auto compaction) by the auto compaction.|`dataSource`|Varies|
 |`segment/skipCompact/count`|Total number of segments of this datasource that 
are skipped (not eligible for auto compaction) by the auto 
compaction.|`dataSource`|Varies|
 |`interval/skipCompact/count`|Total number of intervals of this datasource 
that are skipped (not eligible for auto compaction) by the auto 
compaction.|`dataSource`|Varies|
-|`coordinator/time`|Approximate Coordinator duty runtime in milliseconds. The 
duty dimension is the string alias of the Duty that is being run.|`duty`|Varies|
-|`coordinator/global/time`|Approximate runtime of a full coordination cycle in 
milliseconds. The `dutyGroup` dimension indicates what type of coordination 
this run was. i.e. Historical Management vs Indexing|`dutyGroup`|Varies|
-|`metadata/kill/supervisor/count`|Total number of terminated supervisors that 
were automatically deleted from metadata store per each Coordinator kill 
supervisor duty run. This metric can help adjust 
`druid.coordinator.kill.supervisor.durationToRetain` configuration based on 
whether more or less terminated supervisors need to be deleted per cycle. Note 
that this metric is only emitted when `druid.coordinator.kill.supervisor.on` is 
set to true.| |Varies|
-|`metadata/kill/audit/count`|Total number of audit logs that were 
automatically deleted from metadata store per each Coordinator kill audit duty 
run. This metric can help adjust 
`druid.coordinator.kill.audit.durationToRetain` configuration based on whether 
more or less audit logs need to be deleted per cycle. Note that this metric is 
only emitted when `druid.coordinator.kill.audit.on` is set to true.| |Varies|
-|`metadata/kill/compaction/count`|Total number of compaction configurations 
that were automatically deleted from metadata store per each Coordinator kill 
compaction configuration duty run. Note that this metric is only emitted when 
`druid.coordinator.kill.compaction.on` is set to true.| |Varies|
-|`metadata/kill/rule/count`|Total number of rules that were automatically 
deleted from metadata store per each Coordinator kill rule duty run. This 
metric can help adjust `druid.coordinator.kill.rule.durationToRetain` 
configuration based on whether more or less rules need to be deleted per cycle. 
Note that this metric is only emitted when `druid.coordinator.kill.rule.on` is 
set to true.| |Varies|
-|`metadata/kill/datasource/count`|Total number of datasource metadata that 
were automatically deleted from metadata store per each Coordinator kill 
datasource duty run (Note: datasource metadata only exists for datasource 
created from supervisor). This metric can help adjust 
`druid.coordinator.kill.datasource.durationToRetain` configuration based on 
whether more or less datasource metadata need to be deleted per cycle. Note 
that this metric is only emitted when `druid.coordinator.kill.da [...]
-|`init/serverview/time`|Time taken to initialize the coordinator server 
view.||Depends on the number of segments|
-|`segment/serverview/sync/healthy`|Sync status of the Coordinator with a 
segment-loading server such as a Historical or Peon. Emitted only when 
[HTTP-based server view](../configuration/index.md#segment-management) is 
enabled. This metric can be used in conjunction with 
`segment/serverview/sync/unstableTime` to debug slow startup of the 
Coordinator.|`server`, `tier`|1 for fully synced servers, 0 otherwise|
-|`segment/serverview/sync/unstableTime`|Time in milliseconds for which the 
Coordinator has been failing to sync with a segment-loading server. Emitted 
only when [HTTP-based server 
view](../configuration/index.md#segment-management) is enabled.|`server`, 
`tier`|Not emitted for synced servers.|
+|`coordinator/time`|Approximate Coordinator duty runtime in milliseconds. 
|`duty`|Varies|
+|`coordinator/global/time`|Approximate runtime of a full coordination cycle in 
milliseconds. The `dutyGroup` dimension indicates what type of coordination 
this run was. For example: Historical Management or 
Indexing.|`dutyGroup`|Varies|
+|`metadata/kill/supervisor/count`|Total number of terminated supervisors that 
were automatically deleted from metadata store per each Coordinator kill 
supervisor duty run. This metric can help adjust 
`druid.coordinator.kill.supervisor.durationToRetain` configuration based on 
whether more or less terminated supervisors need to be deleted per cycle. This 
metric is only emitted when `druid.coordinator.kill.supervisor.on` is set to 
true.| |Varies|
+|`metadata/kill/audit/count`|Total number of audit logs that were 
automatically deleted from metadata store per each Coordinator kill audit duty 
run. This metric can help adjust 
`druid.coordinator.kill.audit.durationToRetain` configuration based on whether 
more or less audit logs need to be deleted per cycle. This metric is emitted 
only when `druid.coordinator.kill.audit.on` is set to true.| |Varies|
+|`metadata/kill/compaction/count`|Total number of compaction configurations 
that were automatically deleted from metadata store per each Coordinator kill 
compaction configuration duty run. This metric is only emitted when 
`druid.coordinator.kill.compaction.on` is set to true.| |Varies|
+|`metadata/kill/rule/count`|Total number of rules that were automatically 
deleted from metadata store per each Coordinator kill rule duty run. This 
metric can help adjust `druid.coordinator.kill.rule.durationToRetain` 
configuration based on whether more or less rules need to be deleted per cycle. 
This metric is only emitted when `druid.coordinator.kill.rule.on` is set to 
true.| |Varies|
+|`metadata/kill/datasource/count`|Total number of datasource metadata that 
were automatically deleted from metadata store per each Coordinator kill 
datasource duty run. Note that datasource metadata only exists for datasource 
created from supervisor. This metric can help adjust 
`druid.coordinator.kill.datasource.durationToRetain` configuration based on 
whether more or less datasource metadata need to be deleted per cycle. This 
metric is only emitted when `druid.coordinator.kill.datasourc [...]
+|`serverview/init/time`|Time taken to initialize the coordinator server 
view.||Depends on the number of segments.|
+|`serverview/sync/healthy`|Sync status of the Coordinator with a 
segment-loading server such as a Historical or Peon. Emitted only when 
[HTTP-based server view](../configuration/index.md#segment-management) is 
enabled. You can use this metric in conjunction with 
`serverview/sync/unstableTime` to debug slow startup of the 
Coordinator.|`server`, `tier`|1 for fully synced servers, 0 otherwise|
+|`serverview/sync/unstableTime`|Time in milliseconds for which the Coordinator 
has been failing to sync with a segment-loading server. Emitted only when 
[HTTP-based server view](../configuration/index.md#segment-management) is 
enabled.|`server`, `tier`|Not emitted for synced servers.|
 
 ## General Health
 
diff --git a/server/src/main/java/org/apache/druid/client/BrokerServerView.java 
b/server/src/main/java/org/apache/druid/client/BrokerServerView.java
index a10dd28539..9c35b0c903 100644
--- a/server/src/main/java/org/apache/druid/client/BrokerServerView.java
+++ b/server/src/main/java/org/apache/druid/client/BrokerServerView.java
@@ -185,7 +185,7 @@ public class BrokerServerView implements TimelineServerView
       final long endMillis = System.currentTimeMillis();
       log.info("BrokerServerView initialized in [%,d] ms.", endMillis - 
startMillis);
       emitter.emit(ServiceMetricEvent.builder().build(
-          "init/serverview/time",
+          "serverview/init/time",
           endMillis - startMillis
       ));
     }
diff --git 
a/server/src/main/java/org/apache/druid/client/CoordinatorServerView.java 
b/server/src/main/java/org/apache/druid/client/CoordinatorServerView.java
index cc1159fe5a..56ad1a7887 100644
--- a/server/src/main/java/org/apache/druid/client/CoordinatorServerView.java
+++ b/server/src/main/java/org/apache/druid/client/CoordinatorServerView.java
@@ -125,7 +125,7 @@ public class CoordinatorServerView implements InventoryView
       final long endMillis = System.currentTimeMillis();
       log.info("%s initialized in [%,d] ms.", getClass().getSimpleName(), 
endMillis - startMillis);
       emitter.emit(ServiceMetricEvent.builder().build(
-          "init/serverview/time",
+          "serverview/init/time",
           endMillis - startMillis
       ));
     }
diff --git 
a/server/src/main/java/org/apache/druid/client/HttpServerInventoryView.java 
b/server/src/main/java/org/apache/druid/client/HttpServerInventoryView.java
index 2c30c16813..7b67e8802b 100644
--- a/server/src/main/java/org/apache/druid/client/HttpServerInventoryView.java
+++ b/server/src/main/java/org/apache/druid/client/HttpServerInventoryView.java
@@ -495,12 +495,12 @@ public class HttpServerInventoryView implements 
ServerInventoryView, FilteredSer
 
         final boolean isSynced = serverHolder.syncer.isSyncedSuccessfully();
         serviceEmitter.emit(
-            eventBuilder.build("segment/serverview/sync/healthy", isSynced ? 1 
: 0)
+            eventBuilder.build("serverview/sync/healthy", isSynced ? 1 : 0)
         );
         final long unstableTimeMillis = 
serverHolder.syncer.getUnstableTimeMillis();
         if (unstableTimeMillis > 0) {
           serviceEmitter.emit(
-              eventBuilder.build("segment/serverview/sync/unstableTime", 
unstableTimeMillis)
+              eventBuilder.build("serverview/sync/unstableTime", 
unstableTimeMillis)
           );
         }
       });
diff --git 
a/server/src/test/java/org/apache/druid/client/HttpServerInventoryViewTest.java 
b/server/src/test/java/org/apache/druid/client/HttpServerInventoryViewTest.java
index 70ec2b2fa4..76f23b5481 100644
--- 
a/server/src/test/java/org/apache/druid/client/HttpServerInventoryViewTest.java
+++ 
b/server/src/test/java/org/apache/druid/client/HttpServerInventoryViewTest.java
@@ -77,8 +77,8 @@ public class HttpServerInventoryViewTest
 
   private static final String EXEC_NAME_PREFIX = "InventoryViewTest";
 
-  private static final String METRIC_SUCCESS = 
"segment/serverview/sync/healthy";
-  private static final String METRIC_UNSTABLE_TIME = 
"segment/serverview/sync/unstableTime";
+  private static final String METRIC_SUCCESS = "serverview/sync/healthy";
+  private static final String METRIC_UNSTABLE_TIME = 
"serverview/sync/unstableTime";
 
   private StubServiceEmitter serviceEmitter;
 
diff --git 
a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SegmentMetadataCache.java
 
b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SegmentMetadataCache.java
index ea3dc39567..39254750d4 100644
--- 
a/sql/src/main/java/org/apache/druid/sql/calcite/schema/SegmentMetadataCache.java
+++ 
b/sql/src/main/java/org/apache/druid/sql/calcite/schema/SegmentMetadataCache.java
@@ -397,7 +397,7 @@ public class SegmentMetadataCache
       final long endMillis = System.currentTimeMillis();
       log.info("%s initialized in [%,d] ms.", getClass().getSimpleName(), 
endMillis - startMillis);
       emitter.emit(ServiceMetricEvent.builder().build(
-          "init/metadatacache/time",
+          "metadatacache/init/time",
           endMillis - startMillis
       ));
     }
@@ -722,7 +722,7 @@ public class SegmentMetadataCache
     final ServiceMetricEvent.Builder builder =
         new ServiceMetricEvent.Builder().setDimension(DruidMetrics.DATASOURCE, 
dataSource);
 
-    emitter.emit(builder.build("segment/metadatacache/refresh/count", 
segments.size()));
+    emitter.emit(builder.build("metadatacache/refresh/count", 
segments.size()));
 
     // Segment id string -> SegmentId object.
     final Map<String, SegmentId> segmentIdMap = Maps.uniqueIndex(segments, 
SegmentId::toString);
@@ -793,7 +793,7 @@ public class SegmentMetadataCache
 
     long refreshDurationMillis = stopwatch.elapsed(TimeUnit.MILLISECONDS);
 
-    emitter.emit(builder.build("segment/metadatacache/refresh/time", 
refreshDurationMillis));
+    emitter.emit(builder.build("metadatacache/refresh/time", 
refreshDurationMillis));
 
     log.debug(
         "Refreshed metadata for dataSource [%s] in %,d ms (%d segments 
queried, %d segments left).",
diff --git 
a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SegmentMetadataCacheTest.java
 
b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SegmentMetadataCacheTest.java
index 0414878b4a..aadc7fee72 100644
--- 
a/sql/src/test/java/org/apache/druid/sql/calcite/schema/SegmentMetadataCacheTest.java
+++ 
b/sql/src/test/java/org/apache/druid/sql/calcite/schema/SegmentMetadataCacheTest.java
@@ -1491,8 +1491,8 @@ public class SegmentMetadataCacheTest extends 
SegmentMetadataCacheCommon
     Assert.assertTrue(addSegmentLatch.await(1, TimeUnit.SECONDS));
     
schema.refresh(segments.stream().map(DataSegment::getId).collect(Collectors.toSet()),
 Sets.newHashSet(datasource));
 
-    emitter.verifyEmitted("segment/metadatacache/refresh/time", 
ImmutableMap.of(DruidMetrics.DATASOURCE, datasource), 1);
-    emitter.verifyEmitted("segment/metadatacache/refresh/count", 
ImmutableMap.of(DruidMetrics.DATASOURCE, datasource), 1);
+    emitter.verifyEmitted("metadatacache/refresh/time", 
ImmutableMap.of(DruidMetrics.DATASOURCE, datasource), 1);
+    emitter.verifyEmitted("metadatacache/refresh/count", 
ImmutableMap.of(DruidMetrics.DATASOURCE, datasource), 1);
   }
 
   private static DataSegment newSegment(String datasource, int partitionId)


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to