asafm commented on code in PR #17072:
URL: https://github.com/apache/pulsar/pull/17072#discussion_r1003122982


##########
pulsar-broker/src/test/java/org/apache/pulsar/broker/stats/MetadataStoreStatsTest.java:
##########
@@ -137,4 +137,108 @@ public void testMetadataStoreStats() throws Exception {
         }
     }
 
+    @Test
+    public void testBatchMetadataStoreMetrics() throws Exception {
+        String ns = "prop/ns-abc1";
+        admin.namespaces().createNamespace(ns);
+
+        String topic = "persistent://prop/ns-abc1/metadata-store-" + UUID.randomUUID();
+        String subName = "my-sub1";
+
+        @Cleanup
+        Producer<String> producer = pulsarClient.newProducer(Schema.STRING)
+                .topic(topic).create();
+        @Cleanup
+        Consumer<String> consumer = pulsarClient.newConsumer(Schema.STRING)
+                .topic(topic).subscriptionName(subName).subscribe();
+
+        for (int i = 0; i < 100; i++) {
+            producer.newMessage().value(UUID.randomUUID().toString()).send();
+        }
+
+        for (;;) {
+            Message<String> message = consumer.receive(10, TimeUnit.SECONDS);
+            if (message == null) {
+                break;
+            }
+            consumer.acknowledge(message);
+        }
+
+        ByteArrayOutputStream output = new ByteArrayOutputStream();
+        PrometheusMetricsGenerator.generate(pulsar, false, false, false, false, output);
+        String metricsStr = output.toString();
+        Multimap<String, PrometheusMetricsTest.Metric> metricsMap = PrometheusMetricsTest.parseMetrics(metricsStr);
+
+        Collection<PrometheusMetricsTest.Metric> opsOverflow = metricsMap.get("pulsar_batch_metadata_store_overflow_ops" + "_total");
+        Collection<PrometheusMetricsTest.Metric> queueingOps = metricsMap.get("pulsar_batch_metadata_store_queueing_ops");
+        Collection<PrometheusMetricsTest.Metric> executorQueueSize = metricsMap.get("pulsar_batch_metadata_store_executor_queue_size");
+        Collection<PrometheusMetricsTest.Metric> opsWaiting = metricsMap.get("pulsar_batch_metadata_store_waiting_ms" + "_sum");
+
+        Assert.assertTrue(opsOverflow.size() > 0 && opsOverflow.size() % 2 == 0);
+        Assert.assertTrue(queueingOps.size() > 0 && queueingOps.size() % 2 == 0);
+        Assert.assertTrue(executorQueueSize.size() > 1);
+        Assert.assertTrue(opsWaiting.size() > 1);
+
+        int readOpsOverflow = 0;
+        int writeOpsOverflow = 0;
+        for (PrometheusMetricsTest.Metric m : opsOverflow) {
+            Assert.assertEquals(m.tags.get("cluster"), "test");
+            String metadataStoreName = m.tags.get("name");
+            Assert.assertNotNull(metadataStoreName);
+            Assert.assertTrue(metadataStoreName.equals(MetadataStoreConfig.METADATA_STORE)
+                    || metadataStoreName.equals(MetadataStoreConfig.CONFIGURATION_METADATA_STORE)
+                    || metadataStoreName.equals(MetadataStoreConfig.STATE_METADATA_STORE));
+            String opType = m.tags.get("type");
+            Assert.assertNotNull(opType);
+            if (opType.equals("read")) {
+                readOpsOverflow++;
+            } else if (opType.equals("write")){
+                writeOpsOverflow++;
+            }
+            Assert.assertTrue(m.value >= 0);
+        }
+        Assert.assertEquals(readOpsOverflow, writeOpsOverflow);
+        Assert.assertTrue(readOpsOverflow > 0);

Review Comment:
   Maybe I misunderstand the overflow ops metric.
   If the executor service has a queue with capacity = 5 and, say, 2 threads, then submitting 8 tasks means 7 will be accepted, the queue will be full, and the 8th will be rejected and counted as overflow, no?
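   For illustration only (not part of the PR), here is a minimal, self-contained sketch of the scenario I mean, using a plain `ThreadPoolExecutor` with a hypothetical `overflowOps` counter incremented in the rejection handler as a stand-in for the overflow ops metric: 2 threads, queue capacity 5, 8 submissions.
   
   ```java
   import java.util.concurrent.ArrayBlockingQueue;
   import java.util.concurrent.CountDownLatch;
   import java.util.concurrent.ThreadPoolExecutor;
   import java.util.concurrent.TimeUnit;
   import java.util.concurrent.atomic.AtomicLong;
   
   public class OverflowSketch {
       public static void main(String[] args) throws InterruptedException {
           // Hypothetical counter standing in for the overflow ops metric.
           AtomicLong overflowOps = new AtomicLong();
           CountDownLatch release = new CountDownLatch(1);
   
           // 2 worker threads, bounded queue of capacity 5; a rejected task
           // bumps the counter instead of throwing RejectedExecutionException.
           ThreadPoolExecutor executor = new ThreadPoolExecutor(
                   2, 2, 0L, TimeUnit.SECONDS,
                   new ArrayBlockingQueue<>(5),
                   (task, exec) -> overflowOps.incrementAndGet());
   
           // Each task blocks until released, so both workers stay busy
           // and the queue cannot drain while we submit.
           Runnable blocking = () -> {
               try {
                   release.await();
               } catch (InterruptedException e) {
                   Thread.currentThread().interrupt();
               }
           };
   
           // Submit 8 tasks: 2 start running, 5 fill the queue, the 8th is rejected.
           for (int i = 0; i < 8; i++) {
               executor.execute(blocking);
           }
           System.out.println("overflow ops = " + overflowOps.get()); // prints 1
   
           release.countDown();
           executor.shutdown();
           executor.awaitTermination(5, TimeUnit.SECONDS);
       }
   }
   ```
   
   With both workers blocked and the queue holding 5 tasks, only the 8th submission hits the rejection handler, so the counter ends at 1. This is only how I read the metric; the actual batch metadata store wiring may differ.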


