DL1231 commented on code in PR #21150:
URL: https://github.com/apache/kafka/pull/21150#discussion_r2625800368


##########
storage/src/test/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerTest.java:
##########
@@ -2840,6 +2926,61 @@ public void testFailedDeleteExpiredSegments(long retentionSize,
         verify(remoteStorageManager).deleteLogSegmentData(metadataList.get(0));
     }
 
+    @ParameterizedTest(name = "testDeleteSegmentFailureWithRetriableExceptionShouldNotUpdateMetrics retentionSize={0} retentionMs={1}")
+    @CsvSource(value = {"0, -1", "-1, 0"})
+    public void testDeleteSegmentFailureWithRetriableExceptionShouldNotUpdateMetrics(long retentionSize,
+                                                                                     long retentionMs) throws RemoteStorageException, ExecutionException, InterruptedException {
+        Map<String, Long> logProps = new HashMap<>();
+        logProps.put("retention.bytes", retentionSize);
+        logProps.put("retention.ms", retentionMs);
+        LogConfig mockLogConfig = new LogConfig(logProps);
+        when(mockLog.config()).thenReturn(mockLogConfig);
+
+        List<EpochEntry> epochEntries = List.of(epochEntry0);
+        checkpoint.write(epochEntries);
+        LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler);
+        when(mockLog.leaderEpochCache()).thenReturn(cache);
+
+        when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition());
+        when(mockLog.logEndOffset()).thenReturn(200L);
+
+        List<RemoteLogSegmentMetadata> metadataList =
+            listRemoteLogSegmentMetadata(leaderTopicIdPartition, 1, 100, 1024, RemoteLogSegmentState.COPY_SEGMENT_FINISHED);
+        when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition))
+            .thenReturn(metadataList.iterator());
+        when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 0))
+            .thenAnswer(ans -> metadataList.iterator());
+        when(remoteLogMetadataManager.updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class)))
+            .thenReturn(CompletableFuture.runAsync(() -> { }));
+
+        // Verify the metrics for remote deletes and for failures are zero before attempting to delete segments
+        assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteDeleteRequestRate().count());
+        assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).failedRemoteDeleteRequestRate().count());
+        // Verify aggregate metrics
+        assertEquals(0, brokerTopicStats.allTopicsStats().remoteDeleteRequestRate().count());
+        assertEquals(0, brokerTopicStats.allTopicsStats().failedRemoteDeleteRequestRate().count());
+
+        RemoteLogManager.RLMExpirationTask task = remoteLogManager.new RLMExpirationTask(leaderTopicIdPartition);
+        doThrow(new RetriableRemoteStorageException("Failed to delete segment with retriable exception")).when(remoteStorageManager).deleteLogSegmentData(any());
+        assertThrows(RemoteStorageException.class, task::cleanupExpiredRemoteLogSegments);

Review Comment:
   Thanks for pointing that out. It was an oversight on my part. I've updated 
the PR.
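
   For readers following the thread, a minimal sketch of the kind of follow-up assertions such an update might add after the retriable failure is triggered. This is a hypothetical illustration only, not the actual diff; it reuses the brokerTopicStats meters already shown in the quoted test and assumes they are the ones the reviewer asked to re-check:

   ```java
   // Hypothetical follow-up checks (assumption, not the actual PR change): after the
   // retriable delete failure above, the per-topic and aggregate failed-delete meters
   // should still read zero, matching the test name "ShouldNotUpdateMetrics".
   assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic())
       .failedRemoteDeleteRequestRate().count());
   assertEquals(0, brokerTopicStats.allTopicsStats()
       .failedRemoteDeleteRequestRate().count());
   ```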



##########
storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManager.java:
##########
@@ -143,6 +143,11 @@
  * - also provides APIs to fetch indexes, metadata about remote log segments
  * - copying log segments to the remote storage
  * - cleaning up segments that are expired based on retention size or retention time
+ * <p>

Review Comment:
   Done.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
