chia7712 commented on code in PR #16627: URL: https://github.com/apache/kafka/pull/16627#discussion_r1685705213
##########
core/src/test/java/kafka/test/ClusterInstance.java:
##########
@@ -183,6 +193,67 @@ default Set<GroupProtocol> supportedGroupProtocols() {
     //---------------------------[wait]---------------------------//
+    @SuppressWarnings("deprecation")
+    default void verifyTopicDeletion(String topic, int partitions) throws InterruptedException {
+        if (!isKRaftTest()) {

Review Comment:
   This should be moved into the zk instance, since it is not part of the default implementation. For example:
   ```java
   @Override
   public void verifyTopicDeletion(String topic, int partitions) throws InterruptedException {
       org.apache.kafka.test.TestUtils.waitForCondition(
           () -> !getUnderlying().zkClient().isTopicMarkedForDeletion(topic),
           String.format("Admin path /admin/delete_topics/%s path not deleted even after a replica is restarted", topic)
       );
       org.apache.kafka.test.TestUtils.waitForCondition(
           () -> !getUnderlying().zkClient().topicExists(topic),
           String.format("Topic path /brokers/topics/%s not deleted after /admin/delete_topics/%s path is deleted", topic, topic)
       );
       ClusterInstance.super.verifyTopicDeletion(topic, partitions);
   }
   ```

##########
core/src/test/java/kafka/test/ClusterInstance.java:
##########
@@ -183,6 +193,67 @@ default Set<GroupProtocol> supportedGroupProtocols() {
     //---------------------------[wait]---------------------------//
+    @SuppressWarnings("deprecation")
+    default void verifyTopicDeletion(String topic, int partitions) throws InterruptedException {
+        if (!isKRaftTest()) {
+            TestUtils.waitForCondition(
+                () -> !((IntegrationTestHarness) getUnderlying()).zkClient().isTopicMarkedForDeletion(topic),
+                String.format("Admin path /admin/delete_topics/%s path not deleted even after a replica is restarted", topic)
+            );
+
+            TestUtils.waitForCondition(
+                () -> !((IntegrationTestHarness) getUnderlying()).zkClient().topicExists(topic),
+                String.format("Topic path /brokers/topics/%s not deleted after /admin/delete_topics/%s path is deleted", topic, topic)
+            );
+        }
+
+        List<TopicPartition> topicPartitions = IntStream.range(0, partitions).mapToObj(i -> new TopicPartition(topic, i)).collect(Collectors.toList());
+        TestUtils.waitForCondition(
+            () ->
+                brokers().values().stream().allMatch(broker ->
+                    topicPartitions.stream().allMatch(tp ->
+                        broker.replicaManager().onlinePartition(tp).isEmpty())
+                ), "Replica manager's should have deleted all of this topic's partitions"
+        );
+
+        TestUtils.waitForCondition(
+            () ->
+                brokers().values().stream().allMatch(broker ->
+                    topicPartitions.stream().allMatch(tp ->
+                        broker.replicaManager().onlinePartition(tp).isEmpty())
+                ), "Replica logs not deleted after delete topic is complete"
+        );
+
+
+        TestUtils.waitForCondition(() -> brokers().values().stream().allMatch(broker ->
+            topicPartitions.stream().allMatch(tp ->
+                JavaConverters.seqAsJavaList(broker.logManager().liveLogDirs()).stream()
+                    .map(logDir -> JavaConverters.mapAsJavaMap(new OffsetCheckpointFile(new File(logDir, "cleaner-offset-checkpoint"), null).read()))
+                    .collect(Collectors.toList())

Review Comment:
   Do we need to re-collect it?
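   For illustration, a rough sketch of what dropping the re-collect could look like: the mapped stream can feed `noneMatch` directly, without the intermediate `collect(Collectors.toList()).stream()` round trip. All names are taken from the diff above; this is a sketch, not the PR's final code.
   ```java
   // Sketch only: same condition as above, but the mapped Stream is consumed
   // directly by noneMatch instead of being collected into a List and re-streamed.
   TestUtils.waitForCondition(() -> brokers().values().stream().allMatch(broker ->
       topicPartitions.stream().allMatch(tp ->
           JavaConverters.seqAsJavaList(broker.logManager().liveLogDirs()).stream()
               .map(logDir -> JavaConverters.mapAsJavaMap(
                   new OffsetCheckpointFile(new File(logDir, "cleaner-offset-checkpoint"), null).read()))
               .noneMatch(checkpointsPerLogDir -> checkpointsPerLogDir.containsKey(tp))
       )
   ), "Cleaner offset for deleted partition should have been removed");
   ```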
##########
core/src/test/java/kafka/test/ClusterInstance.java:
##########
@@ -183,6 +193,67 @@ default Set<GroupProtocol> supportedGroupProtocols() {
     //---------------------------[wait]---------------------------//
+    @SuppressWarnings("deprecation")
+    default void verifyTopicDeletion(String topic, int partitions) throws InterruptedException {
+        if (!isKRaftTest()) {
+            TestUtils.waitForCondition(
+                () -> !((IntegrationTestHarness) getUnderlying()).zkClient().isTopicMarkedForDeletion(topic),
+                String.format("Admin path /admin/delete_topics/%s path not deleted even after a replica is restarted", topic)
+            );
+
+            TestUtils.waitForCondition(
+                () -> !((IntegrationTestHarness) getUnderlying()).zkClient().topicExists(topic),
+                String.format("Topic path /brokers/topics/%s not deleted after /admin/delete_topics/%s path is deleted", topic, topic)
+            );
+        }
+
+        List<TopicPartition> topicPartitions = IntStream.range(0, partitions).mapToObj(i -> new TopicPartition(topic, i)).collect(Collectors.toList());
+        TestUtils.waitForCondition(
+            () ->
+                brokers().values().stream().allMatch(broker ->

Review Comment:
   please fix the indent.

##########
core/src/test/java/kafka/test/ClusterInstance.java:
##########
@@ -183,6 +193,67 @@ default Set<GroupProtocol> supportedGroupProtocols() {
     //---------------------------[wait]---------------------------//
+    @SuppressWarnings("deprecation")
+    default void verifyTopicDeletion(String topic, int partitions) throws InterruptedException {
+        if (!isKRaftTest()) {
+            TestUtils.waitForCondition(
+                () -> !((IntegrationTestHarness) getUnderlying()).zkClient().isTopicMarkedForDeletion(topic),
+                String.format("Admin path /admin/delete_topics/%s path not deleted even after a replica is restarted", topic)
+            );
+
+            TestUtils.waitForCondition(
+                () -> !((IntegrationTestHarness) getUnderlying()).zkClient().topicExists(topic),
+                String.format("Topic path /brokers/topics/%s not deleted after /admin/delete_topics/%s path is deleted", topic, topic)
+            );
+        }
+
+        List<TopicPartition> topicPartitions = IntStream.range(0, partitions).mapToObj(i -> new TopicPartition(topic, i)).collect(Collectors.toList());
+        TestUtils.waitForCondition(
+            () ->
+                brokers().values().stream().allMatch(broker ->
+                    topicPartitions.stream().allMatch(tp ->
+                        broker.replicaManager().onlinePartition(tp).isEmpty())
+                ), "Replica manager's should have deleted all of this topic's partitions"
+        );
+
+        TestUtils.waitForCondition(
+            () ->
+                brokers().values().stream().allMatch(broker ->
+                    topicPartitions.stream().allMatch(tp ->
+                        broker.replicaManager().onlinePartition(tp).isEmpty())
+                ), "Replica logs not deleted after delete topic is complete"
+        );
+
+
+        TestUtils.waitForCondition(() -> brokers().values().stream().allMatch(broker ->
+            topicPartitions.stream().allMatch(tp ->
+                JavaConverters.seqAsJavaList(broker.logManager().liveLogDirs()).stream()
+                    .map(logDir -> JavaConverters.mapAsJavaMap(new OffsetCheckpointFile(new File(logDir, "cleaner-offset-checkpoint"), null).read()))
+                    .collect(Collectors.toList())
+                    .stream()
+                    .noneMatch(checkpointsPerLogDir -> checkpointsPerLogDir.containsKey(tp))
+            )
+        ), "Cleaner offset for deleted partition should have been removed");
+
+        TestUtils.waitForCondition(() -> brokers().values().stream().allMatch(
+            broker -> broker.config().logDirs().forall(
+                logDir -> topicPartitions.stream().noneMatch(
+                    tp -> new File(logDir, tp.topic() + "-" + tp.partition()).exists()))),
+            "Failed to soft-delete the data to a delete directory"
+        );
+
+        TestUtils.waitForCondition(() ->
+            brokers().values().stream().allMatch(
+                broker -> broker.config().logDirs().forall(
+                    logDir -> topicPartitions.stream().noneMatch(
+                        tp -> Arrays.asList(new File(logDir).list()).stream().allMatch(

Review Comment:
   please add null check
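   For illustration only, a hypothetical helper (`listEntries` is not part of the PR) showing one way to add the requested null check: `File#list()` returns `null` when the path is not a directory or cannot be read, so the stream should fall back to empty rather than throw a `NullPointerException`.
   ```java
   import java.io.File;
   import java.util.Arrays;
   import java.util.stream.Stream;

   final class LogDirListing {
       // Hypothetical helper: guards against File#list() returning null before streaming.
       static Stream<String> listEntries(String logDir) {
           String[] entries = new File(logDir).list();
           return entries == null ? Stream.empty() : Arrays.stream(entries);
       }
   }
   ```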
##########
core/src/test/java/kafka/test/ClusterInstance.java:
##########
@@ -183,6 +193,67 @@ default Set<GroupProtocol> supportedGroupProtocols() {
     //---------------------------[wait]---------------------------//
+    @SuppressWarnings("deprecation")
+    default void verifyTopicDeletion(String topic, int partitions) throws InterruptedException {
+        if (!isKRaftTest()) {
+            TestUtils.waitForCondition(
+                () -> !((IntegrationTestHarness) getUnderlying()).zkClient().isTopicMarkedForDeletion(topic),
+                String.format("Admin path /admin/delete_topics/%s path not deleted even after a replica is restarted", topic)
+            );
+
+            TestUtils.waitForCondition(
+                () -> !((IntegrationTestHarness) getUnderlying()).zkClient().topicExists(topic),
+                String.format("Topic path /brokers/topics/%s not deleted after /admin/delete_topics/%s path is deleted", topic, topic)
+            );
+        }
+
+        List<TopicPartition> topicPartitions = IntStream.range(0, partitions).mapToObj(i -> new TopicPartition(topic, i)).collect(Collectors.toList());
+        TestUtils.waitForCondition(
+            () ->
+                brokers().values().stream().allMatch(broker ->
+                    topicPartitions.stream().allMatch(tp ->
+                        broker.replicaManager().onlinePartition(tp).isEmpty())
+                ), "Replica manager's should have deleted all of this topic's partitions"
+        );
+
+        TestUtils.waitForCondition(
+            () ->
+                brokers().values().stream().allMatch(broker ->
+                    topicPartitions.stream().allMatch(tp ->
+                        broker.replicaManager().onlinePartition(tp).isEmpty())

Review Comment:
   We should check `logManager`, right?
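   If the intent is to assert that the logs themselves are gone (the second condition currently repeats the replica-manager check above it), a rough sketch of what it could look like, assuming the Scala `LogManager#getLog(TopicPartition, boolean)` API is reachable from this Java test:
   ```java
   // Sketch: ask the log manager for each partition's log; the condition only passes
   // once every broker has actually removed the topic's logs.
   TestUtils.waitForCondition(
       () ->
           brokers().values().stream().allMatch(broker ->
               topicPartitions.stream().allMatch(tp ->
                   broker.logManager().getLog(tp, false).isEmpty())
           ), "Replica logs not deleted after delete topic is complete"
   );
   ```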