gharris1727 commented on code in PR #11818:
URL: https://github.com/apache/kafka/pull/11818#discussion_r1057917207


##########
connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationBaseTest.java:
##########
@@ -305,8 +305,10 @@ public void testReplication() throws Exception {
         Map<TopicPartition, OffsetAndMetadata> backupOffsets = 
backupClient.remoteConsumerOffsets(consumerGroupName, PRIMARY_CLUSTER_ALIAS,
             Duration.ofMillis(CHECKPOINT_DURATION_MS));
 
-        assertTrue(backupOffsets.containsKey(
-            new TopicPartition("primary.test-topic-1", 0)), "Offsets not 
translated downstream to backup cluster. Found: " + backupOffsets);
+        for (int i = 0; i < NUM_PARTITIONS; i++) {

Review Comment:
   Nice change!



##########
connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceTaskTest.java:
##########
@@ -207,4 +375,5 @@ private void compareHeaders(List<Header> expectedHeaders, 
List<org.apache.kafka.
                     "taskHeader's value expected to equal " + 
taskHeader.value().toString());
         }
     }
+

Review Comment:
   nit: extra trailing blank line added at the end of the file



##########
connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceTaskTest.java:
##########
@@ -196,6 +300,70 @@ public void testCommitRecordWithNullMetadata() {
         verifyNoInteractions(producer);
     }
 
+    @Test
+    public void testPartitionStateMutation() {
+        byte[] recordKey = "key".getBytes();
+        byte[] recordValue = "value".getBytes();
+        int maxOffsetLag = 50;
+        int recordPartition = 0;
+        int recordOffset = 0;
+        int metadataOffset = 100;
+        String topicName = "topic";
+        String sourceClusterName = "sourceCluster";
+
+        RecordHeaders headers = new RecordHeaders();
+        ReplicationPolicy replicationPolicy = new DefaultReplicationPolicy();
+
+        @SuppressWarnings("unchecked")
+        KafkaConsumer<byte[], byte[]> consumer = mock(KafkaConsumer.class);
+        @SuppressWarnings("unchecked")
+        KafkaProducer<byte[], byte[]> producer = mock(KafkaProducer.class);
+        MirrorSourceMetrics metrics = mock(MirrorSourceMetrics.class);
+        Semaphore outstandingOffsetSyncs = mock(Semaphore.class);
+        PartitionState partitionState = new PartitionState(maxOffsetLag);
+        Map<TopicPartition, PartitionState> partitionStates = new HashMap<>();
+
+        MirrorSourceTask mirrorSourceTask = new MirrorSourceTask(consumer, 
metrics, sourceClusterName,
+                replicationPolicy, maxOffsetLag, producer, 
outstandingOffsetSyncs, partitionStates, topicName);
+
+        SourceRecord sourceRecord = mirrorSourceTask.convertRecord(new 
ConsumerRecord<>(topicName, recordPartition,
+                recordOffset, System.currentTimeMillis(), 
TimestampType.CREATE_TIME, recordKey.length,
+                recordValue.length, recordKey, recordValue, headers, 
Optional.empty()));
+
+        TopicPartition sourceTopicPartition = 
MirrorUtils.unwrapPartition(sourceRecord.sourcePartition());
+        partitionStates.put(sourceTopicPartition, partitionState);
+        RecordMetadata recordMetadata = new 
RecordMetadata(sourceTopicPartition, metadataOffset, 0, 0, 0, recordPartition);
+
+        when(outstandingOffsetSyncs.tryAcquire()).thenReturn(true);
+        mirrorSourceTask.commitRecord(sourceRecord, recordMetadata);
+        assertEquals(recordOffset, partitionState.lastSyncUpstreamOffset, 
"sync offsets");
+        assertEquals(metadataOffset, partitionState.lastSyncDownstreamOffset, 
"sync offsets");
+        assertEquals(recordOffset, partitionState.previousUpstreamOffset, 
"sync offsets");
+        assertEquals(metadataOffset, partitionState.previousDownstreamOffset, 
"sync offsets");
+
+        int newRecordOffset = 2;
+        int newMetadataOffset = 102;
+        recordMetadata = new RecordMetadata(sourceTopicPartition, 
newMetadataOffset, 0, 0, 0, recordPartition);
+        sourceRecord = mirrorSourceTask.convertRecord(new 
ConsumerRecord<>(topicName, recordPartition,
+                newRecordOffset, System.currentTimeMillis(), 
TimestampType.CREATE_TIME, recordKey.length,
+                recordValue.length, recordKey, recordValue, headers, 
Optional.empty()));
+
+        when(outstandingOffsetSyncs.tryAcquire()).thenReturn(false);
+        mirrorSourceTask.commitRecord(sourceRecord, recordMetadata);
+        assertEquals(recordOffset, partitionState.lastSyncUpstreamOffset, "no 
sync");
+        assertEquals(metadataOffset, partitionState.lastSyncDownstreamOffset, 
"no sync");
+        assertEquals(newRecordOffset, partitionState.previousUpstreamOffset, 
"update previuos upstream offset");
+        assertEquals(newMetadataOffset, 
partitionState.previousDownstreamOffset, "update previuos upstream offset");

Review Comment:
   nit: typo — "previuos" should be "previous" in the assertion messages
   ```suggestion
           assertEquals(newRecordOffset, partitionState.previousUpstreamOffset, 
"update previous upstream offset");
           assertEquals(newMetadataOffset, 
partitionState.previousDownstreamOffset, "update previous upstream offset");
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: jira-unsubscr...@kafka.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

Reply via email to