apoorvmittal10 commented on code in PR #20253: URL: https://github.com/apache/kafka/pull/20253#discussion_r2237889909
########## core/src/test/java/kafka/server/share/SharePartitionTest.java: ########## @@ -1510,6 +1510,150 @@ public void testAcquireBatchSkipWithBatchSizeAndEndOffsetLargerThanFirstBatch() assertTrue(sharePartition.cachedState().containsKey(12L)); } + @Test + public void testAcquireWithMaxInFlightMessagesAndTryAcquireNewBatch() { + SharePartition sharePartition = SharePartitionBuilder.builder() + .withState(SharePartitionState.ACTIVE) + .withSharePartitionMetrics(sharePartitionMetrics) + .withMaxInflightMessages(20) + .build(); + + // Acquire records, should be acquired till maxInFlightMessages i.e. 20 records till 29 offset. + List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 500 /* Max fetch records */, + DEFAULT_FETCH_OFFSET, + fetchPartitionData(memoryRecords(10, 0), 0), + FETCH_ISOLATION_HWM), + 10); + // Validate all 10 records will be acquired as the maxInFlightMessages is 20. + assertArrayEquals(expectedAcquiredRecord(0, 9, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(10, sharePartition.nextFetchOffset()); + + // Create 4 batches of records. + ByteBuffer buffer = ByteBuffer.allocate(4096); + memoryRecordsBuilder(buffer, 5, 10).close(); + memoryRecordsBuilder(buffer, 10, 15).close(); + memoryRecordsBuilder(buffer, 5, 25).close(); + memoryRecordsBuilder(buffer, 2, 30).close(); + + buffer.flip(); + + MemoryRecords records = MemoryRecords.readableRecords(buffer); + + // Acquire records, should be acquired till maxInFlightMessages i.e. 20 records. As second batch + // is ending at 24 offset, hence additional 15 records will be acquired. + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 500 /* Max fetch records */, + DEFAULT_FETCH_OFFSET, + fetchPartitionData(records, 0), + FETCH_ISOLATION_HWM), + 15); + + // Validate 2 batches are fetched one with 5 records and other till end of batch, third batch + // should be skipped. 
+ assertArrayEquals(expectedAcquiredRecord(10, 24, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(25, sharePartition.nextFetchOffset()); + + // Should not acquire any records as the share partition is at capacity and fetch offset is beyond + // the end offset. + fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 500 /* Max fetch records */, + 25 /* Fetch Offset */, + fetchPartitionData(memoryRecords(10, 25), 10), + FETCH_ISOLATION_HWM), + 0); + + assertEquals(25, sharePartition.nextFetchOffset()); + } + + @Test + public void testAcquireWithMaxInFlightMessagesAndReleaseLastOffset() { + SharePartition sharePartition = SharePartitionBuilder.builder() + .withState(SharePartitionState.ACTIVE) + .withSharePartitionMetrics(sharePartitionMetrics) + .withMaxInflightMessages(20) + .build(); + + // Create 3 batches of records. Review Comment: My bad, I didn't correct the comments when later changed the tests. Done. ########## core/src/test/java/kafka/server/share/SharePartitionTest.java: ########## @@ -1510,6 +1510,150 @@ public void testAcquireBatchSkipWithBatchSizeAndEndOffsetLargerThanFirstBatch() assertTrue(sharePartition.cachedState().containsKey(12L)); } + @Test + public void testAcquireWithMaxInFlightMessagesAndTryAcquireNewBatch() { + SharePartition sharePartition = SharePartitionBuilder.builder() + .withState(SharePartitionState.ACTIVE) + .withSharePartitionMetrics(sharePartitionMetrics) + .withMaxInflightMessages(20) + .build(); + + // Acquire records, should be acquired till maxInFlightMessages i.e. 20 records till 29 offset. Review Comment: My bad, I didn't correct the comments when later changed the tests. Done. 
########## core/src/test/java/kafka/server/share/SharePartitionTest.java: ########## @@ -1510,6 +1510,150 @@ public void testAcquireBatchSkipWithBatchSizeAndEndOffsetLargerThanFirstBatch() assertTrue(sharePartition.cachedState().containsKey(12L)); } + @Test + public void testAcquireWithMaxInFlightMessagesAndTryAcquireNewBatch() { + SharePartition sharePartition = SharePartitionBuilder.builder() + .withState(SharePartitionState.ACTIVE) + .withSharePartitionMetrics(sharePartitionMetrics) + .withMaxInflightMessages(20) + .build(); + + // Acquire records, should be acquired till maxInFlightMessages i.e. 20 records till 29 offset. + List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 500 /* Max fetch records */, + DEFAULT_FETCH_OFFSET, + fetchPartitionData(memoryRecords(10, 0), 0), + FETCH_ISOLATION_HWM), + 10); + // Validate all 10 records will be acquired as the maxInFlightMessages is 20. + assertArrayEquals(expectedAcquiredRecord(0, 9, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(10, sharePartition.nextFetchOffset()); + + // Create 4 batches of records. + ByteBuffer buffer = ByteBuffer.allocate(4096); + memoryRecordsBuilder(buffer, 5, 10).close(); + memoryRecordsBuilder(buffer, 10, 15).close(); + memoryRecordsBuilder(buffer, 5, 25).close(); + memoryRecordsBuilder(buffer, 2, 30).close(); + + buffer.flip(); + + MemoryRecords records = MemoryRecords.readableRecords(buffer); + + // Acquire records, should be acquired till maxInFlightMessages i.e. 20 records. As second batch + // is ending at 24 offset, hence additional 15 records will be acquired. + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 500 /* Max fetch records */, + DEFAULT_FETCH_OFFSET, + fetchPartitionData(records, 0), + FETCH_ISOLATION_HWM), + 15); + + // Validate 2 batches are fetched one with 5 records and other till end of batch, third batch + // should be skipped. 
+ assertArrayEquals(expectedAcquiredRecord(10, 24, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(25, sharePartition.nextFetchOffset()); + + // Should not acquire any records as the share partition is at capacity and fetch offset is beyond + // the end offset. + fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 500 /* Max fetch records */, + 25 /* Fetch Offset */, + fetchPartitionData(memoryRecords(10, 25), 10), + FETCH_ISOLATION_HWM), + 0); + + assertEquals(25, sharePartition.nextFetchOffset()); + } + + @Test + public void testAcquireWithMaxInFlightMessagesAndReleaseLastOffset() { + SharePartition sharePartition = SharePartitionBuilder.builder() + .withState(SharePartitionState.ACTIVE) + .withSharePartitionMetrics(sharePartitionMetrics) + .withMaxInflightMessages(20) + .build(); + + // Create 3 batches of records. + ByteBuffer buffer = ByteBuffer.allocate(4096); + memoryRecordsBuilder(buffer, 5, 10).close(); + memoryRecordsBuilder(buffer, 10, 15).close(); + memoryRecordsBuilder(buffer, 5, 25).close(); + memoryRecordsBuilder(buffer, 3, 30).close(); + + buffer.flip(); + + MemoryRecords records = MemoryRecords.readableRecords(buffer); + // Acquire records, should be acquired till maxInFlightMessages i.e. 20 records till 29 offset. + List<AcquiredRecords> acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + BATCH_SIZE, + 500 /* Max fetch records */, + DEFAULT_FETCH_OFFSET, + fetchPartitionData(records, 10), + FETCH_ISOLATION_HWM), + 20); + + // Validate 2 batches are fetched one with 5 records and other till end of batch, third batch Review Comment: My bad, I didn't correct the comments when later changed the tests. Done. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. 
To unsubscribe, e-mail: jira-unsubscr...@kafka.apache.org. For queries about this service, please contact Infrastructure at: us...@infra.apache.org.