apoorvmittal10 commented on code in PR #17539:
URL: https://github.com/apache/kafka/pull/17539#discussion_r1816509367


##########
core/src/main/java/kafka/server/share/DelayedShareFetch.java:
##########
@@ -55,6 +60,7 @@ public class DelayedShareFetch extends DelayedOperation {
     private final ReplicaManager replicaManager;
 
     private Map<TopicIdPartition, FetchRequest.PartitionData> topicPartitionDataFromTryComplete;
+    private Map<TopicIdPartition, FetchPartitionOffsetData> logReadResponseFromTryComplete;

Review Comment:
   Do we need the variable name to have the suffix `TryComplete`? I don't find the suffix helpful.
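   
   For example, the field could simply read as below (name illustrative; assuming it doesn't clash with an existing member):
   
   ```java
   private Map<TopicIdPartition, FetchPartitionOffsetData> logReadResponse;
   ```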



##########
core/src/main/java/kafka/server/share/DelayedShareFetch.java:
##########
@@ -204,7 +211,154 @@ Map<TopicIdPartition, FetchRequest.PartitionData> acquirablePartitions() {
         return topicPartitionData;
     }
 
-    private void releasePartitionLocks(String groupId, Set<TopicIdPartition> topicIdPartitions) {
+    // In case fetch offset metadata doesn't exist for any topic partition in the list of topic partitions, we do a
+    // replicaManager.readFromLog to populate the offset metadata.
+    private MaybeUpdateFetchOffsetMetadataResult maybeUpdateFetchOffsetMetadataForTopicPartitions(Map<TopicIdPartition, FetchRequest.PartitionData> topicPartitionData) {
+        boolean isFetchOffsetMetadataUpdated = false;
+        Map<TopicIdPartition, FetchRequest.PartitionData> missingFetchOffsetMetadataTopicPartitions = new LinkedHashMap<>();

Review Comment:
   How frequently do we see missing fetch offset information? Mostly only at start, right? Then why initialize this variable eagerly; can't it be lazily initialized, if required?
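   
   A minimal sketch of what I mean, allocating the map only on the first miss (illustrative only; the null share partition debug logging from the current version is omitted for brevity):
   
   ```java
   // Assumes the surrounding method's context: topicPartitionData, sharePartitionManager, shareFetchData.
   Map<TopicIdPartition, FetchRequest.PartitionData> missingFetchOffsetMetadataTopicPartitions = null;
   for (Map.Entry<TopicIdPartition, FetchRequest.PartitionData> entry : topicPartitionData.entrySet()) {
       SharePartition sharePartition = sharePartitionManager.sharePartition(shareFetchData.groupId(), entry.getKey());
       // Allocate lazily; in the common case no partition is missing fetch offset metadata.
       if (sharePartition != null && sharePartition.latestFetchOffsetMetadata().isEmpty()) {
           if (missingFetchOffsetMetadataTopicPartitions == null)
               missingFetchOffsetMetadataTopicPartitions = new LinkedHashMap<>();
           missingFetchOffsetMetadataTopicPartitions.put(entry.getKey(), entry.getValue());
       }
   }
   if (missingFetchOffsetMetadataTopicPartitions == null)
       return new MaybeUpdateFetchOffsetMetadataResult(false, null);
   ```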



##########
core/src/main/java/kafka/server/share/DelayedShareFetch.java:
##########
@@ -204,7 +211,154 @@ Map<TopicIdPartition, FetchRequest.PartitionData> acquirablePartitions() {
         return topicPartitionData;
     }
 
-    private void releasePartitionLocks(String groupId, Set<TopicIdPartition> topicIdPartitions) {
+    // In case fetch offset metadata doesn't exist for any topic partition in the list of topic partitions, we do a
+    // replicaManager.readFromLog to populate the offset metadata.
+    private MaybeUpdateFetchOffsetMetadataResult maybeUpdateFetchOffsetMetadataForTopicPartitions(Map<TopicIdPartition, FetchRequest.PartitionData> topicPartitionData) {
+        boolean isFetchOffsetMetadataUpdated = false;
+        Map<TopicIdPartition, FetchRequest.PartitionData> missingFetchOffsetMetadataTopicPartitions = new LinkedHashMap<>();
+        Map<TopicIdPartition, FetchPartitionOffsetData> replicaManagerReadResponseData;
+        for (Map.Entry<TopicIdPartition, FetchRequest.PartitionData> entry : topicPartitionData.entrySet()) {
+            TopicIdPartition topicIdPartition = entry.getKey();
+            SharePartition sharePartition = sharePartitionManager.sharePartition(shareFetchData.groupId(), topicIdPartition);
+            if (sharePartition == null) {
+                log.debug("Encountered null share partition for groupId={}, topicIdPartition={}. Skipping it.", shareFetchData.groupId(), topicIdPartition);
+                continue;
+            }
+            if (sharePartition.latestFetchOffsetMetadata().isEmpty())
+                missingFetchOffsetMetadataTopicPartitions.put(topicIdPartition, entry.getValue());
+        }
+
+        if (missingFetchOffsetMetadataTopicPartitions.isEmpty()) {
+            return new MaybeUpdateFetchOffsetMetadataResult(false, null);
+        }
+        // We fetch data from the replica manager corresponding to the topic partitions that have missing fetch offset metadata.
+        replicaManagerReadResponseData = readFromLog(missingFetchOffsetMetadataTopicPartitions);
+
+        for (Map.Entry<TopicIdPartition, FetchRequest.PartitionData> entry : missingFetchOffsetMetadataTopicPartitions.entrySet()) {
+            TopicIdPartition topicIdPartition = entry.getKey();
+            SharePartition sharePartition = sharePartitionManager.sharePartition(shareFetchData.groupId(), topicIdPartition);
+            if (sharePartition == null) {
+                log.debug("Encountered null share partition for groupId={}, topicIdPartition={}. Skipping it.", shareFetchData.groupId(), topicIdPartition);
+                continue;
+            }
+            FetchPartitionOffsetData fetchPartitionOffsetData = replicaManagerReadResponseData.get(topicIdPartition);
+            if (fetchPartitionOffsetData == null) {
+                log.debug("Replica manager read log result {} does not contain topic partition {}",
+                    replicaManagerReadResponseData, topicIdPartition);
+                continue;
+            }
+            sharePartition.updateLatestFetchOffsetMetadata(fetchPartitionOffsetData.logOffsetMetadata());
+            isFetchOffsetMetadataUpdated = true;
+        }
+        return new MaybeUpdateFetchOffsetMetadataResult(isFetchOffsetMetadataUpdated, replicaManagerReadResponseData);
+    }
+
+    private boolean isMinBytesSatisfied(Map<TopicIdPartition, FetchRequest.PartitionData> topicPartitionData) {
+        long accumulatedSize = 0;
+        try {
+            for (Map.Entry<TopicIdPartition, FetchRequest.PartitionData> entry : topicPartitionData.entrySet()) {
+                TopicIdPartition topicIdPartition = entry.getKey();
+                FetchRequest.PartitionData partitionData = entry.getValue();
+                Partition partition = replicaManager.getPartitionOrException(topicIdPartition.topicPartition());
+                LogOffsetSnapshot offsetSnapshot = partition.fetchOffsetSnapshot(Optional.empty(), true);
+                // The FetchIsolation type that we use for share fetch is FetchIsolation.HIGH_WATERMARK. In the future, we can
+                // extend it to other FetchIsolation types.
+                FetchIsolation isolationType = shareFetchData.fetchParams().isolation;
+                LogOffsetMetadata endOffsetMetadata;
+                if (isolationType == FetchIsolation.LOG_END)
+                    endOffsetMetadata = offsetSnapshot.logEndOffset;
+                else if (isolationType == FetchIsolation.HIGH_WATERMARK)
+                    endOffsetMetadata = offsetSnapshot.highWatermark;
+                else
+                    endOffsetMetadata = offsetSnapshot.lastStableOffset;
+
+                if (endOffsetMetadata == LogOffsetMetadata.UNKNOWN_OFFSET_METADATA)
+                    continue;
+
+                SharePartition sharePartition = sharePartitionManager.sharePartition(shareFetchData.groupId(), topicIdPartition);
+                if (sharePartition == null) {
+                    return true;
+                }
+
+                Optional<LogOffsetMetadata> optionalFetchOffsetMetadata = sharePartition.latestFetchOffsetMetadata();
+                if (optionalFetchOffsetMetadata.isEmpty() || optionalFetchOffsetMetadata.get() == LogOffsetMetadata.UNKNOWN_OFFSET_METADATA)
+                    continue;
+                LogOffsetMetadata fetchOffsetMetadata = optionalFetchOffsetMetadata.get();
+
+                if (fetchOffsetMetadata.messageOffset > endOffsetMetadata.messageOffset) {
+                    log.debug("Satisfying delayed share fetch request for group {}, member {} since it is fetching later segments of " +
+                        "topicIdPartition {}", shareFetchData.groupId(), shareFetchData.memberId(), topicIdPartition);
+                    return true;
+                } else if (fetchOffsetMetadata.messageOffset < endOffsetMetadata.messageOffset) {
+                    if (fetchOffsetMetadata.onOlderSegment(endOffsetMetadata)) {
+                        // This can happen when the fetch operation is falling behind the current segment or the partition
+                        // has just rolled a new segment.
+                        log.debug("Satisfying delayed share fetch request for group {}, member {} immediately since it is fetching older " +
+                            "segments of topicIdPartition {}", shareFetchData.groupId(), shareFetchData.memberId(), topicIdPartition);
+                        return true;
+                    } else if (fetchOffsetMetadata.onSameSegment(endOffsetMetadata)) {
+                        // We take the partition fetch size as the upper bound when accumulating the bytes.
+                        long bytesAvailable = Math.min(endOffsetMetadata.positionDiff(fetchOffsetMetadata), partitionData.maxBytes);
+                        accumulatedSize += bytesAvailable;
+                    }
+                }
+            }
+            return accumulatedSize >= shareFetchData.fetchParams().minBytes;
+        } catch (Exception e) {
+            // Ideally we should complete the share fetch request's future exceptionally in this case from tryComplete itself.
+            // A function that can be utilized is handleFetchException in an in-flight PR https://github.com/apache/kafka/pull/16842.
+            // Perhaps, once the mentioned PR is merged, I'll change it to better exception handling.
+            log.error("Error processing the minBytes criteria for share fetch request", e);
+            return true;
+        }
+    }
+
+    private Map<TopicIdPartition, FetchPartitionOffsetData> readFromLog(Map<TopicIdPartition, FetchRequest.PartitionData> topicPartitionData) {
+        Seq<Tuple2<TopicIdPartition, LogReadResult>> responseLogResult = replicaManager.readFromLog(
+            shareFetchData.fetchParams(),
+            CollectionConverters.asScala(
+                topicPartitionData.entrySet().stream().map(entry ->
+                    new Tuple2<>(entry.getKey(), entry.getValue())).collect(Collectors.toList())
+            ),
+            QuotaFactory.UnboundedQuota$.MODULE$,
+            true);
+
+        Map<TopicIdPartition, FetchPartitionOffsetData> responseData = new HashMap<>();
+        responseLogResult.foreach(tpLogResult -> {
+            TopicIdPartition topicIdPartition = tpLogResult._1();
+            LogReadResult logResult = tpLogResult._2();
+            FetchPartitionData fetchPartitionData = logResult.toFetchPartitionData(false);
+            responseData.put(topicIdPartition, new FetchPartitionOffsetData(fetchPartitionData, logResult.info().fetchOffsetMetadata));
+            return BoxedUnit.UNIT;
+        });
+
+        log.trace("Data successfully retrieved by replica manager: {}", responseData);
+        return responseData;
+    }
+
+    // Visible for testing.
+    Map<TopicIdPartition, FetchPartitionOffsetData> combineLogReadResponseForOnComplete(Map<TopicIdPartition, FetchRequest.PartitionData> topicPartitionData) {

Review Comment:
   Why have method names with such a suffix? Is it helping?
   
   ```suggestion
    Map<TopicIdPartition, FetchPartitionOffsetData> combineLogReadResponse(Map<TopicIdPartition, FetchRequest.PartitionData> topicPartitionData) {
   ```



##########
core/src/main/java/kafka/server/share/DelayedShareFetch.java:
##########
@@ -204,7 +202,117 @@ Map<TopicIdPartition, FetchRequest.PartitionData> acquirablePartitions() {
         return topicPartitionData;
     }
 
-    private void releasePartitionLocks(String groupId, Set<TopicIdPartition> topicIdPartitions) {
+    // In case fetch offset metadata doesn't exist for any topic partition in the list of topic partitions, we do a
+    // replicaManager.readFromLog to populate the offset metadata.
+    private void maybeUpdateFetchOffsetMetadataForTopicPartitions(Map<TopicIdPartition, FetchRequest.PartitionData> topicPartitionData) {
+        Map<TopicIdPartition, FetchPartitionOffsetData> replicaManagerReadResponseData = null;
+        for (Map.Entry<TopicIdPartition, FetchRequest.PartitionData> entry : topicPartitionData.entrySet()) {
+            TopicIdPartition topicIdPartition = entry.getKey();
+            SharePartition sharePartition = sharePartitionManager.sharePartition(shareFetchData.groupId(), topicIdPartition);
+            if (sharePartition == null) {
+                log.error("Encountered null share partition for groupId={}, topicIdPartition={}. Skipping it.", shareFetchData.groupId(), topicIdPartition);
+                continue;

Review Comment:
   @adixitconfluent Is it handled?



##########
core/src/main/java/kafka/server/share/DelayedShareFetch.java:
##########
@@ -214,4 +368,14 @@ private void releasePartitionLocks(String groupId, Set<TopicIdPartition> topicId
             sharePartition.releaseFetchLock();
         });
     }
+
+    static final class MaybeUpdateFetchOffsetMetadataResult {

Review Comment:
   Wouldn't `FetchOffsetMetadataUpdateResult` be a better name?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
