thomaskwscott commented on a change in pull request #10960:
URL: https://github.com/apache/kafka/pull/10960#discussion_r664042666



##########
File path: core/src/main/scala/kafka/log/LogSegment.scala
##########
@@ -386,15 +385,13 @@ class LogSegment private[log] (val log: FileRecords,
   private def loadLargestTimestamp(): Unit = {
     // Get the last time index entry. If the time index is empty, it will return (-1, baseOffset)
     val lastTimeIndexEntry = timeIndex.lastEntry
-    maxTimestampSoFar = lastTimeIndexEntry.timestamp
-    offsetOfMaxTimestampSoFar = lastTimeIndexEntry.offset
+    _maxTimestampAndOffsetSoFar = TimestampOffset(lastTimeIndexEntry.timestamp, lastTimeIndexEntry.offset)

     val offsetPosition = offsetIndex.lookup(lastTimeIndexEntry.offset)
     // Scan the rest of the messages to see if there is a larger timestamp after the last time index entry.
     val maxTimestampOffsetAfterLastEntry = log.largestTimestampAfter(offsetPosition.position)
     if (maxTimestampOffsetAfterLastEntry.timestamp > lastTimeIndexEntry.timestamp) {
-      maxTimestampSoFar = maxTimestampOffsetAfterLastEntry.timestamp
-      offsetOfMaxTimestampSoFar = maxTimestampOffsetAfterLastEntry.offset
+      _maxTimestampAndOffsetSoFar = TimestampOffset(maxTimestampOffsetAfterLastEntry.timestamp, maxTimestampOffsetAfterLastEntry.offset)

Review comment:
       fixed
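
For readers following the thread: the change above collapses the two mutable fields `maxTimestampSoFar` and `offsetOfMaxTimestampSoFar` into a single immutable `TimestampOffset` value, so the timestamp and its offset can no longer be updated out of step. A minimal standalone sketch of the pattern; the `SegmentState` wrapper and `observe` method are hypothetical stand-ins, and `TimestampOffset` here only models the shape of Kafka's class of the same name:

```scala
// Immutable pair modelled on Kafka's TimestampOffset (shape assumed here).
case class TimestampOffset(timestamp: Long, offset: Long)

object TimestampOffset {
  // Sentinel meaning "nothing seen yet"; -1 mirrors RecordBatch.NO_TIMESTAMP.
  val Unknown: TimestampOffset = TimestampOffset(-1L, 0L)
}

// Hypothetical wrapper showing the update idiom used throughout the diff.
class SegmentState {
  private var _maxTimestampAndOffsetSoFar: TimestampOffset = TimestampOffset.Unknown

  def maxTimestampSoFar: Long = _maxTimestampAndOffsetSoFar.timestamp

  // Both fields are replaced together, so they cannot drift apart.
  def observe(timestamp: Long, offset: Long): Unit =
    if (timestamp > maxTimestampSoFar)
      _maxTimestampAndOffsetSoFar = TimestampOffset(timestamp, offset)
}
```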

##########
File path: core/src/main/scala/kafka/log/LogSegment.scala
##########
@@ -386,15 +385,13 @@ class LogSegment private[log] (val log: FileRecords,
   private def loadLargestTimestamp(): Unit = {
     // Get the last time index entry. If the time index is empty, it will return (-1, baseOffset)
     val lastTimeIndexEntry = timeIndex.lastEntry
-    maxTimestampSoFar = lastTimeIndexEntry.timestamp
-    offsetOfMaxTimestampSoFar = lastTimeIndexEntry.offset
+    _maxTimestampAndOffsetSoFar = TimestampOffset(lastTimeIndexEntry.timestamp, lastTimeIndexEntry.offset)

Review comment:
       fixed

##########
File path: core/src/main/scala/kafka/log/LogSegment.scala
##########
@@ -338,16 +338,15 @@ class LogSegment private[log] (val log: FileRecords,
     txnIndex.reset()
     var validBytes = 0
     var lastIndexEntry = 0
-    maxTimestampSoFar = RecordBatch.NO_TIMESTAMP
+    _maxTimestampAndOffsetSoFar = TimestampOffset.Unknown
     try {
       for (batch <- log.batches.asScala) {
         batch.ensureValid()
         ensureOffsetInRange(batch.lastOffset)
 
         // The max timestamp is exposed at the batch level, so no need to iterate the records
         if (batch.maxTimestamp > maxTimestampSoFar) {
-          maxTimestampSoFar = batch.maxTimestamp
-          offsetOfMaxTimestampSoFar = batch.lastOffset
+          _maxTimestampAndOffsetSoFar = TimestampOffset(batch.maxTimestamp, batch.lastOffset)

Review comment:
       fixed

##########
File path: core/src/main/scala/kafka/log/LogSegment.scala
##########
@@ -338,16 +338,15 @@ class LogSegment private[log] (val log: FileRecords,
     txnIndex.reset()
     var validBytes = 0
     var lastIndexEntry = 0
-    maxTimestampSoFar = RecordBatch.NO_TIMESTAMP
+    _maxTimestampAndOffsetSoFar = TimestampOffset.Unknown

Review comment:
       fixed

##########
File path: core/src/main/scala/kafka/log/LogSegment.scala
##########
@@ -584,8 +580,8 @@ class LogSegment private[log] (val log: FileRecords,
    * Close this log segment
    */
   def close(): Unit = {
-    if (_maxTimestampSoFar.nonEmpty || _offsetOfMaxTimestampSoFar.nonEmpty)
-      CoreUtils.swallow(timeIndex.maybeAppend(maxTimestampSoFar, offsetOfMaxTimestampSoFar,
+    if (_maxTimestampAndOffsetSoFar != TimestampOffset.Unknown)
+      CoreUtils.swallow(timeIndex.maybeAppend(maxTimestampAndOffsetSoFar.timestamp, maxTimestampAndOffsetSoFar.offset,

Review comment:
       fixed
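
The guard above also shows why the sentinel helps: one equality test against `TimestampOffset.Unknown` replaces checking two `Option` fields for `nonEmpty`. Under the same assumed definitions as the sketch earlier in this thread (the helper name is hypothetical):

```scala
// Hypothetical helper: the close() guard as a single sentinel comparison.
// Case-class equality compares the timestamp and offset fields at once.
def shouldFlushTimeIndex(current: TimestampOffset): Boolean =
  current != TimestampOffset.Unknown
```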

##########
File path: core/src/main/scala/kafka/log/LogSegment.scala
##########
@@ -378,23 +376,21 @@ class LogSegment private[log] (val log: FileRecords,
     log.truncateTo(validBytes)
     offsetIndex.trimToValidSize()
     // A normally closed segment always appends the biggest timestamp ever seen into log segment, we do this as well.
-    timeIndex.maybeAppend(maxTimestampSoFar, offsetOfMaxTimestampSoFar, skipFullCheck = true)
+    timeIndex.maybeAppend(maxTimestampAndOffsetSoFar.timestamp, maxTimestampAndOffsetSoFar.offset, skipFullCheck = true)

Review comment:
       fixed

##########
File path: core/src/main/scala/kafka/log/LogSegment.scala
##########
@@ -158,13 +158,12 @@ class LogSegment private[log] (val log: FileRecords,
       trace(s"Appended $appendedBytes to ${log.file} at end offset 
$largestOffset")
       // Update the in memory max timestamp and corresponding offset.
       if (largestTimestamp > maxTimestampSoFar) {
-        maxTimestampSoFar = largestTimestamp
-        offsetOfMaxTimestampSoFar = shallowOffsetOfMaxTimestamp
+        _maxTimestampAndOffsetSoFar = TimestampOffset(largestTimestamp, 
shallowOffsetOfMaxTimestamp)
       }
       // append an entry to the index (if needed)
       if (bytesSinceLastIndexEntry > indexIntervalBytes) {
         offsetIndex.append(largestOffset, physicalPosition)
-        timeIndex.maybeAppend(maxTimestampSoFar, offsetOfMaxTimestampSoFar)
+        timeIndex.maybeAppend(maxTimestampAndOffsetSoFar.timestamp, 
maxTimestampAndOffsetSoFar.offset)

Review comment:
       fixed



