blambov commented on code in PR #2267:
URL: https://github.com/apache/cassandra/pull/2267#discussion_r1183349069


##########
src/java/org/apache/cassandra/io/sstable/format/bti/BtiTableScrubber.java:
##########
@@ -0,0 +1,304 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.io.sstable.format.bti;
+
+import java.io.IOError;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.db.compaction.CompactionInterruptedException;
+import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
+import org.apache.cassandra.db.rows.UnfilteredRowIterator;
+import org.apache.cassandra.db.rows.UnfilteredRowIterators;
+import org.apache.cassandra.io.sstable.IScrubber;
+import org.apache.cassandra.io.sstable.SSTableRewriter;
+import org.apache.cassandra.io.sstable.format.SortedTableScrubber;
+import org.apache.cassandra.io.sstable.format.bti.BtiFormat.Components;
+import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.OutputHandler;
+import org.apache.cassandra.utils.Throwables;
+
+public class BtiTableScrubber extends SortedTableScrubber<BtiTableReader> 
implements IScrubber
+{
+    private final boolean isIndex;
+    private ScrubPartitionIterator indexIterator;
+
+    public BtiTableScrubber(ColumnFamilyStore cfs,
+                            LifecycleTransaction transaction,
+                            OutputHandler outputHandler,
+                            IScrubber.Options options)
+    {
+        super(cfs, transaction, outputHandler, options);
+
+        boolean hasIndexFile = 
sstable.getComponents().contains(Components.PARTITION_INDEX);
+        this.isIndex = cfs.isIndex();
+        if (!hasIndexFile)
+        {
+            // if there's any corruption in the -Data.db then partitions can't 
be skipped over. but it's worth a shot.
+            outputHandler.warn("Missing index component");

Review Comment:
   The scrub output does contain the sstable name, at the very least in the 
completion messages done by `SortedTableScrubber.outputSummary`.
   Adding the context here will likely make the output less readable.



##########
src/java/org/apache/cassandra/io/sstable/format/bti/BtiTableScrubber.java:
##########
@@ -0,0 +1,304 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.io.sstable.format.bti;
+
+import java.io.IOError;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.db.compaction.CompactionInterruptedException;
+import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
+import org.apache.cassandra.db.rows.UnfilteredRowIterator;
+import org.apache.cassandra.db.rows.UnfilteredRowIterators;
+import org.apache.cassandra.io.sstable.IScrubber;
+import org.apache.cassandra.io.sstable.SSTableRewriter;
+import org.apache.cassandra.io.sstable.format.SortedTableScrubber;
+import org.apache.cassandra.io.sstable.format.bti.BtiFormat.Components;
+import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.OutputHandler;
+import org.apache.cassandra.utils.Throwables;
+
+public class BtiTableScrubber extends SortedTableScrubber<BtiTableReader> 
implements IScrubber
+{
+    private final boolean isIndex;
+    private ScrubPartitionIterator indexIterator;
+
+    public BtiTableScrubber(ColumnFamilyStore cfs,
+                            LifecycleTransaction transaction,
+                            OutputHandler outputHandler,
+                            IScrubber.Options options)
+    {
+        super(cfs, transaction, outputHandler, options);
+
+        boolean hasIndexFile = 
sstable.getComponents().contains(Components.PARTITION_INDEX);
+        this.isIndex = cfs.isIndex();
+        if (!hasIndexFile)
+        {
+            // if there's any corruption in the -Data.db then partitions can't 
be skipped over. but it's worth a shot.
+            outputHandler.warn("Missing index component");
+        }
+
+        try
+        {
+            this.indexIterator = hasIndexFile
+                                 ? openIndexIterator()
+                                 : null;
+        }
+        catch (RuntimeException ex)
+        {
+            outputHandler.warn("Detected corruption in the index file - cannot 
open index iterator", ex);
+        }
+    }
+
+    private ScrubPartitionIterator openIndexIterator()
+    {
+        try
+        {
+            return sstable.scrubPartitionsIterator();
+        }
+        catch (Throwable t)
+        {
+            outputHandler.warn(t, "Index is unreadable, scrubbing will 
continue without index.");
+        }
+        return null;
+    }
+
+    @Override
+    protected UnfilteredRowIterator withValidation(UnfilteredRowIterator iter, 
String filename)
+    {
+        return options.checkData && !isIndex ? 
UnfilteredRowIterators.withValidation(iter, filename) : iter;
+    }
+
+    public void scrubInternal(SSTableRewriter writer)
+    {
+        assert !indexAvailable() || indexIterator.dataPosition() == 0 : 
indexIterator.dataPosition();
+
+        DecoratedKey prevKey = null;
+
+        while (!dataFile.isEOF())
+        {
+            if (scrubInfo.isStopRequested())
+                throw new 
CompactionInterruptedException(scrubInfo.getCompactionInfo());
+
+            // position in a data file where the partition starts
+            long dataStart = dataFile.getFilePointer();
+            outputHandler.debug("Reading row at %d", dataStart);
+
+            DecoratedKey key = null;
+            Throwable keyReadError = null;
+            try
+            {
+                ByteBuffer raw = ByteBufferUtil.readWithShortLength(dataFile);
+                if (!cfs.metadata.getLocal().isIndex())
+                    cfs.metadata.getLocal().partitionKeyType.validate(raw);
+                key = sstable.decorateKey(raw);
+            }
+            catch (Throwable th)
+            {
+                keyReadError = th;
+                throwIfFatal(th);
+                // check for null key below
+            }
+
+            // position of the partition in a data file, it points to the 
beginning of the partition key
+            long dataStartFromIndex = -1;
+            // size of the partition (including partition key)
+            long dataSizeFromIndex = -1;
+            ByteBuffer currentIndexKey = null;
+            if (indexAvailable())
+            {
+                currentIndexKey = indexIterator.key();
+                dataStartFromIndex = indexIterator.dataPosition();
+                if (!indexIterator.isExhausted())
+                {
+                    try
+                    {
+                        indexIterator.advance();
+                        if (!indexIterator.isExhausted())
+                            dataSizeFromIndex = indexIterator.dataPosition() - 
dataStartFromIndex;
+                    }
+                    catch (Throwable th)
+                    {
+                        throwIfFatal(th);
+                        outputHandler.warn(th,
+                                           "Failed to advance to the next 
index position. Index is corrupted. " +
+                                           "Continuing without the index. Last 
position read is %d.",
+                                           indexIterator.dataPosition());
+                        indexIterator.close();
+                        indexIterator = null;
+                        currentIndexKey = null;
+                        dataStartFromIndex = -1;
+                        dataSizeFromIndex = -1;

Review Comment:
   I prefer to reset all of the index-related state variables here.



##########
src/java/org/apache/cassandra/io/sstable/format/bti/BtiTableScrubber.java:
##########
@@ -0,0 +1,304 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.io.sstable.format.bti;
+
+import java.io.IOError;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.db.compaction.CompactionInterruptedException;
+import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
+import org.apache.cassandra.db.rows.UnfilteredRowIterator;
+import org.apache.cassandra.db.rows.UnfilteredRowIterators;
+import org.apache.cassandra.io.sstable.IScrubber;
+import org.apache.cassandra.io.sstable.SSTableRewriter;
+import org.apache.cassandra.io.sstable.format.SortedTableScrubber;
+import org.apache.cassandra.io.sstable.format.bti.BtiFormat.Components;
+import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.OutputHandler;
+import org.apache.cassandra.utils.Throwables;
+
+public class BtiTableScrubber extends SortedTableScrubber<BtiTableReader> 
implements IScrubber
+{
+    private final boolean isIndex;
+    private ScrubPartitionIterator indexIterator;
+
+    public BtiTableScrubber(ColumnFamilyStore cfs,
+                            LifecycleTransaction transaction,
+                            OutputHandler outputHandler,
+                            IScrubber.Options options)
+    {
+        super(cfs, transaction, outputHandler, options);
+
+        boolean hasIndexFile = 
sstable.getComponents().contains(Components.PARTITION_INDEX);
+        this.isIndex = cfs.isIndex();
+        if (!hasIndexFile)
+        {
+            // if there's any corruption in the -Data.db then partitions can't 
be skipped over. but it's worth a shot.
+            outputHandler.warn("Missing index component");
+        }
+
+        try
+        {
+            this.indexIterator = hasIndexFile
+                                 ? openIndexIterator()
+                                 : null;
+        }
+        catch (RuntimeException ex)
+        {
+            outputHandler.warn("Detected corruption in the index file - cannot 
open index iterator", ex);
+        }
+    }
+
+    private ScrubPartitionIterator openIndexIterator()
+    {
+        try
+        {
+            return sstable.scrubPartitionsIterator();
+        }
+        catch (Throwable t)
+        {
+            outputHandler.warn(t, "Index is unreadable, scrubbing will 
continue without index.");
+        }
+        return null;
+    }
+
+    @Override
+    protected UnfilteredRowIterator withValidation(UnfilteredRowIterator iter, 
String filename)
+    {
+        return options.checkData && !isIndex ? 
UnfilteredRowIterators.withValidation(iter, filename) : iter;
+    }
+
+    public void scrubInternal(SSTableRewriter writer)

Review Comment:
   Done



##########
src/java/org/apache/cassandra/io/sstable/format/bti/BtiTableScrubber.java:
##########
@@ -0,0 +1,304 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.io.sstable.format.bti;
+
+import java.io.IOError;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.db.compaction.CompactionInterruptedException;
+import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
+import org.apache.cassandra.db.rows.UnfilteredRowIterator;
+import org.apache.cassandra.db.rows.UnfilteredRowIterators;
+import org.apache.cassandra.io.sstable.IScrubber;
+import org.apache.cassandra.io.sstable.SSTableRewriter;
+import org.apache.cassandra.io.sstable.format.SortedTableScrubber;
+import org.apache.cassandra.io.sstable.format.bti.BtiFormat.Components;
+import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.OutputHandler;
+import org.apache.cassandra.utils.Throwables;
+
+public class BtiTableScrubber extends SortedTableScrubber<BtiTableReader> 
implements IScrubber
+{
+    private final boolean isIndex;
+    private ScrubPartitionIterator indexIterator;
+
+    public BtiTableScrubber(ColumnFamilyStore cfs,
+                            LifecycleTransaction transaction,
+                            OutputHandler outputHandler,
+                            IScrubber.Options options)
+    {
+        super(cfs, transaction, outputHandler, options);
+
+        boolean hasIndexFile = 
sstable.getComponents().contains(Components.PARTITION_INDEX);
+        this.isIndex = cfs.isIndex();
+        if (!hasIndexFile)
+        {
+            // if there's any corruption in the -Data.db then partitions can't 
be skipped over. but it's worth a shot.
+            outputHandler.warn("Missing index component");
+        }
+
+        try
+        {
+            this.indexIterator = hasIndexFile
+                                 ? openIndexIterator()
+                                 : null;
+        }
+        catch (RuntimeException ex)
+        {
+            outputHandler.warn("Detected corruption in the index file - cannot 
open index iterator", ex);
+        }
+    }
+
+    private ScrubPartitionIterator openIndexIterator()
+    {
+        try
+        {
+            return sstable.scrubPartitionsIterator();
+        }
+        catch (Throwable t)
+        {
+            outputHandler.warn(t, "Index is unreadable, scrubbing will 
continue without index.");
+        }
+        return null;
+    }
+
+    @Override
+    protected UnfilteredRowIterator withValidation(UnfilteredRowIterator iter, 
String filename)
+    {
+        return options.checkData && !isIndex ? 
UnfilteredRowIterators.withValidation(iter, filename) : iter;
+    }
+
+    public void scrubInternal(SSTableRewriter writer)
+    {
+        assert !indexAvailable() || indexIterator.dataPosition() == 0 : 
indexIterator.dataPosition();
+
+        DecoratedKey prevKey = null;
+
+        while (!dataFile.isEOF())
+        {
+            if (scrubInfo.isStopRequested())
+                throw new 
CompactionInterruptedException(scrubInfo.getCompactionInfo());
+
+            // position in a data file where the partition starts
+            long dataStart = dataFile.getFilePointer();
+            outputHandler.debug("Reading row at %d", dataStart);
+
+            DecoratedKey key = null;
+            Throwable keyReadError = null;
+            try
+            {
+                ByteBuffer raw = ByteBufferUtil.readWithShortLength(dataFile);
+                if (!cfs.metadata.getLocal().isIndex())
+                    cfs.metadata.getLocal().partitionKeyType.validate(raw);
+                key = sstable.decorateKey(raw);
+            }
+            catch (Throwable th)
+            {
+                keyReadError = th;
+                throwIfFatal(th);
+                // check for null key below
+            }
+
+            // position of the partition in a data file, it points to the 
beginning of the partition key
+            long dataStartFromIndex = -1;
+            // size of the partition (including partition key)
+            long dataSizeFromIndex = -1;
+            ByteBuffer currentIndexKey = null;
+            if (indexAvailable())
+            {
+                currentIndexKey = indexIterator.key();
+                dataStartFromIndex = indexIterator.dataPosition();
+                if (!indexIterator.isExhausted())
+                {
+                    try
+                    {
+                        indexIterator.advance();
+                        if (!indexIterator.isExhausted())
+                            dataSizeFromIndex = indexIterator.dataPosition() - 
dataStartFromIndex;
+                    }
+                    catch (Throwable th)
+                    {
+                        throwIfFatal(th);
+                        outputHandler.warn(th,
+                                           "Failed to advance to the next 
index position. Index is corrupted. " +
+                                           "Continuing without the index. Last 
position read is %d.",
+                                           indexIterator.dataPosition());
+                        indexIterator.close();
+                        indexIterator = null;
+                        currentIndexKey = null;
+                        dataStartFromIndex = -1;
+                        dataSizeFromIndex = -1;
+                    }
+                }
+            }
+
+            String keyName = key == null ? "(unreadable key)" : keyString(key);
+            outputHandler.debug("partition %s is %s", keyName, 
FBUtilities.prettyPrintMemory(dataSizeFromIndex));
+
+            try
+            {
+                if (key == null)
+                    throw new IOError(new IOException("Unable to read 
partition key from data file", keyReadError));
+
+                if (currentIndexKey != null && 
!key.getKey().equals(currentIndexKey))
+                {
+                    throw new IOError(new IOException(String.format("Key from 
data file (%s) does not match key from index file (%s)",
+                                                                    
//ByteBufferUtil.bytesToHex(key.getKey()), 
ByteBufferUtil.bytesToHex(currentIndexKey))));
+                                                                    "_too 
big_", ByteBufferUtil.bytesToHex(currentIndexKey))));
+                }
+
+                if (indexIterator != null && dataSizeFromIndex > 
dataFile.length())
+                    throw new IOError(new IOException("Impossible partition 
size (greater than file length): " + dataSizeFromIndex));
+
+                if (indexIterator != null && dataStart != dataStartFromIndex)
+                    outputHandler.warn("Data file partition position %d 
differs from index file row position %d", dataStart, dataStartFromIndex);
+
+                if (tryAppend(prevKey, key, writer))
+                    prevKey = key;
+            }
+            catch (Throwable th)
+            {
+                throwIfFatal(th);
+                outputHandler.warn(th, "Error reading partition %s (stacktrace 
follows):", keyName);
+
+                if (currentIndexKey != null
+                    && (key == null || !key.getKey().equals(currentIndexKey) 
|| dataStart != dataStartFromIndex))
+                {
+
+                    // position where the row should start in a data file 
(right after the partition key)
+                    long rowStartFromIndex = dataStartFromIndex + 
TypeSizes.SHORT_SIZE + currentIndexKey.remaining();
+                    outputHandler.output("Retrying from partition index; data 
is %s bytes starting at %s",
+                                         dataSizeFromIndex, rowStartFromIndex);
+                    key = sstable.decorateKey(currentIndexKey);
+                    try
+                    {
+                        if (!cfs.metadata.getLocal().isIndex())
+                            
cfs.metadata.getLocal().partitionKeyType.validate(key.getKey());
+                        dataFile.seek(rowStartFromIndex);
+
+                        if (tryAppend(prevKey, key, writer))
+                            prevKey = key;
+                    }
+                    catch (Throwable th2)
+                    {
+                        throwIfFatal(th2);
+                        throwIfCannotContinue(key, th2);
+
+                        outputHandler.warn(th2, "Retry failed too. Skipping to 
next partition (retry's stacktrace follows)");
+                        badPartitions++;
+                        if (!seekToNextPartition())
+                            break;
+                    }
+                }
+                else
+                {
+                    throwIfCannotContinue(key, th);
+
+                    badPartitions++;
+                    if (indexIterator != null)
+                    {
+                        outputHandler.warn("Partition starting at position %d 
is unreadable; skipping to next", dataStart);
+                        if (!seekToNextPartition())
+                            break;
+                    }
+                    else
+                    {
+                        outputHandler.warn("Unrecoverable error while 
scrubbing %s." +
+                                           "Scrubbing cannot continue. The 
sstable will be marked for deletion. " +
+                                           "You can attempt manual recovery 
from the pre-scrub snapshot. " +
+                                           "You can also run nodetool repair 
to transfer the data from a healthy replica, if any.",
+                                           sstable);
+                        // There's no way to resync and continue. Give up.
+                        break;
+                    }
+                }
+            }
+        }
+    }
+
+
+    private boolean indexAvailable()
+    {
+        return indexIterator != null && !indexIterator.isExhausted();
+    }
+
+    private boolean seekToNextPartition()
+    {
+        while (indexAvailable())
+        {
+            long nextRowPositionFromIndex = indexIterator.dataPosition();
+
+            try
+            {
+                dataFile.seek(nextRowPositionFromIndex);
+                return true;
+            }
+            catch (Throwable th)
+            {
+                throwIfFatal(th);
+                outputHandler.warn(th, "Failed to seek to next row position 
%d", nextRowPositionFromIndex);
+                badPartitions++;
+            }
+
+            try
+            {
+                indexIterator.advance();
+            }
+            catch (Throwable th)
+            {
+                outputHandler.warn(th, "Failed to go to the next entry in 
index");
+                throw Throwables.cleaned(th);
+            }
+        }
+
+        return false;
+    }
+
+    @Override
+    protected void throwIfCannotContinue(DecoratedKey key, Throwable th)
+    {
+        if (isIndex)
+        {
+            outputHandler.warn("An error occurred while scrubbing the 
partition with key '%s' for an index table. " +
+                               "Scrubbing will abort for this table and the 
index will be rebuilt.", keyString(key));
+            throw new IOError(th);
+        }
+
+        super.throwIfCannotContinue(key, th);
+    }
+
+    public void close()

Review Comment:
   Done



##########
src/java/org/apache/cassandra/io/sstable/format/bti/PartitionIterator.java:
##########
@@ -0,0 +1,246 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.io.sstable.format.bti;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.PartitionPosition;
+import org.apache.cassandra.dht.IPartitioner;
+import org.apache.cassandra.io.sstable.KeyReader;
+import org.apache.cassandra.io.util.FileDataInput;
+import org.apache.cassandra.io.util.FileHandle;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.Throwables;
+
+import static 
org.apache.cassandra.utils.FBUtilities.immutableListWithFilteredNulls;
+
+class PartitionIterator extends PartitionIndex.IndexPosIterator implements 
KeyReader
+{
+    private final PartitionIndex partitionIndex;
+    private final IPartitioner partitioner;
+    private final PartitionPosition limit;
+    private final int exclusiveLimit;
+    private final FileHandle dataFile;
+    private final FileHandle rowIndexFile;
+
+    private FileDataInput dataInput;
+    private FileDataInput indexInput;
+
+    private DecoratedKey currentKey;
+    private TrieIndexEntry currentEntry;
+    private DecoratedKey nextKey;
+    private TrieIndexEntry nextEntry;
+
+    @SuppressWarnings({ "resource", "RedundantSuppression" })
+    static PartitionIterator create(PartitionIndex partitionIndex, 
IPartitioner partitioner, FileHandle rowIndexFile, FileHandle dataFile,
+                                    PartitionPosition left, int inclusiveLeft, 
PartitionPosition right, int exclusiveRight) throws IOException
+    {
+        PartitionIterator partitionIterator = null;
+        PartitionIndex partitionIndexCopy = null;
+        FileHandle dataFileCopy = null;
+        FileHandle rowIndexFileCopy = null;
+
+        try
+        {
+            partitionIndexCopy = partitionIndex.sharedCopy();
+            dataFileCopy = dataFile.sharedCopy();
+            rowIndexFileCopy = rowIndexFile.sharedCopy();
+
+            partitionIterator = new PartitionIterator(partitionIndexCopy, 
partitioner, rowIndexFileCopy, dataFileCopy, left, right, exclusiveRight);
+
+            partitionIterator.readNext();
+            // Because the index stores prefixes, the first value can be in 
any relationship with the left bound.
+            if (partitionIterator.nextKey != null && 
!(partitionIterator.nextKey.compareTo(left) > inclusiveLeft))
+            {
+                partitionIterator.readNext();
+            }
+            partitionIterator.advance();

Review Comment:
   Yes. The first and last entries need to be rechecked against the boundaries, and 
we do the latter by reading one entry ahead.
   
   Added class JavaDoc.



##########
src/java/org/apache/cassandra/io/sstable/format/bti/SSTableReversedIterator.java:
##########
@@ -0,0 +1,293 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.io.sstable.format.bti;
+
+import java.io.IOException;
+import java.util.NoSuchElementException;
+
+import com.carrotsearch.hppc.LongStack;
+import org.apache.cassandra.db.ClusteringBound;
+import org.apache.cassandra.db.ClusteringComparator;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.Slice;
+import org.apache.cassandra.db.Slices;
+import org.apache.cassandra.db.UnfilteredValidation;
+import org.apache.cassandra.db.filter.ColumnFilter;
+import org.apache.cassandra.db.rows.RangeTombstoneBoundMarker;
+import org.apache.cassandra.db.rows.RangeTombstoneMarker;
+import org.apache.cassandra.db.rows.Unfiltered;
+import org.apache.cassandra.io.sstable.AbstractRowIndexEntry;
+import org.apache.cassandra.io.sstable.AbstractSSTableIterator;
+import org.apache.cassandra.io.sstable.format.bti.RowIndexReader.IndexInfo;
+import org.apache.cassandra.io.util.FileDataInput;
+import org.apache.cassandra.io.util.FileHandle;
+
+/**
+ * Unfiltered row iterator over a BTI SSTable that returns rows in reverse 
order.
+ */
+class SSTableReversedIterator extends AbstractSSTableIterator<TrieIndexEntry>
+{
+    /**
+     * The index of the slice being processed.
+     */
+    private int slice;
+
    /**
     * Creates an iterator over the given partition that produces unfiltereds in reverse
     * clustering order. All arguments are passed through to the {@code AbstractSSTableIterator}
     * base class.
     */
    public SSTableReversedIterator(BtiTableReader sstable,
                                   FileDataInput file,
                                   DecoratedKey key,
                                   TrieIndexEntry indexEntry,
                                   Slices slices,
                                   ColumnFilter columns,
                                   FileHandle ifile)
    {
        super(sstable, file, key, indexEntry, slices, columns, ifile);
    }
+
+    protected Reader createReaderInternal(TrieIndexEntry indexEntry, 
FileDataInput file, boolean shouldCloseFile)
+    {
+        if (indexEntry.isIndexed())
+            return new ReverseIndexedReader(indexEntry, file, shouldCloseFile);
+        else
+            return new ReverseReader(file, shouldCloseFile);
+    }
+
    /** This iterator always returns rows in reverse clustering order. */
    public boolean isReverseOrder()
    {
        return true;
    }
+
+    protected int nextSliceIndex()
+    {
+        int next = slice;
+        slice++;
+        return slices.size() - (next + 1);
+    }
+
    /** @return true while there are still requested slices left to process. */
    protected boolean hasMoreSlices()
    {
        return slice < slices.size();
    }
+
+    /**
+     * Reverse iteration is performed by going through an index block (or the 
whole partition if not indexed) forwards
+     * and storing the positions of each entry that falls within the slice in 
a stack. Reverse iteration then pops out
+     * positions and reads the entries.
+     * <p>
+     * Note: The earlier version of this was constructing an in-memory view of 
the block instead, which gives better
+     * performance on bigger queries and index blocks (due to not having to 
read disk again). With the lower
+     * granularity of the tries it makes better sense to store as little as 
possible as the beginning of the block
+     * should very rarely be in other page/chunk cache locations. This has the 
benefit of being able to answer small
+     * queries (esp. LIMIT 1) faster and with less GC churn.
+     */
+    private class ReverseReader extends AbstractReader
+    {
        // Data-file positions of the current block's entries that fall inside the slice,
        // pushed while scanning forwards so that popping yields them in reverse order.
        final LongStack rowOffsets = new LongStack();
        // Markers delimiting the current block: the close marker is emitted before its rows,
        // the open marker only once the slice is finished (see computeNext).
        RangeTombstoneMarker blockOpenMarker, blockCloseMarker;
        // Lookahead for hasNextInternal/nextInternal; null when not yet computed.
        private Unfiltered next = null;
        // Set when an entry ordered before the slice start has been seen — presumably used to
        // stop advancing to earlier blocks (TODO confirm in fillOffsets/advanceIndexBlock).
        private boolean foundLessThan;
        // File position where the partition's row data starts; -1 until the first slice is set.
        private long startPos = -1;

        private ReverseReader(FileDataInput file, boolean shouldCloseFile)
        {
            super(file, shouldCloseFile);
        }
+
+        public void setForSlice(Slice slice) throws IOException
+        {
+            // read full row and filter
+            if (startPos == -1)
+                startPos = file.getFilePointer();
+            else
+                seekToPosition(startPos);
+
+            fillOffsets(slice, true, true, Long.MAX_VALUE);
+        }
+
+        protected boolean hasNextInternal() throws IOException
+        {
+            if (next != null)
+                return true;
+            next = computeNext();
+            return next != null;
+        }
+
+        protected Unfiltered nextInternal() throws IOException
+        {
+            if (!hasNextInternal())
+                throw new NoSuchElementException();
+
+            Unfiltered toReturn = next;
+            next = null;
+            return toReturn;
+        }
+
        /**
         * Produces the next unfiltered in reverse order, or {@code null} when the slice is
         * exhausted. Per block the emission order is: the block's close marker first, then the
         * stacked row positions (popped, i.e. reversed), and — once no earlier block remains —
         * the block's open marker.
         */
        private Unfiltered computeNext() throws IOException
        {
            Unfiltered toReturn;
            do
            {
                // A marker closing the block is emitted before any of the block's rows.
                if (blockCloseMarker != null)
                {
                    toReturn = blockCloseMarker;
                    blockCloseMarker = null;
                    return toReturn;
                }
                // Positions were pushed while scanning forwards, so popping walks the
                // block's entries backwards.
                while (!rowOffsets.isEmpty())
                {
                    seekToPosition(rowOffsets.pop());
                    boolean hasNext = deserializer.hasNext();
                    assert hasNext;
                    toReturn = deserializer.readNext();
                    UnfilteredValidation.maybeValidateUnfiltered(toReturn, metadata(), key, sstable);
                    // We may get empty row for the same reason expressed on UnfilteredSerializer.deserializeOne.
                    if (!toReturn.isEmpty())
                        return toReturn;
                }
            }
            while (!foundLessThan && advanceIndexBlock());

            // open marker to be output only as slice is finished
            if (blockOpenMarker != null)
            {
                toReturn = blockOpenMarker;
                blockOpenMarker = null;
                return toReturn;
            }
            return null;
        }
+
        /**
         * Moves to the preceding index block, if any. The plain (non-indexed) reader covers
         * the whole partition in a single pass, so there is never another block to visit;
         * ReverseIndexedReader overrides this.
         */
        protected boolean advanceIndexBlock() throws IOException
        {
            return false;
        }
+
+        void fillOffsets(Slice slice, boolean filterStart, boolean filterEnd, 
long stopPosition) throws IOException
+        {
+            filterStart &= !slice.start().equals(ClusteringBound.BOTTOM);
+            filterEnd &= !slice.end().equals(ClusteringBound.TOP);
+            long currentPosition = -1;
+
+            ClusteringBound start = slice.start();

Review Comment:
   Done



##########
src/java/org/apache/cassandra/io/sstable/format/bti/SSTableReversedIterator.java:
##########
@@ -0,0 +1,293 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.io.sstable.format.bti;
+
+import java.io.IOException;
+import java.util.NoSuchElementException;
+
+import com.carrotsearch.hppc.LongStack;
+import org.apache.cassandra.db.ClusteringBound;
+import org.apache.cassandra.db.ClusteringComparator;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.Slice;
+import org.apache.cassandra.db.Slices;
+import org.apache.cassandra.db.UnfilteredValidation;
+import org.apache.cassandra.db.filter.ColumnFilter;
+import org.apache.cassandra.db.rows.RangeTombstoneBoundMarker;
+import org.apache.cassandra.db.rows.RangeTombstoneMarker;
+import org.apache.cassandra.db.rows.Unfiltered;
+import org.apache.cassandra.io.sstable.AbstractRowIndexEntry;
+import org.apache.cassandra.io.sstable.AbstractSSTableIterator;
+import org.apache.cassandra.io.sstable.format.bti.RowIndexReader.IndexInfo;
+import org.apache.cassandra.io.util.FileDataInput;
+import org.apache.cassandra.io.util.FileHandle;
+
+/**
+ * Unfiltered row iterator over a BTI SSTable that returns rows in reverse 
order.
+ */
+class SSTableReversedIterator extends AbstractSSTableIterator<TrieIndexEntry>
+{
+    /**
+     * The index of the slice being processed.
+     */
+    private int slice;
+
    /**
     * Creates an iterator over the given partition that produces unfiltereds in reverse
     * clustering order. All arguments are passed through to the {@code AbstractSSTableIterator}
     * base class.
     */
    public SSTableReversedIterator(BtiTableReader sstable,
                                   FileDataInput file,
                                   DecoratedKey key,
                                   TrieIndexEntry indexEntry,
                                   Slices slices,
                                   ColumnFilter columns,
                                   FileHandle ifile)
    {
        super(sstable, file, key, indexEntry, slices, columns, ifile);
    }
+
+    protected Reader createReaderInternal(TrieIndexEntry indexEntry, 
FileDataInput file, boolean shouldCloseFile)
+    {
+        if (indexEntry.isIndexed())
+            return new ReverseIndexedReader(indexEntry, file, shouldCloseFile);
+        else
+            return new ReverseReader(file, shouldCloseFile);
+    }
+
    /** This iterator always returns rows in reverse clustering order. */
    public boolean isReverseOrder()
    {
        return true;
    }
+
+    protected int nextSliceIndex()
+    {
+        int next = slice;
+        slice++;
+        return slices.size() - (next + 1);
+    }
+
    /** @return true while there are still requested slices left to process. */
    protected boolean hasMoreSlices()
    {
        return slice < slices.size();
    }
+
+    /**
+     * Reverse iteration is performed by going through an index block (or the 
whole partition if not indexed) forwards
+     * and storing the positions of each entry that falls within the slice in 
a stack. Reverse iteration then pops out
+     * positions and reads the entries.
+     * <p>
+     * Note: The earlier version of this was constructing an in-memory view of 
the block instead, which gives better
+     * performance on bigger queries and index blocks (due to not having to 
read disk again). With the lower
+     * granularity of the tries it makes better sense to store as little as 
possible as the beginning of the block
+     * should very rarely be in other page/chunk cache locations. This has the 
benefit of being able to answer small
+     * queries (esp. LIMIT 1) faster and with less GC churn.
+     */
+    private class ReverseReader extends AbstractReader
+    {
        // Data-file positions of the current block's entries that fall inside the slice,
        // pushed while scanning forwards so that popping yields them in reverse order.
        final LongStack rowOffsets = new LongStack();
        // Markers delimiting the current block: the close marker is emitted before its rows,
        // the open marker only once the slice is finished (see computeNext).
        RangeTombstoneMarker blockOpenMarker, blockCloseMarker;
        // Lookahead for hasNextInternal/nextInternal; null when not yet computed.
        private Unfiltered next = null;
        // Set when an entry ordered before the slice start has been seen — presumably used to
        // stop advancing to earlier blocks (TODO confirm in fillOffsets/advanceIndexBlock).
        private boolean foundLessThan;
        // File position where the partition's row data starts; -1 until the first slice is set.
        private long startPos = -1;

        private ReverseReader(FileDataInput file, boolean shouldCloseFile)
        {
            super(file, shouldCloseFile);
        }
+
+        public void setForSlice(Slice slice) throws IOException
+        {
+            // read full row and filter
+            if (startPos == -1)
+                startPos = file.getFilePointer();
+            else
+                seekToPosition(startPos);
+
+            fillOffsets(slice, true, true, Long.MAX_VALUE);
+        }
+
+        protected boolean hasNextInternal() throws IOException
+        {
+            if (next != null)
+                return true;
+            next = computeNext();
+            return next != null;
+        }
+
+        protected Unfiltered nextInternal() throws IOException

Review Comment:
   Done



##########
src/java/org/apache/cassandra/io/sstable/format/bti/BtiTableScrubber.java:
##########
@@ -0,0 +1,304 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.io.sstable.format.bti;
+
+import java.io.IOError;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.db.compaction.CompactionInterruptedException;
+import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
+import org.apache.cassandra.db.rows.UnfilteredRowIterator;
+import org.apache.cassandra.db.rows.UnfilteredRowIterators;
+import org.apache.cassandra.io.sstable.IScrubber;
+import org.apache.cassandra.io.sstable.SSTableRewriter;
+import org.apache.cassandra.io.sstable.format.SortedTableScrubber;
+import org.apache.cassandra.io.sstable.format.bti.BtiFormat.Components;
+import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.OutputHandler;
+import org.apache.cassandra.utils.Throwables;
+
+public class BtiTableScrubber extends SortedTableScrubber<BtiTableReader> 
implements IScrubber
+{
+    private final boolean isIndex;
+    private ScrubPartitionIterator indexIterator;
+
    /**
     * Creates a scrubber for a BTI-format sstable. The partition index is optional: without
     * it corrupted partitions cannot be skipped over, but the data file can still be walked
     * sequentially.
     */
    public BtiTableScrubber(ColumnFamilyStore cfs,
                            LifecycleTransaction transaction,
                            OutputHandler outputHandler,
                            IScrubber.Options options)
    {
        super(cfs, transaction, outputHandler, options);

        boolean hasIndexFile = sstable.getComponents().contains(Components.PARTITION_INDEX);
        this.isIndex = cfs.isIndex();
        if (!hasIndexFile)
        {
            // if there's any corruption in the -Data.db then partitions can't be skipped over. but it's worth a shot.
            outputHandler.warn("Missing index component");
        }

        try
        {
            this.indexIterator = hasIndexFile
                                 ? openIndexIterator()
                                 : null;
        }
        catch (RuntimeException ex)
        {
            // NOTE(review): openIndexIterator already catches Throwable and returns null, so
            // this handler looks unreachable — confirm whether it can be removed.
            outputHandler.warn("Detected corruption in the index file - cannot open index iterator", ex);
        }
    }
+
+    private ScrubPartitionIterator openIndexIterator()
+    {
+        try
+        {
+            return sstable.scrubPartitionsIterator();
+        }
+        catch (Throwable t)
+        {
+            outputHandler.warn(t, "Index is unreadable, scrubbing will 
continue without index.");
+        }
+        return null;
+    }
+
+    @Override
+    protected UnfilteredRowIterator withValidation(UnfilteredRowIterator iter, 
String filename)
+    {
+        return options.checkData && !isIndex ? 
UnfilteredRowIterators.withValidation(iter, filename) : iter;
+    }
+
+    public void scrubInternal(SSTableRewriter writer)
+    {
+        assert !indexAvailable() || indexIterator.dataPosition() == 0 : 
indexIterator.dataPosition();

Review Comment:
   Added message and also changed to continue without index in this case.



##########
src/java/org/apache/cassandra/io/sstable/format/bti/BtiTableScrubber.java:
##########
@@ -0,0 +1,304 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.io.sstable.format.bti;
+
+import java.io.IOError;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.db.compaction.CompactionInterruptedException;
+import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
+import org.apache.cassandra.db.rows.UnfilteredRowIterator;
+import org.apache.cassandra.db.rows.UnfilteredRowIterators;
+import org.apache.cassandra.io.sstable.IScrubber;
+import org.apache.cassandra.io.sstable.SSTableRewriter;
+import org.apache.cassandra.io.sstable.format.SortedTableScrubber;
+import org.apache.cassandra.io.sstable.format.bti.BtiFormat.Components;
+import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.OutputHandler;
+import org.apache.cassandra.utils.Throwables;
+
+public class BtiTableScrubber extends SortedTableScrubber<BtiTableReader> 
implements IScrubber
+{
+    private final boolean isIndex;
+    private ScrubPartitionIterator indexIterator;
+
    /**
     * Creates a scrubber for a BTI-format sstable. The partition index is optional: without
     * it corrupted partitions cannot be skipped over, but the data file can still be walked
     * sequentially.
     */
    public BtiTableScrubber(ColumnFamilyStore cfs,
                            LifecycleTransaction transaction,
                            OutputHandler outputHandler,
                            IScrubber.Options options)
    {
        super(cfs, transaction, outputHandler, options);

        boolean hasIndexFile = sstable.getComponents().contains(Components.PARTITION_INDEX);
        this.isIndex = cfs.isIndex();
        if (!hasIndexFile)
        {
            // if there's any corruption in the -Data.db then partitions can't be skipped over. but it's worth a shot.
            outputHandler.warn("Missing index component");
        }

        try
        {
            this.indexIterator = hasIndexFile
                                 ? openIndexIterator()
                                 : null;
        }
        catch (RuntimeException ex)
        {
            // NOTE(review): openIndexIterator already catches Throwable and returns null, so
            // this handler looks unreachable — confirm whether it can be removed.
            outputHandler.warn("Detected corruption in the index file - cannot open index iterator", ex);
        }
    }
+
+    private ScrubPartitionIterator openIndexIterator()
+    {
+        try
+        {
+            return sstable.scrubPartitionsIterator();
+        }
+        catch (Throwable t)
+        {
+            outputHandler.warn(t, "Index is unreadable, scrubbing will 
continue without index.");
+        }
+        return null;
+    }
+
+    @Override
+    protected UnfilteredRowIterator withValidation(UnfilteredRowIterator iter, 
String filename)
+    {
+        return options.checkData && !isIndex ? 
UnfilteredRowIterators.withValidation(iter, filename) : iter;
+    }
+
+    public void scrubInternal(SSTableRewriter writer)
+    {
+        assert !indexAvailable() || indexIterator.dataPosition() == 0 : 
indexIterator.dataPosition();
+
+        DecoratedKey prevKey = null;
+
+        while (!dataFile.isEOF())
+        {
+            if (scrubInfo.isStopRequested())
+                throw new 
CompactionInterruptedException(scrubInfo.getCompactionInfo());
+
+            // position in a data file where the partition starts
+            long dataStart = dataFile.getFilePointer();
+            outputHandler.debug("Reading row at %d", dataStart);
+
+            DecoratedKey key = null;
+            Throwable keyReadError = null;
+            try
+            {
+                ByteBuffer raw = ByteBufferUtil.readWithShortLength(dataFile);
+                if (!cfs.metadata.getLocal().isIndex())

Review Comment:
   Changed to `isIndex` (`metadata.get()` and `getLocal()` can only be 
different in details like compression) and cached the partition key type as 
well.



##########
src/java/org/apache/cassandra/io/sstable/format/bti/ScrubPartitionIterator.java:
##########
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.io.sstable.format.bti;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * Iterator over the partitions of an sstable used for scrubbing.
+ * <p>
+ * The difference between this and {@link PartitionIterator} is that this only uses information present in the index file

Review Comment:
   Done



##########
src/java/org/apache/cassandra/io/sstable/format/bti/ScrubPartitionIterator.java:
##########
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.io.sstable.format.bti;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * Iterator over the partitions of an sstable used for scrubbing.
+ * <p>
+ * The difference between this and {@link PartitionIterator} is that this only uses information present in the index file
+ * and does not try to read keys of the data file (for the trie index format), 
thus {@link #key()} can be null.
+ * <p>
+ * Starts advanced to a position, {@link #advance()} is to be used to go to 
next, and iteration completes when
+ * {@link #dataPosition()} == -1.
+ */
+public interface ScrubPartitionIterator extends Closeable
+{
+    /**
+     * Serialized partition key or {@code null} if the iterator reached the 
end of the index or if the key may not
+     * be fully retrieved from the index file.
+     * @return the serialized partition key, or {@code null} if it cannot be retrieved from the index

Review Comment:
   Done



##########
src/java/org/apache/cassandra/io/sstable/format/bti/BtiTableScrubber.java:
##########
@@ -0,0 +1,304 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.io.sstable.format.bti;
+
+import java.io.IOError;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.TypeSizes;
+import org.apache.cassandra.db.compaction.CompactionInterruptedException;
+import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
+import org.apache.cassandra.db.rows.UnfilteredRowIterator;
+import org.apache.cassandra.db.rows.UnfilteredRowIterators;
+import org.apache.cassandra.io.sstable.IScrubber;
+import org.apache.cassandra.io.sstable.SSTableRewriter;
+import org.apache.cassandra.io.sstable.format.SortedTableScrubber;
+import org.apache.cassandra.io.sstable.format.bti.BtiFormat.Components;
+import org.apache.cassandra.io.util.FileUtils;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.OutputHandler;
+import org.apache.cassandra.utils.Throwables;
+
+public class BtiTableScrubber extends SortedTableScrubber<BtiTableReader> 
implements IScrubber
+{
+    private final boolean isIndex;
+    private ScrubPartitionIterator indexIterator;
+
    /**
     * Creates a scrubber for a BTI-format sstable. The partition index is optional: without
     * it corrupted partitions cannot be skipped over, but the data file can still be walked
     * sequentially.
     */
    public BtiTableScrubber(ColumnFamilyStore cfs,
                            LifecycleTransaction transaction,
                            OutputHandler outputHandler,
                            IScrubber.Options options)
    {
        super(cfs, transaction, outputHandler, options);

        boolean hasIndexFile = sstable.getComponents().contains(Components.PARTITION_INDEX);
        this.isIndex = cfs.isIndex();
        if (!hasIndexFile)
        {
            // if there's any corruption in the -Data.db then partitions can't be skipped over. but it's worth a shot.
            outputHandler.warn("Missing index component");
        }

        try
        {
            this.indexIterator = hasIndexFile
                                 ? openIndexIterator()
                                 : null;
        }
        catch (RuntimeException ex)
        {
            // NOTE(review): openIndexIterator already catches Throwable and returns null, so
            // this handler looks unreachable — confirm whether it can be removed.
            outputHandler.warn("Detected corruption in the index file - cannot open index iterator", ex);
        }
    }
+
+    private ScrubPartitionIterator openIndexIterator()
+    {
+        try
+        {
+            return sstable.scrubPartitionsIterator();
+        }
+        catch (Throwable t)
+        {
+            outputHandler.warn(t, "Index is unreadable, scrubbing will 
continue without index.");
+        }
+        return null;
+    }
+
+    @Override
+    protected UnfilteredRowIterator withValidation(UnfilteredRowIterator iter, 
String filename)
+    {
+        return options.checkData && !isIndex ? 
UnfilteredRowIterators.withValidation(iter, filename) : iter;
+    }
+
+    public void scrubInternal(SSTableRewriter writer)
+    {
+        assert !indexAvailable() || indexIterator.dataPosition() == 0 : 
indexIterator.dataPosition();
+
+        DecoratedKey prevKey = null;
+
+        while (!dataFile.isEOF())
+        {
+            if (scrubInfo.isStopRequested())
+                throw new 
CompactionInterruptedException(scrubInfo.getCompactionInfo());
+
+            // position in a data file where the partition starts
+            long dataStart = dataFile.getFilePointer();
+            outputHandler.debug("Reading row at %d", dataStart);
+
+            DecoratedKey key = null;
+            Throwable keyReadError = null;
+            try
+            {
+                ByteBuffer raw = ByteBufferUtil.readWithShortLength(dataFile);
+                if (!cfs.metadata.getLocal().isIndex())
+                    cfs.metadata.getLocal().partitionKeyType.validate(raw);
+                key = sstable.decorateKey(raw);
+            }
+            catch (Throwable th)
+            {
+                keyReadError = th;
+                throwIfFatal(th);
+                // check for null key below
+            }
+
+            // position of the partition in a data file, it points to the 
beginning of the partition key
+            long dataStartFromIndex = -1;
+            // size of the partition (including partition key)
+            long dataSizeFromIndex = -1;
+            ByteBuffer currentIndexKey = null;
+            if (indexAvailable())
+            {
+                currentIndexKey = indexIterator.key();
+                dataStartFromIndex = indexIterator.dataPosition();
+                if (!indexIterator.isExhausted())
+                {
+                    try
+                    {
+                        indexIterator.advance();
+                        if (!indexIterator.isExhausted())
+                            dataSizeFromIndex = indexIterator.dataPosition() - 
dataStartFromIndex;
+                    }
+                    catch (Throwable th)
+                    {
+                        throwIfFatal(th);
+                        outputHandler.warn(th,
+                                           "Failed to advance to the next 
index position. Index is corrupted. " +
+                                           "Continuing without the index. Last 
position read is %d.",
+                                           indexIterator.dataPosition());
+                        indexIterator.close();
+                        indexIterator = null;
+                        currentIndexKey = null;
+                        dataStartFromIndex = -1;
+                        dataSizeFromIndex = -1;
+                    }
+                }
+            }
+
+            String keyName = key == null ? "(unreadable key)" : keyString(key);
+            outputHandler.debug("partition %s is %s", keyName, 
FBUtilities.prettyPrintMemory(dataSizeFromIndex));
+
+            try
+            {
+                if (key == null)
+                    throw new IOError(new IOException("Unable to read 
partition key from data file", keyReadError));
+
+                if (currentIndexKey != null && 
!key.getKey().equals(currentIndexKey))
+                {
+                    throw new IOError(new IOException(String.format("Key from 
data file (%s) does not match key from index file (%s)",
+                                                                    
//ByteBufferUtil.bytesToHex(key.getKey()), 
ByteBufferUtil.bytesToHex(currentIndexKey))));

Review Comment:
   Hm... this was introduced in CASSANDRA-8099 and is probably an oversight 
there.



##########
src/java/org/apache/cassandra/io/sstable/format/bti/ScrubIterator.java:
##########
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.io.sstable.format.bti;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.cassandra.io.util.FileDataInput;
+import org.apache.cassandra.io.util.FileHandle;
+import org.apache.cassandra.utils.ByteBufferUtil;
+
+public class ScrubIterator extends PartitionIndex.IndexPosIterator implements 
ScrubPartitionIterator
+{
+    ByteBuffer key;
+    long dataPosition;
+    final FileHandle rowIndexFile;
+
+    ScrubIterator(PartitionIndex partitionIndex, FileHandle rowIndexFile) 
throws IOException
+    {
+        super(partitionIndex);
+        this.rowIndexFile = rowIndexFile.sharedCopy();
+        advance();
+    }
+
+    @Override
+    public void close()
+    {
+        super.close();
+        rowIndexFile.close();
+    }
+
    /**
     * The partition key at the current position, or {@code null} when the current entry
     * carries no key (direct data-file position payload) or the iterator is exhausted.
     */
    @Override
    public ByteBuffer key()
    {
        return key;
    }
+
    /**
     * Position of the current partition in the data file, or {@code -1} once the
     * iterator is exhausted (see {@code isExhausted()}).
     */
    @Override
    public long dataPosition()
    {
        return dataPosition;
    }
+
+    @Override
+    public void advance() throws IOException
+    {
+        long pos = nextIndexPos();
+        if (pos != PartitionIndex.NOT_FOUND)
+        {
+            if (pos >= 0) // row index position
+            {
+                try (FileDataInput in = rowIndexFile.createReader(pos))
+                {
+                    key = ByteBufferUtil.readWithShortLength(in);
+                    dataPosition = TrieIndexEntry.deserialize(in, 
in.getFilePointer()).position;
+                }
+            }
+            else
+            {
+                key = null;
+                dataPosition = ~pos;
+            }
+        }
+        else
+        {
+            key = null;
+            dataPosition = -1;
+        }
+    }
+
+    @Override
+    public boolean isExhausted()
+    {
+        return dataPosition == -1;

Review Comment:
   Done



##########
src/java/org/apache/cassandra/io/sstable/format/bti/SSTableReversedIterator.java:
##########
@@ -0,0 +1,293 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.io.sstable.format.bti;
+
+import java.io.IOException;
+import java.util.NoSuchElementException;
+
+import com.carrotsearch.hppc.LongStack;
+import org.apache.cassandra.db.ClusteringBound;
+import org.apache.cassandra.db.ClusteringComparator;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.Slice;
+import org.apache.cassandra.db.Slices;
+import org.apache.cassandra.db.UnfilteredValidation;
+import org.apache.cassandra.db.filter.ColumnFilter;
+import org.apache.cassandra.db.rows.RangeTombstoneBoundMarker;
+import org.apache.cassandra.db.rows.RangeTombstoneMarker;
+import org.apache.cassandra.db.rows.Unfiltered;
+import org.apache.cassandra.io.sstable.AbstractRowIndexEntry;
+import org.apache.cassandra.io.sstable.AbstractSSTableIterator;
+import org.apache.cassandra.io.sstable.format.bti.RowIndexReader.IndexInfo;
+import org.apache.cassandra.io.util.FileDataInput;
+import org.apache.cassandra.io.util.FileHandle;
+
+/**
+ * Unfiltered row iterator over a BTI SSTable that returns rows in reverse 
order.
+ */
+class SSTableReversedIterator extends AbstractSSTableIterator<TrieIndexEntry>
+{
+    /**
+     * The index of the slice being processed.
+     */
+    private int slice;
+
    /**
     * Builds a reverse-order iterator over the given partition.
     * All state setup is delegated to the shared base-class constructor.
     */
    public SSTableReversedIterator(BtiTableReader sstable,
                                   FileDataInput file,
                                   DecoratedKey key,
                                   TrieIndexEntry indexEntry,
                                   Slices slices,
                                   ColumnFilter columns,
                                   FileHandle ifile)
    {
        super(sstable, file, key, indexEntry, slices, columns, ifile);
    }
+
+    protected Reader createReaderInternal(TrieIndexEntry indexEntry, 
FileDataInput file, boolean shouldCloseFile)
+    {
+        if (indexEntry.isIndexed())
+            return new ReverseIndexedReader(indexEntry, file, shouldCloseFile);
+        else
+            return new ReverseReader(file, shouldCloseFile);
+    }
+
    /**
     * Always true: this iterator yields rows in reverse clustering order.
     */
    public boolean isReverseOrder()
    {
        return true;
    }
+
+    protected int nextSliceIndex()
+    {
+        int next = slice;
+        slice++;
+        return slices.size() - (next + 1);
+    }
+
    /**
     * True while some slice has not yet been handed out by {@code nextSliceIndex()}.
     */
    protected boolean hasMoreSlices()
    {
        return slice < slices.size();
    }
+
+    /**
+     * Reverse iteration is performed by going through an index block (or the 
whole partition if not indexed) forwards
+     * and storing the positions of each entry that falls within the slice in 
a stack. Reverse iteration then pops out
+     * positions and reads the entries.
+     * <p>
+     * Note: The earlier version of this was constructing an in-memory view of 
the block instead, which gives better
+     * performance on bigger queries and index blocks (due to not having to 
read disk again). With the lower
+     * granularity of the tries it makes better sense to store as little as 
possible as the beginning of the block
+     * should very rarely be in other page/chunk cache locations. This has the 
benefit of being able to answer small
+     * queries (esp. LIMIT 1) faster and with less GC churn.
+     */
+    private class ReverseReader extends AbstractReader
+    {
+        final LongStack rowOffsets = new LongStack();
+        RangeTombstoneMarker blockOpenMarker, blockCloseMarker;
+        private Unfiltered next = null;
+        private boolean foundLessThan;
+        private long startPos = -1;
+
        // Reads the (unindexed) partition forwards once, collecting row offsets for reverse replay.
        private ReverseReader(FileDataInput file, boolean shouldCloseFile)
        {
            super(file, shouldCloseFile);
        }
+
        /**
         * Prepares this reader for the given slice by scanning the whole partition forwards
         * and stacking the offsets of entries that fall within the slice.
         */
        public void setForSlice(Slice slice) throws IOException
        {
            // read full row and filter
            // Remember where partition data starts on the first call; later slices rescan from there.
            if (startPos == -1)
                startPos = file.getFilePointer();
            else
                seekToPosition(startPos);

            fillOffsets(slice, true, true, Long.MAX_VALUE);
        }
+
+        protected boolean hasNextInternal() throws IOException

Review Comment:
   Done



##########
src/java/org/apache/cassandra/io/sstable/format/bti/SSTableReversedIterator.java:
##########
@@ -0,0 +1,293 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.io.sstable.format.bti;
+
+import java.io.IOException;
+import java.util.NoSuchElementException;
+
+import com.carrotsearch.hppc.LongStack;
+import org.apache.cassandra.db.ClusteringBound;
+import org.apache.cassandra.db.ClusteringComparator;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.Slice;
+import org.apache.cassandra.db.Slices;
+import org.apache.cassandra.db.UnfilteredValidation;
+import org.apache.cassandra.db.filter.ColumnFilter;
+import org.apache.cassandra.db.rows.RangeTombstoneBoundMarker;
+import org.apache.cassandra.db.rows.RangeTombstoneMarker;
+import org.apache.cassandra.db.rows.Unfiltered;
+import org.apache.cassandra.io.sstable.AbstractRowIndexEntry;
+import org.apache.cassandra.io.sstable.AbstractSSTableIterator;
+import org.apache.cassandra.io.sstable.format.bti.RowIndexReader.IndexInfo;
+import org.apache.cassandra.io.util.FileDataInput;
+import org.apache.cassandra.io.util.FileHandle;
+
+/**
+ * Unfiltered row iterator over a BTI SSTable that returns rows in reverse 
order.
+ */
+class SSTableReversedIterator extends AbstractSSTableIterator<TrieIndexEntry>
+{
+    /**
+     * The index of the slice being processed.
+     */
+    private int slice;
+
    /**
     * Builds a reverse-order iterator over the given partition.
     * All state setup is delegated to the shared base-class constructor.
     */
    public SSTableReversedIterator(BtiTableReader sstable,
                                   FileDataInput file,
                                   DecoratedKey key,
                                   TrieIndexEntry indexEntry,
                                   Slices slices,
                                   ColumnFilter columns,
                                   FileHandle ifile)
    {
        super(sstable, file, key, indexEntry, slices, columns, ifile);
    }
+
+    protected Reader createReaderInternal(TrieIndexEntry indexEntry, 
FileDataInput file, boolean shouldCloseFile)
+    {
+        if (indexEntry.isIndexed())
+            return new ReverseIndexedReader(indexEntry, file, shouldCloseFile);
+        else
+            return new ReverseReader(file, shouldCloseFile);
+    }
+
    /**
     * Always true: this iterator yields rows in reverse clustering order.
     */
    public boolean isReverseOrder()
    {
        return true;
    }
+
+    protected int nextSliceIndex()
+    {
+        int next = slice;
+        slice++;
+        return slices.size() - (next + 1);
+    }
+
    /**
     * True while some slice has not yet been handed out by {@code nextSliceIndex()}.
     */
    protected boolean hasMoreSlices()
    {
        return slice < slices.size();
    }
+
+    /**
+     * Reverse iteration is performed by going through an index block (or the 
whole partition if not indexed) forwards
+     * and storing the positions of each entry that falls within the slice in 
a stack. Reverse iteration then pops out
+     * positions and reads the entries.
+     * <p>
+     * Note: The earlier version of this was constructing an in-memory view of 
the block instead, which gives better
+     * performance on bigger queries and index blocks (due to not having to 
read disk again). With the lower
+     * granularity of the tries it makes better sense to store as little as 
possible as the beginning of the block
+     * should very rarely be in other page/chunk cache locations. This has the 
benefit of being able to answer small
+     * queries (esp. LIMIT 1) faster and with less GC churn.
+     */
+    private class ReverseReader extends AbstractReader
+    {
+        final LongStack rowOffsets = new LongStack();
+        RangeTombstoneMarker blockOpenMarker, blockCloseMarker;
+        private Unfiltered next = null;
+        private boolean foundLessThan;
+        private long startPos = -1;
+
        // Reads the (unindexed) partition forwards once, collecting row offsets for reverse replay.
        private ReverseReader(FileDataInput file, boolean shouldCloseFile)
        {
            super(file, shouldCloseFile);
        }
+
+        public void setForSlice(Slice slice) throws IOException

Review Comment:
   Done



##########
src/java/org/apache/cassandra/io/sstable/format/bti/SSTableReversedIterator.java:
##########
@@ -0,0 +1,293 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.io.sstable.format.bti;
+
+import java.io.IOException;
+import java.util.NoSuchElementException;
+
+import com.carrotsearch.hppc.LongStack;
+import org.apache.cassandra.db.ClusteringBound;
+import org.apache.cassandra.db.ClusteringComparator;
+import org.apache.cassandra.db.DecoratedKey;
+import org.apache.cassandra.db.Slice;
+import org.apache.cassandra.db.Slices;
+import org.apache.cassandra.db.UnfilteredValidation;
+import org.apache.cassandra.db.filter.ColumnFilter;
+import org.apache.cassandra.db.rows.RangeTombstoneBoundMarker;
+import org.apache.cassandra.db.rows.RangeTombstoneMarker;
+import org.apache.cassandra.db.rows.Unfiltered;
+import org.apache.cassandra.io.sstable.AbstractRowIndexEntry;
+import org.apache.cassandra.io.sstable.AbstractSSTableIterator;
+import org.apache.cassandra.io.sstable.format.bti.RowIndexReader.IndexInfo;
+import org.apache.cassandra.io.util.FileDataInput;
+import org.apache.cassandra.io.util.FileHandle;
+
+/**
+ * Unfiltered row iterator over a BTI SSTable that returns rows in reverse 
order.
+ */
+class SSTableReversedIterator extends AbstractSSTableIterator<TrieIndexEntry>
+{
+    /**
+     * The index of the slice being processed.
+     */
+    private int slice;
+
    /**
     * Builds a reverse-order iterator over the given partition.
     * All state setup is delegated to the shared base-class constructor.
     */
    public SSTableReversedIterator(BtiTableReader sstable,
                                   FileDataInput file,
                                   DecoratedKey key,
                                   TrieIndexEntry indexEntry,
                                   Slices slices,
                                   ColumnFilter columns,
                                   FileHandle ifile)
    {
        super(sstable, file, key, indexEntry, slices, columns, ifile);
    }
+
+    protected Reader createReaderInternal(TrieIndexEntry indexEntry, 
FileDataInput file, boolean shouldCloseFile)
+    {
+        if (indexEntry.isIndexed())
+            return new ReverseIndexedReader(indexEntry, file, shouldCloseFile);
+        else
+            return new ReverseReader(file, shouldCloseFile);
+    }
+
    /**
     * Always true: this iterator yields rows in reverse clustering order.
     */
    public boolean isReverseOrder()
    {
        return true;
    }
+
+    protected int nextSliceIndex()
+    {
+        int next = slice;
+        slice++;
+        return slices.size() - (next + 1);
+    }
+
    /**
     * True while some slice has not yet been handed out by {@code nextSliceIndex()}.
     */
    protected boolean hasMoreSlices()
    {
        return slice < slices.size();
    }
+
+    /**
+     * Reverse iteration is performed by going through an index block (or the 
whole partition if not indexed) forwards
+     * and storing the positions of each entry that falls within the slice in 
a stack. Reverse iteration then pops out
+     * positions and reads the entries.
+     * <p>
+     * Note: The earlier version of this was constructing an in-memory view of 
the block instead, which gives better
+     * performance on bigger queries and index blocks (due to not having to 
read disk again). With the lower
+     * granularity of the tries it makes better sense to store as little as 
possible as the beginning of the block
+     * should very rarely be in other page/chunk cache locations. This has the 
benefit of being able to answer small
+     * queries (esp. LIMIT 1) faster and with less GC churn.
+     */
+    private class ReverseReader extends AbstractReader
+    {
+        final LongStack rowOffsets = new LongStack();
+        RangeTombstoneMarker blockOpenMarker, blockCloseMarker;
+        private Unfiltered next = null;
+        private boolean foundLessThan;
+        private long startPos = -1;
+
        // Reads the (unindexed) partition forwards once, collecting row offsets for reverse replay.
        private ReverseReader(FileDataInput file, boolean shouldCloseFile)
        {
            super(file, shouldCloseFile);
        }
+
        /**
         * Prepares this reader for the given slice by scanning the whole partition forwards
         * and stacking the offsets of entries that fall within the slice.
         */
        public void setForSlice(Slice slice) throws IOException
        {
            // read full row and filter
            // Remember where partition data starts on the first call; later slices rescan from there.
            if (startPos == -1)
                startPos = file.getFilePointer();
            else
                seekToPosition(startPos);

            fillOffsets(slice, true, true, Long.MAX_VALUE);
        }
+
+        protected boolean hasNextInternal() throws IOException
+        {
+            if (next != null)
+                return true;
+            next = computeNext();
+            return next != null;
+        }
+
+        protected Unfiltered nextInternal() throws IOException
+        {
+            if (!hasNextInternal())
+                throw new NoSuchElementException();
+
+            Unfiltered toReturn = next;
+            next = null;
+            return toReturn;
+        }
+
        /**
         * Produces the next unfiltered in reverse order: first any pending block-close marker,
         * then stacked row offsets popped newest-first, then (after all blocks are drained)
         * the pending block-open marker. Returns null when nothing remains for this slice.
         */
        private Unfiltered computeNext() throws IOException
        {
            Unfiltered toReturn;
            do
            {
                // A marker closing the current block must be emitted before any of its rows.
                if (blockCloseMarker != null)
                {
                    toReturn = blockCloseMarker;
                    blockCloseMarker = null;
                    return toReturn;
                }
                while (!rowOffsets.isEmpty())
                {
                    // Offsets were pushed in forward order, so popping yields reverse order.
                    seekToPosition(rowOffsets.pop());
                    boolean hasNext = deserializer.hasNext();
                    assert hasNext;
                    toReturn = deserializer.readNext();
                    UnfilteredValidation.maybeValidateUnfiltered(toReturn, metadata(), key, sstable);
                    // We may get empty row for the same reason expressed on UnfilteredSerializer.deserializeOne.
                    if (!toReturn.isEmpty())
                        return toReturn;
                }
            }
            while (!foundLessThan && advanceIndexBlock());

            // open marker to be output only as slice is finished
            if (blockOpenMarker != null)
            {
                toReturn = blockOpenMarker;
                blockOpenMarker = null;
                return toReturn;
            }
            return null;
        }
+
        /**
         * Moves to the preceding index block, if any. The unindexed reader covers the whole
         * partition in a single pass, so there is never another block to advance to;
         * the indexed subclass overrides this.
         */
        protected boolean advanceIndexBlock() throws IOException
        {
            return false;
        }
+
+        void fillOffsets(Slice slice, boolean filterStart, boolean filterEnd, 
long stopPosition) throws IOException
+        {
+            filterStart &= !slice.start().equals(ClusteringBound.BOTTOM);
+            filterEnd &= !slice.end().equals(ClusteringBound.TOP);
+            long currentPosition = -1;
+
+            ClusteringBound start = slice.start();
+            currentPosition = file.getFilePointer();

Review Comment:
   Done



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to