HBASE-16225 Refactor ScanQueryMatcher

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b35cf8f9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b35cf8f9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b35cf8f9

Branch: refs/heads/master
Commit: b35cf8f93a54b73389431ea7ba2d08f4aae7e104
Parents: 7836124
Author: zhangduo <zhang...@apache.org>
Authored: Sat Jul 30 23:50:24 2016 +0800
Committer: zhangduo <zhang...@apache.org>
Committed: Tue Aug 2 14:01:28 2016 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/io/TimeRange.java   |   7 +-
 .../hbase/coprocessor/BaseRegionObserver.java   |   2 +-
 .../hbase/coprocessor/RegionObserver.java       |   2 +-
 .../hadoop/hbase/regionserver/ColumnCount.java  | 111 ---
 .../hbase/regionserver/ColumnTracker.java       | 128 ----
 .../hbase/regionserver/DeleteTracker.java       | 106 ---
 .../regionserver/ExplicitColumnTracker.java     | 254 -------
 .../hadoop/hbase/regionserver/HStore.java       |  30 +-
 .../regionserver/RegionCoprocessorHost.java     |   1 +
 .../hbase/regionserver/ScanDeleteTracker.java   | 174 -----
 .../hadoop/hbase/regionserver/ScanInfo.java     |   6 +-
 .../hbase/regionserver/ScanQueryMatcher.java    | 699 -------------------
 .../regionserver/ScanWildcardColumnTracker.java | 201 ------
 .../apache/hadoop/hbase/regionserver/Store.java |   1 +
 .../hbase/regionserver/StoreFileReader.java     |  50 +-
 .../hbase/regionserver/StoreFileScanner.java    |  61 +-
 .../hadoop/hbase/regionserver/StoreScanner.java | 239 ++++---
 .../regionserver/querymatcher/ColumnCount.java  | 110 +++
 .../querymatcher/ColumnTracker.java             | 128 ++++
 .../CompactionScanQueryMatcher.java             | 119 ++++
 .../querymatcher/DeleteTracker.java             | 101 +++
 .../DropDeletesCompactionScanQueryMatcher.java  |  83 +++
 .../querymatcher/ExplicitColumnTracker.java     | 251 +++++++
 .../querymatcher/LegacyScanQueryMatcher.java    | 397 +++++++++++
 .../MajorCompactionScanQueryMatcher.java        |  80 +++
 .../MinorCompactionScanQueryMatcher.java        |  61 ++
 .../NormalUserScanQueryMatcher.java             | 106 +++
 .../querymatcher/RawScanQueryMatcher.java       |  77 ++
 .../querymatcher/ScanDeleteTracker.java         | 159 +++++
 .../querymatcher/ScanQueryMatcher.java          | 357 ++++++++++
 .../querymatcher/ScanWildcardColumnTracker.java | 199 ++++++
 .../StripeCompactionScanQueryMatcher.java       | 118 ++++
 .../querymatcher/UserScanQueryMatcher.java      | 217 ++++++
 .../visibility/VisibilityController.java        |   2 +-
 .../visibility/VisibilityScanDeleteTracker.java |   2 +-
 .../regionserver/DataBlockEncodingTool.java     |   2 +-
 .../EncodedSeekPerformanceTest.java             |   4 +-
 .../regionserver/TestCompoundBloomFilter.java   |   2 +-
 .../regionserver/TestExplicitColumnTracker.java | 195 ------
 .../hbase/regionserver/TestQueryMatcher.java    | 382 ----------
 .../regionserver/TestScanDeleteTracker.java     | 197 ------
 .../TestScanWildcardColumnTracker.java          | 130 ----
 .../hbase/regionserver/TestStoreFile.java       |  23 +-
 .../TestStoreFileScannerWithTagCompression.java |   2 +-
 .../hbase/regionserver/TestStoreScanner.java    |   3 +-
 .../regionserver/compactions/TestCompactor.java |   4 +-
 .../compactions/TestStripeCompactionPolicy.java |   5 +-
 .../AbstractTestScanQueryMatcher.java           |  77 ++
 .../TestCompactionScanQueryMatcher.java         |  99 +++
 .../querymatcher/TestExplicitColumnTracker.java | 185 +++++
 .../querymatcher/TestScanDeleteTracker.java     | 185 +++++
 .../TestScanWildcardColumnTracker.java          | 126 ++++
 .../querymatcher/TestUserScanQueryMatcher.java  | 236 +++++++
 53 files changed, 3674 insertions(+), 2822 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/b35cf8f9/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
index fed20c4..2efcde1 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
@@ -184,12 +184,11 @@ public class TimeRange {
   }
 
   /**
-   * Check if the specified timestamp is within this TimeRange.
+   * Check if the specified timestamp is within or after this TimeRange.
    * <p>
-   * Returns true if within interval [minStamp, maxStamp), false
-   * if not.
+   * Returns true if greater than minStamp, false if not.
    * @param timestamp timestamp to check
-   * @return true if within TimeRange, false if not
+   * @return true if within or after TimeRange, false if not
    */
   public boolean withinOrAfterTimeRange(long timestamp) {
     assert timestamp >= 0;
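
A sketch, not part of the patch, illustrating the distinction the reworded
javadoc draws between the two checks; assumes org.apache.hadoop.hbase.io.TimeRange
and an illustrative range of [10, 20):

    TimeRange tr = new TimeRange(10L, 20L);     // [minStamp=10, maxStamp=20)
    boolean a = tr.withinTimeRange(25L);        // false: 25 is past maxStamp
    boolean b = tr.withinOrAfterTimeRange(25L); // true: at or after minStamp
    boolean c = tr.withinOrAfterTimeRange(5L);  // false: before minStamp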

http://git-wip-us.apache.org/repos/asf/hbase/blob/b35cf8f9/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
index bff727a..748268e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
@@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.regionserver.DeleteTracker;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
@@ -57,6 +56,7 @@ import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileReader;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+import org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Pair;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b35cf8f9/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index 4729954..4c94644 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.regionserver.DeleteTracker;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.Region.Operation;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
@@ -57,6 +56,7 @@ import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileReader;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+import org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b35cf8f9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java
deleted file mode 100644
index 71ea1bd..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnCount.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * Simple wrapper for a byte buffer and a counter.  Does not copy.
- * <p>
- * NOT thread-safe because it is not used in a multi-threaded context, yet.
- */
-@InterfaceAudience.Private
-public class ColumnCount {
-  private final byte [] bytes;
-  private final int offset;
-  private final int length;
-  private int count;
-
-  /**
-   * Constructor
-   * @param column the qualifier to count the versions for
-   */
-  public ColumnCount(byte [] column) {
-    this(column, 0);
-  }
-
-  /**
-   * Constructor
-   * @param column the qualifier to count the versions for
-   * @param count initial count
-   */
-  public ColumnCount(byte [] column, int count) {
-    this(column, 0, column.length, count);
-  }
-
-  /**
-   * Constructor
-   * @param column the qualifier to count the versions for
-   * @param offset in the passed buffer where to start the qualifier from
-   * @param length of the qualifier
-   * @param count initial count
-   */
-  public ColumnCount(byte [] column, int offset, int length, int count) {
-    this.bytes = column;
-    this.offset = offset;
-    this.length = length;
-    this.count = count;
-  }
-
-  /**
-   * @return the buffer
-   */
-  public byte [] getBuffer(){
-    return this.bytes;
-  }
-
-  /**
-   * @return the offset
-   */
-  public int getOffset(){
-    return this.offset;
-  }
-
-  /**
-   * @return the length
-   */
-  public int getLength(){
-    return this.length;
-  }
-
-  /**
-   * Decrement the current version count
-   * @return current count
-   */
-  public int decrement() {
-    return --count;
-  }
-
-  /**
-   * Increment the current version count
-   * @return current count
-   */
-  public int increment() {
-    return ++count;
-  }
-
-  /**
-   * Set the current count to a new count
-   * @param count new count to set
-   */
-  public void setCount(int count) {
-    this.count = count;
-  }
-
-}
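
A minimal sketch, not from the patch, of how version counting leans on this
wrapper: callers such as ExplicitColumnTracker increment the per-qualifier
counter and compare it against the max-versions limit. Names and values here
are illustrative only.

    int maxVersions = 3; // illustrative limit
    ColumnCount cc = new ColumnCount(Bytes.toBytes("q1"));
    if (cc.increment() >= maxVersions) {
      // done with this qualifier; seek to the next column of interest
    }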

http://git-wip-us.apache.org/repos/asf/hbase/blob/b35cf8f9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java
deleted file mode 100644
index d352561..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode;
-
-/**
- * Implementing classes of this interface will be used for the tracking
- * and enforcement of columns and numbers of versions and timeToLive during
- * the course of a Get or Scan operation.
- * <p>
- * Currently there are two different types of Store/Family-level queries.
- * <ul><li>{@link ExplicitColumnTracker} is used when the query specifies
- * one or more column qualifiers to return in the family.</li>
- * <li>{@link ScanWildcardColumnTracker} is used when no columns are
- * explicitly specified.</li>
- * </ul>
- * <p>
- * This class is utilized by {@link ScanQueryMatcher} mainly through two methods:
- * <ul><li>{@link #checkColumn} is called when a Put satisfies all other
- * conditions of the query.</li>
- * <li>{@link #getNextRowOrNextColumn} is called whenever ScanQueryMatcher
- * believes that the current column should be skipped (by timestamp, filter etc.)</li>
- * </ul>
- * <p>
- * These two methods return a
- * {@link org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode}
- * to define what action should be taken.
- * <p>
- * This class is NOT thread-safe as queries are never multi-threaded
- */
-@InterfaceAudience.Private
-public interface ColumnTracker {
-
-  /**
-   * Checks if the column is present in the list of requested columns by returning the match code
-   * instance. It does not check against the number of versions for the columns asked for. To do the
-   * version check, one has to call {@link #checkVersions(Cell, long, byte, boolean)}
-   * method based on the return type (INCLUDE) of this method. The values that can be returned by
-   * this method are {@link MatchCode#INCLUDE}, {@link MatchCode#SEEK_NEXT_COL} and
-   * {@link MatchCode#SEEK_NEXT_ROW}.
-   * @param cell
-   * @param type The type of the KeyValue
-   * @return The match code instance.
-   * @throws IOException in case there is an internal consistency problem caused by a data
-   *           corruption.
-   */
-  ScanQueryMatcher.MatchCode checkColumn(Cell cell, byte type) throws IOException;
-
-  /**
-   * Keeps track of the number of versions for the columns asked for. It assumes that the user has
-   * already checked if the keyvalue needs to be included by calling the
-   * {@link #checkColumn(Cell, byte)} method. The enum values returned by this method
-   * are {@link MatchCode#SKIP}, {@link MatchCode#INCLUDE},
-   * {@link MatchCode#INCLUDE_AND_SEEK_NEXT_COL} and {@link MatchCode#INCLUDE_AND_SEEK_NEXT_ROW}.
-   * Implementations which include all the columns could just return {@link MatchCode#INCLUDE} in
-   * the {@link #checkColumn(Cell, byte)} method and perform all the operations in this
-   * checkVersions method.
-   * @param cell
-   * @param ttl The timeToLive to enforce.
-   * @param type the type of the key value (Put/Delete)
-   * @param ignoreCount indicates if the KV needs to be excluded while counting (used during
-   *          compactions. We only count KV's that are older than all the scanners' read points.)
-   * @return the scan query matcher match code instance
-   * @throws IOException in case there is an internal consistency problem caused by a data
-   *           corruption.
-   */
-  ScanQueryMatcher.MatchCode checkVersions(Cell cell, long ttl, byte type, boolean ignoreCount)
-      throws IOException;
-  /**
-   * Resets the Matcher
-   */
-  void reset();
-
-  /**
-   *
-   * @return <code>true</code> when done.
-   */
-  boolean done();
-
-  /**
-   * Used by matcher and scan/get to get a hint of the next column
-   * to seek to after checkColumn() returns SKIP.  Returns the next interesting
-   * column we want, or NULL if there is none (wildcard scanner).
-   *
-   * Implementations aren't required to return anything useful unless the most recent
-   * call was to checkColumn() and the return code was SKIP.  This is pretty implementation
-   * detail-y, but optimizations are like that.
-   *
-   * @return null, or a ColumnCount that we should seek to
-   */
-  ColumnCount getColumnHint();
-
-  /**
-   * Retrieve the MatchCode for the next row or column
-   * @param cell
-   */
-  MatchCode getNextRowOrNextColumn(Cell cell);
-
-  /**
-   * Give the tracker a chance to declare it's done based on only the timestamp
-   * to allow an early out.
-   *
-   * @param timestamp
-   * @return <code>true</code> to early out based on timestamp.
-   */
-  boolean isDone(long timestamp);
-}
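
A hedged sketch, not part of the patch, of the two-step protocol this interface
documents: checkColumn gates on the qualifier, and only on INCLUDE does
checkVersions count the version (possibly upgrading the code to
INCLUDE_AND_SEEK_NEXT_COL/ROW). 'tracker' and 'cell' are illustrative locals.

    ScanQueryMatcher.MatchCode code = tracker.checkColumn(cell, cell.getTypeByte());
    if (code == ScanQueryMatcher.MatchCode.INCLUDE) {
      code = tracker.checkVersions(cell, cell.getTimestamp(), cell.getTypeByte(), false);
    }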

http://git-wip-us.apache.org/repos/asf/hbase/blob/b35cf8f9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java
deleted file mode 100644
index 8f466fc..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Cell;
-
-/**
- * This interface is used for the tracking and enforcement of Deletes
- * during the course of a Get or Scan operation.
- * <p>
- * This class is utilized through three methods:
- * <ul><li>{@link #add} when encountering a Delete</li>
- * <li>{@link #isDeleted} when checking if a Put KeyValue has been deleted</li>
- * <li>{@link #update} when reaching the end of a StoreFile</li>
- * </ul>
- */
-@InterfaceAudience.Private
-public interface DeleteTracker {
-
-  /**
-   * Add the specified cell to the list of deletes to check against for
-   * this row operation.
-   * <p>
-   * This is called when a Delete is encountered in a StoreFile.
-   * @param cell - the delete cell
-   */
-  void add(Cell cell);
-
-  /**
-   * Check if the specified cell buffer has been deleted by a previously
-   * seen delete.
-   * @param cell - current cell to check if deleted by a previously seen delete
-   * @return deleteResult The result tells whether the KeyValue is deleted and why
-   */
-  DeleteResult isDeleted(Cell cell);
-
-  /**
-   * @return true if there are no current deletes, false otherwise
-   */
-  boolean isEmpty();
-
-  /**
-   * Called at the end of every StoreFile.
-   * <p>
-   * Many optimized implementations of Trackers will require an update
-   * when the end of each StoreFile is reached.
-   */
-  void update();
-
-  /**
-   * Called between rows.
-   * <p>
-   * This clears everything as if a new DeleteTracker was instantiated.
-   */
-  void reset();
-
-
-  /**
-   * Return codes for comparison of two Deletes.
-   * <p>
-   * The codes tell the merging function what to do.
-   * <p>
-   * INCLUDE means add the specified Delete to the merged list.
-   * NEXT means move to the next element in the specified list(s).
-   */
-  enum DeleteCompare {
-    INCLUDE_OLD_NEXT_OLD,
-    INCLUDE_OLD_NEXT_BOTH,
-    INCLUDE_NEW_NEXT_NEW,
-    INCLUDE_NEW_NEXT_BOTH,
-    NEXT_OLD,
-    NEXT_NEW
-  }
-
-  /**
-   * Returns codes for delete result.
-   * The codes tell the ScanQueryMatcher whether the kv is deleted and why.
-   * Based on the delete result, the ScanQueryMatcher will decide the next
-   * operation
-   */
-  enum DeleteResult {
-    FAMILY_DELETED, // The KeyValue is deleted by a delete family.
-    FAMILY_VERSION_DELETED, // The KeyValue is deleted by a delete family version.
-    COLUMN_DELETED, // The KeyValue is deleted by a delete column.
-    VERSION_DELETED, // The KeyValue is deleted by a version delete.
-    NOT_DELETED
-  }
-
-}
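
A hedged usage sketch, not from the patch, of the three-call protocol the
javadoc above describes; 'deletes' stands for any DeleteTracker implementation.

    if (CellUtil.isDelete(cell)) {
      deletes.add(cell); // record the delete marker for this row
    } else if (deletes.isDeleted(cell) != DeleteTracker.DeleteResult.NOT_DELETED) {
      // the put is shadowed; skip it or seek past the column/row
    }
    // deletes.reset() between rows; deletes.update() at StoreFile boundaries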

http://git-wip-us.apache.org/repos/asf/hbase/blob/b35cf8f9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java
deleted file mode 100644
index 86c8b48..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.IOException;
-import java.util.NavigableSet;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode;
-
-/**
- * This class is used for the tracking and enforcement of columns and numbers
- * of versions during the course of a Get or Scan operation, when explicit
- * column qualifiers have been asked for in the query.
- *
- * With a little magic (see {@link ScanQueryMatcher}), we can use this matcher
- * for both scans and gets.  The main difference is 'next' and 'done' collapse
- * for the scan case (since we see all columns in order), and we only reset
- * between rows.
- *
- * <p>
- * This class is utilized by {@link ScanQueryMatcher} mainly through two methods:
- * <ul><li>{@link #checkColumn} is called when a Put satisfies all other
- * conditions of the query.</li>
- * <li>{@link #getNextRowOrNextColumn} is called whenever ScanQueryMatcher
- * believes that the current column should be skipped (by timestamp, filter etc.)</li>
- * </ul>
- * <p>
- * These two methods return a
- * {@link org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode}
- * to define what action should be taken.
- * <p>
- * This class is NOT thread-safe as queries are never multi-threaded
- */
-@InterfaceAudience.Private
-public class ExplicitColumnTracker implements ColumnTracker {
-
-  private final int maxVersions;
-  private final int minVersions;
-
- /**
-  * Contains the list of columns that the ExplicitColumnTracker is tracking.
-  * Each ColumnCount instance also tracks how many versions of the requested
-  * column have been returned.
-  */
-  private final ColumnCount[] columns;
-  private int index;
-  private ColumnCount column;
-  /** Keeps track of the latest timestamp included for current column.
-   * Used to eliminate duplicates. */
-  private long latestTSOfCurrentColumn;
-  private long oldestStamp;
-
-  /**
-   * Default constructor.
-   * @param columns columns specified by the user in the query
-   * @param minVersions minimum number of versions to keep
-   * @param maxVersions maximum versions to return per column
-   * @param oldestUnexpiredTS the oldest timestamp we are interested in,
-   *  based on TTL
-   */
-  public ExplicitColumnTracker(NavigableSet<byte[]> columns, int minVersions,
-      int maxVersions, long oldestUnexpiredTS) {
-    this.maxVersions = maxVersions;
-    this.minVersions = minVersions;
-    this.oldestStamp = oldestUnexpiredTS;
-    this.columns = new ColumnCount[columns.size()];
-    int i=0;
-    for(byte [] column : columns) {
-      this.columns[i++] = new ColumnCount(column);
-    }
-    reset();
-  }
-
-    /**
-   * Done when there are no more columns to match against.
-   */
-  public boolean done() {
-    return this.index >= columns.length;
-  }
-
-  public ColumnCount getColumnHint() {
-    return this.column;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public ScanQueryMatcher.MatchCode checkColumn(Cell cell, byte type) {
-    // delete markers should never be passed to an
-    // *Explicit*ColumnTracker
-    assert !CellUtil.isDelete(type);
-    do {
-      // No more columns left, we are done with this query
-      if(done()) {
-        return ScanQueryMatcher.MatchCode.SEEK_NEXT_ROW; // done_row
-      }
-
-      // No more columns to match against, done with storefile
-      if(this.column == null) {
-        return ScanQueryMatcher.MatchCode.SEEK_NEXT_ROW; // done_row
-      }
-
-      // Compare specific column to current column
-      int ret = CellComparator.compareQualifiers(cell, column.getBuffer(), column.getOffset(),
-          column.getLength());
-
-      // Column Matches. Return include code. The caller would call checkVersions
-      // to limit the number of versions.
-      if (ret == 0) {
-        return ScanQueryMatcher.MatchCode.INCLUDE;
-      }
-
-      resetTS();
-
-      if (ret < 0) {
-        // The current KV is smaller than the column the ExplicitColumnTracker
-        // is interested in, so seek to that column of interest.
-        return ScanQueryMatcher.MatchCode.SEEK_NEXT_COL;
-      }
-
-      // The current KV is bigger than the column the ExplicitColumnTracker
-      // is interested in. That means there is no more data for the column
-      // of interest. Advance the ExplicitColumnTracker state to next
-      // column of interest, and check again.
-      if (ret > 0) {
-        ++this.index;
-        if (done()) {
-          // No more to match, do not include, done with this row.
-          return ScanQueryMatcher.MatchCode.SEEK_NEXT_ROW; // done_row
-        }
-        // This is the recursive case.
-        this.column = this.columns[this.index];
-      }
-    } while(true);
-  }
-
-  @Override
-  public ScanQueryMatcher.MatchCode checkVersions(Cell cell,
-      long timestamp, byte type, boolean ignoreCount) throws IOException {
-    assert !CellUtil.isDelete(type);
-    if (ignoreCount) return ScanQueryMatcher.MatchCode.INCLUDE;
-    // Check if it is a duplicate timestamp
-    if (sameAsPreviousTS(timestamp)) {
-      // If duplicate, skip this Key
-      return ScanQueryMatcher.MatchCode.SKIP;
-    }
-    int count = this.column.increment();
-    if (count >= maxVersions || (count >= minVersions && isExpired(timestamp))) {
-      // Done with versions for this column
-      ++this.index;
-      resetTS();
-      if (done()) {
-        // We have served all the requested columns.
-        this.column = null;
-        return ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_ROW;
-      }
-      // We are done with current column; advance to next column
-      // of interest.
-      this.column = this.columns[this.index];
-      return ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_COL;
-    }
-    setTS(timestamp);
-    return ScanQueryMatcher.MatchCode.INCLUDE;
-  }
-
-  // Called between every row.
-  public void reset() {
-    this.index = 0;
-    this.column = this.columns[this.index];
-    for(ColumnCount col : this.columns) {
-      col.setCount(0);
-    }
-    resetTS();
-  }
-
-  private void resetTS() {
-    latestTSOfCurrentColumn = HConstants.LATEST_TIMESTAMP;
-  }
-
-  private void setTS(long timestamp) {
-    latestTSOfCurrentColumn = timestamp;
-  }
-
-  private boolean sameAsPreviousTS(long timestamp) {
-    return timestamp == latestTSOfCurrentColumn;
-  }
-
-  private boolean isExpired(long timestamp) {
-    return timestamp < oldestStamp;
-  }
-
-  /**
-   * This method is used to inform the column tracker that we are done with
-   * this column. We may get this information from external filters or
-   * timestamp range and we then need to indicate this information to
-   * tracker. It is required only in case of ExplicitColumnTracker.
-   * @param cell
-   */
-  public void doneWithColumn(Cell cell) {
-    while (this.column != null) {
-      int compare = CellComparator.compareQualifiers(cell, column.getBuffer(), column.getOffset(),
-          column.getLength());
-      resetTS();
-      if (compare >= 0) {
-        ++this.index;
-        if (done()) {
-          // Will not hit any more columns in this storefile
-          this.column = null;
-        } else {
-          this.column = this.columns[this.index];
-        }
-        if (compare > 0) continue;
-      }
-      return;
-    }
-  }
-
-  @Override
-  public MatchCode getNextRowOrNextColumn(Cell cell) {
-    doneWithColumn(cell);
-
-    if (getColumnHint() == null) {
-      return MatchCode.SEEK_NEXT_ROW;
-    } else {
-      return MatchCode.SEEK_NEXT_COL;
-    }
-  }
-
-  public boolean isDone(long timestamp) {
-    return minVersions <= 0 && isExpired(timestamp);
-  }
-}
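
A hedged construction sketch, not part of the patch, mirroring how a query that
names its qualifiers would set this tracker up; the qualifiers, limits, and
'oldestUnexpiredTs' local are illustrative.

    NavigableSet<byte[]> cols = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    cols.add(Bytes.toBytes("q1"));
    cols.add(Bytes.toBytes("q2"));
    ColumnTracker tracker = new ExplicitColumnTracker(
        cols, 0 /* minVersions */, 3 /* maxVersions */, oldestUnexpiredTs);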

http://git-wip-us.apache.org/repos/asf/hbase/blob/b35cf8f9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index b32b757..c4bd849 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -86,6 +86,7 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
 import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours;
+import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
 import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
 import org.apache.hadoop.hbase.security.EncryptionUtil;
@@ -1749,35 +1750,6 @@ public class HStore implements Store {
     return wantedVersions > maxVersions ? maxVersions: wantedVersions;
   }
 
-  /**
-   * @param cell
-   * @param oldestTimestamp
-   * @return true if the cell is expired
-   */
-  static boolean isCellTTLExpired(final Cell cell, final long oldestTimestamp, final long now) {
-    // Look for a TTL tag first. Use it instead of the family setting if
-    // found. If a cell has multiple TTLs, resolve the conflict by using the
-    // first tag encountered.
-    Iterator<Tag> i = CellUtil.tagsIterator(cell);
-    while (i.hasNext()) {
-      Tag t = i.next();
-      if (TagType.TTL_TAG_TYPE == t.getType()) {
-        // Unlike in schema cell TTLs are stored in milliseconds, no need
-        // to convert
-        long ts = cell.getTimestamp();
-        assert t.getValueLength() == Bytes.SIZEOF_LONG;
-        long ttl = TagUtil.getValueAsLong(t);
-        if (ts + ttl < now) {
-          return true;
-        }
-        // Per cell TTLs cannot extend lifetime beyond family settings, so
-        // fall through to check that
-        break;
-      }
-    }
-    return false;
-  }
-
   @Override
   public boolean canSplit() {
     this.lock.readLock().lock();
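
A hedged restatement, not from the patch, of the per-cell TTL rule the removed
isCellTTLExpired helper enforced (removed here as part of the refactor);
'ttlMillis' stands for the value of the cell's TTL_TAG_TYPE tag.

    long now = EnvironmentEdgeManager.currentTime();
    long ttlMillis = 5000L; // illustrative per-cell TTL, already in milliseconds
    // a TTL tag wins over the family TTL but cannot extend the cell's lifetime
    boolean expired = cell.getTimestamp() + ttlMillis < now;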

http://git-wip-us.apache.org/repos/asf/hbase/blob/b35cf8f9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index acfaa96..567664e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -77,6 +77,7 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.regionserver.Region.Operation;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+import org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.wal.WALKey;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b35cf8f9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java
deleted file mode 100644
index e2db008..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java
+++ /dev/null
@@ -1,174 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver;
-
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * This class is responsible for the tracking and enforcement of Deletes
- * during the course of a Scan operation.
- *
- * It only has to enforce Delete and DeleteColumn, since the
- * DeleteFamily is handled at a higher level.
- *
- * <p>
- * This class is utilized through three methods:
- * <ul><li>{@link #add} when encountering a Delete or DeleteColumn</li>
- * <li>{@link #isDeleted} when checking if a Put KeyValue has been deleted</li>
- * <li>{@link #update} when reaching the end of a StoreFile or row for scans</li>
- * </ul>
- * <p>
- * This class is NOT thread-safe as queries are never multi-threaded
- */
-@InterfaceAudience.Private
-public class ScanDeleteTracker implements DeleteTracker {
-
-  protected boolean hasFamilyStamp = false;
-  protected long familyStamp = 0L;
-  protected SortedSet<Long> familyVersionStamps = new TreeSet<Long>();
-  protected byte [] deleteBuffer = null;
-  protected int deleteOffset = 0;
-  protected int deleteLength = 0;
-  protected byte deleteType = 0;
-  protected long deleteTimestamp = 0L;
-
-  /**
-   * Constructor for ScanDeleteTracker
-   */
-  public ScanDeleteTracker() {
-    super();
-  }
-
-  /**
-   * Add the specified KeyValue to the list of deletes to check against for
-   * this row operation.
-   * <p>
-   * This is called when a Delete is encountered.
-   * @param cell - the delete cell
-   */
-  @Override
-  public void add(Cell cell) {
-    long timestamp = cell.getTimestamp();
-    byte type = cell.getTypeByte();
-    if (!hasFamilyStamp || timestamp > familyStamp) {
-      if (type == KeyValue.Type.DeleteFamily.getCode()) {
-        hasFamilyStamp = true;
-        familyStamp = timestamp;
-        return;
-      } else if (type == KeyValue.Type.DeleteFamilyVersion.getCode()) {
-        familyVersionStamps.add(timestamp);
-        return;
-      }
-
-      if (deleteBuffer != null && type < deleteType) {
-        // same column, so ignore less specific delete
-        if (CellUtil.matchingQualifier(cell, deleteBuffer, deleteOffset, deleteLength)) {
-          return;
-        }
-      }
-      // new column, or more general delete type
-      deleteBuffer = cell.getQualifierArray();
-      deleteOffset = cell.getQualifierOffset();
-      deleteLength = cell.getQualifierLength();
-      deleteType = type;
-      deleteTimestamp = timestamp;
-    }
-    // missing else is never called.
-  }
-
-  /**
-   * Check if the specified KeyValue buffer has been deleted by a previously
-   * seen delete.
-   *
-   * @param cell - current cell to check if deleted by a previously seen delete
-   * @return deleteResult
-   */
-  @Override
-  public DeleteResult isDeleted(Cell cell) {
-    long timestamp = cell.getTimestamp();
-    if (hasFamilyStamp && timestamp <= familyStamp) {
-      return DeleteResult.FAMILY_DELETED;
-    }
-
-    if (familyVersionStamps.contains(Long.valueOf(timestamp))) {
-        return DeleteResult.FAMILY_VERSION_DELETED;
-    }
-
-    if (deleteBuffer != null) {
-      int ret = -(CellComparator.compareQualifiers(cell, deleteBuffer, deleteOffset, deleteLength));
-      if (ret == 0) {
-        if (deleteType == KeyValue.Type.DeleteColumn.getCode()) {
-          return DeleteResult.COLUMN_DELETED;
-        }
-        // Delete (aka DeleteVersion)
-        // If the timestamp is the same, keep this one
-        if (timestamp == deleteTimestamp) {
-          return DeleteResult.VERSION_DELETED;
-        }
-        // use assert or not?
-        assert timestamp < deleteTimestamp;
-
-        // different timestamp, let's clear the buffer.
-        deleteBuffer = null;
-      } else if(ret < 0){
-        // Next column case.
-        deleteBuffer = null;
-      } else {
-        throw new IllegalStateException("isDelete failed: deleteBuffer="
-            + Bytes.toStringBinary(deleteBuffer, deleteOffset, deleteLength)
-            + ", qualifier="
-            + Bytes.toStringBinary(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength())
-            + ", timestamp=" + timestamp + ", comparison result: " + ret);
-      }
-    }
-
-    return DeleteResult.NOT_DELETED;
-  }
-
-  @Override
-  public boolean isEmpty() {
-    return deleteBuffer == null && !hasFamilyStamp &&
-           familyVersionStamps.isEmpty();
-  }
-
-  @Override
-  // called between every row.
-  public void reset() {
-    hasFamilyStamp = false;
-    familyStamp = 0L;
-    familyVersionStamps.clear();
-    deleteBuffer = null;
-  }
-
-  @Override
-  // should not be called at all even (!)
-  public void update() {
-    this.reset();
-  }
-}
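
A hedged sketch, not part of the patch, of how a matcher reacts to the
DeleteResult codes this tracker produces (compare the delete-handling switch in
the deleted ScanQueryMatcher#match further below):

    switch (deletes.isDeleted(cell)) {
      case FAMILY_DELETED:
      case COLUMN_DELETED:
        return columns.getNextRowOrNextColumn(cell); // nothing older survives: seek
      case VERSION_DELETED:
      case FAMILY_VERSION_DELETED:
        return MatchCode.SKIP; // only this exact version is shadowed
      case NOT_DELETED:
        break; // fall through to column and timestamp checks
    }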

http://git-wip-us.apache.org/repos/asf/hbase/blob/b35cf8f9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java
index 42f7369..349e166 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanInfo.java
@@ -122,11 +122,11 @@ public class ScanInfo {
     return this.parallelSeekEnabled;
   }
 
-  byte[] getFamily() {
+  public byte[] getFamily() {
     return family;
   }
 
-  int getMinVersions() {
+  public int getMinVersions() {
     return minVersions;
   }
 
@@ -138,7 +138,7 @@ public class ScanInfo {
     return ttl;
   }
 
-  KeepDeletedCells getKeepDeletedCells() {
+  public KeepDeletedCells getKeepDeletedCells() {
     return keepDeletedCells;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b35cf8f9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
deleted file mode 100644
index d2d0ccb..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
+++ /dev/null
@@ -1,699 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.NavigableSet;
-
-import org.apache.hadoop.hbase.KeyValue.Type;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeepDeletedCells;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
-import org.apache.hadoop.hbase.io.TimeRange;
-import org.apache.hadoop.hbase.regionserver.DeleteTracker.DeleteResult;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-
-import com.google.common.base.Preconditions;
-
-/**
- * A query matcher that is specifically designed for the scan case.
- */
-@InterfaceAudience.Private
-public class ScanQueryMatcher {
-  // Optimization so we can skip lots of compares when we decide to skip
-  // to the next row.
-  private boolean stickyNextRow;
-  private final byte[] stopRow;
-
-  private final TimeRange tr;
-
-  private final Filter filter;
-
-  /** Keeps track of deletes */
-  private final DeleteTracker deletes;
-
-  /*
-   * The following three booleans define how we deal with deletes.
-   * There are three different aspects:
-   * 1. Whether to keep delete markers. This is used in compactions.
-   *    Minor compactions always keep delete markers.
-   * 2. Whether to keep deleted rows. This is also used in compactions,
-   *    if the store is set to keep deleted rows. This implies keeping
-   *    the delete markers as well.
-   *    In this case deleted rows are subject to the normal max version
-   *    and TTL/min version rules just like "normal" rows.
-   * 3. Whether a scan can do time travel queries even before deleted
-   *    marker to reach deleted rows.
-   */
-  /** whether to retain delete markers */
-  private boolean retainDeletesInOutput;
-
-  /** whether to return deleted rows */
-  private final KeepDeletedCells keepDeletedCells;
-  /** whether time range queries can see rows "behind" a delete */
-  private final boolean seePastDeleteMarkers;
-
-
-  /** Keeps track of columns and versions */
-  private final ColumnTracker columns;
-
-  /** Key to seek to in memstore and StoreFiles */
-  private final Cell startKey;
-
-  /** Row comparator for the region this query is for */
-  private final CellComparator rowComparator;
-
-  /* row is not private for tests */
-  /** Row the query is on */
-  Cell curCell;
-
-  /**
-   * Oldest put in any of the involved store files
-   * Used to decide whether it is ok to delete
-   * a family delete marker when this store keeps
-   * deleted KVs.
-   */
-  private final long earliestPutTs;
-  private final long ttl;
-
-  /** The oldest timestamp we are interested in, based on TTL */
-  private final long oldestUnexpiredTS;
-  private final long now;
-
-  /** readPoint over which the KVs are unconditionally included */
-  protected long maxReadPointToTrackVersions;
-
-  private byte[] dropDeletesFromRow = null, dropDeletesToRow = null;
-
-  /**
-   * This variable shows whether there is a null column in the query. There
-   * always exists a null column in the wildcard column query.
-   * There maybe exists a null column in the explicit column query based on the
-   * first column.
-   * */
-  private boolean hasNullColumn = true;
-
-  private RegionCoprocessorHost regionCoprocessorHost= null;
-
-  // By default, when hbase.hstore.time.to.purge.deletes is 0ms, a delete
-  // marker is always removed during a major compaction. If set to non-zero
-  // value then major compaction will try to keep a delete marker around for
-  // the given number of milliseconds. We want to keep the delete markers
-  // around a bit longer because old puts might appear out-of-order. For
-  // example, during log replication between two clusters.
-  //
-  // If the delete marker has lived longer than its column-family's TTL then
-  // the delete marker will be removed even if time.to.purge.deletes has not
-  // passed. This is because all the Puts that this delete marker can influence
-  // would have also expired. (Removing of delete markers on col family TTL will
-  // not happen if min-versions is set to non-zero)
-  //
-  // But, if time.to.purge.deletes has not expired then a delete
-  // marker will not be removed just because there are no Puts that it is
-  // currently influencing. This is because Puts that this delete can
-  // influence may appear out of order.
-  private final long timeToPurgeDeletes;
-
-  private final boolean isUserScan;
-
-  private final boolean isReversed;
-
-  /**
-   * True if we are doing a 'Get' Scan. Every Get is actually a one-row Scan.
-   */
-  private final boolean get;
-
-  /**
-   * Construct a QueryMatcher for a scan
-   * @param scanInfo The store's immutable scan info
-   * @param scanType Type of the scan
-   * @param earliestPutTs Earliest put seen in any of the store files.
-   * @param oldestUnexpiredTS the oldest timestamp we are interested in, based on TTL
-   */
-  public ScanQueryMatcher(Scan scan, ScanInfo scanInfo, NavigableSet<byte[]> columns,
-      ScanType scanType, long readPointToUse, long earliestPutTs, long oldestUnexpiredTS,
-      long now, RegionCoprocessorHost regionCoprocessorHost) throws IOException {
-    TimeRange timeRange = scan.getColumnFamilyTimeRange().get(scanInfo.getFamily());
-    if (timeRange == null) {
-      this.tr = scan.getTimeRange();
-    } else {
-      this.tr = timeRange;
-    }
-    this.get = scan.isGetScan();
-    this.rowComparator = scanInfo.getComparator();
-    this.regionCoprocessorHost = regionCoprocessorHost;
-    this.deletes =  instantiateDeleteTracker();
-    this.stopRow = scan.getStopRow();
-    this.startKey = CellUtil.createFirstDeleteFamilyCellOnRow(scan.getStartRow(),
-        scanInfo.getFamily());
-    this.filter = scan.getFilter();
-    this.earliestPutTs = earliestPutTs;
-    this.oldestUnexpiredTS = oldestUnexpiredTS;
-    this.now = now;
-
-    this.maxReadPointToTrackVersions = readPointToUse;
-    this.timeToPurgeDeletes = scanInfo.getTimeToPurgeDeletes();
-    this.ttl = oldestUnexpiredTS;
-
-    /* how to deal with deletes */
-    this.isUserScan = scanType == ScanType.USER_SCAN;
-    // keep deleted cells: if compaction or raw scan
-    this.keepDeletedCells = scan.isRaw() ? KeepDeletedCells.TRUE :
-      isUserScan ? KeepDeletedCells.FALSE : scanInfo.getKeepDeletedCells();
-    // retain deletes: if minor compaction or raw scan
-    this.retainDeletesInOutput = scanType == ScanType.COMPACT_RETAIN_DELETES || scan.isRaw();
-    // seePastDeleteMarker: user initiated scans
-    this.seePastDeleteMarkers =
-        scanInfo.getKeepDeletedCells() != KeepDeletedCells.FALSE && isUserScan;
-
-    int maxVersions =
-        scan.isRaw() ? scan.getMaxVersions() : Math.min(scan.getMaxVersions(),
-          scanInfo.getMaxVersions());
-
-    // Single branch to deal with two types of reads (columns vs all in family)
-    if (columns == null || columns.size() == 0) {
-      // there is always a null column in the wildcard column query.
-      hasNullColumn = true;
-
-      // use a specialized scan for wildcard column tracker.
-      this.columns = new ScanWildcardColumnTracker(
-          scanInfo.getMinVersions(), maxVersions, oldestUnexpiredTS);
-    } else {
-      // whether there is null column in the explicit column query
-      hasNullColumn = (columns.first().length == 0);
-
-      // We can share the ExplicitColumnTracker, diff is we reset
-      // between rows, not between storefiles.
-      this.columns = new ExplicitColumnTracker(columns, scanInfo.getMinVersions(), maxVersions,
-          oldestUnexpiredTS);
-    }
-    this.isReversed = scan.isReversed();
-  }
-
-  private DeleteTracker instantiateDeleteTracker() throws IOException {
-    DeleteTracker tracker = new ScanDeleteTracker();
-    if (regionCoprocessorHost != null) {
-      tracker = regionCoprocessorHost.postInstantiateDeleteTracker(tracker);
-    }
-    return tracker;
-  }
-
-  /**
-   * Construct a QueryMatcher for a scan that drops deletes from a limited range of rows.
-   * @param scan
-   * @param scanInfo The store's immutable scan info
-   * @param columns
-   * @param earliestPutTs Earliest put seen in any of the store files.
-   * @param oldestUnexpiredTS the oldest timestamp we are interested in, based on TTL
-   * @param now the current server time
-   * @param dropDeletesFromRow The inclusive left bound of the range; can be EMPTY_START_ROW.
-   * @param dropDeletesToRow The exclusive right bound of the range; can be EMPTY_END_ROW.
-   * @param regionCoprocessorHost
-   * @throws IOException
-   */
-  public ScanQueryMatcher(Scan scan, ScanInfo scanInfo, NavigableSet<byte[]> columns,
-      long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, long now,
-      byte[] dropDeletesFromRow, byte[] dropDeletesToRow,
-      RegionCoprocessorHost regionCoprocessorHost) throws IOException {
-    this(scan, scanInfo, columns, ScanType.COMPACT_RETAIN_DELETES, readPointToUse, earliestPutTs,
-        oldestUnexpiredTS, now, regionCoprocessorHost);
-    Preconditions.checkArgument((dropDeletesFromRow != null) && (dropDeletesToRow != null));
-    this.dropDeletesFromRow = dropDeletesFromRow;
-    this.dropDeletesToRow = dropDeletesToRow;
-  }
-
-  /*
-   * Constructor for tests
-   */
-  ScanQueryMatcher(Scan scan, ScanInfo scanInfo,
-      NavigableSet<byte[]> columns, long oldestUnexpiredTS, long now) throws IOException {
-    this(scan, scanInfo, columns, ScanType.USER_SCAN,
-          Long.MAX_VALUE, /* max Readpoint to track versions */
-        HConstants.LATEST_TIMESTAMP, oldestUnexpiredTS, now, null);
-  }
-
-  /**
-   *
-   * @return  whether there is a null column in the query
-   */
-  public boolean hasNullColumnInQuery() {
-    return hasNullColumn;
-  }
-
-  /**
-   * Determines if the caller should do one of several things:
-   * - seek/skip to the next row (MatchCode.SEEK_NEXT_ROW)
-   * - seek/skip to the next column (MatchCode.SEEK_NEXT_COL)
-   * - include the current KeyValue (MatchCode.INCLUDE)
-   * - ignore the current KeyValue (MatchCode.SKIP)
-   * - go to the next row (MatchCode.DONE)
-   *
-   * @param cell KeyValue to check
-   * @return The match code instance.
-   * @throws IOException in case there is an internal consistency problem
-   *      caused by a data corruption.
-   */
-  public MatchCode match(Cell cell) throws IOException {
-    if (filter != null && filter.filterAllRemaining()) {
-      return MatchCode.DONE_SCAN;
-    }
-    if (curCell != null) {
-      int ret = this.rowComparator.compareRows(curCell, cell);
-      if (!this.isReversed) {
-        if (ret <= -1) {
-          return MatchCode.DONE;
-        } else if (ret >= 1) {
-          // could optimize this, if necessary?
-          // Could also be called SEEK_TO_CURRENT_ROW, but this
-          // should be rare/never happens.
-          return MatchCode.SEEK_NEXT_ROW;
-        }
-      } else {
-        if (ret <= -1) {
-          return MatchCode.SEEK_NEXT_ROW;
-        } else if (ret >= 1) {
-          return MatchCode.DONE;
-        }
-      }
-    } else {
-      // Since the curCell is null it means we are already sure that we have moved over to the next row
-      return MatchCode.DONE;
-    }
-
-    // optimize case.
-    if (this.stickyNextRow) {
-      return MatchCode.SEEK_NEXT_ROW;
-    }
-
-    if (this.columns.done()) {
-      stickyNextRow = true;
-      return MatchCode.SEEK_NEXT_ROW;
-    }
-
-    long timestamp = cell.getTimestamp();
-    // check for early out based on timestamp alone
-    if (columns.isDone(timestamp)) {
-      return columns.getNextRowOrNextColumn(cell);
-    }
-    // check if the cell is expired by cell TTL
-    if (HStore.isCellTTLExpired(cell, this.oldestUnexpiredTS, this.now)) {
-      return MatchCode.SKIP;
-    }
-
-    /*
-     * The delete logic is pretty complicated now.
-     * This is corroborated by the following:
-     * 1. The store might be instructed to keep deleted rows around.
-     * 2. A scan can optionally see past a delete marker now.
-     * 3. If deleted rows are kept, we have to find out when we can
-     *    remove the delete markers.
-     * 4. Family delete markers are always first (regardless of their TS)
-     * 5. Delete markers should not be counted as version
-     * 6. Delete markers affect puts of the *same* TS
-     * 7. Delete marker need to be version counted together with puts
-     *    they affect
-     */
-    byte typeByte = cell.getTypeByte();
-    long mvccVersion = cell.getSequenceId();
-    if (CellUtil.isDelete(cell)) {
-      if (keepDeletedCells == KeepDeletedCells.FALSE
-          || (keepDeletedCells == KeepDeletedCells.TTL && timestamp < ttl)) {
-        // first ignore delete markers if the scanner can do so, and the
-        // range does not include the marker
-        //
-        // during flushes and compactions also ignore delete markers newer
-        // than the readpoint of any open scanner, this prevents deleted
-        // rows that could still be seen by a scanner from being collected
-        boolean includeDeleteMarker = seePastDeleteMarkers ?
-            tr.withinTimeRange(timestamp) :
-            tr.withinOrAfterTimeRange(timestamp);
-        if (includeDeleteMarker
-            && mvccVersion <= maxReadPointToTrackVersions) {
-          this.deletes.add(cell);
-        }
-        // Can't early out now, because DelFam come before any other keys
-      }
-
-      if ((!isUserScan)
-          && timeToPurgeDeletes > 0
-          && (EnvironmentEdgeManager.currentTime() - timestamp)
-            <= timeToPurgeDeletes) {
-        return MatchCode.INCLUDE;
-      } else if (retainDeletesInOutput || mvccVersion > maxReadPointToTrackVersions) {
-        // always include or it is not time yet to check whether it is OK
-        // to purge deletes or not
-        if (!isUserScan) {
-          // if this is not a user scan (compaction), we can filter this delete marker right here
-          // otherwise (i.e. a "raw" scan) we fall through to normal version and timerange checking
-          return MatchCode.INCLUDE;
-        }
-      } else if (keepDeletedCells == KeepDeletedCells.TRUE
-          || (keepDeletedCells == KeepDeletedCells.TTL && timestamp >= ttl)) {
-        if (timestamp < earliestPutTs) {
-          // keeping delete rows, but there are no puts older than
-          // this delete in the store files.
-          return columns.getNextRowOrNextColumn(cell);
-        }
-        // else: fall through and do version counting on the
-        // delete markers
-      } else {
-        return MatchCode.SKIP;
-      }
-      // note the following next else if...
-      // delete markers are not subject to other delete markers
-    } else if (!this.deletes.isEmpty()) {
-      DeleteResult deleteResult = deletes.isDeleted(cell);
-      switch (deleteResult) {
-        case FAMILY_DELETED:
-        case COLUMN_DELETED:
-          return columns.getNextRowOrNextColumn(cell);
-        case VERSION_DELETED:
-        case FAMILY_VERSION_DELETED:
-          return MatchCode.SKIP;
-        case NOT_DELETED:
-          break;
-        default:
-          throw new RuntimeException("UNEXPECTED");
-        }
-    }
-
-    // NOTE: Cryptic stuff!
-    // if the timestamp is HConstants.OLDEST_TIMESTAMP, then this is a fake cell made to prime a
-    // Scanner; See KeyValueUtil#createLastOnRow. This Cell should never end up returning out of
-    // here a matchcode of INCLUDE else we will return to the client a fake Cell. If we call
-    // TimeRange, it will return 0 because it doesn't deal in OLDEST_TIMESTAMP and we will fall
-    // into the later code where we could return a matchcode of INCLUDE. See HBASE-16074 "ITBLL
-    // fails, reports lost big or tiny families" for a horror story. Check here for
-    // OLDEST_TIMESTAMP. TimeRange#compare is about more generic timestamps, between 0L and
-    // Long.MAX_VALUE. It doesn't do OLDEST_TIMESTAMP weird handling.
-    int timestampComparison = timestamp == HConstants.OLDEST_TIMESTAMP ? -1 : tr.compare(timestamp);
-    if (timestampComparison >= 1) {
-      return MatchCode.SKIP;
-    } else if (timestampComparison <= -1) {
-      return columns.getNextRowOrNextColumn(cell);
-    }
-
-    // STEP 1: Check if the column is part of the requested columns
-    MatchCode colChecker = columns.checkColumn(cell, typeByte);
-    if (colChecker == MatchCode.INCLUDE) {
-      ReturnCode filterResponse = ReturnCode.SKIP;
-      // STEP 2: Yes, the column is part of the requested columns. Check if filter is present
-      if (filter != null) {
-        // STEP 3: Filter the key value and return if it filters out
-        filterResponse = filter.filterKeyValue(cell);
-        switch (filterResponse) {
-        case SKIP:
-          return MatchCode.SKIP;
-        case NEXT_COL:
-          return columns.getNextRowOrNextColumn(cell);
-        case NEXT_ROW:
-          stickyNextRow = true;
-          return MatchCode.SEEK_NEXT_ROW;
-        case SEEK_NEXT_USING_HINT:
-          return MatchCode.SEEK_NEXT_USING_HINT;
-        default:
-          // It means it is either include or include and seek next
-          break;
-        }
-      }
-      /*
-       * STEP 4: Reaching this step means the column is part of the requested columns and either
-       * the filter is null or the filter has returned INCLUDE or INCLUDE_AND_NEXT_COL response.
-       * Now check the number of versions needed. This method call returns SKIP, INCLUDE,
-       * INCLUDE_AND_SEEK_NEXT_ROW, INCLUDE_AND_SEEK_NEXT_COL.
-       *
-       * FilterResponse            ColumnChecker               Desired behavior
-       * INCLUDE                   SKIP                        row has already been included, SKIP.
-       * INCLUDE                   INCLUDE                     INCLUDE
-       * INCLUDE                   INCLUDE_AND_SEEK_NEXT_COL   INCLUDE_AND_SEEK_NEXT_COL
-       * INCLUDE                   INCLUDE_AND_SEEK_NEXT_ROW   INCLUDE_AND_SEEK_NEXT_ROW
-       * INCLUDE_AND_SEEK_NEXT_COL SKIP                        row has already been included, SKIP.
-       * INCLUDE_AND_SEEK_NEXT_COL INCLUDE                     INCLUDE_AND_SEEK_NEXT_COL
-       * INCLUDE_AND_SEEK_NEXT_COL INCLUDE_AND_SEEK_NEXT_COL   INCLUDE_AND_SEEK_NEXT_COL
-       * INCLUDE_AND_SEEK_NEXT_COL INCLUDE_AND_SEEK_NEXT_ROW   INCLUDE_AND_SEEK_NEXT_ROW
-       *
-       * In all the above scenarios, we return the column checker return value except for
-       * FilterResponse (INCLUDE_AND_SEEK_NEXT_COL) and ColumnChecker(INCLUDE)
-       */
-      colChecker = columns.checkVersions(cell, timestamp, typeByte,
-          mvccVersion > maxReadPointToTrackVersions);
-      // Optimize with stickyNextRow
-      boolean seekNextRowFromEssential = filterResponse == ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW &&
-          filter.isFamilyEssential(cell.getFamilyArray());
-      if (colChecker == MatchCode.INCLUDE_AND_SEEK_NEXT_ROW || seekNextRowFromEssential) {
-        stickyNextRow = true;
-      }
-      if (filterResponse == ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW) {
-        if (colChecker != MatchCode.SKIP) {
-          return MatchCode.INCLUDE_AND_SEEK_NEXT_ROW;
-        }
-        return MatchCode.SEEK_NEXT_ROW;
-      }
-      return (filterResponse == ReturnCode.INCLUDE_AND_NEXT_COL &&
-          colChecker == MatchCode.INCLUDE) ? MatchCode.INCLUDE_AND_SEEK_NEXT_COL : colChecker;
-    }
-    stickyNextRow = (colChecker == MatchCode.SEEK_NEXT_ROW) ? true : stickyNextRow;
-    return colChecker;
-  }
-  }
-
-  /**
-   * Handle partial-drop-deletes. As we match keys in order, when we have a range from which
-   * we can drop deletes, we can set retainDeletesInOutput to false for the duration of this
-   * range only, and maintain consistency.
-   */
-  private void checkPartialDropDeleteRange(Cell curCell) {
-    // If partial-drop-deletes are used, initially, dropDeletesFromRow and dropDeletesToRow
-    // are both set, and the matcher is set to retain deletes. We assume ordered keys. When
-    // dropDeletesFromRow is leq current kv, we start dropping deletes and reset
-    // dropDeletesFromRow; thus the 2nd "if" starts to apply.
-    if ((dropDeletesFromRow != null)
-        && (Arrays.equals(dropDeletesFromRow, HConstants.EMPTY_START_ROW) ||
-            (CellComparator.COMPARATOR.compareRows(curCell, dropDeletesFromRow, 0,
-                dropDeletesFromRow.length) >= 0))) {
-      retainDeletesInOutput = false;
-      dropDeletesFromRow = null;
-    }
-    // If dropDeletesFromRow is null and dropDeletesToRow is set, we are inside the partial-
-    // drop-deletes range. When dropDeletesToRow is leq current kv, we stop dropping deletes,
-    // and reset dropDeletesToRow so that we don't do any more compares.
-    if ((dropDeletesFromRow == null)
-        && (dropDeletesToRow != null)
-        && !Arrays.equals(dropDeletesToRow, HConstants.EMPTY_END_ROW)
-        && (CellComparator.COMPARATOR
-            .compareRows(curCell, dropDeletesToRow, 0, dropDeletesToRow.length) >= 0)) {
-      retainDeletesInOutput = true;
-      dropDeletesToRow = null;
-    }
-  }
-
-  /**
-   * @return Returns false if we know there are no more rows to be scanned (We've reached the
-   *         <code>stopRow</code> or we are scanning one row only because this Scan is for a
-   *         Get, etc.)
-   */
-  public boolean moreRowsMayExistAfter(Cell kv) {
-    // If a 'get' Scan -- we are doing a Get (every Get is a single-row Scan in implementation) --
-    // then we are looking at one row only, the one specified in the Get coordinate, so we know
-    // for sure that there are no more rows on this Scan
-    if (this.get) {
-      return false;
-    }
-    // If no stopRow, return that there may be more rows. The tests that follow depend on a
-    // non-empty, non-default stopRow so this little test below short-circuits out doing the
-    // following compares.
-    if (this.stopRow == null || this.stopRow == HConstants.EMPTY_BYTE_ARRAY) {
-      return true;
-    }
-    return this.isReversed ?
-      rowComparator.compareRows(kv, stopRow, 0, stopRow.length) > 0 :
-      Bytes.equals(stopRow, HConstants.EMPTY_END_ROW) ||
-        rowComparator.compareRows(kv, stopRow, 0, stopRow.length) < 0;
-  }
-
-  /**
-   * Set the row when there is a change in row
-   * @param curCell the cell of the new row
-   */
-  public void setToNewRow(Cell curCell) {
-    checkPartialDropDeleteRange(curCell);
-    this.curCell = curCell;
-    reset();
-  }
-
-  public void reset() {
-    this.deletes.reset();
-    this.columns.reset();
-
-    stickyNextRow = false;
-  }
-
-  /**
-   * @return the start key
-   */
-  public Cell getStartKey() {
-    return this.startKey;
-  }
-
-  /**
-   * @return the Filter
-   */
-  Filter getFilter() {
-    return this.filter;
-  }
-
-  public Cell getNextKeyHint(Cell kv) throws IOException {
-    if (filter == null) {
-      return null;
-    } else {
-      return filter.getNextCellHint(kv);
-    }
-  }
-
-  public Cell getKeyForNextColumn(Cell kv) {
-    ColumnCount nextColumn = columns.getColumnHint();
-    if (nextColumn == null) {
-      return CellUtil.createLastOnRowCol(kv);
-    } else {
-      return CellUtil.createFirstOnRowCol(kv, nextColumn.getBuffer(), 
nextColumn.getOffset(),
-          nextColumn.getLength());
-    }
-  }
-
-  public Cell getKeyForNextRow(Cell c) {
-    return CellUtil.createLastOnRow(c);
-  }
-
-  /**
-   * @param nextIndexed the key of the next entry in the block index (if any)
-   * @param kv The Cell we're using to calculate the seek key
-   * @return result of the compare between the indexed key and the key portion of the passed cell
-   */
-  public int compareKeyForNextRow(Cell nextIndexed, Cell kv) {
-    return rowComparator.compareKeyBasedOnColHint(nextIndexed, kv, 0, 0, null, 0, 0,
-        HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode());
-  }
-
-  /**
-   * @param nextIndexed the key of the next entry in the block index (if any)
-   * @param currentCell The Cell we're using to calculate the seek key
-   * @return result of the compare between the indexed key and the key portion of the passed cell
-   */
-  public int compareKeyForNextColumn(Cell nextIndexed, Cell currentCell) {
-    ColumnCount nextColumn = columns.getColumnHint();
-    if (nextColumn == null) {
-      return rowComparator.compareKeyBasedOnColHint(nextIndexed, currentCell, 0, 0, null, 0, 0,
-          HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode());
-    } else {
-      return rowComparator.compareKeyBasedOnColHint(nextIndexed, currentCell,
-          currentCell.getFamilyOffset(), currentCell.getFamilyLength(), nextColumn.getBuffer(),
-          nextColumn.getOffset(), nextColumn.getLength(), HConstants.LATEST_TIMESTAMP,
-          Type.Maximum.getCode());
-    }
-  }
-
-  boolean isUserScan() {
-    return this.isUserScan;
-  }
-
-  // Used only for testing purposes
-  static MatchCode checkColumn(ColumnTracker columnTracker, byte[] bytes, int offset,
-      int length, long ttl, byte type, boolean ignoreCount) throws IOException {
-    KeyValue kv = KeyValueUtil.createFirstOnRow(HConstants.EMPTY_BYTE_ARRAY, 0, 0,
-        HConstants.EMPTY_BYTE_ARRAY, 0, 0, bytes, offset, length);
-    MatchCode matchCode = columnTracker.checkColumn(kv, type);
-    if (matchCode == MatchCode.INCLUDE) {
-      return columnTracker.checkVersions(kv, ttl, type, ignoreCount);
-    }
-    return matchCode;
-  }
-
-  /**
-   * {@link #match} return codes.  These instruct the scanner moving through
-   * memstores and StoreFiles what to do with the current KeyValue.
-   * <p>
-   * Additionally, this contains "early-out" language to tell the scanner to
-   * move on to the next File (memstore or Storefile), or to return immediately.
-   */
-  public static enum MatchCode {
-    /**
-     * Include KeyValue in the returned result
-     */
-    INCLUDE,
-
-    /**
-     * Do not include KeyValue in the returned result
-     */
-    SKIP,
-
-    /**
-     * Do not include, jump to next StoreFile or memstore (in time order)
-     */
-    NEXT,
-
-    /**
-     * Do not include, return current result
-     */
-    DONE,
-
-    /**
-     * These codes are used by the ScanQueryMatcher
-     */
-
-    /**
-     * Done with the row, seek there.
-     */
-    SEEK_NEXT_ROW,
-    /**
-     * Done with column, seek to next.
-     */
-    SEEK_NEXT_COL,
-
-    /**
-     * Done with scan, thanks to the row filter.
-     */
-    DONE_SCAN,
-
-    /**
-     * Seek to next key which is given as hint.
-     */
-    SEEK_NEXT_USING_HINT,
-
-    /**
-     * Include KeyValue and done with column, seek to next.
-     */
-    INCLUDE_AND_SEEK_NEXT_COL,
-
-    /**
-     * Include KeyValue and done with row, seek to next.
-     */
-    INCLUDE_AND_SEEK_NEXT_ROW,
-  }
-}
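
For orientation, here is a minimal sketch of how a scanner loop consumes the MatchCode values
defined above. The names "matcher", "heap" and "results" are illustrative stand-ins, not from
this commit; the real consumer is StoreScanner#next, which handles more cases and optimizations.

    ScanQueryMatcher.MatchCode code = matcher.match(cell);
    switch (code) {
      case INCLUDE:
        results.add(cell);  // hand the cell back to the caller
        heap.next();        // and keep walking the row
        break;
      case SKIP:
        heap.next();        // drop the cell, stay in place
        break;
      case SEEK_NEXT_COL:
        heap.reseek(matcher.getKeyForNextColumn(cell)); // jump past remaining versions
        break;
      case SEEK_NEXT_ROW:
        heap.reseek(matcher.getKeyForNextRow(cell));    // jump past the rest of the row
        break;
      case SEEK_NEXT_USING_HINT:
        heap.reseek(matcher.getNextKeyHint(cell));      // let the filter steer the seek
        break;
      case DONE_SCAN:
        return false;       // the filter says nothing further can match
      default:
        break;
    }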

http://git-wip-us.apache.org/repos/asf/hbase/blob/b35cf8f9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java
deleted file mode 100644
index b5ef319..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanWildcardColumnTracker.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode;
-import org.apache.hadoop.hbase.util.Bytes;
-
-/**
- * Keeps track of the columns for a scan if they are not explicitly specified
- */
-@InterfaceAudience.Private
-public class ScanWildcardColumnTracker implements ColumnTracker {
-  private Cell columnCell = null;
-  private int currentCount = 0;
-  private int maxVersions;
-  private int minVersions;
-  /* Keeps track of the latest timestamp and type included for current column.
-   * Used to eliminate duplicates. */
-  private long latestTSOfCurrentColumn;
-  private byte latestTypeOfCurrentColumn;
-
-  private long oldestStamp;
-
-  /**
-   * Return maxVersions of every row.
-   * @param minVersion Minimum number of versions to keep
-   * @param maxVersion Maximum number of versions to return
-   * @param oldestUnexpiredTS oldest timestamp that has not expired according
-   *          to the TTL.
-   */
-  public ScanWildcardColumnTracker(int minVersion, int maxVersion,
-      long oldestUnexpiredTS) {
-    this.maxVersions = maxVersion;
-    this.minVersions = minVersion;
-    this.oldestStamp = oldestUnexpiredTS;
-  }
-
-  /**
-   * {@inheritDoc}
-   * This receives puts *and* deletes.
-   */
-  @Override
-  public MatchCode checkColumn(Cell cell, byte type) throws IOException {
-    return MatchCode.INCLUDE;
-  }
-
-  /**
-   * {@inheritDoc}
-   * This receives puts *and* deletes. Deletes do not count as a version, but rather
-   * take the version of the previous put (so eventually all but the last can be reclaimed).
-   */
-  @Override
-  public ScanQueryMatcher.MatchCode checkVersions(Cell cell,
-      long timestamp, byte type, boolean ignoreCount) throws IOException {
-
-    if (columnCell == null) {
-      // first iteration.
-      resetCell(cell);
-      if (ignoreCount) return ScanQueryMatcher.MatchCode.INCLUDE;
-      // do not count a delete marker as another version
-      return checkVersion(type, timestamp);
-    }
-    int cmp = CellComparator.compareQualifiers(cell, this.columnCell);
-    if (cmp == 0) {
-      if (ignoreCount) return ScanQueryMatcher.MatchCode.INCLUDE;
-
-      // If column matches, check if it is a duplicate timestamp
-      if (sameAsPreviousTSAndType(timestamp, type)) {
-        return ScanQueryMatcher.MatchCode.SKIP;
-      }
-      return checkVersion(type, timestamp);
-    }
-
-    resetTSAndType();
-
-    // new col > old col
-    if (cmp > 0) {
-      // switched columns, let's do something
-      resetCell(cell);
-      if (ignoreCount) return ScanQueryMatcher.MatchCode.INCLUDE;
-      return checkVersion(type, timestamp);
-    }
-
-    // new col < old col
-    // WARNING: This means that very likely an edit for some other family
-    // was incorrectly stored into the store for this one. Throw an exception,
-    // because this might lead to data corruption.
-    throw new IOException(
-        "ScanWildcardColumnTracker.checkColumn ran into a column actually " +
-        "smaller than the previous column: " +
-        Bytes.toStringBinary(CellUtil.cloneQualifier(cell)));
-  }
-
-  private void resetCell(Cell columnCell) {
-    this.columnCell = columnCell;
-    currentCount = 0;
-  }
-
-  /**
-   * Check whether this version should be retained.
-   * There are 4 variables considered:
-   * If this version is past max versions -> skip it
-   * If this kv has expired or was deleted, check min versions
-   * to decide whether to skip it or not.
-   *
-   * Increase the version counter unless this is a delete
-   */
-  private MatchCode checkVersion(byte type, long timestamp) {
-    if (!CellUtil.isDelete(type)) {
-      currentCount++;
-    }
-    if (currentCount > maxVersions) {
-      return ScanQueryMatcher.MatchCode.SEEK_NEXT_COL; // skip to next col
-    }
-    // keep the KV if required by minversions or it is not expired, yet
-    if (currentCount <= minVersions || !isExpired(timestamp)) {
-      setTSAndType(timestamp, type);
-      return ScanQueryMatcher.MatchCode.INCLUDE;
-    } else {
-      return MatchCode.SEEK_NEXT_COL;
-    }
-
-  }
-
-  @Override
-  public void reset() {
-    columnCell = null;
-    resetTSAndType();
-  }
-
-  private void resetTSAndType() {
-    latestTSOfCurrentColumn = HConstants.LATEST_TIMESTAMP;
-    latestTypeOfCurrentColumn = 0;
-  }
-
-  private void setTSAndType(long timestamp, byte type) {
-    latestTSOfCurrentColumn = timestamp;
-    latestTypeOfCurrentColumn = type;
-  }
-
-  private boolean sameAsPreviousTSAndType(long timestamp, byte type) {
-    return timestamp == latestTSOfCurrentColumn && type == latestTypeOfCurrentColumn;
-  }
-
-  private boolean isExpired(long timestamp) {
-    return timestamp < oldestStamp;
-  }
-
-  /**
-   * Used by matcher and scan/get to get a hint of the next column
-   * to seek to after checkColumn() returns SKIP.  Returns the next interesting
-   * column we want, or NULL if there is none (wildcard scanner).
-   *
-   * @return The column count.
-   */
-  public ColumnCount getColumnHint() {
-    return null;
-  }
-
-  /**
-   * We can never know a-priori if we are done, so always return false.
-   * @return false
-   */
-  @Override
-  public boolean done() {
-    return false;
-  }
-
-  @Override
-  public MatchCode getNextRowOrNextColumn(Cell cell) {
-    return MatchCode.SEEK_NEXT_COL;
-  }
-
-  public boolean isDone(long timestamp) {
-    return minVersions <= 0 && isExpired(timestamp);
-  }
-}
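
The retention rule that checkVersion encodes above can be read in isolation. A minimal standalone
sketch (the method name "keep" and its parameter list are invented here for illustration): delete
markers do not bump the version counter, maxVersions caps what is returned, and minVersions can
keep otherwise-expired versions alive.

    // Hypothetical rendering of ScanWildcardColumnTracker#checkVersion's decision.
    static boolean keep(int countSoFar, int minVersions, int maxVersions,
        long timestamp, long oldestUnexpiredTS, boolean isDelete) {
      // a delete marker is not counted as another version
      int count = isDelete ? countSoFar : countSoFar + 1;
      if (count > maxVersions) {
        return false; // past max versions: seek to the next column
      }
      // keep while required by minVersions, or while the cell has not yet expired
      return count <= minVersions || timestamp >= oldestUnexpiredTS;
    }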

http://git-wip-us.apache.org/repos/asf/hbase/blob/b35cf8f9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
index 5dec59a..853a4cf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
 import org.apache.hadoop.hbase.security.User;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b35cf8f9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
index 1582237..dca7388 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
@@ -18,6 +18,12 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import java.io.DataInput;
+import java.io.IOException;
+import java.util.Map;
+import java.util.SortedSet;
+import java.util.concurrent.atomic.AtomicInteger;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -43,12 +49,6 @@ import org.apache.hadoop.hbase.util.BloomFilter;
 import org.apache.hadoop.hbase.util.BloomFilterFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 
-import java.io.DataInput;
-import java.io.IOException;
-import java.util.Map;
-import java.util.SortedSet;
-import java.util.concurrent.atomic.AtomicInteger;
-
 /**
  * Reader for a StoreFile.
  */
@@ -114,46 +114,22 @@ public class StoreFileReader {
   }
 
   /**
-   * Uses {@link #getStoreFileScanner(boolean, boolean, boolean, long, long)} by setting
-   * {@code isCompaction} to false, {@code readPt} to 0 and {@code scannerOrder} to 0.
-   * Do not use this overload if using this scanner for compactions.
-   *
-   * @see #getStoreFileScanner(boolean, boolean, boolean, long, long)
-   */
-  public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread) {
-    // 0 is passed as readpoint because this method is only used by test
-    // where StoreFile is directly operated upon
-    return getStoreFileScanner(cacheBlocks, pread, false, 0, 0);
-  }
-
-  /**
-   * Uses {@link #getStoreFileScanner(boolean, boolean, boolean, long, long)} by setting
-   * {@code scannerOrder} to 0.
-   *
-   * @see #getStoreFileScanner(boolean, boolean, boolean, long, long)
-   */
-  public StoreFileScanner getStoreFileScanner(
-      boolean cacheBlocks, boolean pread, boolean isCompaction, long readPt) {
-    return getStoreFileScanner(cacheBlocks, pread, isCompaction, readPt, 0);
-  }
-
-  /**
    * Get a scanner to scan over this StoreFile.
-   *
    * @param cacheBlocks should this scanner cache blocks?
    * @param pread use pread (for highly concurrent small readers)
    * @param isCompaction is scanner being used for compaction?
    * @param scannerOrder Order of this scanner relative to other scanners. See
-   *  {@link KeyValueScanner#getScannerOrder()}.
+   *          {@link KeyValueScanner#getScannerOrder()}.
+   * @param canOptimizeForNonNullColumn {@code true} if we can make sure there is no null column,
+   *          otherwise {@code false}. This is a hint for optimization.
    * @return a scanner
    */
-  public StoreFileScanner getStoreFileScanner(
-      boolean cacheBlocks, boolean pread, boolean isCompaction, long readPt, long scannerOrder) {
+  public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread,
+      boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) {
     // Increment the ref count
     refCount.incrementAndGet();
-    return new StoreFileScanner(
-        this, getScanner(cacheBlocks, pread, isCompaction), !isCompaction, reader.hasMVCCInfo(),
-        readPt, scannerOrder);
+    return new StoreFileScanner(this, getScanner(cacheBlocks, pread, isCompaction), !isCompaction,
+        reader.hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn);
   }
 
   /**
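
With the two convenience overloads gone, callers that relied on their defaults now pass every
argument explicitly. A hedged sketch of the migration (the "reader" variable and the argument
values are illustrative, mirroring the defaults the removed overloads supplied):

    // before (removed overload): reader.getStoreFileScanner(true, false);
    // after: isCompaction=false, readPt=0, scannerOrder=0, canOptimizeForNonNullColumn=false
    StoreFileScanner scanner = reader.getStoreFileScanner(true, false, false, 0, 0, false);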

http://git-wip-us.apache.org/repos/asf/hbase/blob/b35cf8f9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index 4955ffe..ac55d8c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
 import org.apache.hadoop.hbase.util.Counter;
 
 /**
@@ -54,49 +55,41 @@ public class StoreFileScanner implements KeyValueScanner {
   private boolean delayedReseek;
   private Cell delayedSeekKV;
 
-  private boolean enforceMVCC = false;
-  private boolean hasMVCCInfo = false;
+  private final boolean enforceMVCC;
+  private final boolean hasMVCCInfo;
   // A flag that represents whether we can stop skipping KeyValues for MVCC
   // if we have encountered the next row. Only used for reversed scan
   private boolean stopSkippingKVsIfNextRow = false;
 
   private static Counter seekCount;
 
-  private ScanQueryMatcher matcher;
+  private final boolean canOptimizeForNonNullColumn;
 
-  private long readPt;
+  private final long readPt;
 
   // Order of this scanner relative to other scanners when duplicate key-value is found.
   // Higher values means scanner has newer data.
-  private long scannerOrder;
+  private final long scannerOrder;
 
   /**
    * Implements a {@link KeyValueScanner} on top of the specified {@link HFileScanner}
    * @param useMVCC If true, scanner will filter out updates with MVCC larger than {@code readPt}.
    * @param readPt MVCC value to use to filter out the updates newer than this scanner.
    * @param hasMVCC Set to true if underlying store file reader has MVCC info.
+   * @param scannerOrder Order of the scanner relative to other scanners. See
+   *          {@link KeyValueScanner#getScannerOrder()}.
+   * @param canOptimizeForNonNullColumn {@code true} if we can make sure there is no null column,
+   *          otherwise {@code false}. This is a hint for optimization.
    */
   public StoreFileScanner(StoreFileReader reader, HFileScanner hfs, boolean useMVCC,
-      boolean hasMVCC, long readPt) {
-    this (reader, hfs, useMVCC, hasMVCC, readPt, 0);
-  }
-
-  /**
-   * Implements a {@link KeyValueScanner} on top of the specified {@link HFileScanner}
-   * @param useMVCC If true, scanner will filter out updates with MVCC larger than {@code readPt}.
-   * @param readPt MVCC value to use to filter out the updates newer than this scanner.
-   * @param hasMVCC Set to true if underlying store file reader has MVCC info.
-   * @param scannerOrder Order of the scanner relative to other scanners.
-   *   See {@link KeyValueScanner#getScannerOrder()}.
-   */
-  public StoreFileScanner(StoreFileReader reader, HFileScanner hfs, boolean useMVCC,
-      boolean hasMVCC, long readPt, long scannerOrder) {
+      boolean hasMVCC, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) {
     this.readPt = readPt;
     this.reader = reader;
     this.hfs = hfs;
     this.enforceMVCC = useMVCC;
     this.hasMVCCInfo = hasMVCC;
     this.scannerOrder = scannerOrder;
+    this.canOptimizeForNonNullColumn = canOptimizeForNonNullColumn;
   }
 
   boolean isPrimaryReplica() {
@@ -126,24 +119,20 @@ public class StoreFileScanner implements KeyValueScanner {
   }
 
   /**
-   * Return an array of scanners corresponding to the given set of store files,
-   * And set the ScanQueryMatcher for each store file scanner for further
-   * optimization
+   * Return an array of scanners corresponding to the given set of store files, and set the
+   * ScanQueryMatcher for each store file scanner for further optimization
    */
-  public static List<StoreFileScanner> getScannersForStoreFiles(
-      Collection<StoreFile> files, boolean cacheBlocks, boolean usePread,
-      boolean isCompaction, boolean canUseDrop,
+  public static List<StoreFileScanner> getScannersForStoreFiles(Collection<StoreFile> files,
+      boolean cacheBlocks, boolean usePread, boolean isCompaction, boolean canUseDrop,
       ScanQueryMatcher matcher, long readPt, boolean isPrimaryReplica) throws IOException {
-    List<StoreFileScanner> scanners = new ArrayList<StoreFileScanner>(
-        files.size());
+    List<StoreFileScanner> scanners = new ArrayList<StoreFileScanner>(files.size());
     List<StoreFile> sorted_files = new ArrayList<>(files);
     Collections.sort(sorted_files, StoreFile.Comparators.SEQ_ID);
     for (int i = 0; i < sorted_files.size(); i++) {
       StoreFileReader r = sorted_files.get(i).createReader();
       r.setReplicaStoreFile(isPrimaryReplica);
-      StoreFileScanner scanner = r.getStoreFileScanner(cacheBlocks, usePread,
-          isCompaction, readPt, i);
-      scanner.setScanQueryMatcher(matcher);
+      StoreFileScanner scanner = r.getStoreFileScanner(cacheBlocks, usePread, isCompaction, readPt,
+        i, matcher != null ? !matcher.hasNullColumnInQuery() : false);
       scanners.add(scanner);
     }
     return scanners;
@@ -360,12 +349,12 @@ public class StoreFileScanner implements KeyValueScanner {
       // check ROWCOL Bloom filter first.
       if (reader.getBloomFilterType() == BloomType.ROWCOL) {
         haveToSeek = reader.passesGeneralRowColBloomFilter(kv);
-      } else if (this.matcher != null && !matcher.hasNullColumnInQuery() &&
-          ((CellUtil.isDeleteFamily(kv) || CellUtil.isDeleteFamilyVersion(kv)))) {
+      } else if (canOptimizeForNonNullColumn
+          && ((CellUtil.isDeleteFamily(kv) || CellUtil.isDeleteFamilyVersion(kv)))) {
         // if there is no such delete family kv in the store file,
         // then no need to seek.
-        haveToSeek = reader.passesDeleteFamilyBloomFilter(kv.getRowArray(),
-            kv.getRowOffset(), kv.getRowLength());
+        haveToSeek = reader.passesDeleteFamilyBloomFilter(kv.getRowArray(), kv.getRowOffset(),
+          kv.getRowLength());
       }
     }
 
@@ -434,10 +423,6 @@ public class StoreFileScanner implements KeyValueScanner {
     }
   }
 
-  public void setScanQueryMatcher(ScanQueryMatcher matcher) {
-    this.matcher = matcher;
-  }
-
   @Override
   public boolean isFileScanner() {
     return true;
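
The net effect in this file is that StoreFileScanner no longer holds a ScanQueryMatcher at all:
the one fact it consumed (whether the query may contain a null column) is computed once and passed
down as a boolean. A sketch of the hint computation now done up front in getScannersForStoreFiles
(the local variable name "noNullColumn" is illustrative):

    boolean noNullColumn = matcher != null ? !matcher.hasNullColumnInQuery() : false;
    StoreFileScanner scanner = r.getStoreFileScanner(cacheBlocks, usePread, isCompaction, readPt,
        i, noNullColumn); // the hint gates the DeleteFamily Bloom filter check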
