kadirozde commented on a change in pull request #973:
URL: https://github.com/apache/phoenix/pull/973#discussion_r543593516



##########
File path: 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GlobalIndexRegionScanner.java
##########
@@ -1343,6 +1346,36 @@ public int prepareIndexMutations(Put put, Delete del, Map<byte[], List<Mutation>
         return indexMutations.size();
     }
 
+    static boolean adjustScanFilter(Scan scan) {
+        // For rebuilds we use count (*) as query for regular tables which ends up setting the FKOF on scan

Review comment:
       ok
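
For anyone reading along: a minimal sketch of what the extracted helper presumably looks like, inferred from the inline logic it replaces in getLocalScanner() (see the hunk further down in this review). The boolean return value and exact structure are assumptions.

```java
// Sketch only; inferred from the removed inline logic in getLocalScanner().
// The boolean return value is an assumption.
static boolean adjustScanFilter(Scan scan) {
    // count(*) rebuild queries set FirstKeyOnlyFilter (FKOF) on the scan.
    // That filter skips to the next row after one column, but rebuilds
    // need all columns and all versions.
    if (scan.getFilter() instanceof FirstKeyOnlyFilter) {
        scan.setFilter(null);
        return true;
    } else if (scan.getFilter() != null) {
        // Wrap the filter so that all versions are returned
        scan.setFilter(new AllVersionsIndexRebuildFilter(scan.getFilter()));
        return true;
    }
    return false;
}
```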

##########
File path: 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
##########
@@ -241,6 +231,12 @@ public void preScannerOpen(org.apache.hadoop.hbase.coprocessor.ObserverContext<R
             // last possible moment. You need to swap the start/stop and make the
             // start exclusive and the stop inclusive.
             ScanUtil.setupReverseScan(scan);
+            if (!(scan.getFilter() instanceof PagedFilter)) {
+                byte[] pageSizeMsBytes = scan.getAttribute(BaseScannerRegionObserver.SERVER_PAGE_SIZE_MS);
+                if (pageSizeMsBytes != null) {
+                    scan.setFilter(new PagedFilter(scan.getFilter(), Bytes.toLong(pageSizeMsBytes)/2));

Review comment:
       Good suggestion.
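
As a hedged illustration of the pairing here (the 60s budget and the headroom rationale below are my assumptions, not taken from the PR): the client tags the scan with a paging budget in milliseconds, and this hook wraps the filter so the scan yields control within half of it.

```java
// Hypothetical client-side counterpart: tag the scan with a paging budget.
Scan scan = new Scan();
scan.setAttribute(BaseScannerRegionObserver.SERVER_PAGE_SIZE_MS,
        Bytes.toBytes(60000L)); // 60s is an arbitrary example value
// In preScannerOpen() above, the server then installs
//   new PagedFilter(scan.getFilter(), 60000L / 2)
// presumably halving the budget to leave headroom before the RPC times out.
```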

##########
File path: 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GlobalIndexRegionScanner.java
##########
@@ -1351,26 +1384,18 @@ protected RegionScanner getLocalScanner() throws IOException {
             Scan incrScan = new Scan(scan);
             incrScan.setTimeRange(minTimestamp, scan.getTimeRange().getMax());
             incrScan.setRaw(true);
-            incrScan.setMaxVersions();
+            incrScan.readAllVersions();
             incrScan.getFamilyMap().clear();
             incrScan.setCacheBlocks(false);
             for (byte[] family : scan.getFamilyMap().keySet()) {
                 incrScan.addFamily(family);
             }
-            // For rebuilds we use count (*) as query for regular tables which ends up setting the FKOF on scan
-            // This filter doesn't give us all columns and skips to the next row as soon as it finds 1 col
-            // For rebuilds we need all columns and all versions
-            if (scan.getFilter() instanceof FirstKeyOnlyFilter) {
-                incrScan.setFilter(null);
-            } else if (scan.getFilter() != null) {
-                // Override the filter so that we get all versions
-                incrScan.setFilter(new AllVersionsIndexRebuildFilter(scan.getFilter()));
-            }
+            adjustScanFilter(incrScan);
             if(nextStartKey != null) {
                 incrScan.setStartRow(nextStartKey);
             }
             List<KeyRange> keys = new ArrayList<>();
-            try(RegionScanner scanner = region.getScanner(incrScan)) {
+            try(RegionScanner scanner = new PagedRegionScanner(region, region.getScanner(incrScan), incrScan)) {

Review comment:
       ok
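
For context, a hedged sketch of the consumption pattern a paged scanner implies, based on the isDummy() helpers this PR adds to ScanUtil; the exact contract (a single empty-column cell marking a page boundary) is my reading, not confirmed here.

```java
// Assumed pattern: when the paging budget expires mid-scan, the scanner
// returns one "dummy" cell (empty family/qualifier) instead of a real row.
List<Cell> row = new ArrayList<>();
boolean hasMore;
do {
    hasMore = scanner.next(row);
    if (!row.isEmpty() && !ScanUtil.isDummy(row)) {
        // process the real row here
    } // else: page boundary (or end of data), keep scanning
    row.clear();
} while (hasMore);
```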

##########
File path: phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
##########
@@ -1223,21 +1232,66 @@ public static void getDummyResult(List<Cell> result) {
         getDummyResult(EMPTY_BYTE_ARRAY, result);
     }
 
+    public static Tuple getDummyTuple(byte[] rowKey) {
+        List<Cell> result = new ArrayList<Cell>(1);
+        getDummyResult(rowKey, result);
+        return new ResultTuple(Result.create(result));
+    }
+
+    public static Tuple getDummyTuple() {
+        List<Cell> result = new ArrayList<Cell>(1);
+        getDummyResult(result);
+        return new ResultTuple(Result.create(result));
+    }
+
+    public static Tuple getDummyTuple(Tuple tuple) {
+        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+        tuple.getKey(ptr);
+        return getDummyTuple(ptr.copyBytes());
+    }
+
+    public static boolean isDummy(Cell cell) {
+        return CellUtil.matchingColumn(cell, EMPTY_BYTE_ARRAY, EMPTY_BYTE_ARRAY);
+    }
+
     public static boolean isDummy(Result result) {
-        // Check if the result is a dummy result
         if (result.rawCells().length != 1) {
             return false;
         }
         Cell cell = result.rawCells()[0];
-        return CellUtil.matchingColumn(cell, EMPTY_BYTE_ARRAY, EMPTY_BYTE_ARRAY);
+        return isDummy(cell);
     }
 
     public static boolean isDummy(List<Cell> result) {
-        // Check if the result is a dummy result
         if (result.size() != 1) {
             return false;
         }
         Cell cell = result.get(0);
-        return CellUtil.matchingColumn(cell, EMPTY_BYTE_ARRAY, EMPTY_BYTE_ARRAY);
+        return isDummy(cell);
+    }
+
+    public static boolean isDummy(Tuple tuple) {
+        if (tuple instanceof ResultTuple) {
+            return isDummy(((ResultTuple) tuple).getResult());
+        }
+        return false;
+    }
+
+    public static PagedFilter getPhoenixPageFilter(Scan scan) {
+        Filter filter = scan.getFilter();

Review comment:
       PagedFilter is added on the server side by coprocessors, after the client prepares the scan, in order to wrap the filter set by the client. It is not used by the client. The method should be renamed to getPhoenixPagedFilter().
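
A plausible completion of the truncated method, with the suggested rename applied; everything past the first line shown in the hunk is my guess.

```java
public static PagedFilter getPhoenixPagedFilter(Scan scan) {
    Filter filter = scan.getFilter();
    // PagedFilter wraps the client's filter at the top level, so a direct
    // instanceof check should suffice (assumption).
    if (filter instanceof PagedFilter) {
        return (PagedFilter) filter;
    }
    return null;
}
```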

##########
File path: 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixTTLRegionObserver.java
##########
@@ -41,35 +43,41 @@
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.sql.SQLException;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Optional;
 
-import static org.apache.phoenix.coprocessor.BaseScannerRegionObserver.EMPTY_COLUMN_FAMILY_NAME;
-import static org.apache.phoenix.coprocessor.BaseScannerRegionObserver.EMPTY_COLUMN_QUALIFIER_NAME;
+import static org.apache.phoenix.util.ScanUtil.getDummyResult;
+import static org.apache.phoenix.util.ScanUtil.getPageSizeMs;
+import static org.apache.phoenix.util.ScanUtil.isDummy;
 
 /**
  * Coprocessor that checks whether the row is expired based on the TTL spec.
  */
-public class PhoenixTTLRegionObserver extends BaseRegionObserver {
+public class PhoenixTTLRegionObserver extends BaseScannerRegionObserver implements RegionCoprocessor {
     private static final Logger LOG = LoggerFactory.getLogger(PhoenixTTLRegionObserver.class);
     private MetricsPhoenixTTLSource metricSource;
 
-    @Override public void start(CoprocessorEnvironment e) throws IOException {
-        super.start(e);
-        metricSource = MetricsPhoenixCoprocessorSourceFactory.getInstance().getPhoenixTTLSource();
+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+        return Optional.of(this);
     }
 
-    @Override public void stop(CoprocessorEnvironment e) throws IOException {
-        super.stop(e);
+    @Override
+    public void start(CoprocessorEnvironment e) throws IOException {
+        metricSource = MetricsPhoenixCoprocessorSourceFactory.getInstance().getPhoenixTTLSource();

Review comment:
       We have changed the superclass from BaseRegionObserver to BaseScannerRegionObserver, which does not have start() or an equivalent method.
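
(In HBase 2.x, start() and stop() are no-op defaults on the base Coprocessor interface itself, and the framework discovers the observer via getRegionObserver(), so the overrides no longer have a super implementation worth calling. For reference only, the table name below being a placeholder, loading such a coprocessor looks like:)

```java
// Illustrative registration only; "MY_TABLE" is a placeholder.
TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("MY_TABLE"))
        .setCoprocessor(PhoenixTTLRegionObserver.class.getName())
        .build();
admin.modifyTable(td);
```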

##########
File path: 
phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java
##########
@@ -26,6 +27,9 @@
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
 import org.apache.phoenix.util.PhoenixKeyValueUtil;
+import org.apache.phoenix.util.ScanUtil;

Review comment:
       Ok



