gjacoby126 commented on a change in pull request #973:
URL: https://github.com/apache/phoenix/pull/973#discussion_r538855230



##########
File path: 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GlobalIndexRegionScanner.java
##########
@@ -1343,6 +1346,36 @@ public int prepareIndexMutations(Put put, Delete del, 
Map<byte[], List<Mutation>
         return indexMutations.size();
     }
 
+    static boolean adjustScanFilter(Scan scan) {
+        // For rebuilds we use count (*) as query for regular tables which 
ends up setting the FKOF on scan

Review comment:
       nit: please spell out FirstKeyOnlyFilter. Took me a minute to figure it 
out. :-)

##########
File path: 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GlobalIndexRegionScanner.java
##########
@@ -244,7 +247,7 @@ public GlobalIndexRegionScanner(final RegionScanner 
innerScanner,
                     new 
IndexVerificationResultRepository(indexMaintainer.getIndexTableName(), 
hTableFactory);
             nextStartKey = null;
             minTimestamp = scan.getTimeRange().getMin();
-            useSkipScanFilter = HbaseCompatCapabilities.isRawFilterSupported();
+            isRawFilterSupported = 
HbaseCompatCapabilities.isRawFilterSupported();

Review comment:
       Good rename

##########
File path: 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixTTLRegionObserver.java
##########
@@ -41,35 +43,41 @@
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.sql.SQLException;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Optional;
 
-import static 
org.apache.phoenix.coprocessor.BaseScannerRegionObserver.EMPTY_COLUMN_FAMILY_NAME;
-import static 
org.apache.phoenix.coprocessor.BaseScannerRegionObserver.EMPTY_COLUMN_QUALIFIER_NAME;
+import static org.apache.phoenix.util.ScanUtil.getDummyResult;
+import static org.apache.phoenix.util.ScanUtil.getPageSizeMs;
+import static org.apache.phoenix.util.ScanUtil.isDummy;
 
 /**
  * Coprocessor that checks whether the row is expired based on the TTL spec.
  */
-public class PhoenixTTLRegionObserver extends BaseRegionObserver {
+public class PhoenixTTLRegionObserver extends BaseScannerRegionObserver 
implements RegionCoprocessor {
     private static final Logger LOG = 
LoggerFactory.getLogger(PhoenixTTLRegionObserver.class);
     private MetricsPhoenixTTLSource metricSource;
 
-    @Override public void start(CoprocessorEnvironment e) throws IOException {
-        super.start(e);
-        metricSource = 
MetricsPhoenixCoprocessorSourceFactory.getInstance().getPhoenixTTLSource();
+    @Override
+    public Optional<RegionObserver> getRegionObserver() {
+        return Optional.of(this);
     }
 
-    @Override public void stop(CoprocessorEnvironment e) throws IOException {
-        super.stop(e);
+    @Override
+    public void start(CoprocessorEnvironment e) throws IOException {
+        metricSource = 
MetricsPhoenixCoprocessorSourceFactory.getInstance().getPhoenixTTLSource();

Review comment:
       Why don't we need to call super.start() here? (And why are we removing the 
existing call to super.stop()?)

##########
File path: phoenix-core/src/it/java/org/apache/phoenix/end2end/ViewTTLIT.java
##########
@@ -1406,6 +1407,7 @@ public void testWithVariousSQLsForMultipleViews() throws 
Exception {
         }
     }
 
+    @Ignore("Fails with StaleRegionBoundaryCacheException. Mutations on a SCN 
connection could be the reason")

Review comment:
       @jpisaac what are the implications of ignoring these tests?

##########
File path: 
phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/ResultTuple.java
##########
@@ -26,6 +27,9 @@
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
 import org.apache.phoenix.util.PhoenixKeyValueUtil;
+import org.apache.phoenix.util.ScanUtil;

Review comment:
       nit: seems like several unnecessary imports?

##########
File path: 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
##########
@@ -241,6 +231,12 @@ public void 
preScannerOpen(org.apache.hadoop.hbase.coprocessor.ObserverContext<R
             // last possible moment. You need to swap the start/stop and make 
the
             // start exclusive and the stop inclusive.
             ScanUtil.setupReverseScan(scan);
+            if (!(scan.getFilter() instanceof PagedFilter)) {
+                byte[] pageSizeMsBytes = 
scan.getAttribute(BaseScannerRegionObserver.SERVER_PAGE_SIZE_MS);
+                if (pageSizeMsBytes != null) {
+                    scan.setFilter(new PagedFilter(scan.getFilter(), 
Bytes.toLong(pageSizeMsBytes)/2));

Review comment:
       Why divide by 2 — to allow for the round-trip time back to the client? It 
would be good to have a comment explaining this.

##########
File path: 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GlobalIndexRegionScanner.java
##########
@@ -1351,26 +1384,18 @@ protected RegionScanner getLocalScanner() throws 
IOException {
             Scan incrScan = new Scan(scan);
             incrScan.setTimeRange(minTimestamp, scan.getTimeRange().getMax());
             incrScan.setRaw(true);
-            incrScan.setMaxVersions();
+            incrScan.readAllVersions();
             incrScan.getFamilyMap().clear();
             incrScan.setCacheBlocks(false);
             for (byte[] family : scan.getFamilyMap().keySet()) {
                 incrScan.addFamily(family);
             }
-            // For rebuilds we use count (*) as query for regular tables which 
ends up setting the FKOF on scan
-            // This filter doesn't give us all columns and skips to the next 
row as soon as it finds 1 col
-            // For rebuilds we need all columns and all versions
-            if (scan.getFilter() instanceof FirstKeyOnlyFilter) {
-                incrScan.setFilter(null);
-            } else if (scan.getFilter() != null) {
-                // Override the filter so that we get all versions
-                incrScan.setFilter(new 
AllVersionsIndexRebuildFilter(scan.getFilter()));
-            }
+            adjustScanFilter(incrScan);
             if(nextStartKey != null) {
                 incrScan.setStartRow(nextStartKey);
             }
             List<KeyRange> keys = new ArrayList<>();
-            try(RegionScanner scanner = region.getScanner(incrScan)) {
+            try(RegionScanner scanner = new PagedRegionScanner(region, 
region.getScanner(incrScan), incrScan)) {

Review comment:
       tiny nit: please add a space between `try` and `(` — checkstyle will complain.

##########
File path: phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
##########
@@ -1223,21 +1232,66 @@ public static void getDummyResult(List<Cell> result) {
         getDummyResult(EMPTY_BYTE_ARRAY, result);
     }
 
+    public static Tuple getDummyTuple(byte[] rowKey) {
+        List<Cell> result = new ArrayList<Cell>(1);
+        getDummyResult(rowKey, result);
+        return new ResultTuple(Result.create(result));
+    }
+
+    public static Tuple getDummyTuple() {
+        List<Cell> result = new ArrayList<Cell>(1);
+        getDummyResult(result);
+        return new ResultTuple(Result.create(result));
+    }
+
+    public static Tuple getDummyTuple(Tuple tuple) {
+        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+        tuple.getKey(ptr);
+        return getDummyTuple(ptr.copyBytes());
+    }
+
+    public static boolean isDummy(Cell cell) {
+        return CellUtil.matchingColumn(cell, EMPTY_BYTE_ARRAY, 
EMPTY_BYTE_ARRAY);
+    }
+
     public static boolean isDummy(Result result) {
-        // Check if the result is a dummy result
         if (result.rawCells().length != 1) {
             return false;
         }
         Cell cell = result.rawCells()[0];
-        return CellUtil.matchingColumn(cell, EMPTY_BYTE_ARRAY, 
EMPTY_BYTE_ARRAY);
+        return isDummy(cell);
     }
 
     public static boolean isDummy(List<Cell> result) {
-        // Check if the result is a dummy result
         if (result.size() != 1) {
             return false;
         }
         Cell cell = result.get(0);
-        return CellUtil.matchingColumn(cell, EMPTY_BYTE_ARRAY, 
EMPTY_BYTE_ARRAY);
+        return isDummy(cell);
+    }
+
+    public static boolean isDummy(Tuple tuple) {
+        if (tuple instanceof ResultTuple) {
+            isDummy(((ResultTuple) tuple).getResult());
+        }
+        return false;
+    }
+
+    public static PagedFilter getPhoenixPageFilter(Scan scan) {
+        Filter filter = scan.getFilter();

Review comment:
       What if the scan's filter is a FilterList that contains a PagedFilter?

##########
File path: 
phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixTTLRegionObserver.java
##########
@@ -204,17 +213,22 @@ public 
PhoenixTTLRegionScanner(RegionCoprocessorEnvironment env, Scan scan,
 
         private boolean doNext(List<Cell> result, boolean raw) throws 
IOException {
             try {
+                long startTime = EnvironmentEdgeManager.currentTimeMillis();
                 boolean hasMore;
                 do {
                     hasMore = raw ? scanner.nextRaw(result) : 
scanner.next(result);
                     if (result.isEmpty()) {
                         break;
                     }
+                    if (isDummy(result)) {
+                        return true;
+                    }
                     numRowsScanned++;
                     if (maskIfExpired && checkRowNotExpired(result)) {
                         break;
                     }
 
+                    //TODO 6211 The following logic does not sound correct

Review comment:
       @jpisaac can you please comment on this?




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to