This is an automated email from the ASF dual-hosted git repository.

pboado pushed a commit to branch 5.x-cdh6
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit f3e17d3fab76df109b19cf9ac5fa87dfe6705d77
Author: chenglei <cheng...@apache.org>
AuthorDate: Fri Apr 19 07:53:05 2019 +0100

    PHOENIX-5217 Incorrect result for COUNT DISTINCT limit
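
    With an ungrouped aggregate such as SELECT COUNT(DISTINCT pk1) FROM t
    LIMIT 1, the LIMIT applies to the single aggregated row returned to
    the client, not to the rows scanned on the server. Pushing the LIMIT
    down as a PageFilter alongside the DistinctPrefixFilter truncated the
    scan after the first distinct prefix, so the count came back wrong.
    The PageFilter is now only pushed when the aggregate is grouped.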
---
 .../apache/phoenix/end2end/DistinctCountIT.java    | 28 ++++++++++++++++++++++
 .../phoenix/iterate/BaseResultIterators.java       | 23 ++++++++++--------
 .../apache/phoenix/compile/QueryCompilerTest.java  | 25 +++++++++++++++++++
 .../java/org/apache/phoenix/util/TestUtil.java     | 13 ++++++++++
 4 files changed, 79 insertions(+), 10 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctCountIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctCountIT.java
index e586ebc..ae86c36 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctCountIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DistinctCountIT.java
@@ -32,6 +32,7 @@ import static org.apache.phoenix.util.TestUtil.ROW7;
 import static org.apache.phoenix.util.TestUtil.ROW8;
 import static org.apache.phoenix.util.TestUtil.ROW9;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.apache.phoenix.util.TestUtil.assertResultSet;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -468,4 +469,31 @@ public class DistinctCountIT extends ParallelStatsDisabledIT {
         assertEquals(2, rs.getInt(1));
         conn.close();
     }
+
+    @Test
+    public void testDistinctCountLimitBug5217() throws Exception {
+        Connection conn = null;
+        try {
+            Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+            conn = DriverManager.getConnection(getUrl(), props);
+            String tableName = generateUniqueName();
+            String sql = "create table " + tableName + "( "+
+                    " pk1 integer not null , " +
+                    " pk2 integer not null, " +
+                    " v integer, " +
+                    " CONSTRAINT TEST_PK PRIMARY KEY (pk1,pk2))";
+            conn.createStatement().execute(sql);
+            conn.createStatement().execute("UPSERT INTO "+tableName+"(pk1,pk2,v) VALUES (1,1,1)");
+            conn.createStatement().execute("UPSERT INTO "+tableName+"(pk1,pk2,v) VALUES (2,2,2)");
+            conn.commit();
+
+            sql = "select count(distinct pk1) from " + tableName + " limit 1";
+            ResultSet rs = conn.prepareStatement(sql).executeQuery();
+            assertResultSet(rs, new Object[][]{{Long.valueOf(2L)}});
+        } finally {
+            if(conn!=null) {
+                conn.close();
+            }
+        }
+    }
 }
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
index 7fbb636..a562b8d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/BaseResultIterators.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.cache.ServerCacheClient.ServerCache;
+import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
 import org.apache.phoenix.compile.QueryPlan;
 import org.apache.phoenix.compile.RowProjector;
 import org.apache.phoenix.compile.ScanRanges;
@@ -262,19 +263,21 @@ public abstract class BaseResultIterators extends ExplainTable implements Result
             if(offset!=null){
                 ScanUtil.addOffsetAttribute(scan, offset);
             }
-            int cols = plan.getGroupBy().getOrderPreservingColumnCount();
+            GroupBy groupBy = plan.getGroupBy();
+            int cols = groupBy.getOrderPreservingColumnCount();
             if (cols > 0 && keyOnlyFilter &&
                 !plan.getStatement().getHint().hasHint(HintNode.Hint.RANGE_SCAN) &&
                 cols < plan.getTableRef().getTable().getRowKeySchema().getFieldCount() &&
-                plan.getGroupBy().isOrderPreserving() &&
-                (context.getAggregationManager().isEmpty() || plan.getGroupBy().isUngroupedAggregate())) {
-                
-                ScanUtil.andFilterAtEnd(scan,
-                        new DistinctPrefixFilter(plan.getTableRef().getTable().getRowKeySchema(),
-                                cols));
-                if (plan.getLimit() != null) { // We can push the limit to the server
-                    ScanUtil.andFilterAtEnd(scan, new PageFilter(plan.getLimit()));
-                }
+                groupBy.isOrderPreserving() &&
+                (context.getAggregationManager().isEmpty() || groupBy.isUngroupedAggregate())) {
+
+                    ScanUtil.andFilterAtEnd(scan,
+                            new DistinctPrefixFilter(plan.getTableRef().getTable().getRowKeySchema(),cols));
+                    if (!groupBy.isUngroupedAggregate() && plan.getLimit() != null) {
+                        // We can push the limit to the server, but for an
+                        // ungrouped aggregate we cannot push the limit.
+                        ScanUtil.andFilterAtEnd(scan, new PageFilter(plan.getLimit()));
+                    }
             }
             scan.setAttribute(BaseScannerRegionObserver.QUALIFIER_ENCODING_SCHEME, new byte[]{table.getEncodingScheme().getSerializedMetadataValue()});
             scan.setAttribute(BaseScannerRegionObserver.IMMUTABLE_STORAGE_ENCODING_SCHEME, new byte[]{table.getImmutableStorageScheme().getSerializedMetadataValue()});
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
index bb18c29..d645995 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
+import org.apache.hadoop.hbase.filter.PageFilter;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
 import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
@@ -5960,4 +5961,28 @@ public class QueryCompilerTest extends BaseConnectionlessQueryTest {
                     plan.contains("SERVER SORTED BY"));
         }
     }
+
+    @Test
+    public void testDistinctCountLimitBug5217() throws Exception {
+        Connection conn = null;
+        try {
+            conn = DriverManager.getConnection(getUrl());
+            String tableName = generateUniqueName();
+            String sql = "create table " + tableName + "( "+
+                    " pk1 integer not null , " +
+                    " pk2 integer not null, " +
+                    " v integer, " +
+                    " CONSTRAINT TEST_PK PRIMARY KEY (pk1,pk2))";
+            conn.createStatement().execute(sql);
+
+            sql = "select count(distinct pk1) from " + tableName + " limit 1";
+            QueryPlan plan = TestUtil.getOptimizeQueryPlan(conn, sql);
+            Scan scan = plan.getContext().getScan();
+            assertFalse(TestUtil.hasFilter(scan, PageFilter.class));
+        } finally {
+            if(conn!=null) {
+                conn.close();
+            }
+        }
+    }
 }
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
index 40b9cfb..9021eca 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
@@ -48,6 +48,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Properties;
 
@@ -68,6 +69,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils.BlockingRpcCallback;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
@@ -1126,4 +1128,15 @@ public class TestUtil {
             return -1;
         }
     }
+
+    public static boolean hasFilter(Scan scan, Class<? extends Filter> filterClass) {
+        Iterator<Filter> filterIter = ScanUtil.getFilterIterator(scan);
+        while(filterIter.hasNext()) {
+            Filter filter = filterIter.next();
+            if(filterClass.isInstance(filter)) {
+                return true;
+            }
+        }
+        return false;
+    }
 }
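
For reference, a minimal standalone JDBC sketch of the behaviour the new
integration test pins down. The JDBC URL and table name here are
illustrative assumptions, not part of the patch; it assumes a reachable
Phoenix cluster:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class Phoenix5217Demo {
        public static void main(String[] args) throws Exception {
            // Adjust the JDBC URL for your environment.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                stmt.execute("CREATE TABLE T5217 (pk1 INTEGER NOT NULL, "
                        + "pk2 INTEGER NOT NULL, v INTEGER, "
                        + "CONSTRAINT test_pk PRIMARY KEY (pk1, pk2))");
                stmt.execute("UPSERT INTO T5217 (pk1, pk2, v) VALUES (1, 1, 1)");
                stmt.execute("UPSERT INTO T5217 (pk1, pk2, v) VALUES (2, 2, 2)");
                conn.commit();
                try (ResultSet rs = stmt.executeQuery(
                        "SELECT COUNT(DISTINCT pk1) FROM T5217 LIMIT 1")) {
                    rs.next();
                    // Before the fix, the pushed PageFilter(1) ended the scan after
                    // the first distinct prefix, so this printed 1; now it prints 2.
                    System.out.println(rs.getLong(1));
                }
            }
        }
    }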
