This is an automated email from the ASF dual-hosted git repository.

indhumuthumurugesh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new 344a851  [CARBONDATA-3964] Added test case for select query without filter
344a851 is described below

commit 344a8511cf05b047be124ca430493f7dfc4ae528
Author: Nihal ojha <[email protected]>
AuthorDate: Fri Oct 9 16:28:02 2020 +0530

    [CARBONDATA-3964] Added test case for select query without filter
    
    Why is this PR needed?
    Added a test case to verify that a select or select count query without a filter is not pruned using multiple threads.
    
    What changes were proposed in this PR?
    Added a test case in CountStarTestCase covering select and select count queries without a filter, asserting that block pruning does not take the multi-threaded path.
    
    This closes #3975
---
 .../apache/carbondata/core/index/TableIndex.java   |  2 +-
 .../testsuite/filterexpr/CountStarTestCase.scala   | 41 ++++++++++++++++++++++
 2 files changed, 42 insertions(+), 1 deletion(-)

diff --git a/core/src/main/java/org/apache/carbondata/core/index/TableIndex.java b/core/src/main/java/org/apache/carbondata/core/index/TableIndex.java
index 1cb7760..ddf91ec 100644
--- a/core/src/main/java/org/apache/carbondata/core/index/TableIndex.java
+++ b/core/src/main/java/org/apache/carbondata/core/index/TableIndex.java
@@ -160,7 +160,7 @@ public final class TableIndex extends OperationEventListener {
       // As 0.1 million files block pruning can take only 1 second.
       // Doing multi-thread for smaller values is not recommended as
       // driver should have minimum threads opened to support multiple concurrent queries.
-      if (filter == null || filter.isEmpty()) {
+      if (!isFilterPresent) {
         // if filter is not passed, then return all the blocklets.
         return pruneWithoutFilter(segments, partitionLocations, blocklets);
       }
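
For readers following the TableIndex.java hunk above: the surrounding method decides between a no-filter shortcut, single-threaded pruning, and multi-threaded pruning. Below is a minimal, self-contained Java sketch of that decision order; it is not the actual CarbonData code, and the class PruningSketch, its methods, and the String stand-ins for blocklets and filter expressions are all illustrative.

    import java.util.List;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    public final class PruningSketch {

      // Decide how to prune, mirroring the order of checks in the hunk above:
      // the no-filter shortcut is evaluated before any thread-count logic.
      static List<String> pruneBlocklets(List<String> allBlocklets, String filterExpr,
          int totalFiles, int multiThreadFilesCountThreshold) {
        boolean isFilterPresent = filterExpr != null && !filterExpr.isEmpty();
        if (!isFilterPresent) {
          // No filter (plain select / select count(*)): every blocklet qualifies,
          // so return them all without starting any pruning threads.
          return allBlocklets;
        }
        if (totalFiles > multiThreadFilesCountThreshold) {
          // Enough files to amortize thread startup: prune in parallel.
          return blockletsMatching(allBlocklets.parallelStream(), filterExpr);
        }
        // Few files: single-threaded pruning keeps driver threads free for
        // other concurrent queries.
        return blockletsMatching(allBlocklets.stream(), filterExpr);
      }

      private static List<String> blockletsMatching(Stream<String> blocklets, String filterExpr) {
        // Toy predicate standing in for real filter evaluation.
        return blocklets.filter(b -> b.contains(filterExpr)).collect(Collectors.toList());
      }
    }

The point exercised by the test added below is the first branch: a query with no filter never reaches the multi-threaded path, no matter how low CARBON_DRIVER_PRUNING_MULTI_THREAD_ENABLE_FILES_COUNT is set.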
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/filterexpr/CountStarTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/filterexpr/CountStarTestCase.scala
index 83ddd36..c95beef 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/filterexpr/CountStarTestCase.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/filterexpr/CountStarTestCase.scala
@@ -65,6 +65,47 @@ class CountStarTestCase extends QueryTest with BeforeAndAfterAll {
         CarbonCommonConstants.ENABLE_QUERY_STATISTICS_DEFAULT)
   }
 
+  test("select query without filter should not be pruned with multi thread") {
+    val numOfThreadsForPruning = CarbonProperties.getNumOfThreadsForPruning
+    val carbonDriverPruningMultiThreadEnableFilesCount =
+      CarbonProperties.getDriverPruningMultiThreadEnableFilesCount
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_MAX_DRIVER_THREADS_FOR_BLOCK_PRUNING, "2")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_DRIVER_PRUNING_MULTI_THREAD_ENABLE_FILES_COUNT, "1")
+    try {
+      sql("CREATE TABLE filtertestTables (ID int, date Timestamp, country 
String, " +
+        "name String, phonetype String, serialname String, salary int) " +
+        "STORED AS carbondata"
+      )
+      val csvFilePath = s"$resourcesPath/datanullmeasurecol.csv"
+      sql(
+        s"LOAD DATA LOCAL INPATH '" + csvFilePath + "' INTO TABLE " +
+          s"filtertestTables OPTIONS('DELIMITER'= ',', 'FILEHEADER'= '')"
+      )
+      sql(
+        s"LOAD DATA LOCAL INPATH '" + csvFilePath + "' INTO TABLE " +
+          s"filtertestTables OPTIONS('DELIMITER'= ',', 'FILEHEADER'= '')"
+      )
+      checkAnswer(
+        sql("select ID, Country, name, phoneType, serialName from 
filtertestTables"),
+        Seq(
+          Row(1, "china", "aaa1", "phone197", "A234"),
+          Row(1, "china", "aaa1", "phone197", "A234"),
+          Row(2, "china", "aaa2", "phone756", "A453"),
+          Row(2, "china", "aaa2", "phone756", "A453"))
+      )
+      checkAnswer(
+        sql("select count(*) from filtertestTables"), Seq(Row(4)))
+    } finally {
+      CarbonProperties.getInstance().addProperty(CarbonCommonConstants
+        .CARBON_MAX_DRIVER_THREADS_FOR_BLOCK_PRUNING, numOfThreadsForPruning.toString)
+      CarbonProperties.getInstance().addProperty(CarbonCommonConstants
+        .CARBON_DRIVER_PRUNING_MULTI_THREAD_ENABLE_FILES_COUNT,
+        carbonDriverPruningMultiThreadEnableFilesCount.toString)
+    }
+  }
+
   override def afterAll {
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
