This is an automated email from the ASF dual-hosted git repository.

qiangcai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new b4d96f4  [CARBONDATA-4008] Fixed IN filter on date column returning 0 results when 'carbon.push.rowfilters.for.vector' is true
b4d96f4 is described below

commit b4d96f4be959d2a965e80213cc970f5e5c293e7b
Author: Venu Reddy <[email protected]>
AuthorDate: Thu Sep 24 01:31:58 2020 +0530

    [CARBONDATA-4008] Fixed IN filter on date column returning 0 results when 'carbon.push.rowfilters.for.vector' is true
    
    Why is this PR needed?
    An IN filter on a date column returns 0 results when
    'carbon.push.rowfilters.for.vector' is set to true.
    RowLevelFilterExecutorImpl.applyFilter() calls createRow() and applies the
    filter at row level by invoking expression.evaluate(row); the expression
    in this case is an InExpression. But createRow() never filled in the date
    column value, so the expression always evaluated to false and returned 0
    rows.
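
    A minimal sketch of the failing scenario, written in the style of the
    test suite touched below (it assumes a QueryTest context where sql(...)
    is available; the table and column names are illustrative, not taken
    from the original report):

        sql("set carbon.push.rowfilters.for.vector=true")
        sql("create table t(id int, dt date) stored as carbondata")
        sql("insert into t select 1, '2020-03-30'")
        // before this fix, the row built by createRow() had an empty date
        // slot, so InExpression evaluated to false for every row and the
        // query returned 0 rows instead of the inserted row
        sql("select * from t where dt IN ('2020-03-30')").show()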
    
    What changes were proposed in this PR?
    Filled in the date column value in createRow() (see the sketch below).
    Removed unused imports introduced in a previous PR
    (SqlAstBuilderHelper.scala).
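
    The core change relies on the date direct-dictionary generator to map the
    stored surrogate key back to a comparable value. A rough sketch of that
    round trip, under the assumption that generateDirectSurrogateKey is the
    write-path counterpart (only getValueFromSurrogate appears in this diff):

        import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory
        import org.apache.carbondata.core.metadata.datatype.DataTypes

        val generator =
          DirectDictionaryKeyGeneratorFactory.getDirectDictionaryGenerator(DataTypes.DATE)
        // surrogate key as stored in the dictionary-encoded column page
        // (assumed write-path call, not shown in this diff)
        val surrogate = generator.generateDirectSurrogateKey("2020-03-30")
        // decoded value that createRow() now places in the row before
        // expression.evaluate(row) runs, so the IN comparison can match
        val decoded = generator.getValueFromSurrogate(surrogate)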
    
    Does this PR introduce any user interface change?
    No
    
    Is any new testcase added?
    Yes
    
    This closes #3953
---
 .../executer/RowLevelFilterExecutorImpl.java       | 17 ++++++++--
 .../spark/sql/hive/SqlAstBuilderHelper.scala       |  5 ++-
 .../spark/testsuite/filterexpr/TestInFilter.scala  | 38 +++++++++++++++++-----
 3 files changed, 46 insertions(+), 14 deletions(-)

diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecutorImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecutorImpl.java
index a5b3a42..de31fdd 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecutorImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecutorImpl.java
@@ -36,6 +36,8 @@ import org.apache.carbondata.core.datastore.chunk.DimensionColumnPage;
 import org.apache.carbondata.core.datastore.chunk.impl.VariableLengthDimensionColumnPage;
 import org.apache.carbondata.core.datastore.chunk.store.ColumnPageWrapper;
 import org.apache.carbondata.core.datastore.page.ColumnPage;
+import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
+import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory;
 import org.apache.carbondata.core.keygenerator.directdictionary.timestamp.DateDirectDictionaryGenerator;
 import org.apache.carbondata.core.keygenerator.directdictionary.timestamp.TimeStampGranularityTypeValue;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
@@ -106,6 +108,11 @@ public class RowLevelFilterExecutorImpl implements FilterExecutor {
    */
   boolean isNaturalSorted;
 
+  /**
+   * date direct dictionary generator
+   */
+  private DirectDictionaryGenerator dateDictionaryGenerator;
+
   public RowLevelFilterExecutorImpl(List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList,
       List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList, Expression exp,
       AbsoluteTableIdentifier tableIdentifier, SegmentProperties segmentProperties,
@@ -138,6 +145,8 @@ public class RowLevelFilterExecutorImpl implements FilterExecutor {
     this.exp = exp;
     this.tableIdentifier = tableIdentifier;
     this.complexDimensionInfoMap = complexDimensionInfoMap;
+    this.dateDictionaryGenerator =
+        DirectDictionaryKeyGeneratorFactory.getDirectDictionaryGenerator(DataTypes.DATE);
     initDimensionChunkIndexes();
     initMeasureChunkIndexes();
   }
@@ -438,9 +447,11 @@ public class RowLevelFilterExecutorImpl implements FilterExecutor {
         DimensionColumnPage columnDataChunk =
             blockChunkHolder.getDimensionRawColumnChunks()[dimensionChunkIndex[i]]
                 .decodeColumnPage(pageIndex);
-        if (dimColumnEvaluatorInfo.getDimension().getDataType() != DataTypes.DATE &&
-            (columnDataChunk instanceof VariableLengthDimensionColumnPage ||
-                columnDataChunk instanceof ColumnPageWrapper)) {
+        if (dimColumnEvaluatorInfo.getDimension().getDataType() == DataTypes.DATE) {
+          record[dimColumnEvaluatorInfo.getRowIndex()] = dateDictionaryGenerator
+              .getValueFromSurrogate(ByteUtil.toInt(columnDataChunk.getChunkData(index), 0));
+        } else if (columnDataChunk instanceof VariableLengthDimensionColumnPage
+            || columnDataChunk instanceof ColumnPageWrapper) {
 
           byte[] memberBytes = columnDataChunk.getChunkData(index);
           if (null != memberBytes) {
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/hive/SqlAstBuilderHelper.scala b/integration/spark/src/main/scala/org/apache/spark/sql/hive/SqlAstBuilderHelper.scala
index 2ffb007..be949b4 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/hive/SqlAstBuilderHelper.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/hive/SqlAstBuilderHelper.scala
@@ -18,14 +18,13 @@
 package org.apache.spark.sql.hive
 
 import org.apache.spark.sql.catalyst.CarbonParserUtil
-import org.apache.spark.sql.catalyst.parser.ParserUtils.{string, withOrigin}
 import org.apache.spark.sql.catalyst.parser.SqlBaseParser
-import org.apache.spark.sql.catalyst.parser.SqlBaseParser.{AddTableColumnsContext, ChangeColumnContext, CreateTableContext, ShowTablesContext}
+import org.apache.spark.sql.catalyst.parser.SqlBaseParser.{AddTableColumnsContext, ChangeColumnContext, CreateTableContext}
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 import org.apache.spark.sql.execution.SparkSqlAstBuilder
 import org.apache.spark.sql.execution.command.{AlterTableAddColumnsModel, AlterTableDataTypeChangeModel}
 import org.apache.spark.sql.execution.command.schema.{CarbonAlterTableAddColumnCommand,
 CarbonAlterTableColRenameDataTypeChangeCommand}
-import org.apache.spark.sql.execution.command.table.{CarbonExplainCommand, CarbonShowTablesCommand}
+import org.apache.spark.sql.execution.command.table.CarbonExplainCommand
 import org.apache.spark.sql.parser.CarbonSpark2SqlParser
 import org.apache.spark.sql.types.DecimalType
 
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/filterexpr/TestInFilter.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/filterexpr/TestInFilter.scala
index de3131e..27fc58f 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/filterexpr/TestInFilter.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/filterexpr/TestInFilter.scala
@@ -17,26 +17,29 @@
 
 package org.apache.carbondata.spark.testsuite.filterexpr
 
+import java.sql.{Date, Timestamp}
+
 import org.apache.spark.sql.Row
 import org.apache.spark.sql.test.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
+import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
 
-class TestInFilter extends QueryTest with BeforeAndAfterAll{
+class TestInFilter extends QueryTest with BeforeAndAfterAll with BeforeAndAfterEach {
 
   override def beforeAll: Unit = {
     sql("drop table if exists test_table")
-    sql("create table test_table(intField INT, floatField FLOAT, doubleField 
DOUBLE, " +
-        "decimalField DECIMAL(18,2))  STORED AS carbondata")
-
     // turn on the row level filter in carbon:
     // with only the row level filter on, the 'in' filter is pushed down into
     // CarbonScanRDD; otherwise the in filter would be handled by spark.
     sql("set carbon.push.rowfilters.for.vector=true")
-    sql("insert into test_table 
values(8,8,8,8),(5,5.0,5.0,5.0),(4,1.00,2.00,3.00)," +
-        
"(6,6.0000,6.0000,6.0000),(4743,4743.00,4743.0000,4743.0),(null,null,null,null)")
   }
 
   test("sql with in different measurement type") {
+    sql("create table test_table(intField INT, floatField FLOAT, doubleField 
DOUBLE, " +
+        "decimalField DECIMAL(18,2))  STORED AS carbondata")
+
+    sql("insert into test_table 
values(8,8,8,8),(5,5.0,5.0,5.0),(4,1.00,2.00,3.00)," +
+        
"(6,6.0000,6.0000,6.0000),(4743,4743.00,4743.0000,4743.0),(null,null,null,null)")
+
     // the filter value has one digit less precision than the column value
     // float type test
     checkAnswer(
@@ -165,8 +168,27 @@ class TestInFilter extends QueryTest with BeforeAndAfterAll{
       Seq(Row(4, 1.00, 2.00, 3.00)))
   }
 
-  override def afterAll(): Unit = {
+  test("test infilter with date, timestamp columns") {
+    sql("create table test_table(i int, dt date, ts timestamp) stored as 
carbondata")
+    sql("insert into test_table select 1, '2020-03-30', '2020-03-30 10:00:00'")
+    sql("insert into test_table select 2, '2020-07-04', '2020-07-04 14:12:15'")
+    sql("insert into test_table select 3, '2020-09-23', '2020-09-23 12:30:45'")
+
+    checkAnswer(sql("select * from test_table where dt IN ('2020-03-30', '2020-09-23')"),
+      Seq(Row(1, Date.valueOf("2020-03-30"), Timestamp.valueOf("2020-03-30 10:00:00")),
+        Row(3, Date.valueOf("2020-09-23"), Timestamp.valueOf("2020-09-23 12:30:45"))))
+
+    checkAnswer(sql(
+      "select * from test_table where ts IN ('2020-03-30 10:00:00', 
'2020-07-04 14:12:15')"),
+      Seq(Row(1, Date.valueOf("2020-03-30"), Timestamp.valueOf("2020-03-30 
10:00:00")),
+        Row(2, Date.valueOf("2020-07-04"), Timestamp.valueOf("2020-07-04 
14:12:15"))))
+  }
+
+  override def afterEach(): Unit = {
     sql("drop table if exists test_table")
+  }
+
+  override def afterAll(): Unit = {
     sql("set carbon.push.rowfilters.for.vector=false")
     defaultConfig()
   }
