This is an automated email from the ASF dual-hosted git repository.

akashrn5 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new 11ae435  [CARBONDATA-4095] Fix Select Query with SI filter fails, when columnDrift is Set
11ae435 is described below

commit 11ae435fe3b5f448bd2beabf20c1a0cb2ad1c885
Author: Indhumathi27 <[email protected]>
AuthorDate: Tue Dec 22 18:52:00 2020 +0530

    [CARBONDATA-4095] Fix Select Query with SI filter fails, when columnDrift is Set
    
    Why is this PR needed?
    After converting the expression to an IN expression for the main table with SI,
    the expression is not processed if ColumnDrift is enabled. The query then fails
    with an NPE during resolveFilter. The exception is added in the JIRA issue.
    
    What changes were proposed in this PR?
    Process the filter expression after adding the implicit expression.
    
    Does this PR introduce any user interface change?
    No
    
    Is any new testcase added?
    Yes
    
    This closes #4063
---
 .../apache/carbondata/hadoop/api/CarbonInputFormat.java  | 16 +++++++++++-----
 .../secondaryindex/TestSIWithSecondaryIndex.scala        | 13 +++++++++++++
 2 files changed, 24 insertions(+), 5 deletions(-)

diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
index 4d917fb..dda0f20 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
@@ -52,7 +52,6 @@ import org.apache.carbondata.core.metadata.schema.table.TableInfo;
 import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
 import org.apache.carbondata.core.profiler.ExplainCollector;
 import org.apache.carbondata.core.readcommitter.ReadCommittedScope;
-import org.apache.carbondata.core.scan.expression.Expression;
 import org.apache.carbondata.core.scan.filter.FilterUtil;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 import org.apache.carbondata.core.scan.model.QueryModel;
@@ -686,7 +685,8 @@ public abstract class CarbonInputFormat<T> extends FileInputFormat<Void, T> {
       projectColumns = new String[]{};
     }
     if (indexFilter != null) {
-      checkAndAddImplicitExpression(indexFilter.getExpression(), inputSplit);
+      boolean hasColumnDrift = carbonTable.isTransactionalTable() && carbonTable.hasColumnDrift();
+      checkAndAddImplicitExpression(indexFilter, inputSplit, hasColumnDrift);
     }
     QueryModel queryModel = new QueryModelBuilder(carbonTable)
         .projectColumns(projectColumns)
@@ -704,7 +704,8 @@ public abstract class CarbonInputFormat<T> extends FileInputFormat<Void, T> {
   * This method will create an Implicit Expression and set it as right child in the given
    * expression
    */
-  private void checkAndAddImplicitExpression(Expression expression, InputSplit inputSplit) {
+  private void checkAndAddImplicitExpression(IndexFilter indexFilter, InputSplit inputSplit,
+      boolean hasColumnDrift) {
     if (inputSplit instanceof CarbonMultiBlockSplit) {
       CarbonMultiBlockSplit split = (CarbonMultiBlockSplit) inputSplit;
       List<CarbonInputSplit> splits = split.getAllSplits();
@@ -721,8 +722,13 @@ public abstract class CarbonInputFormat<T> extends FileInputFormat<Void, T> {
       }
       if (!blockIdToBlockletIdMapping.isEmpty()) {
         // create implicit expression and set as right child
-        FilterUtil
-            .createImplicitExpressionAndSetAsRightChild(expression, blockIdToBlockletIdMapping);
+        FilterUtil.createImplicitExpressionAndSetAsRightChild(indexFilter.getExpression(),
+            blockIdToBlockletIdMapping);
+        // process filter expression after adding implicit expression. If hasColumnDrift is false,
+        // then the filter expression will be processed during QueryModelBuilder.build()
+        if (hasColumnDrift) {
+          indexFilter.processFilterExpression();
+        }
       }
     }
   }
diff --git a/index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestSIWithSecondaryIndex.scala b/index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestSIWithSecondaryIndex.scala
index f424d91..efef733 100644
--- a/index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestSIWithSecondaryIndex.scala
+++ b/index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestSIWithSecondaryIndex.scala
@@ -466,6 +466,19 @@ class TestSIWithSecondaryIndex extends QueryTest with BeforeAndAfterAll {
     sql("drop table if exists maintable")
   }
 
+  test("test SI on fact table with columnDrift enabled") {
+    sql("drop table if exists maintable")
+    sql("create table maintable (a string,b string,c int,d int) STORED AS 
carbondata ")
+    sql("insert into maintable values('k','d',2,3)")
+    sql("alter table maintable set 
tblproperties('sort_columns'='c,d','sort_scope'='local_sort')")
+    sql("create index indextable on table maintable(b) AS 'carbondata'")
+    sql("insert into maintable values('k','x',2,4)")
+    val dataFrame = sql("select * from maintable where b='x'")
+    checkAnswer(dataFrame, Seq(Row("k", "x", 2, 4)))
+    TestSecondaryIndexUtils.isFilterPushedDownToSI(dataFrame.queryExecution.sparkPlan)
+    sql("drop table if exists maintable")
+  }
+
   override def afterAll {
     dropIndexAndTable()
   }
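
A condensed, illustrative reading of the patched path in CarbonInputFormat.java, stitched
together from the hunks above (the enclosing methods and the construction of
blockIdToBlockletIdMapping from the CarbonMultiBlockSplit are elided; only identifiers
that appear in the diff are used):

    // Caller: detect column drift and hand it down together with the IndexFilter.
    if (indexFilter != null) {
      boolean hasColumnDrift = carbonTable.isTransactionalTable() && carbonTable.hasColumnDrift();
      checkAndAddImplicitExpression(indexFilter, inputSplit, hasColumnDrift);
    }

    private void checkAndAddImplicitExpression(IndexFilter indexFilter, InputSplit inputSplit,
        boolean hasColumnDrift) {
      // Attach the implicit blockId -> blockletId expression as the right child first.
      FilterUtil.createImplicitExpressionAndSetAsRightChild(indexFilter.getExpression(),
          blockIdToBlockletIdMapping);
      // When column drift is enabled, QueryModelBuilder.build() will not process the filter
      // expression later, so it must be processed here, after the implicit child is in place;
      // otherwise resolveFilter hits the NPE reported in CARBONDATA-4095.
      if (hasColumnDrift) {
        indexFilter.processFilterExpression();
      }
    }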
