Repository: carbondata
Updated Branches:
  refs/heads/master 9a75ce53b -> 4df335f20


[CARBONDATA-2649] Handled executor min/max pruning when filter column is not
cached in driver for CACHE_LEVEL=BLOCKLET

Things handled as part of this PR:

Modified the code to use min/max based pruning in the executor for the
Blocklet dataMap when the filter column's min/max is not cached in the
driver. Previously, when the columns to be cached in the driver were
specified via COLUMN_META_CACHE and CACHE_LEVEL = BLOCKLET, executor
min/max pruning did not happen, which could increase query time.
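
To make the first scenario concrete, here is a minimal sketch in the style
of the test added below (table name and data are hypothetical):

    // Only c1/c2 min/max is cached in the driver; a filter on `name` cannot
    // be fully pruned there, so min/max pruning now happens in the executor
    // instead of scanning every blocklet.
    sql("CREATE TABLE t (name STRING, c1 STRING, c2 STRING) STORED BY 'carbondata' " +
      "TBLPROPERTIES('CACHE_LEVEL'='BLOCKLET', 'COLUMN_META_CACHE'='c1,c2')")
    sql("INSERT INTO t SELECT 'a','aa','aaa'")
    sql("SELECT * FROM t WHERE name = 'a'").show()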

Removed the unwanted addition of a schemaEvolutionEntry to the schema on
ALTER TABLE SET and UNSET of table properties.
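
For reference, the affected operations are of this form (hypothetical table
and property names):

    // These commands no longer append a schemaEvolutionEntry to the schema:
    sql("ALTER TABLE t SET TBLPROPERTIES('COLUMN_META_CACHE'='c1')")
    sql("ALTER TABLE t UNSET TBLPROPERTIES('COLUMN_META_CACHE')")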

This closes #2540


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/4df335f2
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/4df335f2
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/4df335f2

Branch: refs/heads/master
Commit: 4df335f20808a963a2b0ff719afdff9efeaf9e33
Parents: 9a75ce5
Author: manishgupta88 <[email protected]>
Authored: Mon Jul 23 11:51:23 2018 +0530
Committer: ravipesala <[email protected]>
Committed: Wed Jul 25 12:33:30 2018 +0530

----------------------------------------------------------------------
 .../core/indexstore/BlockletDetailInfo.java     | 18 ++++++
 .../indexstore/blockletindex/BlockDataMap.java  | 22 +++++--
 .../blockletindex/BlockletDataMap.java          |  9 ++-
 .../executor/impl/AbstractQueryExecutor.java    |  6 +-
 .../core/scan/executor/util/QueryUtil.java      |  2 +-
 .../core/util/BlockletDataMapUtil.java          | 66 ++++++++++++++++++++
 ...ithColumnMetCacheAndCacheLevelProperty.scala | 47 +++++++++++++-
 .../org/apache/spark/util/AlterTableUtil.scala  | 12 ++--
 8 files changed, 163 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/4df335f2/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDetailInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDetailInfo.java b/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDetailInfo.java
index 2425bda..47455c7 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDetailInfo.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDetailInfo.java
@@ -72,6 +72,13 @@ public class BlockletDetailInfo implements Serializable, Writable {
    * flag to check for store from 1.1 or any prior version
    */
   private boolean isLegacyStore;
+  /**
+   * flag to check whether to serialize min/max values. The flag will be set to true in case:
+   * 1. CACHE_LEVEL = BLOCKLET and the filter column min/max is not cached in the driver using
+   * the property COLUMN_META_CACHE
+   * 2. CACHE_LEVEL = BLOCK, where it is always true, which is also the default value
+   */
+  private boolean useMinMaxForPruning = true;
 
   public int getRowCount() {
     return rowCount;
@@ -185,6 +192,7 @@ public class BlockletDetailInfo implements Serializable, Writable {
     out.write(blockletInfoBinary);
     out.writeLong(blockSize);
     out.writeBoolean(isLegacyStore);
+    out.writeBoolean(useMinMaxForPruning);
   }
 
   @Override public void readFields(DataInput in) throws IOException {
@@ -215,6 +223,7 @@ public class BlockletDetailInfo implements Serializable, Writable {
     setBlockletInfoFromBinary();
     blockSize = in.readLong();
     isLegacyStore = in.readBoolean();
+    useMinMaxForPruning = in.readBoolean();
   }
 
   /**
@@ -252,6 +261,7 @@ public class BlockletDetailInfo implements Serializable, Writable {
     detailInfo.columnSchemaBinary = columnSchemaBinary;
     detailInfo.blockSize = blockSize;
     detailInfo.isLegacyStore = isLegacyStore;
+    detailInfo.useMinMaxForPruning = useMinMaxForPruning;
     return detailInfo;
   }
 
@@ -297,4 +307,12 @@ public class BlockletDetailInfo implements Serializable, Writable {
   public void setColumnSchemas(List<ColumnSchema> columnSchemas) {
     this.columnSchemas = columnSchemas;
   }
+
+  public boolean isUseMinMaxForPruning() {
+    return useMinMaxForPruning;
+  }
+
+  public void setUseMinMaxForPruning(boolean useMinMaxForPruning) {
+    this.useMinMaxForPruning = useMinMaxForPruning;
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/4df335f2/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java
index 7baba89..82006c3 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java
@@ -548,7 +548,7 @@ public class BlockDataMap extends CoarseGrainDataMap
       for (int i = 0; i < numBlocklets; i++) {
         DataMapRow safeRow = memoryDMStore.getDataMapRow(schema, i).convertToSafeRow();
         blocklets.add(createBlocklet(safeRow, getFileNameWithFilePath(safeRow, filePath),
-            getBlockletId(safeRow)));
+            getBlockletId(safeRow), false));
       }
     } else {
       // Remove B-tree jump logic as start and end key prepared is not
@@ -557,6 +557,9 @@ public class BlockDataMap extends CoarseGrainDataMap
       numBlocklets = memoryDMStore.getRowCount();
       FilterExecuter filterExecuter = FilterUtil
           .getFilterExecuterTree(filterExp, getSegmentProperties(), null, getMinMaxCacheColumns());
+      // flag to decide whether to use min/max in executor pruning for BlockletDataMap
+      boolean useMinMaxForPruning = useMinMaxForExecutorPruning(filterExp);
       while (startIndex < numBlocklets) {
         DataMapRow safeRow = memoryDMStore.getDataMapRow(schema, startIndex).convertToSafeRow();
         String fileName = getFileNameWithFilePath(safeRow, filePath);
@@ -565,7 +568,7 @@ public class BlockDataMap extends CoarseGrainDataMap
             addBlockBasedOnMinMaxValue(filterExecuter, getMinMaxValue(safeRow, MAX_VALUES_INDEX),
                 getMinMaxValue(safeRow, MIN_VALUES_INDEX), fileName, blockletId);
         if (isValid) {
-          blocklets.add(createBlocklet(safeRow, fileName, blockletId));
+          blocklets.add(createBlocklet(safeRow, fileName, blockletId, useMinMaxForPruning));
         }
         startIndex++;
       }
@@ -574,6 +577,15 @@ public class BlockDataMap extends CoarseGrainDataMap
     return blocklets;
   }
 
+  private boolean useMinMaxForExecutorPruning(FilterResolverIntf filterResolverIntf) {
+    boolean useMinMaxForPruning = false;
+    if (this instanceof BlockletDataMap) {
+      useMinMaxForPruning = BlockletDataMapUtil
+          .useMinMaxForBlockletPruning(filterResolverIntf, getMinMaxCacheColumns());
+    }
+    return useMinMaxForPruning;
+  }
+
   @Override
   public List<Blocklet> prune(FilterResolverIntf filterExp, SegmentProperties segmentProperties,
       List<PartitionSpec> partitions) {
@@ -704,7 +716,8 @@ public class BlockDataMap extends CoarseGrainDataMap
     DataMapRow safeRow =
         memoryDMStore.getDataMapRow(getFileFooterEntrySchema(), rowIndex).convertToSafeRow();
     String filePath = getFilePath();
-    return createBlocklet(safeRow, getFileNameWithFilePath(safeRow, filePath), relativeBlockletId);
+    return createBlocklet(safeRow, getFileNameWithFilePath(safeRow, filePath), relativeBlockletId,
+        false);
   }
 
   private byte[] getBlockletRowCountForEachBlock() {
@@ -743,7 +756,8 @@ public class BlockDataMap extends CoarseGrainDataMap
     return BLOCK_DEFAULT_BLOCKLET_ID;
   }
 
-  protected ExtendedBlocklet createBlocklet(DataMapRow row, String fileName, short blockletId) {
+  protected ExtendedBlocklet createBlocklet(DataMapRow row, String fileName, short blockletId,
+      boolean useMinMaxForPruning) {
     ExtendedBlocklet blocklet = new ExtendedBlocklet(fileName, blockletId + "", false);
     BlockletDetailInfo detailInfo = getBlockletDetailInfo(row, blockletId, blocklet);
     detailInfo.setBlockletInfoBinary(new byte[0]);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/4df335f2/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMap.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMap.java b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMap.java
index 6a05442..d4d1cbb 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMap.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMap.java
@@ -212,7 +212,8 @@ public class BlockletDataMap extends BlockDataMap implements Serializable {
         .convertToSafeRow();
     short relativeBlockletId = safeRow.getShort(BLOCKLET_ID_INDEX);
     String filePath = getFilePath();
-    return createBlocklet(safeRow, getFileNameWithFilePath(safeRow, filePath), relativeBlockletId);
+    return createBlocklet(safeRow, getFileNameWithFilePath(safeRow, filePath), relativeBlockletId,
+        false);
   }
 
   protected short getBlockletId(DataMapRow dataMapRow) {
@@ -222,15 +223,17 @@ public class BlockletDataMap extends BlockDataMap implements Serializable {
     return dataMapRow.getShort(BLOCKLET_ID_INDEX);
   }
 
-  protected ExtendedBlocklet createBlocklet(DataMapRow row, String fileName, short blockletId) {
+  protected ExtendedBlocklet createBlocklet(DataMapRow row, String fileName, short blockletId,
+      boolean useMinMaxForPruning) {
     if (isLegacyStore) {
-      return super.createBlocklet(row, fileName, blockletId);
+      return super.createBlocklet(row, fileName, blockletId, useMinMaxForPruning);
     }
     ExtendedBlocklet blocklet = new ExtendedBlocklet(fileName, blockletId + "");
     BlockletDetailInfo detailInfo = getBlockletDetailInfo(row, blockletId, blocklet);
     detailInfo.setColumnSchemas(getColumnSchema());
     detailInfo.setBlockletInfoBinary(row.getByteArray(BLOCKLET_INFO_INDEX));
     detailInfo.setPagesCount(row.getShort(BLOCKLET_PAGE_COUNT_INDEX));
+    detailInfo.setUseMinMaxForPruning(useMinMaxForPruning);
     blocklet.setDetailInfo(detailInfo);
     return blocklet;
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/4df335f2/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
index 910ae3e..c8c8a0f 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
@@ -160,7 +160,7 @@ public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
     // and measure column start index
     queryProperties.filterMeasures = new HashSet<>();
     queryProperties.complexFilterDimension = new HashSet<>();
-    QueryUtil.getAllFilterDimensions(queryModel.getFilterExpressionResolverTree(),
+    QueryUtil.getAllFilterDimensionsAndMeasures(queryModel.getFilterExpressionResolverTree(),
         queryProperties.complexFilterDimension, queryProperties.filterMeasures);
 
     CarbonTable carbonTable = queryModel.getTable();
@@ -204,7 +204,9 @@ public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
       // 1. old stores (1.1 or any prior version to 1.1) where blocklet information is not
       // available so read the blocklet information from block file
       // 2. CACHE_LEVEL is set to block
-      if (blockletDetailInfo.getBlockletInfo() == null) {
+      // 3. CACHE_LEVEL is BLOCKLET but filter column min/max is not cached in driver
+      if (blockletDetailInfo.getBlockletInfo() == null || blockletDetailInfo
+          .isUseMinMaxForPruning()) {
         readAndFillBlockletInfo(filePathToFileFooterMapping, tableBlockInfos, blockInfo,
             blockletDetailInfo);
       } else {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/4df335f2/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java b/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java
index 2e98f68..627a9ec 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java
@@ -634,7 +634,7 @@ public class QueryUtil {
     return parentBlockIndex;
   }
 
-  public static void getAllFilterDimensions(FilterResolverIntf filterResolverTree,
+  public static void getAllFilterDimensionsAndMeasures(FilterResolverIntf filterResolverTree,
       Set<CarbonDimension> filterDimensions, Set<CarbonMeasure> filterMeasure) {
     if (null == filterResolverTree) {
       return;

http://git-wip-us.apache.org/repos/asf/carbondata/blob/4df335f2/core/src/main/java/org/apache/carbondata/core/util/BlockletDataMapUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/BlockletDataMapUtil.java b/core/src/main/java/org/apache/carbondata/core/util/BlockletDataMapUtil.java
index 58df0f7..86e9f9c 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/BlockletDataMapUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/BlockletDataMapUtil.java
@@ -51,8 +51,11 @@ import org.apache.carbondata.core.metadata.datatype.DataType;
 import org.apache.carbondata.core.metadata.datatype.DataTypes;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonColumn;
+import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonMeasure;
 import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
+import org.apache.carbondata.core.scan.executor.util.QueryUtil;
+import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 import org.apache.carbondata.core.util.path.CarbonTablePath;
 
 import org.apache.commons.logging.Log;
@@ -408,4 +411,67 @@ public class BlockletDataMapUtil {
       return column.getOrdinal();
     }
   }
+
+  /**
+   * Method to check whether to serialize min/max values to the executor. Returns true if
+   * filter column min/max is not cached in the driver.
+   *
+   * @param filterResolverTree resolved filter tree of the query
+   * @param minMaxCacheColumns columns whose min/max values are cached in the driver
+   * @return true if min/max should be serialized to the executor
+   */
+  public static boolean useMinMaxForBlockletPruning(FilterResolverIntf filterResolverTree,
+      List<CarbonColumn> minMaxCacheColumns) {
+    boolean serializeMinMax = false;
+    if (null != minMaxCacheColumns) {
+      Set<CarbonDimension> filterDimensions = new HashSet<>();
+      Set<CarbonMeasure> filterMeasures = new HashSet<>();
+      QueryUtil
+          .getAllFilterDimensionsAndMeasures(filterResolverTree, filterDimensions, filterMeasures);
+      // set flag to true if the number of cached columns is less than the number of filter columns
+      if (minMaxCacheColumns.size() < (filterDimensions.size() + filterMeasures.size())) {
+        serializeMinMax = true;
+      } else {
+        // check if all the filter dimensions are cached
+        for (CarbonDimension filterDimension : filterDimensions) {
+          // complex dimensions are not allowed to be specified in the COLUMN_META_CACHE
+          // property, so cannot validate for complex columns
+          if (filterDimension.isComplex()) {
+            continue;
+          }
+          if (!filterColumnExistsInMinMaxColumnList(minMaxCacheColumns, filterDimension)) {
+            serializeMinMax = true;
+            break;
+          }
+        }
+        // check if all the filter measures are cached only if all filter dimensions are cached
+        if (!serializeMinMax) {
+          for (CarbonMeasure filterMeasure : filterMeasures) {
+            if (!filterColumnExistsInMinMaxColumnList(minMaxCacheColumns, filterMeasure)) {
+              serializeMinMax = true;
+              break;
+            }
+          }
+        }
+      }
+    }
+    return serializeMinMax;
+  }
+
+  /**
+   * Method to check for the presence of a filter column in the min/max cache columns list
+   *
+   * @param minMaxCacheColumns columns whose min/max values are cached in the driver
+   * @param filterColumn filter column to look up
+   * @return true if the filter column is present in the min/max cache columns list
+   */
+  private static boolean filterColumnExistsInMinMaxColumnList(List<CarbonColumn> minMaxCacheColumns,
+      CarbonColumn filterColumn) {
+    for (CarbonColumn column : minMaxCacheColumns) {
+      if (filterColumn.getColumnId().equals(column.getColumnId())) {
+        return true;
+      }
+    }
+    return false;
+  }
 }
\ No newline at end of file
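
As a rough model of the decision implemented by useMinMaxForBlockletPruning
(simplified, hypothetical Scala types; the real method operates on
FilterResolverIntf and CarbonColumn lists):

    // Serialize min/max to the executor iff some filter column is missing
    // from the driver's min/max cache; a null/absent cache column list means
    // every column is cached, so the driver can prune on its own.
    def useMinMaxForBlockletPruning(filterColumns: Set[String],
        minMaxCacheColumns: Option[Set[String]]): Boolean =
      minMaxCacheColumns match {
        case None => false
        case Some(cached) => !filterColumns.subsetOf(cached)
      }

    // e.g. COLUMN_META_CACHE='c1,c2' with a filter on `name`:
    assert(useMinMaxForBlockletPruning(Set("name"), Some(Set("c1", "c2"))))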

http://git-wip-us.apache.org/repos/asf/carbondata/blob/4df335f2/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/allqueries/TestQueryWithColumnMetCacheAndCacheLevelProperty.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/allqueries/TestQueryWithColumnMetCacheAndCacheLevelProperty.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/allqueries/TestQueryWithColumnMetCacheAndCacheLevelProperty.scala
index af9930a..53d8f10 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/allqueries/TestQueryWithColumnMetCacheAndCacheLevelProperty.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/allqueries/TestQueryWithColumnMetCacheAndCacheLevelProperty.scala
@@ -17,6 +17,8 @@
 package org.apache.carbondata.spark.testsuite.allqueries
 
 
+import java.util
+
 import scala.collection.JavaConverters._
 
 import org.apache.spark.sql.{CarbonEnv, Row}
@@ -25,11 +27,17 @@ import org.apache.spark.sql.test.util.QueryTest
 import org.scalatest.BeforeAndAfterAll
 
 import org.apache.carbondata.core.datamap.dev.DataMap
-import org.apache.carbondata.core.datamap.{DataMapStoreManager, Segment, TableDataMap}
+import org.apache.carbondata.core.datamap.{DataMapChooser, DataMapStoreManager, Segment, TableDataMap}
 import org.apache.carbondata.core.datastore.block.SegmentPropertiesAndSchemaHolder
 import org.apache.carbondata.core.indexstore.blockletindex.{BlockDataMap, BlockletDataMap}
 import org.apache.carbondata.core.indexstore.schema.CarbonRowSchema
 import org.apache.carbondata.core.indexstore.Blocklet
+import org.apache.carbondata.core.metadata.datatype.DataTypes
+import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension
+import org.apache.carbondata.core.scan.expression.conditional.NotEqualsExpression
+import org.apache.carbondata.core.scan.expression.logical.AndExpression
+import org.apache.carbondata.core.scan.expression.{ColumnExpression, LiteralExpression}
+import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf
 
 /**
  * test class for validating COLUMN_META_CACHE and CACHE_LEVEL
@@ -47,6 +55,7 @@ class TestQueryWithColumnMetCacheAndCacheLevelProperty extends QueryTest with Be
   private def dropSchema: Unit = {
     sql("drop table if exists metaCache")
     sql("drop table if exists column_min_max_cache_test")
+    sql("drop table if exists minMaxSerialize")
   }
 
   private def createAndLoadTable(cacheLevel: String): Unit = {
@@ -58,11 +67,13 @@ class TestQueryWithColumnMetCacheAndCacheLevelProperty extends QueryTest with Be
 
   private def getDataMaps(dbName: String,
       tableName: String,
-      segmentId: String): List[DataMap[_ <: Blocklet]] = {
+      segmentId: String,
+      isSchemaModified: Boolean = false): List[DataMap[_ <: Blocklet]] = {
     val relation: CarbonRelation = CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetastore
       .lookupRelation(Some(dbName), tableName)(sqlContext.sparkSession)
       .asInstanceOf[CarbonRelation]
     val carbonTable = relation.carbonTable
+    assert(carbonTable.getTableInfo.isSchemaModified == isSchemaModified)
     val segment: Segment = Segment.getSegment(segmentId, carbonTable.getTablePath)
     val defaultDataMap: TableDataMap = DataMapStoreManager.getInstance()
       .getDefaultDataMap(carbonTable)
@@ -268,4 +279,36 @@ class TestQueryWithColumnMetCacheAndCacheLevelProperty extends QueryTest with Be
     sql("drop table if exists alter_add_column_min_max")
   }
 
+  test("verify min/max getting serialized to executor when cache_level = 
blocklet") {
+    sql("drop table if exists minMaxSerialize")
+    sql("create table minMaxSerialize(name string, c1 string, c2 string) 
stored by 'carbondata' TBLPROPERTIES('CACHE_LEVEL'='BLOCKLET', 
'COLUMN_META_CACHE'='c1,c2')")
+    sql("insert into minMaxSerialize select 'a','aa','aaa'")
+    checkAnswer(sql("select * from minMaxSerialize where name='a'"), Row("a", 
"aa", "aaa"))
+    checkAnswer(sql("select * from minMaxSerialize where name='b'"), Seq.empty)
+    val relation: CarbonRelation = 
CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetastore
+      .lookupRelation(Some("default"), 
"minMaxSerialize")(sqlContext.sparkSession)
+      .asInstanceOf[CarbonRelation]
+    val carbonTable = relation.carbonTable
+    // form a filter expression and generate filter resolver tree
+    val columnExpression = new ColumnExpression("name", DataTypes.STRING)
+    columnExpression.setDimension(true)
+    val dimension: CarbonDimension = 
carbonTable.getDimensionByName(carbonTable.getTableName, "name")
+    columnExpression.setDimension(dimension)
+    columnExpression.setCarbonColumn(dimension)
+    val literalValueExpression = new LiteralExpression("a", DataTypes.STRING)
+    val literalNullExpression = new LiteralExpression(null, DataTypes.STRING)
+    val notEqualsExpression = new NotEqualsExpression(columnExpression, 
literalNullExpression)
+    val equalsExpression = new NotEqualsExpression(columnExpression, 
literalValueExpression)
+    val andExpression = new AndExpression(notEqualsExpression, 
equalsExpression)
+    val resolveFilter: FilterResolverIntf = 
carbonTable.resolveFilter(andExpression)
+    val exprWrapper = DataMapChooser.getDefaultDataMap(carbonTable, 
resolveFilter)
+    val segment = new Segment("0")
+    // get the pruned blocklets
+    val prunedBlocklets = exprWrapper.prune(List(segment).asJava, null)
+    prunedBlocklets.asScala.foreach { blocklet =>
+      // all the blocklets should have useMinMaxForPrune flag set to true
+      assert(blocklet.getDetailInfo.isUseMinMaxForPruning)
+    }
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/4df335f2/integration/spark2/src/main/scala/org/apache/spark/util/AlterTableUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/util/AlterTableUtil.scala b/integration/spark2/src/main/scala/org/apache/spark/util/AlterTableUtil.scala
index 00cd653..96b191f 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/util/AlterTableUtil.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/util/AlterTableUtil.scala
@@ -109,7 +109,7 @@ object AlterTableUtil {
    * @param sparkSession
    */
   def updateSchemaInfo(carbonTable: CarbonTable,
-      schemaEvolutionEntry: SchemaEvolutionEntry,
+      schemaEvolutionEntry: SchemaEvolutionEntry = null,
       thriftTable: TableInfo,
       cols: Option[Seq[org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema]] =
       None)
@@ -295,7 +295,6 @@ object AlterTableUtil {
     LOGGER.audit(s"Alter table newProperties request has been received for $dbName.$tableName")
     val locksToBeAcquired = List(LockUsage.METADATA_LOCK, LockUsage.COMPACTION_LOCK)
     var locks = List.empty[ICarbonLock]
-    val timeStamp = 0L
     try {
       locks = AlterTableUtil
         .validateTableAndAcquireLock(dbName, tableName, locksToBeAcquired)(sparkSession)
@@ -317,8 +316,6 @@ object AlterTableUtil {
         dbName,
         tableName,
         carbonTable.getTablePath)
-      val schemaEvolutionEntry = new org.apache.carbondata.core.metadata.schema.SchemaEvolutionEntry
-      schemaEvolutionEntry.setTimeStamp(timeStamp)
       val thriftTable = schemaConverter.fromWrapperToExternalTableInfo(
         wrapperTableInfo, dbName, tableName)
       val tblPropertiesMap: mutable.Map[String, String] =
@@ -358,9 +355,10 @@ object AlterTableUtil {
         // check if duplicate columns are present in both local dictionary include and exclude
         CarbonScalaUtil.validateDuplicateLocalDictIncludeExcludeColmns(tblPropertiesMap)
       }
-      val (tableIdentifier, schemParts, cols) = updateSchemaInfo(carbonTable,
-        schemaConverter.fromWrapperToExternalSchemaEvolutionEntry(schemaEvolutionEntry),
-        thriftTable)(sparkSession)
+      val (tableIdentifier, schemParts, cols: Option[Seq[org.apache.carbondata.core.metadata
+      .schema.table.column.ColumnSchema]]) = updateSchemaInfo(
+        carbonTable = carbonTable,
+        thriftTable = thriftTable)(sparkSession)
       catalog.alterTable(tableIdentifier, schemParts, cols)
       sparkSession.catalog.refreshTable(tableIdentifier.quotedString)
       // check and clear the block/blocklet cache
