carbondata git commit: Revert "[CARBONDATA-2023][DataLoad] Add size base block allocation in data loading"

2018-02-09 Thread jackylk
Repository: carbondata
Updated Branches:
  refs/heads/carbonstore 6dd8b038f -> e5c32ac96


Revert "[CARBONDATA-2023][DataLoad] Add size base block allocation in data 
loading"

This reverts commit 6dd8b038fc898dbf48ad30adfc870c19eb38e3d0.


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/e5c32ac9
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/e5c32ac9
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/e5c32ac9

Branch: refs/heads/carbonstore
Commit: e5c32ac96f4cf85ef7a42f2a14c31c19418a789b
Parents: 6dd8b03
Author: Jacky Li 
Authored: Sat Feb 10 10:34:59 2018 +0800
Committer: Jacky Li 
Committed: Sat Feb 10 10:34:59 2018 +0800

--
 .../constants/CarbonLoadOptionConstants.java|  10 -
 .../core/datastore/block/TableBlockInfo.java|  29 --
 .../carbondata/core/util/CarbonProperties.java  |  11 -
 docs/useful-tips-on-carbondata.md   |   1 -
 .../spark/rdd/NewCarbonDataLoadRDD.scala|   4 +-
 .../spark/sql/hive/DistributionUtil.scala   |   2 +-
 .../spark/rdd/CarbonDataRDDFactory.scala|  18 +-
 .../merger/NodeMultiBlockRelation.java  |  40 --
 .../processing/util/CarbonLoaderUtil.java   | 494 +++
 .../processing/util/CarbonLoaderUtilTest.java   | 125 -
 10 files changed, 183 insertions(+), 551 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/e5c32ac9/core/src/main/java/org/apache/carbondata/core/constants/CarbonLoadOptionConstants.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonLoadOptionConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonLoadOptionConstants.java
index a6bf60f..bcfeba0 100644
--- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonLoadOptionConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonLoadOptionConstants.java
@@ -114,14 +114,4 @@ public final class CarbonLoadOptionConstants {
*/
   public static final int MAX_EXTERNAL_DICTIONARY_SIZE = 1000;
 
-  /**
-   * enable block size based block allocation while loading data. By default, carbondata assigns
-   * blocks to node based on block number. If this option is set to `true`, carbondata will
-   * consider block size first and make sure that all the nodes will process almost equal size of
-   * data. This option is especially useful when you encounter skewed data.
-   */
-  @CarbonProperty
-  public static final String ENABLE_CARBON_LOAD_SKEWED_DATA_OPTIMIZATION
-  = "carbon.load.skewedDataOptimization.enabled";
-  public static final String ENABLE_CARBON_LOAD_SKEWED_DATA_OPTIMIZATION_DEFAULT = "false";
 }
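
For context, the option removed above switched CarbonData's load-time block-to-node assignment from balancing the number of blocks per node to balancing the total block size per node. A minimal, self-contained sketch of that greedy idea (all block names and sizes are hypothetical; this is not the reverted CarbonLoaderUtil code):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class SizeBasedAllocationSketch {
  public static void main(String[] args) {
    // block -> length in bytes (hypothetical values)
    Map<String, Long> blocks = new LinkedHashMap<>();
    blocks.put("part-0", 512L);
    blocks.put("part-1", 300L);
    blocks.put("part-2", 280L);
    blocks.put("part-3", 64L);
    List<String> nodes = Arrays.asList("node1", "node2");

    // largest blocks first, each one going to the currently lightest node,
    // so every node ends up with roughly the same total size
    List<Map.Entry<String, Long>> sorted = new ArrayList<>(blocks.entrySet());
    sorted.sort((a, b) -> Long.compare(b.getValue(), a.getValue()));

    Map<String, Long> nodeSize = new LinkedHashMap<>();
    Map<String, List<String>> assignment = new LinkedHashMap<>();
    for (String node : nodes) {
      nodeSize.put(node, 0L);
      assignment.put(node, new ArrayList<>());
    }
    for (Map.Entry<String, Long> block : sorted) {
      String lightest = Collections.min(nodeSize.entrySet(),
          Map.Entry.comparingByValue()).getKey();
      assignment.get(lightest).add(block.getKey());
      nodeSize.put(lightest, nodeSize.get(lightest) + block.getValue());
    }
    // node1 gets part-0 and part-3 (576 bytes), node2 gets part-1 and part-2 (580 bytes)
    System.out.println(assignment);
  }
}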

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e5c32ac9/core/src/main/java/org/apache/carbondata/core/datastore/block/TableBlockInfo.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/block/TableBlockInfo.java b/core/src/main/java/org/apache/carbondata/core/datastore/block/TableBlockInfo.java
index 6624311..907708c 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/block/TableBlockInfo.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/block/TableBlockInfo.java
@@ -19,8 +19,6 @@ package org.apache.carbondata.core.datastore.block;
 import java.io.IOException;
 import java.io.Serializable;
 import java.nio.charset.Charset;
-import java.util.Arrays;
-import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -94,20 +92,6 @@ public class TableBlockInfo implements Distributable, Serializable {
 
   private String dataMapWriterPath;
 
-  /**
-   * comparator to sort by block size in descending order.
-   * Since each line is not exactly the same, the size of a InputSplit may differs,
-   * so we allow some deviation for these splits.
-   */
-  public static final Comparator<Distributable> DATA_SIZE_DESC_COMPARATOR =
-      new Comparator<Distributable>() {
-        @Override public int compare(Distributable o1, Distributable o2) {
-          long diff =
-              ((TableBlockInfo) o1).getBlockLength() - ((TableBlockInfo) o2).getBlockLength();
-          return diff < 0 ? 1 : (diff == 0 ? 0 : -1);
-        }
-      };
-
   public TableBlockInfo(String filePath, long blockOffset, String segmentId,
   String[] locations, long blockLength, ColumnarFormatVersion version,
   String[] deletedDeltaFilePath) {
@@ -436,17 +420,4 @@ public class TableBlockInfo implements Distributable, Serializable {
   public void setDataMapWriterPath(String dataMapWriterPath) {
 this.dataMapWriterPath = dataMapWriterPath;
   }
-
-  @Override
-  public String 

[24/36] carbondata git commit: [CARBONDATA-2099] Refactor query scan process to improve readability

2018-02-09 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/7b79ff18/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ExcludeColGroupFilterExecuterImpl.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ExcludeColGroupFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ExcludeColGroupFilterExecuterImpl.java
index 9391ebd..44f7c07 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ExcludeColGroupFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/ExcludeColGroupFilterExecuterImpl.java
@@ -16,20 +16,10 @@
  */
 package org.apache.carbondata.core.scan.filter.executer;
 
-import java.util.ArrayList;
 import java.util.BitSet;
-import java.util.List;
 
-import org.apache.carbondata.common.logging.LogService;
-import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.carbondata.core.datastore.block.SegmentProperties;
-import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
-import org.apache.carbondata.core.keygenerator.KeyGenException;
-import org.apache.carbondata.core.keygenerator.KeyGenerator;
-import org.apache.carbondata.core.scan.executor.infos.KeyStructureInfo;
-import org.apache.carbondata.core.scan.executor.util.QueryUtil;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
-import org.apache.carbondata.core.util.ByteUtil;
 
 /**
  * It checks if filter is required on given block and if required, it does
@@ -38,12 +28,6 @@ import org.apache.carbondata.core.util.ByteUtil;
 public class ExcludeColGroupFilterExecuterImpl extends ExcludeFilterExecuterImpl {
 
   /**
-   * LOGGER
-   */
-  private static final LogService LOGGER =
-      LogServiceFactory.getLogService(ExcludeColGroupFilterExecuterImpl.class.getName());
-
-  /**
* @param dimColResolvedFilterInfo
* @param segmentProperties
*/
@@ -53,54 +37,6 @@ public class ExcludeColGroupFilterExecuterImpl extends ExcludeFilterExecuterImpl
   }
 
   /**
-   * It fills BitSet with row index which matches filter key
-   */
-  protected BitSet getFilteredIndexes(DimensionColumnDataChunk dimensionColumnDataChunk,
-      int numerOfRows) {
-    BitSet bitSet = new BitSet(numerOfRows);
-    bitSet.flip(0, numerOfRows);
-    try {
-      KeyStructureInfo keyStructureInfo = getKeyStructureInfo();
-      byte[][] filterValues = dimColumnExecuterInfo.getExcludeFilterKeys();
-      for (int i = 0; i < filterValues.length; i++) {
-        byte[] filterVal = filterValues[i];
-        for (int rowId = 0; rowId < numerOfRows; rowId++) {
-          byte[] colData = new byte[keyStructureInfo.getMaskByteRanges().length];
-          dimensionColumnDataChunk.fillChunkData(colData, 0, rowId, keyStructureInfo);
-          if (ByteUtil.UnsafeComparer.INSTANCE.compareTo(filterVal, colData) == 0) {
-            bitSet.flip(rowId);
-          }
-        }
-      }
-
-    } catch (Exception e) {
-      LOGGER.error(e);
-    }
-
-    return bitSet;
-  }
-
-  /**
-   * It is required for extracting column data from columngroup chunk
-   *
-   * @return
-   * @throws KeyGenException
-   */
-  private KeyStructureInfo getKeyStructureInfo() throws KeyGenException {
-    int colGrpId = getColumnGroupId(dimColEvaluatorInfo.getColumnIndex());
-    KeyGenerator keyGenerator = segmentProperties.getColumnGroupAndItsKeygenartor().get(colGrpId);
-    List<Integer> mdKeyOrdinal = new ArrayList<Integer>();
-    mdKeyOrdinal.add(getMdkeyOrdinal(dimColEvaluatorInfo.getColumnIndex(), colGrpId));
-    int[] maskByteRanges = QueryUtil.getMaskedByteRangeBasedOrdinal(mdKeyOrdinal, keyGenerator);
-    byte[] maxKey = QueryUtil.getMaxKeyBasedOnOrinal(mdKeyOrdinal, keyGenerator);
-    KeyStructureInfo restructureInfos = new KeyStructureInfo();
-    restructureInfos.setKeyGenerator(keyGenerator);
-    restructureInfos.setMaskByteRanges(maskByteRanges);
-    restructureInfos.setMaxKey(maxKey);
-    return restructureInfos;
-  }
-
-  /**
* Check if scan is required on given block based on min and max value
*/
   public BitSet isScanRequired(byte[][] blkMaxVal, byte[][] blkMinVal) {
@@ -109,25 +45,4 @@ public class ExcludeColGroupFilterExecuterImpl extends ExcludeFilterExecuterImpl
 return bitSet;
   }
 
-  private int getMdkeyOrdinal(int ordinal, int colGrpId) {
-    return segmentProperties.getColumnGroupMdKeyOrdinal(colGrpId, ordinal);
-  }
-
-  private int getColumnGroupId(int ordinal) {
-    int[][] columnGroups = segmentProperties.getColumnGroups();
-    int colGrpId = -1;
-    for (int i = 0; i < columnGroups.length; i++) {
-      if (columnGroups[i].length > 1) {
-        colGrpId++;
-        if (QueryUtil.searchInArray(columnGroups[i], ordinal)) {
-          break;
-        }
-      }
-    }
-    return colGrpId;
-  }
-
-  public KeyGenerator getKeyGenerator(int 

[33/36] carbondata git commit: [CARBONDATA-1544][Datamap] Datamap FineGrain implementation

2018-02-09 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/dc43a46a/datamap/examples/src/minmaxdatamap/main/java/org/apache/carbondata/datamap/examples/MinMaxDataWriter.java
--
diff --git a/datamap/examples/src/minmaxdatamap/main/java/org/apache/carbondata/datamap/examples/MinMaxDataWriter.java b/datamap/examples/src/minmaxdatamap/main/java/org/apache/carbondata/datamap/examples/MinMaxDataWriter.java
index 78544d3..fe0bbcf 100644
--- a/datamap/examples/src/minmaxdatamap/main/java/org/apache/carbondata/datamap/examples/MinMaxDataWriter.java
+++ b/datamap/examples/src/minmaxdatamap/main/java/org/apache/carbondata/datamap/examples/MinMaxDataWriter.java
@@ -19,7 +19,6 @@ package org.apache.carbondata.datamap.examples;
 
 import java.io.BufferedWriter;
 import java.io.DataOutputStream;
-import java.io.File;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
 import java.util.ArrayList;
@@ -29,17 +28,18 @@ import java.util.Map;
 
 import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
-import org.apache.carbondata.core.constants.CarbonCommonConstants;
-import org.apache.carbondata.core.datamap.dev.DataMapWriter;
+import org.apache.carbondata.core.datamap.dev.AbstractDataMapWriter;
 import org.apache.carbondata.core.datastore.impl.FileFactory;
 import org.apache.carbondata.core.datastore.page.ColumnPage;
+import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
 import org.apache.carbondata.core.metadata.schema.table.TableInfo;
 import org.apache.carbondata.core.util.ByteUtil;
 import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.core.util.path.CarbonTablePath;
 
 import com.google.gson.Gson;
 
-public class MinMaxDataWriter implements DataMapWriter {
+public class MinMaxDataWriter extends AbstractDataMapWriter {
 
   private static final LogService LOGGER =
   LogServiceFactory.getLogService(TableInfo.class.getName());
@@ -50,17 +50,23 @@ public class MinMaxDataWriter implements DataMapWriter {
 
   private Map blockMinMaxMap;
 
-  private String blockPath;
+  private String dataWritePath;
 
+  public MinMaxDataWriter(AbsoluteTableIdentifier identifier, String segmentId,
+  String dataWritePath) {
+super(identifier, segmentId, dataWritePath);
+this.identifier = identifier;
+this.segmentId = segmentId;
+this.dataWritePath = dataWritePath;
+  }
 
-  @Override public void onBlockStart(String blockId, String blockPath) {
+  @Override public void onBlockStart(String blockId) {
 pageLevelMax = null;
 pageLevelMin = null;
 blockletLevelMax = null;
 blockletLevelMin = null;
 blockMinMaxMap = null;
 blockMinMaxMap = new HashMap();
-this.blockPath = blockPath;
   }
 
   @Override public void onBlockEnd(String blockId) {
@@ -161,7 +167,7 @@ public class MinMaxDataWriter implements DataMapWriter {
 List tempMinMaxIndexBlockDetails = null;
 tempMinMaxIndexBlockDetails = loadBlockDetails();
 try {
-  writeMinMaxIndexFile(tempMinMaxIndexBlockDetails, blockPath, blockId);
+  writeMinMaxIndexFile(tempMinMaxIndexBlockDetails, blockId);
 } catch (IOException ex) {
   LOGGER.info(" Unable to write the file");
 }
@@ -178,7 +184,6 @@ public class MinMaxDataWriter implements DataMapWriter {
  tmpminMaxIndexBlockDetails.setMinValues(blockMinMaxMap.get(index).getMin());
  tmpminMaxIndexBlockDetails.setMaxValues(blockMinMaxMap.get(index).getMax());
   tmpminMaxIndexBlockDetails.setBlockletId(index);
-  tmpminMaxIndexBlockDetails.setFilePath(this.blockPath);
   minMaxIndexBlockDetails.add(tmpminMaxIndexBlockDetails);
 }
 return minMaxIndexBlockDetails;
@@ -187,22 +192,19 @@ public class MinMaxDataWriter implements DataMapWriter {
   /**
* Write the data to a file. This is JSON format file.
* @param minMaxIndexBlockDetails
-   * @param blockPath
* @param blockId
* @throws IOException
*/
   public void writeMinMaxIndexFile(List minMaxIndexBlockDetails,
-  String blockPath, String blockId) throws IOException {
-String filePath = blockPath.substring(0, blockPath.lastIndexOf(File.separator) + 1) + blockId
-+ ".minmaxindex";
+  String blockId) throws IOException {
+String filePath = dataWritePath +"/" + blockId + ".minmaxindex";
 BufferedWriter brWriter = null;
 DataOutputStream dataOutStream = null;
 try {
   FileFactory.createNewFile(filePath, FileFactory.getFileType(filePath));
   dataOutStream = FileFactory.getDataOutputStream(filePath, FileFactory.getFileType(filePath));
   Gson gsonObjectToWrite = new Gson();
-  brWriter = new BufferedWriter(new OutputStreamWriter(dataOutStream,
-  CarbonCommonConstants.CARBON_DEFAULT_STREAM_ENCODEFORMAT));
+  brWriter = new BufferedWriter(new 

[36/36] carbondata git commit: [HotFix][CheckStyle] Fix import related checkstyle

2018-02-09 Thread jackylk
[HotFix][CheckStyle] Fix import related checkstyle

This closes #1952


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/7adb295b
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/7adb295b
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/7adb295b

Branch: refs/heads/carbonstore-rebase
Commit: 7adb295b650a69fa5cab9ff8d6c1cf5a599793ab
Parents: dc43a46
Author: xuchuanyin 
Authored: Thu Feb 8 15:39:45 2018 +0800
Committer: Jacky Li 
Committed: Sat Feb 10 02:20:11 2018 +0800

--
 .../core/indexstore/blockletindex/BlockletDataRefNode.java | 2 +-
 .../org/apache/carbondata/core/memory/HeapMemoryAllocator.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/7adb295b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataRefNode.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataRefNode.java b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataRefNode.java
index b8fd6ff..50862a7 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataRefNode.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataRefNode.java
@@ -33,8 +33,8 @@ import org.apache.carbondata.core.datastore.chunk.reader.MeasureColumnChunkReade
 import org.apache.carbondata.core.indexstore.BlockletDetailInfo;
 import org.apache.carbondata.core.indexstore.FineGrainBlocklet;
 import org.apache.carbondata.core.metadata.ColumnarFormatVersion;
-import org.apache.carbondata.core.util.BitSetGroup;
 import org.apache.carbondata.core.metadata.blocklet.index.BlockletIndex;
+import org.apache.carbondata.core.util.BitSetGroup;
 
 /**
  * wrapper for blocklet data map data

http://git-wip-us.apache.org/repos/asf/carbondata/blob/7adb295b/core/src/main/java/org/apache/carbondata/core/memory/HeapMemoryAllocator.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/memory/HeapMemoryAllocator.java b/core/src/main/java/org/apache/carbondata/core/memory/HeapMemoryAllocator.java
index 2203b3b..5862933 100644
--- a/core/src/main/java/org/apache/carbondata/core/memory/HeapMemoryAllocator.java
+++ b/core/src/main/java/org/apache/carbondata/core/memory/HeapMemoryAllocator.java
@@ -17,11 +17,11 @@
 
 package org.apache.carbondata.core.memory;
 
-import javax.annotation.concurrent.GuardedBy;
 import java.lang.ref.WeakReference;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.Map;
+import javax.annotation.concurrent.GuardedBy;
 
 /**
  * Code ported from Apache Spark {org.apache.spark.unsafe.memory} package



[32/36] carbondata git commit: [CARBONDATA-2018][DataLoad] Optimization in reading/writing for sort temp row

2018-02-09 Thread jackylk
[CARBONDATA-2018][DataLoad] Optimization in reading/writing for sort temp row

Pick up the no-sort fields in the row, pack them as a byte array, and skip parsing them during merge sort to reduce CPU consumption.
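
As a rough illustration of the idea only (not the actual IntermediateSortTempRow/SortStepRowHandler code; the field layout and encoding below are hypothetical): the no-sort fields are serialized once into an opaque byte[], so the merge-time comparator only ever touches the typed sort columns.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class SortTempRowSketch {

  // sort columns stay typed; no-sort columns travel as opaque bytes
  static final class PackedRow {
    final int[] sortDims;      // parsed during every merge comparison
    final byte[] noSortBytes;  // skipped during merge, unpacked only at the end
    PackedRow(int[] sortDims, byte[] noSortBytes) {
      this.sortDims = sortDims;
      this.noSortBytes = noSortBytes;
    }
  }

  // pack once when the row is written to the sort temp store
  static PackedRow pack(int[] sortDims, Object[] noSortFields) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);
    for (Object field : noSortFields) {
      byte[] bytes = field.toString().getBytes(StandardCharsets.UTF_8); // hypothetical encoding
      out.writeShort(bytes.length);
      out.write(bytes);
    }
    return new PackedRow(sortDims, bos.toByteArray());
  }

  // merge-time comparator never looks at noSortBytes, saving the CPU
  // that re-parsing those fields used to cost
  static int compare(PackedRow a, PackedRow b) {
    for (int i = 0; i < a.sortDims.length; i++) {
      int c = Integer.compare(a.sortDims[i], b.sortDims[i]);
      if (c != 0) {
        return c;
      }
    }
    return 0;
  }
}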

This closes #1792


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/1acc4e18
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/1acc4e18
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/1acc4e18

Branch: refs/heads/carbonstore-rebase
Commit: 1acc4e18746f511599e9cd81762e332ebc9ed390
Parents: 7adb295
Author: xuchuanyin 
Authored: Thu Feb 8 14:35:14 2018 +0800
Committer: Jacky Li 
Committed: Sat Feb 10 02:20:11 2018 +0800

--
 .../carbondata/core/util/NonDictionaryUtil.java |  67 +--
 .../presto/util/CarbonDataStoreCreator.scala|   1 -
 .../load/DataLoadProcessorStepOnSpark.scala |   6 +-
 .../loading/row/IntermediateSortTempRow.java| 117 +
 .../loading/sort/SortStepRowHandler.java| 466 +++
 .../loading/sort/SortStepRowUtil.java   | 103 
 .../sort/unsafe/UnsafeCarbonRowPage.java| 331 ++---
 .../loading/sort/unsafe/UnsafeSortDataRows.java |  57 +--
 .../unsafe/comparator/UnsafeRowComparator.java  |  95 ++--
 .../UnsafeRowComparatorForNormalDIms.java   |  59 ---
 .../UnsafeRowComparatorForNormalDims.java   |  59 +++
 .../sort/unsafe/holder/SortTempChunkHolder.java |   3 +-
 .../holder/UnsafeFinalMergePageHolder.java  |  19 +-
 .../unsafe/holder/UnsafeInmemoryHolder.java |  21 +-
 .../holder/UnsafeSortTempFileChunkHolder.java   | 138 ++
 .../merger/UnsafeIntermediateFileMerger.java| 118 +
 .../UnsafeSingleThreadFinalSortFilesMerger.java |  27 +-
 .../merger/CompactionResultSortProcessor.java   |   1 -
 .../sort/sortdata/IntermediateFileMerger.java   |  95 +---
 .../IntermediateSortTempRowComparator.java  |  73 +++
 .../sort/sortdata/NewRowComparator.java |   5 +-
 .../sortdata/NewRowComparatorForNormalDims.java |   3 +-
 .../processing/sort/sortdata/RowComparator.java |  94 
 .../sortdata/RowComparatorForNormalDims.java|  62 ---
 .../SingleThreadFinalSortFilesMerger.java   |  25 +-
 .../processing/sort/sortdata/SortDataRows.java  |  85 +---
 .../sort/sortdata/SortTempFileChunkHolder.java  | 174 ++-
 .../sort/sortdata/TableFieldStat.java   | 176 +++
 28 files changed, 1186 insertions(+), 1294 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/1acc4e18/core/src/main/java/org/apache/carbondata/core/util/NonDictionaryUtil.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/util/NonDictionaryUtil.java b/core/src/main/java/org/apache/carbondata/core/util/NonDictionaryUtil.java
index d6ecfbc..fca1244 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/NonDictionaryUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/NonDictionaryUtil.java
@@ -82,18 +82,26 @@ public class NonDictionaryUtil {
   }
 
   /**
-   * Method to get the required Dimension from obj []
+   * Method to get the required dictionary Dimension from obj []
*
* @param index
* @param row
* @return
*/
-  public static Integer getDimension(int index, Object[] row) {
-
-Integer[] dimensions = (Integer[]) row[WriteStepRowUtil.DICTIONARY_DIMENSION];
-
+  public static int getDictDimension(int index, Object[] row) {
+int[] dimensions = (int[]) row[WriteStepRowUtil.DICTIONARY_DIMENSION];
 return dimensions[index];
+  }
 
+  /**
+   * Method to get the required non-dictionary & complex from 3-parted row
+   * @param index
+   * @param row
+   * @return
+   */
+  public static byte[] getNoDictOrComplex(int index, Object[] row) {
+byte[][] nonDictArray = (byte[][]) row[WriteStepRowUtil.NO_DICTIONARY_AND_COMPLEX];
+return nonDictArray[index];
   }
 
   /**
@@ -108,60 +116,11 @@ public class NonDictionaryUtil {
 return measures[index];
   }
 
-  public static byte[] getByteArrayForNoDictionaryCols(Object[] row) {
-
-return (byte[]) row[WriteStepRowUtil.NO_DICTIONARY_AND_COMPLEX];
-  }
-
  public static void prepareOutObj(Object[] out, int[] dimArray, byte[][] byteBufferArr,
   Object[] measureArray) {
-
 out[WriteStepRowUtil.DICTIONARY_DIMENSION] = dimArray;
 out[WriteStepRowUtil.NO_DICTIONARY_AND_COMPLEX] = byteBufferArr;
 out[WriteStepRowUtil.MEASURE] = measureArray;
 
   }
-
-  /**
-   * This method will extract the single dimension from the complete high card dims byte[].
-   *
-   * The format of the byte [] will be,  Totallength,CompleteStartOffsets,Dat
-   *
-   * @param highCardArr
-   * @param index
-   * @param highCardinalityCount
-   

[15/36] carbondata git commit: [CARBONDATA-2025] Unify all path construction through CarbonTablePath static method

2018-02-09 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/bfa9a2c2/core/src/test/java/org/apache/carbondata/core/cache/dictionary/ForwardDictionaryCacheTest.java
--
diff --git a/core/src/test/java/org/apache/carbondata/core/cache/dictionary/ForwardDictionaryCacheTest.java b/core/src/test/java/org/apache/carbondata/core/cache/dictionary/ForwardDictionaryCacheTest.java
index d0aedd4..c36c89d 100644
--- a/core/src/test/java/org/apache/carbondata/core/cache/dictionary/ForwardDictionaryCacheTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/cache/dictionary/ForwardDictionaryCacheTest.java
@@ -56,7 +56,7 @@ public class ForwardDictionaryCacheTest extends AbstractDictionaryCacheTest {
 this.carbonStorePath = props.getProperty("storePath", "carbonStore");
 carbonTableIdentifier =
 new CarbonTableIdentifier(databaseName, tableName, UUID.randomUUID().toString());
-absoluteTableIdentifier =
+identifier =
 AbsoluteTableIdentifier.from(carbonStorePath + "/" + databaseName + "/" + tableName,
 carbonTableIdentifier);
 columnIdentifiers = new String[] { "name", "place" };
@@ -67,7 +67,7 @@ public class ForwardDictionaryCacheTest extends AbstractDictionaryCacheTest {
 
   @After public void tearDown() throws Exception {
 carbonTableIdentifier = null;
-absoluteTableIdentifier = null;
+identifier = null;
 forwardDictionaryCache = null;
 deleteStorePath();
   }
@@ -217,7 +217,7 @@ public class ForwardDictionaryCacheTest extends AbstractDictionaryCacheTest {
   private void writeSortIndexFile(List data, String columnId) throws IOException {
 ColumnIdentifier columnIdentifier = new ColumnIdentifier(columnId, null, null);
 DictionaryColumnUniqueIdentifier dictionaryColumnUniqueIdentifier =
-new DictionaryColumnUniqueIdentifier(absoluteTableIdentifier, columnIdentifier,
+new DictionaryColumnUniqueIdentifier(identifier, columnIdentifier,
 columnIdentifier.getDataType());
 Map dataToSurrogateKeyMap = new HashMap<>(data.size());
 int surrogateKey = 0;

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bfa9a2c2/core/src/test/java/org/apache/carbondata/core/cache/dictionary/ReverseDictionaryCacheTest.java
--
diff --git a/core/src/test/java/org/apache/carbondata/core/cache/dictionary/ReverseDictionaryCacheTest.java b/core/src/test/java/org/apache/carbondata/core/cache/dictionary/ReverseDictionaryCacheTest.java
index 01cb3a9..d2bf2e3 100644
--- a/core/src/test/java/org/apache/carbondata/core/cache/dictionary/ReverseDictionaryCacheTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/cache/dictionary/ReverseDictionaryCacheTest.java
@@ -58,7 +58,7 @@ public class ReverseDictionaryCacheTest extends AbstractDictionaryCacheTest {
 this.carbonStorePath = props.getProperty("storePath", "carbonStore");
 carbonTableIdentifier =
 new CarbonTableIdentifier(databaseName, tableName, UUID.randomUUID().toString());
-absoluteTableIdentifier = AbsoluteTableIdentifier.from(
+identifier = AbsoluteTableIdentifier.from(
 carbonStorePath + "/" + databaseName + "/" + tableName, 
carbonTableIdentifier);
 columnIdentifiers = new String[] { "name", "place" };
 deleteStorePath();
@@ -69,7 +69,7 @@ public class ReverseDictionaryCacheTest extends AbstractDictionaryCacheTest {
   @After public void tearDown() throws Exception {
 carbonTableIdentifier = null;
 reverseDictionaryCache = null;
-absoluteTableIdentifier = null;
+identifier = null;
 deleteStorePath();
   }
 
@@ -276,6 +276,6 @@ public class ReverseDictionaryCacheTest extends AbstractDictionaryCacheTest {
   protected DictionaryColumnUniqueIdentifier createDictionaryColumnUniqueIdentifier(
  String columnId) {
 ColumnIdentifier columnIdentifier = new ColumnIdentifier(columnId, null, DataTypes.DOUBLE);
-return new DictionaryColumnUniqueIdentifier(absoluteTableIdentifier, columnIdentifier);
+return new DictionaryColumnUniqueIdentifier(identifier, columnIdentifier);
  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bfa9a2c2/core/src/test/java/org/apache/carbondata/core/reader/CarbonDictionaryReaderImplTest.java
--
diff --git a/core/src/test/java/org/apache/carbondata/core/reader/CarbonDictionaryReaderImplTest.java b/core/src/test/java/org/apache/carbondata/core/reader/CarbonDictionaryReaderImplTest.java
index d3c3bc3..ecabfd4 100644
--- a/core/src/test/java/org/apache/carbondata/core/reader/CarbonDictionaryReaderImplTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/reader/CarbonDictionaryReaderImplTest.java
@@ -28,8 +28,6 @@ import 

[25/36] carbondata git commit: [CARBONDATA-2099] Refactor query scan process to improve readability

2018-02-09 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/7b79ff18/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
index 6490694..f20108b 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
@@ -43,10 +43,9 @@ import org.apache.carbondata.core.datastore.block.SegmentProperties;
 import org.apache.carbondata.core.datastore.block.TableBlockInfo;
 import org.apache.carbondata.core.datastore.block.TableBlockUniqueIdentifier;
 import org.apache.carbondata.core.indexstore.BlockletDetailInfo;
-import org.apache.carbondata.core.indexstore.blockletindex.BlockletDataRefNodeWrapper;
+import org.apache.carbondata.core.indexstore.blockletindex.BlockletDataRefNode;
 import org.apache.carbondata.core.indexstore.blockletindex.IndexWrapper;
 import org.apache.carbondata.core.keygenerator.KeyGenException;
-import org.apache.carbondata.core.keygenerator.KeyGenerator;
 import org.apache.carbondata.core.memory.UnsafeMemoryManager;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
 import org.apache.carbondata.core.metadata.blocklet.BlockletInfo;
@@ -64,8 +63,8 @@ import org.apache.carbondata.core.scan.executor.util.RestructureUtil;
 import org.apache.carbondata.core.scan.filter.FilterUtil;
 import org.apache.carbondata.core.scan.filter.SingleTableProvider;
 import org.apache.carbondata.core.scan.filter.TableProvider;
-import org.apache.carbondata.core.scan.model.QueryDimension;
-import org.apache.carbondata.core.scan.model.QueryMeasure;
+import org.apache.carbondata.core.scan.model.ProjectionDimension;
+import org.apache.carbondata.core.scan.model.ProjectionMeasure;
 import org.apache.carbondata.core.scan.model.QueryModel;
 import org.apache.carbondata.core.stats.QueryStatistic;
 import org.apache.carbondata.core.stats.QueryStatisticsConstants;
@@ -122,7 +121,6 @@ public abstract class AbstractQueryExecutor implements QueryExecutor {
 queryProperties.queryStatisticsRecorder =
 CarbonTimeStatisticsFactory.createExecutorRecorder(queryModel.getQueryId());
 queryModel.setStatisticsRecorder(queryProperties.queryStatisticsRecorder);
-QueryUtil.resolveQueryModel(queryModel);
 QueryStatistic queryStatistic = new QueryStatistic();
 // sort the block info
 // so block will be loaded in sorted order this will be required for
@@ -169,12 +167,12 @@ public abstract class AbstractQueryExecutor implements QueryExecutor {
 .addStatistics(QueryStatisticsConstants.LOAD_BLOCKS_EXECUTOR, System.currentTimeMillis());
 queryProperties.queryStatisticsRecorder.recordStatistics(queryStatistic);
 // calculating the total number of aggeragted columns
-int measureCount = queryModel.getQueryMeasures().size();
+int measureCount = queryModel.getProjectionMeasures().size();
 
 int currentIndex = 0;
 DataType[] dataTypes = new DataType[measureCount];
 
-for (QueryMeasure carbonMeasure : queryModel.getQueryMeasures()) {
+for (ProjectionMeasure carbonMeasure : queryModel.getProjectionMeasures()) {
   // adding the data type and aggregation type of all the measure this
   // can be used
   // to select the aggregator
@@ -199,9 +197,11 @@ public abstract class AbstractQueryExecutor implements QueryExecutor {
 queryStatistic = new QueryStatistic();
 // dictionary column unique column id to dictionary mapping
 // which will be used to get column actual data
-queryProperties.columnToDictionayMapping = QueryUtil
-.getDimensionDictionaryDetail(queryModel.getQueryDimension(),
-queryProperties.complexFilterDimension, queryModel.getAbsoluteTableIdentifier(),
+queryProperties.columnToDictionayMapping =
+QueryUtil.getDimensionDictionaryDetail(
+queryModel.getProjectionDimensions(),
+queryProperties.complexFilterDimension,
+queryModel.getAbsoluteTableIdentifier(),
 tableProvider);
 queryStatistic
 .addStatistics(QueryStatisticsConstants.LOAD_DICTIONARY, System.currentTimeMillis());
@@ -264,8 +264,8 @@ public abstract class AbstractQueryExecutor implements QueryExecutor {
 // and query will be executed based on that infos
 for (int i = 0; i < queryProperties.dataBlocks.size(); i++) {
   AbstractIndex abstractIndex = queryProperties.dataBlocks.get(i);
-  BlockletDataRefNodeWrapper dataRefNode =
-  (BlockletDataRefNodeWrapper) abstractIndex.getDataRefNode();
+  BlockletDataRefNode dataRefNode =
+  (BlockletDataRefNode) abstractIndex.getDataRefNode();
   

[17/36] carbondata git commit: [CARBONDATA-2099] Refactor query scan process to improve readability

2018-02-09 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/7b79ff18/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionExecutor.java
--
diff --git a/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionExecutor.java b/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionExecutor.java
index f51ced3..6a401d8 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionExecutor.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionExecutor.java
@@ -34,20 +34,16 @@ import org.apache.carbondata.core.datastore.block.TableBlockInfo;
 import org.apache.carbondata.core.datastore.block.TaskBlockInfo;
 import org.apache.carbondata.core.metadata.blocklet.DataFileFooter;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
-import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
-import org.apache.carbondata.core.metadata.schema.table.column.CarbonMeasure;
 import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
 import org.apache.carbondata.core.scan.executor.QueryExecutor;
 import org.apache.carbondata.core.scan.executor.QueryExecutorFactory;
-import org.apache.carbondata.core.scan.executor.exception.QueryExecutionException;
-import org.apache.carbondata.core.scan.model.QueryDimension;
-import org.apache.carbondata.core.scan.model.QueryMeasure;
 import org.apache.carbondata.core.scan.model.QueryModel;
-import org.apache.carbondata.core.scan.result.BatchResult;
+import org.apache.carbondata.core.scan.result.RowBatch;
 import org.apache.carbondata.core.scan.result.iterator.RawResultIterator;
 import org.apache.carbondata.core.util.CarbonProperties;
 import org.apache.carbondata.core.util.CarbonUtil;
-import org.apache.carbondata.core.util.DataTypeUtil;
+import org.apache.carbondata.core.util.DataTypeConverter;
 
 /**
  * Executor class for executing the query on the selected segments to be merged.
@@ -70,6 +66,9 @@ public class CarbonCompactionExecutor {
*/
   private boolean restructuredBlockExists;
 
+  // converter for UTF8String and decimal conversion
+  private DataTypeConverter dataTypeConverter;
+
   /**
* Constructor
*
@@ -82,13 +81,14 @@ public class CarbonCompactionExecutor {
   public CarbonCompactionExecutor(Map segmentMapping,
   SegmentProperties segmentProperties, CarbonTable carbonTable,
   Map dataFileMetadataSegMapping,
-  boolean restructuredBlockExists) {
+  boolean restructuredBlockExists, DataTypeConverter dataTypeConverter) {
 this.segmentMapping = segmentMapping;
 this.destinationSegProperties = segmentProperties;
 this.carbonTable = carbonTable;
 this.dataFileMetadataSegMapping = dataFileMetadataSegMapping;
 this.restructuredBlockExists = restructuredBlockExists;
-queryExecutorList = new ArrayList<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+this.queryExecutorList = new ArrayList<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+this.dataTypeConverter = dataTypeConverter;
   }
 
   /**
@@ -100,7 +100,9 @@ public class CarbonCompactionExecutor {
 List resultList =
 new ArrayList<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
 List list = null;
-queryModel = prepareQueryModel(list);
+queryModel = carbonTable.createQueryModelWithProjectAllColumns(dataTypeConverter);
+queryModel.setReadPageByPage(enablePageLevelReaderForCompaction());
+queryModel.setForcedDetailRawQuery(true);
 // iterate each seg ID
 for (Map.Entry taskMap : segmentMapping.entrySet()) {
   String segmentId = taskMap.getKey();
@@ -156,7 +158,7 @@ public class CarbonCompactionExecutor {
* @param blockList
* @return
*/
-  private CarbonIterator<BatchResult> executeBlockList(List<TableBlockInfo> blockList)
+  private CarbonIterator<RowBatch> executeBlockList(List<TableBlockInfo> blockList)
   throws QueryExecutionException, IOException {
 queryModel.setTableBlockInfos(blockList);
 QueryExecutor queryExecutor = QueryExecutorFactory.getQueryExecutor(queryModel);
@@ -195,48 +197,6 @@ public class CarbonCompactionExecutor {
   }
 
   /**
-   * Preparing of the query model.
-   *
-   * @param blockList
-   * @return
-   */
-  private QueryModel prepareQueryModel(List blockList) {
-QueryModel model = new QueryModel();
-model.setTableBlockInfos(blockList);
-model.setForcedDetailRawQuery(true);
-model.setFilterExpressionResolverTree(null);
-model.setConverter(DataTypeUtil.getDataTypeConverter());
-model.setReadPageByPage(enablePageLevelReaderForCompaction());
-
-List dims = new ArrayList<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-
-List dimensions =
-carbonTable.getDimensionByTableName(carbonTable.getTableName());
-for (CarbonDimension dim : dimensions) {
- 

[34/36] carbondata git commit: [CARBONDATA-1544][Datamap] Datamap FineGrain implementation

2018-02-09 Thread jackylk
[CARBONDATA-1544][Datamap] Datamap FineGrain implementation

Implemented interfaces for the FG datamap and integrated them into the filter scanner so that it uses the pruned bitset from the FG datamap.
The FG query flow is as follows (a sketch of the prune step follows the list):
1. The user can add an FG datamap to any table and implement its interfaces.
2. Any filter query that hits the table with the datamap will call the prune method of the FG datamap.
3. The prune method of the FG datamap returns a list of FineGrainBlocklet; these blocklets contain the block, blocklet, page, and rowid information.
4. The pruned blocklets are internally written to a file, and only the block, blocklet, and file path information is returned as part of the splits.
5. Based on the splits, the scan RDD schedules the tasks.
6. In the filter scanner, we check the datamap writer path from the split, read the bitset if it exists, and pass it as input to the scanner.
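
A minimal sketch of steps 2-4 (simplified stand-in names, not the exact CarbonData interfaces listed in the diffstat below):

import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;

// step 3: row-level hit information returned by prune (simplified)
class FineGrainBlockletSketch {
  final String blockId;
  final String blockletId;
  final BitSet matchedRows; // page/rowid detail reduced to a bitset for brevity
  FineGrainBlockletSketch(String blockId, String blockletId, BitSet matchedRows) {
    this.blockId = blockId;
    this.blockletId = blockletId;
    this.matchedRows = matchedRows;
  }
}

// step 2: a filter query calls prune on the datamap
interface FineGrainDataMapSketch {
  List<FineGrainBlockletSketch> prune(String filterColumn, Object filterValue);
}

class FineGrainDriverSketch {
  // step 4: persist the row-level detail; the split carries only coarse info
  static List<String> toSplits(List<FineGrainBlockletSketch> pruned, String writerPath) {
    List<String> splits = new ArrayList<>();
    for (FineGrainBlockletSketch b : pruned) {
      // the bitset would be serialized under writerPath; the split keeps the path
      splits.add(b.blockId + "/" + b.blockletId + " @ " + writerPath);
    }
    return splits;
  }
}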

This closes #1471


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/dc43a46a
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/dc43a46a
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/dc43a46a

Branch: refs/heads/carbonstore-rebase
Commit: dc43a46aa85376decabf88b9089330c37dad34aa
Parents: fce7558
Author: ravipesala 
Authored: Wed Nov 15 19:48:40 2017 +0530
Committer: Jacky Li 
Committed: Sat Feb 10 02:20:11 2018 +0800

--
 .../carbondata/core/datamap/DataMapMeta.java|   8 +-
 .../core/datamap/DataMapStoreManager.java   |  30 +-
 .../carbondata/core/datamap/DataMapType.java|  21 +
 .../carbondata/core/datamap/TableDataMap.java   |  30 +-
 .../core/datamap/dev/AbstractDataMapWriter.java | 110 +
 .../core/datamap/dev/BlockletSerializer.java|  57 +++
 .../carbondata/core/datamap/dev/DataMap.java|   4 +-
 .../core/datamap/dev/DataMapFactory.java|  14 +-
 .../core/datamap/dev/DataMapWriter.java |  57 ---
 .../cgdatamap/AbstractCoarseGrainDataMap.java   |  24 +
 .../AbstractCoarseGrainDataMapFactory.java  |  34 ++
 .../dev/fgdatamap/AbstractFineGrainDataMap.java |  24 +
 .../AbstractFineGrainDataMapFactory.java|  38 ++
 .../carbondata/core/datastore/DataRefNode.java  |   6 +
 .../core/datastore/block/TableBlockInfo.java|  10 +
 .../impl/btree/AbstractBTreeLeafNode.java   |   5 +
 .../datastore/impl/btree/BTreeNonLeafNode.java  |   5 +
 .../carbondata/core/indexstore/Blocklet.java|  30 +-
 .../indexstore/BlockletDataMapIndexStore.java   |   6 -
 .../core/indexstore/BlockletDetailsFetcher.java |   8 +
 .../core/indexstore/ExtendedBlocklet.java   |  17 +
 .../core/indexstore/FineGrainBlocklet.java  | 120 +
 .../blockletindex/BlockletDataMap.java  |  15 +-
 .../blockletindex/BlockletDataMapFactory.java   |  63 ++-
 .../blockletindex/BlockletDataRefNode.java  |  27 +-
 .../indexstore/blockletindex/IndexWrapper.java  |  18 +
 .../core/indexstore/schema/FilterType.java  |  24 -
 .../executer/ExcludeFilterExecuterImpl.java |   3 +
 .../executer/IncludeFilterExecuterImpl.java |   3 +
 .../scanner/impl/BlockletFilterScanner.java |   2 +
 .../apache/carbondata/core/util/CarbonUtil.java |  98 +
 .../datamap/examples/MinMaxDataMap.java |  32 +-
 .../datamap/examples/MinMaxDataMapFactory.java  |  49 ++-
 .../datamap/examples/MinMaxDataWriter.java  |  36 +-
 .../examples/MinMaxIndexBlockDetails.java   |  13 -
 .../carbondata/hadoop/CarbonInputSplit.java |  21 +-
 .../hadoop/api/CarbonTableInputFormat.java  |  17 +-
 .../testsuite/datamap/CGDataMapTestCase.scala   | 361 +++
 .../testsuite/datamap/DataMapWriterSuite.scala  |  46 +-
 .../testsuite/datamap/FGDataMapTestCase.scala   | 440 +++
 .../TestInsertAndOtherCommandConcurrent.scala   |  21 +-
 .../carbondata/spark/rdd/CarbonScanRDD.scala|   7 +-
 .../TestStreamingTableOperation.scala   |   5 +-
 .../datamap/DataMapWriterListener.java  |  57 ++-
 .../store/CarbonFactDataHandlerModel.java   |  10 +-
 .../store/writer/AbstractFactDataWriter.java| 128 +-
 46 files changed, 1764 insertions(+), 390 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/dc43a46a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapMeta.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapMeta.java b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapMeta.java
index 7746acf..dd15ccb 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapMeta.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapMeta.java
@@ -19,15 +19,15 @@ package org.apache.carbondata.core.datamap;
 
 import java.util.List;
 
-import 

[18/36] carbondata git commit: [CARBONDATA-2099] Refactor query scan process to improve readability

2018-02-09 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/7b79ff18/core/src/test/java/org/apache/carbondata/core/util/CarbonUtilTest.java
--
diff --git a/core/src/test/java/org/apache/carbondata/core/util/CarbonUtilTest.java b/core/src/test/java/org/apache/carbondata/core/util/CarbonUtilTest.java
index 984efdb..c5b4d83 100644
--- a/core/src/test/java/org/apache/carbondata/core/util/CarbonUtilTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/util/CarbonUtilTest.java
@@ -30,7 +30,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.carbondata.core.datastore.block.TableBlockInfo;
-import org.apache.carbondata.core.datastore.chunk.impl.FixedLengthDimensionDataChunk;
+import org.apache.carbondata.core.datastore.chunk.impl.FixedLengthDimensionColumnPage;
 import org.apache.carbondata.core.datastore.columnar.ColumnGroupModel;
 import org.apache.carbondata.core.datastore.filesystem.LocalCarbonFile;
 import org.apache.carbondata.core.datastore.impl.FileFactory;
@@ -44,7 +44,7 @@ import org.apache.carbondata.core.metadata.encoder.Encoding;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonMeasure;
 import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
-import org.apache.carbondata.core.scan.model.QueryDimension;
+import org.apache.carbondata.core.scan.model.ProjectionDimension;
 
 import mockit.Mock;
 import mockit.MockUp;
@@ -266,8 +266,8 @@ public class CarbonUtilTest {
   @Test public void testToGetNextLesserValue() {
 byte[] dataChunks = { 5, 6, 7, 8, 9 };
 byte[] compareValues = { 7 };
-FixedLengthDimensionDataChunk fixedLengthDataChunk =
-new FixedLengthDimensionDataChunk(dataChunks, null, null, 5, 1);
+FixedLengthDimensionColumnPage fixedLengthDataChunk =
+new FixedLengthDimensionColumnPage(dataChunks, null, null, 5, 1);
 int result = CarbonUtil.nextLesserValueToTarget(2, fixedLengthDataChunk, compareValues);
 assertEquals(result, 1);
   }
@@ -275,8 +275,8 @@ public class CarbonUtilTest {
   @Test public void testToGetNextLesserValueToTarget() {
 byte[] dataChunks = { 7, 7, 7, 8, 9 };
 byte[] compareValues = { 7 };
-FixedLengthDimensionDataChunk fixedLengthDataChunk =
-new FixedLengthDimensionDataChunk(dataChunks, null, null, 5, 1);
+FixedLengthDimensionColumnPage fixedLengthDataChunk =
+new FixedLengthDimensionColumnPage(dataChunks, null, null, 5, 1);
 int result = CarbonUtil.nextLesserValueToTarget(2, fixedLengthDataChunk, compareValues);
 assertEquals(result, -1);
   }
@@ -284,8 +284,8 @@ public class CarbonUtilTest {
   @Test public void testToGetnextGreaterValue() {
 byte[] dataChunks = { 5, 6, 7, 8, 9 };
 byte[] compareValues = { 7 };
-FixedLengthDimensionDataChunk fixedLengthDataChunk =
-new FixedLengthDimensionDataChunk(dataChunks, null, null, 5, 1);
+FixedLengthDimensionColumnPage fixedLengthDataChunk =
+new FixedLengthDimensionColumnPage(dataChunks, null, null, 5, 1);
 int result = CarbonUtil.nextGreaterValueToTarget(2, fixedLengthDataChunk, compareValues, 5);
 assertEquals(result, 3);
   }
@@ -301,8 +301,8 @@ public class CarbonUtilTest {
   @Test public void testToGetnextGreaterValueToTarget() {
 byte[] dataChunks = { 5, 6, 7, 7, 7 };
 byte[] compareValues = { 7 };
-FixedLengthDimensionDataChunk fixedLengthDataChunk =
-new FixedLengthDimensionDataChunk(dataChunks, null, null, 5, 1);
+FixedLengthDimensionColumnPage fixedLengthDataChunk =
+new FixedLengthDimensionColumnPage(dataChunks, null, null, 5, 1);
 int result = CarbonUtil.nextGreaterValueToTarget(2, fixedLengthDataChunk, compareValues, 5);
 assertEquals(result, 5);
   }
@@ -524,23 +524,23 @@ public class CarbonUtilTest {
   }
 
   @Test public void testToGetDictionaryEncodingArray() {
-QueryDimension column1 = new QueryDimension("Column1");
-QueryDimension column2 = new QueryDimension("Column2");
 ColumnSchema column1Schema = new ColumnSchema();
 ColumnSchema column2Schema = new ColumnSchema();
 column1Schema.setColumnName("Column1");
 List encoding = new ArrayList<>();
 encoding.add(Encoding.DICTIONARY);
 column1Schema.setEncodingList(encoding);
-column1.setDimension(new CarbonDimension(column1Schema, 1, 1, 1, 1));
+ProjectionDimension
+column1 = new ProjectionDimension(new CarbonDimension(column1Schema, 1, 1, 1, 1));
 
 column2Schema.setColumnName("Column2");
 List encoding2 = new ArrayList<>();
 encoding2.add(Encoding.DELTA);
 column2Schema.setEncodingList(encoding2);
-column2.setDimension(new CarbonDimension(column2Schema, 1, 1, 1, 1));
+ProjectionDimension
+column2 = new ProjectionDimension(new CarbonDimension(column2Schema, 1, 1, 1, 1));
 
-QueryDimension[] 

[12/36] carbondata git commit: [CARBONDATA-1480]Min Max Index Example for DataMap

2018-02-09 Thread jackylk
[CARBONDATA-1480] Min Max Index Example for DataMap

DataMap example: an implementation of a Min Max index through the DataMap interface, using the index while pruning. A sketch of the pruning idea follows.
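
A minimal sketch of min/max pruning (hypothetical values; not the example's actual MinMaxDataMap code): keep a min and a max per blocklet, and skip any blocklet whose [min, max] range cannot contain the filter value.

import java.util.ArrayList;
import java.util.List;

public class MinMaxPruneSketch {
  static final class BlockletMinMax {
    final int blockletId;
    final int min;
    final int max;
    BlockletMinMax(int blockletId, int min, int max) {
      this.blockletId = blockletId;
      this.min = min;
      this.max = max;
    }
  }

  // returns only the blocklets that might contain `value`
  static List<Integer> prune(List<BlockletMinMax> index, int value) {
    List<Integer> hits = new ArrayList<>();
    for (BlockletMinMax b : index) {
      if (value >= b.min && value <= b.max) {
        hits.add(b.blockletId);
      }
    }
    return hits;
  }

  public static void main(String[] args) {
    List<BlockletMinMax> index = new ArrayList<>();
    index.add(new BlockletMinMax(0, 1, 100));
    index.add(new BlockletMinMax(1, 101, 200));
    index.add(new BlockletMinMax(2, 50, 150));
    System.out.println(prune(index, 120)); // [1, 2] -- blocklet 0 is skipped
  }
}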

This closes #1359


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/fce75583
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/fce75583
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/fce75583

Branch: refs/heads/carbonstore-rebase
Commit: fce7558333041132dc5f64d2eb7c1ddf9d1b5ec4
Parents: 9c74874
Author: sounakr 
Authored: Thu Sep 28 16:21:05 2017 +0530
Committer: Jacky Li 
Committed: Sat Feb 10 02:20:10 2018 +0800

--
 .../core/datamap/DataMapStoreManager.java   |  16 +-
 .../carbondata/core/datamap/TableDataMap.java   |  18 +-
 .../carbondata/core/datamap/dev/DataMap.java|  14 +-
 .../core/datamap/dev/DataMapWriter.java |   3 +-
 .../indexstore/SegmentPropertiesFetcher.java|  36 +++
 .../blockletindex/BlockletDataMap.java  |   9 +-
 .../blockletindex/BlockletDataMapFactory.java   |  33 ++-
 datamap/examples/pom.xml| 111 ++
 .../datamap/examples/BlockletMinMax.java|  41 
 .../datamap/examples/MinMaxDataMap.java | 143 
 .../datamap/examples/MinMaxDataMapFactory.java  | 114 ++
 .../datamap/examples/MinMaxDataWriter.java  | 221 +++
 .../examples/MinMaxIndexBlockDetails.java   |  77 +++
 .../MinMaxDataMapExample.scala  |  77 +++
 .../testsuite/datamap/DataMapWriterSuite.scala  |   2 +-
 pom.xml |   2 +
 .../datamap/DataMapWriterListener.java  |   4 +-
 .../store/writer/AbstractFactDataWriter.java|   7 +-
 .../writer/v3/CarbonFactDataWriterImplV3.java   |   3 +
 19 files changed, 900 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/fce75583/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
index d30483a..90e5fff 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapStoreManager.java
@@ -26,6 +26,7 @@ import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.carbondata.core.datamap.dev.DataMapFactory;
 import org.apache.carbondata.core.indexstore.BlockletDetailsFetcher;
+import org.apache.carbondata.core.indexstore.SegmentPropertiesFetcher;
 import org.apache.carbondata.core.indexstore.blockletindex.BlockletDataMap;
 import org.apache.carbondata.core.indexstore.blockletindex.BlockletDataMapFactory;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
@@ -103,7 +104,7 @@ public final class DataMapStoreManager {
   tableDataMaps = new ArrayList<>();
 }
 TableDataMap dataMap = getTableDataMap(dataMapName, tableDataMaps);
-if (dataMap != null) {
+if (dataMap != null && 
dataMap.getDataMapName().equalsIgnoreCase(dataMapName)) {
   throw new RuntimeException("Already datamap exists in that path with 
type " + dataMapName);
 }
 
@@ -113,12 +114,15 @@ public final class DataMapStoreManager {
   DataMapFactory dataMapFactory = factoryClass.newInstance();
   dataMapFactory.init(identifier, dataMapName);
   BlockletDetailsFetcher blockletDetailsFetcher;
+  SegmentPropertiesFetcher segmentPropertiesFetcher = null;
   if (dataMapFactory instanceof BlockletDetailsFetcher) {
 blockletDetailsFetcher = (BlockletDetailsFetcher) dataMapFactory;
   } else {
 blockletDetailsFetcher = getBlockletDetailsFetcher(identifier);
   }
-  dataMap = new TableDataMap(identifier, dataMapName, dataMapFactory, blockletDetailsFetcher);
+  segmentPropertiesFetcher = (SegmentPropertiesFetcher) blockletDetailsFetcher;
+  dataMap = new TableDataMap(identifier, dataMapName, dataMapFactory, blockletDetailsFetcher,
+  segmentPropertiesFetcher);
 } catch (Exception e) {
   LOGGER.error(e);
   throw new RuntimeException(e);
@@ -128,11 +132,11 @@ public final class DataMapStoreManager {
 return dataMap;
   }
 
-  private TableDataMap getTableDataMap(String dataMapName,
-  List tableDataMaps) {
+  private TableDataMap getTableDataMap(String dataMapName, List tableDataMaps) {
 TableDataMap dataMap = null;
-for (TableDataMap tableDataMap: tableDataMaps) {
-  if 

[22/36] carbondata git commit: [CARBONDATA-2099] Refactor query scan process to improve readability

2018-02-09 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/7b79ff18/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java
index 7f735c2..fd92fc1 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFiterExecuterImpl.java
@@ -22,7 +22,7 @@ import java.util.List;
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.datastore.block.SegmentProperties;
-import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.datastore.chunk.DimensionColumnPage;
 import org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk;
 import org.apache.carbondata.core.datastore.chunk.impl.MeasureRawColumnChunk;
 import org.apache.carbondata.core.datastore.page.ColumnPage;
@@ -35,11 +35,10 @@ import org.apache.carbondata.core.metadata.encoder.Encoding;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonMeasure;
 import org.apache.carbondata.core.scan.expression.Expression;
-import org.apache.carbondata.core.scan.expression.exception.FilterUnsupportedException;
 import org.apache.carbondata.core.scan.filter.FilterUtil;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
 import org.apache.carbondata.core.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
-import org.apache.carbondata.core.scan.processor.BlocksChunkHolder;
+import org.apache.carbondata.core.scan.processor.RawBlockletColumnChunks;
 import org.apache.carbondata.core.util.BitSetGroup;
 import org.apache.carbondata.core.util.ByteUtil;
 import org.apache.carbondata.core.util.CarbonUtil;
@@ -72,7 +71,7 @@ public class RowLevelRangeLessThanFiterExecuterImpl extends RowLevelFilterExecut
   comparator = Comparator.getComparatorByDataTypeForMeasure(measure.getDataType());
 }
 ifDefaultValueMatchesFilter();
-if (isDimensionPresentInCurrentBlock[0] == true) {
+if (isDimensionPresentInCurrentBlock[0]) {
   isNaturalSorted = dimColEvaluatorInfoList.get(0).getDimension().isUseInvertedIndex()
   && dimColEvaluatorInfoList.get(0).getDimension().isSortColumn();
 }
@@ -119,11 +118,11 @@ public class RowLevelRangeLessThanFiterExecuterImpl extends RowLevelFilterExecut
 boolean isScanRequired = false;
 if (isMeasurePresentInCurrentBlock[0] || isDimensionPresentInCurrentBlock[0]) {
   if (isMeasurePresentInCurrentBlock[0]) {
-minValue = blockMinValue[measureBlocksIndex[0] + lastDimensionColOrdinal];
+minValue = blockMinValue[measureChunkIndex[0] + lastDimensionColOrdinal];
 isScanRequired =
 isScanRequired(minValue, msrFilterRangeValues, msrColEvalutorInfoList.get(0).getType());
   } else {
-minValue = blockMinValue[dimensionBlocksIndex[0]];
+minValue = blockMinValue[dimensionChunkIndex[0]];
 isScanRequired = isScanRequired(minValue, filterRangeValues);
   }
 } else {
@@ -169,67 +168,69 @@ public class RowLevelRangeLessThanFiterExecuterImpl extends RowLevelFilterExecut
   }
 
   @Override
-  public BitSetGroup applyFilter(BlocksChunkHolder blockChunkHolder, boolean useBitsetPipeLine)
-  throws FilterUnsupportedException, IOException {
+  public BitSetGroup applyFilter(RawBlockletColumnChunks rawBlockletColumnChunks,
+  boolean useBitsetPipeLine) throws IOException {
 // select all rows if dimension does not exists in the current block
 if (!isDimensionPresentInCurrentBlock[0] && 
!isMeasurePresentInCurrentBlock[0]) {
-  int numberOfRows = blockChunkHolder.getDataBlock().nodeSize();
+  int numberOfRows = rawBlockletColumnChunks.getDataBlock().numRows();
   return FilterUtil
-  .createBitSetGroupWithDefaultValue(blockChunkHolder.getDataBlock().numberOfPages(),
+  .createBitSetGroupWithDefaultValue(rawBlockletColumnChunks.getDataBlock().numberOfPages(),
   numberOfRows, true);
 }
 if (isDimensionPresentInCurrentBlock[0]) {
-  int blockIndex =
-  segmentProperties.getDimensionOrdinalToBlockMapping().get(dimensionBlocksIndex[0]);
-  if (null == blockChunkHolder.getDimensionRawDataChunk()[blockIndex]) {
-blockChunkHolder.getDimensionRawDataChunk()[blockIndex] = blockChunkHolder.getDataBlock()
-.getDimensionChunk(blockChunkHolder.getFileReader(), blockIndex);
+  

[21/36] carbondata git commit: [CARBONDATA-2099] Refactor query scan process to improve readability

2018-02-09 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/7b79ff18/core/src/main/java/org/apache/carbondata/core/scan/processor/DataBlockIterator.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/processor/DataBlockIterator.java b/core/src/main/java/org/apache/carbondata/core/scan/processor/DataBlockIterator.java
new file mode 100644
index 000..fde4e55
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/scan/processor/DataBlockIterator.java
@@ -0,0 +1,269 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.core.scan.processor;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.carbondata.common.CarbonIterator;
+import org.apache.carbondata.core.datastore.DataRefNode;
+import org.apache.carbondata.core.datastore.FileReader;
+import org.apache.carbondata.core.scan.collector.ResultCollectorFactory;
+import org.apache.carbondata.core.scan.collector.ScannedResultCollector;
+import org.apache.carbondata.core.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.core.scan.result.BlockletScannedResult;
+import org.apache.carbondata.core.scan.result.vector.CarbonColumnarBatch;
+import org.apache.carbondata.core.scan.scanner.BlockletScanner;
+import org.apache.carbondata.core.scan.scanner.impl.BlockletFilterScanner;
+import org.apache.carbondata.core.scan.scanner.impl.BlockletFullScanner;
+import org.apache.carbondata.core.stats.QueryStatisticsModel;
+import org.apache.carbondata.core.util.TaskMetricsMap;
+
+/**
+ * This class implements the data block iterator: it iterates over the
+ * blocklets of a block and collects the scanned result in batches.
+ */
+public class DataBlockIterator extends CarbonIterator<List<Object[]>> {
+
+  /**
+   * iterator which will be used to iterate over blocklets
+   */
+  private BlockletIterator blockletIterator;
+
+  /**
+   * result collector which will be used to aggregate the scanned result
+   */
+  private ScannedResultCollector scannerResultAggregator;
+
+  /**
+   * scanner which will be used to process the blocklet; the processing can
+   * be filter scanning or full (non-filter) scanning
+   */
+  private BlockletScanner blockletScanner;
+
+  /**
+   * batch size of result
+   */
+  private int batchSize;
+
+  private ExecutorService executorService;
+
+  private Future future;
+
+  private Future futureIo;
+
+  private BlockletScannedResult scannedResult;
+
+  private BlockExecutionInfo blockExecutionInfo;
+
+  private FileReader fileReader;
+
+  private AtomicBoolean nextBlock;
+
+  private AtomicBoolean nextRead;
+
+  public DataBlockIterator(BlockExecutionInfo blockExecutionInfo, FileReader 
fileReader,
+  int batchSize, QueryStatisticsModel queryStatisticsModel, 
ExecutorService executorService) {
+this.blockExecutionInfo = blockExecutionInfo;
+this.fileReader = fileReader;
+blockletIterator = new 
BlockletIterator(blockExecutionInfo.getFirstDataBlock(),
+blockExecutionInfo.getNumberOfBlockToScan());
+if (blockExecutionInfo.getFilterExecuterTree() != null) {
+  blockletScanner = new BlockletFilterScanner(blockExecutionInfo, 
queryStatisticsModel);
+} else {
+  blockletScanner = new BlockletFullScanner(blockExecutionInfo, 
queryStatisticsModel);
+}
+this.scannerResultAggregator =
+ResultCollectorFactory.getScannedResultCollector(blockExecutionInfo);
+this.batchSize = batchSize;
+this.executorService = executorService;
+this.nextBlock = new AtomicBoolean(false);
+this.nextRead = new AtomicBoolean(false);
+  }
+
+  @Override
+  public List<Object[]> next() {
+List<Object[]> collectedResult = null;
+if (updateScanner()) {
+  collectedResult = 
this.scannerResultAggregator.collectResultInRow(scannedResult, batchSize);
+  while (collectedResult.size() < batchSize && updateScanner()) {
+List<Object[]> data = this.scannerResultAggregator
+

[11/36] carbondata git commit: [CARBONDATA-2080] [S3-Implementation] Propagated hadoopConf from driver to executor for s3 implementation in cluster mode.

2018-02-09 Thread jackylk
[CARBONDATA-2080] [S3-Implementation] Propagated hadoopConf from driver to 
executor for s3 implementation in cluster mode.

Problem: hadoopConf was not getting propagated from the driver to the 
executors, which is why data load was failing in a distributed environment.
Solution: Set the Hadoop conf in the base class CarbonRDD so every RDD carries 
it to the executors.
How to verify this PR:
Execute a data load in cluster mode with an S3 location; it should succeed.
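
For background: Hadoop's Configuration is not java-serializable, so it cannot 
be captured in an RDD closure as-is. Below is a minimal Scala sketch of one 
common way to carry it from the driver to the executors (an illustration of 
the idea only, not necessarily CarbonRDD's exact mechanism): snapshot the conf 
to bytes on the driver and rebuild it lazily on each executor.

  import java.io.{ByteArrayInputStream, ByteArrayOutputStream, DataInputStream, DataOutputStream}
  import org.apache.hadoop.conf.Configuration

  class SerializableConfiguration(@transient private var conf: Configuration) extends Serializable {
    // Snapshot on the driver; Configuration implements Writable, so it can
    // write itself to a plain byte array that Java serialization will carry.
    private val bytes: Array[Byte] = {
      val out = new ByteArrayOutputStream()
      conf.write(new DataOutputStream(out))
      out.toByteArray
    }

    // Rebuild lazily on the executor, where the transient field arrives null.
    def value: Configuration = synchronized {
      if (conf == null) {
        conf = new Configuration(false)
        conf.readFields(new DataInputStream(new ByteArrayInputStream(bytes)))
      }
      conf
    }
  }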

This closes #1860


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/9c74874f
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/9c74874f
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/9c74874f

Branch: refs/heads/carbonstore-rebase
Commit: 9c74874f069dca06e71e872fda5c2f24b4fa00da
Parents: bfa9a2c
Author: Jatin 
Authored: Thu Jan 25 16:53:00 2018 +0530
Committer: Jacky Li 
Committed: Sat Feb 10 02:20:10 2018 +0800

--
 .../spark/rdd/AlterTableAddColumnRDD.scala  |  2 +-
 .../spark/rdd/AlterTableDropColumnRDD.scala |  2 +-
 .../spark/rdd/CarbonCleanFilesRDD.scala |  2 +-
 .../spark/rdd/CarbonDeleteLoadByDateRDD.scala   |  2 +-
 .../spark/rdd/CarbonDeleteLoadRDD.scala |  2 +-
 .../spark/rdd/CarbonDropPartitionRDD.scala  |  4 +--
 .../spark/rdd/CarbonDropTableRDD.scala  |  2 +-
 .../spark/rdd/CarbonGlobalDictionaryRDD.scala   |  3 +-
 .../spark/rdd/CarbonMergeFilesRDD.scala |  0
 .../carbondata/spark/rdd/CarbonMergerRDD.scala  |  2 +-
 .../apache/carbondata/spark/rdd/CarbonRDD.scala | 32 ++--
 .../spark/rdd/NewCarbonDataLoadRDD.scala|  2 +-
 .../carbondata/spark/rdd/SparkDataMapJob.scala  |  2 +-
 .../apache/spark/rdd/DataLoadCoalescedRDD.scala |  3 +-
 .../apache/spark/rdd/UpdateCoalescedRDD.scala   |  2 +-
 .../carbondata/streaming/StreamHandoffRDD.scala |  2 +-
 16 files changed, 46 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/9c74874f/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/AlterTableAddColumnRDD.scala
--
diff --git 
a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/AlterTableAddColumnRDD.scala
 
b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/AlterTableAddColumnRDD.scala
index 56a66b9..7c1edea 100644
--- 
a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/AlterTableAddColumnRDD.scala
+++ 
b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/AlterTableAddColumnRDD.scala
@@ -50,7 +50,7 @@ class AddColumnPartition(rddId: Int, idx: Int, schema: 
ColumnSchema) extends Par
 class AlterTableAddColumnRDD[K, V](sc: SparkContext,
 @transient newColumns: Seq[ColumnSchema],
 identifier: AbsoluteTableIdentifier)
-  extends CarbonRDD[(Int, SegmentStatus)](sc, Nil) {
+  extends CarbonRDD[(Int, SegmentStatus)](sc, Nil, sc.hadoopConfiguration) {
 
   val lockType: String = 
CarbonProperties.getInstance.getProperty(CarbonCommonConstants.LOCK_TYPE,
 CarbonCommonConstants.CARBON_LOCK_TYPE_HDFS)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/9c74874f/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/AlterTableDropColumnRDD.scala
--
diff --git 
a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/AlterTableDropColumnRDD.scala
 
b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/AlterTableDropColumnRDD.scala
index 248f351..e14524e 100644
--- 
a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/AlterTableDropColumnRDD.scala
+++ 
b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/AlterTableDropColumnRDD.scala
@@ -48,7 +48,7 @@ class DropColumnPartition(rddId: Int, idx: Int, schema: 
ColumnSchema) extends Pa
 class AlterTableDropColumnRDD[K, V](sc: SparkContext,
 @transient newColumns: Seq[ColumnSchema],
 carbonTableIdentifier: AbsoluteTableIdentifier)
-  extends CarbonRDD[(Int, SegmentStatus)](sc, Nil) {
+  extends CarbonRDD[(Int, SegmentStatus)](sc, Nil, sc.hadoopConfiguration) {
 
   override def getPartitions: Array[Partition] = {
 newColumns.zipWithIndex.map { column =>

http://git-wip-us.apache.org/repos/asf/carbondata/blob/9c74874f/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonCleanFilesRDD.scala
--
diff --git 
a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonCleanFilesRDD.scala
 
b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonCleanFilesRDD.scala
index 

[09/36] carbondata git commit: [CARBONDATA-1992] Remove partitionId in CarbonTablePath

2018-02-09 Thread jackylk
[CARBONDATA-1992] Remove partitionId in CarbonTablePath

In CarbonTablePath there is a deprecated partition id which is always 0; it 
should be removed to avoid confusion.

This closes #1765


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/d3cbb026
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/d3cbb026
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/d3cbb026

Branch: refs/heads/carbonstore-rebase
Commit: d3cbb026cc8ea66e8ec49beb83b7604575714c78
Parents: da549c2
Author: Jacky Li 
Authored: Sat Jan 6 20:28:44 2018 +0800
Committer: Jacky Li 
Committed: Sat Feb 10 02:20:09 2018 +0800

--
 .../core/metadata/PartitionMapFileStore.java|   2 +-
 .../core/mutate/CarbonUpdateUtil.java   |   8 +-
 .../core/statusmanager/LoadMetadataDetails.java |   2 +
 .../SegmentUpdateStatusManager.java |   8 +-
 .../apache/carbondata/core/util/CarbonUtil.java |   6 +-
 .../core/util/path/CarbonTablePath.java |  55 ---
 .../CarbonFormatDirectoryStructureTest.java |   4 +-
 .../hadoop/api/CarbonTableInputFormat.java  |   2 +-
 .../streaming/CarbonStreamRecordWriter.java |   2 +-
 .../hadoop/test/util/StoreCreator.java  |   1 -
 .../presto/util/CarbonDataStoreCreator.scala|   1 -
 .../dataload/TestLoadDataGeneral.scala  |   2 +-
 .../InsertIntoCarbonTableTestCase.scala |   4 +-
 .../dataload/TestBatchSortDataLoad.scala|   3 +-
 .../dataload/TestDataLoadWithFileName.scala |   2 +-
 .../dataload/TestGlobalSortDataLoad.scala   |   4 +-
 .../testsuite/datamap/TestDataMapCommand.scala  |  34 ++--
 .../TestDataLoadingForPartitionTable.scala  |   3 +-
 .../StandardPartitionTableCleanTestCase.scala   |   2 +-
 ...andardPartitionTableCompactionTestCase.scala |   2 +-
 .../StandardPartitionTableLoadingTestCase.scala |   4 +-
 .../load/DataLoadProcessBuilderOnSpark.scala|   1 -
 .../load/DataLoadProcessorStepOnSpark.scala |   2 +-
 .../spark/rdd/AlterTableLoadPartitionRDD.scala  | 154 +++
 .../carbondata/spark/rdd/CarbonMergerRDD.scala  |  11 +-
 .../spark/rdd/NewCarbonDataLoadRDD.scala|  25 ++-
 .../org/apache/spark/util/PartitionUtils.scala  |   5 +-
 .../spark/rdd/CarbonDataRDDFactory.scala|   5 +-
 .../datasources/CarbonFileFormat.scala  |   1 -
 .../partition/TestAlterPartitionTable.scala |   2 +-
 .../bucketing/TableBucketingTestCase.scala  |   2 +
 .../loading/CarbonDataLoadConfiguration.java|  10 --
 .../loading/DataLoadProcessBuilder.java |   1 -
 .../loading/TableProcessingOperations.java  |   3 +-
 .../loading/model/CarbonLoadModel.java  |  72 +
 .../sort/impl/ParallelReadMergeSorterImpl.java  |   4 +-
 ...arallelReadMergeSorterWithBucketingImpl.java |  15 +-
 .../UnsafeBatchParallelReadMergeSorterImpl.java |   7 +-
 ...arallelReadMergeSorterWithBucketingImpl.java |  21 ++-
 .../CarbonRowDataWriterProcessorStepImpl.java   |  33 ++--
 .../steps/DataWriterBatchProcessorStepImpl.java |  25 +--
 .../steps/DataWriterProcessorStepImpl.java  |  22 +--
 .../processing/merger/CarbonDataMergerUtil.java |   6 +-
 .../merger/CompactionResultSortProcessor.java   |   4 +-
 .../sort/sortdata/SortParameters.java   |  16 +-
 .../store/CarbonFactDataHandlerModel.java   |   7 +-
 .../util/CarbonDataProcessorUtil.java   |  12 +-
 .../processing/util/CarbonLoaderUtil.java   |  12 +-
 .../processing/util/DeleteLoadFolders.java  |   7 +-
 .../carbondata/processing/StoreCreator.java |   1 -
 .../carbondata/streaming/StreamHandoffRDD.scala |   1 -
 .../streaming/StreamSinkFactory.scala   |   2 +-
 .../streaming/CarbonAppendableStreamSink.scala  |   8 +-
 53 files changed, 285 insertions(+), 363 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/d3cbb026/core/src/main/java/org/apache/carbondata/core/metadata/PartitionMapFileStore.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/metadata/PartitionMapFileStore.java
 
b/core/src/main/java/org/apache/carbondata/core/metadata/PartitionMapFileStore.java
index 1e9cbc4..43310fe 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/metadata/PartitionMapFileStore.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/metadata/PartitionMapFileStore.java
@@ -363,7 +363,7 @@ public class PartitionMapFileStore {
 List toBeDeletedIndexFiles = new ArrayList<>();
 List toBeDeletedDataFiles = new ArrayList<>();
 // take the list of files from this segment.
-String segmentPath = carbonTablePath.getCarbonDataDirectoryPath("0", 
segment.getLoadName());
+String 

[26/36] carbondata git commit: [CARBONDATA-2099] Refactor query scan process to improve readability

2018-02-09 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/7b79ff18/core/src/main/java/org/apache/carbondata/core/metadata/blocklet/SegmentInfo.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/metadata/blocklet/SegmentInfo.java
 
b/core/src/main/java/org/apache/carbondata/core/metadata/blocklet/SegmentInfo.java
index 0cb2918..099fffd 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/metadata/blocklet/SegmentInfo.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/metadata/blocklet/SegmentInfo.java
@@ -29,31 +29,12 @@ public class SegmentInfo implements Serializable {
   private static final long serialVersionUID = -174987462709431L;
 
   /**
-   * number of column in the segment
-   */
-  private int numberOfColumns;
-
-  /**
* cardinality of each columns
* column which is not participating in the multidimensional key cardinality 
will be -1;
*/
   private int[] columnCardinality;
 
   /**
-   * @return the numberOfColumns
-   */
-  public int getNumberOfColumns() {
-return numberOfColumns;
-  }
-
-  /**
-   * @param numberOfColumns the numberOfColumns to set
-   */
-  public void setNumberOfColumns(int numberOfColumns) {
-this.numberOfColumns = numberOfColumns;
-  }
-
-  /**
* @return the columnCardinality
*/
   public int[] getColumnCardinality() {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/7b79ff18/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
 
b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
index 6036569..d17d865 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
@@ -19,7 +19,13 @@ package org.apache.carbondata.core.metadata.schema.table;
 
 import java.io.IOException;
 import java.io.Serializable;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
@@ -33,7 +39,10 @@ import 
org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
 import 
org.apache.carbondata.core.metadata.schema.table.column.CarbonImplicitDimension;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonMeasure;
 import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
+import org.apache.carbondata.core.scan.model.QueryModel;
+import org.apache.carbondata.core.scan.model.QueryProjection;
 import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.core.util.DataTypeConverter;
 import org.apache.carbondata.core.util.DataTypeUtil;
 import org.apache.carbondata.core.util.path.CarbonTablePath;
 
@@ -136,10 +145,7 @@ public class CarbonTable implements Serializable {
   /**
* During creation of TableInfo from hivemetastore the DataMapSchemas and 
the columns
* DataTypes are not converted to the appropriate child classes.
-   *
* This method will cast the same to the appropriate classes
-   *
-   * @param tableInfo
*/
   public static void updateTableInfo(TableInfo tableInfo) {
List<DataMapSchema> dataMapSchemas = new ArrayList<>();
@@ -153,8 +159,9 @@ public class CarbonTable implements Serializable {
 }
 tableInfo.setDataMapSchemaList(dataMapSchemas);
 for (ColumnSchema columnSchema : 
tableInfo.getFactTable().getListOfColumns()) {
-  columnSchema.setDataType(DataTypeUtil.valueOf(columnSchema.getDataType(),
-  columnSchema.getPrecision(), columnSchema.getScale()));
+  columnSchema.setDataType(
+  DataTypeUtil.valueOf(
+  columnSchema.getDataType(), columnSchema.getPrecision(), 
columnSchema.getScale()));
 }
List<DataMapSchema> childSchema = tableInfo.getDataMapSchemaList();
 for (DataMapSchema dataMapSchema : childSchema) {
@@ -168,10 +175,11 @@ public class CarbonTable implements Serializable {
   }
 }
 if (tableInfo.getFactTable().getBucketingInfo() != null) {
-  for (ColumnSchema columnSchema : tableInfo.getFactTable()
-  .getBucketingInfo().getListOfColumns()) {
-
columnSchema.setDataType(DataTypeUtil.valueOf(columnSchema.getDataType(),
-columnSchema.getPrecision(), columnSchema.getScale()));
+  for (ColumnSchema columnSchema :
+  tableInfo.getFactTable().getBucketingInfo().getListOfColumns()) {
+columnSchema.setDataType(
+DataTypeUtil.valueOf(
+columnSchema.getDataType(), columnSchema.getPrecision(), 

[27/36] carbondata git commit: [CARBONDATA-2099] Refactor query scan process to improve readability

2018-02-09 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/7b79ff18/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeFixedLengthDimensionDataChunkStore.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeFixedLengthDimensionDataChunkStore.java
 
b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeFixedLengthDimensionDataChunkStore.java
index 8c8d08f..a689d8e 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeFixedLengthDimensionDataChunkStore.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeFixedLengthDimensionDataChunkStore.java
@@ -124,22 +124,22 @@ public class UnsafeFixedLengthDimensionDataChunkStore
   /**
* to compare the two byte array
*
-   * @param index index of first byte array
+   * @param rowId index of first byte array
* @param compareValue value of to be compared
* @return compare result
*/
-  @Override public int compareTo(int index, byte[] compareValue) {
+  @Override public int compareTo(int rowId, byte[] compareValue) {
 // based on index we need to calculate the actual position in memory block
-index = index * columnValueSize;
+rowId = rowId * columnValueSize;
 int compareResult = 0;
 for (int i = 0; i < compareValue.length; i++) {
   compareResult = (CarbonUnsafe.getUnsafe()
-  .getByte(dataPageMemoryBlock.getBaseObject(), 
dataPageMemoryBlock.getBaseOffset() + index)
+  .getByte(dataPageMemoryBlock.getBaseObject(), 
dataPageMemoryBlock.getBaseOffset() + rowId)
   & 0xff) - (compareValue[i] & 0xff);
   if (compareResult != 0) {
 break;
   }
-  index++;
+  rowId++;
 }
 return compareResult;
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/7b79ff18/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableLengthDimesionDataChunkStore.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableLengthDimesionDataChunkStore.java
 
b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableLengthDimesionDataChunkStore.java
index 36b2bd8..e1eb378 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableLengthDimesionDataChunkStore.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableLengthDimesionDataChunkStore.java
@@ -189,11 +189,11 @@ public class UnsafeVariableLengthDimesionDataChunkStore
   /**
* to compare the two byte array
*
-   * @param index index of first byte array
+   * @param rowId index of first byte array
* @param compareValue value of to be compared
* @return compare result
*/
-  @Override public int compareTo(int index, byte[] compareValue) {
+  @Override public int compareTo(int rowId, byte[] compareValue) {
 // now to get the row from memory block we need to do following thing
 // 1. first get the current offset
 // 2. if it's not a last row- get the next row offset
@@ -201,13 +201,13 @@ public class UnsafeVariableLengthDimesionDataChunkStore
 // else subtract the current row offset
 // with complete data length get the offset of set of data
 int currentDataOffset = 
CarbonUnsafe.getUnsafe().getInt(dataPageMemoryBlock.getBaseObject(),
-dataPageMemoryBlock.getBaseOffset() + this.dataPointersOffsets + 
((long)index
+dataPageMemoryBlock.getBaseOffset() + this.dataPointersOffsets + 
((long) rowId
 * CarbonCommonConstants.INT_SIZE_IN_BYTE * 1L));
 short length = 0;
 // calculating the length of data
-if (index < numberOfRows - 1) {
+if (rowId < numberOfRows - 1) {
   int OffsetOfNextdata = 
CarbonUnsafe.getUnsafe().getInt(dataPageMemoryBlock.getBaseObject(),
-  dataPageMemoryBlock.getBaseOffset() + this.dataPointersOffsets + 
((index + 1)
+  dataPageMemoryBlock.getBaseOffset() + this.dataPointersOffsets + 
((rowId + 1)
   * CarbonCommonConstants.INT_SIZE_IN_BYTE));
   length = (short) (OffsetOfNextdata - (currentDataOffset
   + CarbonCommonConstants.SHORT_SIZE_IN_BYTE));

http://git-wip-us.apache.org/repos/asf/carbondata/blob/7b79ff18/core/src/main/java/org/apache/carbondata/core/datastore/columnar/ColumnGroupModel.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/datastore/columnar/ColumnGroupModel.java
 
b/core/src/main/java/org/apache/carbondata/core/datastore/columnar/ColumnGroupModel.java
index 74d268a..e2a4161 100644
--- 

[29/36] carbondata git commit: [CARBONDATA-2099] Refactor query scan process to improve readability

2018-02-09 Thread jackylk
[CARBONDATA-2099] Refactor query scan process to improve readability

Unified concepts in the scan process flow (a toy sketch of the terminology 
follows the list):

1. QueryModel contains all parameters for a scan; it is created by an API in 
CarbonTable. (In future, CarbonTable will be the entry point for various table 
operations.)
2. Use the term ColumnChunk to represent one column in one blocklet, and use 
ChunkIndex in the reader to read a specified column chunk.
3. Use the term ColumnPage to represent one page in one ColumnChunk.
4. QueryColumn => ProjectionColumn, indicating it is used for projection.
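
A toy Scala model of the renamed concepts (illustrative stand-ins only, not 
CarbonData's real classes): a blocklet holds one ColumnChunk per column, each 
ColumnChunk is split into ColumnPages, and the reader addresses a column by 
its chunk index.

  final case class ColumnPage(values: Array[AnyRef])        // one page of one column
  final case class ColumnChunk(pages: Seq[ColumnPage])      // one column in one blocklet
  final case class Blocklet(chunks: Map[Int, ColumnChunk])  // keyed by chunk index

  // Reading a projected column then means: look up its chunk by ChunkIndex
  // and walk that chunk's pages.
  def readColumn(blocklet: Blocklet, chunkIndex: Int): Seq[ColumnPage] =
    blocklet.chunks.get(chunkIndex).map(_.pages).getOrElse(Seq.empty)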

This closes #1874


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/7b79ff18
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/7b79ff18
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/7b79ff18

Branch: refs/heads/carbonstore-rebase
Commit: 7b79ff18b7b7a97418f70c1b9ea99b98c536646e
Parents: 3ee9351
Author: Jacky Li 
Authored: Tue Jan 30 21:24:04 2018 +0800
Committer: Jacky Li 
Committed: Sat Feb 10 02:20:10 2018 +0800

--
 .../dictionary/AbstractDictionaryCache.java |   3 +-
 .../cache/dictionary/DictionaryCacheLoader.java |   7 +-
 .../dictionary/DictionaryCacheLoaderImpl.java   |  11 +-
 .../core/datastore/BTreeBuilderInfo.java|   6 -
 .../carbondata/core/datastore/DataRefNode.java  |  81 +--
 .../carbondata/core/datastore/FileHolder.java   | 118 
 .../carbondata/core/datastore/FileReader.java   | 114 +++
 .../core/datastore/block/SegmentProperties.java |  50 +-
 .../chunk/DimensionColumnDataChunk.java | 116 ---
 .../datastore/chunk/DimensionColumnPage.java| 111 +++
 .../chunk/impl/AbstractDimensionColumnPage.java |  89 +++
 .../chunk/impl/AbstractDimensionDataChunk.java  |  95 ---
 .../impl/ColumnGroupDimensionColumnPage.java| 194 ++
 .../impl/ColumnGroupDimensionDataChunk.java | 194 --
 .../chunk/impl/DimensionRawColumnChunk.java |  46 +-
 .../impl/FixedLengthDimensionColumnPage.java| 163 +
 .../impl/FixedLengthDimensionDataChunk.java | 163 -
 .../chunk/impl/MeasureRawColumnChunk.java   |  26 +-
 .../impl/VariableLengthDimensionColumnPage.java | 133 
 .../impl/VariableLengthDimensionDataChunk.java  | 140 
 .../reader/DimensionColumnChunkReader.java  |  14 +-
 .../chunk/reader/MeasureColumnChunkReader.java  |  12 +-
 .../AbstractChunkReaderV2V3Format.java  |  34 +-
 ...mpressedDimensionChunkFileBasedReaderV1.java |  38 +-
 ...mpressedDimensionChunkFileBasedReaderV2.java |  30 +-
 ...essedDimChunkFileBasedPageLevelReaderV3.java |  11 +-
 ...mpressedDimensionChunkFileBasedReaderV3.java |  49 +-
 .../AbstractMeasureChunkReaderV2V3Format.java   |  42 +-
 ...CompressedMeasureChunkFileBasedReaderV1.java |  16 +-
 ...CompressedMeasureChunkFileBasedReaderV2.java |  24 +-
 ...CompressedMeasureChunkFileBasedReaderV3.java |  45 +-
 ...essedMsrChunkFileBasedPageLevelReaderV3.java |   8 +-
 .../chunk/store/ColumnPageWrapper.java  |  30 +-
 .../chunk/store/DimensionDataChunkStore.java|   8 +-
 .../SafeFixedLengthDimensionDataChunkStore.java |   6 +-
 ...feVariableLengthDimensionDataChunkStore.java |   8 +-
 ...nsafeFixedLengthDimensionDataChunkStore.java |  10 +-
 ...afeVariableLengthDimesionDataChunkStore.java |  10 +-
 .../datastore/columnar/ColumnGroupModel.java|  26 -
 .../core/datastore/impl/DFSFileHolderImpl.java  | 166 -
 .../core/datastore/impl/DFSFileReaderImpl.java  | 155 
 .../datastore/impl/DefaultFileTypeProvider.java |  16 +-
 .../core/datastore/impl/FileFactory.java|   4 +-
 .../core/datastore/impl/FileHolderImpl.java | 224 --
 .../core/datastore/impl/FileReaderImpl.java | 215 ++
 .../core/datastore/impl/FileTypeInerface.java   |   4 +-
 .../impl/btree/AbstractBTreeLeafNode.java   |  60 +-
 .../impl/btree/BTreeDataRefNodeFinder.java  |   6 +-
 .../datastore/impl/btree/BTreeNonLeafNode.java  |  52 +-
 .../impl/btree/BlockBTreeLeafNode.java  |   6 +-
 .../impl/btree/BlockletBTreeLeafNode.java   |  46 +-
 .../page/encoding/EncodingFactory.java  |   8 +-
 .../server/NonSecureDictionaryServer.java   |   1 -
 .../core/indexstore/BlockletDetailInfo.java |   4 -
 .../blockletindex/BlockletDataRefNode.java  | 228 ++
 .../BlockletDataRefNodeWrapper.java | 241 ---
 .../indexstore/blockletindex/IndexWrapper.java  |   2 +-
 .../blockletindex/SegmentIndexFileStore.java|   7 +-
 .../core/memory/HeapMemoryAllocator.java|   2 +-
 .../core/metadata/blocklet/SegmentInfo.java |  19 -
 .../core/metadata/schema/table/CarbonTable.java | 130 +++-
 .../schema/table/RelationIdentifier.java|  16 -
 .../core/metadata/schema/table/TableInfo.java   |   6 +-
 .../schema/table/column/CarbonColumn.java   |   2 +-
 .../schema/table/column/CarbonDimension.java|  12 -
 

[31/36] carbondata git commit: [CARBONDATA-2018][DataLoad] Optimization in reading/writing for sort temp row

2018-02-09 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/1acc4e18/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeSortTempFileChunkHolder.java
--
diff --git 
a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeSortTempFileChunkHolder.java
 
b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeSortTempFileChunkHolder.java
index 11b3d43..527452a 100644
--- 
a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeSortTempFileChunkHolder.java
+++ 
b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeSortTempFileChunkHolder.java
@@ -31,15 +31,14 @@ import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.datastore.impl.FileFactory;
-import org.apache.carbondata.core.metadata.datatype.DataType;
-import org.apache.carbondata.core.metadata.datatype.DataTypes;
 import org.apache.carbondata.core.util.CarbonProperties;
 import org.apache.carbondata.core.util.CarbonUtil;
-import org.apache.carbondata.core.util.DataTypeUtil;
-import 
org.apache.carbondata.processing.loading.sort.unsafe.UnsafeCarbonRowPage;
+import org.apache.carbondata.processing.loading.row.IntermediateSortTempRow;
+import org.apache.carbondata.processing.loading.sort.SortStepRowHandler;
 import 
org.apache.carbondata.processing.sort.exception.CarbonSortKeyAndGroupByException;
-import org.apache.carbondata.processing.sort.sortdata.NewRowComparator;
+import 
org.apache.carbondata.processing.sort.sortdata.IntermediateSortTempRowComparator;
 import org.apache.carbondata.processing.sort.sortdata.SortParameters;
+import org.apache.carbondata.processing.sort.sortdata.TableFieldStat;
 
 public class UnsafeSortTempFileChunkHolder implements SortTempChunkHolder {
 
@@ -63,21 +62,15 @@ public class UnsafeSortTempFileChunkHolder implements 
SortTempChunkHolder {
* entry count
*/
   private int entryCount;
-
   /**
* return row
*/
-  private Object[] returnRow;
-  private int dimCnt;
-  private int complexCnt;
-  private int measureCnt;
-  private boolean[] isNoDictionaryDimensionColumn;
-  private DataType[] measureDataTypes;
+  private IntermediateSortTempRow returnRow;
   private int readBufferSize;
   private String compressorName;
-  private Object[][] currentBuffer;
+  private IntermediateSortTempRow[] currentBuffer;
 
-  private Object[][] backupBuffer;
+  private IntermediateSortTempRow[] backupBuffer;
 
   private boolean isBackupFilled;
 
@@ -100,27 +93,21 @@ public class UnsafeSortTempFileChunkHolder implements 
SortTempChunkHolder {
 
   private int numberOfObjectRead;
 
-  private int nullSetWordsLength;
-
-  private Comparator<Object[]> comparator;
-
+  private TableFieldStat tableFieldStat;
+  private SortStepRowHandler sortStepRowHandler;
+  private Comparator<IntermediateSortTempRow> comparator;
   /**
* Constructor to initialize
*/
   public UnsafeSortTempFileChunkHolder(File tempFile, SortParameters 
parameters) {
 // set temp file
 this.tempFile = tempFile;
-this.dimCnt = parameters.getDimColCount();
-this.complexCnt = parameters.getComplexDimColCount();
-this.measureCnt = parameters.getMeasureColCount();
-this.isNoDictionaryDimensionColumn = 
parameters.getNoDictionaryDimnesionColumn();
-this.measureDataTypes = parameters.getMeasureDataType();
 this.readBufferSize = parameters.getBufferSize();
 this.compressorName = parameters.getSortTempCompressorName();
-
+this.tableFieldStat = new TableFieldStat(parameters);
+this.sortStepRowHandler = new SortStepRowHandler(tableFieldStat);
 this.executorService = Executors.newFixedThreadPool(1);
-this.nullSetWordsLength = ((parameters.getMeasureColCount() - 1) >> 6) + 1;
-comparator = new NewRowComparator(parameters.getNoDictionarySortColumn());
+comparator = new 
IntermediateSortTempRowComparator(parameters.getNoDictionarySortColumn());
 initialize();
   }
 
@@ -169,11 +156,17 @@ public class UnsafeSortTempFileChunkHolder implements 
SortTempChunkHolder {
*
* @throws CarbonSortKeyAndGroupByException problem while reading
*/
+  @Override
   public void readRow() throws CarbonSortKeyAndGroupByException {
 if (prefetch) {
   fillDataForPrefetch();
 } else {
-  this.returnRow = getRowFromStream();
+  try {
+this.returnRow = 
sortStepRowHandler.readIntermediateSortTempRowFromInputStream(stream);
+this.numberOfObjectRead++;
+  } catch (IOException e) {
+throw new CarbonSortKeyAndGroupByException("Problems while reading 
row", e);
+  }
 }
   }
 
@@ -207,63 +200,22 @@ public class UnsafeSortTempFileChunkHolder implements 
SortTempChunkHolder {
   }
 
   /**

[05/36] carbondata git commit: [CARBONDATA-2150] Unwanted updatetable status files are being generated for the delete operation where no records are deleted

2018-02-09 Thread jackylk
[CARBONDATA-2150] Unwanted update table status files are being generated for 
delete operations in which no records are deleted

Problem:
Unwanted update table status files are being generated for delete operations 
in which no records are deleted.

Analysis:
When the filter value of a delete operation is less than the maximum value in 
that column, getSplits() will still return the block, so the delete logic was 
creating an update table status file even though no delete was actually 
performed. This change also passes the Spark context when creating the 
database event.
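
A minimal sketch of the resulting guard (hypothetical names; the real fix 
lives in DeleteExecution): write the update table status file only when rows 
were actually deleted, not merely because getSplits() returned blocks.

  // Hypothetical helper illustrating the guard described above.
  def finishDelete(deletedRowCount: Long, writeUpdateStatusFile: () => Unit): Unit = {
    if (deletedRowCount > 0) {
      writeUpdateStatusFile() // record new update status only for a real delete
    }
    // otherwise: no rows matched the filter, leave the status files untouched
  }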

This closes #1957


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/da549c2b
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/da549c2b
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/da549c2b

Branch: refs/heads/carbonstore-rebase
Commit: da549c2b721ae66f55fd28bebe775ac10373c4e2
Parents: 420cc35
Author: akashrn5 
Authored: Thu Feb 8 23:10:43 2018 +0530
Committer: kunal642 
Committed: Fri Feb 9 19:30:47 2018 +0530

--
 .../iud/DeleteCarbonTableTestCase.scala | 22 +++-
 .../events/CreateDatabaseEvents.scala   |  6 +-
 .../scala/org/apache/spark/util/FileUtils.scala |  5 +++--
 .../command/mutation/DeleteExecution.scala  |  2 +-
 .../sql/execution/strategy/DDLStrategy.scala|  2 +-
 5 files changed, 31 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/da549c2b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
--
diff --git 
a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
 
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
index 6521657..22aa385 100644
--- 
a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
+++ 
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
@@ -17,9 +17,11 @@
 package org.apache.carbondata.spark.testsuite.iud
 
 import org.apache.spark.sql.test.util.QueryTest
-import org.apache.spark.sql.{Row, SaveMode}
+import org.apache.spark.sql.{CarbonEnv, Row, SaveMode}
 import org.scalatest.BeforeAndAfterAll
 
+import org.apache.carbondata.core.datastore.impl.FileFactory
+
 
 class DeleteCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
   override def beforeAll {
@@ -180,6 +182,24 @@ class DeleteCarbonTableTestCase extends QueryTest with 
BeforeAndAfterAll {
   Seq(Row(1, "abc"), Row(3, "uhj"), Row(4, "frg")))
   }
 
+  test("test number of update table status files after delete query where no 
records are deleted") {
+sql("drop table if exists update_status_files")
+sql("create table update_status_files(name string,age int) stored by 
'carbondata'")
+sql("insert into update_status_files select 'abc',1")
+sql("insert into update_status_files select 'def',2")
+sql("insert into update_status_files select 'xyz',4")
+sql("insert into update_status_files select 'abc',6")
+sql("alter table update_status_files compact 'minor'")
+sql("delete from update_status_files where age=3").show()
+sql("delete from update_status_files where age=5").show()
+val carbonTable = CarbonEnv
+  .getCarbonTable(Some("iud_db"), 
"update_status_files")(sqlContext.sparkSession)
+val metaPath = carbonTable.getMetaDataFilepath
+val files = FileFactory.getCarbonFile(metaPath)
+assert(files.listFiles().length == 2)
+sql("drop table update_status_files")
+  }
+
 
   override def afterAll {
 sql("use default")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da549c2b/integration/spark-common/src/main/scala/org/apache/carbondata/events/CreateDatabaseEvents.scala
--
diff --git 
a/integration/spark-common/src/main/scala/org/apache/carbondata/events/CreateDatabaseEvents.scala
 
b/integration/spark-common/src/main/scala/org/apache/carbondata/events/CreateDatabaseEvents.scala
index dae22b1..c1d79db 100644
--- 
a/integration/spark-common/src/main/scala/org/apache/carbondata/events/CreateDatabaseEvents.scala
+++ 
b/integration/spark-common/src/main/scala/org/apache/carbondata/events/CreateDatabaseEvents.scala
@@ -17,11 +17,15 @@
 
 package org.apache.carbondata.events
 
+import org.apache.spark.SparkContext
+
 
 case class CreateDatabasePreExecutionEvent(databaseName: String) extends Event
   with DatabaseEventInfo
 
-case class 

[08/36] carbondata git commit: [CARBONDATA-1992] Remove partitionId in CarbonTablePath

2018-02-09 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/d3cbb026/processing/src/main/java/org/apache/carbondata/processing/loading/steps/CarbonRowDataWriterProcessorStepImpl.java
--
diff --git 
a/processing/src/main/java/org/apache/carbondata/processing/loading/steps/CarbonRowDataWriterProcessorStepImpl.java
 
b/processing/src/main/java/org/apache/carbondata/processing/loading/steps/CarbonRowDataWriterProcessorStepImpl.java
index 8b87cfc..6cf1dcd 100644
--- 
a/processing/src/main/java/org/apache/carbondata/processing/loading/steps/CarbonRowDataWriterProcessorStepImpl.java
+++ 
b/processing/src/main/java/org/apache/carbondata/processing/loading/steps/CarbonRowDataWriterProcessorStepImpl.java
@@ -32,6 +32,7 @@ import 
org.apache.carbondata.core.metadata.CarbonTableIdentifier;
 import org.apache.carbondata.core.metadata.datatype.DataType;
 import org.apache.carbondata.core.util.CarbonThreadFactory;
 import org.apache.carbondata.core.util.CarbonTimeStatisticsFactory;
+import org.apache.carbondata.core.util.path.CarbonTablePath;
 import org.apache.carbondata.processing.loading.AbstractDataLoadProcessorStep;
 import org.apache.carbondata.processing.loading.CarbonDataLoadConfiguration;
 import org.apache.carbondata.processing.loading.DataField;
@@ -88,11 +89,13 @@ public class CarbonRowDataWriterProcessorStepImpl extends 
AbstractDataLoadProces
 child.initialize();
   }
 
-  private String[] getStoreLocation(CarbonTableIdentifier tableIdentifier, 
String partitionId) {
-String[] storeLocation = CarbonDataProcessorUtil
-.getLocalDataFolderLocation(tableIdentifier.getDatabaseName(),
-tableIdentifier.getTableName(), 
String.valueOf(configuration.getTaskNo()), partitionId,
-configuration.getSegmentId() + "", false, false);
+  private String[] getStoreLocation(CarbonTableIdentifier tableIdentifier) {
+String[] storeLocation = 
CarbonDataProcessorUtil.getLocalDataFolderLocation(
+tableIdentifier.getDatabaseName(),
+tableIdentifier.getTableName(),
+String.valueOf(configuration.getTaskNo()), 
configuration.getSegmentId(),
+false,
+false);
 CarbonDataProcessorUtil.createLocations(storeLocation);
 return storeLocation;
   }
@@ -115,11 +118,11 @@ public class CarbonRowDataWriterProcessorStepImpl extends 
AbstractDataLoadProces
   measureCount = configuration.getMeasureCount();
   outputLength = measureCount + (this.noDictWithComplextCount > 0 ? 1 : 0) 
+ 1;
   CarbonTimeStatisticsFactory.getLoadStatisticsInstance()
-  
.recordDictionaryValue2MdkAdd2FileTime(configuration.getPartitionId(),
+  
.recordDictionaryValue2MdkAdd2FileTime(CarbonTablePath.DEPRECATED_PATITION_ID,
   System.currentTimeMillis());
 
   if (iterators.length == 1) {
-doExecute(iterators[0], 0, 0);
+doExecute(iterators[0], 0);
   } else {
 executorService = Executors.newFixedThreadPool(iterators.length,
 new CarbonThreadFactory("NoSortDataWriterPool:" + 
configuration.getTableIdentifier()
@@ -150,11 +153,10 @@ public class CarbonRowDataWriterProcessorStepImpl extends 
AbstractDataLoadProces
 return null;
   }
 
-  private void doExecute(Iterator iterator, int partitionId, 
int iteratorIndex) {
-String[] storeLocation = getStoreLocation(tableIdentifier, 
String.valueOf(partitionId));
-CarbonFactDataHandlerModel model = CarbonFactDataHandlerModel
-.createCarbonFactDataHandlerModel(configuration, storeLocation, 
partitionId,
-iteratorIndex);
+  private void doExecute(Iterator iterator, int iteratorIndex) 
{
+String[] storeLocation = getStoreLocation(tableIdentifier);
+CarbonFactDataHandlerModel model = 
CarbonFactDataHandlerModel.createCarbonFactDataHandlerModel(
+configuration, storeLocation, 0, iteratorIndex);
 CarbonFactHandler dataHandler = null;
 boolean rowsNotExist = true;
 while (iterator.hasNext()) {
@@ -189,10 +191,11 @@ public class CarbonRowDataWriterProcessorStepImpl extends 
AbstractDataLoadProces
 
CarbonTimeStatisticsFactory.getLoadStatisticsInstance().recordTotalRecords(rowCounter.get());
 processingComplete(dataHandler);
 CarbonTimeStatisticsFactory.getLoadStatisticsInstance()
-.recordDictionaryValue2MdkAdd2FileTime(configuration.getPartitionId(),
+
.recordDictionaryValue2MdkAdd2FileTime(CarbonTablePath.DEPRECATED_PATITION_ID,
 System.currentTimeMillis());
 CarbonTimeStatisticsFactory.getLoadStatisticsInstance()
-.recordMdkGenerateTotalTime(configuration.getPartitionId(), 
System.currentTimeMillis());
+.recordMdkGenerateTotalTime(CarbonTablePath.DEPRECATED_PATITION_ID,
+System.currentTimeMillis());
   }
 
   private void processingComplete(CarbonFactHandler dataHandler) throws 
CarbonDataLoadingException {
@@ -298,7 +301,7 @@ public class 

[23/36] carbondata git commit: [CARBONDATA-2099] Refactor query scan process to improve readability

2018-02-09 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/7b79ff18/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecuterImpl.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecuterImpl.java
 
b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecuterImpl.java
index 89489a2..2bbe491 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecuterImpl.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelFilterExecuterImpl.java
@@ -34,8 +34,8 @@ import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.carbondata.core.cache.dictionary.Dictionary;
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.datastore.block.SegmentProperties;
-import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
-import 
org.apache.carbondata.core.datastore.chunk.impl.VariableLengthDimensionDataChunk;
+import org.apache.carbondata.core.datastore.chunk.DimensionColumnPage;
+import 
org.apache.carbondata.core.datastore.chunk.impl.VariableLengthDimensionColumnPage;
 import org.apache.carbondata.core.datastore.page.ColumnPage;
 import org.apache.carbondata.core.keygenerator.KeyGenException;
 import 
org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
@@ -58,7 +58,7 @@ import org.apache.carbondata.core.scan.filter.intf.RowImpl;
 import org.apache.carbondata.core.scan.filter.intf.RowIntf;
 import 
org.apache.carbondata.core.scan.filter.resolver.resolverinfo.DimColumnResolvedFilterInfo;
 import 
org.apache.carbondata.core.scan.filter.resolver.resolverinfo.MeasureColumnResolvedFilterInfo;
-import org.apache.carbondata.core.scan.processor.BlocksChunkHolder;
+import org.apache.carbondata.core.scan.processor.RawBlockletColumnChunks;
 import org.apache.carbondata.core.util.BitSetGroup;
 import org.apache.carbondata.core.util.CarbonUtil;
 import org.apache.carbondata.core.util.DataTypeUtil;
@@ -67,20 +67,20 @@ public class RowLevelFilterExecuterImpl implements 
FilterExecuter {
 
   private static final LogService LOGGER =
   
LogServiceFactory.getLogService(RowLevelFilterExecuterImpl.class.getName());
-  protected List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList;
-  protected List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList;
+  List<DimColumnResolvedFilterInfo> dimColEvaluatorInfoList;
+  List<MeasureColumnResolvedFilterInfo> msrColEvalutorInfoList;
   protected Expression exp;
   protected AbsoluteTableIdentifier tableIdentifier;
   protected SegmentProperties segmentProperties;
   /**
* it has index at which given dimension is stored in file
*/
-  protected int[] dimensionBlocksIndex;
+  int[] dimensionChunkIndex;
 
   /**
* it has index at which given measure is stored in file
*/
-  protected int[] measureBlocksIndex;
+  int[] measureChunkIndex;
 
   private Map complexDimensionInfoMap;
 
@@ -88,18 +88,18 @@ public class RowLevelFilterExecuterImpl implements 
FilterExecuter {
* flag to check whether the filter dimension is present in current block 
list of dimensions.
* Applicable for restructure scenarios
*/
-  protected boolean[] isDimensionPresentInCurrentBlock;
+  boolean[] isDimensionPresentInCurrentBlock;
 
   /**
* flag to check whether the filter measure is present in current block list 
of measures.
* Applicable for restructure scenarios
*/
-  protected boolean[] isMeasurePresentInCurrentBlock;
+  boolean[] isMeasurePresentInCurrentBlock;
 
   /**
* is dimension column data is natural sorted
*/
-  protected boolean isNaturalSorted;
+  boolean isNaturalSorted;
 
   /**
* date direct dictionary generator
@@ -123,10 +123,10 @@ public class RowLevelFilterExecuterImpl implements 
FilterExecuter {
 }
 if (this.dimColEvaluatorInfoList.size() > 0) {
   this.isDimensionPresentInCurrentBlock = new 
boolean[dimColEvaluatorInfoList.size()];
-  this.dimensionBlocksIndex = new int[dimColEvaluatorInfoList.size()];
+  this.dimensionChunkIndex = new int[dimColEvaluatorInfoList.size()];
 } else {
   this.isDimensionPresentInCurrentBlock = new boolean[]{false};
-  this.dimensionBlocksIndex = new int[]{0};
+  this.dimensionChunkIndex = new int[]{0};
 }
 if (null == msrColEvalutorInfoList) {
  this.msrColEvalutorInfoList = new ArrayList<MeasureColumnResolvedFilterInfo>(20);
@@ -135,10 +135,10 @@ public class RowLevelFilterExecuterImpl implements 
FilterExecuter {
 }
 if (this.msrColEvalutorInfoList.size() > 0) {
   this.isMeasurePresentInCurrentBlock = new 
boolean[msrColEvalutorInfoList.size()];
-  this.measureBlocksIndex = new int[msrColEvalutorInfoList.size()];
+  this.measureChunkIndex = new int[msrColEvalutorInfoList.size()];
 } else {
   this.isMeasurePresentInCurrentBlock = new boolean[]{false};
-  this.measureBlocksIndex = new int[] 

[10/36] carbondata git commit: [CARBONDATA-1827] S3 Carbon Implementation

2018-02-09 Thread jackylk
[CARBONDATA-1827] S3 Carbon Implementation

1. Provide support for S3 in CarbonData (a configuration sketch follows this list).
2. Added S3Example to create a carbon table on S3.
3. Added S3CsvExample to load a carbon table from CSV on S3.
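
A minimal sketch (assuming a local SparkSession and placeholder credentials) 
of wiring S3 credentials into the Hadoop configuration: the fs.s3/fs.s3n 
property names mirror the constants added to CarbonCommonConstants in this 
patch, and the fs.s3a keys are the standard Hadoop S3A ones.

  import org.apache.spark.sql.SparkSession

  val spark = SparkSession.builder()
    .appName("carbon-s3-sketch")
    .master("local[2]")
    .getOrCreate()

  val hadoopConf = spark.sparkContext.hadoopConfiguration
  hadoopConf.set("fs.s3a.access.key", "<access-key>")         // S3A
  hadoopConf.set("fs.s3a.secret.key", "<secret-key>")
  hadoopConf.set("fs.s3n.awsAccessKeyId", "<access-key>")     // S3N_ACCESS_KEY
  hadoopConf.set("fs.s3n.awsSecretAccessKey", "<secret-key>") // S3N_SECRET_KEY
  // after this, loads and queries against an s3a:// table path can authenticate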

This closes #1805


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/65679b8e
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/65679b8e
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/65679b8e

Branch: refs/heads/carbonstore-rebase
Commit: 65679b8e1d7f5e4eb77b512f9dd79c369eab554b
Parents: b3f9f84
Author: SangeetaGulia 
Authored: Thu Sep 21 14:56:26 2017 +0530
Committer: Jacky Li 
Committed: Sat Feb 10 02:20:09 2018 +0800

--
 .../core/constants/CarbonCommonConstants.java   |  21 +++
 .../filesystem/AbstractDFSCarbonFile.java   |  20 ++-
 .../datastore/filesystem/HDFSCarbonFile.java|   5 +-
 .../core/datastore/impl/FileFactory.java|  11 +-
 .../core/locks/CarbonLockFactory.java   |  28 ++--
 .../carbondata/core/locks/S3FileLock.java   | 111 +
 .../carbondata/core/util/CarbonProperties.java  |   3 +-
 .../filesystem/HDFSCarbonFileTest.java  |   8 +-
 examples/spark2/pom.xml |   5 +
 examples/spark2/src/main/resources/data1.csv|  11 ++
 .../carbondata/examples/S3CsvExample.scala  |  99 +++
 .../apache/carbondata/examples/S3Example.scala  | 164 +++
 .../spark/rdd/NewCarbonDataLoadRDD.scala|  42 -
 integration/spark2/pom.xml  |  43 +
 .../spark/rdd/CarbonDataRDDFactory.scala|   3 +-
 .../org/apache/spark/sql/CarbonSession.scala|   3 +
 16 files changed, 554 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/65679b8e/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
 
b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
index 6e6482d..2e169c2 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
@@ -167,6 +167,22 @@ public final class CarbonCommonConstants {
   public static final String S3N_PREFIX = "s3n://";
 
   public static final String S3A_PREFIX = "s3a://";
+  /**
+   * Access Key for s3n
+   */
+  public static final String S3N_ACCESS_KEY = "fs.s3n.awsAccessKeyId";
+  /**
+   * Secret Key for s3n
+   */
+  public static final String S3N_SECRET_KEY = "fs.s3n.awsSecretAccessKey";
+  /**
+   * Access Key for s3
+   */
+  public static final String S3_ACCESS_KEY = "fs.s3.awsAccessKeyId";
+  /**
+   * Secret Key for s3
+   */
+  public static final String S3_SECRET_KEY = "fs.s3.awsSecretAccessKey";
 
   /**
* FS_DEFAULT_FS
@@ -941,6 +957,11 @@ public final class CarbonCommonConstants {
   public static final String CARBON_LOCK_TYPE_HDFS = "HDFSLOCK";
 
   /**
+   * S3LOCK TYPE
+   */
+  public static final String CARBON_LOCK_TYPE_S3 = "S3LOCK";
+
+  /**
* Invalid filter member log string
*/
   public static final String FILTER_INVALID_MEMBER =

http://git-wip-us.apache.org/repos/asf/carbondata/blob/65679b8e/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/AbstractDFSCarbonFile.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/AbstractDFSCarbonFile.java
 
b/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/AbstractDFSCarbonFile.java
index 7b634d2..e1a34fa 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/AbstractDFSCarbonFile.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/AbstractDFSCarbonFile.java
@@ -50,7 +50,7 @@ import org.apache.hadoop.io.compress.GzipCodec;
 import org.apache.hadoop.io.compress.Lz4Codec;
 import org.apache.hadoop.io.compress.SnappyCodec;
 
-public abstract  class AbstractDFSCarbonFile implements CarbonFile {
+public abstract class AbstractDFSCarbonFile implements CarbonFile {
   /**
* LOGGER
*/
@@ -261,18 +261,28 @@ public abstract  class AbstractDFSCarbonFile implements 
CarbonFile {
   @Override public DataOutputStream getDataOutputStream(String path, 
FileFactory.FileType fileType,
   int bufferSize, boolean append) throws IOException {
 Path pt = new Path(path);
-FileSystem fs = pt.getFileSystem(FileFactory.getConfiguration());
+FileSystem fileSystem = pt.getFileSystem(FileFactory.getConfiguration());
 FSDataOutputStream 

[01/36] carbondata git commit: [HOTFIX] Upgrade pom version from 1.3.0 to 1.4.0 [Forced Update!]

2018-02-09 Thread jackylk
Repository: carbondata
Updated Branches:
  refs/heads/carbonstore-rebase 132fbd4d8 -> 3d055c187 (forced update)


[HOTFIX] Upgrade pom version from 1.3.0 to 1.4.0

Upgrade pom version from 1.3.0 to 1.4.0

This closes #1961


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/957a51fe
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/957a51fe
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/957a51fe

Branch: refs/heads/carbonstore-rebase
Commit: 957a51fefdfcd810a53e3df6699ee073d5873a15
Parents: 11a795c
Author: chenliang613 
Authored: Fri Feb 9 16:17:37 2018 +0800
Committer: chenliang613 
Committed: Fri Feb 9 16:20:21 2018 +0800

--
 assembly/pom.xml  | 2 +-
 common/pom.xml| 2 +-
 core/pom.xml  | 2 +-
 examples/flink/pom.xml| 2 +-
 examples/spark2/pom.xml   | 2 +-
 format/pom.xml| 2 +-
 hadoop/pom.xml| 2 +-
 integration/hive/pom.xml  | 2 +-
 integration/presto/pom.xml| 2 +-
 integration/spark-common-cluster-test/pom.xml | 2 +-
 integration/spark-common-test/pom.xml | 2 +-
 integration/spark-common/pom.xml  | 2 +-
 integration/spark2/pom.xml| 2 +-
 pom.xml   | 2 +-
 streaming/pom.xml | 2 +-
 15 files changed, 15 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/957a51fe/assembly/pom.xml
--
diff --git a/assembly/pom.xml b/assembly/pom.xml
index 76ad8a5..effc271 100644
--- a/assembly/pom.xml
+++ b/assembly/pom.xml
@@ -22,7 +22,7 @@
   
 org.apache.carbondata
 carbondata-parent
-1.3.0-SNAPSHOT
+1.4.0-SNAPSHOT
 ../pom.xml
   
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/957a51fe/common/pom.xml
--
diff --git a/common/pom.xml b/common/pom.xml
index 6343361..5550129 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -22,7 +22,7 @@
   
 org.apache.carbondata
 carbondata-parent
-1.3.0-SNAPSHOT
+1.4.0-SNAPSHOT
 ../pom.xml
   
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/957a51fe/core/pom.xml
--
diff --git a/core/pom.xml b/core/pom.xml
index f874615..92c9607 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -22,7 +22,7 @@
   
 org.apache.carbondata
 carbondata-parent
-1.3.0-SNAPSHOT
+1.4.0-SNAPSHOT
 ../pom.xml
   
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/957a51fe/examples/flink/pom.xml
--
diff --git a/examples/flink/pom.xml b/examples/flink/pom.xml
index aa2a14f..75913cf 100644
--- a/examples/flink/pom.xml
+++ b/examples/flink/pom.xml
@@ -22,7 +22,7 @@
   
 org.apache.carbondata
 carbondata-parent
-1.3.0-SNAPSHOT
+1.4.0-SNAPSHOT
 ../../pom.xml
   
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/957a51fe/examples/spark2/pom.xml
--
diff --git a/examples/spark2/pom.xml b/examples/spark2/pom.xml
index da39f1d..c17f0ee 100644
--- a/examples/spark2/pom.xml
+++ b/examples/spark2/pom.xml
@@ -22,7 +22,7 @@
   
 org.apache.carbondata
 carbondata-parent
-1.3.0-SNAPSHOT
+1.4.0-SNAPSHOT
 ../../pom.xml
   
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/957a51fe/format/pom.xml
--
diff --git a/format/pom.xml b/format/pom.xml
index ae84f0b..41197cf 100644
--- a/format/pom.xml
+++ b/format/pom.xml
@@ -22,7 +22,7 @@
   
 org.apache.carbondata
 carbondata-parent
-1.3.0-SNAPSHOT
+1.4.0-SNAPSHOT
 ../pom.xml
   
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/957a51fe/hadoop/pom.xml
--
diff --git a/hadoop/pom.xml b/hadoop/pom.xml
index 206246f..2aaac99 100644
--- a/hadoop/pom.xml
+++ b/hadoop/pom.xml
@@ -22,7 +22,7 @@
   
 org.apache.carbondata
 carbondata-parent
-1.3.0-SNAPSHOT
+1.4.0-SNAPSHOT
 ../pom.xml
   
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/957a51fe/integration/hive/pom.xml
--
diff --git a/integration/hive/pom.xml b/integration/hive/pom.xml
index e0ad499..68245db 100644
--- a/integration/hive/pom.xml
+++ 

[28/36] carbondata git commit: [CARBONDATA-2099] Refactor query scan process to improve readability

2018-02-09 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/7b79ff18/core/src/main/java/org/apache/carbondata/core/datastore/chunk/impl/FixedLengthDimensionDataChunk.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/impl/FixedLengthDimensionDataChunk.java
 
b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/impl/FixedLengthDimensionDataChunk.java
deleted file mode 100644
index 6629d31..000
--- 
a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/impl/FixedLengthDimensionDataChunk.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.carbondata.core.datastore.chunk.impl;
-
-import org.apache.carbondata.core.constants.CarbonCommonConstants;
-import 
org.apache.carbondata.core.datastore.chunk.store.DimensionChunkStoreFactory;
-import 
org.apache.carbondata.core.datastore.chunk.store.DimensionChunkStoreFactory.DimensionStoreType;
-import org.apache.carbondata.core.metadata.datatype.DataType;
-import org.apache.carbondata.core.metadata.datatype.DataTypes;
-import org.apache.carbondata.core.scan.executor.infos.KeyStructureInfo;
-import org.apache.carbondata.core.scan.result.vector.CarbonColumnVector;
-import org.apache.carbondata.core.scan.result.vector.ColumnVectorInfo;
-
-/**
- * This class is gives access to fixed length dimension data chunk store
- */
-public class FixedLengthDimensionDataChunk extends AbstractDimensionDataChunk {
-
-  /**
-   * Constructor
-   *
-   * @param dataChunkdata chunk
-   * @param invertedIndexinverted index
-   * @param invertedIndexReverse reverse inverted index
-   * @param numberOfRows number of rows
-   * @param columnValueSize  size of each column value
-   */
-  public FixedLengthDimensionDataChunk(byte[] dataChunk, int[] invertedIndex,
-      int[] invertedIndexReverse, int numberOfRows, int columnValueSize) {
-    long totalSize = null != invertedIndex ?
-        dataChunk.length + (2 * numberOfRows * CarbonCommonConstants.INT_SIZE_IN_BYTE) :
-        dataChunk.length;
-    dataChunkStore = DimensionChunkStoreFactory.INSTANCE
-        .getDimensionChunkStore(columnValueSize, null != invertedIndex, numberOfRows, totalSize,
-            DimensionStoreType.FIXEDLENGTH);
-    dataChunkStore.putArray(invertedIndex, invertedIndexReverse, dataChunk);
-  }
-
-  /**
-   * Below method will be used to fill the data based on offset and row id
-   *
-   * @param data             data to be filled
-   * @param offset           offset from which data needs to be filled
-   * @param index            row id of the chunk
-   * @param keyStructureInfo define the structure of the key
-   * @return how many bytes were copied
-   */
-  @Override public int fillChunkData(byte[] data, int offset, int index,
-      KeyStructureInfo keyStructureInfo) {
-    dataChunkStore.fillRow(index, data, offset);
-    return dataChunkStore.getColumnValueSize();
-  }
-
-  /**
-   * Converts to column dictionary integer value
-   *
-   * @param rowId
-   * @param columnIndex
-   * @param row
-   * @param restructuringInfo
-   * @return
-   */
-  @Override public int fillConvertedChunkData(int rowId, int columnIndex, int[] row,
-      KeyStructureInfo restructuringInfo) {
-    row[columnIndex] = dataChunkStore.getSurrogate(rowId);
-    return columnIndex + 1;
-  }
-
-  /**
-   * Fill the data to vector
-   *
-   * @param vectorInfo
-   * @param column
-   * @param restructuringInfo
-   * @return next column index
-   */
-  @Override public int fillConvertedChunkData(ColumnVectorInfo[] vectorInfo, int column,
-      KeyStructureInfo restructuringInfo) {
-    ColumnVectorInfo columnVectorInfo = vectorInfo[column];
-    int offset = columnVectorInfo.offset;
-    int vectorOffset = columnVectorInfo.vectorOffset;
-    int len = columnVectorInfo.size + offset;
-    CarbonColumnVector vector = columnVectorInfo.vector;
-    for (int j = offset; j < len; j++) {
-      int dict = dataChunkStore.getSurrogate(j);
-      if (columnVectorInfo.directDictionaryGenerator == null) {
-        vector.putInt(vectorOffset++, dict);
-      } else {
-        Object 

[06/36] carbondata git commit: [REBASE] Solve conflict after rebasing master

2018-02-09 Thread jackylk
[REBASE] Solve conflict after rebasing master


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/3ee9351a
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/3ee9351a
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/3ee9351a

Branch: refs/heads/carbonstore-rebase
Commit: 3ee9351aa057bb03fdfbf141a26a516afcbd11f6
Parents: 65679b8
Author: Jacky Li 
Authored: Thu Feb 1 00:25:31 2018 +0800
Committer: Jacky Li 
Committed: Sat Feb 10 02:20:09 2018 +0800

--
 .../hadoop/util/CarbonInputFormatUtil.java  | 20 +++
 .../spark/rdd/NewCarbonDataLoadRDD.scala| 21 ++--
 .../org/apache/spark/sql/CarbonSession.scala|  5 ++---
 3 files changed, 24 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/3ee9351a/hadoop/src/main/java/org/apache/carbondata/hadoop/util/CarbonInputFormatUtil.java
--
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/util/CarbonInputFormatUtil.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/util/CarbonInputFormatUtil.java
index 514428b..056c27b 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/util/CarbonInputFormatUtil.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/util/CarbonInputFormatUtil.java
@@ -22,6 +22,8 @@ import java.text.SimpleDateFormat;
 import java.util.List;
 import java.util.Locale;
 
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastore.impl.FileFactory;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
@@ -39,6 +41,7 @@ import org.apache.carbondata.core.scan.model.QueryMeasure;
 import org.apache.carbondata.core.scan.model.QueryModel;
 import org.apache.carbondata.hadoop.api.CarbonTableInputFormat;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.JobID;
@@ -159,4 +162,21 @@ public class CarbonInputFormatUtil {
 String jobtrackerID = createJobTrackerID(date);
 return new JobID(jobtrackerID, batch);
   }
+
+  public static void setS3Configurations(Configuration hadoopConf) {
+FileFactory.getConfiguration()
+.set("fs.s3a.access.key", hadoopConf.get("fs.s3a.access.key", ""));
+FileFactory.getConfiguration()
+.set("fs.s3a.secret.key", hadoopConf.get("fs.s3a.secret.key", ""));
+FileFactory.getConfiguration()
+.set("fs.s3a.endpoint", hadoopConf.get("fs.s3a.endpoint", ""));
+FileFactory.getConfiguration().set(CarbonCommonConstants.S3_ACCESS_KEY,
+hadoopConf.get(CarbonCommonConstants.S3_ACCESS_KEY, ""));
+FileFactory.getConfiguration().set(CarbonCommonConstants.S3_SECRET_KEY,
+hadoopConf.get(CarbonCommonConstants.S3_SECRET_KEY, ""));
+FileFactory.getConfiguration().set(CarbonCommonConstants.S3N_ACCESS_KEY,
+hadoopConf.get(CarbonCommonConstants.S3N_ACCESS_KEY, ""));
+FileFactory.getConfiguration().set(CarbonCommonConstants.S3N_SECRET_KEY,
+hadoopConf.get(CarbonCommonConstants.S3N_SECRET_KEY, ""));
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/3ee9351a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
--
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
index 917fc88..e17824f 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
@@ -41,10 +41,10 @@ import org.apache.carbondata.common.logging.LogServiceFactory
 import org.apache.carbondata.common.logging.impl.StandardLogService
 import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.datastore.compression.CompressorFactory
-import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.statusmanager.{LoadMetadataDetails, SegmentStatus}
 import org.apache.carbondata.core.util.{CarbonProperties, CarbonTimeStatisticsFactory, ThreadLocalTaskInfo}
 import org.apache.carbondata.core.util.path.CarbonTablePath
+import org.apache.carbondata.hadoop.util.CarbonInputFormatUtil
 import 

[02/36] carbondata git commit: [CARBONDATA-1763] Dropped table if exception thrown while creation

2018-02-09 Thread jackylk
[CARBONDATA-1763] Dropped table if exception thrown while creation

The preaggregate table is not dropped when its creation fails, because:

Exceptions thrown from undo metadata are not handled.
If the preaggregate table is not registered with the main table (i.e. the main
table update fails), it is not dropped from the metastore.

This closes #1951
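
In essence, the fix makes preaggregate table creation behave transactionally:
if registration with the main table fails, the undo path must still run, and the
undo itself must be guarded. A minimal Scala sketch of that undo-on-failure
pattern (createTable, registerWithMainTable and dropFromMetastore are
illustrative stand-ins, not the real CarbonCreateDataMapCommand API):

object CreateWithUndoSketch {

  // Hypothetical helpers standing in for the real command internals.
  def createTable(name: String): Unit = println(s"created $name")
  def registerWithMainTable(name: String): Unit =
    throw new RuntimeException("main table updation failed")
  def dropFromMetastore(name: String): Unit = println(s"dropped $name")

  def createPreAggTable(name: String): Unit = {
    createTable(name)
    try {
      registerWithMainTable(name)
    } catch {
      case ex: Exception =>
        // The undo must run even when registration fails, and a failure
        // inside the undo itself must not mask the original exception.
        try dropFromMetastore(name)
        catch { case undoEx: Exception => ex.addSuppressed(undoEx) }
        throw ex
    }
  }

  def main(args: Array[String]): Unit =
    try createPreAggTable("sales_agg")
    catch { case e: Exception => println(s"creation failed: ${e.getMessage}") }
}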


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/1be27b08
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/1be27b08
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/1be27b08

Branch: refs/heads/carbonstore-rebase
Commit: 1be27b085696937505acfbf62079c5ca31ad347c
Parents: 957a51f
Author: kunal642 
Authored: Thu Feb 8 11:50:23 2018 +0530
Committer: ravipesala 
Committed: Fri Feb 9 16:22:03 2018 +0530

--
 .../spark/rdd/CarbonDataRDDFactory.scala|  2 +-
 .../datamap/CarbonCreateDataMapCommand.scala|  2 +-
 .../datamap/CarbonDropDataMapCommand.scala  | 29 
 .../CreatePreAggregateTableCommand.scala|  3 +-
 4 files changed, 28 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/1be27b08/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
--
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index 5c43d58..8ed7623 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -546,7 +546,7 @@ object CarbonDataRDDFactory {
           LOGGER.error(ex, "Problem while committing data maps")
           false
       }
-      if (!done && !commitComplete) {
+      if (!done || !commitComplete) {
        CarbonLoaderUtil.updateTableStatusForFailure(carbonLoadModel, uniqueTableStatusId)
        LOGGER.info("starting clean up**")
        CarbonLoaderUtil.deleteSegment(carbonLoadModel, carbonLoadModel.getSegmentId.toInt)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1be27b08/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
--
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
index f2f001e..0fd5437 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
@@ -123,7 +123,7 @@ case class CarbonCreateDataMapCommand(
   override def undoMetadata(sparkSession: SparkSession, exception: Exception): Seq[Row] = {
     if (dmClassName.equalsIgnoreCase(PREAGGREGATE.toString) ||
       dmClassName.equalsIgnoreCase(TIMESERIES.toString)) {
-      if (!tableIsExists) {
+      if (!tableIsExists && createPreAggregateTableCommands != null) {
         createPreAggregateTableCommands.undoMetadata(sparkSession, exception)
       } else {
         Seq.empty

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1be27b08/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDropDataMapCommand.scala
--
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDropDataMapCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDropDataMapCommand.scala
index bc55988..8ef394c 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDropDataMapCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDropDataMapCommand.scala
@@ -47,7 +47,8 @@ case class CarbonDropDataMapCommand(
     dataMapName: String,
     ifExistsSet: Boolean,
     databaseNameOp: Option[String],
-    tableName: String)
+    tableName: String,
+    forceDrop: Boolean = false)
   extends AtomicRunnableCommand {
 
   var commandToRun: CarbonDropTableCommand = _
@@ -74,6 +75,10 @@ case class CarbonDropDataMapCommand(
       case ex: NoSuchTableException =>
         throw ex
     }
+      // If the datamap to be dropped is in the parent table, then drop the datamap from metastore 

[04/36] carbondata git commit: [HOTFIX] Fix documentation errors

2018-02-09 Thread jackylk
[HOTFIX] Fix documentation errors

Fix documentation errors

This closes #1955


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/420cc35d
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/420cc35d
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/420cc35d

Branch: refs/heads/carbonstore-rebase
Commit: 420cc35d565e344c8bcc8c6dace4138c897b
Parents: 7c05f5f
Author: Raghunandan S 
Authored: Thu Feb 8 21:30:03 2018 +0530
Committer: chenliang613 
Committed: Fri Feb 9 20:28:55 2018 +0800

--
 docs/data-management-on-carbondata.md | 76 +-
 processing/pom.xml|  2 +-
 2 files changed, 35 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/420cc35d/docs/data-management-on-carbondata.md
--
diff --git a/docs/data-management-on-carbondata.md b/docs/data-management-on-carbondata.md
index 61bb356..f70e0b7 100644
--- a/docs/data-management-on-carbondata.md
+++ b/docs/data-management-on-carbondata.md
@@ -39,7 +39,7 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
   STORED BY 'carbondata'
   [TBLPROPERTIES (property_name=property_value, ...)]
   [LOCATION 'path']
-  ```  
+  ```
   
 ### Usage Guidelines
 
@@ -101,11 +101,11 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
  These properties are table level compaction configurations, if not specified, system level configurations in carbon.properties will be used.
  Following are 5 configurations:
  
- * MAJOR_COMPACTION_SIZE: same meaning with carbon.major.compaction.size, size in MB.
- * AUTO_LOAD_MERGE: same meaning with carbon.enable.auto.load.merge.
- * COMPACTION_LEVEL_THRESHOLD: same meaning with carbon.compaction.level.threshold.
- * COMPACTION_PRESERVE_SEGMENTS: same meaning with carbon.numberof.preserve.segments.
- * ALLOWED_COMPACTION_DAYS: same meaning with carbon.allowed.compaction.days. 
+ * MAJOR_COMPACTION_SIZE: same meaning as carbon.major.compaction.size, size in MB.
+ * AUTO_LOAD_MERGE: same meaning as carbon.enable.auto.load.merge.
+ * COMPACTION_LEVEL_THRESHOLD: same meaning as carbon.compaction.level.threshold.
+ * COMPACTION_PRESERVE_SEGMENTS: same meaning as carbon.numberof.preserve.segments.
+ * ALLOWED_COMPACTION_DAYS: same meaning as carbon.allowed.compaction.days. 
 
  ```
  TBLPROPERTIES ('MAJOR_COMPACTION_SIZE'='2048',
@@ -136,17 +136,8 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
saleQuantity Int,
revenue Int)
 STORED BY 'carbondata'
-TBLPROPERTIES ('DICTIONARY_INCLUDE'='productNumber',
-   'NO_INVERTED_INDEX'='productBatch',
-   'SORT_COLUMNS'='productName,storeCity',
-   'SORT_SCOPE'='NO_SORT',
-   'TABLE_BLOCKSIZE'='512',
-   'MAJOR_COMPACTION_SIZE'='2048',
-   'AUTO_LOAD_MERGE'='true',
-   'COMPACTION_LEVEL_THRESHOLD'='5,6',
-   'COMPACTION_PRESERVE_SEGMENTS'='10',
-  'streaming'='true',
-   'ALLOWED_COMPACTION_DAYS'='5')
+TBLPROPERTIES ('SORT_COLUMNS'='productName,storeCity',
+   'SORT_SCOPE'='NO_SORT')
```
 
 ## CREATE DATABASE 
@@ -200,9 +191,9 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
 
  Examples:
  ```
- ALTER TABLE carbon RENAME TO carbondata
+ ALTER TABLE carbon RENAME TO carbonTable
  OR
- ALTER TABLE test_db.carbon RENAME TO test_db.carbondata
+ ALTER TABLE test_db.carbon RENAME TO test_db.carbonTable
  ```
 
- **ADD COLUMNS**
@@ -294,7 +285,7 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
   * Before executing this command the old table schema and data should be copied into the new database location.
   * If the table is aggregate table, then all the aggregate tables should be copied to the new database location.
   * For old store, the time zone of the source and destination cluster should be same.
-  * If old cluster uses HIVE meta store, refresh will not work as schema file does not exist in file system.
+  * If old cluster used HIVE meta store to store schema, refresh will not work as schema file does not exist in file system.
   
 
 ## LOAD DATA
@@ -302,7 +293,7 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
 ### LOAD FILES TO CARBONDATA TABLE
   
   This 

[19/36] carbondata git commit: [CARBONDATA-2099] Refactor query scan process to improve readability

2018-02-09 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/7b79ff18/core/src/main/java/org/apache/carbondata/core/util/AbstractDataFileFooterConverter.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/util/AbstractDataFileFooterConverter.java b/core/src/main/java/org/apache/carbondata/core/util/AbstractDataFileFooterConverter.java
index 94a041a..b74c279 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/AbstractDataFileFooterConverter.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/AbstractDataFileFooterConverter.java
@@ -378,7 +378,6 @@ public abstract class AbstractDataFileFooterConverter {
   cardinality[i] = segmentInfo.getColumn_cardinalities().get(i);
 }
 info.setColumnCardinality(cardinality);
-info.setNumberOfColumns(segmentInfo.getNum_cols());
 return info;
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/7b79ff18/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
index 83e7d52..b1eb79f 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
@@ -47,10 +47,10 @@ import org.apache.carbondata.core.cache.dictionary.Dictionary;
 import org.apache.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier;
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.constants.CarbonLoadOptionConstants;
-import org.apache.carbondata.core.datastore.FileHolder;
+import org.apache.carbondata.core.datastore.FileReader;
 import org.apache.carbondata.core.datastore.block.AbstractIndex;
 import org.apache.carbondata.core.datastore.block.TableBlockInfo;
-import org.apache.carbondata.core.datastore.chunk.DimensionColumnDataChunk;
+import org.apache.carbondata.core.datastore.chunk.DimensionColumnPage;
 import org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk;
 import org.apache.carbondata.core.datastore.chunk.impl.MeasureRawColumnChunk;
 import org.apache.carbondata.core.datastore.columnar.ColumnGroupModel;
@@ -80,7 +80,7 @@ import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
 import org.apache.carbondata.core.mutate.UpdateVO;
 import org.apache.carbondata.core.reader.ThriftReader;
 import org.apache.carbondata.core.reader.ThriftReader.TBaseCreator;
-import org.apache.carbondata.core.scan.model.QueryDimension;
+import org.apache.carbondata.core.scan.model.ProjectionDimension;
 import org.apache.carbondata.core.statusmanager.LoadMetadataDetails;
 import org.apache.carbondata.core.statusmanager.SegmentStatus;
 import org.apache.carbondata.core.statusmanager.SegmentStatusManager;
@@ -246,16 +246,13 @@ public final class CarbonUtil {
   public static ColumnGroupModel getColGroupModel(int[][] columnGroups) {
 int[] columnSplit = new int[columnGroups.length];
 int noOfColumnStore = columnSplit.length;
-boolean[] columnarStore = new boolean[noOfColumnStore];
 
 for (int i = 0; i < columnGroups.length; i++) {
   columnSplit[i] = columnGroups[i].length;
-  columnarStore[i] = columnGroups[i].length <= 1;
 }
 ColumnGroupModel colGroupModel = new ColumnGroupModel();
 colGroupModel.setNoOfColumnStore(noOfColumnStore);
 colGroupModel.setColumnSplit(columnSplit);
-colGroupModel.setColumnarStore(columnarStore);
 colGroupModel.setColumnGroup(columnGroups);
 return colGroupModel;
   }
@@ -416,7 +413,7 @@ public final class CarbonUtil {
 }
   }
 
-  public static int getFirstIndexUsingBinarySearch(DimensionColumnDataChunk dimColumnDataChunk,
+  public static int getFirstIndexUsingBinarySearch(DimensionColumnPage dimColumnDataChunk,
   int low, int high, byte[] compareValue, boolean matchUpLimit) {
 int cmpResult = 0;
 while (high >= low) {
@@ -455,7 +452,7 @@ public final class CarbonUtil {
* @return the compareValue's range index in the dimColumnDataChunk
*/
   public static int[] getRangeIndexUsingBinarySearch(
-      DimensionColumnDataChunk dimColumnDataChunk, int low, int high, byte[] compareValue) {
+      DimensionColumnPage dimColumnDataChunk, int low, int high, byte[] compareValue) {
 
 int[] rangeIndex = new int[2];
 int cmpResult = 0;
@@ -549,7 +546,7 @@ public final class CarbonUtil {
* @return index value
*/
   public static int nextLesserValueToTarget(int currentIndex,
-  DimensionColumnDataChunk dimColumnDataChunk, byte[] compareValue) {
+  DimensionColumnPage dimColumnDataChunk, byte[] compareValue) {
 while (currentIndex - 1 >= 0
 && dimColumnDataChunk.compareTo(currentIndex - 1, compareValue) >= 0) {
   --currentIndex;
@@ 

[35/36] carbondata git commit: [REBASE] Solve conflict after rebasing master

2018-02-09 Thread jackylk
[REBASE] Solve conflict after rebasing master


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/b93f9542
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/b93f9542
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/b93f9542

Branch: refs/heads/carbonstore-rebase
Commit: b93f9542f8b205f30942bf6518c11eda75442351
Parents: 1acc4e1
Author: Jacky Li 
Authored: Fri Feb 9 01:39:20 2018 +0800
Committer: Jacky Li 
Committed: Sat Feb 10 02:20:11 2018 +0800

--
 .../scan/filter/FilterExpressionProcessor.java  |  2 +-
 .../filter/executer/FalseFilterExecutor.java| 16 +++---
 .../FalseConditionalResolverImpl.java   |  4 ++--
 .../apache/carbondata/core/util/CarbonUtil.java |  1 -
 .../core/util/path/CarbonTablePath.java |  6 +++---
 .../spark/rdd/AggregateDataMapCompactor.scala   | 12 +--
 .../preaaggregate/PreAggregateListeners.scala   | 22 +---
 .../CarbonAlterTableDataTypeChangeCommand.scala | 17 ---
 .../schema/CarbonAlterTableRenameCommand.scala  |  3 ++-
 .../apache/spark/sql/hive/CarbonMetaStore.scala | 12 ++-
 .../processing/util/CarbonLoaderUtil.java   |  5 +++--
 11 files changed, 51 insertions(+), 49 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/b93f9542/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterExpressionProcessor.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterExpressionProcessor.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterExpressionProcessor.java
index b882b51..26b202f 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterExpressionProcessor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterExpressionProcessor.java
@@ -398,7 +398,7 @@ public class FilterExpressionProcessor implements FilterProcessor {
     ConditionalExpression condExpression = null;
     switch (filterExpressionType) {
       case FALSE:
-        return new FalseConditionalResolverImpl(expression, false, false, tableIdentifier);
+        return new FalseConditionalResolverImpl(expression, false, false);
       case TRUE:
         return new TrueConditionalResolverImpl(expression, false, false);
       case EQUALS:

http://git-wip-us.apache.org/repos/asf/carbondata/blob/b93f9542/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/FalseFilterExecutor.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/FalseFilterExecutor.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/FalseFilterExecutor.java
index 2d2a15c..75a6ec3 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/FalseFilterExecutor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/FalseFilterExecutor.java
@@ -21,7 +21,7 @@ import java.util.BitSet;
 
 import org.apache.carbondata.core.scan.expression.exception.FilterUnsupportedException;
 import org.apache.carbondata.core.scan.filter.intf.RowIntf;
-import org.apache.carbondata.core.scan.processor.BlocksChunkHolder;
+import org.apache.carbondata.core.scan.processor.RawBlockletColumnChunks;
 import org.apache.carbondata.core.util.BitSetGroup;
 
 /**
@@ -33,8 +33,8 @@ import org.apache.carbondata.core.util.BitSetGroup;
 public class FalseFilterExecutor implements FilterExecuter {
 
   @Override
-  public BitSetGroup applyFilter(BlocksChunkHolder blocksChunkHolder, boolean useBitsetPipeline)
-      throws FilterUnsupportedException, IOException {
+  public BitSetGroup applyFilter(RawBlockletColumnChunks blocksChunkHolder,
+      boolean useBitsetPipeline) throws FilterUnsupportedException, IOException {
 int numberOfPages = blocksChunkHolder.getDataBlock().numberOfPages();
 BitSetGroup group = new BitSetGroup(numberOfPages);
 for (int i = 0; i < numberOfPages; i++) {
@@ -44,17 +44,19 @@ public class FalseFilterExecutor implements FilterExecuter {
 return group;
   }
 
-  @Override public boolean applyFilter(RowIntf value, int dimOrdinalMax)
+  @Override
+  public boolean applyFilter(RowIntf value, int dimOrdinalMax)
   throws FilterUnsupportedException, IOException {
 return false;
   }
 
-  @Override public BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue) {
-
+  @Override
+  public BitSet isScanRequired(byte[][] blockMaxValue, byte[][] blockMinValue) {
     return new BitSet();
   }
 
-  @Override public void readBlocks(BlocksChunkHolder blockChunkHolder) throws IOException {
+  @Override
+  public void 

Jenkins build became unstable: carbondata-master-spark-2.2 » Apache CarbonData :: Spark Common Test #89

2018-02-09 Thread Apache Jenkins Server
See 




Jenkins build became unstable: carbondata-master-spark-2.2 #89

2018-02-09 Thread Apache Jenkins Server
See 




carbondata git commit: [CARBONDATA-2150] Unwanted updatetable status files are being generated for the delete operation where no records are deleted

2018-02-09 Thread kunalkapoor
Repository: carbondata
Updated Branches:
  refs/heads/master 420cc35d5 -> da549c2b7


[CARBONDATA-2150] Unwanted updatetable status files are being generated for the 
delete operation where no records are deleted

Problem:
Unwanted update table status files are generated for delete operations in
which no records are actually deleted.

Analysis:
When the filter value of a delete operation is less than the maximum value in
that column, getSplits() still returns the block, so the delete logic created
an update table status file even though no delete was performed. This change
also adds the Spark context to the create database event.

This closes #1957
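
Reduced to its essence, the fix is a guard: only persist an update table
status file when the delete actually removed rows. A minimal Scala sketch of
that guard (DeleteResult, runDelete and writeUpdateTableStatus are
hypothetical stand-ins for the DeleteExecution internals):

final case class DeleteResult(deletedRowCount: Long)

object DeleteStatusGuardSketch {

  // Pretend delete: count how many rows match the filter.
  def runDelete(filter: Int => Boolean, rows: Seq[Int]): DeleteResult =
    DeleteResult(rows.count(filter).toLong)

  def writeUpdateTableStatus(): Unit = println("update table status written")

  def main(args: Array[String]): Unit = {
    val rows = Seq(1, 2, 4, 6)
    val result = runDelete(_ == 5, rows) // filter matches no row
    // Only write a new status file when rows were actually deleted.
    if (result.deletedRowCount > 0) writeUpdateTableStatus()
    else println("no rows deleted; skipping update table status file")
  }
}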


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/da549c2b
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/da549c2b
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/da549c2b

Branch: refs/heads/master
Commit: da549c2b721ae66f55fd28bebe775ac10373c4e2
Parents: 420cc35
Author: akashrn5 
Authored: Thu Feb 8 23:10:43 2018 +0530
Committer: kunal642 
Committed: Fri Feb 9 19:30:47 2018 +0530

--
 .../iud/DeleteCarbonTableTestCase.scala | 22 +++-
 .../events/CreateDatabaseEvents.scala   |  6 +-
 .../scala/org/apache/spark/util/FileUtils.scala |  5 +++--
 .../command/mutation/DeleteExecution.scala  |  2 +-
 .../sql/execution/strategy/DDLStrategy.scala|  2 +-
 5 files changed, 31 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/da549c2b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
--
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
index 6521657..22aa385 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
@@ -17,9 +17,11 @@
 package org.apache.carbondata.spark.testsuite.iud
 
 import org.apache.spark.sql.test.util.QueryTest
-import org.apache.spark.sql.{Row, SaveMode}
+import org.apache.spark.sql.{CarbonEnv, Row, SaveMode}
 import org.scalatest.BeforeAndAfterAll
 
+import org.apache.carbondata.core.datastore.impl.FileFactory
+
 
 class DeleteCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
   override def beforeAll {
@@ -180,6 +182,24 @@ class DeleteCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
   Seq(Row(1, "abc"), Row(3, "uhj"), Row(4, "frg")))
   }
 
+  test("test number of update table status files after delete query where no 
records are deleted") {
+sql("drop table if exists update_status_files")
+sql("create table update_status_files(name string,age int) stored by 
'carbondata'")
+sql("insert into update_status_files select 'abc',1")
+sql("insert into update_status_files select 'def',2")
+sql("insert into update_status_files select 'xyz',4")
+sql("insert into update_status_files select 'abc',6")
+sql("alter table update_status_files compact 'minor'")
+sql("delete from update_status_files where age=3").show()
+sql("delete from update_status_files where age=5").show()
+val carbonTable = CarbonEnv
+  .getCarbonTable(Some("iud_db"), 
"update_status_files")(sqlContext.sparkSession)
+val metaPath = carbonTable.getMetaDataFilepath
+val files = FileFactory.getCarbonFile(metaPath)
+assert(files.listFiles().length == 2)
+sql("drop table update_status_files")
+  }
+
 
   override def afterAll {
 sql("use default")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/da549c2b/integration/spark-common/src/main/scala/org/apache/carbondata/events/CreateDatabaseEvents.scala
--
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/events/CreateDatabaseEvents.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/events/CreateDatabaseEvents.scala
index dae22b1..c1d79db 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/events/CreateDatabaseEvents.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/events/CreateDatabaseEvents.scala
@@ -17,11 +17,15 @@
 
 package org.apache.carbondata.events
 
+import org.apache.spark.SparkContext
+
 
 case class CreateDatabasePreExecutionEvent(databaseName: String) extends Event
   with 

carbondata git commit: [CARBONDATA-2023][DataLoad] Add size base block allocation in data loading [Forced Update!]

2018-02-09 Thread jackylk
Repository: carbondata
Updated Branches:
  refs/heads/carbonstore-rebase 5c55dfe19 -> 132fbd4d8 (forced update)


[CARBONDATA-2023][DataLoad] Add size base block allocation in data loading

Carbondata assigns blocks to nodes at the beginning of data loading.
The previous block allocation strategy was based on block count, so it
suffers from skew when the sizes of the input files differ a lot.

We introduced a size-based block allocation strategy to optimize data
loading performance in skewed-data scenarios.

This closes #1808
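
The strategy amounts to greedy bin-packing: sort the blocks by size in
descending order and always hand the next block to the node that currently
holds the fewest bytes. A minimal Scala sketch of that idea (Block and
assignBySize are illustrative, not CarbonLoaderUtil's real types):

final case class Block(name: String, sizeInBytes: Long)

object SizeBasedAllocationSketch {

  def assignBySize(blocks: Seq[Block], nodes: Seq[String]): Map[String, Seq[Block]] = {
    val load = scala.collection.mutable.Map(nodes.map(_ -> 0L): _*)
    val assigned = scala.collection.mutable.Map(nodes.map(_ -> Vector.empty[Block]): _*)
    // Largest blocks first, so a big file cannot unbalance a node late on.
    for (block <- blocks.sortBy(-_.sizeInBytes)) {
      val node = load.minBy(_._2)._1 // node with the least bytes so far
      load(node) += block.sizeInBytes
      assigned(node) :+= block
    }
    assigned.toMap
  }

  def main(args: Array[String]): Unit = {
    val blocks = Seq(Block("b1", 900), Block("b2", 100), Block("b3", 100),
      Block("b4", 100), Block("b5", 800))
    assignBySize(blocks, Seq("node1", "node2")).foreach { case (node, bs) =>
      println(s"$node -> ${bs.map(_.name).mkString(", ")} (${bs.map(_.sizeInBytes).sum} bytes)")
    }
  }
}

With block-count allocation the two large blocks above could land on the same
node; packing by size keeps each node near 1000 bytes in this example.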


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/132fbd4d
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/132fbd4d
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/132fbd4d

Branch: refs/heads/carbonstore-rebase
Commit: 132fbd4d8d2a91efca542bd31ae5935171802b7e
Parents: c82734e
Author: xuchuanyin 
Authored: Thu Feb 8 14:42:39 2018 +0800
Committer: Jacky Li 
Committed: Fri Feb 9 21:45:48 2018 +0800

--
 .../constants/CarbonLoadOptionConstants.java|  10 +
 .../core/datastore/block/TableBlockInfo.java|  29 ++
 .../carbondata/core/util/CarbonProperties.java  |  11 +
 docs/useful-tips-on-carbondata.md   |   1 +
 .../sdv/generated/MergeIndexTestCase.scala  |  11 +-
 .../CarbonIndexFileMergeTestCase.scala  |  19 +-
 .../StandardPartitionTableLoadingTestCase.scala |   5 +-
 .../spark/rdd/NewCarbonDataLoadRDD.scala|   4 +-
 .../spark/sql/hive/DistributionUtil.scala   |   2 +-
 .../spark/rdd/CarbonDataRDDFactory.scala|  18 +-
 .../merger/NodeMultiBlockRelation.java  |  40 ++
 .../processing/util/CarbonLoaderUtil.java   | 494 ---
 .../processing/util/CarbonLoaderUtilTest.java   | 125 +
 13 files changed, 564 insertions(+), 205 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/132fbd4d/core/src/main/java/org/apache/carbondata/core/constants/CarbonLoadOptionConstants.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonLoadOptionConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonLoadOptionConstants.java
index bcfeba0..a6bf60f 100644
--- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonLoadOptionConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonLoadOptionConstants.java
@@ -114,4 +114,14 @@ public final class CarbonLoadOptionConstants {
*/
   public static final int MAX_EXTERNAL_DICTIONARY_SIZE = 1000;
 
+  /**
+   * enable block size based block allocation while loading data. By default, carbondata assigns
+   * blocks to node based on block number. If this option is set to `true`, carbondata will
+   * consider block size first and make sure that all the nodes will process almost equal size of
+   * data. This option is especially useful when you encounter skewed data.
+   */
+  @CarbonProperty
+  public static final String ENABLE_CARBON_LOAD_SKEWED_DATA_OPTIMIZATION
+      = "carbon.load.skewedDataOptimization.enabled";
+  public static final String ENABLE_CARBON_LOAD_SKEWED_DATA_OPTIMIZATION_DEFAULT = "false";
 }
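
For reference, one call is enough to switch the option added above on from
user code; a minimal sketch, assuming the carbondata-core jar is on the
classpath:

import org.apache.carbondata.core.util.CarbonProperties

object EnableSkewOptimizationSketch {
  def main(args: Array[String]): Unit = {
    // Turn on size-based (skew-aware) block allocation for data loading.
    CarbonProperties.getInstance()
      .addProperty("carbon.load.skewedDataOptimization.enabled", "true")
  }
}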

http://git-wip-us.apache.org/repos/asf/carbondata/blob/132fbd4d/core/src/main/java/org/apache/carbondata/core/datastore/block/TableBlockInfo.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/block/TableBlockInfo.java b/core/src/main/java/org/apache/carbondata/core/datastore/block/TableBlockInfo.java
index a7bfdba..c0cebe0 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/block/TableBlockInfo.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/block/TableBlockInfo.java
@@ -19,6 +19,8 @@ package org.apache.carbondata.core.datastore.block;
 import java.io.IOException;
 import java.io.Serializable;
 import java.nio.charset.Charset;
+import java.util.Arrays;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -98,6 +100,20 @@ public class TableBlockInfo implements Distributable, Serializable {
 
   private String dataMapWriterPath;
 
+  /**
+   * comparator to sort by block size in descending order.
+   * Since each line is not exactly the same, the size of an InputSplit may differ,
+   * so we allow some deviation for these splits.
+   */
+  public static final Comparator<Distributable> DATA_SIZE_DESC_COMPARATOR =
+      new Comparator<Distributable>() {
+        @Override public int compare(Distributable o1, Distributable o2) {
+          long diff =
+              ((TableBlockInfo) o1).getBlockLength() - ((TableBlockInfo) o2).getBlockLength();
+          return diff < 0 ? 1 : 

carbondata git commit: [HOTFIX] Fix documentation errors

2018-02-09 Thread chenliang613
Repository: carbondata
Updated Branches:
  refs/heads/master 7c05f5f18 -> 420cc35d5


[HOTFIX] Fix documentation errors

Fix documentation errors

This closes #1955


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/420cc35d
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/420cc35d
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/420cc35d

Branch: refs/heads/master
Commit: 420cc35d565e344c8bcc8c6dace4138c897b
Parents: 7c05f5f
Author: Raghunandan S 
Authored: Thu Feb 8 21:30:03 2018 +0530
Committer: chenliang613 
Committed: Fri Feb 9 20:28:55 2018 +0800

--
 docs/data-management-on-carbondata.md | 76 +-
 processing/pom.xml|  2 +-
 2 files changed, 35 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/420cc35d/docs/data-management-on-carbondata.md
--
diff --git a/docs/data-management-on-carbondata.md b/docs/data-management-on-carbondata.md
index 61bb356..f70e0b7 100644
--- a/docs/data-management-on-carbondata.md
+++ b/docs/data-management-on-carbondata.md
@@ -39,7 +39,7 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
   STORED BY 'carbondata'
   [TBLPROPERTIES (property_name=property_value, ...)]
   [LOCATION 'path']
-  ```  
+  ```
   
 ### Usage Guidelines
 
@@ -101,11 +101,11 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
  These properties are table level compaction configurations, if not specified, system level configurations in carbon.properties will be used.
  Following are 5 configurations:
  
- * MAJOR_COMPACTION_SIZE: same meaning with carbon.major.compaction.size, size in MB.
- * AUTO_LOAD_MERGE: same meaning with carbon.enable.auto.load.merge.
- * COMPACTION_LEVEL_THRESHOLD: same meaning with carbon.compaction.level.threshold.
- * COMPACTION_PRESERVE_SEGMENTS: same meaning with carbon.numberof.preserve.segments.
- * ALLOWED_COMPACTION_DAYS: same meaning with carbon.allowed.compaction.days. 
+ * MAJOR_COMPACTION_SIZE: same meaning as carbon.major.compaction.size, size in MB.
+ * AUTO_LOAD_MERGE: same meaning as carbon.enable.auto.load.merge.
+ * COMPACTION_LEVEL_THRESHOLD: same meaning as carbon.compaction.level.threshold.
+ * COMPACTION_PRESERVE_SEGMENTS: same meaning as carbon.numberof.preserve.segments.
+ * ALLOWED_COMPACTION_DAYS: same meaning as carbon.allowed.compaction.days. 
 
  ```
  TBLPROPERTIES ('MAJOR_COMPACTION_SIZE'='2048',
@@ -136,17 +136,8 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
saleQuantity Int,
revenue Int)
 STORED BY 'carbondata'
-TBLPROPERTIES ('DICTIONARY_INCLUDE'='productNumber',
-   'NO_INVERTED_INDEX'='productBatch',
-   'SORT_COLUMNS'='productName,storeCity',
-   'SORT_SCOPE'='NO_SORT',
-   'TABLE_BLOCKSIZE'='512',
-   'MAJOR_COMPACTION_SIZE'='2048',
-   'AUTO_LOAD_MERGE'='true',
-   'COMPACTION_LEVEL_THRESHOLD'='5,6',
-   'COMPACTION_PRESERVE_SEGMENTS'='10',
-  'streaming'='true',
-   'ALLOWED_COMPACTION_DAYS'='5')
+TBLPROPERTIES ('SORT_COLUMNS'='productName,storeCity',
+   'SORT_SCOPE'='NO_SORT')
```
 
 ## CREATE DATABASE 
@@ -200,9 +191,9 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
 
  Examples:
  ```
- ALTER TABLE carbon RENAME TO carbondata
+ ALTER TABLE carbon RENAME TO carbonTable
  OR
- ALTER TABLE test_db.carbon RENAME TO test_db.carbondata
+ ALTER TABLE test_db.carbon RENAME TO test_db.carbonTable
  ```
 
- **ADD COLUMNS**
@@ -294,7 +285,7 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
   * Before executing this command the old table schema and data should be copied into the new database location.
   * If the table is aggregate table, then all the aggregate tables should be copied to the new database location.
   * For old store, the time zone of the source and destination cluster should be same.
-  * If old cluster uses HIVE meta store, refresh will not work as schema file does not exist in file system.
+  * If old cluster used HIVE meta store to store schema, refresh will not work as schema file does not exist in file system.
   
 
 ## LOAD DATA
@@ -302,7 +293,7 @@ This tutorial is going to introduce all commands and data 

Jenkins build is back to normal : carbondata-master-spark-2.1 #2049

2018-02-09 Thread Apache Jenkins Server
See 




Jenkins build is back to normal : carbondata-master-spark-2.1 » Apache CarbonData :: Hadoop #2049

2018-02-09 Thread Apache Jenkins Server
See 




Jenkins build is back to normal : carbondata-master-spark-2.2 #85

2018-02-09 Thread Apache Jenkins Server
See 




Jenkins build is back to normal : carbondata-master-spark-2.2 » Apache CarbonData :: Hadoop #85

2018-02-09 Thread Apache Jenkins Server
See 




carbondata git commit: [CARBONDATA-2137] Delete query performance improved

2018-02-09 Thread ravipesala
Repository: carbondata
Updated Branches:
  refs/heads/branch-1.3 1137c285f -> 6e435de5e


[CARBONDATA-2137] Delete query performance improved

The following configuration was used:

SPARK_EXECUTOR_MEMORY : 200G
SPARK_DRIVER_MEMORY : 20G
SPARK_EXECUTOR_CORES : 32
SPARK_EXECUTOR_INSTANCES : 3

Earlier the delete took about 20 minutes; now it takes approximately 5 minutes.

This closes #1937
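
The heart of the change, visible in the DeleteDeltaBlockDetails diff below, is
replacing a per-block List with a TreeMap keyed per blocklet, so locating and
merging a blocklet's delete delta becomes a logarithmic lookup instead of a
linear scan, while iteration stays sorted. A rough Scala illustration of the
pattern (BlockletDetails and addBlocklet are assumptions for this sketch, not
the real CarbonData signatures):

import java.util.TreeMap

final case class BlockletDetails(blockletId: String, deletedRows: Set[Int])

object DeleteDeltaMapSketch {

  // Keyed storage: one entry per blocklet, kept in sorted key order.
  private val detailsMap = new TreeMap[String, BlockletDetails]()

  def addBlocklet(d: BlockletDetails): Unit = {
    // O(log n) lookup replaces scanning a List for a matching blocklet.
    val merged = Option(detailsMap.get(d.blockletId))
      .map(e => e.copy(deletedRows = e.deletedRows ++ d.deletedRows))
      .getOrElse(d)
    detailsMap.put(d.blockletId, merged)
  }

  def main(args: Array[String]): Unit = {
    addBlocklet(BlockletDetails("0", Set(1, 2)))
    addBlocklet(BlockletDetails("0", Set(3))) // merges into the same key
    addBlocklet(BlockletDetails("1", Set(9)))
    detailsMap.forEach((id, d) => println(s"blocklet $id -> ${d.deletedRows}"))
  }
}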


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/6e435de5
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/6e435de5
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/6e435de5

Branch: refs/heads/branch-1.3
Commit: 6e435de5e04ace63fe5b105e2f180ef0932d80d3
Parents: 1137c28
Author: rahulforallp 
Authored: Tue Feb 6 18:41:35 2018 +0530
Committer: ravipesala 
Committed: Fri Feb 9 16:48:31 2018 +0530

--
 .../carbondata/core/locks/HdfsFileLock.java |  2 +-
 .../carbondata/core/locks/LocalFileLock.java|  2 +-
 .../core/mutate/DeleteDeltaBlockDetails.java| 27 +++-
 3 files changed, 22 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/6e435de5/core/src/main/java/org/apache/carbondata/core/locks/HdfsFileLock.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/locks/HdfsFileLock.java b/core/src/main/java/org/apache/carbondata/core/locks/HdfsFileLock.java
index 1a46770..cc98b03 100644
--- a/core/src/main/java/org/apache/carbondata/core/locks/HdfsFileLock.java
+++ b/core/src/main/java/org/apache/carbondata/core/locks/HdfsFileLock.java
@@ -85,7 +85,7 @@ public class HdfsFileLock extends AbstractCarbonLock {
   return true;
 
 } catch (IOException e) {
-  LOGGER.error(e, e.getMessage());
+  LOGGER.info(e.getMessage());
   return false;
 }
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6e435de5/core/src/main/java/org/apache/carbondata/core/locks/LocalFileLock.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/locks/LocalFileLock.java b/core/src/main/java/org/apache/carbondata/core/locks/LocalFileLock.java
index c3dfb57..4fee4c4 100644
--- a/core/src/main/java/org/apache/carbondata/core/locks/LocalFileLock.java
+++ b/core/src/main/java/org/apache/carbondata/core/locks/LocalFileLock.java
@@ -118,7 +118,7 @@ public class LocalFileLock extends AbstractCarbonLock {
 return false;
   }
 } catch (IOException e) {
-  LOGGER.error(e, e.getMessage());
+  LOGGER.info(e.getMessage());
   return false;
 }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6e435de5/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaBlockDetails.java
--
diff --git a/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaBlockDetails.java b/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaBlockDetails.java
index 4275cca..7affe12 100644
--- a/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaBlockDetails.java
+++ b/core/src/main/java/org/apache/carbondata/core/mutate/DeleteDeltaBlockDetails.java
@@ -19,7 +19,10 @@ package org.apache.carbondata.core.mutate;
 
 import java.io.Serializable;
 import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
 
 import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
@@ -31,7 +34,7 @@ public class DeleteDeltaBlockDetails implements Serializable {
 
   private static final long serialVersionUID = 1206104914918495724L;
 
-  private List<DeleteDeltaBlockletDetails> blockletDetails;
+  private Map<Integer, DeleteDeltaBlockletDetails> blockletDetailsMap;
   private String blockName;
 
   /**
@@ -42,7 +45,7 @@ public class DeleteDeltaBlockDetails implements Serializable {
 
   public DeleteDeltaBlockDetails(String blockName) {
     this.blockName = blockName;
-    blockletDetails = new ArrayList<DeleteDeltaBlockletDetails>();
+    blockletDetailsMap = new TreeMap<>();
   }
 
   public String getBlockName() {
@@ -68,15 +71,25 @@ public class DeleteDeltaBlockDetails implements Serializable {
   }
 
   public List<DeleteDeltaBlockletDetails> getBlockletDetails() {
-    return blockletDetails;
+
+    List<DeleteDeltaBlockletDetails> deleteDeltaBlockletDetailsList = new ArrayList<>();
+    Iterator<Map.Entry<Integer, DeleteDeltaBlockletDetails>> iterator =
+        blockletDetailsMap.entrySet().iterator();
+    while (iterator.hasNext()) {
+      deleteDeltaBlockletDetailsList.add(iterator.next().getValue());
+    }
+    return deleteDeltaBlockletDetailsList;
   }
 
   public boolean 
 
   public boolean 

carbondata git commit: [CARBONDATA-1763] Dropped table if exception thrown while creation

2018-02-09 Thread ravipesala
Repository: carbondata
Updated Branches:
  refs/heads/master 957a51fef -> 1be27b085


[CARBONDATA-1763] Dropped table if exception thrown while creation

The preaggregate table is not dropped when its creation fails, because:

Exceptions thrown from undo metadata are not handled.
If the preaggregate table is not registered with the main table (i.e. the main
table update fails), it is not dropped from the metastore.

This closes #1951


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/1be27b08
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/1be27b08
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/1be27b08

Branch: refs/heads/master
Commit: 1be27b085696937505acfbf62079c5ca31ad347c
Parents: 957a51f
Author: kunal642 
Authored: Thu Feb 8 11:50:23 2018 +0530
Committer: ravipesala 
Committed: Fri Feb 9 16:22:03 2018 +0530

--
 .../spark/rdd/CarbonDataRDDFactory.scala|  2 +-
 .../datamap/CarbonCreateDataMapCommand.scala|  2 +-
 .../datamap/CarbonDropDataMapCommand.scala  | 29 
 .../CreatePreAggregateTableCommand.scala|  3 +-
 4 files changed, 28 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/carbondata/blob/1be27b08/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
--
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index 5c43d58..8ed7623 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -546,7 +546,7 @@ object CarbonDataRDDFactory {
           LOGGER.error(ex, "Problem while committing data maps")
           false
       }
-      if (!done && !commitComplete) {
+      if (!done || !commitComplete) {
        CarbonLoaderUtil.updateTableStatusForFailure(carbonLoadModel, uniqueTableStatusId)
        LOGGER.info("starting clean up**")
        CarbonLoaderUtil.deleteSegment(carbonLoadModel, carbonLoadModel.getSegmentId.toInt)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1be27b08/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
--
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
index f2f001e..0fd5437 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
@@ -123,7 +123,7 @@ case class CarbonCreateDataMapCommand(
   override def undoMetadata(sparkSession: SparkSession, exception: Exception): Seq[Row] = {
     if (dmClassName.equalsIgnoreCase(PREAGGREGATE.toString) ||
       dmClassName.equalsIgnoreCase(TIMESERIES.toString)) {
-      if (!tableIsExists) {
+      if (!tableIsExists && createPreAggregateTableCommands != null) {
         createPreAggregateTableCommands.undoMetadata(sparkSession, exception)
       } else {
         Seq.empty

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1be27b08/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDropDataMapCommand.scala
--
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDropDataMapCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDropDataMapCommand.scala
index bc55988..8ef394c 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDropDataMapCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDropDataMapCommand.scala
@@ -47,7 +47,8 @@ case class CarbonDropDataMapCommand(
     dataMapName: String,
     ifExistsSet: Boolean,
     databaseNameOp: Option[String],
-    tableName: String)
+    tableName: String,
+    forceDrop: Boolean = false)
   extends AtomicRunnableCommand {
 
   var commandToRun: CarbonDropTableCommand = _
@@ -74,6 +75,10 @@ case class CarbonDropDataMapCommand(
       case ex: NoSuchTableException =>
         throw ex
     }
+  // If 

Build failed in Jenkins: carbondata-master-spark-2.2 » Apache CarbonData :: Hadoop #84

2018-02-09 Thread Apache Jenkins Server
See 


--
[INFO] 
[INFO] 
[INFO] Building Apache CarbonData :: Hadoop 1.4.0-SNAPSHOT
[INFO] 
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/carbondata-processing-1.4.0-SNAPSHOT.pom
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/carbondata-processing-1.4.0-SNAPSHOT.pom
[WARNING] The POM for 
org.apache.carbondata:carbondata-processing:jar:1.4.0-SNAPSHOT is missing, no 
dependency information available
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/carbondata-processing-1.4.0-SNAPSHOT.jar
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/carbondata-processing-1.4.0-SNAPSHOT.jar


Build failed in Jenkins: carbondata-master-spark-2.2 #84

2018-02-09 Thread Apache Jenkins Server
See 


--
[...truncated 60.31 KB...]
[INFO] --- jacoco-maven-plugin:0.7.9:check (default-check) @ carbondata-core ---
[INFO] Loading execution data file 

[INFO] Analyzed bundle 'carbondata-core' with 0 classes
[INFO] All coverage checks have been met.
[INFO] 
[INFO] --- maven-install-plugin:2.5.2:install (default-install) @ 
carbondata-core ---
[INFO] Installing 

 to 
/home/jenkins/jenkins-slave/maven-repositories/1/org/apache/carbondata/carbondata-core/1.4.0-SNAPSHOT/carbondata-core-1.4.0-SNAPSHOT.jar
[INFO] Installing 
 to 
/home/jenkins/jenkins-slave/maven-repositories/1/org/apache/carbondata/carbondata-core/1.4.0-SNAPSHOT/carbondata-core-1.4.0-SNAPSHOT.pom
[INFO] 
[INFO] 
[INFO] Building Apache CarbonData :: Processing 1.3.0-SNAPSHOT
[INFO] 
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-core/1.3.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-core/1.3.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloaded: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-core/1.3.0-SNAPSHOT/maven-metadata.xml
 (789 B at 2.5 KB/sec)
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-format/1.3.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-format/1.3.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloaded: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-format/1.3.0-SNAPSHOT/maven-metadata.xml
 (791 B at 2.5 KB/sec)
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-common/1.3.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-common/1.3.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloaded: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-common/1.3.0-SNAPSHOT/maven-metadata.xml
 (791 B at 2.5 KB/sec)
[INFO] 
[INFO] --- maven-clean-plugin:2.5:clean (default-clean) @ carbondata-processing 
---
[INFO] Deleting 

[INFO] 
[INFO] --- jacoco-maven-plugin:0.7.9:prepare-agent (default-prepare-agent) @ 
carbondata-processing ---
[INFO] argLine set to 
-javaagent:/home/jenkins/jenkins-slave/maven-repositories/1/org/jacoco/org.jacoco.agent/0.7.9/org.jacoco.agent-0.7.9-runtime.jar=destfile=
[INFO] 
[INFO] --- maven-remote-resources-plugin:1.5:process (default) @ 
carbondata-processing ---
[INFO] 
[INFO] --- maven-resources-plugin:2.7:resources (default-resources) @ 
carbondata-processing ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] Copying 1 resource
[INFO] Copying 1 resource
[INFO] Copying 3 resources
[INFO] 
[INFO] --- maven-compiler-plugin:3.2:compile (default-compile) @ 
carbondata-processing ---
[INFO] Changes detected - recompiling the module!
[INFO] Compiling 145 source files to 

[INFO] 
:
 

 uses or overrides a deprecated API.
[INFO] 
:
 Recompile with -Xlint:deprecation for details.
[INFO] 
:
 Some input files use unchecked or unsafe operations.
[INFO] 
:
 Recompile with -Xlint:unchecked for details.
[INFO] 
[INFO] >>> findbugs-maven-plugin:3.0.4:check (analyze-compile) > :findbugs @ 
carbondata-processing >>>
[INFO] 
[INFO] --- findbugs-maven-plugin:3.0.4:findbugs (findbugs) @ 
carbondata-processing ---
[INFO] Fork Value 

Build failed in Jenkins: carbondata-master-spark-2.1 » Apache CarbonData :: Hadoop #2048

2018-02-09 Thread Apache Jenkins Server
See 


--
[INFO] 
[INFO] 
[INFO] Building Apache CarbonData :: Hadoop 1.4.0-SNAPSHOT
[INFO] 
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/carbondata-processing-1.4.0-SNAPSHOT.pom
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/carbondata-processing-1.4.0-SNAPSHOT.pom
[WARNING] The POM for 
org.apache.carbondata:carbondata-processing:jar:1.4.0-SNAPSHOT is missing, no 
dependency information available
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/carbondata-processing-1.4.0-SNAPSHOT.jar
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/carbondata-processing-1.4.0-SNAPSHOT.jar


Jenkins build is back to stable : carbondata-master-spark-2.1 #2046

2018-02-09 Thread Apache Jenkins Server
See 




Jenkins build is back to stable : carbondata-master-spark-2.1 » Apache CarbonData :: Spark Common Test #2046

2018-02-09 Thread Apache Jenkins Server
See 




Build failed in Jenkins: carbondata-master-spark-2.1 #2047

2018-02-09 Thread Apache Jenkins Server
See 


Changes:

[chenliang613] [HOTFIX] Upgrade pom version from 1.3.0 to 1.4.0

--
[...truncated 61.50 KB...]
[INFO] Building Apache CarbonData :: Processing 1.3.0-SNAPSHOT
[INFO] 
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-core/1.3.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-core/1.3.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloaded: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-core/1.3.0-SNAPSHOT/maven-metadata.xml
 (789 B at 2.4 KB/sec)
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-format/1.3.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-format/1.3.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloaded: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-format/1.3.0-SNAPSHOT/maven-metadata.xml
 (791 B at 2.4 KB/sec)
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-common/1.3.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-common/1.3.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloaded: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-common/1.3.0-SNAPSHOT/maven-metadata.xml
 (791 B at 2.4 KB/sec)
[INFO] 
[INFO] --- maven-clean-plugin:2.5:clean (default-clean) @ carbondata-processing 
---
[INFO] Deleting 

[INFO] 
[INFO] --- jacoco-maven-plugin:0.7.9:prepare-agent (default-prepare-agent) @ 
carbondata-processing ---
[INFO] argLine set to 
-javaagent:/home/jenkins/jenkins-slave/maven-repositories/0/org/jacoco/org.jacoco.agent/0.7.9/org.jacoco.agent-0.7.9-runtime.jar=destfile=
[INFO] 
[INFO] --- maven-remote-resources-plugin:1.5:process (default) @ 
carbondata-processing ---
[INFO] 
[INFO] --- maven-resources-plugin:2.7:resources (default-resources) @ 
carbondata-processing ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] Copying 1 resource
[INFO] Copying 1 resource
[INFO] Copying 3 resources
[INFO] 
[INFO] --- maven-compiler-plugin:3.2:compile (default-compile) @ 
carbondata-processing ---
[INFO] Changes detected - recompiling the module!
[INFO] Compiling 145 source files to 

[INFO] : uses or overrides a deprecated API.
[INFO] : Recompile with -Xlint:deprecation for details.
[INFO] : Some input files use unchecked or unsafe operations.
[INFO] : Recompile with -Xlint:unchecked for details.
[INFO] 
[INFO] >>> findbugs-maven-plugin:3.0.4:check (analyze-compile) > :findbugs @ 
carbondata-processing >>>
[INFO] 
[INFO] --- findbugs-maven-plugin:3.0.4:findbugs (findbugs) @ 
carbondata-processing ---
[INFO] Fork Value is true
[INFO] Done FindBugs Analysis
[INFO] 
[INFO] <<< findbugs-maven-plugin:3.0.4:check (analyze-compile) < :findbugs @ 
carbondata-processing <<<
[INFO] 
[INFO] --- findbugs-maven-plugin:3.0.4:check (analyze-compile) @ 
carbondata-processing ---
[INFO] BugInstance size is 0
[INFO] Error size is 0
[INFO] No errors/warnings found
[INFO] 
[INFO] --- maven-resources-plugin:2.7:testResources (default-testResources) @ 
carbondata-processing ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] Copying 14 resources
[INFO] Copying 3 resources
[INFO] 
[INFO] --- maven-compiler-plugin:3.2:testCompile (default-testCompile) @ 
carbondata-processing ---
[INFO] Changes detected - recompiling the module!
[INFO] Compiling 7 source files to 

[INFO] 

Build failed in Jenkins: carbondata-master-spark-2.1 » Apache CarbonData :: Hadoop #2047

2018-02-09 Thread Apache Jenkins Server
See 


Changes:

[chenliang613] [HOTFIX] Upgrade pom version from 1.3.0 to 1.4.0

--
[INFO] 
[INFO] 
[INFO] Building Apache CarbonData :: Hadoop 1.4.0-SNAPSHOT
[INFO] 
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/carbondata-processing-1.4.0-SNAPSHOT.pom
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/carbondata-processing-1.4.0-SNAPSHOT.pom
[WARNING] The POM for 
org.apache.carbondata:carbondata-processing:jar:1.4.0-SNAPSHOT is missing, no 
dependency information available
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/carbondata-processing-1.4.0-SNAPSHOT.jar
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/carbondata-processing-1.4.0-SNAPSHOT.jar


Build failed in Jenkins: carbondata-master-spark-2.2 #83

2018-02-09 Thread Apache Jenkins Server
See 


Changes:

[chenliang613] [HOTFIX] Upgrade pom version from 1.3.0 to 1.4.0

--
[...truncated 62.07 KB...]
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-core/1.3.0-SNAPSHOT/carbondata-core-1.3.0-20180204.011833-16.pom
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-core/1.3.0-SNAPSHOT/carbondata-core-1.3.0-20180204.011833-16.pom
[INFO] Downloaded: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-core/1.3.0-SNAPSHOT/carbondata-core-1.3.0-20180204.011833-16.pom
 (6 KB at 16.0 KB/sec)
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-format/1.3.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-format/1.3.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloaded: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-format/1.3.0-SNAPSHOT/maven-metadata.xml
 (791 B at 2.4 KB/sec)
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-format/1.3.0-SNAPSHOT/carbondata-format-1.3.0-20180204.011731-16.pom
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-format/1.3.0-SNAPSHOT/carbondata-format-1.3.0-20180204.011731-16.pom
[INFO] Downloaded: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-format/1.3.0-SNAPSHOT/carbondata-format-1.3.0-20180204.011731-16.pom
 (3 KB at 8.2 KB/sec)
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-common/1.3.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-common/1.3.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloaded: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-common/1.3.0-SNAPSHOT/maven-metadata.xml
 (791 B at 2.5 KB/sec)
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-common/1.3.0-SNAPSHOT/carbondata-common-1.3.0-20180204.011654-17.pom
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-common/1.3.0-SNAPSHOT/carbondata-common-1.3.0-20180204.011654-17.pom
[INFO] Downloaded: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-common/1.3.0-SNAPSHOT/carbondata-common-1.3.0-20180204.011654-17.pom
 (3 KB at 7.6 KB/sec)
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-core/1.3.0-SNAPSHOT/carbondata-core-1.3.0-20180204.011833-16.jar
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-format/1.3.0-SNAPSHOT/carbondata-format-1.3.0-20180204.011731-16.jar
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-common/1.3.0-SNAPSHOT/carbondata-common-1.3.0-20180204.011654-17.jar
[INFO] Downloaded: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-common/1.3.0-SNAPSHOT/carbondata-common-1.3.0-20180204.011654-17.jar
 (25 KB at 63.9 KB/sec)
[INFO] Downloaded: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-format/1.3.0-SNAPSHOT/carbondata-format-1.3.0-20180204.011731-16.jar
 (495 KB at 754.5 KB/sec)
[INFO] Downloaded: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-core/1.3.0-SNAPSHOT/carbondata-core-1.3.0-20180204.011833-16.jar
 (1303 KB at 1759.7 KB/sec)
[INFO] 
[INFO] --- maven-clean-plugin:2.5:clean (default-clean) @ carbondata-processing 
---
[INFO] Deleting 

[INFO] 
[INFO] --- jacoco-maven-plugin:0.7.9:prepare-agent (default-prepare-agent) @ 
carbondata-processing ---
[INFO] argLine set to 
-javaagent:/home/jenkins/jenkins-slave/maven-repositories/1/org/jacoco/org.jacoco.agent/0.7.9/org.jacoco.agent-0.7.9-runtime.jar=destfile=
[INFO] 
[INFO] --- maven-remote-resources-plugin:1.5:process (default) @ 
carbondata-processing ---
[INFO] 
[INFO] --- maven-resources-plugin:2.7:resources (default-resources) @ 
carbondata-processing ---
[INFO] Using 'UTF-8' encoding to copy filtered resources.
[INFO] Copying 1 resource
[INFO] Copying 1 resource
[INFO] Copying 3 resources
[INFO] 
[INFO] --- maven-compiler-plugin:3.2:compile (default-compile) @ 
carbondata-processing ---
[INFO] Changes detected - recompiling the module!
[INFO] Compiling 145 source files to 

[INFO] :

Build failed in Jenkins: carbondata-master-spark-2.2 » Apache CarbonData :: Hadoop #83

2018-02-09 Thread Apache Jenkins Server
See 


Changes:

[chenliang613] [HOTFIX] Upgrade pom version from 1.3.0 to 1.4.0

--
[INFO] 
[INFO] 
[INFO] Building Apache CarbonData :: Hadoop 1.4.0-SNAPSHOT
[INFO] 
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/maven-metadata.xml
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/carbondata-processing-1.4.0-SNAPSHOT.pom
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/carbondata-processing-1.4.0-SNAPSHOT.pom
[WARNING] The POM for 
org.apache.carbondata:carbondata-processing:jar:1.4.0-SNAPSHOT is missing, no 
dependency information available
[INFO] Downloading: 
http://repo1.maven.org/maven2/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/carbondata-processing-1.4.0-SNAPSHOT.jar
[INFO] Downloading: 
http://repository.apache.org/snapshots/org/apache/carbondata/carbondata-processing/1.4.0-SNAPSHOT/carbondata-processing-1.4.0-SNAPSHOT.jar


Jenkins build is back to stable : carbondata-master-spark-2.2 #82

2018-02-09 Thread Apache Jenkins Server
See 




Jenkins build is back to stable : carbondata-master-spark-2.2 » Apache CarbonData :: Spark Common Test #82

2018-02-09 Thread Apache Jenkins Server
See 




carbondata git commit: [HOTFIX] Upgrade pom version from 1.3.0 to 1.4.0

2018-02-09 Thread chenliang613
Repository: carbondata
Updated Branches:
  refs/heads/master 11a795cec -> 957a51fef


[HOTFIX] Upgrade pom version from 1.3.0 to 1.4.0

Upgrade pom version from 1.3.0 to 1.4.0

This closes #1961


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/957a51fe
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/957a51fe
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/957a51fe

Branch: refs/heads/master
Commit: 957a51fefdfcd810a53e3df6699ee073d5873a15
Parents: 11a795c
Author: chenliang613 
Authored: Fri Feb 9 16:17:37 2018 +0800
Committer: chenliang613 
Committed: Fri Feb 9 16:20:21 2018 +0800

--
 assembly/pom.xml  | 2 +-
 common/pom.xml| 2 +-
 core/pom.xml  | 2 +-
 examples/flink/pom.xml| 2 +-
 examples/spark2/pom.xml   | 2 +-
 format/pom.xml| 2 +-
 hadoop/pom.xml| 2 +-
 integration/hive/pom.xml  | 2 +-
 integration/presto/pom.xml| 2 +-
 integration/spark-common-cluster-test/pom.xml | 2 +-
 integration/spark-common-test/pom.xml | 2 +-
 integration/spark-common/pom.xml  | 2 +-
 integration/spark2/pom.xml| 2 +-
 pom.xml   | 2 +-
 streaming/pom.xml | 2 +-
 15 files changed, 15 insertions(+), 15 deletions(-)
--
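
All 15 module poms change in the same one-line way: only the parent version
element is bumped from 1.3.0-SNAPSHOT to 1.4.0-SNAPSHOT, as the diffs below
show. As a side note (not part of this commit), the same bump can be applied
across every module with the versions-maven-plugin instead of editing each
pom by hand:

mvn versions:set -DnewVersion=1.4.0-SNAPSHOT
mvn versions:commit

versions:set rewrites the version in the parent and in every child module
that references it; versions:commit removes the pom.xml.versionsBackup files
it leaves behind.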


http://git-wip-us.apache.org/repos/asf/carbondata/blob/957a51fe/assembly/pom.xml
--
diff --git a/assembly/pom.xml b/assembly/pom.xml
index 76ad8a5..effc271 100644
--- a/assembly/pom.xml
+++ b/assembly/pom.xml
@@ -22,7 +22,7 @@
   <parent>
     <groupId>org.apache.carbondata</groupId>
     <artifactId>carbondata-parent</artifactId>
-    <version>1.3.0-SNAPSHOT</version>
+    <version>1.4.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/957a51fe/common/pom.xml
--
diff --git a/common/pom.xml b/common/pom.xml
index 6343361..5550129 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -22,7 +22,7 @@
   <parent>
     <groupId>org.apache.carbondata</groupId>
     <artifactId>carbondata-parent</artifactId>
-    <version>1.3.0-SNAPSHOT</version>
+    <version>1.4.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/957a51fe/core/pom.xml
--
diff --git a/core/pom.xml b/core/pom.xml
index f874615..92c9607 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -22,7 +22,7 @@
   <parent>
     <groupId>org.apache.carbondata</groupId>
     <artifactId>carbondata-parent</artifactId>
-    <version>1.3.0-SNAPSHOT</version>
+    <version>1.4.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/957a51fe/examples/flink/pom.xml
--
diff --git a/examples/flink/pom.xml b/examples/flink/pom.xml
index aa2a14f..75913cf 100644
--- a/examples/flink/pom.xml
+++ b/examples/flink/pom.xml
@@ -22,7 +22,7 @@
   <parent>
     <groupId>org.apache.carbondata</groupId>
     <artifactId>carbondata-parent</artifactId>
-    <version>1.3.0-SNAPSHOT</version>
+    <version>1.4.0-SNAPSHOT</version>
     <relativePath>../../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/957a51fe/examples/spark2/pom.xml
--
diff --git a/examples/spark2/pom.xml b/examples/spark2/pom.xml
index da39f1d..c17f0ee 100644
--- a/examples/spark2/pom.xml
+++ b/examples/spark2/pom.xml
@@ -22,7 +22,7 @@
   <parent>
     <groupId>org.apache.carbondata</groupId>
     <artifactId>carbondata-parent</artifactId>
-    <version>1.3.0-SNAPSHOT</version>
+    <version>1.4.0-SNAPSHOT</version>
     <relativePath>../../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/957a51fe/format/pom.xml
--
diff --git a/format/pom.xml b/format/pom.xml
index ae84f0b..41197cf 100644
--- a/format/pom.xml
+++ b/format/pom.xml
@@ -22,7 +22,7 @@
   <parent>
     <groupId>org.apache.carbondata</groupId>
     <artifactId>carbondata-parent</artifactId>
-    <version>1.3.0-SNAPSHOT</version>
+    <version>1.4.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/957a51fe/hadoop/pom.xml
--
diff --git a/hadoop/pom.xml b/hadoop/pom.xml
index 206246f..2aaac99 100644
--- a/hadoop/pom.xml
+++ b/hadoop/pom.xml
@@ -22,7 +22,7 @@
   <parent>
     <groupId>org.apache.carbondata</groupId>
     <artifactId>carbondata-parent</artifactId>
-    <version>1.3.0-SNAPSHOT</version>
+    <version>1.4.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/957a51fe/integration/hive/pom.xml
--
diff --git a/integration/hive/pom.xml b/integration/hive/pom.xml
index e0ad499..68245db 100644
--- a/integration/hive/pom.xml
+++ b/integration/hive/pom.xml
@@ -22,7 +22,7 @@
 

[18/35] carbondata git commit: [CARBONDATA-2099] Refactor query scan process to improve readability

2018-02-09 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/4749ca29/core/src/main/java/org/apache/carbondata/core/scan/processor/DataBlockIterator.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/scan/processor/DataBlockIterator.java
 
b/core/src/main/java/org/apache/carbondata/core/scan/processor/DataBlockIterator.java
new file mode 100644
index 000..fde4e55
--- /dev/null
+++ 
b/core/src/main/java/org/apache/carbondata/core/scan/processor/DataBlockIterator.java
@@ -0,0 +1,269 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.core.scan.processor;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.carbondata.common.CarbonIterator;
+import org.apache.carbondata.core.datastore.DataRefNode;
+import org.apache.carbondata.core.datastore.FileReader;
+import org.apache.carbondata.core.scan.collector.ResultCollectorFactory;
+import org.apache.carbondata.core.scan.collector.ScannedResultCollector;
+import org.apache.carbondata.core.scan.executor.infos.BlockExecutionInfo;
+import org.apache.carbondata.core.scan.result.BlockletScannedResult;
+import org.apache.carbondata.core.scan.result.vector.CarbonColumnarBatch;
+import org.apache.carbondata.core.scan.scanner.BlockletScanner;
+import org.apache.carbondata.core.scan.scanner.impl.BlockletFilterScanner;
+import org.apache.carbondata.core.scan.scanner.impl.BlockletFullScanner;
+import org.apache.carbondata.core.stats.QueryStatisticsModel;
+import org.apache.carbondata.core.util.TaskMetricsMap;
+
+/**
+ * This class provides an implementation of the block iterator:
+ * it returns the scanned rows of a block batch by batch.
+ */
+public class DataBlockIterator extends CarbonIterator<List<Object[]>> {
+
+  /**
+   * iterator which will be used to iterate over blocklets
+   */
+  private BlockletIterator blockletIterator;
+
+  /**
+   * result collector which will be used to aggregate the scanned result
+   */
+  private ScannedResultCollector scannerResultAggregator;
+
+  /**
+   * scanner which will be used to process the blocklets; the scan can be
+   * either a filter scan or a full (non-filter) scan
+   */
+  private BlockletScanner blockletScanner;
+
+  /**
+   * batch size of result
+   */
+  private int batchSize;
+
+  private ExecutorService executorService;
+
+  private Future<List<Object[]>> future;
+
+  private Future<Void> futureIo;
+
+  private BlockletScannedResult scannedResult;
+
+  private BlockExecutionInfo blockExecutionInfo;
+
+  private FileReader fileReader;
+
+  private AtomicBoolean nextBlock;
+
+  private AtomicBoolean nextRead;
+
+  public DataBlockIterator(BlockExecutionInfo blockExecutionInfo, FileReader 
fileReader,
+  int batchSize, QueryStatisticsModel queryStatisticsModel, 
ExecutorService executorService) {
+this.blockExecutionInfo = blockExecutionInfo;
+this.fileReader = fileReader;
+blockletIterator = new 
BlockletIterator(blockExecutionInfo.getFirstDataBlock(),
+blockExecutionInfo.getNumberOfBlockToScan());
+if (blockExecutionInfo.getFilterExecuterTree() != null) {
+  blockletScanner = new BlockletFilterScanner(blockExecutionInfo, 
queryStatisticsModel);
+} else {
+  blockletScanner = new BlockletFullScanner(blockExecutionInfo, 
queryStatisticsModel);
+}
+this.scannerResultAggregator =
+ResultCollectorFactory.getScannedResultCollector(blockExecutionInfo);
+this.batchSize = batchSize;
+this.executorService = executorService;
+this.nextBlock = new AtomicBoolean(false);
+this.nextRead = new AtomicBoolean(false);
+  }
+
+  @Override
+  public List<Object[]> next() {
+List<Object[]> collectedResult = null;
+if (updateScanner()) {
+  collectedResult = 
this.scannerResultAggregator.collectResultInRow(scannedResult, batchSize);
+  while (collectedResult.size() < batchSize && updateScanner()) {
+List<Object[]> data = this.scannerResultAggregator
+
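
The archive cuts the hunk above off mid-method, but the visible lines already
show the contract of next(): rows are collected from the current scanned
result, and the scanner keeps advancing until batchSize rows have been
gathered or the data is exhausted. A self-contained sketch of that batching
pattern (the plain Iterator row source below is a simplification standing in
for the scanner/collector pair, not CarbonData's API):

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

// Sketch of the batch-accumulation pattern used by DataBlockIterator.next():
// keep pulling rows until the batch is full or the source is exhausted.
class BatchingIterator implements Iterator<List<Object[]>> {
  private final Iterator<Object[]> rowSource; // stand-in for the scanner
  private final int batchSize;

  BatchingIterator(Iterator<Object[]> rowSource, int batchSize) {
    this.rowSource = rowSource;
    this.batchSize = batchSize;
  }

  @Override public boolean hasNext() {
    return rowSource.hasNext();
  }

  // Mirrors the while (collectedResult.size() < batchSize && updateScanner())
  // loop above: a batch may span several blocklets, and only the last batch
  // may come back short.
  @Override public List<Object[]> next() {
    List<Object[]> batch = new ArrayList<>(batchSize);
    while (batch.size() < batchSize && rowSource.hasNext()) {
      batch.add(rowSource.next());
    }
    return batch;
  }
}

With a source of N rows and a batch size of 32, this hands out ceil(N / 32)
batches, which is the shape the real iterator's consumers rely on.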

[17/35] carbondata git commit: [CARBONDATA-2099] Refactor query scan process to improve readability

2018-02-09 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/4749ca29/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/PartitionSpliterRawResultIterator.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/PartitionSpliterRawResultIterator.java
 
b/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/PartitionSpliterRawResultIterator.java
index 553f85e..773fbd7 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/PartitionSpliterRawResultIterator.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/PartitionSpliterRawResultIterator.java
@@ -17,20 +17,15 @@
 package org.apache.carbondata.core.scan.result.iterator;
 
 import org.apache.carbondata.common.CarbonIterator;
-import org.apache.carbondata.common.logging.LogService;
-import org.apache.carbondata.common.logging.LogServiceFactory;
-import org.apache.carbondata.core.scan.result.BatchResult;
+import org.apache.carbondata.core.scan.result.RowBatch;
 
 public class PartitionSpliterRawResultIterator extends CarbonIterator<Object[]> {
 
-  private CarbonIterator<BatchResult> iterator;
-  private BatchResult batch;
+  private CarbonIterator<RowBatch> iterator;
+  private RowBatch batch;
   private int counter;
 
-  private static final LogService LOGGER =
-  
LogServiceFactory.getLogService(PartitionSpliterRawResultIterator.class.getName());
-
-  public PartitionSpliterRawResultIterator(CarbonIterator<BatchResult> iterator) {
+  public PartitionSpliterRawResultIterator(CarbonIterator<RowBatch> iterator) {
 this.iterator = iterator;
   }
 
@@ -65,7 +60,7 @@ public class PartitionSpliterRawResultIterator extends CarbonIterator<Object[]>
* @param batch
* @return
*/
-  private boolean checkBatchEnd(BatchResult batch) {
+  private boolean checkBatchEnd(RowBatch batch) {
 return !(counter < batch.getSize());
   }
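
Both raw-result iterators in this commit drain their batches the same way: a
row counter walks through the current RowBatch, and checkBatchEnd above (like
checkIfBatchIsProcessedCompletely in RawResultIterator below) reports when
the counter has passed the batch size so the next batch can be fetched. A
self-contained sketch of that pattern, with a simplified RowBatch stand-in
rather than CarbonData's class:

import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;

// Counter-per-batch drain pattern: rows are handed out one by one from the
// current batch; when the counter reaches the batch size, fetch the next one.
class RawRowIterator implements Iterator<Object[]> {
  static final class RowBatch { // simplified stand-in
    final List<Object[]> rows;
    RowBatch(List<Object[]> rows) { this.rows = rows; }
    int getSize() { return rows.size(); }
    Object[] getRow(int i) { return rows.get(i); }
  }

  private final Iterator<RowBatch> batches;
  private RowBatch batch;
  private int counter;

  RawRowIterator(Iterator<RowBatch> batches) { this.batches = batches; }

  // Same test as checkBatchEnd: the batch is done once counter == getSize().
  private boolean batchDrained() {
    return batch == null || !(counter < batch.getSize());
  }

  @Override public boolean hasNext() {
    while (batchDrained()) {
      if (!batches.hasNext()) return false;
      batch = batches.next();
      counter = 0; // reset the row counter for the fresh batch
    }
    return true;
  }

  @Override public Object[] next() {
    if (!hasNext()) throw new NoSuchElementException();
    return batch.getRow(counter++);
  }
}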
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/4749ca29/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/RawResultIterator.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/RawResultIterator.java
 
b/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/RawResultIterator.java
index 70d0958..1dd1595 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/RawResultIterator.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/RawResultIterator.java
@@ -21,7 +21,7 @@ import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.carbondata.core.datastore.block.SegmentProperties;
 import org.apache.carbondata.core.keygenerator.KeyGenException;
-import org.apache.carbondata.core.scan.result.BatchResult;
+import org.apache.carbondata.core.scan.result.RowBatch;
 import org.apache.carbondata.core.scan.wrappers.ByteArrayWrapper;
 
 /**
@@ -37,7 +37,7 @@ public class RawResultIterator extends CarbonIterator<Object[]> {
   /**
* Iterator of the Batch raw result.
*/
-  private CarbonIterator<BatchResult> detailRawQueryResultIterator;
+  private CarbonIterator<RowBatch> detailRawQueryResultIterator;
 
   /**
* Counter to maintain the row counter.
@@ -55,9 +55,9 @@ public class RawResultIterator extends CarbonIterator<Object[]> {
   /**
* batch of the result.
*/
-  private BatchResult batch;
+  private RowBatch batch;
 
-  public RawResultIterator(CarbonIterator<BatchResult> detailRawQueryResultIterator,
+  public RawResultIterator(CarbonIterator<RowBatch> detailRawQueryResultIterator,
   SegmentProperties sourceSegProperties, SegmentProperties 
destinationSegProperties) {
 this.detailRawQueryResultIterator = detailRawQueryResultIterator;
 this.sourceSegProperties = sourceSegProperties;
@@ -155,7 +155,7 @@ public class RawResultIterator extends CarbonIterator<Object[]> {
* @param batch
* @return
*/
-  private boolean checkIfBatchIsProcessedCompletely(BatchResult batch) {
+  private boolean checkIfBatchIsProcessedCompletely(RowBatch batch) {
 if (counter < batch.getSize()) {
   return false;
 } else {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/4749ca29/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/VectorDetailQueryResultIterator.java
--
diff --git 
a/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/VectorDetailQueryResultIterator.java
 
b/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/VectorDetailQueryResultIterator.java
index cc9710e..c7cb00d 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/VectorDetailQueryResultIterator.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/VectorDetailQueryResultIterator.java
@@ -35,10 +35,12 

[15/35] carbondata git commit: [CARBONDATA-2099] Refactor query scan process to improve readability

2018-02-09 Thread jackylk
http://git-wip-us.apache.org/repos/asf/carbondata/blob/4749ca29/core/src/test/java/org/apache/carbondata/core/util/CarbonUtilTest.java
--
diff --git 
a/core/src/test/java/org/apache/carbondata/core/util/CarbonUtilTest.java 
b/core/src/test/java/org/apache/carbondata/core/util/CarbonUtilTest.java
index 984efdb..c5b4d83 100644
--- a/core/src/test/java/org/apache/carbondata/core/util/CarbonUtilTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/util/CarbonUtilTest.java
@@ -30,7 +30,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.carbondata.core.datastore.block.TableBlockInfo;
-import 
org.apache.carbondata.core.datastore.chunk.impl.FixedLengthDimensionDataChunk;
+import 
org.apache.carbondata.core.datastore.chunk.impl.FixedLengthDimensionColumnPage;
 import org.apache.carbondata.core.datastore.columnar.ColumnGroupModel;
 import org.apache.carbondata.core.datastore.filesystem.LocalCarbonFile;
 import org.apache.carbondata.core.datastore.impl.FileFactory;
@@ -44,7 +44,7 @@ import org.apache.carbondata.core.metadata.encoder.Encoding;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonMeasure;
 import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
-import org.apache.carbondata.core.scan.model.QueryDimension;
+import org.apache.carbondata.core.scan.model.ProjectionDimension;
 
 import mockit.Mock;
 import mockit.MockUp;
@@ -266,8 +266,8 @@ public class CarbonUtilTest {
   @Test public void testToGetNextLesserValue() {
 byte[] dataChunks = { 5, 6, 7, 8, 9 };
 byte[] compareValues = { 7 };
-FixedLengthDimensionDataChunk fixedLengthDataChunk =
-new FixedLengthDimensionDataChunk(dataChunks, null, null, 5, 1);
+FixedLengthDimensionColumnPage fixedLengthDataChunk =
+new FixedLengthDimensionColumnPage(dataChunks, null, null, 5, 1);
 int result = CarbonUtil.nextLesserValueToTarget(2, fixedLengthDataChunk, 
compareValues);
 assertEquals(result, 1);
   }
@@ -275,8 +275,8 @@ public class CarbonUtilTest {
   @Test public void testToGetNextLesserValueToTarget() {
 byte[] dataChunks = { 7, 7, 7, 8, 9 };
 byte[] compareValues = { 7 };
-FixedLengthDimensionDataChunk fixedLengthDataChunk =
-new FixedLengthDimensionDataChunk(dataChunks, null, null, 5, 1);
+FixedLengthDimensionColumnPage fixedLengthDataChunk =
+new FixedLengthDimensionColumnPage(dataChunks, null, null, 5, 1);
 int result = CarbonUtil.nextLesserValueToTarget(2, fixedLengthDataChunk, 
compareValues);
 assertEquals(result, -1);
   }
@@ -284,8 +284,8 @@ public class CarbonUtilTest {
   @Test public void testToGetnextGreaterValue() {
 byte[] dataChunks = { 5, 6, 7, 8, 9 };
 byte[] compareValues = { 7 };
-FixedLengthDimensionDataChunk fixedLengthDataChunk =
-new FixedLengthDimensionDataChunk(dataChunks, null, null, 5, 1);
+FixedLengthDimensionColumnPage fixedLengthDataChunk =
+new FixedLengthDimensionColumnPage(dataChunks, null, null, 5, 1);
 int result = CarbonUtil.nextGreaterValueToTarget(2, fixedLengthDataChunk, 
compareValues, 5);
 assertEquals(result, 3);
   }
@@ -301,8 +301,8 @@ public class CarbonUtilTest {
   @Test public void testToGetnextGreaterValueToTarget() {
 byte[] dataChunks = { 5, 6, 7, 7, 7 };
 byte[] compareValues = { 7 };
-FixedLengthDimensionDataChunk fixedLengthDataChunk =
-new FixedLengthDimensionDataChunk(dataChunks, null, null, 5, 1);
+FixedLengthDimensionColumnPage fixedLengthDataChunk =
+new FixedLengthDimensionColumnPage(dataChunks, null, null, 5, 1);
 int result = CarbonUtil.nextGreaterValueToTarget(2, fixedLengthDataChunk, 
compareValues, 5);
 assertEquals(result, 5);
   }
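
The four tests above pin down the contract of nextLesserValueToTarget and
nextGreaterValueToTarget: starting from an index whose value equals the
compare value in a sorted column page, step outward past the duplicates and
return the index of the first strictly smaller (or greater) value, with -1
(or the row count) signalling that no such value exists in the page. A
standalone sketch of that contract (the real CarbonUtil methods work on a
dimension column page with a byte comparer; the plain byte[] here is a
simplification):

// Sketch of the "skip duplicates, return neighbour index" contract exercised
// by the CarbonUtilTest cases above.
public final class NeighbourIndex {

  // Walk left past values >= compareValue; -1 means no lesser value exists.
  static int nextLesserValueToTarget(int currentIndex, byte[] page, byte compareValue) {
    while (currentIndex >= 0 && page[currentIndex] >= compareValue) {
      currentIndex--;
    }
    return currentIndex;
  }

  // Walk right past values <= compareValue; numRows means no greater value.
  static int nextGreaterValueToTarget(int currentIndex, byte[] page, byte compareValue,
      int numRows) {
    while (currentIndex < numRows && page[currentIndex] <= compareValue) {
      currentIndex++;
    }
    return currentIndex;
  }

  public static void main(String[] args) {
    System.out.println(nextLesserValueToTarget(2, new byte[]{5, 6, 7, 8, 9}, (byte) 7));     // 1
    System.out.println(nextLesserValueToTarget(2, new byte[]{7, 7, 7, 8, 9}, (byte) 7));     // -1
    System.out.println(nextGreaterValueToTarget(2, new byte[]{5, 6, 7, 8, 9}, (byte) 7, 5)); // 3
    System.out.println(nextGreaterValueToTarget(2, new byte[]{5, 6, 7, 7, 7}, (byte) 7, 5)); // 5
  }
}
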
@@ -524,23 +524,23 @@ public class CarbonUtilTest {
   }
 
   @Test public void testToGetDictionaryEncodingArray() {
-QueryDimension column1 = new QueryDimension("Column1");
-QueryDimension column2 = new QueryDimension("Column2");
 ColumnSchema column1Schema = new ColumnSchema();
 ColumnSchema column2Schema = new ColumnSchema();
 column1Schema.setColumnName("Column1");
 List<Encoding> encoding = new ArrayList<>();
 encoding.add(Encoding.DICTIONARY);
 column1Schema.setEncodingList(encoding);
-column1.setDimension(new CarbonDimension(column1Schema, 1, 1, 1, 1));
+ProjectionDimension
+column1 = new ProjectionDimension(new CarbonDimension(column1Schema, 
1, 1, 1, 1));
 
 column2Schema.setColumnName("Column2");
 List<Encoding> encoding2 = new ArrayList<>();
 encoding2.add(Encoding.DELTA);
 column2Schema.setEncodingList(encoding2);
-column2.setDimension(new CarbonDimension(column2Schema, 1, 1, 1, 1));
+ProjectionDimension
+column2 = new ProjectionDimension(new CarbonDimension(column2Schema, 
1, 1, 1, 1));
 
-QueryDimension[]