Repository: carbondata
Updated Branches:
  refs/heads/master 7b56126b7 -> e8da88002


[CARBONDATA-2130] Fix some spelling errors in CarbonData

Fix some spelling errors in CarbonData:

cloumn => column
realtion => relation
parition => partition
Dimesion => Dimension
dictionay => dictionary

This closes #1930


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/e8da8800
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/e8da8800
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/e8da8800

Branch: refs/heads/master
Commit: e8da880021586aea0301b5b457d481da34bd9bb6
Parents: 7b56126
Author: xubo245 <[email protected]>
Authored: Mon Feb 5 17:10:27 2018 +0800
Committer: Jacky Li <[email protected]>
Committed: Sat Mar 31 13:14:13 2018 +0800

----------------------------------------------------------------------
 .../core/datastore/block/SegmentProperties.java |  12 +-
 ...essedDimChunkFileBasedPageLevelReaderV3.java |   2 +-
 ...CompressedMeasureChunkFileBasedReaderV3.java |   2 +-
 ...essedMsrChunkFileBasedPageLevelReaderV3.java |   2 +-
 .../chunk/store/DimensionChunkStoreFactory.java |   4 +-
 ...feVariableLengthDimensionDataChunkStore.java | 236 +++++++++++++++++++
 ...afeVariableLengthDimesionDataChunkStore.java | 236 -------------------
 .../service/DictionaryOnePassService.java       |   2 +-
 .../factory/KeyGeneratorFactory.java            |   6 +-
 .../ThriftWrapperSchemaConverterImpl.java       |  12 +-
 .../executor/impl/AbstractQueryExecutor.java    |   6 +-
 .../executor/impl/QueryExecutorProperties.java  |   2 +-
 .../core/util/CarbonLoadStatisticsDummy.java    |   2 +-
 .../core/util/CarbonLoadStatisticsImpl.java     |   2 +-
 .../carbondata/core/util/LoadStatistics.java    |   4 +-
 .../spark/rdd/NewCarbonDataLoadRDD.scala        |   2 +-
 .../spark/rdd/DataLoadPartitionCoalescer.scala  |   4 +-
 .../management/CarbonLoadDataCommand.scala      |   2 +-
 .../datasources/SparkCarbonTableFormat.scala    |   6 +-
 .../sql/optimizer/CarbonLateDecodeRule.scala    |   2 +-
 .../spark/sql/parser/CarbonSparkSqlParser.scala |   4 +-
 .../converter/impl/RowConverterImpl.java        |   2 +-
 .../partition/impl/DefaultLoadBalancer.java     |  12 +-
 23 files changed, 282 insertions(+), 282 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentProperties.java
----------------------------------------------------------------------
diff --git 
a/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentProperties.java
 
b/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentProperties.java
index c134db9..f652381 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentProperties.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentProperties.java
@@ -540,7 +540,7 @@ public class SegmentProperties {
     this.fixedLengthKeySplitter =
         new MultiDimKeyVarLengthVariableSplitGenerator(bitLength, 
dimensionPartitions);
     // get the size of each value in file block
-    int[] dictionayDimColumnValueSize = 
fixedLengthKeySplitter.getBlockKeySize();
+    int[] dictionaryDimColumnValueSize = 
fixedLengthKeySplitter.getBlockKeySize();
     int index = -1;
     this.eachDimColumnValueSize = new int[isDictionaryColumn.size()];
     for (int i = 0; i < eachDimColumnValueSize.length; i++) {
@@ -548,21 +548,21 @@ public class SegmentProperties {
         eachDimColumnValueSize[i] = -1;
         continue;
       }
-      eachDimColumnValueSize[i] = dictionayDimColumnValueSize[++index];
+      eachDimColumnValueSize[i] = dictionaryDimColumnValueSize[++index];
     }
     if (complexDimensions.size() > 0) {
-      int[] complexDimesionParition = new 
int[complexDimColumnCardinality.length];
+      int[] complexDimensionPartition = new 
int[complexDimColumnCardinality.length];
       // as complex dimension will be stored in column format add one
-      Arrays.fill(complexDimesionParition, 1);
+      Arrays.fill(complexDimensionPartition, 1);
       bitLength =
-          CarbonUtil.getDimensionBitLength(complexDimColumnCardinality, 
complexDimesionParition);
+          CarbonUtil.getDimensionBitLength(complexDimColumnCardinality, 
complexDimensionPartition);
       for (int i = 0; i < bitLength.length; i++) {
         if (complexDimColumnCardinality[i] == 0) {
           bitLength[i] = 64;
         }
       }
       ColumnarSplitter keySplitter =
-          new MultiDimKeyVarLengthVariableSplitGenerator(bitLength, 
complexDimesionParition);
+          new MultiDimKeyVarLengthVariableSplitGenerator(bitLength, 
complexDimensionPartition);
       eachComplexDimColumnValueSize = keySplitter.getBlockKeySize();
     } else {
       eachComplexDimColumnValueSize = new int[0];

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v3/CompressedDimChunkFileBasedPageLevelReaderV3.java
----------------------------------------------------------------------
diff --git 
a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v3/CompressedDimChunkFileBasedPageLevelReaderV3.java
 
b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v3/CompressedDimChunkFileBasedPageLevelReaderV3.java
index 60f0b67..3f41c2b 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v3/CompressedDimChunkFileBasedPageLevelReaderV3.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/dimension/v3/CompressedDimChunkFileBasedPageLevelReaderV3.java
@@ -80,7 +80,7 @@ public class CompressedDimChunkFileBasedPageLevelReaderV3
     // column other than last column we can subtract the offset of current 
column with
     // next column and get the total length.
     // but for last column we need to use lastDimensionOffset which is the end 
position
-    // of the last dimension, we can subtract current dimension offset from 
lastDimesionOffset
+    // of the last dimension, we can subtract current dimension offset from 
lastDimensionOffset
     if (dimensionChunksOffset.size() - 1 == blockletColumnIndex) {
       length = (int) (lastDimensionOffsets - currentDimensionOffset);
     } else {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v3/CompressedMeasureChunkFileBasedReaderV3.java
----------------------------------------------------------------------
diff --git 
a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v3/CompressedMeasureChunkFileBasedReaderV3.java
 
b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v3/CompressedMeasureChunkFileBasedReaderV3.java
index 6dc02a3..8336363 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v3/CompressedMeasureChunkFileBasedReaderV3.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v3/CompressedMeasureChunkFileBasedReaderV3.java
@@ -77,7 +77,7 @@ public class CompressedMeasureChunkFileBasedReaderV3 extends 
AbstractMeasureChun
     // column other than last column we can subtract the offset of current 
column with
     // next column and get the total length.
     // but for last column we need to use lastDimensionOffset which is the end 
position
-    // of the last dimension, we can subtract current dimension offset from 
lastDimesionOffset
+    // of the last dimension, we can subtract current dimension offset from 
lastDimensionOffset
     if (measureColumnChunkOffsets.size() - 1 == columnIndex) {
       dataLength = (int) (measureOffsets - 
measureColumnChunkOffsets.get(columnIndex));
     } else {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v3/CompressedMsrChunkFileBasedPageLevelReaderV3.java
----------------------------------------------------------------------
diff --git 
a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v3/CompressedMsrChunkFileBasedPageLevelReaderV3.java
 
b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v3/CompressedMsrChunkFileBasedPageLevelReaderV3.java
index 6b37575..2ebaa16 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v3/CompressedMsrChunkFileBasedPageLevelReaderV3.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/reader/measure/v3/CompressedMsrChunkFileBasedPageLevelReaderV3.java
@@ -73,7 +73,7 @@ public class CompressedMsrChunkFileBasedPageLevelReaderV3
     // column other than last column we can subtract the offset of current 
column with
     // next column and get the total length.
     // but for last column we need to use lastDimensionOffset which is the end 
position
-    // of the last dimension, we can subtract current dimension offset from 
lastDimesionOffset
+    // of the last dimension, we can subtract current dimension offset from 
lastDimensionOffset
     if (measureColumnChunkOffsets.size() - 1 == blockletColumnIndex) {
       dataLength = (int) (measureOffsets - 
measureColumnChunkOffsets.get(blockletColumnIndex));
     } else {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/DimensionChunkStoreFactory.java
----------------------------------------------------------------------
diff --git 
a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/DimensionChunkStoreFactory.java
 
b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/DimensionChunkStoreFactory.java
index 92927e7..f210641 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/DimensionChunkStoreFactory.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/DimensionChunkStoreFactory.java
@@ -21,7 +21,7 @@ import 
org.apache.carbondata.core.constants.CarbonCommonConstants;
 import 
org.apache.carbondata.core.datastore.chunk.store.impl.safe.SafeFixedLengthDimensionDataChunkStore;
 import 
org.apache.carbondata.core.datastore.chunk.store.impl.safe.SafeVariableLengthDimensionDataChunkStore;
 import 
org.apache.carbondata.core.datastore.chunk.store.impl.unsafe.UnsafeFixedLengthDimensionDataChunkStore;
-import 
org.apache.carbondata.core.datastore.chunk.store.impl.unsafe.UnsafeVariableLengthDimesionDataChunkStore;
+import 
org.apache.carbondata.core.datastore.chunk.store.impl.unsafe.UnsafeVariableLengthDimensionDataChunkStore;
 import org.apache.carbondata.core.util.CarbonProperties;
 
 /**
@@ -67,7 +67,7 @@ public class DimensionChunkStoreFactory {
         return new UnsafeFixedLengthDimensionDataChunkStore(totalSize, 
columnValueSize,
             isInvertedIndex, numberOfRows);
       } else {
-        return new UnsafeVariableLengthDimesionDataChunkStore(totalSize, 
isInvertedIndex,
+        return new UnsafeVariableLengthDimensionDataChunkStore(totalSize, 
isInvertedIndex,
             numberOfRows);
       }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableLengthDimensionDataChunkStore.java
----------------------------------------------------------------------
diff --git 
a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableLengthDimensionDataChunkStore.java
 
b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableLengthDimensionDataChunkStore.java
new file mode 100644
index 0000000..11f2ab8
--- /dev/null
+++ 
b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableLengthDimensionDataChunkStore.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.core.datastore.chunk.store.impl.unsafe;
+
+import java.nio.ByteBuffer;
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.memory.CarbonUnsafe;
+import org.apache.carbondata.core.metadata.datatype.DataType;
+import org.apache.carbondata.core.metadata.datatype.DataTypes;
+import org.apache.carbondata.core.scan.result.vector.CarbonColumnVector;
+import org.apache.carbondata.core.util.ByteUtil;
+import org.apache.carbondata.core.util.DataTypeUtil;
+
+/**
+ * Below class is responsible to store variable length dimension data chunk in
+ * memory Memory occupied can be on heap or offheap using unsafe interface
+ */
+public class UnsafeVariableLengthDimensionDataChunkStore
+    extends UnsafeAbstractDimensionDataChunkStore {
+
+  /**
+   * total number of rows
+   */
+  private int numberOfRows;
+
+  /**
+   * pointers offsets
+   */
+  private long dataPointersOffsets;
+
+  public UnsafeVariableLengthDimensionDataChunkStore(long totalSize, boolean 
isInvertedIdex,
+      int numberOfRows) {
+    super(totalSize, isInvertedIdex, numberOfRows);
+    this.numberOfRows = numberOfRows;
+  }
+
+  /**
+   * Below method will be used to put the rows and its metadata in offheap
+   *
+   * @param invertedIndex        inverted index to be stored
+   * @param invertedIndexReverse inverted index reverse to be stored
+   * @param data                 data to be stored
+   */
+  @Override public void putArray(final int[] invertedIndex, final int[] 
invertedIndexReverse,
+      byte[] data) {
+    // first put the data, inverted index and reverse inverted index to memory
+    super.putArray(invertedIndex, invertedIndexReverse, data);
+    // position from where offsets will start
+    this.dataPointersOffsets = this.invertedIndexReverseOffset;
+    if (isExplicitSorted) {
+      this.dataPointersOffsets += (long)numberOfRows * 
CarbonCommonConstants.INT_SIZE_IN_BYTE;
+    }
+    // As data is of variable length and data format is
+    // <length in short><data><length in short><data>
+    // we need to store offset of each data so data can be accessed directly
+    // for example:
+    //data = {0,5,1,2,3,4,5,0,6,0,1,2,3,4,5,0,2,8,9}
+    //so value stored in offset will be position of actual data
+    // [2,9,17]
+    // to store this value we need to get the actual data length + 2 bytes 
used for storing the
+    // length
+
+    // start position will be used to store the current data position
+    int startOffset = 0;
+    // position from where offsets will start
+    long pointerOffsets = this.dataPointersOffsets;
+    // as first position will be start from 2 byte as data is stored first in 
the memory block
+    // we need to skip first two bytes this is because first two bytes will be 
length of the data
+    // which we have to skip
+    CarbonUnsafe.getUnsafe().putInt(dataPageMemoryBlock.getBaseObject(),
+        dataPageMemoryBlock.getBaseOffset() + pointerOffsets,
+        CarbonCommonConstants.SHORT_SIZE_IN_BYTE);
+    // incrementing the pointers as first value is already filled and as we 
are storing as int
+    // we need to increment the 4 bytes to set the position of the next value 
to set
+    pointerOffsets += CarbonCommonConstants.INT_SIZE_IN_BYTE;
+    // creating a byte buffer which will wrap the length of the row
+    // using byte buffer as unsafe will return bytes in little-endian encoding
+    ByteBuffer buffer = 
ByteBuffer.allocate(CarbonCommonConstants.SHORT_SIZE_IN_BYTE);
+    // store length of data
+    byte[] length = new byte[CarbonCommonConstants.SHORT_SIZE_IN_BYTE];
+    // as first offset is already stored, we need to start from the 2nd row in 
data array
+    for (int i = 1; i < numberOfRows; i++) {
+      // first copy the length of previous row
+      CarbonUnsafe.getUnsafe().copyMemory(dataPageMemoryBlock.getBaseObject(),
+          dataPageMemoryBlock.getBaseOffset() + startOffset, length, 
CarbonUnsafe.BYTE_ARRAY_OFFSET,
+          CarbonCommonConstants.SHORT_SIZE_IN_BYTE);
+      buffer.put(length);
+      buffer.flip();
+      // so current row position will be
+      // previous row length + 2 bytes used for storing previous row data
+      startOffset += CarbonCommonConstants.SHORT_SIZE_IN_BYTE + 
buffer.getShort();
+      // as same byte buffer is used to avoid creating many byte buffer for 
each row
+      // we need to clear the byte buffer
+      buffer.clear();
+      // now put the offset of current row, here we need to add 2 more bytes 
as current will
+      // also have length part so we have to skip length
+      CarbonUnsafe.getUnsafe().putInt(dataPageMemoryBlock.getBaseObject(),
+          dataPageMemoryBlock.getBaseOffset() + pointerOffsets,
+          startOffset + CarbonCommonConstants.SHORT_SIZE_IN_BYTE);
+      // incrementing the pointers as first value is already filled and as we 
are storing as int
+      // we need to increment the 4 bytes to set the position of the next 
value to set
+      pointerOffsets += CarbonCommonConstants.INT_SIZE_IN_BYTE;
+    }
+
+  }
+
+  /**
+   * Below method will be used to get the row based on row id passed
+   *
+   * @param rowId
+   * @return row
+   */
+  @Override public byte[] getRow(int rowId) {
+    // if column was explicitly sorted we need to get the rowid based inverted 
index reverse
+    if (isExplicitSorted) {
+      rowId = 
CarbonUnsafe.getUnsafe().getInt(dataPageMemoryBlock.getBaseObject(),
+          dataPageMemoryBlock.getBaseOffset() + 
this.invertedIndexReverseOffset + ((long)rowId
+              * CarbonCommonConstants.INT_SIZE_IN_BYTE));
+    }
+    // now to get the row from memory block we need to do following thing
+    // 1. first get the current offset
+    // 2. if it's not a last row- get the next row offset
+    // Subtract the current row offset + 2 bytes(to skip the data length) with 
next row offset
+    // else subtract the current row offset + 2 bytes(to skip the data length)
+    // with complete data length
+    int currentDataOffset = 
CarbonUnsafe.getUnsafe().getInt(dataPageMemoryBlock.getBaseObject(),
+        dataPageMemoryBlock.getBaseOffset() + this.dataPointersOffsets + (rowId
+            * CarbonCommonConstants.INT_SIZE_IN_BYTE));
+    short length = 0;
+    // calculating the length of data
+    if (rowId < numberOfRows - 1) {
+      int OffsetOfNextdata = 
CarbonUnsafe.getUnsafe().getInt(dataPageMemoryBlock.getBaseObject(),
+          dataPageMemoryBlock.getBaseOffset() + this.dataPointersOffsets + 
((rowId + 1)
+              * CarbonCommonConstants.INT_SIZE_IN_BYTE));
+      length = (short) (OffsetOfNextdata - (currentDataOffset
+          + CarbonCommonConstants.SHORT_SIZE_IN_BYTE));
+    } else {
+      // for last record we need to subtract with data length
+      length = (short) (this.dataLength - currentDataOffset);
+    }
+    byte[] data = new byte[length];
+    CarbonUnsafe.getUnsafe().copyMemory(dataPageMemoryBlock.getBaseObject(),
+        dataPageMemoryBlock.getBaseOffset() + currentDataOffset, data,
+        CarbonUnsafe.BYTE_ARRAY_OFFSET, length);
+    return data;
+  }
+
+  @Override public void fillRow(int rowId, CarbonColumnVector vector, int 
vectorRow) {
+    byte[] value = getRow(rowId);
+    DataType dt = vector.getType();
+    if ((!(dt == DataTypes.STRING) && value.length == 0) || 
ByteUtil.UnsafeComparer.INSTANCE
+        .equals(CarbonCommonConstants.MEMBER_DEFAULT_VAL_ARRAY, value)) {
+      vector.putNull(vectorRow);
+    } else {
+      if (dt == DataTypes.STRING) {
+        vector.putBytes(vectorRow, 0, value.length, value);
+      } else if (dt == DataTypes.BOOLEAN) {
+        vector.putBoolean(vectorRow, ByteUtil.toBoolean(value[0]));
+      } else if (dt == DataTypes.SHORT) {
+        vector.putShort(vectorRow, ByteUtil.toShort(value, 0, value.length));
+      } else if (dt == DataTypes.INT) {
+        vector.putInt(vectorRow, ByteUtil.toInt(value, 0, value.length));
+      } else if (dt == DataTypes.LONG) {
+        vector.putLong(vectorRow,
+            DataTypeUtil.getDataBasedOnRestructuredDataType(value, 
vector.getBlockDataType(), 0,
+                value.length));
+      } else if (dt == DataTypes.TIMESTAMP) {
+        vector.putLong(vectorRow, ByteUtil.toLong(value, 0, value.length) * 
1000L);
+      }
+    }
+  }
+
+  /**
+   * to compare the two byte array
+   *
+   * @param rowId index of first byte array
+   * @param compareValue value of to be compared
+   * @return compare result
+   */
+  @Override public int compareTo(int rowId, byte[] compareValue) {
+    // now to get the row from memory block we need to do following thing
+    // 1. first get the current offset
+    // 2. if it's not a last row- get the next row offset
+    // Subtract the current row offset + 2 bytes(to skip the data length) with 
next row offset
+    // else subtract the current row offset
+    // with complete data length get the offset of set of data
+    int currentDataOffset = 
CarbonUnsafe.getUnsafe().getInt(dataPageMemoryBlock.getBaseObject(),
+        dataPageMemoryBlock.getBaseOffset() + this.dataPointersOffsets + 
((long) rowId
+            * CarbonCommonConstants.INT_SIZE_IN_BYTE * 1L));
+    short length = 0;
+    // calculating the length of data
+    if (rowId < numberOfRows - 1) {
+      int OffsetOfNextdata = 
CarbonUnsafe.getUnsafe().getInt(dataPageMemoryBlock.getBaseObject(),
+          dataPageMemoryBlock.getBaseOffset() + this.dataPointersOffsets + 
((rowId + 1)
+              * CarbonCommonConstants.INT_SIZE_IN_BYTE));
+      length = (short) (OffsetOfNextdata - (currentDataOffset
+          + CarbonCommonConstants.SHORT_SIZE_IN_BYTE));
+    } else {
+      // for last record we need to subtract with data length
+      length = (short) (this.dataLength - currentDataOffset);
+    }
+    // as this class handles this variable length data, so filter value can be
+    // smaller or bigger than than actual data, so we need to take the smaller 
length
+    int compareResult;
+    int compareLength = Math.min(length , compareValue.length);
+    for (int i = 0; i < compareLength; i++) {
+      compareResult = 
(CarbonUnsafe.getUnsafe().getByte(dataPageMemoryBlock.getBaseObject(),
+          dataPageMemoryBlock.getBaseOffset() + currentDataOffset) & 0xff) - 
(compareValue[i]
+          & 0xff);
+      // if compare result is not equal we can break
+      if (compareResult != 0) {
+        return compareResult;
+      }
+      // increment the offset by one as comparison is done byte by byte
+      currentDataOffset++;
+    }
+    return length - compareValue.length;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableLengthDimesionDataChunkStore.java
----------------------------------------------------------------------
diff --git 
a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableLengthDimesionDataChunkStore.java
 
b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableLengthDimesionDataChunkStore.java
deleted file mode 100644
index 0321ee7..0000000
--- 
a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/unsafe/UnsafeVariableLengthDimesionDataChunkStore.java
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.carbondata.core.datastore.chunk.store.impl.unsafe;
-
-import java.nio.ByteBuffer;
-
-import org.apache.carbondata.core.constants.CarbonCommonConstants;
-import org.apache.carbondata.core.memory.CarbonUnsafe;
-import org.apache.carbondata.core.metadata.datatype.DataType;
-import org.apache.carbondata.core.metadata.datatype.DataTypes;
-import org.apache.carbondata.core.scan.result.vector.CarbonColumnVector;
-import org.apache.carbondata.core.util.ByteUtil;
-import org.apache.carbondata.core.util.DataTypeUtil;
-
-/**
- * Below class is responsible to store variable length dimension data chunk in
- * memory Memory occupied can be on heap or offheap using unsafe interface
- */
-public class UnsafeVariableLengthDimesionDataChunkStore
-    extends UnsafeAbstractDimensionDataChunkStore {
-
-  /**
-   * total number of rows
-   */
-  private int numberOfRows;
-
-  /**
-   * pointers offsets
-   */
-  private long dataPointersOffsets;
-
-  public UnsafeVariableLengthDimesionDataChunkStore(long totalSize, boolean 
isInvertedIdex,
-      int numberOfRows) {
-    super(totalSize, isInvertedIdex, numberOfRows);
-    this.numberOfRows = numberOfRows;
-  }
-
-  /**
-   * Below method will be used to put the rows and its metadata in offheap
-   *
-   * @param invertedIndex        inverted index to be stored
-   * @param invertedIndexReverse inverted index reverse to be stored
-   * @param data                 data to be stored
-   */
-  @Override public void putArray(final int[] invertedIndex, final int[] 
invertedIndexReverse,
-      byte[] data) {
-    // first put the data, inverted index and reverse inverted index to memory
-    super.putArray(invertedIndex, invertedIndexReverse, data);
-    // position from where offsets will start
-    this.dataPointersOffsets = this.invertedIndexReverseOffset;
-    if (isExplicitSorted) {
-      this.dataPointersOffsets += (long)numberOfRows * 
CarbonCommonConstants.INT_SIZE_IN_BYTE;
-    }
-    // As data is of variable length and data format is
-    // <length in short><data><length in short><data>
-    // we need to store offset of each data so data can be accessed directly
-    // for example:
-    //data = {0,5,1,2,3,4,5,0,6,0,1,2,3,4,5,0,2,8,9}
-    //so value stored in offset will be position of actual data
-    // [2,9,17]
-    // to store this value we need to get the actual data length + 2 bytes 
used for storing the
-    // length
-
-    // start position will be used to store the current data position
-    int startOffset = 0;
-    // position from where offsets will start
-    long pointerOffsets = this.dataPointersOffsets;
-    // as first position will be start from 2 byte as data is stored first in 
the memory block
-    // we need to skip first two bytes this is because first two bytes will be 
length of the data
-    // which we have to skip
-    CarbonUnsafe.getUnsafe().putInt(dataPageMemoryBlock.getBaseObject(),
-        dataPageMemoryBlock.getBaseOffset() + pointerOffsets,
-        CarbonCommonConstants.SHORT_SIZE_IN_BYTE);
-    // incrementing the pointers as first value is already filled and as we 
are storing as int
-    // we need to increment the 4 bytes to set the position of the next value 
to set
-    pointerOffsets += CarbonCommonConstants.INT_SIZE_IN_BYTE;
-    // creating a byte buffer which will wrap the length of the row
-    // using byte buffer as unsafe will return bytes in little-endian encoding
-    ByteBuffer buffer = 
ByteBuffer.allocate(CarbonCommonConstants.SHORT_SIZE_IN_BYTE);
-    // store length of data
-    byte[] length = new byte[CarbonCommonConstants.SHORT_SIZE_IN_BYTE];
-    // as first offset is already stored, we need to start from the 2nd row in 
data array
-    for (int i = 1; i < numberOfRows; i++) {
-      // first copy the length of previous row
-      CarbonUnsafe.getUnsafe().copyMemory(dataPageMemoryBlock.getBaseObject(),
-          dataPageMemoryBlock.getBaseOffset() + startOffset, length, 
CarbonUnsafe.BYTE_ARRAY_OFFSET,
-          CarbonCommonConstants.SHORT_SIZE_IN_BYTE);
-      buffer.put(length);
-      buffer.flip();
-      // so current row position will be
-      // previous row length + 2 bytes used for storing previous row data
-      startOffset += CarbonCommonConstants.SHORT_SIZE_IN_BYTE + 
buffer.getShort();
-      // as same byte buffer is used to avoid creating many byte buffer for 
each row
-      // we need to clear the byte buffer
-      buffer.clear();
-      // now put the offset of current row, here we need to add 2 more bytes 
as current will
-      // also have length part so we have to skip length
-      CarbonUnsafe.getUnsafe().putInt(dataPageMemoryBlock.getBaseObject(),
-          dataPageMemoryBlock.getBaseOffset() + pointerOffsets,
-          startOffset + CarbonCommonConstants.SHORT_SIZE_IN_BYTE);
-      // incrementing the pointers as first value is already filled and as we 
are storing as int
-      // we need to increment the 4 bytes to set the position of the next 
value to set
-      pointerOffsets += CarbonCommonConstants.INT_SIZE_IN_BYTE;
-    }
-
-  }
-
-  /**
-   * Below method will be used to get the row based on row id passed
-   *
-   * @param rowId
-   * @return row
-   */
-  @Override public byte[] getRow(int rowId) {
-    // if column was explicitly sorted we need to get the rowid based inverted 
index reverse
-    if (isExplicitSorted) {
-      rowId = 
CarbonUnsafe.getUnsafe().getInt(dataPageMemoryBlock.getBaseObject(),
-          dataPageMemoryBlock.getBaseOffset() + 
this.invertedIndexReverseOffset + ((long)rowId
-              * CarbonCommonConstants.INT_SIZE_IN_BYTE));
-    }
-    // now to get the row from memory block we need to do following thing
-    // 1. first get the current offset
-    // 2. if it's not a last row- get the next row offset
-    // Subtract the current row offset + 2 bytes(to skip the data length) with 
next row offset
-    // else subtract the current row offset + 2 bytes(to skip the data length)
-    // with complete data length
-    int currentDataOffset = 
CarbonUnsafe.getUnsafe().getInt(dataPageMemoryBlock.getBaseObject(),
-        dataPageMemoryBlock.getBaseOffset() + this.dataPointersOffsets + (rowId
-            * CarbonCommonConstants.INT_SIZE_IN_BYTE));
-    short length = 0;
-    // calculating the length of data
-    if (rowId < numberOfRows - 1) {
-      int OffsetOfNextdata = 
CarbonUnsafe.getUnsafe().getInt(dataPageMemoryBlock.getBaseObject(),
-          dataPageMemoryBlock.getBaseOffset() + this.dataPointersOffsets + 
((rowId + 1)
-              * CarbonCommonConstants.INT_SIZE_IN_BYTE));
-      length = (short) (OffsetOfNextdata - (currentDataOffset
-          + CarbonCommonConstants.SHORT_SIZE_IN_BYTE));
-    } else {
-      // for last record we need to subtract with data length
-      length = (short) (this.dataLength - currentDataOffset);
-    }
-    byte[] data = new byte[length];
-    CarbonUnsafe.getUnsafe().copyMemory(dataPageMemoryBlock.getBaseObject(),
-        dataPageMemoryBlock.getBaseOffset() + currentDataOffset, data,
-        CarbonUnsafe.BYTE_ARRAY_OFFSET, length);
-    return data;
-  }
-
-  @Override public void fillRow(int rowId, CarbonColumnVector vector, int 
vectorRow) {
-    byte[] value = getRow(rowId);
-    DataType dt = vector.getType();
-    if ((!(dt == DataTypes.STRING) && value.length == 0) || 
ByteUtil.UnsafeComparer.INSTANCE
-        .equals(CarbonCommonConstants.MEMBER_DEFAULT_VAL_ARRAY, value)) {
-      vector.putNull(vectorRow);
-    } else {
-      if (dt == DataTypes.STRING) {
-        vector.putBytes(vectorRow, 0, value.length, value);
-      } else if (dt == DataTypes.BOOLEAN) {
-        vector.putBoolean(vectorRow, ByteUtil.toBoolean(value[0]));
-      } else if (dt == DataTypes.SHORT) {
-        vector.putShort(vectorRow, ByteUtil.toShort(value, 0, value.length));
-      } else if (dt == DataTypes.INT) {
-        vector.putInt(vectorRow, ByteUtil.toInt(value, 0, value.length));
-      } else if (dt == DataTypes.LONG) {
-        vector.putLong(vectorRow,
-            DataTypeUtil.getDataBasedOnRestructuredDataType(value, 
vector.getBlockDataType(), 0,
-                value.length));
-      } else if (dt == DataTypes.TIMESTAMP) {
-        vector.putLong(vectorRow, ByteUtil.toLong(value, 0, value.length) * 
1000L);
-      }
-    }
-  }
-
-  /**
-   * to compare the two byte array
-   *
-   * @param rowId index of first byte array
-   * @param compareValue value of to be compared
-   * @return compare result
-   */
-  @Override public int compareTo(int rowId, byte[] compareValue) {
-    // now to get the row from memory block we need to do following thing
-    // 1. first get the current offset
-    // 2. if it's not a last row- get the next row offset
-    // Subtract the current row offset + 2 bytes(to skip the data length) with 
next row offset
-    // else subtract the current row offset
-    // with complete data length get the offset of set of data
-    int currentDataOffset = 
CarbonUnsafe.getUnsafe().getInt(dataPageMemoryBlock.getBaseObject(),
-        dataPageMemoryBlock.getBaseOffset() + this.dataPointersOffsets + 
((long) rowId
-            * CarbonCommonConstants.INT_SIZE_IN_BYTE * 1L));
-    short length = 0;
-    // calculating the length of data
-    if (rowId < numberOfRows - 1) {
-      int OffsetOfNextdata = 
CarbonUnsafe.getUnsafe().getInt(dataPageMemoryBlock.getBaseObject(),
-          dataPageMemoryBlock.getBaseOffset() + this.dataPointersOffsets + 
((rowId + 1)
-              * CarbonCommonConstants.INT_SIZE_IN_BYTE));
-      length = (short) (OffsetOfNextdata - (currentDataOffset
-          + CarbonCommonConstants.SHORT_SIZE_IN_BYTE));
-    } else {
-      // for last record we need to subtract with data length
-      length = (short) (this.dataLength - currentDataOffset);
-    }
-    // as this class handles this variable length data, so filter value can be
-    // smaller or bigger than than actual data, so we need to take the smaller 
length
-    int compareResult;
-    int compareLength = Math.min(length , compareValue.length);
-    for (int i = 0; i < compareLength; i++) {
-      compareResult = 
(CarbonUnsafe.getUnsafe().getByte(dataPageMemoryBlock.getBaseObject(),
-          dataPageMemoryBlock.getBaseOffset() + currentDataOffset) & 0xff) - 
(compareValue[i]
-          & 0xff);
-      // if compare result is not equal we can break
-      if (compareResult != 0) {
-        return compareResult;
-      }
-      // increment the offset by one as comparison is done byte by byte
-      currentDataOffset++;
-    }
-    return length - compareValue.length;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/core/src/main/java/org/apache/carbondata/core/dictionary/service/DictionaryOnePassService.java
----------------------------------------------------------------------
diff --git 
a/core/src/main/java/org/apache/carbondata/core/dictionary/service/DictionaryOnePassService.java
 
b/core/src/main/java/org/apache/carbondata/core/dictionary/service/DictionaryOnePassService.java
index 1625f40..48c6295 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/dictionary/service/DictionaryOnePassService.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/dictionary/service/DictionaryOnePassService.java
@@ -24,7 +24,7 @@ public class DictionaryOnePassService {
     dictionaryServiceProvider = dictionaryServiceProv;
   }
 
-  public static synchronized DictionaryServiceProvider getDictionayProvider() {
+  public static synchronized DictionaryServiceProvider getDictionaryProvider() 
{
     return dictionaryServiceProvider;
   }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/core/src/main/java/org/apache/carbondata/core/keygenerator/factory/KeyGeneratorFactory.java
----------------------------------------------------------------------
diff --git 
a/core/src/main/java/org/apache/carbondata/core/keygenerator/factory/KeyGeneratorFactory.java
 
b/core/src/main/java/org/apache/carbondata/core/keygenerator/factory/KeyGeneratorFactory.java
index 10e9e06..83c1d58 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/keygenerator/factory/KeyGeneratorFactory.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/keygenerator/factory/KeyGeneratorFactory.java
@@ -27,14 +27,14 @@ public final class KeyGeneratorFactory {
 
   }
 
-  public static KeyGenerator getKeyGenerator(int[] dimesion) {
+  public static KeyGenerator getKeyGenerator(int[] dimension) {
     int[] incrementedCardinality;
     boolean isFullyFilled =
         
Boolean.parseBoolean(CarbonCommonConstants.IS_FULLY_FILLED_BITS_DEFAULT_VALUE);
     if (!isFullyFilled) {
-      incrementedCardinality = CarbonUtil.getIncrementedCardinality(dimesion);
+      incrementedCardinality = CarbonUtil.getIncrementedCardinality(dimension);
     } else {
-      incrementedCardinality = 
CarbonUtil.getIncrementedCardinalityFullyFilled(dimesion);
+      incrementedCardinality = 
CarbonUtil.getIncrementedCardinalityFullyFilled(dimension);
     }
     return new MultiDimKeyVarLengthGenerator(incrementedCardinality);
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/core/src/main/java/org/apache/carbondata/core/metadata/converter/ThriftWrapperSchemaConverterImpl.java
----------------------------------------------------------------------
diff --git 
a/core/src/main/java/org/apache/carbondata/core/metadata/converter/ThriftWrapperSchemaConverterImpl.java
 
b/core/src/main/java/org/apache/carbondata/core/metadata/converter/ThriftWrapperSchemaConverterImpl.java
index 8d19431..644e6a3 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/metadata/converter/ThriftWrapperSchemaConverterImpl.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/metadata/converter/ThriftWrapperSchemaConverterImpl.java
@@ -362,19 +362,19 @@ public class ThriftWrapperSchemaConverterImpl implements 
SchemaConverter {
     List<org.apache.carbondata.format.ParentColumnTableRelation> 
thriftColumnRelationList =
         new ArrayList<>();
 
-    for (ParentColumnTableRelation wrapperColumnRealtion : 
wrapperColumnRelations) {
+    for (ParentColumnTableRelation wrapperColumnRelation : 
wrapperColumnRelations) {
       org.apache.carbondata.format.ParentColumnTableRelation 
thriftColumnTableRelation =
           new org.apache.carbondata.format.ParentColumnTableRelation();
-      
thriftColumnTableRelation.setColumnId(wrapperColumnRealtion.getColumnId());
-      
thriftColumnTableRelation.setColumnName(wrapperColumnRealtion.getColumnName());
+      
thriftColumnTableRelation.setColumnId(wrapperColumnRelation.getColumnId());
+      
thriftColumnTableRelation.setColumnName(wrapperColumnRelation.getColumnName());
       org.apache.carbondata.format.RelationIdentifier thriftRelationIdentifier 
=
           new org.apache.carbondata.format.RelationIdentifier();
       thriftRelationIdentifier
-          
.setDatabaseName(wrapperColumnRealtion.getRelationIdentifier().getDatabaseName());
+          
.setDatabaseName(wrapperColumnRelation.getRelationIdentifier().getDatabaseName());
       thriftRelationIdentifier
-          
.setTableName(wrapperColumnRealtion.getRelationIdentifier().getTableName());
+          
.setTableName(wrapperColumnRelation.getRelationIdentifier().getTableName());
       thriftRelationIdentifier
-          
.setTableId(wrapperColumnRealtion.getRelationIdentifier().getTableId());
+          
.setTableId(wrapperColumnRelation.getRelationIdentifier().getTableId());
       
thriftColumnTableRelation.setRelationIdentifier(thriftRelationIdentifier);
       thriftColumnRelationList.add(thriftColumnTableRelation);
     }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
----------------------------------------------------------------------
diff --git 
a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
 
b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
index 8f77996..06c255e 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
@@ -196,7 +196,7 @@ public abstract class AbstractQueryExecutor<E> implements 
QueryExecutor<E> {
     queryStatistic = new QueryStatistic();
     // dictionary column unique column id to dictionary mapping
     // which will be used to get column actual data
-    queryProperties.columnToDictionayMapping =
+    queryProperties.columnToDictionaryMapping =
         QueryUtil.getDimensionDictionaryDetail(
             queryModel.getProjectionDimensions(),
             queryProperties.complexFilterDimension,
@@ -205,7 +205,7 @@ public abstract class AbstractQueryExecutor<E> implements 
QueryExecutor<E> {
     queryStatistic
         .addStatistics(QueryStatisticsConstants.LOAD_DICTIONARY, 
System.currentTimeMillis());
     queryProperties.queryStatisticsRecorder.recordStatistics(queryStatistic);
-    
queryModel.setColumnToDictionaryMapping(queryProperties.columnToDictionayMapping);
+    
queryModel.setColumnToDictionaryMapping(queryProperties.columnToDictionaryMapping);
   }
 
   /**
@@ -333,7 +333,7 @@ public abstract class AbstractQueryExecutor<E> implements 
QueryExecutor<E> {
         .getComplexDimensionsMap(projectDimensions,
             segmentProperties.getDimensionOrdinalToChunkMapping(),
             segmentProperties.getEachComplexDimColumnValueSize(),
-            queryProperties.columnToDictionayMapping, 
queryProperties.complexFilterDimension));
+            queryProperties.columnToDictionaryMapping, 
queryProperties.complexFilterDimension));
     IndexKey startIndexKey = null;
     IndexKey endIndexKey = null;
     if (null != queryModel.getFilterExpressionResolverTree()) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/QueryExecutorProperties.java
----------------------------------------------------------------------
diff --git 
a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/QueryExecutorProperties.java
 
b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/QueryExecutorProperties.java
index a579ce5..4b59aa7 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/QueryExecutorProperties.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/QueryExecutorProperties.java
@@ -37,7 +37,7 @@ public class QueryExecutorProperties {
    * this will hold the information about the dictionary dimension
    * which to
    */
-  public Map<String, Dictionary> columnToDictionayMapping;
+  public Map<String, Dictionary> columnToDictionaryMapping;
 
   /**
    * Measure datatypes

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsDummy.java
----------------------------------------------------------------------
diff --git 
a/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsDummy.java
 
b/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsDummy.java
index ac504f0..a3a6a4e 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsDummy.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsDummy.java
@@ -30,7 +30,7 @@ public class CarbonLoadStatisticsDummy implements 
LoadStatistics {
   }
 
   @Override
-  public void  initPartitonInfo(String PartitionId) {
+  public void initPartitionInfo(String PartitionId) {
 
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsImpl.java
----------------------------------------------------------------------
diff --git 
a/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsImpl.java
 
b/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsImpl.java
index 9c6ab95..6d6e3ed 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsImpl.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/util/CarbonLoadStatisticsImpl.java
@@ -83,7 +83,7 @@ public class CarbonLoadStatisticsImpl implements 
LoadStatistics {
   private double totalTime = 0;
 
   @Override
-  public void initPartitonInfo(String PartitionId) {
+  public void initPartitionInfo(String PartitionId) {
     parDictionaryValuesTotalTimeMap.put(PartitionId, new Long[2]);
     parCsvInputStepTimeMap.put(PartitionId, new Long[2]);
     parSortRowsStepTotalTimeMap.put(PartitionId, new Long[2]);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/core/src/main/java/org/apache/carbondata/core/util/LoadStatistics.java
----------------------------------------------------------------------
diff --git 
a/core/src/main/java/org/apache/carbondata/core/util/LoadStatistics.java 
b/core/src/main/java/org/apache/carbondata/core/util/LoadStatistics.java
index 5784bdd..cd2bb13 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/LoadStatistics.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/LoadStatistics.java
@@ -18,8 +18,8 @@
 package org.apache.carbondata.core.util;
 
 public interface LoadStatistics {
-  //Init PartitonInfo
-  void  initPartitonInfo(String PartitionId);
+  //Init PartitionInfo
+  void initPartitionInfo(String PartitionId);
 
   //Record the time
   void recordDicShuffleAndWriteTime();

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
----------------------------------------------------------------------
diff --git 
a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
 
b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
index e607726..a2542ab 100644
--- 
a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
+++ 
b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
@@ -131,7 +131,7 @@ class SparkPartitionLoader(model: CarbonLoadModel,
       System.setProperty("carbon.properties.filepath",
         System.getProperty("user.dir") + '/' + "conf" + '/' + 
"carbon.properties")
     }
-    CarbonTimeStatisticsFactory.getLoadStatisticsInstance.initPartitonInfo(
+    CarbonTimeStatisticsFactory.getLoadStatisticsInstance.initPartitionInfo(
       CarbonTablePath.DEPRECATED_PATITION_ID)
     CarbonProperties.getInstance().addProperty("carbon.is.columnar.storage", 
"true")
     
CarbonProperties.getInstance().addProperty("carbon.dimension.split.value.in.columnar",
 "1")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/integration/spark-common/src/main/scala/org/apache/spark/rdd/DataLoadPartitionCoalescer.scala
----------------------------------------------------------------------
diff --git 
a/integration/spark-common/src/main/scala/org/apache/spark/rdd/DataLoadPartitionCoalescer.scala
 
b/integration/spark-common/src/main/scala/org/apache/spark/rdd/DataLoadPartitionCoalescer.scala
index 02718e0..508410a 100644
--- 
a/integration/spark-common/src/main/scala/org/apache/spark/rdd/DataLoadPartitionCoalescer.scala
+++ 
b/integration/spark-common/src/main/scala/org/apache/spark/rdd/DataLoadPartitionCoalescer.scala
@@ -170,7 +170,7 @@ class DataLoadPartitionCoalescer(prev: RDD[_], nodeList: 
Array[String]) {
   /**
    *  assign locality partition to each host
    */
-  private def assignPartitonNodeLocality(
+  private def assignPartitionNodeLocality(
       noEmptyHosts: Seq[(String, LinkedHashSet[Int])]): 
Array[ArrayBuffer[Int]] = {
     val localityResult = new Array[ArrayBuffer[Int]](noEmptyHosts.length)
     for (i <- 0 until localityResult.length) {
@@ -284,7 +284,7 @@ class DataLoadPartitionCoalescer(prev: RDD[_], nodeList: 
Array[String]) {
     // sort host and partitions
     tempNoEmptyHosts = sortHostAndPartitions(tempNoEmptyHosts)
     // assign locality partition to non empty hosts
-    val templocalityResult = assignPartitonNodeLocality(tempNoEmptyHosts)
+    val templocalityResult = assignPartitionNodeLocality(tempNoEmptyHosts)
     // collect non empty hosts and empty hosts
     val noEmptyHosts = mutable.Buffer[String]()
     val localityResult = mutable.Buffer[ArrayBuffer[Int]]()

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
----------------------------------------------------------------------
diff --git 
a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
 
b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
index 7a6aa53..e0736fb 100644
--- 
a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
+++ 
b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
@@ -525,7 +525,7 @@ case class CarbonLoadDataCommand(
 
   /**
    * Loads the data in a hive partition way. This method uses InsertIntoTable 
command to load data
-   * into partitoned data. The table relation would be converted to 
HadoopFSRelation to let spark
+   * into partitioned data. The table relation would be converted to 
HadoopFSRelation to let spark
    * handling the partitioning.
    */
   private def loadDataWithPartition(

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonTableFormat.scala
----------------------------------------------------------------------
diff --git 
a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonTableFormat.scala
 
b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonTableFormat.scala
index 9110482..e700aab 100644
--- 
a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonTableFormat.scala
+++ 
b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonTableFormat.scala
@@ -395,19 +395,19 @@ private class CarbonOutputWriter(path: String,
   override def close(): Unit = {
     recordWriter.close(context)
     // write partition info to new file.
-    val partitonList = new util.ArrayList[String]()
+    val partitionList = new util.ArrayList[String]()
     val formattedPartitions =
     // All dynamic partitions need to be converted to proper format
       CarbonScalaUtil.updatePartitions(
         updatedPartitions.asInstanceOf[mutable.LinkedHashMap[String, String]],
         model.getCarbonDataLoadSchema.getCarbonTable)
-    formattedPartitions.foreach(p => partitonList.add(p._1 + "=" + p._2))
+    formattedPartitions.foreach(p => partitionList.add(p._1 + "=" + p._2))
     SegmentFileStore.writeSegmentFile(
       model.getTablePath,
       taskNo,
       writePath,
       model.getSegmentId + "_" + model.getFactTimeStamp + "",
-      partitonList)
+      partitionList)
   }
 
   def getPartitionPath(path: String,

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
----------------------------------------------------------------------
diff --git 
a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
 
b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
index e99f502..7ed1705 100644
--- 
a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
+++ 
b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
@@ -164,7 +164,7 @@ class CarbonLateDecodeRule extends Rule[LogicalPlan] with 
PredicateHelper {
             extraNodeInfo.hasCarbonRelation = true
           }
         }
-        // only put no carbon realtion plan
+        // only put no carbon relation plan
         if (!extraNodeInfo.hasCarbonRelation) {
           extraNodeInfos.put(plan, extraNodeInfo)
         }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
----------------------------------------------------------------------
diff --git 
a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
 
b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
index 659f069..7b464f2 100644
--- 
a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
+++ 
b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
@@ -204,7 +204,7 @@ class CarbonHelperSqlAstBuilder(conf: SQLConf,
 
     // validate partition clause
     val (partitionByStructFields, partitionFields) =
-      validateParitionFields(partitionColumns, colNames, tableProperties)
+      validatePartitionFields(partitionColumns, colNames, tableProperties)
 
     // validate partition clause
     if (partitionFields.nonEmpty) {
@@ -327,7 +327,7 @@ class CarbonHelperSqlAstBuilder(conf: SQLConf,
     }
   }
 
-  private def validateParitionFields(
+  private def validatePartitionFields(
       partitionColumns: ColTypeListContext,
       colNames: Seq[String],
       tableProperties: mutable.Map[String, String]): (Seq[StructField], 
Seq[PartitionerField]) = {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/RowConverterImpl.java
----------------------------------------------------------------------
diff --git 
a/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/RowConverterImpl.java
 
b/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/RowConverterImpl.java
index 208d42f..6e5c2e0 100644
--- 
a/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/RowConverterImpl.java
+++ 
b/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/RowConverterImpl.java
@@ -128,7 +128,7 @@ public class RowConverterImpl implements RowConverter {
             @Override public DictionaryClient call() throws Exception {
               Thread.currentThread().setName("Dictionary client");
               DictionaryClient client =
-                  
DictionaryOnePassService.getDictionayProvider().getDictionaryClient();
+                  
DictionaryOnePassService.getDictionaryProvider().getDictionaryClient();
               client.startClient(configuration.getDictionaryServerSecretKey(),
                   configuration.getDictionaryServerHost(), 
configuration.getDictionaryServerPort(),
                   configuration.getDictionaryEncryptServerSecure());

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e8da8800/processing/src/main/java/org/apache/carbondata/processing/partition/impl/DefaultLoadBalancer.java
----------------------------------------------------------------------
diff --git 
a/processing/src/main/java/org/apache/carbondata/processing/partition/impl/DefaultLoadBalancer.java
 
b/processing/src/main/java/org/apache/carbondata/processing/partition/impl/DefaultLoadBalancer.java
index e533baf..93c5260 100644
--- 
a/processing/src/main/java/org/apache/carbondata/processing/partition/impl/DefaultLoadBalancer.java
+++ 
b/processing/src/main/java/org/apache/carbondata/processing/partition/impl/DefaultLoadBalancer.java
@@ -29,10 +29,10 @@ import org.apache.carbondata.processing.partition.Partition;
  * A sample load balancer to distribute the partitions to the available nodes 
in a round robin mode.
  */
 public class DefaultLoadBalancer {
-  private Map<String, List<Partition>> nodeToPartitonMap =
+  private Map<String, List<Partition>> nodeToPartitionMap =
       new HashMap<String, 
List<Partition>>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
 
-  private Map<Partition, String> partitonToNodeMap =
+  private Map<Partition, String> partitionToNodeMap =
       new HashMap<Partition, 
String>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
 
   public DefaultLoadBalancer(List<String> nodes, List<Partition> partitions) {
@@ -44,20 +44,20 @@ public class DefaultLoadBalancer {
       int nodeindex = partitioner % nodeCount;
       String node = nodes.get(nodeindex);
 
-      List<Partition> oldList = nodeToPartitonMap.get(node);
+      List<Partition> oldList = nodeToPartitionMap.get(node);
       if (oldList == null) {
         oldList = new 
ArrayList<Partition>(CarbonCommonConstants.CONSTANT_SIZE_TEN);
-        nodeToPartitonMap.put(node, oldList);
+        nodeToPartitionMap.put(node, oldList);
       }
       oldList.add(partition);
 
-      partitonToNodeMap.put(partition, node);
+      partitionToNodeMap.put(partition, node);
 
       partitioner++;
     }
   }
 
   public String getNodeForPartitions(Partition partition) {
-    return partitonToNodeMap.get(partition);
+    return partitionToNodeMap.get(partition);
   }
 }

Reply via email to