This is an automated email from the ASF dual-hosted git repository.

ajantha pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new 8125975  [CARBONDATA-3889] Enable java check style for all java modules
8125975 is described below

commit 81259754896cd669148f802fb4d80c3a8770a313
Author: QiangCai <[email protected]>
AuthorDate: Thu Aug 6 16:24:39 2020 +0800

    [CARBONDATA-3889] Enable java check style for all java modules
    
    Why is this PR needed?
    Some Java modules skip the Java style check.
    There are Java files under the Scala source directory.
    
    What changes were proposed in this PR?
    Enable the style check for all Java modules.
    Move the Java files into the Java source directory.
    
    Does this PR introduce any user interface change?
    No
    
    Is any new testcase added?
    No
    
    This closes #3872
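    
    Many of the hunks below are import reorderings made to satisfy the
    re-enabled style check. As a minimal sketch of the expected header layout
    (assuming the project's javastyle-config.xml groups org.apache.carbondata
    imports first and sorts alphabetically within each group, which is what the
    reorderings suggest; the class itself is hypothetical and not part of this
    commit):
    
        package org.apache.carbondata.examples.sdk;
    
        // Project (carbondata) imports come first, sorted alphabetically.
        import org.apache.carbondata.core.metadata.datatype.DataTypes;
        import org.apache.carbondata.core.metadata.datatype.Field;
    
        // Third-party imports follow in a separate group.
        import org.apache.log4j.Logger;
    
        // Hypothetical class, shown only to illustrate the header layout.
        public final class ImportOrderSketch {
          private static final Logger LOGGER =
              Logger.getLogger(ImportOrderSketch.class.getName());
    
          public static void main(String[] args) {
            Field name = new Field("name", DataTypes.STRING);
            LOGGER.info("created field: " + name);
          }
        }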
---
 .../carbondata/core/index/IndexStoreManager.java   |   1 +
 .../apache/carbondata/core/index/IndexUtil.java    |   4 +-
 .../metadata/schema/indextable/IndexTableInfo.java |   2 -
 dev/findbugs-exclude.xml                           |   4 +-
 examples/spark/pom.xml                             |   9 -
 .../examples/sdk/CarbonReaderExample.java          |   4 +-
 .../carbondata/examples/sdk/SDKS3Example.java      | 178 +++----
 .../carbondata/examples/sdk/SDKS3ReadExample.java  |   3 +-
 .../examples/sdk/SDKS3SchemaReadExample.java       |   2 +-
 geo/pom.xml                                        |   9 -
 .../geo/scan/expression/PolygonExpression.java     |   1 -
 .../filter/executor/PolygonFilterExecutorImpl.java |   2 +-
 index/secondary-index/pom.xml                      |  47 --
 integration/flink/pom.xml                          |   8 -
 integration/presto/pom.xml                         |   5 -
 .../carbondata/presto/CarbondataPageSource.java    |   2 +-
 .../carbondata/presto/impl/CarbonTableReader.java  |   4 +-
 .../presto/readers/ByteStreamReader.java           |   1 +
 .../converter/SparkDataTypeConverterImpl.java      |  17 +-
 .../apache/carbondata/spark/InitInputMetrics.java  |   3 +-
 .../spark/vectorreader/ColumnarVectorWrapper.java  |   6 +-
 .../vectorreader/ColumnarVectorWrapperDirect.java  |   0
 .../vectorreader/VectorizedCarbonRecordReader.java |  24 +-
 .../stream/CarbonStreamRecordReader.java           |   6 +-
 .../apache/spark/sql/CarbonDictionaryWrapper.java  |   0
 .../org/apache/spark/sql/CarbonVectorProxy.java    | 554 +++++++++++++++++++++
 .../org/apache/spark/sql/ColumnVectorFactory.java  |  27 +-
 .../org/apache/spark/sql/index/IndexTableUtil.java |   6 +-
 .../exception/IndexTableExistException.java        |   4 +-
 .../exception/SecondaryIndexException.java         |   4 +-
 .../jobs}/BlockletIndexDetailsWithSchema.java      |   2 +-
 .../jobs}/BlockletIndexInputFormat.java            |  29 +-
 .../jobs}/CarbonBlockLoaderHelper.java             |   2 +-
 .../load/CarbonInternalLoaderUtil.java             |  84 ++--
 .../sql/secondaryindex/load/RowComparator.java     |   1 +
 .../optimizer/CarbonCostBasedOptimizer.java        |   2 +-
 .../query/CarbonSecondaryIndexExecutor.java        |  18 +-
 .../query/SecondaryIndexQueryResultProcessor.java  |  84 ++--
 .../org/apache/spark/sql/CarbonVectorProxy.java    | 554 ---------------------
 .../SparkBlockletIndexLoaderJob.scala              |   7 +-
 mv/plan/pom.xml                                    |   9 -
 pom.xml                                            |   7 +
 42 files changed, 813 insertions(+), 923 deletions(-)

diff --git 
a/core/src/main/java/org/apache/carbondata/core/index/IndexStoreManager.java 
b/core/src/main/java/org/apache/carbondata/core/index/IndexStoreManager.java
index 8cb157e..e96889a 100644
--- a/core/src/main/java/org/apache/carbondata/core/index/IndexStoreManager.java
+++ b/core/src/main/java/org/apache/carbondata/core/index/IndexStoreManager.java
@@ -110,6 +110,7 @@ public final class IndexStoreManager {
     }
     return indexes;
   }
+
   /**
    * It gives the default index of the table. Default index of any table is 
BlockletIndex
    *
diff --git a/core/src/main/java/org/apache/carbondata/core/index/IndexUtil.java 
b/core/src/main/java/org/apache/carbondata/core/index/IndexUtil.java
index 473377c..28449c4 100644
--- a/core/src/main/java/org/apache/carbondata/core/index/IndexUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/index/IndexUtil.java
@@ -192,9 +192,9 @@ public class IndexUtil {
     if (!CarbonProperties.getInstance()
         .isDistributedPruningEnabled(carbonTable.getDatabaseName(), 
carbonTable.getTableName())
         && BlockletIndexUtil.loadIndexesParallel(carbonTable)) {
-      String clsName = 
"org.apache.spark.sql.secondaryindex.Jobs.SparkBlockletIndexLoaderJob";
+      String clsName = 
"org.apache.spark.sql.secondaryindex.jobs.SparkBlockletIndexLoaderJob";
       IndexJob indexJob = (IndexJob) createIndexJob(clsName);
-      String className = 
"org.apache.spark.sql.secondaryindex.Jobs.BlockletIndexInputFormat";
+      String className = 
"org.apache.spark.sql.secondaryindex.jobs.BlockletIndexInputFormat";
       FileInputFormat indexFormat =
           createIndexJob(carbonTable, indexExprWrapper, validSegments, 
className);
       indexJob.execute(carbonTable, indexFormat);
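
The two string constants changed above name classes that are instantiated at
runtime from their fully qualified names, so they have to track the package
rename from "Jobs" to "jobs" that this commit applies to the secondary-index
job classes. A rough sketch of that kind of name-based loading follows; the
real createIndexJob helper is not shown in this diff, so the use of
Class.forName here is an assumption for illustration only:

    public final class ReflectiveJobLoaderSketch {

      // Resolves and instantiates a class from its fully qualified name; throws
      // ClassNotFoundException when the string no longer matches the package,
      // which is why the constants change together with the package rename.
      public static Object newInstanceByName(String className) throws Exception {
        Class<?> clazz = Class.forName(className);
        return clazz.getDeclaredConstructor().newInstance();
      }

      public static void main(String[] args) throws Exception {
        // Any fully qualified name works; with carbondata on the classpath this
        // would be "org.apache.spark.sql.secondaryindex.jobs.SparkBlockletIndexLoaderJob".
        Object instance = newInstanceByName("java.util.ArrayList");
        System.out.println("Loaded: " + instance.getClass().getName());
      }
    }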
diff --git 
a/core/src/main/java/org/apache/carbondata/core/metadata/schema/indextable/IndexTableInfo.java
 
b/core/src/main/java/org/apache/carbondata/core/metadata/schema/indextable/IndexTableInfo.java
index c3c7302..44d7239 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/metadata/schema/indextable/IndexTableInfo.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/metadata/schema/indextable/IndexTableInfo.java
@@ -176,6 +176,4 @@ public class IndexTableInfo implements Serializable {
   public void setIndexProperties(Map<String, String> indexProperties) {
     this.indexProperties = indexProperties;
   }
-
-
 }
diff --git a/dev/findbugs-exclude.xml b/dev/findbugs-exclude.xml
index db11882..060ea09 100644
--- a/dev/findbugs-exclude.xml
+++ b/dev/findbugs-exclude.xml
@@ -126,6 +126,6 @@
     <Class name="org.apache.carbondata.events.OperationContext"/>
     <Bug pattern="SE_TRANSIENT_FIELD_NOT_RESTORED"/>
   </Match>
-  <Match> <Class 
name="~org.apache.spark.sql.secondaryindex.Jobs.BlockletIndexInputFormat"/> 
<Field name="indexExprWrapper"/> <Bug 
pattern="SE_TRANSIENT_FIELD_NOT_RESTORED"/> </Match>
-  <Match> <Class 
name="~org.apache.spark.sql.secondaryindex.Jobs.BlockletIndexInputFormat"/> 
<Field name="validSegments"/> <Bug pattern="SE_TRANSIENT_FIELD_NOT_RESTORED"/> 
</Match>
+  <Match> <Class 
name="~org.apache.spark.sql.secondaryindex.jobs.BlockletIndexInputFormat"/> 
<Field name="indexExprWrapper"/> <Bug 
pattern="SE_TRANSIENT_FIELD_NOT_RESTORED"/> </Match>
+  <Match> <Class 
name="~org.apache.spark.sql.secondaryindex.jobs.BlockletIndexInputFormat"/> 
<Field name="validSegments"/> <Bug pattern="SE_TRANSIENT_FIELD_NOT_RESTORED"/> 
</Match>
 </FindBugsFilter>
\ No newline at end of file
diff --git a/examples/spark/pom.xml b/examples/spark/pom.xml
index 715732c..3569de8 100644
--- a/examples/spark/pom.xml
+++ b/examples/spark/pom.xml
@@ -114,15 +114,6 @@
           <failIfNoTests>false</failIfNoTests>
         </configuration>
       </plugin>
-
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-checkstyle-plugin</artifactId>
-        <version>2.17</version>
-        <configuration>
-          <skip>true</skip>
-        </configuration>
-      </plugin>
       <plugin>
         <groupId>org.scala-tools</groupId>
         <artifactId>maven-scala-plugin</artifactId>
diff --git 
a/examples/spark/src/main/java/org/apache/carbondata/examples/sdk/CarbonReaderExample.java
 
b/examples/spark/src/main/java/org/apache/carbondata/examples/sdk/CarbonReaderExample.java
index e4ae8d1..eb09fe7 100644
--- 
a/examples/spark/src/main/java/org/apache/carbondata/examples/sdk/CarbonReaderExample.java
+++ 
b/examples/spark/src/main/java/org/apache/carbondata/examples/sdk/CarbonReaderExample.java
@@ -24,12 +24,12 @@ import java.sql.Date;
 import java.sql.Timestamp;
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
-import org.apache.carbondata.core.util.CarbonProperties;
 import org.apache.carbondata.core.metadata.datatype.DataTypes;
+import org.apache.carbondata.core.metadata.datatype.Field;
+import org.apache.carbondata.core.util.CarbonProperties;
 import org.apache.carbondata.sdk.file.CarbonReader;
 import org.apache.carbondata.sdk.file.CarbonSchemaReader;
 import org.apache.carbondata.sdk.file.CarbonWriter;
-import org.apache.carbondata.core.metadata.datatype.Field;
 import org.apache.carbondata.sdk.file.Schema;
 
 import org.apache.commons.io.FileUtils;
diff --git 
a/examples/spark/src/main/java/org/apache/carbondata/examples/sdk/SDKS3Example.java
 
b/examples/spark/src/main/java/org/apache/carbondata/examples/sdk/SDKS3Example.java
index b8e649f..b6a7cb8 100644
--- 
a/examples/spark/src/main/java/org/apache/carbondata/examples/sdk/SDKS3Example.java
+++ 
b/examples/spark/src/main/java/org/apache/carbondata/examples/sdk/SDKS3Example.java
@@ -20,13 +20,13 @@ package org.apache.carbondata.examples.sdk;
 import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.carbondata.core.constants.CarbonLoadOptionConstants;
 import org.apache.carbondata.core.metadata.datatype.DataTypes;
+import org.apache.carbondata.core.metadata.datatype.Field;
 import org.apache.carbondata.core.scan.expression.ColumnExpression;
 import org.apache.carbondata.core.scan.expression.LiteralExpression;
 import 
org.apache.carbondata.core.scan.expression.conditional.EqualToExpression;
 import org.apache.carbondata.core.util.CarbonProperties;
 import org.apache.carbondata.sdk.file.CarbonReader;
 import org.apache.carbondata.sdk.file.CarbonWriter;
-import org.apache.carbondata.core.metadata.datatype.Field;
 import org.apache.carbondata.sdk.file.Schema;
 
 import org.apache.hadoop.conf.Configuration;
@@ -41,100 +41,86 @@ import static 
org.apache.hadoop.fs.s3a.Constants.SECRET_KEY;
  * Example for testing CarbonWriter on S3
  */
 public class SDKS3Example {
-    public static void main(String[] args) throws Exception {
-        Logger logger = 
LogServiceFactory.getLogService(SDKS3Example.class.getName());
-        if (args == null || args.length < 3) {
-            logger.error("Usage: java CarbonS3Example: <access-key> 
<secret-key>"
-                + "<s3-endpoint> [table-path-on-s3] [rows] [Number of 
writes]");
-            System.exit(0);
-        }
-
-        String backupProperty = CarbonProperties.getInstance()
-            
.getProperty(CarbonLoadOptionConstants.ENABLE_CARBON_LOAD_DIRECT_WRITE_TO_STORE_PATH,
-                
CarbonLoadOptionConstants.ENABLE_CARBON_LOAD_DIRECT_WRITE_TO_STORE_PATH_DEFAULT);
-        CarbonProperties.getInstance()
-            
.addProperty(CarbonLoadOptionConstants.ENABLE_CARBON_LOAD_DIRECT_WRITE_TO_STORE_PATH,
 "true");
-
-        String path = "s3a://sdk/WriterOutput";
-        if (args.length > 3) {
-            path=args[3];
-        }
-
-        int rows = 3;
-        if (args.length > 4) {
-            rows = Integer.parseInt(args[4]);
-        }
-        int num = 3;
-        if (args.length > 5) {
-            num = Integer.parseInt(args[5]);
-        }
-
-        Configuration conf = new Configuration(true);
-        conf.set(Constants.ACCESS_KEY, args[0]);
-        conf.set(Constants.SECRET_KEY, args[1]);
-        conf.set(Constants.ENDPOINT, args[2]);
-
-        Field[] fields = new Field[2];
-        fields[0] = new Field("name", DataTypes.STRING);
-        fields[1] = new Field("age", DataTypes.INT);
-        for (int j = 0; j < num; j++) {
-            CarbonWriter writer = CarbonWriter
-                .builder()
-                .outputPath(path)
-                .withHadoopConf(conf)
-                .withCsvInput(new Schema(fields))
-                .writtenBy("SDKS3Example")
-                .build();
-
-            for (int i = 0; i < rows; i++) {
-                writer.write(new String[]{"robot" + (i % 10), 
String.valueOf(i)});
-            }
-            writer.close();
-        }
-        // Read data
-
-        EqualToExpression equalToExpression = new EqualToExpression(
-            new ColumnExpression("name", DataTypes.STRING),
-            new LiteralExpression("robot1", DataTypes.STRING));
-
-        CarbonReader reader = CarbonReader
-            .builder(path, "_temp")
-            .projection(new String[]{"name", "age"})
-            .filter(equalToExpression)
-            .withHadoopConf(conf)
-            .build();
-
-        System.out.println("\nData:");
-        int i = 0;
-        while (i < 20 && reader.hasNext()) {
-            Object[] row = (Object[]) reader.readNextRow();
-            System.out.println(row[0] + " " + row[1]);
-            i++;
-        }
-        System.out.println("\nFinished");
-        reader.close();
-
-        // Read without filter
-        CarbonReader reader2 = CarbonReader
-            .builder(path, "_temp")
-            .projection(new String[]{"name", "age"})
-            .withHadoopConf(ACCESS_KEY, args[0])
-            .withHadoopConf(SECRET_KEY, args[1])
-            .withHadoopConf(ENDPOINT, args[2])
-            .build();
-
-        System.out.println("\nData:");
-        i = 0;
-        while (i < 20 && reader2.hasNext()) {
-            Object[] row = (Object[]) reader2.readNextRow();
-            System.out.println(row[0] + " " + row[1]);
-            i++;
-        }
-        System.out.println("\nFinished");
-        reader2.close();
-
-        CarbonProperties.getInstance()
-            
.addProperty(CarbonLoadOptionConstants.ENABLE_CARBON_LOAD_DIRECT_WRITE_TO_STORE_PATH,
-                backupProperty);
+  public static void main(String[] args) throws Exception {
+    Logger logger = 
LogServiceFactory.getLogService(SDKS3Example.class.getName());
+    if (args == null || args.length < 3) {
+      logger.error("Usage: java CarbonS3Example: <access-key> <secret-key>"
+          + "<s3-endpoint> [table-path-on-s3] [rows] [Number of writes]");
+      System.exit(0);
+    }
+
+    String backupProperty = CarbonProperties.getInstance().getProperty(
+        
CarbonLoadOptionConstants.ENABLE_CARBON_LOAD_DIRECT_WRITE_TO_STORE_PATH,
+        
CarbonLoadOptionConstants.ENABLE_CARBON_LOAD_DIRECT_WRITE_TO_STORE_PATH_DEFAULT);
+    CarbonProperties.getInstance().addProperty(
+        
CarbonLoadOptionConstants.ENABLE_CARBON_LOAD_DIRECT_WRITE_TO_STORE_PATH, 
"true");
+
+    String path = "s3a://sdk/WriterOutput";
+    if (args.length > 3) {
+      path = args[3];
+    }
+
+    int rows = 3;
+    if (args.length > 4) {
+      rows = Integer.parseInt(args[4]);
+    }
+    int num = 3;
+    if (args.length > 5) {
+      num = Integer.parseInt(args[5]);
     }
+
+    Configuration conf = new Configuration(true);
+    conf.set(Constants.ACCESS_KEY, args[0]);
+    conf.set(Constants.SECRET_KEY, args[1]);
+    conf.set(Constants.ENDPOINT, args[2]);
+
+    Field[] fields = new Field[2];
+    fields[0] = new Field("name", DataTypes.STRING);
+    fields[1] = new Field("age", DataTypes.INT);
+    for (int j = 0; j < num; j++) {
+      CarbonWriter writer = 
CarbonWriter.builder().outputPath(path).withHadoopConf(conf)
+          .withCsvInput(new Schema(fields)).writtenBy("SDKS3Example").build();
+
+      for (int i = 0; i < rows; i++) {
+        writer.write(new String[] { "robot" + (i % 10), String.valueOf(i) });
+      }
+      writer.close();
+    }
+    // Read data
+
+    EqualToExpression equalToExpression = new EqualToExpression(
+        new ColumnExpression("name", DataTypes.STRING),
+        new LiteralExpression("robot1", DataTypes.STRING));
+
+    CarbonReader reader = CarbonReader.builder(path, "_temp").projection(
+        new String[] { "name", "age" 
}).filter(equalToExpression).withHadoopConf(conf).build();
+
+    System.out.println("\nData:");
+    int i = 0;
+    while (i < 20 && reader.hasNext()) {
+      Object[] row = (Object[]) reader.readNextRow();
+      System.out.println(row[0] + " " + row[1]);
+      i++;
+    }
+    System.out.println("\nFinished");
+    reader.close();
+
+    // Read without filter
+    CarbonReader reader2 = CarbonReader.builder(path, "_temp").projection(
+        new String[] { "name", "age" }).withHadoopConf(ACCESS_KEY, 
args[0]).withHadoopConf(
+        SECRET_KEY, args[1]).withHadoopConf(ENDPOINT, args[2]).build();
+
+    System.out.println("\nData:");
+    i = 0;
+    while (i < 20 && reader2.hasNext()) {
+      Object[] row = (Object[]) reader2.readNextRow();
+      System.out.println(row[0] + " " + row[1]);
+      i++;
+    }
+    System.out.println("\nFinished");
+    reader2.close();
+
+    CarbonProperties.getInstance().addProperty(
+        
CarbonLoadOptionConstants.ENABLE_CARBON_LOAD_DIRECT_WRITE_TO_STORE_PATH, 
backupProperty);
+  }
 }
diff --git 
a/examples/spark/src/main/java/org/apache/carbondata/examples/sdk/SDKS3ReadExample.java
 
b/examples/spark/src/main/java/org/apache/carbondata/examples/sdk/SDKS3ReadExample.java
index 92afd0b..60aaaa7 100644
--- 
a/examples/spark/src/main/java/org/apache/carbondata/examples/sdk/SDKS3ReadExample.java
+++ 
b/examples/spark/src/main/java/org/apache/carbondata/examples/sdk/SDKS3ReadExample.java
@@ -27,10 +27,11 @@ import 
org.apache.carbondata.core.scan.expression.conditional.EqualToExpression;
 import org.apache.carbondata.core.util.path.CarbonTablePath;
 import org.apache.carbondata.sdk.file.CarbonReader;
 
+import static org.apache.carbondata.sdk.file.utils.SDKUtil.listFiles;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.log4j.Logger;
 
-import static org.apache.carbondata.sdk.file.utils.SDKUtil.listFiles;
 import static org.apache.hadoop.fs.s3a.Constants.ACCESS_KEY;
 import static org.apache.hadoop.fs.s3a.Constants.ENDPOINT;
 import static org.apache.hadoop.fs.s3a.Constants.SECRET_KEY;
diff --git 
a/examples/spark/src/main/java/org/apache/carbondata/examples/sdk/SDKS3SchemaReadExample.java
 
b/examples/spark/src/main/java/org/apache/carbondata/examples/sdk/SDKS3SchemaReadExample.java
index ae58c4f..3bd4890 100644
--- 
a/examples/spark/src/main/java/org/apache/carbondata/examples/sdk/SDKS3SchemaReadExample.java
+++ 
b/examples/spark/src/main/java/org/apache/carbondata/examples/sdk/SDKS3SchemaReadExample.java
@@ -18,8 +18,8 @@
 package org.apache.carbondata.examples.sdk;
 
 import org.apache.carbondata.common.logging.LogServiceFactory;
-import org.apache.carbondata.sdk.file.CarbonSchemaReader;
 import org.apache.carbondata.core.metadata.datatype.Field;
+import org.apache.carbondata.sdk.file.CarbonSchemaReader;
 import org.apache.carbondata.sdk.file.Schema;
 
 import org.apache.hadoop.conf.Configuration;
diff --git a/geo/pom.xml b/geo/pom.xml
index 606089f..4457aab 100644
--- a/geo/pom.xml
+++ b/geo/pom.xml
@@ -88,15 +88,6 @@
           <failIfNoTests>false</failIfNoTests>
         </configuration>
       </plugin>
-
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-checkstyle-plugin</artifactId>
-        <version>2.17</version>
-        <configuration>
-          <skip>true</skip>
-        </configuration>
-      </plugin>
       <plugin>
         <groupId>org.scala-tools</groupId>
         <artifactId>maven-scala-plugin</artifactId>
diff --git 
a/geo/src/main/java/org/apache/carbondata/geo/scan/expression/PolygonExpression.java
 
b/geo/src/main/java/org/apache/carbondata/geo/scan/expression/PolygonExpression.java
index 78457d1..1adf029 100644
--- 
a/geo/src/main/java/org/apache/carbondata/geo/scan/expression/PolygonExpression.java
+++ 
b/geo/src/main/java/org/apache/carbondata/geo/scan/expression/PolygonExpression.java
@@ -56,7 +56,6 @@ public class PolygonExpression extends UnknownExpression 
implements ConditionalE
   private static final ExpressionResult falseExpRes =
       new ExpressionResult(DataTypes.BOOLEAN, false);
 
-
   public PolygonExpression(String polygon, String columnName, CustomIndex 
indexInstance) {
     this.polygon = polygon;
     this.instance = indexInstance;
diff --git 
a/geo/src/main/java/org/apache/carbondata/geo/scan/filter/executor/PolygonFilterExecutorImpl.java
 
b/geo/src/main/java/org/apache/carbondata/geo/scan/filter/executor/PolygonFilterExecutorImpl.java
index 4253c3f..230884e 100644
--- 
a/geo/src/main/java/org/apache/carbondata/geo/scan/filter/executor/PolygonFilterExecutorImpl.java
+++ 
b/geo/src/main/java/org/apache/carbondata/geo/scan/filter/executor/PolygonFilterExecutorImpl.java
@@ -85,7 +85,7 @@ public class PolygonFilterExecutorImpl extends 
RowLevelFilterExecutorImpl {
     int startIndex = getNearestRangeIndex(ranges, min);
     int endIndex = getNearestRangeIndex(ranges, max);
     if (endIndex > startIndex) {
-       // Multiple ranges fall between min and max. Need to scan this block or 
blocklet
+      // Multiple ranges fall between min and max. Need to scan this block or 
blocklet
       return true;
     }
     // Got same index for both min and max values.
diff --git a/index/secondary-index/pom.xml b/index/secondary-index/pom.xml
index c363c65..3322983 100644
--- a/index/secondary-index/pom.xml
+++ b/index/secondary-index/pom.xml
@@ -149,53 +149,6 @@
           </execution>
         </executions>
       </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-checkstyle-plugin</artifactId>
-        <version>2.17</version>
-        <configuration>
-          <configLocation>${dev.path}/javastyle-config.xml</configLocation>
-          
<suppressionsLocation>${dev.path}/javastyle-suppressions.xml</suppressionsLocation>
-          <consoleOutput>true</consoleOutput>
-          <failsOnError>true</failsOnError>
-          <linkXRef>false</linkXRef>
-          <failOnViolation>true</failOnViolation>
-          <includeTestSourceDirectory>false</includeTestSourceDirectory>
-          <sourceDirectory>${basedir}/src/main/java</sourceDirectory>
-          <testSourceDirectory>${basedir}/src/test/java</testSourceDirectory>
-          <outputFile>${basedir}/target/checkstyle-output.xml</outputFile>
-        </configuration>
-        <executions>
-          <execution>
-            <goals>
-              <goal>check</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.scalastyle</groupId>
-        <artifactId>scalastyle-maven-plugin</artifactId>
-        <version>0.8.0</version>
-        <executions>
-          <execution>
-            <goals>
-              <goal>check</goal>
-            </goals>
-          </execution>
-        </executions>
-        <configuration>
-          <verbose>false</verbose>
-          <failOnViolation>true</failOnViolation>
-          <includeTestSourceDirectory>false</includeTestSourceDirectory>
-          <failOnWarning>false</failOnWarning>
-          <sourceDirectory>${basedir}/src/main/scala</sourceDirectory>
-          <testSourceDirectory>${basedir}/src/test/scala</testSourceDirectory>
-          <configLocation>${dev.path}/scalastyle-config.xml</configLocation>
-          <outputFile>${basedir}/target/scalastyle-output.xml</outputFile>
-          <outputEncoding>${project.build.sourceEncoding}</outputEncoding>
-        </configuration>
-      </plugin>
     </plugins>
   </build>
   <profiles>
diff --git a/integration/flink/pom.xml b/integration/flink/pom.xml
index 191aea0..34eca7b 100644
--- a/integration/flink/pom.xml
+++ b/integration/flink/pom.xml
@@ -296,14 +296,6 @@
                 </configuration>
             </plugin>
             <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-checkstyle-plugin</artifactId>
-                <version>2.17</version>
-                <configuration>
-                    <skip>true</skip>
-                </configuration>
-            </plugin>
-            <plugin>
                 <groupId>org.scalatest</groupId>
                 <artifactId>scalatest-maven-plugin</artifactId>
                 <version>1.0</version>
diff --git a/integration/presto/pom.xml b/integration/presto/pom.xml
index f677742..a671f3d 100644
--- a/integration/presto/pom.xml
+++ b/integration/presto/pom.xml
@@ -593,11 +593,6 @@
         <extensions>true</extensions>
       </plugin>
       <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-checkstyle-plugin</artifactId>
-        <version>2.17</version>
-      </plugin>
-      <plugin>
         <groupId>org.scala-tools</groupId>
         <artifactId>maven-scala-plugin</artifactId>
         <version>2.15.2</version>
diff --git 
a/integration/presto/src/main/prestosql/org/apache/carbondata/presto/CarbondataPageSource.java
 
b/integration/presto/src/main/prestosql/org/apache/carbondata/presto/CarbondataPageSource.java
index e7452e1..2c647fe 100644
--- 
a/integration/presto/src/main/prestosql/org/apache/carbondata/presto/CarbondataPageSource.java
+++ 
b/integration/presto/src/main/prestosql/org/apache/carbondata/presto/CarbondataPageSource.java
@@ -25,9 +25,9 @@ import java.util.Objects;
 
 import org.apache.carbondata.common.CarbonIterator;
 import org.apache.carbondata.common.logging.LogServiceFactory;
-import org.apache.carbondata.core.index.IndexFilter;
 import org.apache.carbondata.core.datastore.block.TableBlockInfo;
 import org.apache.carbondata.core.datastore.impl.FileFactory;
+import org.apache.carbondata.core.index.IndexFilter;
 import 
org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
 import 
org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
diff --git 
a/integration/presto/src/main/prestosql/org/apache/carbondata/presto/impl/CarbonTableReader.java
 
b/integration/presto/src/main/prestosql/org/apache/carbondata/presto/impl/CarbonTableReader.java
index d25b69e..7364942 100755
--- 
a/integration/presto/src/main/prestosql/org/apache/carbondata/presto/impl/CarbonTableReader.java
+++ 
b/integration/presto/src/main/prestosql/org/apache/carbondata/presto/impl/CarbonTableReader.java
@@ -31,10 +31,10 @@ import java.util.stream.Collectors;
 
 import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
-import org.apache.carbondata.core.index.IndexFilter;
-import org.apache.carbondata.core.index.IndexStoreManager;
 import org.apache.carbondata.core.datastore.filesystem.CarbonFile;
 import org.apache.carbondata.core.datastore.impl.FileFactory;
+import org.apache.carbondata.core.index.IndexFilter;
+import org.apache.carbondata.core.index.IndexStoreManager;
 import org.apache.carbondata.core.indexstore.PartitionSpec;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
 import org.apache.carbondata.core.metadata.CarbonMetadata;
diff --git 
a/integration/presto/src/main/prestosql/org/apache/carbondata/presto/readers/ByteStreamReader.java
 
b/integration/presto/src/main/prestosql/org/apache/carbondata/presto/readers/ByteStreamReader.java
index 8be3f9d..7ba694b 100644
--- 
a/integration/presto/src/main/prestosql/org/apache/carbondata/presto/readers/ByteStreamReader.java
+++ 
b/integration/presto/src/main/prestosql/org/apache/carbondata/presto/readers/ByteStreamReader.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.carbondata.presto.readers;
 
 import org.apache.carbondata.core.metadata.datatype.DataType;
diff --git 
a/integration/spark/src/main/scala/org/apache/carbondata/converter/SparkDataTypeConverterImpl.java
 
b/integration/spark/src/main/java/org/apache/carbondata/converter/SparkDataTypeConverterImpl.java
similarity index 94%
rename from 
integration/spark/src/main/scala/org/apache/carbondata/converter/SparkDataTypeConverterImpl.java
rename to 
integration/spark/src/main/java/org/apache/carbondata/converter/SparkDataTypeConverterImpl.java
index 10037e2..b0e78b2 100644
--- 
a/integration/spark/src/main/scala/org/apache/carbondata/converter/SparkDataTypeConverterImpl.java
+++ 
b/integration/spark/src/main/java/org/apache/carbondata/converter/SparkDataTypeConverterImpl.java
@@ -54,7 +54,7 @@ public final class SparkDataTypeConverterImpl implements 
DataTypeConverter, Seri
     if (null == data) {
       return null;
     }
-    return org.apache.spark.sql.types.Decimal.apply((BigDecimal)data);
+    return org.apache.spark.sql.types.Decimal.apply((BigDecimal) data);
   }
 
   @Override
@@ -161,9 +161,8 @@ public final class SparkDataTypeConverterImpl implements 
DataTypeConverter, Seri
           fields[i] = new StructField(carbonColumn.getColName(),
               convertCarbonToSparkDataType(carbonColumn.getDataType()), true, 
null);
         } else {
-          fields[i] = new StructField(carbonColumn.getColName(),
-              convertCarbonToSparkDataType(
-                  org.apache.carbondata.core.metadata.datatype.DataTypes.INT), 
true, null);
+          fields[i] = new StructField(carbonColumn.getColName(), 
convertCarbonToSparkDataType(
+              org.apache.carbondata.core.metadata.datatype.DataTypes.INT), 
true, null);
         }
       } else if (carbonColumn.isMeasure()) {
         DataType dataType = carbonColumn.getDataType();
@@ -173,16 +172,16 @@ public final class SparkDataTypeConverterImpl implements 
DataTypeConverter, Seri
             || dataType == 
org.apache.carbondata.core.metadata.datatype.DataTypes.LONG
             || dataType == 
org.apache.carbondata.core.metadata.datatype.DataTypes.BINARY
             || dataType == 
org.apache.carbondata.core.metadata.datatype.DataTypes.VARCHAR) {
-          fields[i] = new StructField(carbonColumn.getColName(),
-              convertCarbonToSparkDataType(dataType), true, null);
+          fields[i] =
+              new StructField(carbonColumn.getColName(), 
convertCarbonToSparkDataType(dataType),
+                  true, null);
         } else if 
(org.apache.carbondata.core.metadata.datatype.DataTypes.isDecimal(dataType)) {
           CarbonMeasure measure = (CarbonMeasure) carbonColumn;
           fields[i] = new StructField(carbonColumn.getColName(),
               new DecimalType(measure.getPrecision(), measure.getScale()), 
true, null);
         } else {
-          fields[i] = new StructField(carbonColumn.getColName(),
-              convertCarbonToSparkDataType(
-                  
org.apache.carbondata.core.metadata.datatype.DataTypes.DOUBLE), true, null);
+          fields[i] = new StructField(carbonColumn.getColName(), 
convertCarbonToSparkDataType(
+              org.apache.carbondata.core.metadata.datatype.DataTypes.DOUBLE), 
true, null);
         }
       }
     }
diff --git 
a/integration/spark/src/main/scala/org/apache/carbondata/spark/InitInputMetrics.java
 
b/integration/spark/src/main/java/org/apache/carbondata/spark/InitInputMetrics.java
similarity index 94%
rename from 
integration/spark/src/main/scala/org/apache/carbondata/spark/InitInputMetrics.java
rename to 
integration/spark/src/main/java/org/apache/carbondata/spark/InitInputMetrics.java
index 1f230ef..953abf9 100644
--- 
a/integration/spark/src/main/scala/org/apache/carbondata/spark/InitInputMetrics.java
+++ 
b/integration/spark/src/main/java/org/apache/carbondata/spark/InitInputMetrics.java
@@ -27,5 +27,6 @@ import org.apache.spark.TaskContext;
  */
 public interface InitInputMetrics extends InputMetricsStats {
 
-  void initBytesReadCallback(TaskContext context, CarbonMultiBlockSplit 
carbonMultiBlockSplit, Long inputMetricsInterval);
+  void initBytesReadCallback(TaskContext context, CarbonMultiBlockSplit 
carbonMultiBlockSplit,
+      Long inputMetricsInterval);
 }
diff --git 
a/integration/spark/src/main/scala/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapper.java
 
b/integration/spark/src/main/java/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapper.java
similarity index 98%
rename from 
integration/spark/src/main/scala/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapper.java
rename to 
integration/spark/src/main/java/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapper.java
index 2305f2e..4f6f33a 100644
--- 
a/integration/spark/src/main/scala/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapper.java
+++ 
b/integration/spark/src/main/java/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapper.java
@@ -48,8 +48,8 @@ class ColumnarVectorWrapper implements CarbonColumnVector {
 
   private CarbonColumnVector dictionaryVector;
 
-  ColumnarVectorWrapper(CarbonVectorProxy writableColumnVector,
-      boolean[] filteredRows, int ordinal) {
+  ColumnarVectorWrapper(CarbonVectorProxy writableColumnVector, boolean[] 
filteredRows,
+      int ordinal) {
     this.sparkColumnVectorProxy = 
writableColumnVector.getColumnVector(ordinal);
     this.filteredRows = filteredRows;
     this.carbonVectorProxy = writableColumnVector;
@@ -298,7 +298,7 @@ class ColumnarVectorWrapper implements CarbonColumnVector {
 
   @Override
   public void setDictionary(CarbonDictionary dictionary) {
-      sparkColumnVectorProxy.setDictionary(dictionary);
+    sparkColumnVectorProxy.setDictionary(dictionary);
   }
 
   @Override
diff --git 
a/integration/spark/src/main/scala/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapperDirect.java
 
b/integration/spark/src/main/java/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapperDirect.java
similarity index 100%
rename from 
integration/spark/src/main/scala/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapperDirect.java
rename to 
integration/spark/src/main/java/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapperDirect.java
diff --git 
a/integration/spark/src/main/scala/org/apache/carbondata/spark/vectorreader/VectorizedCarbonRecordReader.java
 
b/integration/spark/src/main/java/org/apache/carbondata/spark/vectorreader/VectorizedCarbonRecordReader.java
similarity index 97%
rename from 
integration/spark/src/main/scala/org/apache/carbondata/spark/vectorreader/VectorizedCarbonRecordReader.java
rename to 
integration/spark/src/main/java/org/apache/carbondata/spark/vectorreader/VectorizedCarbonRecordReader.java
index 7d7cd0e..ea972ed 100644
--- 
a/integration/spark/src/main/scala/org/apache/carbondata/spark/vectorreader/VectorizedCarbonRecordReader.java
+++ 
b/integration/spark/src/main/java/org/apache/carbondata/spark/vectorreader/VectorizedCarbonRecordReader.java
@@ -22,7 +22,6 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.log4j.Logger;
 import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.carbondata.core.constants.CarbonV3DataFormatConstants;
 import org.apache.carbondata.core.datastore.block.TableBlockInfo;
@@ -49,11 +48,12 @@ import org.apache.carbondata.hadoop.InputMetricsStats;
 import org.apache.commons.lang3.exception.ExceptionUtils;
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.log4j.Logger;
 import org.apache.spark.memory.MemoryMode;
+import org.apache.spark.sql.CarbonVectorProxy;
 import 
org.apache.spark.sql.carbondata.execution.datasources.CarbonSparkDataSourceUtil;
 import org.apache.spark.sql.catalyst.InternalRow;
 import org.apache.spark.sql.execution.vectorized.ColumnVectorUtils;
-import org.apache.spark.sql.CarbonVectorProxy;
 import org.apache.spark.sql.types.DecimalType;
 import org.apache.spark.sql.types.StructField;
 import org.apache.spark.sql.types.StructType;
@@ -221,16 +221,16 @@ public class VectorizedCarbonRecordReader extends 
AbstractRecordReader<Object> {
         DirectDictionaryGenerator generator = 
DirectDictionaryKeyGeneratorFactory
             .getDirectDictionaryGenerator(dim.getDimension().getDataType());
         fields[dim.getOrdinal()] = new StructField(dim.getColumnName(),
-            
CarbonSparkDataSourceUtil.convertCarbonToSparkDataType(generator.getReturnType()),
 true, null);
+            
CarbonSparkDataSourceUtil.convertCarbonToSparkDataType(generator.getReturnType()),
 true,
+            null);
       } else {
         if (dim.getDimension().getDataType() == DataTypes.STRING
             || dim.getDimension().getDataType() == DataTypes.VARCHAR || 
dim.getDimension()
             .getColumnSchema().isLocalDictColumn()) {
           this.isNoDictStringField[dim.getOrdinal()] = true;
         }
-        fields[dim.getOrdinal()] = new StructField(dim.getColumnName(),
-            
CarbonSparkDataSourceUtil.convertCarbonToSparkDataType(dim.getDimension().getDataType()),
 true,
-            null);
+        fields[dim.getOrdinal()] = new StructField(dim.getColumnName(), 
CarbonSparkDataSourceUtil
+            .convertCarbonToSparkDataType(dim.getDimension().getDataType()), 
true, null);
       }
     }
 
@@ -238,11 +238,11 @@ public class VectorizedCarbonRecordReader extends 
AbstractRecordReader<Object> {
       ProjectionMeasure msr = queryMeasures.get(i);
       DataType dataType = msr.getMeasure().getDataType();
       if (dataType == DataTypes.BOOLEAN || dataType == DataTypes.SHORT || 
dataType == DataTypes.INT
-          || dataType == DataTypes.LONG || dataType == DataTypes.FLOAT
-          || dataType == DataTypes.BYTE || dataType == DataTypes.BINARY) {
+          || dataType == DataTypes.LONG || dataType == DataTypes.FLOAT || 
dataType == DataTypes.BYTE
+          || dataType == DataTypes.BINARY) {
         fields[msr.getOrdinal()] = new StructField(msr.getColumnName(),
-            
CarbonSparkDataSourceUtil.convertCarbonToSparkDataType(msr.getMeasure().getDataType()),
 true,
-            null);
+            
CarbonSparkDataSourceUtil.convertCarbonToSparkDataType(msr.getMeasure().getDataType()),
+            true, null);
       } else if (DataTypes.isDecimal(dataType)) {
         fields[msr.getOrdinal()] = new StructField(msr.getColumnName(),
             new DecimalType(msr.getMeasure().getPrecision(), 
msr.getMeasure().getScale()), true,
@@ -298,13 +298,11 @@ public class VectorizedCarbonRecordReader extends 
AbstractRecordReader<Object> {
 
   /**
    * Whether to use lazy load in vector or not.
-   * @return
    */
   private boolean isUseLazyLoad() {
     boolean useLazyLoad = false;
     if (queryModel.getIndexFilter() != null) {
-      Expression expression =
-          queryModel.getIndexFilter().getExpression();
+      Expression expression = queryModel.getIndexFilter().getExpression();
       useLazyLoad = true;
       // In case of join queries only not null filter would e pushed down so 
check and disable the
       // lazy load in that case.
diff --git 
a/integration/spark/src/main/scala/org/apache/carbondata/stream/CarbonStreamRecordReader.java
 
b/integration/spark/src/main/java/org/apache/carbondata/stream/CarbonStreamRecordReader.java
similarity index 96%
rename from 
integration/spark/src/main/scala/org/apache/carbondata/stream/CarbonStreamRecordReader.java
rename to 
integration/spark/src/main/java/org/apache/carbondata/stream/CarbonStreamRecordReader.java
index 042db07..5b5eab2 100644
--- 
a/integration/spark/src/main/scala/org/apache/carbondata/stream/CarbonStreamRecordReader.java
+++ 
b/integration/spark/src/main/java/org/apache/carbondata/stream/CarbonStreamRecordReader.java
@@ -57,8 +57,8 @@ public class CarbonStreamRecordReader extends 
StreamRecordReader {
   protected void initializeAtFirstRow() throws IOException {
     super.initializeAtFirstRow();
     outputRow = new GenericInternalRow(outputValues);
-    outputSchema = new StructType((StructField[])
-        
DataTypeUtil.getDataTypeConverter().convertCarbonSchemaToSparkSchema(projection));
+    outputSchema = new StructType((StructField[]) 
DataTypeUtil.getDataTypeConverter()
+        .convertCarbonSchemaToSparkSchema(projection));
   }
 
   @Override
@@ -160,7 +160,7 @@ public class CarbonStreamRecordReader extends 
StreamRecordReader {
   private void putRowToColumnBatch(int rowId) {
     for (int i = 0; i < projection.length; i++) {
       Object value = outputValues[i];
-      vectorProxy.getColumnVector(i).putRowToColumnBatch(rowId,value);
+      vectorProxy.getColumnVector(i).putRowToColumnBatch(rowId, value);
 
     }
   }
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDictionaryWrapper.java
 
b/integration/spark/src/main/java/org/apache/spark/sql/CarbonDictionaryWrapper.java
similarity index 100%
rename from 
integration/spark/src/main/scala/org/apache/spark/sql/CarbonDictionaryWrapper.java
rename to 
integration/spark/src/main/java/org/apache/spark/sql/CarbonDictionaryWrapper.java
diff --git 
a/integration/spark/src/main/java/org/apache/spark/sql/CarbonVectorProxy.java 
b/integration/spark/src/main/java/org/apache/spark/sql/CarbonVectorProxy.java
new file mode 100644
index 0000000..95ad8cc
--- /dev/null
+++ 
b/integration/spark/src/main/java/org/apache/spark/sql/CarbonVectorProxy.java
@@ -0,0 +1,554 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql;
+
+import java.math.BigInteger;
+
+import org.apache.carbondata.core.scan.result.vector.CarbonDictionary;
+import org.apache.carbondata.core.scan.scanner.LazyPageLoader;
+
+import org.apache.spark.memory.MemoryMode;
+import org.apache.spark.sql.catalyst.InternalRow;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+import org.apache.spark.sql.types.*;
+import org.apache.spark.sql.vectorized.ColumnVector;
+import org.apache.spark.sql.vectorized.ColumnarArray;
+import org.apache.spark.sql.vectorized.ColumnarBatch;
+import org.apache.spark.sql.vectorized.ColumnarMap;
+import org.apache.spark.unsafe.types.CalendarInterval;
+import org.apache.spark.unsafe.types.UTF8String;
+
+/**
+ * Adapter class which handles the columnar vector reading of the carbondata
+ * based on the spark ColumnVector and ColumnarBatch API. This proxy class
+ * handles the complexity of spark 2.3 version related api changes since
+ * spark ColumnVector and ColumnarBatch interfaces are still evolving.
+ */
+public class CarbonVectorProxy {
+
+  private ColumnarBatch columnarBatch;
+  private ColumnVectorProxy[] columnVectorProxies;
+
+  /**
+   * Adapter class which handles the columnar vector reading of the carbondata
+   * based on the spark ColumnVector and ColumnarBatch API. This proxy class
+   * handles the complexity of spark 2.3 version related api changes since
+   * spark ColumnVector and ColumnarBatch interfaces are still evolving.
+   *
+   * @param memMode       which represent the type on-heap or off-heap vector.
+   * @param outputSchema, metadata related to current schema of table.
+   * @param rowNum        rows number for vector reading
+   * @param useLazyLoad   Whether to use lazy load while getting the data.
+   */
+  public CarbonVectorProxy(MemoryMode memMode, StructType outputSchema, int 
rowNum,
+      boolean useLazyLoad) {
+    WritableColumnVector[] columnVectors = 
ColumnVectorFactory.getColumnVector(memMode,
+        outputSchema, rowNum);
+    columnVectorProxies = new ColumnVectorProxy[columnVectors.length];
+    for (int i = 0; i < columnVectorProxies.length; i++) {
+      if (useLazyLoad) {
+        columnVectorProxies[i] = new 
ColumnVectorProxyWithLazyLoad(columnVectors[i]);
+      } else {
+        columnVectorProxies[i] = new ColumnVectorProxy(columnVectors[i]);
+      }
+    }
+    columnarBatch = new ColumnarBatch(columnVectorProxies);
+    columnarBatch.setNumRows(rowNum);
+  }
+
+  /**
+   * Returns the number of rows for read, including filtered rows.
+   */
+  public int numRows() {
+    return columnarBatch.numRows();
+  }
+
+  /**
+   * This API will return a column vector from a batch of column vector rows
+   * based on the ordinal
+   *
+   * @param ordinal
+   * @return
+   */
+  public WritableColumnVector column(int ordinal) {
+    return ((ColumnVectorProxy) columnarBatch.column(ordinal)).getVector();
+  }
+
+  public ColumnVectorProxy getColumnVector(int ordinal) {
+    return columnVectorProxies[ordinal];
+  }
+
+  /**
+   * Resets this column for writing. The currently stored values are no longer 
accessible.
+   */
+  public void reset() {
+    for (int i = 0; i < columnarBatch.numCols(); i++) {
+      ((ColumnVectorProxy) columnarBatch.column(i)).reset();
+    }
+  }
+
+  public void resetDictionaryIds(int ordinal) {
+    (((ColumnVectorProxy) 
columnarBatch.column(ordinal)).getVector()).getDictionaryIds().reset();
+  }
+
+  /**
+   * Returns the row in this batch at `rowId`. Returned row is reused across 
calls.
+   */
+  public InternalRow getRow(int rowId) {
+    return columnarBatch.getRow(rowId);
+  }
+
+  /**
+   * Returns the row in this batch at `rowId`. Returned row is reused across 
calls.
+   */
+  public Object getColumnarBatch() {
+    return columnarBatch;
+  }
+
+  /**
+   * Called to close all the columns in this batch. It is not valid to access 
the data after
+   * calling this. This must be called at the end to clean up memory 
allocations.
+   */
+  public void close() {
+    columnarBatch.close();
+  }
+
+  /**
+   * Sets the number of rows in this batch.
+   */
+  public void setNumRows(int numRows) {
+    columnarBatch.setNumRows(numRows);
+  }
+
+  public DataType dataType(int ordinal) {
+    return columnarBatch.column(ordinal).dataType();
+  }
+
+  public static class ColumnVectorProxy extends ColumnVector {
+
+    private WritableColumnVector vector;
+
+    public ColumnVectorProxy(ColumnVector columnVector) {
+      super(columnVector.dataType());
+      vector = (WritableColumnVector) columnVector;
+    }
+
+    public void putRowToColumnBatch(int rowId, Object value) {
+      org.apache.spark.sql.types.DataType t = vector.dataType();
+      if (null == value) {
+        putNull(rowId);
+      } else {
+        if (t == org.apache.spark.sql.types.DataTypes.BooleanType) {
+          putBoolean(rowId, (boolean) value);
+        } else if (t == org.apache.spark.sql.types.DataTypes.ByteType) {
+          putByte(rowId, (byte) value);
+        } else if (t == org.apache.spark.sql.types.DataTypes.ShortType) {
+          putShort(rowId, (short) value);
+        } else if (t == org.apache.spark.sql.types.DataTypes.IntegerType) {
+          putInt(rowId, (int) value);
+        } else if (t == org.apache.spark.sql.types.DataTypes.LongType) {
+          putLong(rowId, (long) value);
+        } else if (t == org.apache.spark.sql.types.DataTypes.FloatType) {
+          putFloat(rowId, (float) value);
+        } else if (t == org.apache.spark.sql.types.DataTypes.DoubleType) {
+          putDouble(rowId, (double) value);
+        } else if (t == org.apache.spark.sql.types.DataTypes.StringType) {
+          UTF8String v = (UTF8String) value;
+          putByteArray(rowId, v.getBytes());
+        } else if (t instanceof org.apache.spark.sql.types.DecimalType) {
+          DecimalType dt = (DecimalType) t;
+          Decimal d = Decimal.fromDecimal(value);
+          if (dt.precision() <= Decimal.MAX_INT_DIGITS()) {
+            putInt(rowId, (int) d.toUnscaledLong());
+          } else if (dt.precision() <= Decimal.MAX_LONG_DIGITS()) {
+            putLong(rowId, d.toUnscaledLong());
+          } else {
+            final BigInteger integer = d.toJavaBigDecimal().unscaledValue();
+            byte[] bytes = integer.toByteArray();
+            putByteArray(rowId, bytes, 0, bytes.length);
+          }
+        } else if (t instanceof CalendarIntervalType) {
+          CalendarInterval c = (CalendarInterval) value;
+          vector.getChild(0).putInt(rowId, c.months);
+          vector.getChild(1).putLong(rowId, c.microseconds);
+        } else if (t instanceof org.apache.spark.sql.types.DateType) {
+          putInt(rowId, (int) value);
+        } else if (t instanceof org.apache.spark.sql.types.TimestampType) {
+          putLong(rowId, (long) value);
+        }
+      }
+    }
+
+    public void putBoolean(int rowId, boolean value) {
+      vector.putBoolean(rowId, value);
+    }
+
+    public void putByte(int rowId, byte value) {
+      vector.putByte(rowId, value);
+    }
+
+    public void putBytes(int rowId, int count, byte[] src, int srcIndex) {
+      vector.putBytes(rowId, count, src, srcIndex);
+    }
+
+    public void putShort(int rowId, short value) {
+      vector.putShort(rowId, value);
+    }
+
+    public void putInt(int rowId, int value) {
+      vector.putInt(rowId, value);
+    }
+
+    public void putFloat(int rowId, float value) {
+      vector.putFloat(rowId, value);
+    }
+
+    public void putFloats(int rowId, int count, float[] src, int srcIndex) {
+      vector.putFloats(rowId, count, src, srcIndex);
+    }
+
+    public void putLong(int rowId, long value) {
+      vector.putLong(rowId, value);
+    }
+
+    public void putDouble(int rowId, double value) {
+      vector.putDouble(rowId, value);
+    }
+
+    public void putByteArray(int rowId, byte[] value) {
+      vector.putByteArray(rowId, value);
+    }
+
+    public void putInts(int rowId, int count, int value) {
+      vector.putInts(rowId, count, value);
+    }
+
+    public void putInts(int rowId, int count, int[] src, int srcIndex) {
+      vector.putInts(rowId, count, src, srcIndex);
+    }
+
+    public void putShorts(int rowId, int count, short value) {
+      vector.putShorts(rowId, count, value);
+    }
+
+    public void putShorts(int rowId, int count, short[] src, int srcIndex) {
+      vector.putShorts(rowId, count, src, srcIndex);
+    }
+
+    public void putLongs(int rowId, int count, long value) {
+      vector.putLongs(rowId, count, value);
+    }
+
+    public void putLongs(int rowId, int count, long[] src, int srcIndex) {
+      vector.putLongs(rowId, count, src, srcIndex);
+    }
+
+    public void putDecimal(int rowId, Decimal value, int precision) {
+      vector.putDecimal(rowId, value, precision);
+
+    }
+
+    public void putDoubles(int rowId, int count, double value) {
+      vector.putDoubles(rowId, count, value);
+    }
+
+    public void putDoubles(int rowId, int count, double[] src, int srcIndex) {
+      vector.putDoubles(rowId, count, src, srcIndex);
+    }
+
+    public void putByteArray(int rowId, byte[] value, int offset, int length) {
+      vector.putByteArray(rowId, value, offset, length);
+    }
+
+    public void putNotNull(int rowId) {
+      vector.putNotNull(rowId);
+    }
+
+    public void putNotNulls(int rowId, int count) {
+      vector.putNotNulls(rowId, count);
+    }
+
+    public void putDictionaryInt(int rowId, int value) {
+      vector.getDictionaryIds().putInt(rowId, value);
+    }
+
+    public void setDictionary(CarbonDictionary dictionary) {
+      if (null != dictionary) {
+        vector.setDictionary(new CarbonDictionaryWrapper(dictionary));
+      } else {
+        vector.setDictionary(null);
+      }
+    }
+
+    public void putNull(int rowId) {
+      vector.putNull(rowId);
+    }
+
+    public void putNulls(int rowId, int count) {
+      vector.putNulls(rowId, count);
+    }
+
+    public boolean hasDictionary() {
+      return vector.hasDictionary();
+    }
+
+    public Object reserveDictionaryIds(int capacity) {
+      return vector.reserveDictionaryIds(capacity);
+    }
+
+    @Override
+    public boolean isNullAt(int i) {
+      return vector.isNullAt(i);
+    }
+
+    @Override
+    public boolean getBoolean(int i) {
+      return vector.getBoolean(i);
+    }
+
+    @Override
+    public byte getByte(int i) {
+      return vector.getByte(i);
+    }
+
+    @Override
+    public short getShort(int i) {
+      return vector.getShort(i);
+    }
+
+    @Override
+    public int getInt(int i) {
+      return vector.getInt(i);
+    }
+
+    @Override
+    public long getLong(int i) {
+      return vector.getLong(i);
+    }
+
+    @Override
+    public float getFloat(int i) {
+      return vector.getFloat(i);
+    }
+
+    @Override
+    public double getDouble(int i) {
+      return vector.getDouble(i);
+    }
+
+    @Override
+    public void close() {
+      vector.close();
+    }
+
+    @Override
+    public boolean hasNull() {
+      return vector.hasNull();
+    }
+
+    @Override
+    public int numNulls() {
+      return vector.numNulls();
+    }
+
+    @Override
+    public ColumnarArray getArray(int i) {
+      return vector.getArray(i);
+    }
+
+    @Override
+    public ColumnarMap getMap(int i) {
+      return vector.getMap(i);
+    }
+
+    @Override
+    public Decimal getDecimal(int i, int i1, int i2) {
+      return vector.getDecimal(i, i1, i2);
+    }
+
+    @Override
+    public UTF8String getUTF8String(int i) {
+      return vector.getUTF8String(i);
+    }
+
+    @Override
+    public byte[] getBinary(int i) {
+      return vector.getBinary(i);
+    }
+
+    @Override
+    protected ColumnVector getChild(int i) {
+      return vector.getChild(i);
+    }
+
+    public void reset() {
+      vector.reset();
+    }
+
+    public void setLazyPage(LazyPageLoader lazyPage) {
+      lazyPage.loadPage();
+    }
+
+    /**
+     * It keeps all binary data of all rows to it.
+     * Should use along with @{putArray(int rowId, int offset, int length)} to 
keep lengths
+     * and offset.
+     */
+    public void putAllByteArray(byte[] data, int offset, int length) {
+      vector.arrayData().appendBytes(length, data, offset);
+    }
+
+    public void putArray(int rowId, int offset, int length) {
+      vector.putArray(rowId, offset, length);
+    }
+
+    public WritableColumnVector getVector() {
+      return vector;
+    }
+  }
+
+  public static class ColumnVectorProxyWithLazyLoad extends ColumnVectorProxy {
+
+    private WritableColumnVector vector;
+
+    private LazyPageLoader pageLoad;
+
+    private boolean isLoaded;
+
+    public ColumnVectorProxyWithLazyLoad(ColumnVector columnVector) {
+      super(columnVector);
+      vector = (WritableColumnVector) columnVector;
+    }
+
+    @Override
+    public boolean isNullAt(int i) {
+      checkPageLoaded();
+      return vector.isNullAt(i);
+    }
+
+    @Override
+    public boolean getBoolean(int i) {
+      checkPageLoaded();
+      return vector.getBoolean(i);
+    }
+
+    @Override
+    public byte getByte(int i) {
+      checkPageLoaded();
+      return vector.getByte(i);
+    }
+
+    @Override
+    public short getShort(int i) {
+      checkPageLoaded();
+      return vector.getShort(i);
+    }
+
+    @Override
+    public int getInt(int i) {
+      checkPageLoaded();
+      return vector.getInt(i);
+    }
+
+    @Override
+    public long getLong(int i) {
+      checkPageLoaded();
+      return vector.getLong(i);
+    }
+
+    @Override
+    public float getFloat(int i) {
+      checkPageLoaded();
+      return vector.getFloat(i);
+    }
+
+    @Override
+    public double getDouble(int i) {
+      checkPageLoaded();
+      return vector.getDouble(i);
+    }
+
+    @Override
+    public boolean hasNull() {
+      checkPageLoaded();
+      return vector.hasNull();
+    }
+
+    @Override
+    public int numNulls() {
+      checkPageLoaded();
+      return vector.numNulls();
+    }
+
+    @Override
+    public ColumnarArray getArray(int i) {
+      checkPageLoaded();
+      return vector.getArray(i);
+    }
+
+    @Override
+    public ColumnarMap getMap(int i) {
+      checkPageLoaded();
+      return vector.getMap(i);
+    }
+
+    @Override
+    public Decimal getDecimal(int i, int i1, int i2) {
+      checkPageLoaded();
+      return vector.getDecimal(i, i1, i2);
+    }
+
+    @Override
+    public UTF8String getUTF8String(int i) {
+      checkPageLoaded();
+      return vector.getUTF8String(i);
+    }
+
+    @Override
+    public byte[] getBinary(int i) {
+      checkPageLoaded();
+      return vector.getBinary(i);
+    }
+
+    @Override
+    protected ColumnVector getChild(int i) {
+      checkPageLoaded();
+      return vector.getChild(i);
+    }
+
+    public void reset() {
+      isLoaded = false;
+      pageLoad = null;
+      vector.reset();
+    }
+
+    private void checkPageLoaded() {
+      if (!isLoaded) {
+        if (pageLoad != null) {
+          pageLoad.loadPage();
+        }
+        isLoaded = true;
+      }
+    }
+
+    public void setLazyPage(LazyPageLoader lazyPage) {
+      this.pageLoad = lazyPage;
+    }
+
+  }
+}
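
A minimal usage sketch of the CarbonVectorProxy shown above (not part of the patch; the one-column String schema, the class name CarbonVectorProxySketch and the literal values are assumptions for illustration only). It allocates an on-heap batch with lazy loading enabled and writes one row of binary data through putAllByteArray/putArray, as described in the comment on those methods:

    import java.nio.charset.StandardCharsets;

    import org.apache.spark.memory.MemoryMode;
    import org.apache.spark.sql.CarbonVectorProxy;
    import org.apache.spark.sql.catalyst.InternalRow;
    import org.apache.spark.sql.types.DataTypes;
    import org.apache.spark.sql.types.StructType;

    public class CarbonVectorProxySketch {
      public static void main(String[] args) {
        // Assumed schema: a single String column named "name" (illustrative only).
        StructType schema = new StructType().add("name", DataTypes.StringType);
        // The last argument enables the lazy-load proxies (ColumnVectorProxyWithLazyLoad).
        CarbonVectorProxy proxy = new CarbonVectorProxy(MemoryMode.ON_HEAP, schema, 4, true);

        // Append the raw bytes once, then record the offset/length for row 0.
        byte[] bytes = "carbon".getBytes(StandardCharsets.UTF_8);
        CarbonVectorProxy.ColumnVectorProxy column = proxy.getColumnVector(0);
        column.putAllByteArray(bytes, 0, bytes.length);
        column.putArray(0, 0, bytes.length);

        proxy.setNumRows(1);
        InternalRow row = proxy.getRow(0); // rows are read straight from the columnar batch
        System.out.println(row.getUTF8String(0));
        proxy.close();
      }
    }
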
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/ColumnVectorFactory.java b/integration/spark/src/main/java/org/apache/spark/sql/ColumnVectorFactory.java
similarity index 66%
rename from integration/spark/src/main/scala/org/apache/spark/sql/ColumnVectorFactory.java
rename to integration/spark/src/main/java/org/apache/spark/sql/ColumnVectorFactory.java
index b8c364e..fbeaa69 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/ColumnVectorFactory.java
+++ b/integration/spark/src/main/java/org/apache/spark/sql/ColumnVectorFactory.java
@@ -18,26 +18,25 @@
 package org.apache.spark.sql;
 
 import org.apache.spark.memory.MemoryMode;
-import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector;
 import org.apache.spark.sql.execution.vectorized.OffHeapColumnVector;
+import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector;
 import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
 import org.apache.spark.sql.types.StructType;
 
 public class ColumnVectorFactory {
 
-    public static WritableColumnVector[] getColumnVector(MemoryMode memMode, 
StructType outputSchema, int rowNums) {
+  public static WritableColumnVector[] getColumnVector(MemoryMode memMode, 
StructType outputSchema,
+      int rowNums) {
 
-        WritableColumnVector[] writableColumnVectors = null;
-        switch (memMode) {
-            case ON_HEAP:
-                writableColumnVectors = OnHeapColumnVector
-                        .allocateColumns(rowNums, outputSchema);
-                break;
-            case OFF_HEAP:
-                writableColumnVectors = OffHeapColumnVector
-                        .allocateColumns(rowNums, outputSchema);
-                break;
-        }
-        return writableColumnVectors;
+    WritableColumnVector[] writableColumnVectors = null;
+    switch (memMode) {
+      case ON_HEAP:
+        writableColumnVectors = OnHeapColumnVector.allocateColumns(rowNums, 
outputSchema);
+        break;
+      case OFF_HEAP:
+        writableColumnVectors = OffHeapColumnVector.allocateColumns(rowNums, 
outputSchema);
+        break;
     }
+    return writableColumnVectors;
+  }
 }
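
The reformatted factory above only dispatches on the Spark MemoryMode and returns the matching writable vectors; a short sketch follows (the single-column schema and the class name ColumnVectorFactorySketch are illustrative assumptions, not part of the patch):

    import org.apache.spark.memory.MemoryMode;
    import org.apache.spark.sql.ColumnVectorFactory;
    import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
    import org.apache.spark.sql.types.DataTypes;
    import org.apache.spark.sql.types.StructType;

    public class ColumnVectorFactorySketch {
      public static void main(String[] args) {
        StructType schema = new StructType().add("id", DataTypes.IntegerType);
        // ON_HEAP returns OnHeapColumnVector instances, OFF_HEAP the off-heap variant.
        WritableColumnVector[] vectors =
            ColumnVectorFactory.getColumnVector(MemoryMode.ON_HEAP, schema, 1024);
        vectors[0].putInt(0, 42);
        for (WritableColumnVector v : vectors) {
          v.close();
        }
      }
    }
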
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/index/IndexTableUtil.java b/integration/spark/src/main/java/org/apache/spark/sql/index/IndexTableUtil.java
similarity index 99%
rename from integration/spark/src/main/scala/org/apache/spark/sql/index/IndexTableUtil.java
rename to integration/spark/src/main/java/org/apache/spark/sql/index/IndexTableUtil.java
index 7e1640d..b84d3c9 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/index/IndexTableUtil.java
+++ b/integration/spark/src/main/java/org/apache/spark/sql/index/IndexTableUtil.java
@@ -14,22 +14,22 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.spark.sql.index;
 
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
-import org.apache.spark.sql.secondaryindex.exception.IndexTableExistException;
-
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.metadata.index.IndexType;
 import org.apache.carbondata.core.metadata.schema.indextable.IndexTableInfo;
 
+import org.apache.spark.sql.secondaryindex.exception.IndexTableExistException;
+
 public class IndexTableUtil {
   /**
    * adds index table info into parent table properties
-   *
    */
   public static String checkAndAddIndexTable(String gsonData, IndexTableInfo 
newIndexTable,
       boolean isSecondaryIndex) throws IndexTableExistException {
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/exception/IndexTableExistException.java b/integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/exception/IndexTableExistException.java
similarity index 96%
rename from integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/exception/IndexTableExistException.java
rename to integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/exception/IndexTableExistException.java
index 08bfa26..9483bcd 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/exception/IndexTableExistException.java
+++ b/integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/exception/IndexTableExistException.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.spark.sql.secondaryindex.exception;
 
 public class IndexTableExistException extends Exception {
@@ -42,7 +43,8 @@ public class IndexTableExistException extends Exception {
   /**
    * getMessage
    */
-  @Override public String getMessage() {
+  @Override
+  public String getMessage() {
     return this.msg;
   }
 }
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/exception/SecondaryIndexException.java b/integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/exception/SecondaryIndexException.java
similarity index 96%
rename from integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/exception/SecondaryIndexException.java
rename to integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/exception/SecondaryIndexException.java
index 7c84066..2794a8d 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/exception/SecondaryIndexException.java
+++ b/integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/exception/SecondaryIndexException.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.spark.sql.secondaryindex.exception;
 
 /**
@@ -33,7 +34,8 @@ public class SecondaryIndexException extends Exception {
     this.message = message;
   }
 
-  @Override public String getMessage() {
+  @Override
+  public String getMessage() {
     return message;
   }
 }
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/BlockletIndexDetailsWithSchema.java b/integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/jobs/BlockletIndexDetailsWithSchema.java
similarity index 98%
rename from integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/BlockletIndexDetailsWithSchema.java
rename to integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/jobs/BlockletIndexDetailsWithSchema.java
index 43429e2..914365a 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/BlockletIndexDetailsWithSchema.java
+++ b/integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/jobs/BlockletIndexDetailsWithSchema.java
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.sql.secondaryindex.Jobs;
+package org.apache.spark.sql.secondaryindex.jobs;
 
 import java.io.Serializable;
 import java.util.List;
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/BlockletIndexInputFormat.java b/integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/jobs/BlockletIndexInputFormat.java
similarity index 92%
rename from integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/BlockletIndexInputFormat.java
rename to integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/jobs/BlockletIndexInputFormat.java
index f65715d..19a48ee 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/BlockletIndexInputFormat.java
+++ b/integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/jobs/BlockletIndexInputFormat.java
@@ -15,8 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.sql.secondaryindex.Jobs;
-
+package org.apache.spark.sql.secondaryindex.jobs;
 
 import java.io.IOException;
 import java.io.Serializable;
@@ -32,13 +31,13 @@ import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.carbondata.core.cache.Cache;
 import org.apache.carbondata.core.cache.CacheProvider;
 import org.apache.carbondata.core.cache.CacheType;
+import 
org.apache.carbondata.core.datastore.block.SegmentPropertiesAndSchemaHolder;
 import org.apache.carbondata.core.index.IndexInputSplit;
 import org.apache.carbondata.core.index.IndexStoreManager;
 import org.apache.carbondata.core.index.Segment;
 import org.apache.carbondata.core.index.dev.CacheableIndex;
 import org.apache.carbondata.core.index.dev.IndexFactory;
 import org.apache.carbondata.core.index.dev.expr.IndexExprWrapper;
-import 
org.apache.carbondata.core.datastore.block.SegmentPropertiesAndSchemaHolder;
 import org.apache.carbondata.core.indexstore.BlockMetaInfo;
 import org.apache.carbondata.core.indexstore.BlockletIndexStore;
 import org.apache.carbondata.core.indexstore.BlockletIndexWrapper;
@@ -88,7 +87,8 @@ public class BlockletIndexInputFormat
     this.validSegments = validSegments;
   }
 
-  @Override public List<InputSplit> getSplits(JobContext job) throws 
IOException {
+  @Override
+  public List<InputSplit> getSplits(JobContext job) throws IOException {
     IndexFactory indexFactory =
         
IndexStoreManager.getInstance().getDefaultIndex(table).getIndexFactory();
     CacheableIndex factory = (CacheableIndex) indexFactory;
@@ -117,9 +117,9 @@ public class BlockletIndexInputFormat
   }
 
   @Override
-  public RecordReader<TableBlockIndexUniqueIdentifier, 
BlockletIndexDetailsWithSchema>
-  createRecordReader(InputSplit inputSplit, TaskAttemptContext 
taskAttemptContext)
-      throws IOException, InterruptedException {
+  public RecordReader<TableBlockIndexUniqueIdentifier,
+      BlockletIndexDetailsWithSchema> createRecordReader(InputSplit inputSplit,
+      TaskAttemptContext taskAttemptContext) {
     return new RecordReader<TableBlockIndexUniqueIdentifier, 
BlockletIndexDetailsWithSchema>() {
       private BlockletIndexWrapper wrapper = null;
       private TableBlockIndexUniqueIdentifier tableBlockIndexUniqueIdentifier 
= null;
@@ -143,7 +143,8 @@ public class BlockletIndexInputFormat
             
BlockletIndexUtil.getTableBlockUniqueIdentifiers(segment).iterator();
       }
 
-      @Override public boolean nextKeyValue() throws IOException, 
InterruptedException {
+      @Override
+      public boolean nextKeyValue() {
         if (iterator.hasNext()) {
           TableBlockIndexUniqueIdentifier tableBlockIndexUniqueIdentifier = 
iterator.next();
           this.tableBlockIndexUniqueIdentifier  = 
tableBlockIndexUniqueIdentifier;
@@ -158,21 +159,25 @@ public class BlockletIndexInputFormat
         return false;
       }
 
-      @Override public TableBlockIndexUniqueIdentifier getCurrentKey() {
+      @Override
+      public TableBlockIndexUniqueIdentifier getCurrentKey() {
         return tableBlockIndexUniqueIdentifier;
       }
 
-      @Override public BlockletIndexDetailsWithSchema getCurrentValue() {
+      @Override
+      public BlockletIndexDetailsWithSchema getCurrentValue() {
         BlockletIndexDetailsWithSchema blockletIndexDetailsWithSchema =
             new BlockletIndexDetailsWithSchema(wrapper, 
table.getTableInfo().isSchemaModified());
         return blockletIndexDetailsWithSchema;
       }
 
-      @Override public float getProgress() {
+      @Override
+      public float getProgress() {
         return 0;
       }
 
-      @Override public void close() {
+      @Override
+      public void close() {
         if (null != tableBlockIndexUniqueIdentifierWrapper) {
           if (null != wrapper && null != wrapper.getIndexes() && 
!wrapper.getIndexes()
               .isEmpty()) {
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/CarbonBlockLoaderHelper.java b/integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/jobs/CarbonBlockLoaderHelper.java
similarity index 98%
rename from integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/CarbonBlockLoaderHelper.java
rename to integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/jobs/CarbonBlockLoaderHelper.java
index e0003b8..a5a6e8a 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/CarbonBlockLoaderHelper.java
+++ b/integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/jobs/CarbonBlockLoaderHelper.java
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-package org.apache.spark.sql.secondaryindex.Jobs;
+package org.apache.spark.sql.secondaryindex.jobs;
 
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/load/CarbonInternalLoaderUtil.java b/integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/load/CarbonInternalLoaderUtil.java
similarity index 85%
rename from integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/load/CarbonInternalLoaderUtil.java
rename to integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/load/CarbonInternalLoaderUtil.java
index 827dba1..02bb465 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/load/CarbonInternalLoaderUtil.java
+++ b/integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/load/CarbonInternalLoaderUtil.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.spark.sql.secondaryindex.load;
 
 import java.io.IOException;
@@ -36,7 +37,6 @@ import org.apache.carbondata.core.statusmanager.LoadMetadataDetails;
 import org.apache.carbondata.core.statusmanager.SegmentStatus;
 import org.apache.carbondata.core.statusmanager.SegmentStatusManager;
 import org.apache.carbondata.core.util.path.CarbonTablePath;
-import org.apache.carbondata.processing.loading.model.CarbonLoadModel;
 import org.apache.carbondata.processing.util.CarbonLoaderUtil;
 
 import org.apache.log4j.Logger;
@@ -44,12 +44,11 @@ import org.apache.spark.sql.index.CarbonIndexUtil;
 
 public class CarbonInternalLoaderUtil {
 
-  private static final Logger LOGGER =
-      
LogServiceFactory.getLogService(CarbonInternalLoaderUtil.class.getName());
+  private static final Logger LOGGER = LogServiceFactory.getLogService(
+      CarbonInternalLoaderUtil.class.getName());
 
   public static List<String> getListOfValidSlices(LoadMetadataDetails[] 
details) {
-    List<String> activeSlices =
-        new ArrayList<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    List<String> activeSlices = new 
ArrayList<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
     for (LoadMetadataDetails oneLoad : details) {
       if (SegmentStatus.SUCCESS.equals(oneLoad.getSegmentStatus())
           || 
SegmentStatus.LOAD_PARTIAL_SUCCESS.equals(oneLoad.getSegmentStatus())
@@ -62,7 +61,6 @@ public class CarbonInternalLoaderUtil {
 
   /**
    * This method will return the mapping of valid segments to segment load 
start time
-   *
    */
   public static Map<String, Long> 
getSegmentToLoadStartTimeMapping(LoadMetadataDetails[] details) {
     Map<String, Long> segmentToLoadStartTimeMap = new 
HashMap<>(details.length);
@@ -100,18 +98,18 @@ public class CarbonInternalLoaderUtil {
           return false;
         }
 
-        LoadMetadataDetails[] currentLoadMetadataDetails =
-            SegmentStatusManager.readLoadMetadata(metaDataFilepath);
+        LoadMetadataDetails[] currentLoadMetadataDetails = 
SegmentStatusManager.readLoadMetadata(
+            metaDataFilepath);
 
-        List<LoadMetadataDetails> updatedLoadMetadataDetails =
-            new ArrayList<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+        List<LoadMetadataDetails> updatedLoadMetadataDetails = new ArrayList<>(
+            CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
 
         // check which load needs to be overwritten which are in in progress 
state
         boolean found = false;
         for (int i = 0; i < currentLoadMetadataDetails.length; i++) {
           for (LoadMetadataDetails newLoadMetadataDetail : 
newLoadMetadataDetails) {
-            if (currentLoadMetadataDetails[i].getLoadName()
-                .equals(newLoadMetadataDetail.getLoadName())) {
+            if (currentLoadMetadataDetails[i].getLoadName().equals(
+                newLoadMetadataDetail.getLoadName())) {
               currentLoadMetadataDetails[i] = newLoadMetadataDetail;
               found = true;
               break;
@@ -126,7 +124,8 @@ public class CarbonInternalLoaderUtil {
         for (int i = 0; i < newLoadMetadataDetails.size(); i++) {
           foundNext = false;
           for (int j = 0; j < currentLoadMetadataDetails.length; j++) {
-            if 
(newLoadMetadataDetails.get(i).getLoadName().equals(currentLoadMetadataDetails[j].getLoadName()))
 {
+            if (newLoadMetadataDetails.get(i).getLoadName().equals(
+                currentLoadMetadataDetails[j].getLoadName())) {
               foundNext = true;
               break;
             }
@@ -145,28 +144,27 @@ public class CarbonInternalLoaderUtil {
 
         List<String> indexTables = 
CarbonIndexUtil.getSecondaryIndexes(carbonTable);
         if (!indexTables.isEmpty()) {
-          List<LoadMetadataDetails> newSegmentDetailsListForIndexTable =
-              new ArrayList<>(validSegments.size());
+          List<LoadMetadataDetails> newSegmentDetailsListForIndexTable = new 
ArrayList<>(
+              validSegments.size());
           for (String segmentId : validSegments) {
             LoadMetadataDetails newSegmentDetailsObject = new 
LoadMetadataDetails();
             newSegmentDetailsObject.setLoadName(segmentId);
             newSegmentDetailsListForIndexTable.add(newSegmentDetailsObject);
           }
           for (CarbonTable indexTable : indexCarbonTables) {
-            List<LoadMetadataDetails> indexTableDetailsList = CarbonIndexUtil
-                
.getTableStatusDetailsForIndexTable(updatedLoadMetadataDetails, indexTable,
-                    newSegmentDetailsListForIndexTable);
+            List<LoadMetadataDetails> indexTableDetailsList =
+                
CarbonIndexUtil.getTableStatusDetailsForIndexTable(updatedLoadMetadataDetails,
+                    indexTable, newSegmentDetailsListForIndexTable);
 
             SegmentStatusManager.writeLoadDetailsIntoFile(
                 
CarbonTablePath.getTableStatusFilePath(indexTable.getTablePath()),
-                indexTableDetailsList
-                    .toArray(new LoadMetadataDetails[0]));
+                indexTableDetailsList.toArray(new LoadMetadataDetails[0]));
           }
         } else if (carbonTable.isIndexTable()) {
           SegmentStatusManager.writeLoadDetailsIntoFile(
               metaDataFilepath + CarbonCommonConstants.FILE_SEPARATOR
-                  + CarbonTablePath.TABLE_STATUS_FILE, 
updatedLoadMetadataDetails
-                  .toArray(new LoadMetadataDetails[0]));
+                  + CarbonTablePath.TABLE_STATUS_FILE,
+              updatedLoadMetadataDetails.toArray(new LoadMetadataDetails[0]));
         }
         status = true;
       } else {
@@ -178,8 +176,7 @@ public class CarbonInternalLoaderUtil {
       LOGGER.error(
           "Not able to acquire the lock for Table status update for table " + 
databaseName + "."
               + tableName);
-    }
-    finally {
+    } finally {
       if (carbonLock.unlock()) {
         LOGGER.info("Table unlocked successfully after table status update" + 
databaseName + "."
             + tableName);
@@ -209,12 +206,12 @@ public class CarbonInternalLoaderUtil {
     }
     boolean isIndexTableSegmentsCompacted = false;
     if (null != currentIndexTable) {
-      LoadMetadataDetails[] existingLoadMetaDataDetails =
-          
SegmentStatusManager.readLoadMetadata(currentIndexTable.getMetadataPath());
+      LoadMetadataDetails[] existingLoadMetaDataDetails = 
SegmentStatusManager.readLoadMetadata(
+          currentIndexTable.getMetadataPath());
       for (LoadMetadataDetails existingLoadMetaDataDetail : 
existingLoadMetaDataDetails) {
         for (LoadMetadataDetails newLoadMetadataDetail : 
newLoadMetadataDetails) {
-          if (existingLoadMetaDataDetail.getLoadName()
-              .equalsIgnoreCase(newLoadMetadataDetail.getLoadName())
+          if (existingLoadMetaDataDetail.getLoadName().equalsIgnoreCase(
+              newLoadMetadataDetail.getLoadName())
               && existingLoadMetaDataDetail.getSegmentStatus() == 
SegmentStatus.COMPACTED) {
             isIndexTableSegmentsCompacted = true;
             break;
@@ -232,7 +229,6 @@ public class CarbonInternalLoaderUtil {
 
   /**
    * method to update table status in case of IUD Update Delta Compaction.
-   *
    */
   public static boolean updateLoadMetadataWithMergeStatus(CarbonTable 
indexCarbonTable,
       String[] loadsToMerge, String mergedLoadNumber, Map<String, String> 
segmentToLoadStartTimeMap,
@@ -240,8 +236,8 @@ public class CarbonInternalLoaderUtil {
       List<String> rebuiltSegments) throws IOException {
     boolean tableStatusUpdateStatus = false;
     List<String> loadMergeList = new ArrayList<>(Arrays.asList(loadsToMerge));
-    SegmentStatusManager segmentStatusManager =
-        new 
SegmentStatusManager(indexCarbonTable.getAbsoluteTableIdentifier());
+    SegmentStatusManager segmentStatusManager = new SegmentStatusManager(
+        indexCarbonTable.getAbsoluteTableIdentifier());
 
     ICarbonLock carbonLock = segmentStatusManager.getTableStatusLock();
 
@@ -249,14 +245,14 @@ public class CarbonInternalLoaderUtil {
       if (carbonLock.lockWithRetries()) {
         LOGGER.info("Acquired lock for the table " + 
indexCarbonTable.getDatabaseName() + "."
             + indexCarbonTable.getTableName() + " for table status update ");
-        LoadMetadataDetails[] loadDetails =
-            
SegmentStatusManager.readLoadMetadata(indexCarbonTable.getMetadataPath());
+        LoadMetadataDetails[] loadDetails = 
SegmentStatusManager.readLoadMetadata(
+            indexCarbonTable.getMetadataPath());
 
         long modificationOrDeletionTimeStamp = 
CarbonUpdateUtil.readCurrentTime();
         for (LoadMetadataDetails loadDetail : loadDetails) {
           // check if this segment is merged.
-          if (loadMergeList.contains(loadDetail.getLoadName()) || loadMergeList
-              .contains(loadDetail.getMergedLoadName())) {
+          if (loadMergeList.contains(loadDetail.getLoadName()) || 
loadMergeList.contains(
+              loadDetail.getMergedLoadName())) {
             // if the compacted load is deleted after the start of the 
compaction process,
             // then need to discard the compaction process and treat it as 
failed compaction.
             if (loadDetail.getSegmentStatus() == 
SegmentStatus.MARKED_FOR_DELETE) {
@@ -279,8 +275,8 @@ public class CarbonInternalLoaderUtil {
         
loadMetadataDetails.setSegmentFile(SegmentFileStore.genSegmentFileName(mergedLoadNumber,
             String.valueOf(segmentToLoadStartTimeMap.get(mergedLoadNumber)))
             + CarbonTablePath.SEGMENT_EXT);
-        CarbonLoaderUtil
-            .addDataIndexSizeIntoMetaEntry(loadMetadataDetails, 
mergedLoadNumber, indexCarbonTable);
+        CarbonLoaderUtil.addDataIndexSizeIntoMetaEntry(loadMetadataDetails, 
mergedLoadNumber,
+            indexCarbonTable);
         if (rebuiltSegments.contains(loadMetadataDetails.getLoadName())) {
           loadMetadataDetails.setLoadStartTime(newLoadStartTime);
         } else {
@@ -322,12 +318,11 @@ public class CarbonInternalLoaderUtil {
   /**
    * Method to check if main table and SI have same number of valid segments 
or not
    */
-  public static boolean checkMainTableSegEqualToSISeg(LoadMetadataDetails[] 
mainTableLoadMetadataDetails,
-                                                      LoadMetadataDetails[] 
siTableLoadMetadataDetails) {
-    List<String> mainTableSegmentsList =
-        getListOfValidSlices(mainTableLoadMetadataDetails);
-    List<String> indexList =
-        getListOfValidSlices(siTableLoadMetadataDetails);
+  public static boolean checkMainTableSegEqualToSISeg(
+      LoadMetadataDetails[] mainTableLoadMetadataDetails,
+      LoadMetadataDetails[] siTableLoadMetadataDetails) {
+    List<String> mainTableSegmentsList = 
getListOfValidSlices(mainTableLoadMetadataDetails);
+    List<String> indexList = getListOfValidSlices(siTableLoadMetadataDetails);
     Collections.sort(mainTableSegmentsList);
     Collections.sort(indexList);
     // In the case when number of SI segments are more than the main table 
segments do nothing
@@ -365,7 +360,8 @@ public class CarbonInternalLoaderUtil {
     if (mainTableLoadMetadataDetails.length != 0) {
       for (LoadMetadataDetails loadDetail : mainTableLoadMetadataDetails) {
         // if load in progress and check if same load is present in SI.
-        if 
(SegmentStatusManager.isLoadInProgress(carbonTable.getAbsoluteTableIdentifier(),
 loadDetail.getLoadName())) {
+        if 
(SegmentStatusManager.isLoadInProgress(carbonTable.getAbsoluteTableIdentifier(),
+            loadDetail.getLoadName())) {
           if (!allSiSlices.contains(loadDetail.getLoadName())) {
             return false;
           }
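
As a quick illustration of the reformatted checkMainTableSegEqualToSISeg above, a hedged sketch follows (the segment names, statuses and the class name SegmentSyncSketch are assumptions for illustration only, and setSegmentStatus is assumed to be the usual LoadMetadataDetails setter); it compares the valid segment names of the main table and its secondary index:

    import org.apache.carbondata.core.statusmanager.LoadMetadataDetails;
    import org.apache.carbondata.core.statusmanager.SegmentStatus;

    import org.apache.spark.sql.secondaryindex.load.CarbonInternalLoaderUtil;

    public class SegmentSyncSketch {
      public static void main(String[] args) {
        // In practice both arrays come from SegmentStatusManager.readLoadMetadata(...).
        LoadMetadataDetails mainSeg = new LoadMetadataDetails();
        mainSeg.setLoadName("0");
        mainSeg.setSegmentStatus(SegmentStatus.SUCCESS);

        LoadMetadataDetails siSeg = new LoadMetadataDetails();
        siSeg.setLoadName("0");
        siSeg.setSegmentStatus(SegmentStatus.SUCCESS);

        boolean inSync = CarbonInternalLoaderUtil.checkMainTableSegEqualToSISeg(
            new LoadMetadataDetails[] { mainSeg }, new LoadMetadataDetails[] { siSeg });
        System.out.println(inSync); // expected true: both sides list the same valid segment "0"
      }
    }
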
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/load/RowComparator.java b/integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/load/RowComparator.java
similarity index 99%
rename from integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/load/RowComparator.java
rename to integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/load/RowComparator.java
index 61b569e..0892b2b 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/load/RowComparator.java
+++ b/integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/load/RowComparator.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.spark.sql.secondaryindex.load;
 
 import java.util.Comparator;
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/optimizer/CarbonCostBasedOptimizer.java b/integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/optimizer/CarbonCostBasedOptimizer.java
similarity index 99%
rename from integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/optimizer/CarbonCostBasedOptimizer.java
rename to integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/optimizer/CarbonCostBasedOptimizer.java
index 5bef4d5..ca63f39 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/optimizer/CarbonCostBasedOptimizer.java
+++ b/integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/optimizer/CarbonCostBasedOptimizer.java
@@ -14,11 +14,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.spark.sql.secondaryindex.optimizer;
 
 import java.util.ArrayList;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/query/CarbonSecondaryIndexExecutor.java b/integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/query/CarbonSecondaryIndexExecutor.java
similarity index 91%
rename from integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/query/CarbonSecondaryIndexExecutor.java
rename to integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/query/CarbonSecondaryIndexExecutor.java
index ccff79f..a8b3c48 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/query/CarbonSecondaryIndexExecutor.java
+++ b/integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/query/CarbonSecondaryIndexExecutor.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.spark.sql.secondaryindex.query;
 
 import java.io.IOException;
@@ -57,12 +58,11 @@ public class CarbonSecondaryIndexExecutor {
   // converter for UTF8String and decimal conversion
   private DataTypeConverter dataTypeConverter;
 
-  private static final Logger LOGGER =
-      
LogServiceFactory.getLogService(CarbonSecondaryIndexExecutor.class.getName());
+  private static final Logger LOGGER = LogServiceFactory.getLogService(
+      CarbonSecondaryIndexExecutor.class.getName());
 
   /**
    * Constructor
-   *
    */
   public CarbonSecondaryIndexExecutor(TaskBlockInfo taskBlockInfo, CarbonTable 
carbonTable,
       List<String> secondaryIndexColumns, DataTypeConverter dataTypeConverter) 
{
@@ -80,12 +80,12 @@ public class CarbonSecondaryIndexExecutor {
    * @return List of Carbon iterators
    */
   public List<CarbonIterator<RowBatch>> processTableBlocks() throws 
QueryExecutionException {
-    List<CarbonIterator<RowBatch>> resultList =
-        new ArrayList<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+    List<CarbonIterator<RowBatch>> resultList = new ArrayList<>(
+        CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
     List<TableBlockInfo> blockList = null;
     queryModel = prepareQueryModel();
-    this.queryExecutor =
-        QueryExecutorFactory.getQueryExecutor(queryModel, 
FileFactory.getConfiguration());
+    this.queryExecutor = QueryExecutorFactory.getQueryExecutor(queryModel,
+        FileFactory.getConfiguration());
     // for each segment get task block info
     Set<String> taskBlockListMapping = taskBlockInfo.getTaskSet();
     for (String task : taskBlockListMapping) {
@@ -112,7 +112,6 @@ public class CarbonSecondaryIndexExecutor {
 
   /**
    * get executor and execute the query model.
-   *
    */
   private CarbonIterator<RowBatch> executeBlockList(List<TableBlockInfo> 
blockList)
       throws QueryExecutionException {
@@ -133,8 +132,7 @@ public class CarbonSecondaryIndexExecutor {
   public QueryModel prepareQueryModel() {
 
     // Add implicit column position id or row id in case of secondary index 
creation
-    List<CarbonDimension> implicitDimensionList =
-        carbonTable.getImplicitDimensions();
+    List<CarbonDimension> implicitDimensionList = 
carbonTable.getImplicitDimensions();
     String[] columnsArray = new String[implicitDimensionList.size() + 
secondaryIndexColumns.length];
     int j = 0;
     for (String secondaryIndexColumn : secondaryIndexColumns) {
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/query/SecondaryIndexQueryResultProcessor.java b/integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/query/SecondaryIndexQueryResultProcessor.java
similarity index 85%
rename from integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/query/SecondaryIndexQueryResultProcessor.java
rename to integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/query/SecondaryIndexQueryResultProcessor.java
index cc5af16..51e36be 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/query/SecondaryIndexQueryResultProcessor.java
+++ b/integration/spark/src/main/java/org/apache/spark/sql/secondaryindex/query/SecondaryIndexQueryResultProcessor.java
@@ -14,6 +14,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.spark.sql.secondaryindex.query;
 
 import java.util.ArrayList;
@@ -63,8 +64,8 @@ public class SecondaryIndexQueryResultProcessor {
   /**
    * LOGGER
    */
-  private static final Logger LOGGER =
-      
LogServiceFactory.getLogService(SecondaryIndexQueryResultProcessor.class.getName());
+  private static final Logger LOGGER = LogServiceFactory.getLogService(
+      SecondaryIndexQueryResultProcessor.class.getName());
   /**
    * carbon load model that contains all the required information for load
    */
@@ -177,7 +178,6 @@ public class SecondaryIndexQueryResultProcessor {
   /**
    * This method will iterate over the query result and convert it into a 
format compatible
    * for data loading
-   *
    */
   public void processQueryResult(List<CarbonIterator<RowBatch>> 
detailQueryResultIteratorList)
       throws SecondaryIndexException {
@@ -196,11 +196,11 @@ public class SecondaryIndexQueryResultProcessor {
     } finally {
       // clear temp files and folders created during secondary index creation
       String databaseName = carbonLoadModel.getDatabaseName();
-      String tempLocationKey = CarbonDataProcessorUtil
-          .getTempStoreLocationKey(databaseName, indexTable.getTableName(),
-              carbonLoadModel.getSegmentId(), carbonLoadModel.getTaskNo(), 
false, false);
-      TableProcessingOperations
-          .deleteLocalDataLoadFolderLocation(tempLocationKey, 
indexTable.getTableName());
+      String tempLocationKey = 
CarbonDataProcessorUtil.getTempStoreLocationKey(databaseName,
+          indexTable.getTableName(), carbonLoadModel.getSegmentId(), 
carbonLoadModel.getTaskNo(),
+          false, false);
+      
TableProcessingOperations.deleteLocalDataLoadFolderLocation(tempLocationKey,
+          indexTable.getTableName());
     }
   }
 
@@ -219,7 +219,6 @@ public class SecondaryIndexQueryResultProcessor {
 
   /**
    * This method will iterate over the query result and perform row sorting 
operation
-   *
    */
   private void processResult(List<CarbonIterator<RowBatch>> 
detailQueryResultIteratorList)
       throws SecondaryIndexException {
@@ -244,7 +243,6 @@ public class SecondaryIndexQueryResultProcessor {
 
   /**
    * This method will prepare the data from raw object that will take part in 
sorting
-   *
    */
   private Object[] prepareRowObjectForSorting(Object[] row) {
     ByteArrayWrapper wrapper = (ByteArrayWrapper) row[0];
@@ -268,9 +266,8 @@ public class SecondaryIndexQueryResultProcessor {
         // no dictionary primitive columns are expected to be in original data 
while loading,
         // so convert it to original data
         if (DataTypeUtil.isPrimitiveColumn(dims.getDataType())) {
-          Object dataFromBytes = DataTypeUtil
-              
.getDataBasedOnDataTypeForNoDictionaryColumn(noDictionaryKeyByIndex,
-                  dims.getDataType());
+          Object dataFromBytes = 
DataTypeUtil.getDataBasedOnDataTypeForNoDictionaryColumn(
+              noDictionaryKeyByIndex, dims.getDataType());
           if (null != dataFromBytes && dims.getDataType() == 
DataTypes.TIMESTAMP) {
             dataFromBytes = (long) dataFromBytes / 1000L;
           }
@@ -289,7 +286,6 @@ public class SecondaryIndexQueryResultProcessor {
 
   /**
    * This method will read sort temp files, perform merge sort and add it to 
store for data loading
-   *
    */
   private void readAndLoadDataFromSortTempFiles() throws 
SecondaryIndexException {
     Throwable throwable = null;
@@ -350,9 +346,8 @@ public class SecondaryIndexQueryResultProcessor {
    * initialise segment properties
    */
   private void initSegmentProperties() {
-    List<ColumnSchema> columnSchemaList = CarbonUtil
-        .getColumnSchemaList(indexTable.getVisibleDimensions(),
-            indexTable.getVisibleMeasures());
+    List<ColumnSchema> columnSchemaList = CarbonUtil.getColumnSchemaList(
+        indexTable.getVisibleDimensions(), indexTable.getVisibleMeasures());
     segmentProperties = new SegmentProperties(columnSchemaList);
     srcSegmentProperties = new 
SegmentProperties(getParentColumnOrder(columnSchemaList));
   }
@@ -375,7 +370,6 @@ public class SecondaryIndexQueryResultProcessor {
 
   /**
    * add row to a temp array which will we written to a sort temp file after 
sorting
-   *
    */
   private void addRowForSorting(Object[] row) throws SecondaryIndexException {
     try {
@@ -394,10 +388,8 @@ public class SecondaryIndexQueryResultProcessor {
    */
   private void initSortDataRows() throws SecondaryIndexException {
     measureCount = indexTable.getVisibleMeasures().size();
-    implicitColumnCount =
-        indexTable.getImplicitDimensions().size();
-    List<CarbonDimension> dimensions =
-        indexTable.getVisibleDimensions();
+    implicitColumnCount = indexTable.getImplicitDimensions().size();
+    List<CarbonDimension> dimensions = indexTable.getVisibleDimensions();
     noDictionaryColMapping = new boolean[dimensions.size()];
     sortColumnMapping = new boolean[dimensions.size()];
     isVarcharDimMapping = new boolean[dimensions.size()];
@@ -428,15 +420,13 @@ public class SecondaryIndexQueryResultProcessor {
 
   /**
    * This method will create the sort parameters VO object
-   *
    */
   private SortParameters createSortParameters() {
     int numberOfCompactingCores = 
CarbonProperties.getInstance().getNumberOfCompactingCores();
-    return SortParameters
-        .createSortParameters(indexTable, databaseName, 
indexTable.getTableName(),
-            dimensionColumnCount, complexDimensionCount, measureCount, 
noDictionaryCount, segmentId,
-            carbonLoadModel.getTaskNo(), noDictionaryColMapping, 
sortColumnMapping,
-            isVarcharDimMapping, false, numberOfCompactingCores / 2);
+    return SortParameters.createSortParameters(indexTable, databaseName, 
indexTable.getTableName(),
+        dimensionColumnCount, complexDimensionCount, measureCount, 
noDictionaryCount, segmentId,
+        carbonLoadModel.getTaskNo(), noDictionaryColMapping, 
sortColumnMapping, isVarcharDimMapping,
+        false, numberOfCompactingCores / 2);
   }
 
   /**
@@ -444,29 +434,26 @@ public class SecondaryIndexQueryResultProcessor {
    * sort temp files
    */
   private void initializeFinalThreadMergerForMergeSort() {
-    String[] sortTempFileLocation = CarbonDataProcessorUtil
-        .arrayAppend(tempStoreLocation, CarbonCommonConstants.FILE_SEPARATOR,
-            CarbonCommonConstants.SORT_TEMP_FILE_LOCATION);
-    sortParameters
-        
.setNoDictionarySortColumn(CarbonDataProcessorUtil.getNoDictSortColMapping(indexTable));
-    finalMerger =
-        new SingleThreadFinalSortFilesMerger(sortTempFileLocation, 
indexTable.getTableName(),
-            sortParameters);
+    String[] sortTempFileLocation = 
CarbonDataProcessorUtil.arrayAppend(tempStoreLocation,
+        CarbonCommonConstants.FILE_SEPARATOR, 
CarbonCommonConstants.SORT_TEMP_FILE_LOCATION);
+    sortParameters.setNoDictionarySortColumn(
+        CarbonDataProcessorUtil.getNoDictSortColMapping(indexTable));
+    finalMerger = new SingleThreadFinalSortFilesMerger(sortTempFileLocation,
+        indexTable.getTableName(), sortParameters);
   }
 
   /**
    * initialise carbon data writer instance
    */
   private void initDataHandler() throws SecondaryIndexException {
-    String carbonStoreLocation =
-        CarbonDataProcessorUtil.createCarbonStoreLocation(this.indexTable, 
segmentId);
-    CarbonFactDataHandlerModel carbonFactDataHandlerModel = 
CarbonFactDataHandlerModel
-        .getCarbonFactDataHandlerModel(carbonLoadModel, indexTable, 
segmentProperties,
-            indexTable.getTableName(), tempStoreLocation, carbonStoreLocation);
+    String carbonStoreLocation = 
CarbonDataProcessorUtil.createCarbonStoreLocation(this.indexTable,
+        segmentId);
+    CarbonFactDataHandlerModel carbonFactDataHandlerModel =
+        
CarbonFactDataHandlerModel.getCarbonFactDataHandlerModel(carbonLoadModel, 
indexTable,
+            segmentProperties, indexTable.getTableName(), tempStoreLocation, 
carbonStoreLocation);
     
carbonFactDataHandlerModel.setSchemaUpdatedTimeStamp(indexTable.getTableLastUpdatedTime());
-    CarbonDataFileAttributes carbonDataFileAttributes =
-        new 
CarbonDataFileAttributes(Integer.parseInt(carbonLoadModel.getTaskNo()),
-            carbonLoadModel.getFactTimeStamp());
+    CarbonDataFileAttributes carbonDataFileAttributes = new 
CarbonDataFileAttributes(
+        Integer.parseInt(carbonLoadModel.getTaskNo()), 
carbonLoadModel.getFactTimeStamp());
     
carbonFactDataHandlerModel.setCarbonDataFileAttributes(carbonDataFileAttributes);
     dataHandler = 
CarbonFactHandlerFactory.createCarbonFactHandler(carbonFactDataHandlerModel);
     try {
@@ -483,16 +470,15 @@ public class SecondaryIndexQueryResultProcessor {
    * initialise temporary store location
    */
   private void initTempStoreLocation() {
-    tempStoreLocation = CarbonDataProcessorUtil
-        .getLocalDataFolderLocation(indexTable, carbonLoadModel.getTaskNo(), 
segmentId, false,
-            false);
+    tempStoreLocation = 
CarbonDataProcessorUtil.getLocalDataFolderLocation(indexTable,
+        carbonLoadModel.getTaskNo(), segmentId, false, false);
   }
 
   /**
    * initialise aggregation type for measures for their storage format
    */
   private void initAggType() {
-    aggType =
-        CarbonDataProcessorUtil.initDataType(indexTable, 
indexTable.getTableName(), measureCount);
+    aggType = CarbonDataProcessorUtil.initDataType(indexTable, 
indexTable.getTableName(),
+        measureCount);
   }
 }
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonVectorProxy.java b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonVectorProxy.java
deleted file mode 100644
index 158b3e2..0000000
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonVectorProxy.java
+++ /dev/null
@@ -1,554 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.spark.sql;
-
-import java.math.BigInteger;
-
-import org.apache.carbondata.core.scan.result.vector.CarbonDictionary;
-import org.apache.carbondata.core.scan.scanner.LazyPageLoader;
-
-import org.apache.spark.memory.MemoryMode;
-import org.apache.spark.sql.catalyst.InternalRow;
-import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
-import org.apache.spark.sql.types.*;
-import org.apache.spark.sql.vectorized.ColumnVector;
-import org.apache.spark.sql.vectorized.ColumnarArray;
-import org.apache.spark.sql.vectorized.ColumnarBatch;
-import org.apache.spark.sql.vectorized.ColumnarMap;
-import org.apache.spark.unsafe.types.CalendarInterval;
-import org.apache.spark.unsafe.types.UTF8String;
-
-/**
- * Adapter class which handles the columnar vector reading of the carbondata
- * based on the spark ColumnVector and ColumnarBatch API. This proxy class
- * handles the complexity of spark 2.3 version related api changes since
- * spark ColumnVector and ColumnarBatch interfaces are still evolving.
- */
-public class CarbonVectorProxy {
-
-  private ColumnarBatch columnarBatch;
-  private ColumnVectorProxy[] columnVectorProxies;
-
-  /**
-   * Adapter class which handles the columnar vector reading of the carbondata
-   * based on the spark ColumnVector and ColumnarBatch API. This proxy class
-   * handles the complexity of spark 2.3 version related api changes since
-   * spark ColumnVector and ColumnarBatch interfaces are still evolving.
-   *
-   * @param memMode       which represent the type on-heap or off-heap vector.
-   * @param outputSchema, metadata related to current schema of table.
-   * @param rowNum        rows number for vector reading
-   * @param useLazyLoad   Whether to use lazy load while getting the data.
-   */
-    public CarbonVectorProxy(MemoryMode memMode, StructType outputSchema, int 
rowNum,
-        boolean useLazyLoad) {
-        WritableColumnVector[] columnVectors = ColumnVectorFactory
-                .getColumnVector(memMode, outputSchema, rowNum);
-        columnVectorProxies = new ColumnVectorProxy[columnVectors.length];
-        for (int i = 0; i < columnVectorProxies.length; i++) {
-          if (useLazyLoad) {
-            columnVectorProxies[i] = new 
ColumnVectorProxyWithLazyLoad(columnVectors[i]);
-          } else {
-            columnVectorProxies[i] = new ColumnVectorProxy(columnVectors[i]);
-          }
-        }
-        columnarBatch = new ColumnarBatch(columnVectorProxies);
-        columnarBatch.setNumRows(rowNum);
-    }
-
-    /**
-     * Returns the number of rows for read, including filtered rows.
-     */
-    public int numRows() {
-        return columnarBatch.numRows();
-    }
-
-    /**
-     * This API will return a column vector from a batch of column vector rows
-     * based on the ordinal
-     *
-     * @param ordinal
-     * @return
-     */
-    public WritableColumnVector column(int ordinal) {
-        return ((ColumnVectorProxy) columnarBatch.column(ordinal)).getVector();
-    }
-
-    public ColumnVectorProxy getColumnVector(int ordinal) {
-        return columnVectorProxies[ordinal];
-    }
-
-    /**
-     * Resets this column for writing. The currently stored values are no 
longer accessible.
-     */
-    public void reset() {
-        for (int i = 0; i < columnarBatch.numCols(); i++) {
-            ((ColumnVectorProxy) columnarBatch.column(i)).reset();
-        }
-    }
-
-    public void resetDictionaryIds(int ordinal) {
-        (((ColumnVectorProxy) 
columnarBatch.column(ordinal)).getVector()).getDictionaryIds().reset();
-    }
-
-    /**
-     * Returns the row in this batch at `rowId`. Returned row is reused across 
calls.
-     */
-    public InternalRow getRow(int rowId) {
-        return columnarBatch.getRow(rowId);
-    }
-
-    /**
-     * Returns the row in this batch at `rowId`. Returned row is reused across 
calls.
-     */
-    public Object getColumnarBatch() {
-        return columnarBatch;
-    }
-
-    /**
-     * Called to close all the columns in this batch. It is not valid to 
access the data after
-     * calling this. This must be called at the end to clean up memory 
allocations.
-     */
-    public void close() {
-        columnarBatch.close();
-    }
-
-    /**
-     * Sets the number of rows in this batch.
-     */
-    public void setNumRows(int numRows) {
-        columnarBatch.setNumRows(numRows);
-    }
-
-    public DataType dataType(int ordinal) {
-        return columnarBatch.column(ordinal).dataType();
-    }
-
-    public static class ColumnVectorProxy extends ColumnVector {
-
-        private WritableColumnVector vector;
-
-        public ColumnVectorProxy(ColumnVector columnVector) {
-            super(columnVector.dataType());
-            vector = (WritableColumnVector) columnVector;
-        }
-
-        public void putRowToColumnBatch(int rowId, Object value) {
-            org.apache.spark.sql.types.DataType t = vector.dataType();
-            if (null == value) {
-                putNull(rowId);
-            } else {
-                if (t == org.apache.spark.sql.types.DataTypes.BooleanType) {
-                    putBoolean(rowId, (boolean) value);
-                } else if (t == org.apache.spark.sql.types.DataTypes.ByteType) 
{
-                    putByte(rowId, (byte) value);
-                } else if (t == 
org.apache.spark.sql.types.DataTypes.ShortType) {
-                    putShort(rowId, (short) value);
-                } else if (t == 
org.apache.spark.sql.types.DataTypes.IntegerType) {
-                    putInt(rowId, (int) value);
-                } else if (t == org.apache.spark.sql.types.DataTypes.LongType) 
{
-                    putLong(rowId, (long) value);
-                } else if (t == 
org.apache.spark.sql.types.DataTypes.FloatType) {
-                    putFloat(rowId, (float) value);
-                } else if (t == 
org.apache.spark.sql.types.DataTypes.DoubleType) {
-                    putDouble(rowId, (double) value);
-                } else if (t == 
org.apache.spark.sql.types.DataTypes.StringType) {
-                    UTF8String v = (UTF8String) value;
-                    putByteArray(rowId, v.getBytes());
-                } else if (t instanceof 
org.apache.spark.sql.types.DecimalType) {
-                    DecimalType dt = (DecimalType) t;
-                    Decimal d = Decimal.fromDecimal(value);
-                    if (dt.precision() <= Decimal.MAX_INT_DIGITS()) {
-                        putInt(rowId, (int) d.toUnscaledLong());
-                    } else if (dt.precision() <= Decimal.MAX_LONG_DIGITS()) {
-                        putLong(rowId, d.toUnscaledLong());
-                    } else {
-                        final BigInteger integer = 
d.toJavaBigDecimal().unscaledValue();
-                        byte[] bytes = integer.toByteArray();
-                        putByteArray(rowId, bytes, 0, bytes.length);
-                    }
-                } else if (t instanceof CalendarIntervalType) {
-                    CalendarInterval c = (CalendarInterval) value;
-                    vector.getChild(0).putInt(rowId, c.months);
-                    vector.getChild(1).putLong(rowId, c.microseconds);
-                } else if (t instanceof org.apache.spark.sql.types.DateType) {
-                    putInt(rowId, (int) value);
-                } else if (t instanceof 
org.apache.spark.sql.types.TimestampType) {
-                    putLong(rowId, (long) value);
-                }
-            }
-        }
-
-        public void putBoolean(int rowId, boolean value) {
-            vector.putBoolean(rowId, value);
-        }
-
-        public void putByte(int rowId, byte value) {
-            vector.putByte(rowId, value);
-        }
-
-        public void putBytes(int rowId, int count, byte[] src, int srcIndex) {
-            vector.putBytes(rowId, count, src, srcIndex);
-        }
-
-        public void putShort(int rowId, short value) {
-            vector.putShort(rowId, value);
-        }
-
-        public void putInt(int rowId, int value) {
-            vector.putInt(rowId, value);
-        }
-
-        public void putFloat(int rowId, float value) {
-            vector.putFloat(rowId, value);
-        }
-
-        public void putFloats(int rowId, int count, float[] src, int srcIndex) 
 {
-            vector.putFloats(rowId, count, src, srcIndex);
-        }
-
-        public void putLong(int rowId, long value) {
-            vector.putLong(rowId, value);
-        }
-
-        public void putDouble(int rowId, double value) {
-            vector.putDouble(rowId, value);
-        }
-
-        public void putByteArray(int rowId, byte[] value) {
-            vector.putByteArray(rowId, value);
-        }
-
-        public void putInts(int rowId, int count, int value) {
-            vector.putInts(rowId, count, value);
-        }
-
-        public void putInts(int rowId, int count, int[] src, int srcIndex) {
-            vector.putInts(rowId, count, src, srcIndex);
-        }
-
-        public void putShorts(int rowId, int count, short value) {
-            vector.putShorts(rowId, count, value);
-        }
-
-        public void putShorts(int rowId, int count, short[] src, int srcIndex) 
{
-            vector.putShorts(rowId, count, src, srcIndex);
-        }
-
-        public void putLongs(int rowId, int count, long value) {
-            vector.putLongs(rowId, count, value);
-        }
-
-        public void putLongs(int rowId, int count, long[] src, int srcIndex) {
-            vector.putLongs(rowId, count, src, srcIndex);
-        }
-
-        public void putDecimal(int rowId, Decimal value, int precision) {
-            vector.putDecimal(rowId, value, precision);
-
-        }
-
-        public void putDoubles(int rowId, int count, double value) {
-            vector.putDoubles(rowId, count, value);
-        }
-
-        public void putDoubles(int rowId, int count, double[] src, int 
srcIndex) {
-            vector.putDoubles(rowId, count, src, srcIndex);
-        }
-
-        public void putByteArray(int rowId, byte[] value, int offset, int 
length) {
-            vector.putByteArray(rowId, value, offset, length);
-        }
-
-        public void putNotNull(int rowId) {
-            vector.putNotNull(rowId);
-        }
-
-        public void putNotNulls(int rowId, int count) {
-            vector.putNotNulls(rowId, count);
-        }
-
-        public void putDictionaryInt(int rowId, int value) {
-            vector.getDictionaryIds().putInt(rowId, value);
-        }
-
-      public void setDictionary(CarbonDictionary dictionary) {
-        if (null != dictionary) {
-          vector.setDictionary(new CarbonDictionaryWrapper(dictionary));
-        } else {
-          vector.setDictionary(null);
-        }
-      }
-
-        public void putNull(int rowId) {
-            vector.putNull(rowId);
-        }
-
-        public void putNulls(int rowId, int count) {
-            vector.putNulls(rowId, count);
-        }
-
-        public boolean hasDictionary() {
-            return vector.hasDictionary();
-        }
-
-        public Object reserveDictionaryIds(int capacity) {
-            return vector.reserveDictionaryIds(capacity);
-        }
-
-        @Override
-        public boolean isNullAt(int i) {
-            return vector.isNullAt(i);
-        }
-
-        @Override
-        public boolean getBoolean(int i) {
-            return vector.getBoolean(i);
-        }
-
-        @Override
-        public byte getByte(int i) {
-            return vector.getByte(i);
-        }
-
-        @Override
-        public short getShort(int i) {
-            return vector.getShort(i);
-        }
-
-        @Override
-        public int getInt(int i) {
-            return vector.getInt(i);
-        }
-
-        @Override
-        public long getLong(int i) {
-            return vector.getLong(i);
-        }
-
-        @Override
-        public float getFloat(int i) {
-            return vector.getFloat(i);
-        }
-
-        @Override
-        public double getDouble(int i) {
-            return vector.getDouble(i);
-        }
-
-        @Override
-        public void close() {
-            vector.close();
-        }
-
-        @Override
-        public boolean hasNull() {
-            return vector.hasNull();
-        }
-
-        @Override
-        public int numNulls() {
-            return vector.numNulls();
-        }
-
-        @Override
-        public ColumnarArray getArray(int i) {
-            return vector.getArray(i);
-        }
-
-        @Override
-        public ColumnarMap getMap(int i) {
-            return vector.getMap(i);
-        }
-
-        @Override
-        public Decimal getDecimal(int i, int i1, int i2) {
-            return vector.getDecimal(i, i1, i2);
-        }
-
-        @Override
-        public UTF8String getUTF8String(int i) {
-            return vector.getUTF8String(i);
-        }
-
-        @Override
-        public byte[] getBinary(int i) {
-            return vector.getBinary(i);
-        }
-
-        @Override
-        protected ColumnVector getChild(int i) {
-            return vector.getChild(i);
-        }
-
-        public void reset() {
-            vector.reset();
-        }
-
-        public void setLazyPage(LazyPageLoader lazyPage) {
-            lazyPage.loadPage();
-        }
-
-      /**
-       * It keeps all binary data of all rows to it.
-       * Should use along with @{putArray(int rowId, int offset, int length)} 
to keep lengths
-       * and offset.
-       */
-      public void putAllByteArray(byte[] data, int offset, int length) {
-        vector.arrayData().appendBytes(length, data, offset);
-      }
-
-      public void putArray(int rowId, int offset, int length) {
-        vector.putArray(rowId, offset, length);
-      }
-
-      public WritableColumnVector getVector() {
-            return vector;
-        }
-    }
-
-  public static class ColumnVectorProxyWithLazyLoad extends ColumnVectorProxy {
-
-    private WritableColumnVector vector;
-
-    private LazyPageLoader pageLoad;
-
-    private boolean isLoaded;
-
-    public ColumnVectorProxyWithLazyLoad(ColumnVector columnVector) {
-      super(columnVector);
-      vector = (WritableColumnVector) columnVector;
-    }
-
-    @Override
-    public boolean isNullAt(int i) {
-      checkPageLoaded();
-      return vector.isNullAt(i);
-    }
-
-    @Override
-    public boolean getBoolean(int i) {
-      checkPageLoaded();
-      return vector.getBoolean(i);
-    }
-
-    @Override
-    public byte getByte(int i) {
-      checkPageLoaded();
-      return vector.getByte(i);
-    }
-
-    @Override
-    public short getShort(int i) {
-      checkPageLoaded();
-      return vector.getShort(i);
-    }
-
-    @Override
-    public int getInt(int i) {
-      checkPageLoaded();
-      return vector.getInt(i);
-    }
-
-    @Override
-    public long getLong(int i) {
-      checkPageLoaded();
-      return vector.getLong(i);
-    }
-
-    @Override
-    public float getFloat(int i) {
-      checkPageLoaded();
-      return vector.getFloat(i);
-    }
-
-    @Override
-    public double getDouble(int i) {
-      checkPageLoaded();
-      return vector.getDouble(i);
-    }
-
-    @Override
-    public boolean hasNull() {
-      checkPageLoaded();
-      return vector.hasNull();
-    }
-
-    @Override
-    public int numNulls() {
-      checkPageLoaded();
-      return vector.numNulls();
-    }
-
-    @Override
-    public ColumnarArray getArray(int i) {
-      checkPageLoaded();
-      return vector.getArray(i);
-    }
-
-    @Override
-    public ColumnarMap getMap(int i) {
-      checkPageLoaded();
-      return vector.getMap(i);
-    }
-
-    @Override
-    public Decimal getDecimal(int i, int i1, int i2) {
-      checkPageLoaded();
-      return vector.getDecimal(i, i1, i2);
-    }
-
-    @Override
-    public UTF8String getUTF8String(int i) {
-      checkPageLoaded();
-      return vector.getUTF8String(i);
-    }
-
-    @Override
-    public byte[] getBinary(int i) {
-      checkPageLoaded();
-      return vector.getBinary(i);
-    }
-
-    @Override
-    protected ColumnVector getChild(int i) {
-      checkPageLoaded();
-      return vector.getChild(i);
-    }
-
-    public void reset() {
-      isLoaded = false;
-      pageLoad = null;
-      vector.reset();
-    }
-
-    private void checkPageLoaded() {
-      if (!isLoaded) {
-        if (pageLoad != null) {
-          pageLoad.loadPage();
-        }
-        isLoaded = true;
-      }
-    }
-
-    public void setLazyPage(LazyPageLoader lazyPage) {
-      this.pageLoad = lazyPage;
-    }
-
-  }
-}
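
Note for readers skimming the patch: the ColumnVectorProxyWithLazyLoad class removed above defers decoding of a column page until the first value is actually read, via checkPageLoaded(). Below is a minimal, self-contained sketch of that lazy-load proxy pattern; the PageLoader and IntVector interfaces are simplified stand-ins introduced only for illustration (assumptions), not Spark's WritableColumnVector or CarbonData's LazyPageLoader.

public class LazyVectorProxySketch {

  /** Stand-in for a lazy page loader: decodes a column page on demand. */
  interface PageLoader {
    void loadPage();
  }

  /** Stand-in for the wrapped column vector holding decoded values. */
  interface IntVector {
    int getInt(int rowId);
    void reset();
  }

  /** Proxy that postpones decoding the page until the first value is read. */
  static class LazyIntVectorProxy {
    private final IntVector vector;
    private PageLoader pageLoader;
    private boolean isLoaded;

    LazyIntVectorProxy(IntVector vector) {
      this.vector = vector;
    }

    void setLazyPage(PageLoader pageLoader) {
      this.pageLoader = pageLoader;
    }

    int getInt(int rowId) {
      checkPageLoaded();            // decode the page only on first access
      return vector.getInt(rowId);
    }

    void reset() {
      // mirrors reset() in the diff: forget the loader and mark the page unloaded
      isLoaded = false;
      pageLoader = null;
      vector.reset();
    }

    private void checkPageLoaded() {
      if (!isLoaded) {
        if (pageLoader != null) {
          pageLoader.loadPage();    // fill the wrapped vector from the raw page
        }
        isLoaded = true;            // later reads skip the load
      }
    }
  }

  public static void main(String[] args) {
    // A trivial in-memory vector and loader to demonstrate the proxy.
    int[] page = new int[4];
    IntVector vector = new IntVector() {
      public int getInt(int rowId) { return page[rowId]; }
      public void reset() { java.util.Arrays.fill(page, 0); }
    };
    LazyIntVectorProxy proxy = new LazyIntVectorProxy(vector);
    proxy.setLazyPage(() -> page[0] = 42);   // "decoding" fills the page
    System.out.println(proxy.getInt(0));     // triggers loadPage(), prints 42
  }
}

The key design point, mirrored from the diff, is that reset() clears both the loader and the isLoaded flag, so the same proxy instance can be reused for the next page without re-reading stale data.
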
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/SparkBlockletIndexLoaderJob.scala b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/jobs/SparkBlockletIndexLoaderJob.scala
similarity index 97%
rename from integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/SparkBlockletIndexLoaderJob.scala
rename to integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/jobs/SparkBlockletIndexLoaderJob.scala
index b81cc56..a15518f 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/SparkBlockletIndexLoaderJob.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/jobs/SparkBlockletIndexLoaderJob.scala
@@ -15,12 +15,12 @@
  * limitations under the License.
  */
 
-package org.apache.spark.sql.secondaryindex.Jobs
+package org.apache.spark.sql.secondaryindex.jobs
 
 import java.{lang, util}
 import java.text.SimpleDateFormat
-import java.util.concurrent.{Callable, Executors, ExecutorService, TimeUnit}
 import java.util.Date
+import java.util.concurrent.{Callable, Executors, ExecutorService, TimeUnit}
 
 import scala.collection.JavaConverters._
 
@@ -32,7 +32,6 @@ import org.apache.spark.{Partition, TaskContext, TaskKilledException}
 import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.util.SparkSQLUtil
 
-import org.apache.carbondata.common.logging.LogServiceFactory
 import org.apache.carbondata.core.datastore.block.SegmentPropertiesAndSchemaHolder
 import org.apache.carbondata.core.index.{AbstractIndexJob, IndexInputFormat, IndexStoreManager}
 import org.apache.carbondata.core.index.dev.CacheableIndex
@@ -43,8 +42,6 @@ import org.apache.carbondata.core.util.CarbonUtil
 import org.apache.carbondata.spark.rdd.CarbonRDD
 
 class SparkBlockletIndexLoaderJob extends AbstractIndexJob {
-  private val LOGGER = LogServiceFactory
-    .getLogService(classOf[SparkBlockletIndexLoaderJob].getName)
   override def execute(carbonTable: CarbonTable,
       indexFormat: FileInputFormat[Void, BlockletIndexWrapper]): Unit = {
     val loader: BlockletIndexInputFormat = indexFormat
diff --git a/mv/plan/pom.xml b/mv/plan/pom.xml
index 11e7120..e335d09 100644
--- a/mv/plan/pom.xml
+++ b/mv/plan/pom.xml
@@ -70,15 +70,6 @@
           <failIfNoTests>false</failIfNoTests>
         </configuration>
       </plugin>
-
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-checkstyle-plugin</artifactId>
-        <version>2.17</version>
-        <configuration>
-          <skip>true</skip>
-        </configuration>
-      </plugin>
       <plugin>
         <groupId>org.scala-tools</groupId>
         <artifactId>maven-scala-plugin</artifactId>
diff --git a/pom.xml b/pom.xml
index ac52197..3c2e73a 100644
--- a/pom.xml
+++ b/pom.xml
@@ -398,6 +398,13 @@
           <testSourceDirectory>${basedir}/src/test/java</testSourceDirectory>
           <outputFile>${basedir}/target/checkstyle-output.xml</outputFile>
         </configuration>
+        <dependencies>
+          <dependency>
+            <groupId>com.puppycrawl.tools</groupId>
+            <artifactId>checkstyle</artifactId>
+            <version>6.19</version>
+          </dependency>
+        </dependencies>
         <executions>
           <execution>
             <goals>
