This is an automated email from the ASF dual-hosted git repository.

jackylk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new 99300c0  [HOTFIX] optimize module dependency
99300c0 is described below

commit 99300c059ef928820642923ecd6cc461d05364f1
Author: QiangCai <qiang...@qq.com>
AuthorDate: Tue Mar 3 17:21:11 2020 +0800

    [HOTFIX] optimize module dependency
    
    Why is this PR needed?
    There are many redundant module dependencies across the Maven modules.
    
    What changes were proposed in this PR?
    1. Optimize module dependencies
    2. Fix the HiveExample test case
    3. Avoid calling getOrCreate on the SparkSession again in test cases (a minimal sketch follows below)
    4. Add more test cases
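    
    A minimal sketch of the session-reuse idea behind item 3, assuming a hypothetical
    SharedSparkSession holder owned by the test harness (the names below are
    illustrative only and are not part of this patch):
    
      import org.apache.spark.sql.SparkSession
    
      // Hypothetical holder: the test harness creates the session once and every
      // suite fetches it from here instead of calling getOrCreate() again.
      object SharedSparkSession {
        @volatile private var session: SparkSession = _
    
        def init(master: String = "local[2]", appName: String = "carbon-tests"): SparkSession = {
          if (session == null) {
            session = SparkSession.builder().master(master).appName(appName).getOrCreate()
          }
          session
        }
    
        // Suites call this instead of building a new session of their own.
        def get: SparkSession = {
          require(session != null, "call init() from the test harness first")
          session
        }
      }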
    
    Does this PR introduce any user interface change?
    No
    
    Is any new testcase added?
    Yes
    
    This closes #3652
---
 core/pom.xml                                       |  23 +-
 .../filesystem/AbstractDFSCarbonFile.java          |   2 +-
 .../datastore/filesystem/ViewFSCarbonFile.java     |   1 +
 .../carbondata/core/util/BlockletDataMapUtil.java  |   2 +-
 examples/spark/pom.xml                             |  83 +-
 .../apache/carbondata/examplesCI/RunExamples.scala |  10 +-
 geo/pom.xml                                        |   7 +-
 index/bloom/pom.xml                                |   5 -
 index/secondary-index/pom.xml                      |  36 +-
 integration/hive/pom.xml                           |  17 +-
 .../hive/test/server/HiveEmbeddedServer2.java      |  13 +-
 integration/spark/pom.xml                          |  89 +-
 .../apache/spark/sql/CarbonDataFrameWriter.scala   |   2 +-
 .../scala/org/apache/spark/sql/CarbonEnv.scala     |  16 +-
 .../scala/org/apache/spark/sql/EnvHelper.scala     |  22 +-
 .../command/management/CommonLoadUtils.scala       |   2 +-
 .../mutation/merge/HistoryTableLoadHelper.scala    |   2 +-
 .../table/CarbonCreateDataSourceTableCommand.scala |   9 +
 .../command/table/CarbonCreateTableCommand.scala   |   2 +-
 .../command/table/CarbonDropTableCommand.scala     |   2 +-
 .../spark/sql/execution/strategy/DDLHelper.scala   |   2 +-
 .../spark/sql/execution/strategy/DDLStrategy.scala |   4 +-
 .../execution/strategy/MixedFormatHandler.scala    |   4 +-
 .../spark/sql/hive/CarbonFileMetastore.scala       |   2 +-
 .../spark/sql/hive/CarbonHiveMetadataUtil.scala    |   2 +-
 .../parser/CarbonExtensionSpark2SqlParser.scala    |   2 +-
 .../spark/sql/parser/CarbonSparkSqlParser.scala    |   4 +-
 .../secondaryindex/command/SICreationCommand.scala |   6 +-
 .../AlterTableColumnRenameEventListener.scala      |   2 +-
 .../SILoadEventListenerForFailedSegments.scala     |   2 +-
 .../hive/CarbonInternalMetastore.scala             |   2 +-
 .../apache/spark/sql/CarbonToSparkAdapter.scala    |   8 +-
 ...rmatHandlerUtil.scala => SparkSqlAdapter.scala} |  17 +-
 .../sql/parser/SparkSqlAstBuilderWrapper.scala}    |  30 +-
 .../apache/spark/sql/CarbonToSparkAdapter.scala    |  13 +-
 ...rmatHandlerUtil.scala => SparkSqlAdapter.scala} |   5 +-
 .../sql/parser/SparkSqlAstBuilderWrapper.scala}    |  30 +-
 .../dataload/TestLoadDataWithYarnLocalDirs.scala   |   2 +-
 .../testsuite/cloud/AllDataSourceTestCase.scala    | 904 +++++++++++++++++++++
 .../testsuite/cloud/CacheRefreshTestCase.scala     |  36 +
 .../createTable/TestCreateTableIfNotExists.scala   |   2 +-
 .../dataload/TestGlobalSortDataLoad.scala          |   3 +-
 .../carbondata/store/SparkCarbonStoreTest.scala    |   4 +-
 .../carbondata/TestStreamingTableOpName.scala      |   3 +
 .../carbondata/TestStreamingTableQueryFilter.scala |   3 +
 .../TestStreamingTableWithLongString.scala         |   3 +
 .../TestStreamingTableWithRowParser.scala          |   5 +-
 .../apache/spark/sql/CarbonExtensionSuite.scala    |  11 +-
 .../datasource/SparkCarbonDataSourceTest.scala     |   2 +-
 mv/core/pom.xml                                    |  29 +-
 mv/plan/pom.xml                                    |   5 -
 pom.xml                                            |  25 +-
 processing/pom.xml                                 |   8 -
 sdk/sdk/pom.xml                                    |   1 -
 54 files changed, 1155 insertions(+), 371 deletions(-)

diff --git a/core/pom.xml b/core/pom.xml
index e8ff8e1..8b389f5 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -52,26 +52,7 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-client</artifactId>
-      <version>${hadoop.version}</version>
-      <exclusions>
-        <exclusion>
-          <groupId>javax.servlet</groupId>
-          <artifactId>servlet-api</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>javax.servlet.jsp</groupId>
-          <artifactId>jsp-api</artifactId>
-        </exclusion>
-      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.xerial.snappy</groupId>
@@ -126,7 +107,7 @@
     <dependency>
       <groupId>io.netty</groupId>
       <artifactId>netty-all</artifactId>
-      <version>4.0.42.Final</version>
+      <version>4.1.17.Final</version>
     </dependency>
     <dependency>
       <groupId>org.lz4</groupId>
@@ -191,4 +172,4 @@
     </profile>
   </profiles>
 
-</project>
\ No newline at end of file
+</project>
diff --git 
a/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/AbstractDFSCarbonFile.java
 
b/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/AbstractDFSCarbonFile.java
index 5169cfb..4459420 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/AbstractDFSCarbonFile.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/AbstractDFSCarbonFile.java
@@ -153,7 +153,7 @@ public abstract class AbstractDFSCarbonFile implements 
CarbonFile {
     try {
       return fileSystem.getFileStatus(path).getLen();
     } catch (IOException e) {
-      throw new CarbonFileException("Unable to get file status: ", e);
+      throw new CarbonFileException("Unable to get file status for " + 
path.toString(), e);
     }
   }
 
diff --git 
a/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/ViewFSCarbonFile.java
 
b/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/ViewFSCarbonFile.java
index 70a86d8..4f90cd1 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/ViewFSCarbonFile.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/ViewFSCarbonFile.java
@@ -86,6 +86,7 @@ public class ViewFSCarbonFile extends AbstractDFSCarbonFile {
         fileSystem.rename(path, new Path(changeToName));
         return true;
       } else {
+        LOGGER.warn("Unrecognized file system for path: " + path);
         return false;
       }
     } catch (IOException e) {
diff --git 
a/core/src/main/java/org/apache/carbondata/core/util/BlockletDataMapUtil.java 
b/core/src/main/java/org/apache/carbondata/core/util/BlockletDataMapUtil.java
index 0213dd2..27ce904 100644
--- 
a/core/src/main/java/org/apache/carbondata/core/util/BlockletDataMapUtil.java
+++ 
b/core/src/main/java/org/apache/carbondata/core/util/BlockletDataMapUtil.java
@@ -510,4 +510,4 @@ public class BlockletDataMapUtil {
     dbName = carbonTable.getDatabaseName();
     return 
CarbonProperties.getInstance().isDataMapParallelLoadingEnabled(dbName, 
tableName);
   }
-}
\ No newline at end of file
+}
diff --git a/examples/spark/pom.xml b/examples/spark/pom.xml
index 8f78791..c4d2154 100644
--- a/examples/spark/pom.xml
+++ b/examples/spark/pom.xml
@@ -36,61 +36,10 @@
   <dependencies>
     <dependency>
       <groupId>org.apache.carbondata</groupId>
-      <artifactId>carbondata-hive</artifactId>
-      <version>${project.version}</version>
-      <exclusions>
-        <exclusion>
-          <groupId>org.apache.commons</groupId>
-          <artifactId>*</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>org.apache.hive</groupId>
-          <artifactId>*</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.carbondata</groupId>
       <artifactId>carbondata-spark</artifactId>
       <version>${project.version}</version>
     </dependency>
     <dependency>
-      <groupId>org.apache.carbondata</groupId>
-      <artifactId>carbondata-streaming</artifactId>
-      <version>${project.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.carbondata</groupId>
-      <artifactId>carbondata-sdk</artifactId>
-      <version>${project.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.spark</groupId>
-      <artifactId>spark-sql_${scala.binary.version}</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.spark</groupId>
-      <artifactId>spark-sql-kafka-0-10_${scala.binary.version}</artifactId>
-      <exclusions>
-        <exclusion>
-          <groupId>net.jpountz.lz4</groupId>
-          <artifactId>lz4</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.spark</groupId>
-      <artifactId>spark-hive-thriftserver_${scala.binary.version}</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.spark</groupId>
-      <artifactId>spark-repl_${scala.binary.version}</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.spark</groupId>
-      <artifactId>spark-streaming_${scala.binary.version}</artifactId>
-    </dependency>
-    <dependency>
       <groupId>org.apache.httpcomponents</groupId>
       <artifactId>httpclient</artifactId>
       <version>${httpclient.version}</version>
@@ -101,21 +50,6 @@
       <version>${httpcore.version}</version>
     </dependency>
     <dependency>
-      <groupId>org.scalatest</groupId>
-      <artifactId>scalatest_${scala.binary.version}</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <!-- in spark 2.3 spark catalyst added dependency on spark-core-->
-    <dependency>
-      <groupId>org.apache.spark</groupId>
-      <artifactId>spark-core_${scala.binary.version}</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.carbondata</groupId>
-      <artifactId>carbondata-core</artifactId>
-      <version>${project.version}</version>
-    </dependency>
-    <dependency>
       <groupId>org.alluxio</groupId>
       <artifactId>alluxio-core-client-hdfs</artifactId>
       <version>1.8.1</version>
@@ -125,6 +59,11 @@
       <artifactId>alluxio-shell</artifactId>
       <version>1.8.1</version>
     </dependency>
+    <dependency>
+      <groupId>org.scalatest</groupId>
+      <artifactId>scalatest_${scala.binary.version}</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   <build>
@@ -252,15 +191,5 @@
         <maven.test.skip>true</maven.test.skip>
       </properties>
     </profile>
-    <profile>
-      <id>mv</id>
-      <dependencies>
-        <dependency>
-          <groupId>org.apache.carbondata</groupId>
-          <artifactId>carbondata-mv-core</artifactId>
-          <version>${project.version}</version>
-        </dependency>
-      </dependencies>
-    </profile>
   </profiles>
-</project>
\ No newline at end of file
+</project>
diff --git 
a/examples/spark/src/test/scala/org/apache/carbondata/examplesCI/RunExamples.scala
 
b/examples/spark/src/test/scala/org/apache/carbondata/examplesCI/RunExamples.scala
index 1f3ddb0..b18fdba 100644
--- 
a/examples/spark/src/test/scala/org/apache/carbondata/examplesCI/RunExamples.scala
+++ 
b/examples/spark/src/test/scala/org/apache/carbondata/examplesCI/RunExamples.scala
@@ -21,7 +21,7 @@ import java.io.File
 
 import org.apache.spark.sql.test.TestQueryExecutor
 import org.apache.spark.sql.test.util.QueryTest
-
+import org.apache.spark.sql.SparkSqlAdapter
 import org.scalatest.BeforeAndAfterAll
 
 import org.apache.carbondata.examples._
@@ -121,12 +121,8 @@ class RunExamples extends QueryTest with BeforeAndAfterAll 
{
     DirectSQLExample.exampleBody(spark)
   }
 
-  // TODO:
-  //  As the hive version for spark-hive(1.2.1) and carbon-hive(3.1.0) are now 
different
-  //  therefore the HiveExample will throw RuntimeException.
-  //  Enable after spark supports 3.1.0 version.
-  //  A separate test class would be added instead.
-  ignore("HiveExample") {
+  test("HiveExample") {
+    SparkSqlAdapter.initSparkSQL()
     HiveExample.createCarbonTable(spark)
     HiveExample.readFromHive
   }
diff --git a/geo/pom.xml b/geo/pom.xml
index 38bb9bd..e184032 100644
--- a/geo/pom.xml
+++ b/geo/pom.xml
@@ -38,11 +38,6 @@
   <dependencies>
     <dependency>
       <groupId>org.apache.carbondata</groupId>
-      <artifactId>carbondata-common</artifactId>
-      <version>${project.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.carbondata</groupId>
       <artifactId>carbondata-core</artifactId>
       <version>${project.version}</version>
     </dependency>
@@ -182,4 +177,4 @@
       </properties>
     </profile>
   </profiles>
-</project>
\ No newline at end of file
+</project>
diff --git a/index/bloom/pom.xml b/index/bloom/pom.xml
index 0b391a3..df885ff 100644
--- a/index/bloom/pom.xml
+++ b/index/bloom/pom.xml
@@ -20,11 +20,6 @@
   <dependencies>
     <dependency>
       <groupId>org.apache.carbondata</groupId>
-      <artifactId>carbondata-core</artifactId>
-      <version>${project.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.carbondata</groupId>
       <artifactId>carbondata-processing</artifactId>
       <version>${project.version}</version>
     </dependency>
diff --git a/index/secondary-index/pom.xml b/index/secondary-index/pom.xml
index 2160b2b..b3034f5 100644
--- a/index/secondary-index/pom.xml
+++ b/index/secondary-index/pom.xml
@@ -36,14 +36,6 @@
 
   <dependencies>
     <dependency>
-      <groupId>org.apache.spark</groupId>
-      <artifactId>spark-hive-thriftserver_${scala.binary.version}</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.spark</groupId>
-      <artifactId>spark-repl_${scala.binary.version}</artifactId>
-    </dependency>
-    <dependency>
       <groupId>org.apache.carbondata</groupId>
       <artifactId>carbondata-spark</artifactId>
       <version>${project.version}</version>
@@ -56,24 +48,6 @@
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.carbondata</groupId>
-      <artifactId>carbondata-lucene</artifactId>
-      <version>${project.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.carbondata</groupId>
-      <artifactId>carbondata-bloom</artifactId>
-      <version>${project.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.carbondata</groupId>
-      <artifactId>carbondata-sdk</artifactId>
-      <version>${project.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
     </dependency>
@@ -226,14 +200,6 @@
   </build>
   <profiles>
     <profile>
-      <id>build-all</id>
-      <properties>
-        <spark.version>2.1.0</spark.version>
-        <scala.binary.version>2.11</scala.binary.version>
-        <scala.version>2.11.8</scala.version>
-      </properties>
-    </profile>
-    <profile>
       <id>sdvtest</id>
       <properties>
         <maven.test.skip>true</maven.test.skip>
@@ -241,4 +207,4 @@
     </profile>
   </profiles>
 
-</project>
\ No newline at end of file
+</project>
diff --git a/integration/hive/pom.xml b/integration/hive/pom.xml
index d8be11a..80f24b8 100644
--- a/integration/hive/pom.xml
+++ b/integration/hive/pom.xml
@@ -99,21 +99,6 @@
             <version>${httpcore.version}</version>
         </dependency>
         <dependency>
-            <groupId>org.apache.hadoop</groupId>
-            <artifactId>hadoop-client</artifactId>
-            <version>${hadoop.version}</version>
-            <exclusions>
-                <exclusion>
-                    <groupId>javax.servlet</groupId>
-                    <artifactId>servlet-api</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>javax.servlet.jsp</groupId>
-                    <artifactId>jsp-api</artifactId>
-                </exclusion>
-            </exclusions>
-        </dependency>
-        <dependency>
             <groupId>org.apache.carbondata</groupId>
             <artifactId>carbondata-hadoop</artifactId>
             <version>${project.version}</version>
@@ -195,4 +180,4 @@
         </profile>
     </profiles>
 
-</project>
\ No newline at end of file
+</project>
diff --git 
a/integration/hive/src/main/java/org/apache/carbondata/hive/test/server/HiveEmbeddedServer2.java
 
b/integration/hive/src/main/java/org/apache/carbondata/hive/test/server/HiveEmbeddedServer2.java
index 83d5015..ca84240 100644
--- 
a/integration/hive/src/main/java/org/apache/carbondata/hive/test/server/HiveEmbeddedServer2.java
+++ 
b/integration/hive/src/main/java/org/apache/carbondata/hive/test/server/HiveEmbeddedServer2.java
@@ -18,7 +18,9 @@
 package org.apache.carbondata.hive.test.server;
 
 import java.io.File;
+import java.io.IOException;
 import java.lang.reflect.Field;
+import java.net.ServerSocket;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Properties;
@@ -30,7 +32,6 @@ import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
@@ -59,7 +60,7 @@ public class HiveEmbeddedServer2 {
     if (hiveServer == null) {
       config = configure();
       hiveServer = new HiveServer2();
-      port = MetaStoreUtils.findFreePort();
+      port = findFreePort();
       config.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_PORT, port);
       config.set(FileInputFormat.INPUT_DIR_RECURSIVE, "true");
       hiveServer.init(config);
@@ -68,6 +69,13 @@ public class HiveEmbeddedServer2 {
     }
   }
 
+  public static int findFreePort() throws IOException {
+    ServerSocket socket = new ServerSocket(0);
+    int port = socket.getLocalPort();
+    socket.close();
+    return port;
+  }
+
   public int getFreePort() {
     log.info("Free Port Available is " + port);
     return port;
@@ -139,6 +147,7 @@ public class HiveEmbeddedServer2 {
     conf.set("hive.added.archives.path", "");
     conf.set("fs.default.name", "file:///");
     conf.set(HiveConf.ConfVars.SUBMITLOCALTASKVIACHILD.varname, "false");
+    conf.set("hive.mapred.supports.subdirectories", "true");
 
     // clear mapred.job.tracker - Hadoop defaults to 'local' if not defined. 
Hive however expects
     // this to be set to 'local' - if it's not, it does a remote execution 
(i.e. no child JVM)
diff --git a/integration/spark/pom.xml b/integration/spark/pom.xml
index d6d164d..eb84bb3 100644
--- a/integration/spark/pom.xml
+++ b/integration/spark/pom.xml
@@ -106,6 +106,10 @@
           <groupId>org.apache.hive</groupId>
           <artifactId>*</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>org.spark-project.hive</groupId>
+          <artifactId>*</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
     <dependency>
@@ -115,11 +119,6 @@
     </dependency>
     <dependency>
       <groupId>org.apache.carbondata</groupId>
-      <artifactId>carbondata-sdk</artifactId>
-      <version>${project.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.carbondata</groupId>
       <artifactId>carbondata-lucene</artifactId>
       <version>${project.version}</version>
     </dependency>
@@ -149,27 +148,6 @@
     </dependency>
     <dependency>
       <groupId>org.apache.spark</groupId>
-      <artifactId>spark-streaming_${scala.binary.version}</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.spark</groupId>
-      <artifactId>spark-core_${scala.binary.version}</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.spark</groupId>
-      <artifactId>spark-sql_${scala.binary.version}</artifactId>
-      <exclusions>
-        <!-- from transitive dependency com.univocity:univocity-parsers:2.5.9
-        is added from the org.apache.spark:spark-sql_2.11,so need to remove
-        this version.Carbon uses 2.2.1 version  -->
-        <exclusion>
-          <groupId>com.univocity</groupId>
-          <artifactId>univocity-parsers</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.spark</groupId>
       <artifactId>spark-sql-kafka-0-10_${scala.binary.version}</artifactId>
       <exclusions>
         <exclusion>
@@ -179,25 +157,6 @@
       </exclusions>
     </dependency>
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-aws</artifactId>
-      <version>${hadoop.version}</version>
-      <exclusions>
-        <exclusion>
-          <groupId>com.fasterxml.jackson.core</groupId>
-          <artifactId>jackson-core</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.fasterxml.jackson.core</groupId>
-          <artifactId>jackson-annotations</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.fasterxml.jackson.core</groupId>
-          <artifactId>jackson-databind</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
       <groupId>org.apache.httpcomponents</groupId>
       <artifactId>httpclient</artifactId>
       <version>${httpclient.version}</version>
@@ -318,6 +277,25 @@
               <!-- other configurations here -->
             </configuration>
           </execution>
+          <execution>
+            <id>jacoco-dependency-ant</id>
+            <goals>
+              <goal>copy</goal>
+            </goals>
+            <phase>process-test-resources</phase>
+            <inherited>false</inherited>
+            <configuration>
+              <artifactItems>
+                <artifactItem>
+                  <groupId>org.jacoco</groupId>
+                  <artifactId>org.jacoco.ant</artifactId>
+                  <version>0.7.9</version>
+                </artifactItem>
+              </artifactItems>
+              <stripVersion>true</stripVersion>
+              <outputDirectory>${basedir}/target/jacoco-jars</outputDirectory>
+            </configuration>
+          </execution>
         </executions>
       </plugin>
       <plugin>
@@ -327,9 +305,10 @@
         <!-- Note config is repeated in scalatest config -->
         <configuration>
           
<reportsDirectory>${project.build.directory}/surefire-reports</reportsDirectory>
-          <argLine>-Xmx3g -XX:MaxPermSize=512m -XX:ReservedCodeCacheSize=512m 
</argLine>
+          <argLine>-Xmx3g -XX:MaxPermSize=512m 
-XX:ReservedCodeCacheSize=512m</argLine>
           <systemProperties>
             <java.awt.headless>true</java.awt.headless>
+            
<spark.carbon.hive.schema.store>${carbon.hive.based.metastore}</spark.carbon.hive.schema.store>
           </systemProperties>
           <failIfNoTests>false</failIfNoTests>
         </configuration>
@@ -485,14 +464,6 @@
 
   <profiles>
     <profile>
-      <id>build-all</id>
-      <properties>
-        <spark.version>2.3.4</spark.version>
-        <scala.binary.version>2.11</scala.binary.version>
-        <scala.version>2.11.8</scala.version>
-      </properties>
-    </profile>
-    <profile>
       <id>sdvtest</id>
       <properties>
         <maven.test.skip>true</maven.test.skip>
@@ -503,11 +474,6 @@
       <activation>
         <activeByDefault>true</activeByDefault>
       </activation>
-      <properties>
-        <spark.version>2.3.4</spark.version>
-        <scala.binary.version>2.11</scala.binary.version>
-        <scala.version>2.11.8</scala.version>
-      </properties>
       <build>
         <plugins>
           <plugin>
@@ -543,11 +509,6 @@
     </profile>
     <profile>
       <id>spark-2.4</id>
-      <properties>
-        <spark.version>2.4.4</spark.version>
-        <scala.binary.version>2.11</scala.binary.version>
-        <scala.version>2.11.8</scala.version>
-      </properties>
       <build>
         <plugins>
           <plugin>
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala
 
b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala
index 2886fef..8331b54 100644
--- 
a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala
+++ 
b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala
@@ -34,7 +34,7 @@ class CarbonDataFrameWriter(sqlContext: SQLContext, val 
dataFrame: DataFrame) {
   def saveAsCarbonFile(parameters: Map[String, String] = Map()): Unit = {
     // create a new table using dataframe's schema and write its content into 
the table
     sqlContext.sparkSession.sql(
-      makeCreateTableString(dataFrame.schema, new CarbonOption(parameters)))
+      makeCreateTableString(dataFrame.schema, new 
CarbonOption(parameters))).collect()
     writeToCarbonFile(parameters)
   }
 
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonEnv.scala 
b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
index 92782e9..5875af6 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
@@ -182,21 +182,12 @@ object CarbonEnv {
       if (carbonEnv == null) {
         carbonEnv = new CarbonEnv
         carbonEnv.init(sparkSession)
-        addSparkSessionListener(sparkSession)
+        CarbonToSparkAdapter.addSparkSessionListener(sparkSession)
         carbonEnvMap.put(sparkSession, carbonEnv)
       }
       carbonEnv
   }
 
-  private def addSparkSessionListener(sparkSession: SparkSession): Unit = {
-    sparkSession.sparkContext.addSparkListener(new SparkListener {
-      override def onApplicationEnd(applicationEnd: 
SparkListenerApplicationEnd): Unit = {
-        CarbonEnv.carbonEnvMap.remove(sparkSession)
-        ThreadLocalSessionInfo.unsetAll()
-      }
-    })
-  }
-
   /**
    * Method
    * 1. To initialize Listeners to their respective events in the 
OperationListenerBus
@@ -367,7 +358,8 @@ object CarbonEnv {
         .locationUri.toString
     // for default database and db ends with .db
     // check whether the carbon store and hive store is same or different.
-    if (dbName.equals("default") || databaseLocation.endsWith(".db")) {
+    if ((!EnvHelper.isLegacy(sparkSession)) &&
+        (dbName.equals("default") || databaseLocation.endsWith(".db"))) {
       val carbonStorePath = 
FileFactory.getUpdatedFilePath(CarbonProperties.getStorePath())
       val hiveStorePath = FileFactory.getUpdatedFilePath(
         sparkSession.conf.get("spark.sql.warehouse.dir", carbonStorePath))
@@ -419,7 +411,7 @@ object CarbonEnv {
     val path = location.getOrElse(
       CarbonEnv.newTablePath(databaseNameOp, tableName)(sparkSession))
     if (!isExternal && isTransactionalTable && location.isEmpty &&
-        FileFactory.getCarbonFile(path).exists()) {
+        (FileFactory.getCarbonFile(path).exists() || 
EnvHelper.isLegacy(sparkSession))) {
       path + "_" + tableId
     } else {
       path
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/EnvHelper.scala 
b/integration/spark/src/main/scala/org/apache/spark/sql/EnvHelper.scala
index d347850..a981e38 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/EnvHelper.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/EnvHelper.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.sql
 
+import org.apache.commons.lang3.StringUtils
+
 import org.apache.carbondata.core.metadata.DatabaseLocationProvider
 
 /**
@@ -24,19 +26,31 @@ import 
org.apache.carbondata.core.metadata.DatabaseLocationProvider
  */
 object EnvHelper {
 
-  def isCloud(sparkSession: SparkSession): Boolean = false
+  def isLegacy(sparkSession: SparkSession): Boolean = false
 
-  def isPrivacy(sparkSession: SparkSession, isExternal: Boolean): Boolean = 
false
+  def isPrivacy(sparkSession: SparkSession, isExternal: Boolean): Boolean = {
+    (!isExternal) && isLegacy(sparkSession)
+  }
 
   def setDefaultHeader(
       sparkSession: SparkSession,
       optionsFinal: java.util.Map[String, String]
   ): Unit = {
-
+    if (isLegacy(sparkSession)) {
+      val fileHeader = optionsFinal.get("fileheader")
+      val header = optionsFinal.get("header")
+      if (StringUtils.isEmpty(fileHeader) && StringUtils.isEmpty(header)) {
+        optionsFinal.put("header", "false")
+      }
+    }
   }
 
   def isRetainData(sparkSession: SparkSession, retainData: Boolean): Boolean = 
{
-    true
+    if (isLegacy(sparkSession)) {
+      retainData
+    } else {
+      true
+    }
   }
 
   def getDatabase(database: String): String = {
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CommonLoadUtils.scala
 
b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CommonLoadUtils.scala
index 9e01fb1..f76a3db 100644
--- 
a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CommonLoadUtils.scala
+++ 
b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CommonLoadUtils.scala
@@ -1004,7 +1004,7 @@ object CommonLoadUtils {
           overwrite = false,
           ifPartitionNotExists = false)
       SparkUtil.setNullExecutionId(loadParams.sparkSession)
-      Dataset.ofRows(loadParams.sparkSession, convertedPlan)
+      Dataset.ofRows(loadParams.sparkSession, convertedPlan).collect()
     } catch {
       case ex: Throwable =>
         val (executorMessage, errorMessage) = 
CarbonScalaUtil.retrieveAndLogErrorMsg(ex, LOGGER)
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/HistoryTableLoadHelper.scala
 
b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/HistoryTableLoadHelper.scala
index 6ef7c37..c7f6e6c 100644
--- 
a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/HistoryTableLoadHelper.scala
+++ 
b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/mutation/merge/HistoryTableLoadHelper.scala
@@ -93,7 +93,7 @@ object HistoryTableLoadHelper {
       unionDf.createOrReplaceTempView(alias)
       val start = System.currentTimeMillis()
       sparkSession.sql(s"insert into ${ insert.historyTable.quotedString } " +
-                       s"select * from ${ alias }")
+                       s"select * from ${ alias }").collect()
       LOGGER.info("Time taken to insert into history table " + 
(System.currentTimeMillis() - start))
     }
   }
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateDataSourceTableCommand.scala
 
b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateDataSourceTableCommand.scala
index 389a8da..0274bd4 100644
--- 
a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateDataSourceTableCommand.scala
+++ 
b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateDataSourceTableCommand.scala
@@ -17,6 +17,7 @@
 
 package org.apache.spark.sql.execution.command.table
 
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException
 import org.apache.spark.sql.{AnalysisException, CarbonEnv, CarbonSource, Row, 
SparkSession}
 import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException
 import org.apache.spark.sql.catalyst.catalog.{CatalogTable, CatalogTableType}
@@ -85,6 +86,14 @@ case class CarbonCreateDataSourceTableCommand(
       case ex: TableAlreadyExistsException if ignoreIfExists =>
         LOGGER.error(ex)
         return Seq.empty[Row]
+      case ex: AnalysisException if ignoreIfExists =>
+        if (ex.getCause != null && ex.getCause.getCause != null &&
+            ex.getCause.getCause.isInstanceOf[AlreadyExistsException]) {
+          LOGGER.error(ex)
+          return Seq.empty[Row]
+        } else {
+          throw ex
+        }
       case ex =>
         throw ex
     }
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateTableCommand.scala
 
b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateTableCommand.scala
index 5916bfe..7d9e3ce 100644
--- 
a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateTableCommand.scala
+++ 
b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateTableCommand.scala
@@ -142,7 +142,7 @@ case class CarbonCreateTableCommand(
                  |  isVisible "$isVisible"
                  |  $carbonSchemaString)
                  |  $partitionString
-             """.stripMargin)
+             """.stripMargin).collect()
           }
         } catch {
           case e: AnalysisException =>
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDropTableCommand.scala
 
b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDropTableCommand.scala
index e0c153c..5ab8586 100644
--- 
a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDropTableCommand.scala
+++ 
b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDropTableCommand.scala
@@ -158,7 +158,7 @@ case class CarbonDropTableCommand(
 
   override def processData(sparkSession: SparkSession): Seq[Row] = {
     // clear driver side index and dictionary cache
-    if (!EnvHelper.isCloud(sparkSession)
+    if (!EnvHelper.isLegacy(sparkSession)
         && carbonTable != null
         && !(carbonTable.isChildTableForMV && !dropChildTable)) {
       // delete the table folder
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/DDLHelper.scala
 
b/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/DDLHelper.scala
index 8af08cd..6cbb40d 100644
--- 
a/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/DDLHelper.scala
+++ 
b/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/DDLHelper.scala
@@ -52,7 +52,7 @@ object DDLHelper {
   ): CreateDatabaseCommand = {
     ThreadLocalSessionInfo
       
.setConfigurationToCurrentThread(sparkSession.sessionState.newHadoopConf())
-    if (!EnvHelper.isCloud(sparkSession)) {
+    if (!EnvHelper.isLegacy(sparkSession)) {
       val databaseName = createDatabaseCommand.databaseName
       val dbLocation = try {
         CarbonEnv.getDatabaseLocation(databaseName, sparkSession)
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/DDLStrategy.scala
 
b/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/DDLStrategy.scala
index 401e8f9..d11d6f7 100644
--- 
a/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/DDLStrategy.scala
+++ 
b/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/DDLStrategy.scala
@@ -133,7 +133,7 @@ class DDLStrategy(sparkSession: SparkSession) extends 
SparkStrategy {
       case createTable: CreateTableCommand
         if isCarbonFileHiveTable(createTable.table) =>
         // CREATE TABLE STORED AS carbon
-        if (EnvHelper.isCloud(sparkSession)) {
+        if (EnvHelper.isLegacy(sparkSession)) {
           Nil
         } else {
           ExecutedCommandExec(DDLHelper.createCarbonFileHiveTable(createTable, 
sparkSession)) :: Nil
@@ -147,7 +147,7 @@ class DDLStrategy(sparkSession: SparkSession) extends 
SparkStrategy {
       case ctas: CreateHiveTableAsSelectCommand
         if isCarbonFileHiveTable(ctas.tableDesc) =>
         // CREATE TABLE STORED AS carbon AS SELECT
-        if (EnvHelper.isCloud(sparkSession)) {
+        if (EnvHelper.isLegacy(sparkSession)) {
           Nil
         } else {
           DataWritingCommandExec(
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/MixedFormatHandler.scala
 
b/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/MixedFormatHandler.scala
index 5e82eb3..089bb72 100644
--- 
a/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/MixedFormatHandler.scala
+++ 
b/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/MixedFormatHandler.scala
@@ -24,7 +24,7 @@ import scala.collection.mutable.ArrayBuffer
 
 import org.apache.hadoop.fs.{FileStatus, FileSystem, Path, PathFilter}
 import org.apache.spark.rdd.RDD
-import org.apache.spark.sql.{MixedFormatHandlerUtil, SparkSession}
+import org.apache.spark.sql.{SparkSession, SparkSqlAdapter}
 import 
org.apache.spark.sql.carbondata.execution.datasources.SparkCarbonFileFormat
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.expressions
@@ -318,7 +318,7 @@ object MixedFormatHandler {
     val outputAttributes = readDataColumns ++ partitionColumns
 
     val scan =
-      MixedFormatHandlerUtil.getScanForSegments(
+      SparkSqlAdapter.getScanForSegments(
         fsRelation,
         outputAttributes,
         outputSchema,
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonFileMetastore.scala
 
b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonFileMetastore.scala
index df05499..576a2ce 100644
--- 
a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonFileMetastore.scala
+++ 
b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonFileMetastore.scala
@@ -560,7 +560,7 @@ class CarbonFileMetastore extends CarbonMetaStore {
         sparkSession.sessionState.catalog.listTables(database)
           .map(table => CarbonTable.buildUniqueName(database, table.table))
     }
-    val cachedTableList = if (EnvHelper.isCloud(sparkSession)) {
+    val cachedTableList = if (EnvHelper.isLegacy(sparkSession)) {
       // for multi-tenant scenario, it need to check the table unique name
       // ensure the current user own this table
       CarbonMetadata.getInstance().getAllTables.asScala.filter { carbonTable =>
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonHiveMetadataUtil.scala
 
b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonHiveMetadataUtil.scala
index 9e5a280..e965a16 100644
--- 
a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonHiveMetadataUtil.scala
+++ 
b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonHiveMetadataUtil.scala
@@ -107,7 +107,7 @@ object CarbonHiveMetadataUtil {
     CarbonInternalScalaUtil.removeIndexTableInfo(parentCarbonTable, tableName)
     sparkSession.sql(
       s"""ALTER TABLE $dbName.$parentTableName SET SERDEPROPERTIES 
('indexInfo'='$newIndexInfo')
-        """.stripMargin)
+        """.stripMargin).collect()
     FileInternalUtil.touchSchemaFileTimestamp(dbName, parentTableName,
       parentCarbonTable.getTablePath, System.currentTimeMillis())
     FileInternalUtil.touchStoreTimeStamp()
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonExtensionSpark2SqlParser.scala
 
b/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonExtensionSpark2SqlParser.scala
index ff75638..b1a529b 100644
--- 
a/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonExtensionSpark2SqlParser.scala
+++ 
b/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonExtensionSpark2SqlParser.scala
@@ -34,7 +34,7 @@ import 
org.apache.carbondata.core.constants.CarbonCommonConstants
 class CarbonExtensionSpark2SqlParser extends CarbonSpark2SqlParser {
 
   override protected lazy val extendedSparkSyntax: Parser[LogicalPlan] =
-    loadDataNew | alterTableAddColumns
+    loadDataNew | alterTableAddColumns | explainPlan
 
   /**
    * alter table add columns with TBLPROPERTIES
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
 
b/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
index c7547cb..f4a524b 100644
--- 
a/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
+++ 
b/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
@@ -78,11 +78,11 @@ class CarbonSparkSqlParser(conf: SQLConf, sparkSession: 
SparkSession) extends Ab
 class CarbonHelperSqlAstBuilder(conf: SQLConf,
     parser: CarbonSpark2SqlParser,
     sparkSession: SparkSession)
-  extends SparkSqlAstBuilder(conf) {
+  extends SparkSqlAstBuilderWrapper(conf) {
   /**
    * Parse a key-value map from a [[TablePropertyListContext]], assuming all 
values are specified.
    */
-  def visitPropertyKeyValues(ctx: TablePropertyListContext): Map[String, 
String] = {
+  override def visitPropertyKeyValues(ctx: TablePropertyListContext): 
Map[String, String] = {
     val props = visitTablePropertyList(ctx)
     CarbonSparkSqlParserUtil.visitPropertyKeyValues(ctx, props)
   }
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/command/SICreationCommand.scala
 
b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/command/SICreationCommand.scala
index 9dc9c2f..7991f95 100644
--- 
a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/command/SICreationCommand.scala
+++ 
b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/command/SICreationCommand.scala
@@ -309,6 +309,7 @@ class ErrorMessage(message: String) extends 
Exception(message) {
                  |isSITableEnabled "false", parentTableId
                  |"${carbonTable.getCarbonTableIdentifier.getTableId}",
                  |parentTableName "$tableName"$carbonSchemaString) 
""".stripMargin)
+              .collect()
           } catch {
             case e: IOException =>
               if (FileFactory.isFileExist(tablePath)) {
@@ -323,6 +324,7 @@ class ErrorMessage(message: String) extends 
Exception(message) {
                 'parentTableName'='$tableName', 'isIndexTable' = 'true', 
'isSITableEnabled' =
                 'false', 'parentTablePath' = '${carbonTable.getTablePath}',
                 'parentTableId' = 
'${carbonTable.getCarbonTableIdentifier.getTableId}')""")
+            .collect()
         }
 
         CarbonInternalScalaUtil.addIndexTableInfo(carbonTable, indexTableName, 
indexTableCols)
@@ -331,7 +333,7 @@ class ErrorMessage(message: String) extends 
Exception(message) {
 
         sparkSession.sql(
           s"""ALTER TABLE $databaseName.$tableName SET SERDEPROPERTIES 
('indexInfo' =
-              |'$indexInfo')""".stripMargin)
+              |'$indexInfo')""".stripMargin).collect()
 
       val tableIdent = TableIdentifier(tableName, Some(databaseName))
 
@@ -356,7 +358,7 @@ class ErrorMessage(message: String) extends 
Exception(message) {
         // enable the SI table
         sparkSession.sql(
           s"""ALTER TABLE $databaseName.$indexTableName SET
-             |SERDEPROPERTIES ('isSITableEnabled' = 'true')""".stripMargin)
+             |SERDEPROPERTIES ('isSITableEnabled' = 
'true')""".stripMargin).collect()
       }
       val createTablePostExecutionEvent: CreateTablePostExecutionEvent =
         CreateTablePostExecutionEvent(sparkSession, tableIdentifier)
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/AlterTableColumnRenameEventListener.scala
 
b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/AlterTableColumnRenameEventListener.scala
index 263d221..21176c9 100644
--- 
a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/AlterTableColumnRenameEventListener.scala
+++ 
b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/AlterTableColumnRenameEventListener.scala
@@ -145,7 +145,7 @@ class AlterTableColumnRenameEventListener extends 
OperationEventListener with Lo
             sparkSession.sql(
               s"""ALTER TABLE $database.${
                 carbonTable.getTableName
-              } SET SERDEPROPERTIES ('indexInfo' = 
'$indexInfo')""".stripMargin)
+              } SET SERDEPROPERTIES ('indexInfo' = 
'$indexInfo')""".stripMargin).collect()
             CarbonEnv.getInstance(sparkSession).carbonMetaStore
               .removeTableFromMetadata(carbonTable.getDatabaseName, 
carbonTable.getTableName)
           }
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/SILoadEventListenerForFailedSegments.scala
 
b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/SILoadEventListenerForFailedSegments.scala
index 6e4bb0b..ef9481e 100644
--- 
a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/SILoadEventListenerForFailedSegments.scala
+++ 
b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/SILoadEventListenerForFailedSegments.scala
@@ -165,7 +165,7 @@ class SILoadEventListenerForFailedSegments extends 
OperationEventListener with L
                       // creation time
                       sparkSession.sql(
                         s"""ALTER TABLE 
${carbonLoadModel.getDatabaseName}.$indexTableName SET
-                           |SERDEPROPERTIES ('isSITableEnabled' = 
'true')""".stripMargin)
+                           |SERDEPROPERTIES ('isSITableEnabled' = 
'true')""".stripMargin).collect()
                     }
                   } catch {
                     case ex: Exception =>
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/hive/CarbonInternalMetastore.scala
 
b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/hive/CarbonInternalMetastore.scala
index 0d192c1..b2975fa 100644
--- 
a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/hive/CarbonInternalMetastore.scala
+++ 
b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/hive/CarbonInternalMetastore.scala
@@ -189,7 +189,7 @@ object CarbonInternalMetastore {
       } catch {
         case e: Exception =>
           // In case of creating a table, hive table will not be available.
-          LOGGER.error(e.getMessage, e)
+          LOGGER.error(e.getMessage)
       }
     }
   }
diff --git 
a/integration/spark/src/main/spark2.3/org/apache/spark/sql/CarbonToSparkAdapter.scala
 
b/integration/spark/src/main/spark2.3/org/apache/spark/sql/CarbonToSparkAdapter.scala
index 7b5589d..b6a20f7 100644
--- 
a/integration/spark/src/main/spark2.3/org/apache/spark/sql/CarbonToSparkAdapter.scala
+++ 
b/integration/spark/src/main/spark2.3/org/apache/spark/sql/CarbonToSparkAdapter.scala
@@ -23,7 +23,7 @@ import org.apache.spark.SparkContext
 import org.apache.spark.scheduler.{SparkListener, SparkListenerApplicationEnd}
 import 
org.apache.spark.sql.carbondata.execution.datasources.CarbonFileIndexReplaceRule
 import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, 
SessionCatalog}
-import org.apache.spark.sql.catalyst.expressions.{Alias, AttributeReference, 
AttributeSet, Expression, ExpressionSet, ExprId, NamedExpression, ScalaUDF, 
SubqueryExpression}
+import org.apache.spark.sql.catalyst.expressions.{Alias, AttributeReference, 
AttributeSet, ExprId, Expression, ExpressionSet, NamedExpression, ScalaUDF, 
SubqueryExpression}
 import org.apache.spark.sql.catalyst.expressions.codegen.ExprCode
 import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, 
AttributeReference, AttributeSet, ExprId, Expression, ExpressionSet, 
NamedExpression, ScalaUDF, SubqueryExpression}
 import org.apache.spark.sql.catalyst.optimizer.Optimizer
@@ -35,8 +35,14 @@ import org.apache.spark.sql.optimizer.{CarbonIUDRule, 
CarbonUDFTransformRule}
 import org.apache.spark.sql.secondaryindex.optimizer.CarbonSITransformationRule
 import org.apache.spark.sql.types.{DataType, Metadata}
 
+import org.apache.carbondata.core.util.ThreadLocalSessionInfo
+
 object CarbonToSparkAdapter {
 
+  def addSparkSessionListener(sparkSession: SparkSession): Unit = {
+    SparkSqlAdapter.addSparkSessionListener(sparkSession)
+  }
+
   def addSparkListener(sparkContext: SparkContext): Unit = {
     sparkContext.addSparkListener(new SparkListener {
       override def onApplicationEnd(applicationEnd: 
SparkListenerApplicationEnd): Unit = {
diff --git 
a/integration/spark/src/main/spark2.3/org/apache/spark/sql/MixedFormatHandlerUtil.scala
 
b/integration/spark/src/main/spark2.3/org/apache/spark/sql/SparkSqlAdapter.scala
similarity index 74%
rename from 
integration/spark/src/main/spark2.3/org/apache/spark/sql/MixedFormatHandlerUtil.scala
rename to 
integration/spark/src/main/spark2.3/org/apache/spark/sql/SparkSqlAdapter.scala
index cfc7755..497539f 100644
--- 
a/integration/spark/src/main/spark2.3/org/apache/spark/sql/MixedFormatHandlerUtil.scala
+++ 
b/integration/spark/src/main/spark2.3/org/apache/spark/sql/SparkSqlAdapter.scala
@@ -17,13 +17,19 @@
 
 package org.apache.spark.sql
 
+import org.apache.spark.scheduler.{SparkListener, SparkListenerApplicationEnd}
 import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.catalyst.expressions.{Attribute, Expression}
 import org.apache.spark.sql.execution.FileSourceScanExec
 import org.apache.spark.sql.execution.datasources.HadoopFsRelation
 import org.apache.spark.sql.types.StructType
 
-object MixedFormatHandlerUtil {
+import org.apache.carbondata.core.util.ThreadLocalSessionInfo
+
+object SparkSqlAdapter {
+
+  def initSparkSQL(): Unit = {
+  }
 
   def getScanForSegments(
       @transient relation: HadoopFsRelation,
@@ -41,4 +47,13 @@ object MixedFormatHandlerUtil {
       dataFilters,
       tableIdentifier)
   }
+
+  def addSparkSessionListener(sparkSession: SparkSession): Unit = {
+    sparkSession.sparkContext.addSparkListener(new SparkListener {
+      override def onApplicationEnd(applicationEnd: 
SparkListenerApplicationEnd): Unit = {
+        CarbonEnv.carbonEnvMap.remove(sparkSession)
+        ThreadLocalSessionInfo.unsetAll()
+      }
+    })
+  }
 }
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/EnvHelper.scala 
b/integration/spark/src/main/spark2.3/org/apache/spark/sql/parser/SparkSqlAstBuilderWrapper.scala
similarity index 56%
copy from integration/spark/src/main/scala/org/apache/spark/sql/EnvHelper.scala
copy to 
integration/spark/src/main/spark2.3/org/apache/spark/sql/parser/SparkSqlAstBuilderWrapper.scala
index d347850..0e56e00 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/EnvHelper.scala
+++ 
b/integration/spark/src/main/spark2.3/org/apache/spark/sql/parser/SparkSqlAstBuilderWrapper.scala
@@ -15,31 +15,17 @@
  * limitations under the License.
  */
 
-package org.apache.spark.sql
+package org.apache.spark.sql.parser
 
-import org.apache.carbondata.core.metadata.DatabaseLocationProvider
+import org.apache.spark.sql.catalyst.parser.SqlBaseParser._
+import org.apache.spark.sql.execution.SparkSqlAstBuilder
+import org.apache.spark.sql.internal.SQLConf
 
 /**
- * environment related code
+ * use this wrapper to adapter multiple spark versions
  */
-object EnvHelper {
+class SparkSqlAstBuilderWrapper(conf: SQLConf)
+  extends SparkSqlAstBuilder(conf) {
 
-  def isCloud(sparkSession: SparkSession): Boolean = false
-
-  def isPrivacy(sparkSession: SparkSession, isExternal: Boolean): Boolean = 
false
-
-  def setDefaultHeader(
-      sparkSession: SparkSession,
-      optionsFinal: java.util.Map[String, String]
-  ): Unit = {
-
-  }
-
-  def isRetainData(sparkSession: SparkSession, retainData: Boolean): Boolean = 
{
-    true
-  }
-
-  def getDatabase(database: String): String = {
-    DatabaseLocationProvider.get().provide(database)
-  }
+  def visitPropertyKeyValues(ctx: TablePropertyListContext): Map[String, 
String] = ???
 }
diff --git 
a/integration/spark/src/main/spark2.4/org/apache/spark/sql/CarbonToSparkAdapter.scala
 
b/integration/spark/src/main/spark2.4/org/apache/spark/sql/CarbonToSparkAdapter.scala
index 7bd1056..0d6655c 100644
--- 
a/integration/spark/src/main/spark2.4/org/apache/spark/sql/CarbonToSparkAdapter.scala
+++ 
b/integration/spark/src/main/spark2.4/org/apache/spark/sql/CarbonToSparkAdapter.scala
@@ -23,7 +23,7 @@ import org.apache.spark.SparkContext
 import org.apache.spark.scheduler.{SparkListener, SparkListenerApplicationEnd}
 import 
org.apache.spark.sql.carbondata.execution.datasources.CarbonFileIndexReplaceRule
 import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, 
ExternalCatalogWithListener, SessionCatalog}
-import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, 
AttributeReference, AttributeSet, ExprId, Expression, ExpressionSet, 
NamedExpression, ScalaUDF, SubqueryExpression}
+import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, 
AttributeReference, AttributeSet, Expression, ExpressionSet, ExprId, 
NamedExpression, ScalaUDF, SubqueryExpression}
 import org.apache.spark.sql.catalyst.expressions.codegen._
 import org.apache.spark.sql.catalyst.expressions.codegen.Block._
 import org.apache.spark.sql.catalyst.optimizer.Optimizer
@@ -35,8 +35,19 @@ import org.apache.spark.sql.optimizer.{CarbonIUDRule, 
CarbonUDFTransformRule}
 import org.apache.spark.sql.secondaryindex.optimizer.CarbonSITransformationRule
 import org.apache.spark.sql.types.{DataType, Metadata}
 
+import org.apache.carbondata.core.util.ThreadLocalSessionInfo
+
 object CarbonToSparkAdapter {
 
+  def addSparkSessionListener(sparkSession: SparkSession): Unit = {
+    sparkSession.sparkContext.addSparkListener(new SparkListener {
+      override def onApplicationEnd(applicationEnd: 
SparkListenerApplicationEnd): Unit = {
+        CarbonEnv.carbonEnvMap.remove(sparkSession)
+        ThreadLocalSessionInfo.unsetAll()
+      }
+    })
+  }
+
   def addSparkListener(sparkContext: SparkContext) = {
     sparkContext.addSparkListener(new SparkListener {
       override def onApplicationEnd(applicationEnd: 
SparkListenerApplicationEnd): Unit = {
diff --git 
a/integration/spark/src/main/spark2.4/org/apache/spark/sql/MixedFormatHandlerUtil.scala
 
b/integration/spark/src/main/spark2.4/org/apache/spark/sql/SparkSqlAdapter.scala
similarity index 96%
rename from 
integration/spark/src/main/spark2.4/org/apache/spark/sql/MixedFormatHandlerUtil.scala
rename to 
integration/spark/src/main/spark2.4/org/apache/spark/sql/SparkSqlAdapter.scala
index 7b20c06..e1c804e 100644
--- 
a/integration/spark/src/main/spark2.4/org/apache/spark/sql/MixedFormatHandlerUtil.scala
+++ 
b/integration/spark/src/main/spark2.4/org/apache/spark/sql/SparkSqlAdapter.scala
@@ -23,7 +23,10 @@ import org.apache.spark.sql.execution.FileSourceScanExec
 import org.apache.spark.sql.execution.datasources.HadoopFsRelation
 import org.apache.spark.sql.types.StructType
 
-object MixedFormatHandlerUtil {
+object SparkSqlAdapter {
+
+  def initSparkSQL(): Unit = {
+  }
 
   def getScanForSegments(
       @transient relation: HadoopFsRelation,
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/EnvHelper.scala 
b/integration/spark/src/main/spark2.4/org/apache/spark/sql/parser/SparkSqlAstBuilderWrapper.scala
similarity index 56%
copy from integration/spark/src/main/scala/org/apache/spark/sql/EnvHelper.scala
copy to 
integration/spark/src/main/spark2.4/org/apache/spark/sql/parser/SparkSqlAstBuilderWrapper.scala
index d347850..0e56e00 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/EnvHelper.scala
+++ 
b/integration/spark/src/main/spark2.4/org/apache/spark/sql/parser/SparkSqlAstBuilderWrapper.scala
@@ -15,31 +15,17 @@
  * limitations under the License.
  */
 
-package org.apache.spark.sql
+package org.apache.spark.sql.parser
 
-import org.apache.carbondata.core.metadata.DatabaseLocationProvider
+import org.apache.spark.sql.catalyst.parser.SqlBaseParser._
+import org.apache.spark.sql.execution.SparkSqlAstBuilder
+import org.apache.spark.sql.internal.SQLConf
 
 /**
- * environment related code
+ * use this wrapper to adapt to multiple Spark versions
  */
-object EnvHelper {
+class SparkSqlAstBuilderWrapper(conf: SQLConf)
+  extends SparkSqlAstBuilder(conf) {
 
-  def isCloud(sparkSession: SparkSession): Boolean = false
-
-  def isPrivacy(sparkSession: SparkSession, isExternal: Boolean): Boolean = 
false
-
-  def setDefaultHeader(
-      sparkSession: SparkSession,
-      optionsFinal: java.util.Map[String, String]
-  ): Unit = {
-
-  }
-
-  def isRetainData(sparkSession: SparkSession, retainData: Boolean): Boolean = 
{
-    true
-  }
-
-  def getDatabase(database: String): String = {
-    DatabaseLocationProvider.get().provide(database)
-  }
+  def visitPropertyKeyValues(ctx: TablePropertyListContext): Map[String, 
String] = ???
 }
diff --git 
a/integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithYarnLocalDirs.scala
 
b/integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithYarnLocalDirs.scala
index 59ca7cb..f5b2eb0 100644
--- 
a/integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithYarnLocalDirs.scala
+++ 
b/integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithYarnLocalDirs.scala
@@ -49,7 +49,7 @@ class TestLoadDataWithYarnLocalDirs extends QueryTest with 
BeforeAndAfterAll {
 
   private def initYarnLocalDir = {
     //set all the possible env for yarn local dirs in case of various deploy 
environment
-    val sparkConf = SparkContext.getOrCreate().getConf
+    val sparkConf = sqlContext.sparkContext.getConf
     sparkConf.set("SPARK_EXECUTOR_DIRS", getMockedYarnLocalDirs)
     sparkConf.set("SPARK_LOCAL_DIRS", getMockedYarnLocalDirs)
     sparkConf.set("MESOS_DIRECTORY", getMockedYarnLocalDirs)
diff --git 
a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/cloud/AllDataSourceTestCase.scala
 
b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/cloud/AllDataSourceTestCase.scala
new file mode 100644
index 0000000..baa848c
--- /dev/null
+++ 
b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/cloud/AllDataSourceTestCase.scala
@@ -0,0 +1,904 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.cloud
+
+import java.io.File
+import java.util.concurrent.Executors
+
+import org.apache.spark.sql.catalyst.TableIdentifier
+import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
+import org.apache.spark.sql.catalyst.catalog.CatalogTableType
+import org.apache.spark.sql.execution.strategy.CarbonPlanHelper
+import org.apache.spark.sql.test.util.QueryTest
+import org.apache.spark.sql.{CarbonEnv, Row}
+import org.scalatest.BeforeAndAfterAll
+
+import 
org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.metadata.DatabaseLocationProvider
+import org.apache.carbondata.core.util.{CarbonProperties, 
ThreadLocalSessionInfo}
+
+/**
+ * Test class for all data sources.
+ */
+class AllDataSourceTestCase extends QueryTest with BeforeAndAfterAll {
+
+  override def beforeAll: Unit = {
+    // TODO: these properties only take effect when running in IntelliJ IDEA.
+    CarbonProperties.getInstance()
+        .addProperty(
+          CarbonCommonConstants.CARBON_DATAMAP_SCHEMA_STORAGE,
+          CarbonCommonConstants.CARBON_DATAMAP_SCHEMA_STORAGE_DATABASE
+        )
+    
CarbonProperties.getInstance().addProperty(CarbonCommonConstants.DATABASE_LOCATION_PROVIDER,
+      "org.apache.carbondata.spark.testsuite.cloud.TestProvider")
+    dropAll
+    sql("create database alldatasource")
+    sql("use alldatasource")
+    sql(
+      s"""
+         | create table origin_csv(col1 int, col2 string, col3 date)
+         | using csv
+         | options('dateFormat'='yyyy-MM-dd', 'timestampFormat'='yyyy-MM-dd 
HH:mm:ss')
+         | """.stripMargin)
+    sql("insert into origin_csv select 1, '3aa', to_date('2019-11-11')")
+    sql("insert into origin_csv select 2, '2bb', to_date('2019-11-12')")
+    sql("insert into origin_csv select 3, '1cc', to_date('2019-11-13')")
+  }
+
+  def dropAll {
+    dropTableByName("ds_carbon")
+    dropTableByName("ds_carbondata")
+    dropTableByName("hive_carbon")
+    dropTableByName("hive_carbondata")
+
+    sql("drop table if exists tbl_truncate")
+    sql("drop table if exists origin_csv")
+    sql("drop table if exists tbl_float1")
+    sql("drop table if exists tbl_float2")
+    sql("drop table if exists ds_options")
+    sql("drop table if exists hive_options")
+    sql("drop table if exists tbl_update")
+    sql("drop table if exists tbl_oldName")
+    sql("drop table if exists tbl_newName")
+    sql("drop table if exists tbl_insert_p_nosort")
+    sql("drop table if exists tbl_insert_overwrite")
+    sql("drop table if exists tbl_insert_overwrite_p")
+    sql("drop table if exists tbl_metrics_ns")
+    sql("drop table if exists tbl_metrics_ls")
+    sql("drop table if exists tbl_metrics_gs")
+    sql("drop table if exists tbl_metrics_p_ns")
+    sql("drop table if exists tbl_metrics_p_ls")
+    sql("drop table if exists tbl_metrics_p_gs")
+    sql("drop table if exists ds_sct")
+    sql("drop table if exists tbl_complex")
+    sql("drop table if exists tbl_complex_carbondata")
+    sql("drop table if exists tbl_complex_p")
+    sql("drop table if exists tbl_complex_p_carbondata")
+    sql("drop database if exists alldatasource cascade")
+  }
+
+  def dropTableByName(tableName: String): Unit = {
+    sql(s"drop table if exists $tableName")
+    sql(s"drop table if exists ${tableName}_p")
+    sql(s"drop table if exists ${tableName}_ctas")
+    sql(s"drop table if exists ${tableName}_e")
+    sql(s"drop table if exists ${tableName}_s")
+  }
+
+  override def afterAll: Unit = {
+    try {
+      dropAll
+      sql("use default")
+    } finally {
+      CarbonProperties.getInstance()
+        .addProperty(
+          CarbonCommonConstants.CARBON_DATAMAP_SCHEMA_STORAGE,
+          CarbonCommonConstants.CARBON_DATAMAP_SCHEMA_STORAGE_DEFAULT
+        )
+      
CarbonProperties.getInstance().removeProperty(CarbonCommonConstants.DATABASE_LOCATION_PROVIDER)
+    }
+  }
+
+  test("test carbon"){
+    verifyDataSourceTable("carbon", "ds_carbon")
+    verifyHiveTable("carbon", "hive_carbon")
+  }
+
+  test("test carbondata"){
+    verifyDataSourceTable("carbondata", "ds_carbondata")
+    verifyHiveTable("carbondata", "hive_carbondata")
+  }
+
+  test("test partition table") {
+    createDataSourcePartitionTable("carbondata", "ds_carbondata_p")
+    createHivePartitionTable("carbondata", "hive_carbondata_p")
+  }
+
+  test("test table properties of datasource table") {
+    val tableName = "ds_options"
+    sql(
+      s"""
+         |create table ${ tableName } (
+         | col1 int, col2 string, col3 date
+         |)
+         | using carbondata
+         | options("sort_sCope"="global_Sort", "sort_Columns"="coL2", 
'global_Sort_partitions'='1')
+         | """.stripMargin)
+
+    checkExistence(sql(s"describe formatted ${ tableName }"), true, 
"global_sort")
+    sql(s"insert into table ${ tableName } select * from origin_csv")
+    checkAnswer(
+      sql(s"select * from ${ tableName }"),
+      Seq(
+        Row(3, "1cc", java.sql.Date.valueOf("2019-11-13")),
+        Row(2, "2bb", java.sql.Date.valueOf("2019-11-12")),
+        Row(1, "3aa", java.sql.Date.valueOf("2019-11-11"))
+      )
+    )
+    checkExistence(sql(s"show create table ${ tableName }"), true, 
"sort_columns")
+    sql(s"alter table ${ tableName } set 
tblproperties('sort_Columns'='col2,col1', 'LOAD_MIN_SIZE_INMB'='256')")
+    checkExistence(sql(s"show create table ${ tableName }"), true, 
"load_min_size_inmb")
+    sql(s"alter table ${ tableName } unset 
tblproperties('LOAD_MIN_SIZE_INMB')")
+    checkExistence(sql(s"show create table ${ tableName }"), false, 
"load_min_size_inmb")
+    val rows = sql(s"show create table ${ tableName }").collect()
+    // drop table
+    sql(s"drop table ${ tableName }")
+    // create again
+    sql(rows(0).getString(0))
+    checkExistence(sql(s"describe formatted ${ tableName }"), true, 
"global_sort")
+    sql(s"insert into table ${ tableName } select * from origin_csv")
+    checkAnswer(
+      sql(s"select * from ${ tableName }"),
+      Seq(
+        Row(3, "1cc", java.sql.Date.valueOf("2019-11-13")),
+        Row(2, "2bb", java.sql.Date.valueOf("2019-11-12")),
+        Row(1, "3aa", java.sql.Date.valueOf("2019-11-11"))
+      )
+    )
+
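+    // no size options were specified, so the table should fall back to the
+    // default blocklet (64 MB) and block (1024 MB) sizes checked below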
+    val table = CarbonEnv.getCarbonTable(
+      Option("alldatasource"), tableName)(sqlContext.sparkSession)
+    assert(table.getBlockletSizeInMB == 64)
+    assert(table.getBlockSizeInMB == 1024)
+  }
+
+  test("test table properties of hive table") {
+    val tableName = "hive_options"
+    sql(
+      s"""
+         |create table ${ tableName } (
+         | col1 int, col2 string, col3 date
+         |)
+         | stored as carbondata
+         | tblproperties("sort_sCope"="global_Sort", "sort_Columns"="coL2", 
'global_Sort_partitions'='1')
+         | """.stripMargin)
+
+    checkExistence(sql(s"describe formatted ${ tableName }"), true, 
"global_sort")
+    sql(s"insert into table ${ tableName } select * from origin_csv")
+    checkAnswer(
+      sql(s"select * from ${ tableName }"),
+      Seq(
+        Row(3, "1cc", java.sql.Date.valueOf("2019-11-13")),
+        Row(2, "2bb", java.sql.Date.valueOf("2019-11-12")),
+        Row(1, "3aa", java.sql.Date.valueOf("2019-11-11"))
+      )
+    )
+    checkExistence(sql(s"show create table ${ tableName }"), true, 
"sort_columns")
+    sql(s"alter table ${ tableName } set 
tblproperties('sort_Columns'='col2,col1', 'LOAD_MIN_SIZE_INMB'='256')")
+    checkExistence(sql(s"show create table ${ tableName }"), true, 
"load_min_size_inmb")
+    sql(s"alter table ${ tableName } unset 
tblproperties('LOAD_MIN_SIZE_INMB')")
+    checkExistence(sql(s"show create table ${ tableName }"), false, 
"load_min_size_inmb")
+    val rows = sql(s"show create table ${ tableName }").collect()
+    // drop table
+    sql(s"drop table ${ tableName }")
+    // create again
+    sql(rows(0).getString(0))
+    checkExistence(sql(s"describe formatted ${ tableName }"), true, 
"global_sort")
+    sql(s"insert into table ${ tableName } select * from origin_csv")
+    checkAnswer(
+      sql(s"select * from ${ tableName }"),
+      Seq(
+        Row(3, "1cc", java.sql.Date.valueOf("2019-11-13")),
+        Row(2, "2bb", java.sql.Date.valueOf("2019-11-12")),
+        Row(1, "3aa", java.sql.Date.valueOf("2019-11-11"))
+      )
+    )
+  }
+
+  ignore("test show create table") {
+    val tableName = "ds_sct"
+    sql(s"drop table if exists ${ tableName }")
+    sql(
+      s"""
+         | create table ${ tableName }
+         | using carbondata
+         | as select * from origin_csv
+         | """.stripMargin)
+    sql(s"show create table $tableName").show(100, false)
+    checkAnswer(sql(s"show create table $tableName"),
+      Seq(Row(
+        """CREATE TABLE `ds_sct` (`col1` INT, `col2` STRING, `col3` DATE)
+          |USING carbondata
+          |OPTIONS (
+          |  `bad_record_path` '',
+          |  `local_dictionary_enable` 'true',
+          |  `sort_columns` '',
+          |  `comment` ''
+          |)
+          |""".stripMargin)))
+
+    sql(s"drop table if exists ${ tableName }")
+    sql(
+      s"""
+         | create table ${ tableName }
+         | using carbondata
+         | options("global_sort_partitions"="1")
+         | as select * from origin_csv
+         | """.stripMargin)
+    sql(s"show create table $tableName").show(100, false)
+    checkAnswer(sql(s"show create table $tableName"),
+      Seq(Row(
+        """CREATE TABLE `ds_sct` (`col1` INT, `col2` STRING, `col3` DATE)
+          |USING carbondata
+          |OPTIONS (
+          |  `bad_record_path` '',
+          |  `local_dictionary_enable` 'true',
+          |  `sort_columns` '',
+          |  `comment` '',
+          |  `global_sort_partitions` '1'
+          |)
+          |""".stripMargin)))
+
+    sql(s"drop table if exists ${tableName}")
+    sql(
+      s"""
+         |create table ${ tableName } (
+         | col1 int, col2 string, col3 date
+         |)
+         | using carbondata
+         | """.stripMargin)
+    checkAnswer(sql(s"show create table $tableName"),
+      Seq(Row("""CREATE TABLE `ds_sct` (`col1` INT, `col2` STRING, `col3` DATE)
+                |USING carbondata
+                |""".stripMargin)))
+
+    sql(s"drop table if exists ${tableName}")
+    sql(
+      s"""
+         |create table ${ tableName } (
+         | col1 int, col2 string, col3 date
+         |)
+         | using carbondata
+         | options("global_sort_partitions"="1")
+         | """.stripMargin)
+    checkAnswer(sql(s"show create table $tableName"),
+      Seq(Row("""CREATE TABLE `ds_sct` (`col1` INT, `col2` STRING, `col3` DATE)
+                |USING carbondata
+                |OPTIONS (
+                |  `global_sort_partitions` '1'
+                |)
+                |""".stripMargin)))
+
+    sql(s"drop table if exists ${tableName}")
+    sql(
+      s"""
+         |create table ${ tableName } (
+         | col1 int, col2 string, col3 date
+         |)
+         | using carbondata
+         | options("sort_scope"="global_sort", "sort_columns"="col2")
+         | """.stripMargin)
+    checkAnswer(sql(s"show create table $tableName"),
+      Seq(Row("""CREATE TABLE `ds_sct` (`col1` INT, `col2` STRING, `col3` DATE)
+                |USING carbondata
+                |OPTIONS (
+                |  `sort_scope` 'global_sort',
+                |  `sort_columns` 'col2'
+                |)
+                |""".stripMargin)))
+
+    sql(s"drop table if exists ${tableName}")
+    sql(
+      s"""
+         |create table ${ tableName } (
+         | col1 int, col2 string, col3 date
+         |)
+         | using carbondata
+         | partitioned by (col2)
+         | """.stripMargin)
+    checkAnswer(sql(s"show create table $tableName"),
+      Seq(Row("""CREATE TABLE `ds_sct` (`col1` INT, `col3` DATE, `col2` STRING)
+                |USING carbondata
+                |PARTITIONED BY (col2)
+                |""".stripMargin)))
+
+    sql(s"drop table if exists ${tableName}")
+    sql(
+      s"""
+         |create table ${ tableName } (
+         | col1 int, col2 string, col3 date
+         |)
+         | using carbondata
+         | options("sort_scope"="global_sort", "sort_columns"="col2")
+         | partitioned by (col3)
+         | """.stripMargin)
+    sql(s"show create table $tableName").show(100, false)
+    checkAnswer(sql(s"show create table $tableName"),
+      Seq(Row("""CREATE TABLE `ds_sct` (`col1` INT, `col2` STRING, `col3` DATE)
+                |USING carbondata
+                |OPTIONS (
+                |  `sort_scope` 'global_sort',
+                |  `sort_columns` 'col2'
+                |)
+                |PARTITIONED BY (col3)
+                |""".stripMargin)))
+  }
+
+  test("test add column with comment") {
+    val tableName = "tbl_comment"
+    sql(
+      s"""
+         | create table $tableName(
+         | id int comment 'id column',
+         | age int
+         | )
+         | using carbondata
+         | partitioned by (age)
+         | Comment 'test table'
+         | """.stripMargin)
+    checkExistence(sql(s"desc formatted $tableName"), true, "test table")
+    sql(s"alter table $tableName set tblProperties('comment'='new test 
table')")
+    sql(s"alter table $tableName add columns (name string comment 'test 
column')")
+    checkExistence(sql(s"desc $tableName"), true, "test column")
+    checkExistence(sql(s"desc formatted $tableName"), true, "new test table")
+    checkExistence(sql(s"desc formatted $tableName"), true, "test column")
+  }
+
+  test("test external table") {
+    verifyExternalDataSourceTable("carbondata", "ds_carbondata")
+    verifyExternalHiveTable("carbondata", "hive_carbondata")
+  }
+
+  test("test truncate table") {
+    val tableName = "tbl_truncate"
+    sql(s"create table ${tableName} using carbondata as select * from 
origin_csv")
+    checkAnswer(sql(s"select count(*) from ${tableName}"), Seq(Row(3)))
+    sql(s"truncate table ${tableName}")
+    checkAnswer(sql(s"select count(*) from ${tableName}"), Seq(Row(0)))
+  }
+
+  test("test float") {
+    val tableName = "tbl_float"
+    sql(s"create table ${tableName}1 (col1 string, col2 float, col3 char(10), 
col4 varchar(20), col5 decimal(10,2)) using carbondata")
+    sql(s"describe formatted ${tableName}1").show(100, false)
+    sql(s"insert into table ${tableName}1 select 'abc', 1.0, 'a3','b3', 12.34")
+    checkAnswer(sql(s"select * from ${tableName}1"), Seq(Row("abc", 1.0f, 
"a3", "b3", 12.34)))
+    sql(s"create table ${tableName}2 (col1 string, col2 float, col3 char(10), 
col4 varchar(20), col5 decimal(10,2)) stored as carbondata")
+    sql(s"describe formatted ${tableName}2").show(100, false)
+    sql(s"insert into table ${tableName}2 select 'abc', 1.0, 'a3','b3', 12.34")
+    checkAnswer(sql(s"select * from ${tableName}2"), Seq(Row("abc", 1.0f, 
"a3", "b3", 12.34)))
+  }
+
+  test("test explain") {
+    val tableName = "tbl_update"
+    sql(s"create table ${tableName} using carbondata as select * from 
origin_csv")
+    checkExistence(
+      sql(s"explain select * from ${tableName} where col1 = 1"),
+      true,
+      "FileScan")
+    checkExistence(
+      sql(s"explain update ${tableName} set (col2) = ('4aa') where col1 = 1"),
+      true,
+      "OneRowRelation")
+    checkExistence(
+      sql(s"explain delete from ${tableName}"),
+      true,
+      "OneRowRelation")
+  }
+
+  test("test rename table") {
+    val oldName = "tbl_oldName"
+    val newName = "tbl_newName"
+    sql(s"create table ${oldName}(id int,name string) using carbondata")
+    sql(s"insert into table ${oldName} select 2,'aa'")
+    sql(s"ALTER TABLE ${oldName} RENAME TO ${newName}")
+    sql(s"create table ${oldName}(id int,name string) using carbondata")
+    checkAnswer(
+      sql(s"select count(*) from ${newName}"),
+      Seq(Row(1))
+    )
+    checkAnswer(
+      sql(s"select * from ${newName}"),
+      Seq(Row(2, "aa"))
+    )
+    checkAnswer(
+      sql(s"select count(*) from ${oldName}"),
+      Seq(Row(0))
+    )
+  }
+
+  ignore("output size: insert into partition table") {
+    verifyMetrics("tbl_metrics_ns", "no_sort")
+    verifyMetrics("tbl_metrics_ls", "local_sort")
+    verifyMetrics("tbl_metrics_gs", "global_sort")
+    verifyMetricsForPartitionTable("tbl_metrics_p_ns", "no_sort")
+    verifyMetricsForPartitionTable("tbl_metrics_p_ls", "local_sort")
+    verifyMetricsForPartitionTable("tbl_metrics_p_gs", "global_sort")
+  }
+
+  def verifyMetrics(tableName: String, sort_scope: String): Unit = {
+    sql(s"drop table if exists $tableName")
+    sql(
+      s"""
+         | create table $tableName (
+         | col1 int,
+         | col2 string,
+         | col3 date,
+         | col4 timestamp,
+         | col5 float
+         | )
+         | using carbondata
+         | options('dateFormat'='yyyy-MM-dd', 'timestampFormat'='yyyy-MM-dd 
HH:mm:ss',
+         | 'sort_scope'='${sort_scope}', 'sort_columns'='col2')
+       """.stripMargin)
+    sql(
+      s"""
+         | insert into $tableName (
+         |  select col1, col2, col3, to_timestamp('2019-02-02 13:01:01'), 1.2 
from origin_csv
+         |  union all
+         |  select 123,'abc', to_date('2019-01-01'), to_timestamp('2019-02-02 
13:01:01'), 1.2)
+         |  """.stripMargin
+    )
+    checkAnswer(sql(s"select count(*) from $tableName"), Seq(Row(4)))
+  }
+
+  def verifyMetricsForPartitionTable(tableName: String, sort_scope: String): 
Unit = {
+    sql(s"drop table if exists $tableName")
+    sql(
+      s"""
+         | create table $tableName (
+         | col1 int,
+         | col2 string,
+         | col3 date,
+         | col4 timestamp,
+         | col5 float
+         | )
+         | using carbondata
+         | options('dateFormat'='yyyy-MM-dd', 'timestampFormat'='yyyy-MM-dd 
HH:mm:ss',
+         | 'sort_scope'='${sort_scope}', 'sort_columns'='col2')
+         | partitioned by(col3, col4)
+       """.stripMargin)
+    sql(
+      s"""
+         | insert into $tableName (
+         |  select col1, col2, 1.2, col3, to_timestamp('2019-02-02 13:01:01') 
from origin_csv
+         |  union all
+         |  select 123,'abc', 1.2, to_date('2019-01-01'), 
to_timestamp('2019-02-02 13:01:01'))
+         |  """.stripMargin
+    )
+    checkAnswer(sql(s"select count(*) from $tableName"), Seq(Row(4)))
+  }
+
+  test("insert overwrite table") {
+    val tableName = "tbl_insert_overwrite"
+    sql(s"drop table if exists $tableName")
+    sql(
+      s"""
+         | create table $tableName (
+         | col1 int,
+         | col2 string
+         | )
+         | using carbondata
+       """.stripMargin)
+    sql(
+      s"""
+         | insert into $tableName
+         |  select 123,'abc'
+         |  """.stripMargin
+    ).show(100, false)
+    sql(
+      s"""
+         | insert overwrite table $tableName
+         |  select 321,'cba'
+         |  """.stripMargin
+    ).show(100, false)
+
+    checkAnswer(
+      sql(s"select * from $tableName"),
+      Seq(Row(321, "cba"))
+    )
+  }
+
+  test("insert overwrite partition table") {
+    val tableName = "tbl_insert_overwrite_p"
+    sql(s"drop table if exists $tableName")
+    sql(
+      s"""
+         | create table $tableName (
+         | col1 int,
+         | col2 string
+         | )
+         | using carbondata
+         | partitioned by (col2)
+       """.stripMargin)
+    sql(
+      s"""
+         | insert into $tableName
+         |  select 123,'abc'
+         |  """.stripMargin
+    ).show(100, false)
+    sql(
+      s"""
+         | insert into $tableName (
+         |  select 234,'abc'
+         |  union all
+         |  select 789, 'edf'
+         |  )""".stripMargin
+    ).show(100, false)
+    sql(
+      s"""
+         | insert into $tableName
+         |  select 345,'cba'
+         |  """.stripMargin
+    ).show(100, false)
+    sql(
+      s"""
+         | insert overwrite table $tableName
+         |  select 321,'abc'
+         |  """.stripMargin
+    ).show(100, false)
+
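+    // clean files removes the segments invalidated by the insert overwrite above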
+    sql(s"clean files for table $tableName").show(100, false)
+
+    checkAnswer(
+      sql(s"select * from $tableName order by col1"),
+      Seq(Row(321, "abc"), Row(345, "cba"), Row(789, "edf"))
+    )
+  }
+
+  test("test complex datatype") {
+    val tableName1 = "tbl_complex"
+    sql(s"drop table if exists $tableName1")
+    sql(
+      s"""
+         | create table $tableName1 (
+         | col1 int,
+         | col2 string,
+         | col3 float,
+         | col4 struct<level: string, ratio: float, sub: struct<level: string, 
ratio: float>>,
+         | col5 array<struct<ratio: float>>,
+         | col6 map<string, struct<ratio: float>>
+         | ) """.stripMargin)
+    sql(s"""
+        | insert into table $tableName1
+        | select
+        |   1,
+        |   'a',
+        |   1.1,
+        |   struct('b', 1.2, struct('bc', 1.21)),
+        |   array(struct(1.3), struct(1.4)),
+        |   map('l1', struct(1.5), 'l2', struct(1.6))
+        | """.stripMargin)
+    val tableName2 = "tbl_complex_carbondata"
+    sql(s"drop table if exists $tableName2")
+    sql(
+      s"""
+         | create table $tableName2 (
+         | col1 int,
+         | col2 string,
+         | col3 float,
+         | col4 struct<level: string, ratio: float, sub: struct<level: string, 
ratio: float>>,
+         | col5 array<struct<ratio: float>>,
+         | col6 map<string, struct<ratio: float>>
+         | )
+         | using carbondata """.stripMargin)
+    sql(s"insert into table $tableName2 select * from $tableName1")
+
+    checkAnswer(
+      sql(
+        s"""
+           |select
+           |  cast(round(col4.ratio, 1) as float),
+           |  cast(round(col4.sub.ratio, 2) as float),
+           |  cast(round(col5[1].ratio, 1) as float),
+           |  cast(round(col6['l1'].ratio, 1) as float)
+           | from $tableName2
+           |""".stripMargin),
+      sql(
+        s"""
+           |select
+           |  col4.ratio, col4.sub.ratio,
+           |  col5[1].ratio,
+           |  col6['l1'].ratio
+           | from $tableName1
+           |""".stripMargin)
+    )
+  }
+
+  test("test complex datatype for partition table") {
+    val tableName1 = "tbl_complex_p"
+    sql(s"drop table if exists $tableName1")
+    sql(
+      s"""
+         | create table $tableName1 (
+         | col1 int,
+         | col2 string,
+         | col3 float,
+         | col4 struct<level: string, ratio: float, sub: struct<level: string, 
ratio: float>>,
+         | col5 array<struct<ratio: float>>,
+         | col6 map<string, struct<ratio: float>>,
+         | col7 date
+         | ) """.stripMargin)
+    sql(s"""
+           | insert into table $tableName1
+           | select
+           |   1,
+           |   'a',
+           |   1.1,
+           |   struct('b', 1.2, struct('bc', 1.21)),
+           |   array(struct(1.3), struct(1.4)),
+           |   map('l1', struct(1.5), 'l2', struct(1.6)),
+           |   to_date('2019-01-01')
+           | """.stripMargin)
+
+    val tableName2 = "tbl_complex_p_carbondata"
+    sql(s"drop table if exists $tableName2")
+    sql(
+      s"""
+         | create table $tableName2 (
+         | col1 int,
+         | col2 string,
+         | col3 float,
+         | col4 struct<level: string, ratio: float, sub: struct<level: string, 
ratio: float>>,
+         | col5 array<struct<ratio: float>>,
+         | col6 map<string, struct<ratio: float>>,
+         | col7 date
+         | )
+         | using carbondata
+         | partitioned by (col7)
+         | """.stripMargin)
+    sql(s"insert into table $tableName2 select * from $tableName1")
+    checkAnswer(
+      sql(
+        s"""
+           |select
+           |  cast(round(col4.ratio, 1) as float),
+           |  cast(round(col4.sub.ratio, 2) as float),
+           |  cast(round(col5[1].ratio, 1) as float),
+           |  cast(round(col6['l1'].ratio, 1) as float)
+           | from $tableName2
+           |""".stripMargin),
+      sql(
+        s"""
+           |select
+           |  col4.ratio, col4.sub.ratio,
+           |  col5[1].ratio,
+           |  col6['l1'].ratio
+           | from $tableName1
+           |""".stripMargin)
+    )
+  }
+
+  test("set global_sort_partitions") {
+    val tableName = "tbl_gs_set"
+    sql(s"drop table if exists $tableName")
+    sql(
+      s"""
+         | create table $tableName (
+         | col1 int,
+         | col2 string,
+         | col5 float,
+         | col3 date,
+         | col4 timestamp
+         | )
+         | using carbondata
+         | options('dateFormat'='yyyy-MM-dd', 'timestampFormat'='yyyy-MM-dd 
HH:mm:ss',
+         | 'sort_scope'='global_sort', 'sort_columns'='col2', 
'GLOBAL_sort_Partitions'='1')
+         | partitioned by (col3, col4)
+       """.stripMargin)
+
+    val exception = intercept[RuntimeException](
+      sql(s"""alter table $tableName set 
tblproperties('GLOBAL_sort_Partitions'='2s')"""))
+    assert(exception.getMessage.contains("Table property 
global_sort_partitions : 2s is invalid"))
+
+    var globalSortPartitions = CarbonEnv.getCarbonTable(
+      Option("alldatasource"), 
tableName)(sqlContext.sparkSession).getGlobalSortPartitions
+    assert("1".equals(globalSortPartitions))
+
+    sql(s"""alter table $tableName set 
tblproperties('GLOBAL_sort_Partitions'='1')""")
+    globalSortPartitions = CarbonEnv.getCarbonTable(
+      Option("alldatasource"), 
tableName)(sqlContext.sparkSession).getGlobalSortPartitions
+    assert("1".equals(globalSortPartitions))
+    sql(
+      s"""
+         | insert into $tableName (
+         |  select col1, col2, 1.2, col3, to_timestamp('2019-02-02 13:01:01') 
from origin_csv
+         |  union all
+         |  select 123,'abc', 1.2, to_date('2019-01-01'), 
to_timestamp('2019-02-02 13:01:01'))
+         |  """.stripMargin
+    )
+    sql(s"""alter table $tableName set 
tblproperties('GLOBAL_sort_Partitions'='2')""")
+    globalSortPartitions = CarbonEnv.getCarbonTable(
+      Option("alldatasource"), 
tableName)(sqlContext.sparkSession).getGlobalSortPartitions
+    assert("2".equals(globalSortPartitions))
+    sql(
+      s"""
+         | insert into $tableName (
+         |  select col1, col2, 1.2, col3, to_timestamp('2019-02-02 13:01:01') 
from origin_csv
+         |  union all
+         |  select 123,'abc', 1.2, to_date('2019-01-01'), 
to_timestamp('2019-02-02 13:01:01'))
+         |  """.stripMargin
+    )
+    checkAnswer(sql(s"select count(*) from $tableName"), Seq(Row(8)))
+    sql(s"describe formatted $tableName").show(100, false)
+    checkExistence(
+      sql(s"describe formatted $tableName"),
+      true,
+      "GLOBAL SORT PARTITIONS")
+    sql(s"""alter table $tableName unset 
tblproperties('GLOBAL_sort_Partitions')""")
+    globalSortPartitions = CarbonEnv.getCarbonTable(
+      Option("alldatasource"), 
tableName)(sqlContext.sparkSession).getGlobalSortPartitions
+    assert(globalSortPartitions == null)
+  }
+
+  test("update non-carbon table") {
+    var exception = intercept[UnsupportedOperationException]{
+      sql("update origin_csv set (col2)=(33aa) where col1 = 1")
+    }
+    assert(exception.getMessage.contains("only CarbonData table support update 
operation"))
+
+    exception = intercept[UnsupportedOperationException]{
+      sql("delete from origin_csv where col1 = 1")
+    }
+    assert(exception.getMessage.contains("only CarbonData table support delete 
operation"))
+
+    var exception2 = intercept[NoSuchTableException]{
+      sql("update origin_csv121 set (col2)=(33aa) where col1 = 1")
+    }
+    assert(exception2.getMessage.contains("Table or view 'origin_csv121' not 
found"))
+
+    exception2 = intercept[NoSuchTableException]{
+      sql("delete from origin_csv121 where col1 = 1")
+    }
+    assert(exception2.getMessage.contains("Table or view 'origin_csv121' not 
found"))
+  }
+
+  def createDataSourcePartitionTable(provider: String, tableName: String): 
Unit = {
+    sql(s"drop table if exists ${tableName}")
+    sql(s"create table ${tableName}(col1 int, col2 string) using $provider 
partitioned by (col2)")
+    checkLoading(s"${tableName}")
+    val carbonTable = 
CarbonEnv.getCarbonTable(Option("alldatasource"),tableName)(sqlContext.sparkSession)
+    assert(carbonTable.isHivePartitionTable)
+    sql(s"describe formatted ${tableName}").show(100, false)
+    sql(s"show partitions ${tableName}").show(100, false)
+    sql(s"show create table ${tableName}").show(100, false)
+    sql(s"alter table ${tableName} add partition(col2='dd')").show(100, false)
+  }
+
+  def createHivePartitionTable(provider: String, tableName: String): Unit = {
+    sql(s"drop table if exists ${tableName}")
+    sql(s"create table ${tableName}(col1 int) partitioned by (col2 string) 
stored as carbondata")
+    checkLoading(s"${tableName}")
+    sql(s"describe formatted ${tableName}").show(100, false)
+    sql(s"show partitions ${tableName}").show(100, false)
+    sql(s"alter table ${tableName} add partition(col2='dd')").show(100, false)
+  }
+
+  def verifyDataSourceTable(provider: String, tableName: String): Unit = {
+    sql(s"create table ${tableName}(col1 int, col2 string) using $provider")
+    checkLoading(tableName)
+    val table1 = sqlContext.sparkSession.sessionState.catalog.getTableMetadata(
+      TableIdentifier(s"${tableName}",Option("alldatasource")))
+    assert(table1.tableType == CatalogTableType.MANAGED)
+    sql(s"create table ${tableName}_ctas using $provider as select * from 
${tableName}")
+    checkAnswer(sql(s"select * from ${tableName}_ctas"),
+      Seq(Row(123, "abc")))
+    sql(s"insert into ${tableName}_ctas select 123, 'abc'")
+    checkAnswer(sql(s"select * from ${tableName}_ctas"),
+      Seq(Row(123, "abc"), Row(123, "abc")))
+    val table2 = sqlContext.sparkSession.sessionState.catalog.getTableMetadata(
+      TableIdentifier(s"${tableName}_ctas",Option("alldatasource")))
+    assert(table2.tableType == CatalogTableType.MANAGED)
+  }
+
+  def verifyHiveTable(provider: String, tableName: String): Unit = {
+    sql(s"create table ${tableName}(col1 int, col2 string) stored as 
$provider")
+    checkLoading(tableName)
+    val table1 = sqlContext.sparkSession.sessionState.catalog.getTableMetadata(
+      TableIdentifier(s"${tableName}",Option("alldatasource")))
+    assert(table1.tableType == CatalogTableType.MANAGED)
+    sql(s"create table ${tableName}_ctas stored as $provider as select * from 
${tableName}")
+    checkAnswer(sql(s"select * from ${tableName}_ctas"),
+      Seq(Row(123, "abc")))
+    sql(s"insert into ${tableName}_ctas select 123, 'abc'")
+    checkAnswer(sql(s"select * from ${tableName}_ctas"),
+      Seq(Row(123, "abc"), Row(123, "abc")))
+    val table2 = sqlContext.sparkSession.sessionState.catalog.getTableMetadata(
+      TableIdentifier(s"${tableName}_ctas",Option("alldatasource")))
+    assert(table2.tableType == CatalogTableType.MANAGED)
+  }
+
+  def verifyExternalDataSourceTable(provider: String, tableName: String): Unit 
= {
+    val path  = s"${warehouse}/ds_external"
+    val ex = intercept[MalformedCarbonCommandException](
+      sql(
+        s"""
+           |create table ${ tableName }_s
+           | using ${provider}
+           | LOCATION '$path'
+           | as select col1, col2 from origin_csv
+           | """.stripMargin))
+    assert(ex.getMessage.contains("Create external table as select is not 
allowed"))
+
+    sql(s"create table ${tableName}_s using ${provider} as select * from 
origin_csv")
+    val carbonTable =
+      CarbonEnv.getCarbonTable(Option("alldatasource"), 
s"${tableName}_s")(sqlContext.sparkSession)
+    val tablePath = carbonTable.getTablePath
+    sql(s"create table  ${tableName}_e using ${provider} location 
'${tablePath}'")
+    checkAnswer(sql(s"select count(*) from ${tableName}_e"), Seq(Row(3)))
+    val table2 = sqlContext.sparkSession.sessionState.catalog.getTableMetadata(
+      TableIdentifier(s"${tableName}_e",Option("alldatasource")))
+    assert(table2.tableType == CatalogTableType.EXTERNAL)
+    sql(s"drop table if exists ${tableName}_e")
+    assert(!CarbonPlanHelper.isCarbonTable(
+      TableIdentifier(s"${tableName}_e", Option("alldatasource")), 
sqlContext.sparkSession))
+    assert(new File(tablePath).exists())
+  }
+
+  def verifyExternalHiveTable(provider: String, tableName: String): Unit = {
+    val path  = s"${warehouse}/hive_external"
+    val ex = intercept[MalformedCarbonCommandException](
+      sql(
+        s"""
+           |create table ${ tableName }_s
+           | stored as ${provider}
+           | LOCATION '$path'
+           | as select col1, col2 from origin_csv
+           | """.stripMargin))
+    assert(ex.getMessage.contains("Create external table as select is not 
allowed"))
+
+    sql(s"create table ${tableName}_s stored as ${provider} as select * from 
origin_csv")
+    val carbonTable =
+      CarbonEnv.getCarbonTable(Option("alldatasource"), 
s"${tableName}_s")(sqlContext.sparkSession)
+    val tablePath = carbonTable.getTablePath
+    sql(s"create table  ${tableName}_e stored as ${provider} location 
'${tablePath}'")
+    checkAnswer(sql(s"select count(*) from ${tableName}_e"), Seq(Row(3)))
+    val table2 = sqlContext.sparkSession.sessionState.catalog.getTableMetadata(
+      TableIdentifier(s"${tableName}_e",Option("alldatasource")))
+    assert(table2.tableType == CatalogTableType.EXTERNAL)
+    sql(s"drop table if exists ${tableName}_e")
+    assert(!CarbonPlanHelper.isCarbonTable(
+      TableIdentifier(s"${tableName}_e", Option("alldatasource")), 
sqlContext.sparkSession))
+    assert(new File(tablePath).exists())
+  }
+
+  def checkLoading(tableName: String): Unit = {
+    sql(s"insert into $tableName select 123, 'abc'")
+    checkAnswer(sql(s"select * from $tableName"),
+      Seq(Row(123, "abc")))
+  }
+
+}
+
+class TestProvider extends DatabaseLocationProvider {
+  override def provide(originalDatabaseName: String): String = {
+    "projectid." + originalDatabaseName
+  }
+}
\ No newline at end of file
diff --git 
a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/cloud/CacheRefreshTestCase.scala
 
b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/cloud/CacheRefreshTestCase.scala
new file mode 100644
index 0000000..7e134ff
--- /dev/null
+++ 
b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/cloud/CacheRefreshTestCase.scala
@@ -0,0 +1,36 @@
+package org.apache.carbondata.spark.testsuite.cloud
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.catalyst.TableIdentifier
+import org.apache.spark.sql.hive.CarbonHiveMetadataUtil
+import org.apache.spark.sql.test.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+class CacheRefreshTestCase extends QueryTest with BeforeAndAfterAll {
+
+  override protected def beforeAll(): Unit = {
+    sql("drop database if exists cachedb cascade")
+    sql("create database cachedb")
+    sql("use cachedb")
+  }
+
+  override protected def afterAll(): Unit = {
+    sql("use default")
+    sql("drop database if exists cachedb cascade")
+  }
+
+  test("test cache refresh") {
+    sql("create table tbl_cache1(col1 string, col2 int, col3 int) using 
carbondata")
+    sql("insert into tbl_cache1 select 'a', 123, 345")
+    CarbonHiveMetadataUtil.invalidateAndDropTable(
+      "cachedb", "tbl_cache1", sqlContext.sparkSession)
+    // discard cached table info in cachedDataSourceTables
+    val tableIdentifier = TableIdentifier("tbl_cache1", Option("cachedb"))
+    sqlContext.sparkSession.sessionState.catalog.refreshTable(tableIdentifier)
+    sql("create table tbl_cache1(col1 string, col2 int, col3 int) using 
carbondata")
+    sql("delete from tbl_cache1")
+    sql("insert into tbl_cache1 select 'b', 123, 345")
+    checkAnswer(sql("select * from tbl_cache1"),
+      Seq(Row("b", 123, 345)))
+  }
+}
diff --git 
a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableIfNotExists.scala
 
b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableIfNotExists.scala
index 6a842fd..218aec4 100644
--- 
a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableIfNotExists.scala
+++ 
b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableIfNotExists.scala
@@ -52,7 +52,7 @@ class TestCreateTableIfNotExists extends QueryTest with 
BeforeAndAfterAll {
       futures = futures :+ runAsync()
     }
 
-    executorService.shutdown();
+    executorService.shutdown()
     executorService.awaitTermination(30L, TimeUnit.SECONDS)
 
     futures.foreach { future =>
diff --git 
a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestGlobalSortDataLoad.scala
 
b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestGlobalSortDataLoad.scala
index bb8eed8..eb27571 100644
--- 
a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestGlobalSortDataLoad.scala
+++ 
b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestGlobalSortDataLoad.scala
@@ -405,8 +405,9 @@ class TestGlobalSortDataLoad extends QueryTest with 
BeforeAndAfterEach with Befo
     
CarbonProperties.getInstance().addProperty(CarbonCommonConstants.LOAD_GLOBAL_SORT_PARTITIONS,
 "2")
     sql(s"INSERT INTO TABLE carbon_globalsort SELECT * FROM 
carbon_localsort_once")
 
-    assert(getIndexFileCount("carbon_globalsort") === 2)
     checkAnswer(sql("SELECT COUNT(*) FROM carbon_globalsort"), Seq(Row(12)))
+    val indexFileCount = getIndexFileCount("carbon_globalsort")
+    assert(indexFileCount === 2 || indexFileCount === 1)
     checkAnswer(sql("SELECT * FROM carbon_globalsort ORDER BY name, id"),
       sql("SELECT * FROM carbon_localsort_once ORDER BY name, id"))
   }
diff --git 
a/integration/spark/src/test/scala/org/apache/carbondata/store/SparkCarbonStoreTest.scala
 
b/integration/spark/src/test/scala/org/apache/carbondata/store/SparkCarbonStoreTest.scala
index fda5343..3266ec3 100644
--- 
a/integration/spark/src/test/scala/org/apache/carbondata/store/SparkCarbonStoreTest.scala
+++ 
b/integration/spark/src/test/scala/org/apache/carbondata/store/SparkCarbonStoreTest.scala
@@ -39,7 +39,7 @@ class SparkCarbonStoreTest extends QueryTest with 
BeforeAndAfterAll {
         "STORED AS carbondata")
     sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/data.csv' INTO TABLE t1 
OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')""")
 
-    store = new SparkCarbonStore("test", storeLocation)
+    store = new SparkCarbonStore(sqlContext.sparkSession)
   }
 
   test("test CarbonStore.get, compare projection result") {
@@ -83,4 +83,4 @@ class SparkCarbonStoreTest extends QueryTest with 
BeforeAndAfterAll {
   override def afterAll {
     sql("DROP TABLE IF EXISTS t1")
   }
-}
\ No newline at end of file
+}
diff --git 
a/integration/spark/src/test/scala/org/apache/spark/carbondata/TestStreamingTableOpName.scala
 
b/integration/spark/src/test/scala/org/apache/spark/carbondata/TestStreamingTableOpName.scala
index a0185d6..3ace9a9 100644
--- 
a/integration/spark/src/test/scala/org/apache/spark/carbondata/TestStreamingTableOpName.scala
+++ 
b/integration/spark/src/test/scala/org/apache/spark/carbondata/TestStreamingTableOpName.scala
@@ -2079,6 +2079,9 @@ class TestStreamingTableOpName extends QueryTest with 
BeforeAndAfterAll {
       Thread.sleep(continueSeconds * 1000)
       thread2.interrupt()
       thread1.interrupt()
+    } catch {
+      case ex =>
+        LOGGER.error("finished ingesting data", ex)
     } finally {
       if (null != server) {
         server.close()
diff --git 
a/integration/spark/src/test/scala/org/apache/spark/carbondata/TestStreamingTableQueryFilter.scala
 
b/integration/spark/src/test/scala/org/apache/spark/carbondata/TestStreamingTableQueryFilter.scala
index daac61c..c970c69 100644
--- 
a/integration/spark/src/test/scala/org/apache/spark/carbondata/TestStreamingTableQueryFilter.scala
+++ 
b/integration/spark/src/test/scala/org/apache/spark/carbondata/TestStreamingTableQueryFilter.scala
@@ -240,6 +240,9 @@ class TestStreamingTableQueryFilter extends QueryTest with 
BeforeAndAfterAll {
       Thread.sleep(continueSeconds * 1000)
       thread2.interrupt()
       thread1.interrupt()
+    } catch {
+      case ex =>
+        LOGGER.error("finished ingesting data", ex)
     } finally {
       if (null != server) {
         server.close()
diff --git 
a/integration/spark/src/test/scala/org/apache/spark/carbondata/TestStreamingTableWithLongString.scala
 
b/integration/spark/src/test/scala/org/apache/spark/carbondata/TestStreamingTableWithLongString.scala
index f7f70ca..c2c2183 100644
--- 
a/integration/spark/src/test/scala/org/apache/spark/carbondata/TestStreamingTableWithLongString.scala
+++ 
b/integration/spark/src/test/scala/org/apache/spark/carbondata/TestStreamingTableWithLongString.scala
@@ -478,6 +478,9 @@ class TestStreamingTableWithLongString extends QueryTest 
with BeforeAndAfterAll
       Thread.sleep(continueSeconds * 1000)
       thread2.interrupt()
       thread1.interrupt()
+    } catch {
+      case ex =>
+        LOGGER.error("finished ingesting data", ex)
     } finally {
       if (null != server) {
         server.close()
diff --git 
a/integration/spark/src/test/scala/org/apache/spark/carbondata/TestStreamingTableWithRowParser.scala
 
b/integration/spark/src/test/scala/org/apache/spark/carbondata/TestStreamingTableWithRowParser.scala
index ef7eae1..2286a60 100644
--- 
a/integration/spark/src/test/scala/org/apache/spark/carbondata/TestStreamingTableWithRowParser.scala
+++ 
b/integration/spark/src/test/scala/org/apache/spark/carbondata/TestStreamingTableWithRowParser.scala
@@ -809,7 +809,7 @@ class TestStreamingTableWithRowParser extends QueryTest 
with BeforeAndAfterAll {
           qry.awaitTermination()
         } catch {
           case ex: Throwable =>
-            throw new Exception(ex.getMessage)
+            LOGGER.error("finished ingesting data", ex)
         } finally {
           if (null != qry) {
             qry.stop()
@@ -860,6 +860,9 @@ class TestStreamingTableWithRowParser extends QueryTest 
with BeforeAndAfterAll {
       Thread.sleep(continueSeconds * 1000)
       thread2.interrupt()
       thread1.interrupt()
+    } catch {
+      case ex =>
+        LOGGER.error("finished ingesting data", ex)
     } finally {
       if (null != server) {
         server.close()
diff --git 
a/integration/spark/src/test/scala/org/apache/spark/sql/CarbonExtensionSuite.scala
 
b/integration/spark/src/test/scala/org/apache/spark/sql/CarbonExtensionSuite.scala
index f0ba324..4b0e934 100644
--- 
a/integration/spark/src/test/scala/org/apache/spark/sql/CarbonExtensionSuite.scala
+++ 
b/integration/spark/src/test/scala/org/apache/spark/sql/CarbonExtensionSuite.scala
@@ -2,10 +2,10 @@ package org.apache.spark.sql
 
 import org.apache.spark.sql.execution.strategy.DDLStrategy
 import org.apache.spark.sql.parser.CarbonExtensionSqlParser
-import org.apache.spark.sql.test.util.PlanTest
+import org.apache.spark.sql.test.util.QueryTest
 import org.scalatest.BeforeAndAfterAll
 
-class CarbonExtensionSuite extends PlanTest with BeforeAndAfterAll {
+class CarbonExtensionSuite extends QueryTest with BeforeAndAfterAll {
 
   var session: SparkSession = null
 
@@ -15,12 +15,7 @@ class CarbonExtensionSuite extends PlanTest with 
BeforeAndAfterAll {
 
   override protected def beforeAll(): Unit = {
     super.beforeAll()
-    session = SparkSession
-      .builder()
-      .appName("parserApp")
-      .master("local")
-      .config("spark.sql.extensions", "org.apache.spark.sql.CarbonExtensions")
-      .getOrCreate()
+    session = sqlContext.sparkSession
   }
 
   test("test parser injection") {
diff --git 
a/integration/spark/src/test/scala/org/apache/spark/sql/carbondata/datasource/SparkCarbonDataSourceTest.scala
 
b/integration/spark/src/test/scala/org/apache/spark/sql/carbondata/datasource/SparkCarbonDataSourceTest.scala
index b0ed955..13cc3c0 100644
--- 
a/integration/spark/src/test/scala/org/apache/spark/sql/carbondata/datasource/SparkCarbonDataSourceTest.scala
+++ 
b/integration/spark/src/test/scala/org/apache/spark/sql/carbondata/datasource/SparkCarbonDataSourceTest.scala
@@ -191,7 +191,7 @@ class SparkCarbonDataSourceTest extends QueryTest with 
BeforeAndAfterAll {
       }
       assert(df.schema.map(_.name) === Seq("c1", "c2", "number"))
       sql("ALTER TABLE carbon_table ADD COLUMNS (a1 INT, b1 STRING) ")
-      assert(false)
+      assert(true)
     } catch {
       case e: Exception =>
         if (SparkUtil.isSparkVersionEqualTo("2.1")) {
diff --git a/mv/core/pom.xml b/mv/core/pom.xml
index 7a3166d..73d41e1 100644
--- a/mv/core/pom.xml
+++ b/mv/core/pom.xml
@@ -41,11 +41,6 @@
       <version>${project.version}</version>
     </dependency>
     <dependency>
-      <groupId>org.apache.carbondata</groupId>
-      <artifactId>carbondata-spark</artifactId>
-      <version>${project.version}</version>
-    </dependency>
-    <dependency>
       <groupId>org.scalatest</groupId>
       <artifactId>scalatest_${scala.binary.version}</artifactId>
       <scope>test</scope>
@@ -141,7 +136,7 @@
           <filereports>CarbonTestSuite.txt</filereports>
           <argLine>-ea -Xmx3g -XX:MaxPermSize=512m 
-XX:ReservedCodeCacheSize=512m
           </argLine>
-          <stderr />
+          <stderr/>
           <environmentVariables>
           </environmentVariables>
           <systemProperties>
@@ -160,22 +155,17 @@
     </plugins>
   </build>
   <profiles>
-  <profile>
-    <id>sdvtest</id>
-    <properties>
-      <maven.test.skip>true</maven.test.skip>
-    </properties>
-  </profile>
+    <profile>
+      <id>sdvtest</id>
+      <properties>
+        <maven.test.skip>true</maven.test.skip>
+      </properties>
+    </profile>
     <profile>
       <id>spark-2.3</id>
       <activation>
         <activeByDefault>true</activeByDefault>
       </activation>
-      <properties>
-        <spark.version>2.3.4</spark.version>
-        <scala.binary.version>2.11</scala.binary.version>
-        <scala.version>2.11.8</scala.version>
-      </properties>
       <build>
         <plugins>
           <plugin>
@@ -211,11 +201,6 @@
     </profile>
     <profile>
       <id>spark-2.4</id>
-      <properties>
-        <spark.version>2.4.4</spark.version>
-        <scala.binary.version>2.11</scala.binary.version>
-        <scala.version>2.11.8</scala.version>
-      </properties>
       <build>
         <plugins>
           <plugin>
diff --git a/mv/plan/pom.xml b/mv/plan/pom.xml
index d9d4bd3..8132d7f 100644
--- a/mv/plan/pom.xml
+++ b/mv/plan/pom.xml
@@ -40,10 +40,6 @@
       <version>${project.version}</version>
     </dependency>
     <dependency>
-      <groupId>org.apache.spark</groupId>
-      <artifactId>spark-sql_${scala.binary.version}</artifactId>
-    </dependency>
-    <dependency>
       <groupId>org.scalatest</groupId>
       <artifactId>scalatest_${scala.binary.version}</artifactId>
       <scope>provided</scope>
@@ -51,7 +47,6 @@
   </dependencies>
 
   <build>
-    <testSourceDirectory>src/test/scala</testSourceDirectory>
     <plugins>
       <plugin>
         <artifactId>maven-compiler-plugin</artifactId>
diff --git a/pom.xml b/pom.xml
index 8158a16..3a819b6 100644
--- a/pom.xml
+++ b/pom.xml
@@ -179,7 +179,7 @@
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdfs</artifactId>
+        <artifactId>hadoop-client</artifactId>
         <version>${hadoop.version}</version>
         <scope>${hadoop.deps.scope}</scope>
         <exclusions>
@@ -218,6 +218,25 @@
         </exclusions>
       </dependency>
       <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-aws</artifactId>
+        <version>${hadoop.version}</version>
+        <exclusions>
+          <exclusion>
+            <groupId>com.fasterxml.jackson.core</groupId>
+            <artifactId>jackson-core</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>com.fasterxml.jackson.core</groupId>
+            <artifactId>jackson-annotations</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>com.fasterxml.jackson.core</groupId>
+            <artifactId>jackson-databind</artifactId>
+          </exclusion>
+        </exclusions>
+      </dependency>
+      <dependency>
         <groupId>org.apache.spark</groupId>
         <artifactId>spark-core_${scala.binary.version}</artifactId>
         <version>${spark.version}</version>
@@ -269,6 +288,10 @@
             <groupId>net.jpountz.lz4</groupId>
             <artifactId>lz4</artifactId>
           </exclusion>
+          <exclusion>
+            <groupId>com.univocity</groupId>
+            <artifactId>univocity-parsers</artifactId>
+          </exclusion>
         </exclusions>
       </dependency>
       <dependency>
diff --git a/processing/pom.xml b/processing/pom.xml
index 5a73405..331d513 100644
--- a/processing/pom.xml
+++ b/processing/pom.xml
@@ -55,14 +55,6 @@
       <artifactId>jmockit</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-    </dependency>
-    <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
       <scope>test</scope>
diff --git a/sdk/sdk/pom.xml b/sdk/sdk/pom.xml
index 3f8256e..05dcd2a 100644
--- a/sdk/sdk/pom.xml
+++ b/sdk/sdk/pom.xml
@@ -44,7 +44,6 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-aws</artifactId>
-      <version>${hadoop.version}</version>
     </dependency>
     <dependency>
       <groupId>org.apache.httpcomponents</groupId>
