This is an automated email from the ASF dual-hosted git repository.

akashrn5 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new 42f6982  [CARBONDATA-4273] Fix Cannot create external table with 
partitions
42f6982 is described below

commit 42f69827e0a577b6128417104c0a49cd5bf21ad7
Author: Indhumathi27 <[email protected]>
AuthorDate: Fri Aug 27 18:43:09 2021 +0530

    [CARBONDATA-4273] Fix Cannot create external table with partitions
    
    Why is this PR needed?
    Creating a partition table with a specified location fails with an
    "unsupported" error message.
    
    What changes were proposed in this PR?
    This scenario already works in cluster mode. The check can be removed
    so that, in local mode as well, a partition table can be created with
    a specified location.
    
    Does this PR introduce any user interface change?
    No
    
    Is any new testcase added?
    Yes
    
    This closes #4211
---
 .../sql/parser/CarbonSparkSqlParserUtil.scala      |  4 --
 .../createTable/TestCreateExternalTable.scala      | 82 +++++++++++++++++++---
 2 files changed, 73 insertions(+), 13 deletions(-)

diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala
 
b/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala
index 5467276..e972fb3 100644
--- 
a/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala
+++ 
b/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala
@@ -215,10 +215,6 @@ object CarbonSparkSqlParserUtil {
       table.identifier.table.toLowerCase()
     )
     val tableInfo = if (isExternal) {
-      if (partitionColumnNames.nonEmpty) {
-        throw new MalformedCarbonCommandException(
-          "Partition is not supported for external table")
-      }
       // read table info from schema file in the provided table path
       val tableInfo = {
         try {
diff --git 
a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateExternalTable.scala
 
b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateExternalTable.scala
index dc232d3..c8e797c 100644
--- 
a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateExternalTable.scala
+++ 
b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateExternalTable.scala
@@ -19,7 +19,8 @@ package org.apache.carbondata.spark.testsuite.createTable
 
 import java.io.File
 
-import org.apache.spark.sql.{AnalysisException, CarbonEnv}
+import org.apache.commons.io.FileUtils
+import org.apache.spark.sql.{AnalysisException, CarbonEnv, Row}
 import org.apache.spark.sql.test.util.QueryTest
 import org.scalatest.BeforeAndAfterAll
 
@@ -76,19 +77,22 @@ class TestCreateExternalTable extends QueryTest with 
BeforeAndAfterAll {
            |STORED AS carbondata
            |LOCATION '$storeLocation/origin'
        """.stripMargin)
-      checkAnswer(sql("SELECT count(*) from source"), sql("SELECT count(*) 
from origin"))
+      verifyResult()
+    }
+  }
 
-      checkExistence(sql("describe formatted source"), true, storeLocation + 
"/origin")
+  private def verifyResult(): Unit = {
+    checkAnswer(sql("SELECT count(*) from source"), sql("SELECT count(*) from 
origin"))
 
-      val carbonTable = CarbonEnv.getCarbonTable(None, 
"source")(sqlContext.sparkSession)
-      assert(carbonTable.isExternalTable)
+    checkExistence(sql("describe formatted source"), true, storeLocation + 
"/origin")
 
-      sql("DROP TABLE IF EXISTS source")
+    val carbonTable = CarbonEnv.getCarbonTable(None, 
"source")(sqlContext.sparkSession)
+    assert(carbonTable.isExternalTable)
 
-      // DROP TABLE should not delete data
-      assert(new File(originDataPath).exists())
+    sql("DROP TABLE IF EXISTS source")
 
-    }
+    // DROP TABLE should not delete data
+    assert(new File(originDataPath).exists())
   }
 
   ignore("create external table with specified schema") {
@@ -146,4 +150,64 @@ class TestCreateExternalTable extends QueryTest with 
BeforeAndAfterAll {
     checkAnswer(sql("select * from rstest1"), sql("select * from rsext"))
   }
 
+  test("create external table and non-external table on partition table with 
location") {
+    sql("set hive.exec.dynamic.partition.mode=nonstrict")
+    sql("drop table if exists origin")
+    // create carbon table and insert data
+    sql("CREATE TABLE origin(a int, b string) partitioned by (c string) STORED 
AS carbondata")
+    sql("INSERT INTO origin select 100,'spark','test1'")
+    sql("INSERT INTO origin select 200,'hive','test2'")
+
+    // test external table with partition by with location
+    sql("drop table if exists source")
+      sql(
+        s"""
+           |CREATE EXTERNAL TABLE source(a int, b string) partitioned by (c 
string)
+           |stored as carbondata
+           |LOCATION '$storeLocation/origin'
+       """.stripMargin)
+    verifyResult()
+
+    // test table with partition by with location
+    sql("drop table if exists source")
+    sql(
+      s"""
+         |CREATE TABLE source(a int, b string) partitioned by (c string)
+         |stored as carbondata
+         |LOCATION '$storeLocation/origin'
+       """.stripMargin)
+    FileUtils.deleteDirectory(new File( s"$storeLocation/origin1"))
+    val newStoreLocation = s"$storeLocation/origin1"
+    FileUtils.copyDirectory(new File(s"$storeLocation/origin/c=test1"), new 
File(newStoreLocation))
+    verifyResult()
+
+    // test without any schema file in location specified
+    sql("drop table if exists source")
+    sql(
+      s"""
+         |CREATE TABLE source(a int, b string) partitioned by (c string)
+         |stored as carbondata
+         |LOCATION '$newStoreLocation'
+       """.stripMargin)
+    checkAnswer(sql("select * from source"), Seq(Row(100, "spark", "test1")))
+    sql("drop table if exists source")
+
+    // test with empty directory
+    FileUtils.deleteDirectory(new File( s"$storeLocation/origin1"))
+    sql("drop table if exists source")
+    sql(
+      s"""
+         |CREATE TABLE source(a int, b string) partitioned by (c string)
+         |stored as carbondata
+         |LOCATION '$newStoreLocation'
+       """.stripMargin)
+    val exception = intercept[Exception] {
+      sql("select * from source").show(false)
+    }
+    assert(exception.getMessage.contains("No Index files are present in the 
table location"))
+    sql("INSERT INTO source select 100,'spark','test1'")
+    checkAnswer(sql("select * from source"), Seq(Row(100, "spark", "test1")))
+    sql("drop table if exists source")
+  }
+
 }

Reply via email to