Fixed drop database cascade in Spark 2.1 and alter table in vector mode.

Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/809d8806
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/809d8806
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/809d8806

Branch: refs/heads/branch-1.1
Commit: 809d880684f68a43bb83eccf23c5409fe02bf15f
Parents: 105b7c3
Author: ravipesala <ravi.pes...@gmail.com>
Authored: Sat Jun 3 13:11:57 2017 +0530
Committer: ravipesala <ravi.pes...@gmail.com>
Committed: Thu Jun 15 13:25:54 2017 +0530

----------------------------------------------------------------------
 ...tCreateTableWithDatabaseNameCaseChange.scala | 24 ++++++++++++++++++++
 .../spark/sql/test/SparkTestQueryExecutor.scala |  1 +
 .../vectorreader/ColumnarVectorWrapper.java     | 14 ++++++------
 .../execution/command/CarbonHiveCommands.scala  |  7 +++---
 4 files changed, 35 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/809d8806/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithDatabaseNameCaseChange.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithDatabaseNameCaseChange.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithDatabaseNameCaseChange.scala
index 87aac94..5bf55f9 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithDatabaseNameCaseChange.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithDatabaseNameCaseChange.scala
@@ -51,6 +51,30 @@ class TestCreateTableWithDatabaseNameCaseChange extends QueryTest with BeforeAnd
     }
   }
 
+  test("test drop database cascade with case sensitive") {
+    // this test case will test the creation of table for different case for 
database name.
+    // In hive dbName folder is always created with small case in HDFS. Carbon 
should behave
+    // the same way. If table creation fails during second time creation it 
means in HDFS
+    // separate folders are created for the matching case in commands executed.
+    sql("drop database if exists AbCdEf cascade")
+    sql("create database AbCdEf")
+    sql("use AbCdEf")
+    sql("create table carbonTable(a int, b string)stored by 'carbondata'")
+    sql("use default")
+    sql("drop database if exists AbCdEf cascade")
+    sql("create database AbCdEf")
+    sql("use AbCdEf")
+    try {
+      sql("create table carbonTable(a int, b string)stored by 'carbondata'")
+      assert(true)
+    } catch {
+      case ex: Exception =>
+        assert(false)
+    }
+    sql("use default")
+    sql("drop database if exists AbCdEf cascade")
+  }
+
   override def afterAll {
     sql("use default")
     sql("drop database if exists dbCaseChange cascade")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/809d8806/integration/spark/src/main/scala/org/apache/spark/sql/test/SparkTestQueryExecutor.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/test/SparkTestQueryExecutor.scala b/integration/spark/src/main/scala/org/apache/spark/sql/test/SparkTestQueryExecutor.scala
index 591cdf4..27df623 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/test/SparkTestQueryExecutor.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/test/SparkTestQueryExecutor.scala
@@ -42,6 +42,7 @@ object SparkTestQueryExecutor {
     .addProperty(CarbonCommonConstants.STORE_LOCATION_TEMP_PATH,
       System.getProperty("java.io.tmpdir"))
     .addProperty(CarbonCommonConstants.LOCK_TYPE, CarbonCommonConstants.CARBON_LOCK_TYPE_LOCAL)
+    .addProperty(CarbonCommonConstants.STORE_LOCATION, TestQueryExecutor.storeLocation)
 
   val sc = new SparkContext(new SparkConf()
     .setAppName("CarbonSpark")
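
The one-line change above registers the test store location as the STORE_LOCATION Carbon property up front. CarbonProperties.getInstance().addProperty and getProperty are CarbonData's property accessors (addProperty returns the instance, which is why the calls chain); the store path value in this sketch is a made-up example:

  import org.apache.carbondata.core.constants.CarbonCommonConstants
  import org.apache.carbondata.core.util.CarbonProperties

  object StorePropertySketch extends App {
    // Register the store location globally, as the test executor now does;
    // "/tmp/carbon/store" is a placeholder path for this sketch.
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.STORE_LOCATION, "/tmp/carbon/store")

    // Any component can then resolve the same store path from the property.
    val store = CarbonProperties.getInstance()
      .getProperty(CarbonCommonConstants.STORE_LOCATION)
    println(store) // /tmp/carbon/store
  }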

http://git-wip-us.apache.org/repos/asf/carbondata/blob/809d8806/integration/spark2/src/main/java/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapper.java
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/java/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapper.java b/integration/spark2/src/main/java/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapper.java
index c3d2a87..5ab741b 100644
--- a/integration/spark2/src/main/java/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapper.java
+++ b/integration/spark2/src/main/java/org/apache/carbondata/spark/vectorreader/ColumnarVectorWrapper.java
@@ -60,7 +60,7 @@ class ColumnarVectorWrapper implements CarbonColumnVector {
     if (filteredRowsExist) {
       for (int i = 0; i < count; i++) {
         if (!filteredRows[rowId]) {
-          putShort(counter++, value);
+          columnVector.putShort(counter++, value);
         }
         rowId++;
       }
@@ -79,7 +79,7 @@ class ColumnarVectorWrapper implements CarbonColumnVector {
     if (filteredRowsExist) {
       for (int i = 0; i < count; i++) {
         if (!filteredRows[rowId]) {
-          putInt(counter++, value);
+          columnVector.putInt(counter++, value);
         }
         rowId++;
       }
@@ -98,7 +98,7 @@ class ColumnarVectorWrapper implements CarbonColumnVector {
     if (filteredRowsExist) {
       for (int i = 0; i < count; i++) {
         if (!filteredRows[rowId]) {
-          putLong(counter++, value);
+          columnVector.putLong(counter++, value);
         }
         rowId++;
       }
@@ -116,7 +116,7 @@ class ColumnarVectorWrapper implements CarbonColumnVector {
   @Override public void putDecimals(int rowId, int count, Decimal value, int precision) {
     for (int i = 0; i < count; i++) {
       if (!filteredRows[rowId]) {
-        putDecimal(counter++, value, precision);
+        columnVector.putDecimal(counter++, value, precision);
       }
       rowId++;
     }
@@ -132,7 +132,7 @@ class ColumnarVectorWrapper implements CarbonColumnVector {
     if (filteredRowsExist) {
       for (int i = 0; i < count; i++) {
         if (!filteredRows[rowId]) {
-          putDouble(counter++, value);
+          columnVector.putDouble(counter++, value);
         }
         rowId++;
       }
@@ -150,7 +150,7 @@ class ColumnarVectorWrapper implements CarbonColumnVector {
   @Override public void putBytes(int rowId, int count, byte[] value) {
     for (int i = 0; i < count; i++) {
       if (!filteredRows[rowId]) {
-        putBytes(counter++, value);
+        columnVector.putByteArray(counter++, value);
       }
       rowId++;
     }
@@ -172,7 +172,7 @@ class ColumnarVectorWrapper implements CarbonColumnVector {
     if (filteredRowsExist) {
       for (int i = 0; i < count; i++) {
         if (!filteredRows[rowId]) {
-          putNull(counter++);
+          columnVector.putNull(counter++);
         }
         rowId++;
       }
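
In every hunk of this file the filtered-row branch previously called the wrapper's own single-row put method (e.g. putShort) rather than writing to the wrapped Spark columnVector; since those wrapper methods presumably apply the row filter and advance the compaction counter themselves, the writes landed at the wrong positions. The fix delegates directly to the underlying vector (and uses its putByteArray for byte[] values). A condensed Scala sketch of the delegation pattern after the fix, with illustrative type names rather than the actual CarbonData/Spark classes:

  // Illustrative stand-in for Spark's ColumnVector put API.
  trait IntVector { def putInt(rowId: Int, value: Int): Unit }

  // Condensed wrapper pattern: when rows are filtered out, surviving values
  // are written at a separate, compacted counter position.
  class FilteringWrapper(inner: IntVector, filteredRows: Array[Boolean]) {
    private var counter = 0
    def putInts(rowId: Int, count: Int, value: Int): Unit = {
      var r = rowId
      for (_ <- 0 until count) {
        // Delegate straight to the wrapped vector. Routing the write back
        // through a wrapper put method (as before the fix) would apply the
        // filter and counter a second time, at an already-compacted index.
        if (!filteredRows(r)) { inner.putInt(counter, value); counter += 1 }
        r += 1
      }
    }
  }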

http://git-wip-us.apache.org/repos/asf/carbondata/blob/809d8806/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonHiveCommands.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonHiveCommands.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonHiveCommands.scala
index 2786620..b72f077 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonHiveCommands.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonHiveCommands.scala
@@ -31,13 +31,12 @@ case class CarbonDropDatabaseCommand(command: DropDatabaseCommand)
     val rows = command.run(sparkSession)
     if (command.cascade) {
       val tablesInDB = CarbonEnv.getInstance(sparkSession).carbonMetastore.getAllTables()
-        .filterNot(_.database.exists(_.equalsIgnoreCase(dbName)))
+        .filter(_.database.exists(_.equalsIgnoreCase(dbName)))
       tablesInDB.foreach { tableName =>
-        CarbonDropTableCommand(true, Some(dbName), tableName.table).run(sparkSession)
+        CarbonDropTableCommand(true, tableName.database, tableName.table).run(sparkSession)
       }
     }
-    CarbonEnv.getInstance(sparkSession).carbonMetastore.dropDatabaseDirectory(dbName)
+    CarbonEnv.getInstance(sparkSession).carbonMetastore.dropDatabaseDirectory(dbName.toLowerCase)
     rows
   }
 }
-
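
Two logic fixes are visible in this hunk: the cascade previously used filterNot, which selected every table *outside* the database being dropped, and dropDatabaseDirectory received the name as typed rather than lower-cased. A self-contained Scala sketch of the filter fix (TableId mirrors the shape of Spark's TableIdentifier but is a stand-in for this sketch):

  object CascadeFilterSketch extends App {
    // Hypothetical stand-in for the metastore's table identifiers.
    case class TableId(database: Option[String], table: String)

    val allTables = Seq(
      TableId(Some("abcdef"), "carbontable"),
      TableId(Some("other"), "unrelated"))

    val dbName = "AbCdEf"

    // Before the fix: filterNot selected every table NOT in dbName, so the
    // cascade dropped tables belonging to unrelated databases.
    val wrong = allTables.filterNot(_.database.exists(_.equalsIgnoreCase(dbName)))
    // After the fix: filter selects only the tables inside dbName.
    val right = allTables.filter(_.database.exists(_.equalsIgnoreCase(dbName)))

    println(wrong) // List(TableId(Some(other),unrelated))  <- wrongly targeted before
    println(right) // List(TableId(Some(abcdef),carbontable))
  }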
