This is an automated email from the ASF dual-hosted git repository.

indhumuthumurugesh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new 35091a2  [CARBONDATA-4184] alter table Set TBLPROPERTIES for RANGE_COLUMN sets unsupported datatype(complex_datatypes/Binary/Boolean/Decimal) as RANGE_COLUMN
35091a2 is described below

commit 35091a2e5c837b9ecdb292ab70038b8e329813db
Author: Karan980 <[email protected]>
AuthorDate: Wed May 12 17:04:53 2021 +0530

    [CARBONDATA-4184] alter table Set TBLPROPERTIES for RANGE_COLUMN sets unsupported datatype(complex_datatypes/Binary/Boolean/Decimal) as RANGE_COLUMN
    
    Why is this PR needed?
    The ALTER TABLE SET TBLPROPERTIES command was not validating unsupported data types for the range column.
    
    What changes were proposed in this PR?
    Added validation for unsupported data types before setting the range column value.
    
    Does this PR introduce any user interface change?
    No
    
    Is any new testcase added?
    Yes
    
    This closes #4133
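
    Concretely, once this patch is applied, ALTER TABLE SET TBLPROPERTIES fails
    fast when the proposed RANGE_COLUMN has an unsupported data type. A minimal
    sketch of the user-visible effect, assuming a SparkSession `spark` is in
    scope (the table and column names below are hypothetical):

        spark.sql("CREATE TABLE t(id INT, b BINARY) STORED AS carbondata")
        // Previously accepted silently; now rejected with a
        // MalformedCarbonCommandException, surfaced to the caller as
        // "Alter table newProperties operation failed: RANGE_COLUMN
        //  doesn't support BINARY data type: b" (see the test below).
        spark.sql("ALTER TABLE t SET TBLPROPERTIES('range_column'='b')")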
---
 .../spark/sql/catalyst/CarbonParserUtil.scala      | 16 ++++++++------
 .../org/apache/spark/util/AlterTableUtil.scala     |  7 +++++-
 .../dataload/TestRangeColumnDataLoad.scala         | 25 ++++++++++++++++++++++
 3 files changed, 41 insertions(+), 7 deletions(-)

diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/catalyst/CarbonParserUtil.scala b/integration/spark/src/main/scala/org/apache/spark/sql/catalyst/CarbonParserUtil.scala
index 9cb8ed7..266cd95 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/catalyst/CarbonParserUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/catalyst/CarbonParserUtil.scala
@@ -589,6 +589,15 @@ object CarbonParserUtil {
     noInvertedIdxCols
   }
 
+  def validateUnsupportedDataTypeForRangeColumn(dataType: String): Boolean = {
+    DataTypes.BINARY.getName.equalsIgnoreCase(dataType) ||
+      DataTypes.BOOLEAN.getName.equalsIgnoreCase(dataType) ||
+      CarbonCommonConstants.ARRAY.equalsIgnoreCase(dataType) ||
+      CarbonCommonConstants.STRUCT.equalsIgnoreCase(dataType) ||
+      CarbonCommonConstants.MAP.equalsIgnoreCase(dataType) ||
+      CarbonCommonConstants.DECIMAL.equalsIgnoreCase(dataType)
+  }
+
   protected def extractInvertedIndexColumns(fields: Seq[Field],
       tableProperties: Map[String, String]): Seq[String] = {
     // check whether the column name is in fields
@@ -684,12 +693,7 @@ object CarbonParserUtil {
         val errorMsg = "range_column: " + rangeColumn +
                        " does not exist in table. Please check the create 
table statement."
         throw new MalformedCarbonCommandException(errorMsg)
-      } else if (DataTypes.BINARY.getName.equalsIgnoreCase(dataType) ||
-                 DataTypes.BOOLEAN.getName.equalsIgnoreCase(dataType) ||
-                 CarbonCommonConstants.ARRAY.equalsIgnoreCase(dataType) ||
-                 CarbonCommonConstants.STRUCT.equalsIgnoreCase(dataType) ||
-                 CarbonCommonConstants.MAP.equalsIgnoreCase(dataType) ||
-                 CarbonCommonConstants.DECIMAL.equalsIgnoreCase(dataType)) {
+      } else if (validateUnsupportedDataTypeForRangeColumn(dataType)) {
         throw new MalformedCarbonCommandException(
           s"RANGE_COLUMN doesn't support $dataType data type: " + rangeColumn)
       } else {
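
The refactor above extracts the six-way type check into a reusable predicate so
the create-table and alter-table paths share one definition. A standalone
sketch of the same idea (illustrative names only; the real code resolves the
type names through DataTypes and CarbonCommonConstants):

    // Data types that cannot serve as a RANGE_COLUMN, compared case-insensitively.
    object RangeColumnCheck {
      private val unsupported =
        Set("binary", "boolean", "array", "struct", "map", "decimal")

      def isUnsupportedForRangeColumn(dataType: String): Boolean =
        unsupported.contains(dataType.toLowerCase)
    }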
diff --git a/integration/spark/src/main/scala/org/apache/spark/util/AlterTableUtil.scala b/integration/spark/src/main/scala/org/apache/spark/util/AlterTableUtil.scala
index c6eca1c..2133b16 100644
--- a/integration/spark/src/main/scala/org/apache/spark/util/AlterTableUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/util/AlterTableUtil.scala
@@ -25,7 +25,7 @@ import scala.collection.mutable.ListBuffer
 
 import org.apache.commons.lang3.StringUtils
 import org.apache.spark.sql.{CarbonEnv, SparkSession}
-import org.apache.spark.sql.catalyst.TableIdentifier
+import org.apache.spark.sql.catalyst.{CarbonParserUtil, TableIdentifier}
 import org.apache.spark.sql.catalyst.catalog.SessionCatalog
 import org.apache.spark.sql.hive.{CarbonRelation, CarbonSessionCatalogUtil}
 import org.apache.spark.sql.index.CarbonIndexUtil
@@ -652,6 +652,11 @@ object AlterTableUtil {
         throw new MalformedCarbonCommandException(
           s"Table property ${ CarbonCommonConstants.RANGE_COLUMN }: ${ 
rangeColumnProp }" +
           s" is not exists in the table")
+      }
+      val dataType = rangeColumn.getDataType.getName;
+      if 
(CarbonParserUtil.validateUnsupportedDataTypeForRangeColumn(dataType)) {
+        throw new MalformedCarbonCommandException(
+          s"RANGE_COLUMN doesn't support $dataType data type: " + 
rangeColumnProp)
       } else {
         propertiesMap.put(CarbonCommonConstants.RANGE_COLUMN, 
rangeColumn.getColName)
       }
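
On the alter path the new type check runs only after the column-existence
check, so a misspelled column and an unsupported type yield distinct errors. A
hedged stand-in for that ordering, reusing the RangeColumnCheck sketch above
(the real code works on CarbonTable column metadata, not a plain Map):

    def resolveRangeColumn(columnTypes: Map[String, String], prop: String): String = {
      // Existence first: unknown column names get their own error message.
      val dataType = columnTypes.getOrElse(prop,
        throw new IllegalArgumentException(
          s"Table property range_column: $prop does not exist in the table"))
      // Then the shared unsupported-type predicate.
      if (RangeColumnCheck.isUnsupportedForRangeColumn(dataType)) {
        throw new IllegalArgumentException(
          s"RANGE_COLUMN doesn't support $dataType data type: $prop")
      }
      prop
    }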
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestRangeColumnDataLoad.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestRangeColumnDataLoad.scala
index b8dc63c..f426afb 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestRangeColumnDataLoad.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestRangeColumnDataLoad.scala
@@ -847,6 +847,31 @@ class TestRangeColumnDataLoad extends QueryTest with BeforeAndAfterEach with Bef
     sql("ALTER TABLE carbon_range_column6 SET TBLPROPERTIES('range_column'='name')")
   }
 
+  test("check range column validation for unsupported data types") {
+    sql("""drop table if exists carbon_range_column7""".stripMargin)
+    sql(
+      """
+        | CREATE TABLE carbon_range_column7(id INT, name STRING, city STRING, 
age INT, bin binary,
+        |  bool1 boolean, arr1 array<int>, struct1 
struct<id1:string,name1:string>,
+        |  map1 map<string,string>, dec1 decimal(10,5)) STORED AS carbondata
+        | TBLPROPERTIES('SORT_SCOPE'='LOCAL_SORT', 'SORT_COLUMNS'='name, city')
+      """.stripMargin)
+    val invalidRangeColumnsMap = Map("bin" -> "BINARY", "bool1" -> "BOOLEAN", 
"arr1" -> "ARRAY",
+      "struct1" -> "STRUCT", "map1" -> "MAP", "dec1" -> "DECIMAL")
+    validateInvalidRangeColumn(invalidRangeColumnsMap)
+    sql("""drop table if exists carbon_range_column7""".stripMargin)
+  }
+
+  def validateInvalidRangeColumn(invalidRangeColumnsMap: Map[String, String]): 
Unit = {
+    invalidRangeColumnsMap.foreach { value =>
+      val ex = intercept[RuntimeException] {
+        sql(s"ALTER TABLE carbon_range_column7 SET 
TBLPROPERTIES('range_column'='${ value._1 }')")
+      }
+      assertResult("Alter table newProperties operation failed: RANGE_COLUMN 
doesn't " +
+        s"support ${ value._2 } data type: ${value._1}")(ex.getMessage)
+    }
+  }
+
   private def getIndexFileCount(tableName: String, segmentNo: String = "0"): Int = {
     val carbonTable = CarbonMetadata.getInstance().getCarbonTable("default", tableName)
     val segmentDir = CarbonTablePath.getSegmentPath(carbonTable.getTablePath, segmentNo)
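
The new test drives all six unsupported types through one helper built on
ScalaTest's intercept/assertResult pair. The same pattern in isolation (the
message string here is illustrative):

    import org.scalatest.Assertions._

    val ex = intercept[RuntimeException] {
      throw new RuntimeException("RANGE_COLUMN doesn't support BINARY data type: bin")
    }
    assertResult("RANGE_COLUMN doesn't support BINARY data type: bin")(ex.getMessage)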
