This is an automated email from the ASF dual-hosted git repository.

indhumuthumurugesh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new 65462ff  [CARBONDATA-4230] table properties not updated with lower-case and table comment is not working in carbon spark3.1
65462ff is described below

commit 65462ff890e90aa95adf3daf13c52f1eb9f6e80e
Author: Mahesh Raju Somalaraju <[email protected]>
AuthorDate: Wed Jun 23 18:50:25 2021 +0530

    [CARBONDATA-4230] table properties not updated with lower-case and table
    comment is not working in carbon spark3.1
    
    Why is this PR needed?
    1. Table properties are stored case-sensitively, so when a property is
       looked up with its lower-case key the lookup fails and the create
       table command fails. This was introduced by the Spark 3.1 integration
       changes.
    2. The table comment is displayed as byte code in a Spark 3.1 cluster
       because CommentSpecContext changed in 3.1.
    
    What changes were proposed in this PR?
    1. Convert table properties to lower case before storing them (see the
       sketch after this list).
    2. Get the string value from commentSpec and set it as the table comment
       (see the second sketch below).
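    A minimal sketch of change (1), mirroring the convertPropertiesToLowercase
    helper this PR adds to CarbonSparkSqlParserUtil. The
    needToConvertToLowerCase predicate is stubbed here so the snippet is
    self-contained; the real predicate lives in CarbonSparkSqlParserUtil:

        import scala.collection.mutable

        object LowercasePropsSketch {
          // Stub: the real predicate lives in CarbonSparkSqlParserUtil.
          def needToConvertToLowerCase(key: String): Boolean = true

          def convertPropertiesToLowercase(
              properties: Map[String, String]): mutable.Map[String, String] = {
            val tableProperties = mutable.Map[String, String]()
            properties.foreach { property =>
              // Keys are always lowered; values only when the predicate says so.
              if (needToConvertToLowerCase(property._1)) {
                tableProperties.put(property._1.toLowerCase, property._2.toLowerCase)
              } else {
                tableProperties.put(property._1.toLowerCase, property._2)
              }
            }
            tableProperties
          }

          def main(args: Array[String]): Unit = {
            // "SORT_COLUMNS" is stored (and later looked up) as "sort_columns".
            println(convertPropertiesToLowercase(Map("SORT_COLUMNS" -> "City")))
          }
        }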
    
    Does this PR introduce any user interface change?
    No
    
    Is any new testcase added?
    No. A test case already exists, but it does not fail in the local UT
    setup because the create flow differs between the local UT environment
    and a real cluster.
    
    This closes #4163
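    A stand-in sketch of change (2). CommentHolder below is hypothetical, not
    the real ANTLR CommentSpecContext: relying on toString() yields the
    context object's own representation rather than the comment text, which
    is why the comment rendered as gibberish; the fix reads the STRING
    token's value via string(commentSpec.get(0).STRING()) instead.

        // Hypothetical stand-in for CommentSpecContext.
        class CommentHolder(val text: String)

        object CommentDemo {
          def main(args: Array[String]): Unit = {
            val spec = new CommentHolder("my table comment")
            // Default toString prints e.g. CommentHolder@1b6d3586,
            // analogous to the garbled comment seen on Spark 3.1.
            println(spec.toString)
            // Read the underlying value explicitly, as the fix does.
            println(spec.text)
          }
        }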
---
 .../org/apache/spark/sql/parser/CarbonSparkSqlParser.scala   | 12 ++----------
 .../apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala   | 12 ++++++++++++
 .../org/apache/spark/sql/hive/CarbonSqlAstBuilder.scala      |  2 +-
 .../org/apache/spark/sql/parser/CarbonSparkSqlParser.scala   |  4 ++--
 4 files changed, 17 insertions(+), 13 deletions(-)

diff --git a/integration/spark/src/main/common2.3and2.4/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala b/integration/spark/src/main/common2.3and2.4/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
index 87bd876..6fbd4b6 100644
--- a/integration/spark/src/main/common2.3and2.4/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
+++ b/integration/spark/src/main/common2.3and2.4/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
@@ -25,7 +25,7 @@ import org.apache.spark.sql.catalyst.parser.SqlBaseParser._
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 import org.apache.spark.sql.execution.{SparkSqlAstBuilder, SparkSqlParser}
 import org.apache.spark.sql.internal.{SQLConf, VariableSubstitution}
-import org.apache.spark.sql.parser.CarbonSparkSqlParserUtil.needToConvertToLowerCase
+import org.apache.spark.sql.parser.CarbonSparkSqlParserUtil.convertPropertiesToLowercase
 import org.apache.spark.sql.types.StructField
 import org.apache.spark.sql.util.CarbonException
 import org.apache.spark.util.CarbonReflectionUtils
@@ -128,16 +128,8 @@ class CarbonHelperSqlAstBuilder(conf: SQLConf,
       None
     }
 
-    val tableProperties = mutable.Map[String, String]()
     val properties: Map[String, String] = getPropertyKeyValues(tablePropertyList)
-
-    properties.foreach { property =>
-      if (needToConvertToLowerCase(property._1)) {
-        tableProperties.put(property._1.toLowerCase, property._2.toLowerCase)
-      } else {
-        tableProperties.put(property._1.toLowerCase, property._2)
-      }
-    }
+    val tableProperties = convertPropertiesToLowercase(properties)
     // validate partition clause
     val partitionByStructFields = Option(partitionColumns).toSeq.flatMap(visitColTypeList)
     val partitionFields = CarbonToSparkAdapter.
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala b/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala
index 7557cc0..609a476 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala
@@ -547,4 +547,16 @@ object CarbonSparkSqlParserUtil {
       isOverwriteTable = isOverwrite.isDefined,
       partition = partitionSpec)
   }
+
+  def convertPropertiesToLowercase(properties: Map[String, String]): mutable.Map[String, String] = {
+    val tableProperties = mutable.Map[String, String]()
+    properties.foreach { property =>
+      if (needToConvertToLowerCase(property._1)) {
+        tableProperties.put(property._1.toLowerCase, property._2.toLowerCase)
+      } else {
+        tableProperties.put(property._1.toLowerCase, property._2)
+      }
+    }
+    tableProperties
+  }
 }
diff --git a/integration/spark/src/main/spark3.1/org/apache/spark/sql/hive/CarbonSqlAstBuilder.scala b/integration/spark/src/main/spark3.1/org/apache/spark/sql/hive/CarbonSqlAstBuilder.scala
index 4ec963f..fcfa548 100644
--- a/integration/spark/src/main/spark3.1/org/apache/spark/sql/hive/CarbonSqlAstBuilder.scala
+++ b/integration/spark/src/main/spark3.1/org/apache/spark/sql/hive/CarbonSqlAstBuilder.scala
@@ -44,7 +44,7 @@ class CarbonSqlAstBuilder(conf: SQLConf, parser: CarbonSpark2SqlParser, sparkSes
       val commentSpecContext = if (commentSpec.isEmpty) {
         null
       } else {
-        commentSpec.get(0).toString()
+        string(commentSpec.get(0).STRING())
       }
       val createTableTuple = (ctx.createTableHeader, createTableClauses.skewSpec(0),
         createTableClauses.bucketSpec(0), createTableClauses.partitioning, ctx.colTypeList(),
diff --git a/integration/spark/src/main/spark3.1/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala b/integration/spark/src/main/spark3.1/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
index 561f567..ac0be49 100644
--- a/integration/spark/src/main/spark3.1/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
+++ b/integration/spark/src/main/spark3.1/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
@@ -25,6 +25,7 @@ import org.apache.spark.sql.catalyst.parser.SqlBaseParser._
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 import org.apache.spark.sql.execution.{SparkSqlAstBuilder, SparkSqlParser}
 import org.apache.spark.sql.internal.{SQLConf, VariableSubstitution}
+import org.apache.spark.sql.parser.CarbonSparkSqlParserUtil.convertPropertiesToLowercase
 import org.apache.spark.sql.types.StructField
 import org.apache.spark.sql.util.CarbonException
 import org.apache.spark.util.CarbonReflectionUtils
@@ -125,9 +126,8 @@ class CarbonHelperSqlAstBuilder(conf: SQLConf,
       None
     }
 
-    val tableProperties = mutable.Map[String, String]()
     val properties: Map[String, String] = getPropertyKeyValues(tablePropertyList)
-    properties.foreach{property => tableProperties.put(property._1, property._2)}
+    val tableProperties = convertPropertiesToLowercase(properties)
 
     // validate partition clause
     val partitionByStructFields = Option(partitionColumns).toSeq
