This is an automated email from the ASF dual-hosted git repository.

indhumuthumurugesh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new f2698fe  [CARBONDATA-4248] Fixed upper case column name in explain command
f2698fe is described below

commit f2698fed9b83fbdca5cc473ffac1ebcdd7880e23
Author: Nihal ojha <[email protected]>
AuthorDate: Mon Jul 19 13:27:47 2021 +0530

    [CARBONDATA-4248] Fixed upper case column name in explain command
    
    Why is this PR needed?
    The explain command with an upper-case column name fails with a "key not found" exception.
    
    What changes were proposed in this PR?
    Changed the column name to lower case before converting the Spark data type to the Carbon data type (see the sketch below the commit message).
    
    Does this PR introduce any user interface change?
    No
    
    Is any new test case added?
    Yes
    
    This closes #4175
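
    A minimal sketch of the failure mode, assuming (as the patch implies)
    that the columnTypes map handed to translateDataType keeps its keys in
    lower case. The object name and the map values below are hypothetical
    stand-ins for illustration, not CarbonData code:

        object LowerCaseLookupSketch {
          // Stand-in for the lower-cased schema map passed to translateDataType.
          val columnTypes: Map[String, String] =
            Map("sai_cgi_ecgi" -> "StringType", "begin_time" -> "LongType")

          def main(args: Array[String]): Unit = {
            val name = "SAI_CGI_ECGI" // column name exactly as written in the query

            // Before the fix: Map.apply throws
            // java.util.NoSuchElementException: key not found: SAI_CGI_ECGI
            // columnTypes(name)

            // After the fix: normalize the name before the lookup, mirroring
            // columnTypes(name.toLowerCase()) in CarbonFilters.translateDataType.
            println(columnTypes(name.toLowerCase())) // prints StringType
          }
        }
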
---
 .../org/apache/spark/sql/optimizer/CarbonFilters.scala  |  2 +-
 .../spark/testsuite/dataload/TestLoadDataGeneral.scala  | 17 +++++++++++++++++
 2 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala b/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala
index 796c91e..b666668 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala
@@ -347,7 +347,7 @@ object CarbonFilters {
   }
 
   def translateDataType(name: String, columnTypes: Map[String, SparkDataType]): DataType = {
-    CarbonSparkDataSourceUtil.convertSparkToCarbonDataType(columnTypes(name))
+    CarbonSparkDataSourceUtil.convertSparkToCarbonDataType(columnTypes(name.toLowerCase()))
   }
 
   def translateColumn(name: String, dataType: SparkDataType): ColumnExpression = {
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala b/integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala
index 8235116..2456a23 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala
@@ -61,6 +61,23 @@ class TestLoadDataGeneral extends QueryTest with BeforeAndAfterEach {
     segment != null
   }
 
+  test("test explain with case sensitive") {
+    sql("drop table if exists carbon_table")
+    sql("drop table if exists parquet_table")
+    sql("create table IF NOT EXISTS carbon_table(`BEGIN_TIME` BIGINT," +
+      " `SAI_CGI_ECGI` STRING) stored as carbondata")
+    sql("create table IF NOT EXISTS parquet_table(CELL_NAME string, CGISAI 
string)" +
+      " stored as parquet")
+    val df = sql("explain extended with grpMainDatathroughput as (select" +
+      " from_unixtime(begin_time, 'yyyyMMdd') as data_time, SAI_CGI_ECGI from 
carbon_table)," +
+      " grpMainData as (select * from grpMainDatathroughput a JOIN(select 
CELL_NAME, CGISAI from" +
+      " parquet_table) b ON b.CGISAI=a.SAI_CGI_ECGI) " +
+      "select * from grpMainData a left join grpMainData b on 
a.cell_name=b.cell_name").collect()
+    assert(df(0).getString(0).contains("carbon_table"))
+    sql("drop table if exists carbon_table")
+    sql("drop table if exists parquet_table")
+  }
+
   test("test data loading CSV file") {
     val testData = s"$resourcesPath/sample.csv"
     checkAnswer(
