This is an automated email from the ASF dual-hosted git repository.

ajantha pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new 5963df0  [CARBONDATA-3853] Data load failure when loading with bucket column as DATE data type
5963df0 is described below

commit 5963df031c0906ce746c64867227678bce7720f3
Author: Mahesh Raju Somalaraju <mahesh.somalar...@huawei.com>
AuthorDate: Wed Jul 8 22:27:23 2020 +0530

    [CARBONDATA-3853] Data load failure when loading with bucket column as DATE data type
    
    Why is this PR needed?
    Loading data into a table whose bucket column is of the DATE data type
    fails with a ClassCastException. The failure occurs because the hash
    strategy dispatch has no case for the DATE data type: instead of being
    routed to IntegralHash, a DATE column falls through to StringHash, and
    the resulting type cast throws the ClassCastException.
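
    A minimal sketch of the failure mode (the int-surrogate representation
    of DATE values and this standalone class are assumptions for
    illustration; only the IntegralHash/StringHash names come from the
    patch below):

        // Hypothetical illustration, not CarbonData source.
        public class DateBucketHashSketch {
          public static void main(String[] args) {
            // Assume a DATE cell reaches the partitioner as an int surrogate
            // (e.g. an encoded day count), i.e. the row field holds an Integer.
            Object dateCell = Integer.valueOf(18450);
            try {
              // StringHash-style handling casts the cell to String, which is
              // the ClassCastException reported in CARBONDATA-3853:
              String s = (String) dateCell;
              System.out.println(s.hashCode());
            } catch (ClassCastException e) {
              System.out.println("String-based hashing fails: " + e);
            }
            // IntegralHash-style handling matches what the cell really holds:
            System.out.println(((Integer) dateCell).intValue());
          }
        }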
    
    What changes were proposed in this PR?
    Added an OR (||) condition so that the DATE data type is also routed to
    the IntegralHash getHash() method when computing the hash code.
    
    Does this PR introduce any user interface change?
    No
    
    Is any new testcase added?
    Yes
    
    This closes #3830
---
 .../bucketing/TableBucketingTestCase.scala         | 35 +++++++++++++++++++++-
 .../impl/SparkHashExpressionPartitionerImpl.java   |  5 ++--
 2 files changed, 37 insertions(+), 3 deletions(-)

diff --git a/integration/spark/src/test/scala/org/apache/spark/carbondata/bucketing/TableBucketingTestCase.scala b/integration/spark/src/test/scala/org/apache/spark/carbondata/bucketing/TableBucketingTestCase.scala
index 5d7dbda..3e48003 100644
--- a/integration/spark/src/test/scala/org/apache/spark/carbondata/bucketing/TableBucketingTestCase.scala
+++ b/integration/spark/src/test/scala/org/apache/spark/carbondata/bucketing/TableBucketingTestCase.scala
@@ -17,6 +17,9 @@
 
 package org.apache.spark.carbondata.bucketing
 
+import java.text.SimpleDateFormat
+import java.sql.Date
+
 import org.apache.spark.sql.{CarbonEnv, Row}
 import org.apache.spark.sql.execution.WholeStageCodegenExec
 import org.apache.spark.sql.execution.exchange.Exchange
@@ -24,7 +27,6 @@ import org.apache.spark.sql.execution.joins.SortMergeJoinExec
 import org.apache.spark.sql.test.util.QueryTest
 import org.scalatest.BeforeAndAfterAll
 import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
-import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.metadata.CarbonMetadata
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable
 import org.apache.carbondata.core.constants.CarbonCommonConstants
@@ -105,6 +107,37 @@ class TableBucketingTestCase extends QueryTest with BeforeAndAfterAll {
     }
   }
 
+  test("test load data with DATE data type as bucket column") {
+    sql("DROP TABLE IF EXISTS table_bucket")
+    sql("""
+           CREATE TABLE IF NOT EXISTS table_bucket
+           (ID Int, date DATE, starttime Timestamp, country String,
+           name String, phonetype String, serialname String, salary Int)
+           STORED AS carbondata TBLPROPERTIES ('BUCKET_NUMBER'='2', 'BUCKET_COLUMNS'='date')
+        """)
+    sql(
+      s"""
+           LOAD DATA LOCAL INPATH '$resourcesPath/timeStampFormatData1.csv' into table table_bucket
+           OPTIONS('dateformat' = 'yyyy/MM/dd','timestampformat'='yyyy-MM-dd HH:mm:ss')
+           """)
+    sql(
+      s"""
+           LOAD DATA LOCAL INPATH '$resourcesPath/timeStampFormatData2.csv' into table table_bucket
+           OPTIONS('dateformat' = 'yyyy-MM-dd','timestampformat'='yyyy/MM/dd HH:mm:ss')
+           """)
+
+    val sdf = new SimpleDateFormat("yyyy-MM-dd")
+    checkAnswer(
+      sql("SELECT date FROM table_bucket WHERE ID = 1"),
+      Seq(Row(new Date(sdf.parse("2015-07-23").getTime)))
+    )
+    checkAnswer(
+      sql("SELECT date FROM table_bucket WHERE ID = 18"),
+      Seq(Row(new Date(sdf.parse("2015-07-25").getTime)))
+    )
+    sql("DROP TABLE IF EXISTS table_bucket")
+  }
+
   test("test IUD of bucket table") {
     sql("CREATE TABLE t40 (ID Int, date Timestamp, country String, name 
String, phonetype String," +
       "serialname String, salary Int) STORED AS carbondata TBLPROPERTIES " +
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/partition/impl/SparkHashExpressionPartitionerImpl.java b/processing/src/main/java/org/apache/carbondata/processing/loading/partition/impl/SparkHashExpressionPartitionerImpl.java
index 3853bf2..92e1320 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/partition/impl/SparkHashExpressionPartitionerImpl.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/partition/impl/SparkHashExpressionPartitionerImpl.java
@@ -48,8 +48,9 @@ public class SparkHashExpressionPartitionerImpl implements Partitioner<CarbonRow
       DataType dataType = columnSchemas.get(i).getDataType();
       if (dataType == DataTypes.LONG || dataType == DataTypes.DOUBLE) {
         hashes[i] = new LongHash(indexes.get(i));
-      } else if (dataType == DataTypes.SHORT || dataType == DataTypes.INT ||
-          dataType == DataTypes.FLOAT || dataType == DataTypes.BOOLEAN) {
+      } else if (dataType == DataTypes.SHORT || dataType == DataTypes.INT
+          || dataType == DataTypes.FLOAT || dataType == DataTypes.BOOLEAN
+          || dataType == DataTypes.DATE) {
         hashes[i] = new IntegralHash(indexes.get(i));
       } else if (DataTypes.isDecimal(dataType)) {
         hashes[i] = new DecimalHash(indexes.get(i));
