Repository: incubator-carbondata
Updated Branches:
  refs/heads/master 30444b837 -> 74358b05a


Added conversion class for carbon datatypes


Project: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/repo
Commit: 
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/commit/3fb37448
Tree: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/tree/3fb37448
Diff: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/diff/3fb37448

Branch: refs/heads/master
Commit: 3fb374480f95c657f4977b426a20bdedc1e89019
Parents: 30444b8
Author: nareshpr <prnaresh.nar...@gmail.com>
Authored: Sat Sep 17 18:34:13 2016 +0530
Committer: nareshpr <prnaresh.nar...@gmail.com>
Committed: Sat Sep 17 21:09:44 2016 +0530

----------------------------------------------------------------------
 .../spark/util/DataTypeConverterUtil.scala      | 55 ++++++++++++++++++++
 .../execution/command/carbonTableSchema.scala   | 31 +++--------
 2 files changed, 62 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/3fb37448/integration/spark/src/main/scala/org/apache/carbondata/spark/util/DataTypeConverterUtil.scala
----------------------------------------------------------------------
diff --git 
a/integration/spark/src/main/scala/org/apache/carbondata/spark/util/DataTypeConverterUtil.scala
 
b/integration/spark/src/main/scala/org/apache/carbondata/spark/util/DataTypeConverterUtil.scala
new file mode 100644
index 0000000..1fee428
--- /dev/null
+++ 
b/integration/spark/src/main/scala/org/apache/carbondata/spark/util/DataTypeConverterUtil.scala
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.spark.util
+
+import org.apache.carbondata.core.carbon.metadata.datatype.DataType
+
+/**
+ * Converts between SQL data-type names and carbon [[DataType]] values.
+ */
+object DataTypeConverterUtil {
+  /**
+   * Maps a SQL data-type name (matched case-insensitively) to the carbon [[DataType]].
+   * Accepts every name produced by [[convertToString]] so the two conversions round-trip.
+   * Fails with a descriptive error for unsupported names.
+   */
+  def convertToCarbonType(dataType: String): DataType = {
+    dataType.toLowerCase match {
+      case "string" => DataType.STRING
+      case "int" => DataType.INT
+      case "integer" => DataType.INT
+      case "tinyint" => DataType.SHORT
+      // "smallint" is the name convertToString emits for SHORT; accept it here too.
+      case "smallint" => DataType.SHORT
+      case "short" => DataType.SHORT
+      case "long" => DataType.LONG
+      case "bigint" => DataType.LONG
+      case "numeric" => DataType.DOUBLE
+      case "double" => DataType.DOUBLE
+      case "decimal" => DataType.DECIMAL
+      case "timestamp" => DataType.TIMESTAMP
+      case "array" => DataType.ARRAY
+      case "struct" => DataType.STRUCT
+      case _ => sys.error("Unsupported data type : " + dataType)
+    }
+  }
+
+  /**
+   * Maps a carbon [[DataType]] to its SQL data-type name.
+   * Fails with a descriptive error (instead of a bare MatchError) for unsupported types.
+   */
+  def convertToString(dataType: DataType): String = {
+    dataType match {
+      case DataType.STRING => "string"
+      case DataType.SHORT => "smallint"
+      case DataType.INT => "int"
+      case DataType.LONG => "bigint"
+      case DataType.DOUBLE => "double"
+      case DataType.DECIMAL => "decimal"
+      case DataType.TIMESTAMP => "timestamp"
+      case DataType.ARRAY => "array"
+      case DataType.STRUCT => "struct"
+      case other => sys.error("Unsupported data type : " + other)
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/3fb37448/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
----------------------------------------------------------------------
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
 
b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
index da79f6d..b8c56d3 100644
--- 
a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
+++ 
b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchema.scala
@@ -59,8 +59,7 @@ import 
org.apache.carbondata.spark.exception.MalformedCarbonCommandException
 import org.apache.carbondata.spark.load._
 import org.apache.carbondata.spark.partition.api.impl.QueryPartitionHelper
 import org.apache.carbondata.spark.rdd.CarbonDataRDDFactory
-import org.apache.carbondata.spark.util.{CarbonScalaUtil, GlobalDictionaryUtil}
-
+import org.apache.carbondata.spark.util.{CarbonScalaUtil, 
DataTypeConverterUtil, GlobalDictionaryUtil}
 
 case class tableModel(
     ifNotExistsSet: Boolean,
@@ -183,7 +182,8 @@ class TableNewProcessor(cm: tableModel, sqlContext: 
SQLContext) {
         val encoders = new java.util.ArrayList[Encoding]()
         encoders.add(Encoding.DICTIONARY)
         val columnSchema: ColumnSchema = getColumnSchema(
-          normalizeType(field.dataType.getOrElse("")), 
field.name.getOrElse(field.column), index,
+          
DataTypeConverterUtil.convertToCarbonType(field.dataType.getOrElse("")),
+          field.name.getOrElse(field.column), index,
           isCol = true, encoders, isDimensionCol = true, rowGroup, 
field.precision, field.scale)
         allColumns ++= Seq(columnSchema)
         index = index + 1
@@ -240,7 +240,8 @@ class TableNewProcessor(cm: tableModel, sqlContext: 
SQLContext) {
     cm.dimCols.foreach(field => {
       val encoders = new java.util.ArrayList[Encoding]()
       encoders.add(Encoding.DICTIONARY)
-      val columnSchema: ColumnSchema = 
getColumnSchema(normalizeType(field.dataType.getOrElse("")),
+      val columnSchema: ColumnSchema = getColumnSchema(
+        
DataTypeConverterUtil.convertToCarbonType(field.dataType.getOrElse("")),
         field.name.getOrElse(field.column),
         index,
         isCol = true,
@@ -259,7 +260,8 @@ class TableNewProcessor(cm: tableModel, sqlContext: 
SQLContext) {
 
     cm.msrCols.foreach(field => {
       val encoders = new java.util.ArrayList[Encoding]()
-      val coloumnSchema: ColumnSchema = 
getColumnSchema(normalizeType(field.dataType.getOrElse("")),
+      val coloumnSchema: ColumnSchema = getColumnSchema(
+        
DataTypeConverterUtil.convertToCarbonType(field.dataType.getOrElse("")),
         field.name.getOrElse(field.column),
         index,
         isCol = true,
@@ -417,25 +419,6 @@ class TableNewProcessor(cm: tableModel, sqlContext: 
SQLContext) {
     tableInfo
   }
 
-  private def normalizeType(dataType: String): DataType = {
-    dataType match {
-      case "String" => DataType.STRING
-      case "int" => DataType.INT
-      case "Integer" => DataType.INT
-      case "tinyint" => DataType.SHORT
-      case "short" => DataType.SHORT
-      case "Long" => DataType.LONG
-      case "BigInt" => DataType.LONG
-      case "Numeric" => DataType.DOUBLE
-      case "Double" => DataType.DOUBLE
-      case "Decimal" => DataType.DECIMAL
-      case "Timestamp" => DataType.TIMESTAMP
-      case "Array" => DataType.ARRAY
-      case "Struct" => DataType.STRUCT
-      case _ => sys.error("Unsupported data type : " + dataType)
-    }
-  }
-
   //  For checking if the specified col group columns are specified in fields 
list.
   protected def checkColGroupsValidity(colGrps: Seq[String],
       allCols: Seq[ColumnSchema],

Reply via email to