ajantha-bhat commented on a change in pull request #3538: [CARBONDATA-3637] 
Optimize insert into flow
URL: https://github.com/apache/carbondata/pull/3538#discussion_r378771038
 
 

 ##########
 File path: 
integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
 ##########
 @@ -834,4 +843,179 @@ object CommonUtil {
     displaySize
   }
 
+  def getObjectArrayFromInternalRowAndConvertComplexType(row: InternalRow,
+      fieldTypes: Seq[DataType],
+      outputArrayLength: Int): Array[AnyRef] = {
+    val data = new Array[AnyRef](outputArrayLength)
+    var i = 0
+    val fieldTypesLen = fieldTypes.length
+    while (i < fieldTypesLen) {
+      if (!row.isNullAt(i)) {
+        fieldTypes(i) match {
+          case StringType =>
+            data(i) = row.getString(i)
+          case d: DecimalType =>
+            data(i) = row.getDecimal(i, d.precision, d.scale).toJavaBigDecimal
+          case arrayType : ArrayType =>
+            data(i) = convertSparkComplexTypeToCarbonObject(row.getArray(i), 
arrayType)
+          case structType : StructType =>
+            data(i) = convertSparkComplexTypeToCarbonObject(row.getStruct(i,
+              structType.fields.length), structType)
+          case mapType : MapType =>
+            data(i) = convertSparkComplexTypeToCarbonObject(row.getMap(i), 
mapType)
+          case other =>
+            data(i) = row.get(i, other)
+        }
+      }
+      i += 1
+    }
+    data
+  }
+
+  /**
+   * After converting complex objects to carbon objects, need to convert to 
byte array
+   *
+   * @param row
+   * @param fields
+   * @param dataFieldsWithComplexDataType
+   * @return
+   */
+  def getObjectArrayFromInternalRowAndConvertComplexTypeForGlobalSort(
+      row: InternalRow,
+      fields: Seq[StructField],
+      dataFieldsWithComplexDataType: Map[String, GenericDataType[_]]): 
Array[AnyRef] = {
+    val data = new Array[AnyRef](fields.size)
+    val badRecordLogHolder = new BadRecordLogHolder();
+    var i = 0
+    val fieldTypesLen = fields.length
+    while (i < fieldTypesLen) {
+      if (!row.isNullAt(i)) {
+        fields(i).dataType match {
+          case StringType =>
+            data(i) = 
DataTypeUtil.getBytesDataDataTypeForNoDictionaryColumn(row.getString(i),
+              DataTypes.STRING)
+          case d: DecimalType =>
+            data(i) = row.getDecimal(i, d.precision, d.scale).toJavaBigDecimal
+          case arrayType : ArrayType =>
+            val result = convertSparkComplexTypeToCarbonObject(row.get(i, 
arrayType), arrayType)
+            // convert carbon complex object to byte array
+            val byteArray: ByteArrayOutputStream = new ByteArrayOutputStream()
+            val dataOutputStream: DataOutputStream = new 
DataOutputStream(byteArray)
+            
dataFieldsWithComplexDataType(fields(i).name).asInstanceOf[ArrayDataType]
+              .writeByteArray(result.asInstanceOf[ArrayObject],
+                dataOutputStream,
+                badRecordLogHolder)
+            dataOutputStream.close()
+            data(i) = byteArray.toByteArray.asInstanceOf[AnyRef]
 
 Review comment:
  Both branches use different `asInstanceOf[]` target types, so an extracted common method would need the if/else branching again — not useful here.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

Reply via email to