tpalfy commented on a change in pull request #3684: NIFI-6295: Refactored NiFiRecordSerDe to handle nested complex types
URL: https://github.com/apache/nifi/pull/3684#discussion_r322828012
 
 

 ##########
 File path: nifi-nar-bundles/nifi-hive-bundle/nifi-hive3-processors/src/main/java/org/apache/hive/streaming/NiFiRecordSerDe.java
 ##########
 @@ -187,152 +182,153 @@ private Object extractCurrentField(Record record, RecordField field, TypeInfo fi
                 }
                 switch (primitiveCategory) {
                     case BYTE:
-                        Integer bIntValue = record.getAsInt(fieldName);
-                        val = bIntValue == null ? null : bIntValue.byteValue();
+                        Integer bIntValue = DataTypeUtils.toInteger(fieldValue, fieldName);
+                        val = bIntValue.byteValue();
                         break;
                     case SHORT:
-                        Integer sIntValue = record.getAsInt(fieldName);
-                        val = sIntValue == null ? null : sIntValue.shortValue();
+                        Integer sIntValue = DataTypeUtils.toInteger(fieldValue, fieldName);
+                        val = sIntValue.shortValue();
                         break;
                     case INT:
-                        val = record.getAsInt(fieldName);
+                        val = DataTypeUtils.toInteger(fieldValue, fieldName);
                         break;
                     case LONG:
-                        val = record.getAsLong(fieldName);
+                        val = DataTypeUtils.toLong(fieldValue, fieldName);
                         break;
                     case BOOLEAN:
-                        val = record.getAsBoolean(fieldName);
+                        val = DataTypeUtils.toBoolean(fieldValue, fieldName);
                         break;
                     case FLOAT:
-                        val = record.getAsFloat(fieldName);
+                        val = DataTypeUtils.toFloat(fieldValue, fieldName);
                         break;
                     case DOUBLE:
-                        val = record.getAsDouble(fieldName);
+                        val = DataTypeUtils.toDouble(fieldValue, fieldName);
                         break;
                     case STRING:
                     case VARCHAR:
                     case CHAR:
-                        val = record.getAsString(fieldName);
+                        val = DataTypeUtils.toString(fieldValue, fieldName);
                         break;
                     case BINARY:
-                        Object[] array = record.getAsArray(fieldName);
-                        if (array == null) {
-                            return null;
+                        final ArrayDataType arrayDataType;
+                        if(fieldValue instanceof String) {
+                            // Treat this as an array of bytes
+                            arrayDataType = (ArrayDataType) RecordFieldType.ARRAY.getArrayDataType(RecordFieldType.BYTE.getDataType());
+                        } else {
+                            arrayDataType = (ArrayDataType) fieldDataType;
                         }
+                        Object[] array = DataTypeUtils.toArray(fieldValue, fieldName, arrayDataType.getElementType());
                         val = AvroTypeUtil.convertByteArray(array).array();
                         break;
                     case DATE:
-                        Date d = record.getAsDate(fieldName, field.getDataType().getFormat());
-                        if(d != null) {
-                            org.apache.hadoop.hive.common.type.Date hiveDate = new org.apache.hadoop.hive.common.type.Date();
-                            hiveDate.setTimeInMillis(d.getTime());
-                            val = hiveDate;
-                        } else {
-                            val = null;
-                        }
+                        Date d = DataTypeUtils.toDate(fieldValue, () -> DataTypeUtils.getDateFormat(fieldDataType.getFormat()), fieldName);
+                        org.apache.hadoop.hive.common.type.Date hiveDate = new org.apache.hadoop.hive.common.type.Date();
+                        hiveDate.setTimeInMillis(d.getTime());
+                        val = hiveDate;
                         break;
                     // ORC doesn't currently handle TIMESTAMPLOCALTZ
                     case TIMESTAMP:
-                        Timestamp ts = DataTypeUtils.toTimestamp(record.getValue(fieldName), () -> DataTypeUtils.getDateFormat(field.getDataType().getFormat()), fieldName);
-                        if(ts != null) {
-                            // Convert to Hive's Timestamp type
-                            org.apache.hadoop.hive.common.type.Timestamp hivetimestamp = new org.apache.hadoop.hive.common.type.Timestamp();
-                            hivetimestamp.setTimeInMillis(ts.getTime(), ts.getNanos());
-                            val = hivetimestamp;
-                        } else {
-                            val = null;
-                        }
+                        Timestamp ts = DataTypeUtils.toTimestamp(fieldValue, () -> DataTypeUtils.getDateFormat(fieldDataType.getFormat()), fieldName);
+                        // Convert to Hive's Timestamp type
+                        org.apache.hadoop.hive.common.type.Timestamp hivetimestamp = new org.apache.hadoop.hive.common.type.Timestamp();
+                        hivetimestamp.setTimeInMillis(ts.getTime(), ts.getNanos());
+                        val = hivetimestamp;
                         break;
                     case DECIMAL:
-                        Double value = record.getAsDouble(fieldName);
-                        val = value == null ? null : HiveDecimal.create(value);
+                        if(fieldValue instanceof BigDecimal){
+                            val = HiveDecimal.create((BigDecimal) fieldValue);
+                        } else if (fieldValue instanceof Double){
+                            val = HiveDecimal.create((Double)fieldValue);
+                        } else if (fieldValue instanceof Number) {
+                            val = HiveDecimal.create(((Number)fieldValue).doubleValue());
+                        } else {
+                            val = HiveDecimal.create(DataTypeUtils.toDouble(fieldValue, fieldDataType.getFormat()));
+                        }
                         break;
                     default:
                         throw new IllegalArgumentException("Field " + fieldName + " cannot be converted to type: " + primitiveCategory.name());
                 }
                 break;
             case LIST:
-                Object[] value = record.getAsArray(fieldName);
-                val = value == null ? null : Arrays.asList(value);
+                Object[] value = (Object[])fieldValue;
+                ListTypeInfo listTypeInfo = (ListTypeInfo)fieldTypeInfo;
+                TypeInfo nestedType = listTypeInfo.getListElementTypeInfo();
+                List<Object> converted = new ArrayList<>(value.length);
+                for (Object o : value) {
+                    converted.add(extractCurrentField(o, fieldName, ((ArrayDataType) fieldDataType).getElementType(), nestedType));
 
 Review comment:
   Minor: I think `fieldName` could use a suffix here (something like `fieldName + "[" + fieldIndex + "]"`) so a conversion error identifies which list element failed.
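   A minimal sketch of that suggestion, assuming a local counter (the `fieldIndex` name is hypothetical) is maintained alongside the loop from the diff above:

   ```java
   // Hypothetical variant of the LIST loop above: suffix the field name with
   // the element index so a failed conversion reports e.g. "myField[3]".
   int fieldIndex = 0;
   for (Object o : value) {
       converted.add(extractCurrentField(o, fieldName + "[" + fieldIndex + "]",
               ((ArrayDataType) fieldDataType).getElementType(), nestedType));
       fieldIndex++;
   }
   ```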
