sadikovi commented on code in PR #42618:
URL: https://github.com/apache/spark/pull/42618#discussion_r1302363525
##########
connector/avro/src/main/scala/org/apache/spark/sql/avro/SchemaConverters.scala:
##########
@@ -142,18 +143,30 @@ object SchemaConverters {
if (avroSchema.getTypes.asScala.exists(_.getType == NULL)) {
// In case of a union with null, eliminate it and make a recursive
call
val remainingUnionTypes = AvroUtils.nonNullUnionBranches(avroSchema)
- if (remainingUnionTypes.size == 1) {
- toSqlTypeHelper(remainingUnionTypes.head, existingRecordNames, avroOptions)
- .copy(nullable = true)
- } else {
- toSqlTypeHelper(
- Schema.createUnion(remainingUnionTypes.asJava),
- existingRecordNames,
- avroOptions).copy(nullable = true)
- }
+ toSqlTypeHelper(
+ Schema.createUnion(remainingUnionTypes.asJava),
+ existingRecordNames,
+ avroOptions).copy(nullable = true)
} else avroSchema.getTypes.asScala.map(_.getType).toSeq match {
case Seq(t1) =>
- toSqlTypeHelper(avroSchema.getTypes.get(0), existingRecordNames, avroOptions)
+ // If spark.sql.avro.alwaysConvertUnionToStructType is set to false (default),
+ // we convert Avro union with a single primitive type into a primitive Spark type
+ // instead of a StructType.
+ if (!SQLConf.get.avroAlwaysConvertUnionToStruct) {
+ toSqlTypeHelper(avroSchema.getTypes.get(0), existingRecordNames, avroOptions)
Review Comment:
It might be good to compare the two approaches and see which one makes more sense.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]