xkrogen commented on a change in pull request #31333:
URL: https://github.com/apache/spark/pull/31333#discussion_r568762858
##########
File path: core/src/main/scala/org/apache/spark/TestUtils.scala
##########
@@ -223,24 +224,37 @@ private[spark] object TestUtils {
  /**
   * Asserts that exception message contains the message. Please note this checks all
-   * exceptions in the tree.
+   * exceptions in the tree. If a type parameter `E` is supplied, this will additionally confirm
+   * that the exception is a subtype of the exception provided in the type parameter.
   */
-  def assertExceptionMsg(exception: Throwable, msg: String, ignoreCase: Boolean = false): Unit = {
-    def contain(msg1: String, msg2: String): Boolean = {
+  def assertExceptionMsg[E <: Throwable : ClassTag](
+      exception: Throwable,
Review comment:
Argh. Try as I might, I can't convince my IDE to use 4 spaces here.
Thanks for the catch.
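For anyone else fighting their IDE over this: the target is the usual 4-space continuation indent for a wrapped declaration, with the body back at 2 spaces. A minimal sketch (assuming the `msg` and `ignoreCase` parameters carry over unchanged from the old signature, since the diff truncates the parameter list):

```scala
import scala.reflect.ClassTag

// Wrapped parameters get a 4-space continuation indent...
def assertExceptionMsg[E <: Throwable : ClassTag](
    exception: Throwable,
    msg: String,
    ignoreCase: Boolean = false): Unit = {
  // ...while the method body resumes at the normal 2-space indent.
}
```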
##########
File path: external/avro/src/main/scala/org/apache/spark/sql/avro/AvroDeserializer.scala
##########
@@ -64,30 +65,35 @@ private[sql] class AvroDeserializer(
  private val timestampRebaseFunc = DataSourceUtils.creteTimestampRebaseFuncInRead(
    datetimeRebaseMode, "Avro")
-  private val converter: Any => Option[Any] = rootCatalystType match {
-    // A shortcut for empty schema.
-    case st: StructType if st.isEmpty =>
-      (data: Any) => Some(InternalRow.empty)
-
-    case st: StructType =>
-      val resultRow = new SpecificInternalRow(st.map(_.dataType))
-      val fieldUpdater = new RowUpdater(resultRow)
-      val applyFilters = filters.skipRow(resultRow, _)
-      val writer = getRecordWriter(rootAvroType, st, Nil, applyFilters)
-      (data: Any) => {
-        val record = data.asInstanceOf[GenericRecord]
-        val skipRow = writer(fieldUpdater, record)
-        if (skipRow) None else Some(resultRow)
-      }
+  private val converter: Any => Option[Any] = try {
+    rootCatalystType match {
+      // A shortcut for empty schema.
+      case st: StructType if st.isEmpty =>
+        (_: Any) => Some(InternalRow.empty)
+
+      case st: StructType =>
+        val resultRow = new SpecificInternalRow(st.map(_.dataType))
+        val fieldUpdater = new RowUpdater(resultRow)
+        val applyFilters = filters.skipRow(resultRow, _)
+        val writer = getRecordWriter(rootAvroType, st, Nil, Nil, applyFilters)
+        (data: Any) => {
+          val record = data.asInstanceOf[GenericRecord]
+          val skipRow = writer(fieldUpdater, record)
+          if (skipRow) None else Some(resultRow)
+        }
-    case _ =>
-      val tmpRow = new SpecificInternalRow(Seq(rootCatalystType))
-      val fieldUpdater = new RowUpdater(tmpRow)
-      val writer = newWriter(rootAvroType, rootCatalystType, Nil)
-      (data: Any) => {
-        writer(fieldUpdater, 0, data)
-        Some(tmpRow.get(0, rootCatalystType))
-      }
+      case _ =>
+        val tmpRow = new SpecificInternalRow(Seq(rootCatalystType))
+        val fieldUpdater = new RowUpdater(tmpRow)
+        val writer = newWriter(rootAvroType, rootCatalystType, Nil, Nil)
+        (data: Any) => {
+          writer(fieldUpdater, 0, data)
+          Some(tmpRow.get(0, rootCatalystType))
+        }
+    }
+  } catch {
+    case ise: IncompatibleSchemaException => throw new IncompatibleSchemaException(
+      s"Cannot convert Avro type $rootAvroType to Catalyst type $rootCatalystType.", ise)
Review comment:
Thanks for the tip, this is much better! 👍🏼
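For anyone following along, the reason chaining the cause is nice: the outer exception adds root-schema context while `getCause` still carries the field-level detail. A minimal sketch with placeholder messages (`IncompatibleSchemaException(msg, cause)` is the two-arg constructor used in the diff above):

```scala
// Inner exception: the precise field-level incompatibility.
val cause = new IncompatibleSchemaException(
  "Cannot convert Avro field 'foo.bar' to Catalyst field 'foo.bar' ...")

// Outer exception: root-schema context, with the inner one chained as the cause.
val wrapped = new IncompatibleSchemaException(
  "Cannot convert Avro type <rootAvroType> to Catalyst type <rootCatalystType>.", cause)

// Both layers stay available to callers (the new AvroSerdeSuite asserts on exactly this):
assert(wrapped.getMessage.startsWith("Cannot convert Avro type"))
assert(wrapped.getCause.getMessage.contains("field 'foo.bar'"))
```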
##########
File path: external/avro/src/main/scala/org/apache/spark/sql/avro/AvroSerializer.scala
##########
@@ -93,7 +99,10 @@ private[sql] class AvroSerializer(
  private lazy val decimalConversions = new DecimalConversion()
-  private def newConverter(catalystType: DataType, avroType: Schema): Converter = {
+  private def newConverter(catalystType: DataType, avroType: Schema,
+      catalystPath: Seq[String], avroPath: Seq[String]): Converter = {
Review comment:
I used `sqlPath` in `AvroDeserializer` because `getRecordWriter` was already referring to
the Catalyst datatype as `sqlType` and I wanted to be consistent. But now I see that even
`AvroDeserializer#newWriter` refers to this as `catalystType`, so I will make it consistent
throughout by renaming both `sqlPath` and `sqlType`. Thanks for the callout.
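Concretely, the naming I plan to converge on, sketched as signatures (a hypothetical final form; the `newWriter` parameter order follows its call site in the deserializer diff, and this hasn't been pushed yet):

```scala
// Serializer side, as already in this PR:
//   newConverter(catalystType: DataType, avroType: Schema,
//       catalystPath: Seq[String], avroPath: Seq[String]): Converter
//
// Deserializer side, after renaming sqlType -> catalystType and sqlPath -> catalystPath:
//   newWriter(avroType: Schema, catalystType: DataType,
//       avroPath: Seq[String], catalystPath: Seq[String])
```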
##########
File path: external/avro/src/test/scala/org/apache/spark/sql/avro/AvroSerdeSuite.scala
##########
@@ -0,0 +1,165 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.avro
+
+import org.apache.avro.{Schema, SchemaBuilder}
+import org.apache.avro.generic.GenericRecordBuilder
+
+import org.apache.spark.SparkFunSuite
+import org.apache.spark.sql.types.{IntegerType, StructType}
+
+/**
+ * Tests for [[AvroSerializer]] and [[AvroDeserializer]], complementing those in [[AvroSuite]]
+ * with a more specific focus on those classes.
+ */
+class AvroSerdeSuite extends SparkFunSuite {
+  import AvroSerdeSuite._
+
+  test("Test basic conversion") {
+    val avro = createNestedAvroSchemaWithFields("foo", _.optionalInt("bar"))
+    val record = new GenericRecordBuilder(avro)
+      .set("foo", new GenericRecordBuilder(avro.getField("foo").schema()).set("bar", 42).build())
+      .build()
+    val serializer = new AvroSerializer(CATALYST_STRUCT, avro, false)
+    val deserializer = new AvroDeserializer(avro, CATALYST_STRUCT)
+    assert(serializer.serialize(deserializer.deserialize(record).get) === record)
+  }
+
+  test("Fail to convert with field type mismatch") {
+    val avro = createAvroSchemaWithTopLevelFields(_.requiredInt("foo"))
+
+    assertFailedConversionMessage(avro, deserialize = true,
+      "Cannot convert Avro field 'foo' to Catalyst field 'foo' because schema is incompatible " +
+        s"""(avroType = "int", sqlType = ${CATALYST_STRUCT.head.dataType.sql})""")
+
+    assertFailedConversionMessage(avro, deserialize = false,
+      s"Cannot convert Catalyst field 'foo' to Avro field 'foo' because schema is incompatible " +
+        s"""(sqlType = ${CATALYST_STRUCT.head.dataType.sql}, avroType = "int")""")
+  }
+
+  test("Fail to convert with nested field type mismatch") {
+    val avro = createNestedAvroSchemaWithFields("foo", _.optionalFloat("bar"))
+
+    assertFailedConversionMessage(avro, deserialize = true,
+      "Cannot convert Avro field 'foo.bar' to Catalyst field 'foo.bar' because schema is " +
+        """incompatible (avroType = "float", sqlType = INT)""")
+
+    assertFailedConversionMessage(avro, deserialize = false,
+      "Cannot convert Catalyst field 'foo.bar' to Avro field 'foo.bar' because " +
+        """schema is incompatible (sqlType = INT, avroType = "float")""")
+  }
+
+  test("Fail to convert with nested field name mismatch") {
+    val avro = createNestedAvroSchemaWithFields("foo", _.optionalInt("NOTbar"))
+    val nonnullCatalyst = new StructType()
+      .add("foo", new StructType().add("bar", IntegerType, nullable = false))
+
+    // deserialize should have no issues when 'bar' is nullable but fail when it is nonnull
+    new AvroDeserializer(avro, CATALYST_STRUCT)
+    assertFailedConversionMessage(avro, deserialize = true,
+      "Cannot find non-nullable field 'foo.bar' in Avro schema.",
+      nonnullCatalyst)
+
+    // serialize fails whether or not 'bar' is nullable
+    val expectMsg = "Cannot find field 'foo.bar' in Avro schema at field 'foo'"
+    assertFailedConversionMessage(avro, deserialize = false, expectMsg)
+    assertFailedConversionMessage(avro, deserialize = false, expectMsg, nonnullCatalyst)
+  }
+
+  test("Fail to convert with deeply nested field type mismatch") {
+    val avro = SchemaBuilder.builder().record("toptest").fields()
+      .name("top").`type`(createNestedAvroSchemaWithFields("foo", _.optionalFloat("bar")))
+      .noDefault().endRecord()
+    val catalyst = new StructType().add("top", CATALYST_STRUCT)
+
+    assertFailedConversionMessage(avro, deserialize = true,
+      "Cannot convert Avro field 'top.foo.bar' to Catalyst field 'top.foo.bar' because schema " +
+        """is incompatible (avroType = "float", sqlType = INT)""",
+      catalyst)
+
+    assertFailedConversionMessage(avro, deserialize = false,
+      "Cannot convert Catalyst field 'top.foo.bar' to Avro field 'top.foo.bar' because schema is " +
+        """incompatible (sqlType = INT, avroType = "float")""",
+      catalyst)
+  }
+
+  test("Fail to convert for serialization with field count mismatch") {
+    val tooManyFields = createAvroSchemaWithTopLevelFields(_.optionalInt("foo").optionalLong("bar"))
+    assertFailedConversionMessage(tooManyFields, deserialize = false,
+      "Avro top-level record schema length (2) " +
+        "doesn't match Catalyst top-level record schema length (1)")
+
+    val tooFewFields = createAvroSchemaWithTopLevelFields(f => f)
+    assertFailedConversionMessage(tooFewFields, deserialize = false,
+      "Avro top-level record schema length (0) " +
+        "doesn't match Catalyst top-level record schema length (1)")
+  }
+
+  /**
+   * Attempt to convert `catalystSchema` to `avroSchema` (or vice-versa if `deserialize` is true),
+   * assert that it fails, and assert that the _cause_ of the thrown exception has a message
+   * matching `expectedCauseMessage`.
+   */
+  private def assertFailedConversionMessage(avroSchema: Schema,
+      deserialize: Boolean,
+      expectedCauseMessage: String,
+      catalystSchema: StructType = CATALYST_STRUCT): Unit = {
+    val e = intercept[IncompatibleSchemaException] {
+      if (deserialize) {
+        new AvroDeserializer(avroSchema, catalystSchema)
+      } else {
+        new AvroSerializer(catalystSchema, avroSchema, false)
+      }
+    }
+    val expectMsg = if (deserialize) {
+      s"Cannot convert Avro type $avroSchema to Catalyst type ${catalystSchema.sql}."
+    } else {
+      s"Cannot convert Catalyst type ${catalystSchema.sql} to Avro type $avroSchema."
+    }
+    assert(e.getMessage === expectMsg)
+    assert(e.getCause.getMessage === expectedCauseMessage)
+  }
+}
+
+
+object AvroSerdeSuite {
+
+  private val CATALYST_STRUCT = new StructType()
+    .add("foo", new StructType().add("bar", IntegerType))
+
+  /**
+   * Convenience method to create a top-level Avro schema with a single nested record
+   * (at field name `nestedRecordFieldName`) which has fields as defined by those set
+   * on the field assembler using `f`.
+   */
+  private def createNestedAvroSchemaWithFields(
+      nestedRecordFieldName: String,
Review comment:
Argh. This time I had the good sense to audit for any more bad instances
and found one additional spot. Thank you.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]