Github user cloud-fan commented on a diff in the pull request:
https://github.com/apache/spark/pull/22037#discussion_r209412578
--- Diff: external/avro/src/test/scala/org/apache/spark/sql/avro/AvroSuite.scala ---
@@ -494,6 +522,68 @@ class AvroSuite extends QueryTest with SharedSQLContext with SQLTestUtils {
     checkAnswer(df, expected)
   }
+  test("Logical type: Decimal") {
+    val expected = Seq("1.23", "4.56", "78.90", "-1", "-2.31")
+      .map { x => Row(new java.math.BigDecimal(x), new java.math.BigDecimal(x)) }
+    val df = spark.read.format("avro").load(decimalAvro)
+
+    checkAnswer(df, expected)
+
+    val avroSchema = s"""
+      {
+        "namespace": "logical",
+        "type": "record",
+        "name": "test",
+        "fields": [
+          {"name": "bytes", "type":
+            {"type": "bytes", "logicalType": "decimal", "precision": 4, "scale": 2}
+          },
+          {"name": "fixed", "type":
+            {"type": "fixed", "size": 5, "logicalType": "decimal",
+              "precision": 4, "scale": 2, "name": "foo"}
+          }
+        ]
+      }
+    """
+
+    checkAnswer(spark.read.format("avro").option("avroSchema", avroSchema).load(decimalAvro),
+      expected)
+
+    withTempPath { dir =>
+      df.write.format("avro").save(dir.toString)
+      checkAnswer(spark.read.format("avro").load(dir.toString), expected)
+    }
+  }
+
+  test("Logical type: Decimal with too large precision") {
+    withTempDir { dir =>
+      val schema = new Schema.Parser().parse("""{
+        "namespace": "logical",
+        "type": "record",
+        "name": "test",
+        "fields": [{
+          "name": "decimal",
+          "type": {"type": "bytes", "logicalType": "decimal", "precision": 4, "scale": 2}
+        }]
+      }""")
+      val datumWriter = new GenericDatumWriter[GenericRecord](schema)
+      val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
+      dataFileWriter.create(schema, new File(s"$dir.avro"))
--- End diff ---
Let's either always use Python to write the test files, or always use Java.
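
For reference, sticking with the Avro Java API end to end could look roughly like the sketch below for producing the decimal fixture from the test itself (the output path and the over-precision sample value here are placeholders I made up, not something taken from this PR):

```scala
import java.io.File
import java.math.BigDecimal
import java.nio.ByteBuffer

import org.apache.avro.Schema
import org.apache.avro.file.DataFileWriter
import org.apache.avro.generic.{GenericData, GenericDatumWriter, GenericRecord}

// Same shape of schema as in the test above: a bytes-backed decimal(4, 2).
val schema = new Schema.Parser().parse(
  """{
    |  "namespace": "logical",
    |  "type": "record",
    |  "name": "test",
    |  "fields": [{
    |    "name": "decimal",
    |    "type": {"type": "bytes", "logicalType": "decimal", "precision": 4, "scale": 2}
    |  }]
    |}""".stripMargin)

val datumWriter = new GenericDatumWriter[GenericRecord](schema)
val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
// Placeholder output path for this sketch.
dataFileWriter.create(schema, new File("/tmp/decimal.avro"))

// Avro stores a bytes-backed decimal as the two's-complement big-endian
// unscaled value; the reader applies the schema's scale (2). Writing an
// unscaled value that is too wide yields a decimal whose precision exceeds
// the declared 4, which is what the "too large precision" test needs.
val tooPrecise = new BigDecimal("12345.67") // precision 7 > declared 4
val record = new GenericData.Record(schema)
record.put("decimal", ByteBuffer.wrap(tooPrecise.unscaledValue().toByteArray))
dataFileWriter.append(record)
dataFileWriter.close()
```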
---