This is an automated email from the ASF dual-hosted git repository.

yumwang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 8a552bf  [SPARK-34778][BUILD] Upgrade to Avro 1.10.2
8a552bf is described below

commit 8a552bfc767dff987be41f7f463db17395b74e6f
Author: Ismaël Mejía <ieme...@gmail.com>
AuthorDate: Mon Mar 22 19:30:14 2021 +0800

    [SPARK-34778][BUILD] Upgrade to Avro 1.10.2
    
    ### What changes were proposed in this pull request?
    Update the Avro version to 1.10.2
    
    ### Why are the changes needed?
    To stay up to date with upstream and catch compatibility issues with zstd
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    Unit tests
    
    Closes #31866 from iemejia/SPARK-27733-upgrade-avro-1.10.2.
    
    Authored-by: Ismaël Mejía <ieme...@gmail.com>
    Signed-off-by: Yuming Wang <yumw...@ebay.com>
---
 dev/deps/spark-deps-hadoop-2.7-hive-2.3                             | 6 +++---
 dev/deps/spark-deps-hadoop-3.2-hive-2.3                             | 6 +++---
 docs/sql-data-sources-avro.md                                       | 4 ++--
 .../avro/src/main/scala/org/apache/spark/sql/avro/AvroOptions.scala | 4 ++--
 pom.xml                                                             | 2 +-
 project/SparkBuild.scala                                            | 2 +-
 project/plugins.sbt                                                 | 2 +-
 7 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/dev/deps/spark-deps-hadoop-2.7-hive-2.3 
b/dev/deps/spark-deps-hadoop-2.7-hive-2.3
index e6619d2..2f17c11 100644
--- a/dev/deps/spark-deps-hadoop-2.7-hive-2.3
+++ b/dev/deps/spark-deps-hadoop-2.7-hive-2.3
@@ -22,9 +22,9 @@ arrow-memory-netty/2.0.0//arrow-memory-netty-2.0.0.jar
 arrow-vector/2.0.0//arrow-vector-2.0.0.jar
 audience-annotations/0.5.0//audience-annotations-0.5.0.jar
 automaton/1.11-8//automaton-1.11-8.jar
-avro-ipc/1.10.1//avro-ipc-1.10.1.jar
-avro-mapred/1.10.1//avro-mapred-1.10.1.jar
-avro/1.10.1//avro-1.10.1.jar
+avro-ipc/1.10.2//avro-ipc-1.10.2.jar
+avro-mapred/1.10.2//avro-mapred-1.10.2.jar
+avro/1.10.2//avro-1.10.2.jar
 bonecp/0.8.0.RELEASE//bonecp-0.8.0.RELEASE.jar
 breeze-macros_2.12/1.0//breeze-macros_2.12-1.0.jar
 breeze_2.12/1.0//breeze_2.12-1.0.jar
diff --git a/dev/deps/spark-deps-hadoop-3.2-hive-2.3 
b/dev/deps/spark-deps-hadoop-3.2-hive-2.3
index ea595a0..ea44748 100644
--- a/dev/deps/spark-deps-hadoop-3.2-hive-2.3
+++ b/dev/deps/spark-deps-hadoop-3.2-hive-2.3
@@ -17,9 +17,9 @@ arrow-memory-netty/2.0.0//arrow-memory-netty-2.0.0.jar
 arrow-vector/2.0.0//arrow-vector-2.0.0.jar
 audience-annotations/0.5.0//audience-annotations-0.5.0.jar
 automaton/1.11-8//automaton-1.11-8.jar
-avro-ipc/1.10.1//avro-ipc-1.10.1.jar
-avro-mapred/1.10.1//avro-mapred-1.10.1.jar
-avro/1.10.1//avro-1.10.1.jar
+avro-ipc/1.10.2//avro-ipc-1.10.2.jar
+avro-mapred/1.10.2//avro-mapred-1.10.2.jar
+avro/1.10.2//avro-1.10.2.jar
 bonecp/0.8.0.RELEASE//bonecp-0.8.0.RELEASE.jar
 breeze-macros_2.12/1.0//breeze-macros_2.12-1.0.jar
 breeze_2.12/1.0//breeze_2.12-1.0.jar
diff --git a/docs/sql-data-sources-avro.md b/docs/sql-data-sources-avro.md
index 928b3d0..ab1163a 100644
--- a/docs/sql-data-sources-avro.md
+++ b/docs/sql-data-sources-avro.md
@@ -378,7 +378,7 @@ applications. Read the [Advanced Dependency 
Management](https://spark.apache
 Submission Guide for more details. 
 
 ## Supported types for Avro -> Spark SQL conversion
-Currently Spark supports reading all [primitive 
types](https://avro.apache.org/docs/1.10.1/spec.html#schema_primitive) and 
[complex types](https://avro.apache.org/docs/1.10.1/spec.html#schema_complex) 
under records of Avro.
+Currently Spark supports reading all [primitive 
types](https://avro.apache.org/docs/1.10.2/spec.html#schema_primitive) and 
[complex types](https://avro.apache.org/docs/1.10.2/spec.html#schema_complex) 
under records of Avro.
 <table class="table">
   <tr><th><b>Avro type</b></th><th><b>Spark SQL type</b></th></tr>
   <tr>
@@ -442,7 +442,7 @@ In addition to the types listed above, it supports reading 
`union` types. The fo
 3. `union(something, null)`, where something is any supported Avro type. This 
will be mapped to the same Spark SQL type as that of something, with nullable 
set to true.
 All other union types are considered complex. They will be mapped to 
StructType where field names are member0, member1, etc., in accordance with 
members of the union. This is consistent with the behavior when converting 
between Avro and Parquet.
 
-It also supports reading the following Avro [logical 
types](https://avro.apache.org/docs/1.10.1/spec.html#Logical+Types):
+It also supports reading the following Avro [logical 
types](https://avro.apache.org/docs/1.10.2/spec.html#Logical+Types):
 
 <table class="table">
   <tr><th><b>Avro logical type</b></th><th><b>Avro type</b></th><th><b>Spark 
SQL type</b></th></tr>
diff --git 
a/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroOptions.scala 
b/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroOptions.scala
index 49772ee..4c2e2a5 100644
--- a/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroOptions.scala
+++ b/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroOptions.scala
@@ -69,14 +69,14 @@ private[sql] class AvroOptions(
 
   /**
    * Top level record name in write result, which is required in Avro spec.
-   * See https://avro.apache.org/docs/1.10.1/spec.html#schema_record .
+   * See https://avro.apache.org/docs/1.10.2/spec.html#schema_record .
    * Default value is "topLevelRecord"
    */
   val recordName: String = parameters.getOrElse("recordName", "topLevelRecord")
 
   /**
    * Record namespace in write result. Default value is "".
-   * See Avro spec for details: 
https://avro.apache.org/docs/1.10.1/spec.html#schema_record .
+   * See Avro spec for details: 
https://avro.apache.org/docs/1.10.2/spec.html#schema_record .
    */
   val recordNamespace: String = parameters.getOrElse("recordNamespace", "")
 
diff --git a/pom.xml b/pom.xml
index e7603b3..120213d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -148,7 +148,7 @@
     the link to metrics.dropwizard.io in docs/monitoring.md.
     -->
     <codahale.metrics.version>4.1.1</codahale.metrics.version>
-    <avro.version>1.10.1</avro.version>
+    <avro.version>1.10.2</avro.version>
     <aws.kinesis.client.version>1.14.0</aws.kinesis.client.version>
     <!-- Should be consistent with Kinesis client dependency -->
     <aws.java.sdk.version>1.11.844</aws.java.sdk.version>
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index 9b28205..a9d38d2 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -658,7 +658,7 @@ object DependencyOverrides {
     dependencyOverrides += "com.google.guava" % "guava" % guavaVersion,
     dependencyOverrides += "xerces" % "xercesImpl" % "2.12.0",
     dependencyOverrides += "jline" % "jline" % "2.14.6",
-    dependencyOverrides += "org.apache.avro" % "avro" % "1.10.1")
+    dependencyOverrides += "org.apache.avro" % "avro" % "1.10.2")
 }
 
 /**
diff --git a/project/plugins.sbt b/project/plugins.sbt
index 0a73b26c..a6e7e94 100644
--- a/project/plugins.sbt
+++ b/project/plugins.sbt
@@ -34,7 +34,7 @@ addSbtPlugin("com.typesafe" % "sbt-mima-plugin" % "0.8.0")
 addSbtPlugin("com.eed3si9n" % "sbt-unidoc" % "0.4.3")
 
 addSbtPlugin("com.cavorite" % "sbt-avro" % "2.1.1")
-libraryDependencies += "org.apache.avro" % "avro-compiler" % "1.10.1"
+libraryDependencies += "org.apache.avro" % "avro-compiler" % "1.10.2"
 
 addSbtPlugin("io.spray" % "sbt-revolver" % "0.9.1")
 

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to