This is an automated email from the ASF dual-hosted git repository.
gengliang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new de141a3 [SPARK-32660][SQL][DOC] Show Avro related API in documentation
de141a3 is described below
commit de141a32714fd2dbc4be2d540adabf328bbce2c4
Author: Gengliang Wang <[email protected]>
AuthorDate: Fri Aug 21 13:12:43 2020 +0800
[SPARK-32660][SQL][DOC] Show Avro related API in documentation
### What changes were proposed in this pull request?
Currently, the Avro-related APIs are missing from the documentation at
https://spark.apache.org/docs/latest/api/scala/org/apache/spark/index.html .
This PR is to:
1. Mark internal Avro-related classes as private
2. Show the Avro-related APIs in the official Spark API documentation (see the sketch below)
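For context, the user-facing entry points that now surface in the generated docs include `from_avro`/`to_avro` from `org.apache.spark.sql.avro.functions`. A minimal sketch, assuming a local SparkSession; the data and schema are made up for illustration:

```scala
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.avro.functions.{from_avro, to_avro}
import org.apache.spark.sql.functions.struct

val spark = SparkSession.builder().master("local[*]").getOrCreate()
import spark.implicits._

// Illustrative JSON-format Avro schema matching a single long column.
val jsonFormatSchema =
  """{"type": "record", "name": "topLevelRecord", "fields": [
    |  {"name": "id", "type": "long"}
    |]}""".stripMargin

val df = Seq(1L, 2L, 3L).toDF("id")

// to_avro encodes a struct column as Avro binary;
// from_avro decodes it back using the supplied schema.
val roundTripped = df
  .select(to_avro(struct($"id")).as("avro"))
  .select(from_avro($"avro", jsonFormatSchema).as("value"))
```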
### Why are the changes needed?
Better documentation.
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
Build doc and preview:
[screenshots of the generated API documentation omitted]
Closes #29476 from gengliangwang/avroAPIDoc.
Authored-by: Gengliang Wang <[email protected]>
Signed-off-by: Gengliang Wang <[email protected]>
---
.../spark/sql/avro/SparkAvroKeyOutputFormat.java | 4 ++--
.../apache/spark/sql/avro/AvroDataToCatalyst.scala | 2 +-
.../apache/spark/sql/avro/AvroDeserializer.scala | 2 +-
.../org/apache/spark/sql/avro/AvroOptions.scala | 4 ++--
.../org/apache/spark/sql/avro/AvroSerializer.scala | 2 +-
.../scala/org/apache/spark/sql/avro/AvroUtils.scala | 2 +-
.../apache/spark/sql/avro/CatalystDataToAvro.scala | 2 +-
.../apache/spark/sql/avro/SchemaConverters.scala | 21 ++++++++++++++++++---
project/SparkBuild.scala | 5 +++--
9 files changed, 30 insertions(+), 14 deletions(-)
diff --git a/external/avro/src/main/java/org/apache/spark/sql/avro/SparkAvroKeyOutputFormat.java b/external/avro/src/main/java/org/apache/spark/sql/avro/SparkAvroKeyOutputFormat.java
index 55696a6..a455584 100644
--- a/external/avro/src/main/java/org/apache/spark/sql/avro/SparkAvroKeyOutputFormat.java
+++ b/external/avro/src/main/java/org/apache/spark/sql/avro/SparkAvroKeyOutputFormat.java
@@ -35,8 +35,8 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
// A variant of `AvroKeyOutputFormat`, which is used to inject the custom `RecordWriterFactory` so
// that we can set avro file metadata.
-public class SparkAvroKeyOutputFormat extends AvroKeyOutputFormat<GenericRecord> {
- public SparkAvroKeyOutputFormat(Map<String, String> metadata) {
+class SparkAvroKeyOutputFormat extends AvroKeyOutputFormat<GenericRecord> {
+ SparkAvroKeyOutputFormat(Map<String, String> metadata) {
super(new SparkRecordWriterFactory(metadata));
}
diff --git a/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroDataToCatalyst.scala b/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroDataToCatalyst.scala
index 285a30b..9813f7f 100644
--- a/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroDataToCatalyst.scala
+++ b/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroDataToCatalyst.scala
@@ -30,7 +30,7 @@ import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, CodeGe
import org.apache.spark.sql.catalyst.util.{FailFastMode, ParseMode, PermissiveMode}
import org.apache.spark.sql.types._
-case class AvroDataToCatalyst(
+private[avro] case class AvroDataToCatalyst(
child: Expression,
jsonFormatSchema: String,
options: Map[String, String])
diff --git a/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroDeserializer.scala b/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroDeserializer.scala
index 29385b7..360a7fc 100644
--- a/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroDeserializer.scala
+++ b/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroDeserializer.scala
@@ -42,7 +42,7 @@ import org.apache.spark.unsafe.types.UTF8String
/**
* A deserializer to deserialize data in avro format to data in catalyst format.
*/
-class AvroDeserializer(
+private[sql] class AvroDeserializer(
rootAvroType: Schema,
rootCatalystType: DataType,
datetimeRebaseMode: LegacyBehaviorPolicy.Value,
diff --git a/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroOptions.scala b/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroOptions.scala
index f3ea785..8972b05 100644
--- a/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroOptions.scala
+++ b/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroOptions.scala
@@ -27,7 +27,7 @@ import org.apache.spark.sql.internal.SQLConf
/**
* Options for Avro Reader and Writer stored in case insensitive manner.
*/
-class AvroOptions(
+private[sql] class AvroOptions(
@transient val parameters: CaseInsensitiveMap[String],
@transient val conf: Configuration) extends Logging with Serializable {
@@ -95,7 +95,7 @@ class AvroOptions(
parameters.get("mode").map(ParseMode.fromString).getOrElse(FailFastMode)
}
-object AvroOptions {
+private[sql] object AvroOptions {
def apply(parameters: Map[String, String]): AvroOptions = {
val hadoopConf = SparkSession
.getActiveSession
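`AvroOptions` becomes `private[sql]`, but the options it parses are still passed through the public DataFrameReader; a sketch, assuming an existing SparkSession named `spark` (path and schema are placeholders):

```scala
// "avroSchema" is one of the options AvroOptions parses; here it pins
// the reader schema instead of inferring it from the files.
val avroSchemaJson =
  """{"type": "record", "name": "topLevelRecord", "fields": [
    |  {"name": "id", "type": "long"}
    |]}""".stripMargin

val df = spark.read
  .format("avro")
  .option("avroSchema", avroSchemaJson)
  .load("/path/to/data.avro") // placeholder path
```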
diff --git a/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroSerializer.scala b/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroSerializer.scala
index 21c5dec..08b1b41 100644
--- a/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroSerializer.scala
+++ b/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroSerializer.scala
@@ -43,7 +43,7 @@ import org.apache.spark.sql.types._
/**
* A serializer to serialize data in catalyst format to data in avro format.
*/
-class AvroSerializer(
+private[sql] class AvroSerializer(
rootCatalystType: DataType,
rootAvroType: Schema,
nullable: Boolean,
diff --git a/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroUtils.scala b/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroUtils.scala
index 51cc51e..4a38970 100644
--- a/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroUtils.scala
+++ b/external/avro/src/main/scala/org/apache/spark/sql/avro/AvroUtils.scala
@@ -38,7 +38,7 @@ import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
-object AvroUtils extends Logging {
+private[sql] object AvroUtils extends Logging {
def inferSchema(
spark: SparkSession,
options: Map[String, String],
diff --git a/external/avro/src/main/scala/org/apache/spark/sql/avro/CatalystDataToAvro.scala b/external/avro/src/main/scala/org/apache/spark/sql/avro/CatalystDataToAvro.scala
index 7732c83..53910b7 100644
--- a/external/avro/src/main/scala/org/apache/spark/sql/avro/CatalystDataToAvro.scala
+++ b/external/avro/src/main/scala/org/apache/spark/sql/avro/CatalystDataToAvro.scala
@@ -27,7 +27,7 @@ import org.apache.spark.sql.catalyst.expressions.{Expression, UnaryExpression}
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode}
import org.apache.spark.sql.types.{BinaryType, DataType}
-case class CatalystDataToAvro(
+private[avro] case class CatalystDataToAvro(
child: Expression,
jsonFormatSchema: Option[String]) extends UnaryExpression {
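(For reference: `CatalystDataToAvro` and `AvroDataToCatalyst` above are the Catalyst expressions behind the public `from_avro`/`to_avro` functions sketched earlier, so marking them `private[avro]` hides implementation detail without removing user-facing API.)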
diff --git a/external/avro/src/main/scala/org/apache/spark/sql/avro/SchemaConverters.scala b/external/avro/src/main/scala/org/apache/spark/sql/avro/SchemaConverters.scala
index 75690bb..27d5871 100644
--- a/external/avro/src/main/scala/org/apache/spark/sql/avro/SchemaConverters.scala
+++ b/external/avro/src/main/scala/org/apache/spark/sql/avro/SchemaConverters.scala
@@ -24,6 +24,7 @@ import org.apache.avro.{LogicalTypes, Schema, SchemaBuilder}
import org.apache.avro.LogicalTypes.{Date, Decimal, TimestampMicros, TimestampMillis}
import org.apache.avro.Schema.Type._
+import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.sql.catalyst.util.RandomUUIDGenerator
import org.apache.spark.sql.types._
import org.apache.spark.sql.types.Decimal.{maxPrecisionForBytes, minBytesForPrecision}
@@ -32,21 +33,29 @@ import org.apache.spark.sql.types.Decimal.{maxPrecisionForBytes, minBytesForPrec
* This object contains method that are used to convert sparkSQL schemas to avro schemas and vice
* versa.
*/
+@DeveloperApi
object SchemaConverters {
private lazy val uuidGenerator = RandomUUIDGenerator(new Random().nextLong())
private lazy val nullSchema = Schema.create(Schema.Type.NULL)
+ /**
+ * Internal wrapper for SQL data type and nullability.
+ *
+ * @since 2.4.0
+ */
case class SchemaType(dataType: DataType, nullable: Boolean)
/**
- * This function takes an avro schema and returns a sql schema.
+ * Converts an Avro schema to a corresponding Spark SQL schema.
+ *
+ * @since 2.4.0
*/
def toSqlType(avroSchema: Schema): SchemaType = {
toSqlTypeHelper(avroSchema, Set.empty)
}
- def toSqlTypeHelper(avroSchema: Schema, existingRecordNames: Set[String]): SchemaType = {
+ private def toSqlTypeHelper(avroSchema: Schema, existingRecordNames: Set[String]): SchemaType = {
avroSchema.getType match {
case INT => avroSchema.getLogicalType match {
case _: Date => SchemaType(DateType, nullable = false)
@@ -133,6 +142,11 @@ object SchemaConverters {
}
}
+ /**
+ * Converts a Spark SQL schema to a corresponding Avro schema.
+ *
+ * @since 2.4.0
+ */
def toAvroType(
catalystType: DataType,
nullable: Boolean = false,
@@ -192,4 +206,5 @@ object SchemaConverters {
}
}
-class IncompatibleSchemaException(msg: String, ex: Throwable = null) extends Exception(msg, ex)
+private[avro] class IncompatibleSchemaException(
+ msg: String, ex: Throwable = null) extends Exception(msg, ex)
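With `SchemaConverters` now the documented `@DeveloperApi` surface, a minimal sketch of the two conversions (the Avro schema string is illustrative):

```scala
import org.apache.avro.Schema
import org.apache.spark.sql.avro.SchemaConverters
import org.apache.spark.sql.types.StructType

// Illustrative Avro record schema.
val avroSchema = new Schema.Parser().parse(
  """{"type": "record", "name": "user", "fields": [
    |  {"name": "name", "type": "string"},
    |  {"name": "age", "type": "int"}
    |]}""".stripMargin)

// Avro -> Spark SQL: yields SchemaType(dataType, nullable).
val sqlType = SchemaConverters.toSqlType(avroSchema)
val structType = sqlType.dataType.asInstanceOf[StructType]

// Spark SQL -> Avro: nullable defaults to false, per the signature above.
val backToAvro = SchemaConverters.toAvroType(structType, nullable = false)
```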
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index 110c311..c94ae4e 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -852,6 +852,7 @@ object Unidoc {
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/hive/test")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/catalog/v2/utils")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/hive")))
+ .map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/v2/avro")))
}
private def ignoreClasspaths(classpaths: Seq[Classpath]): Seq[Classpath] = {
@@ -867,10 +868,10 @@ object Unidoc {
unidocProjectFilter in(ScalaUnidoc, unidoc) :=
inAnyProject -- inProjects(OldDeps.project, repl, examples, tools, kubernetes,
- yarn, tags, streamingKafka010, sqlKafka010, avro),
+ yarn, tags, streamingKafka010, sqlKafka010),
unidocProjectFilter in(JavaUnidoc, unidoc) :=
inAnyProject -- inProjects(OldDeps.project, repl, examples, tools, kubernetes,
- yarn, tags, streamingKafka010, sqlKafka010, avro),
+ yarn, tags, streamingKafka010, sqlKafka010),
unidocAllClasspaths in (ScalaUnidoc, unidoc) := {
ignoreClasspaths((unidocAllClasspaths in (ScalaUnidoc, unidoc)).value)
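(With `avro` removed from the exclusion lists above, the project is included in unidoc generation; `build/sbt unidoc` is the usual way to regenerate the aggregated docs locally, after which the Avro classes should appear under `org.apache.spark.sql.avro`.)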
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]