This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
     new 8147620ac49 [SPARK-45964][SQL] Remove private sql accessor in XML and JSON package under catalyst package
8147620ac49 is described below

commit 8147620ac49ec4c82b9ef34681334a34c0ad0e37
Author: Hyukjin Kwon <gurwls...@apache.org>
AuthorDate: Thu Nov 16 17:57:55 2023 -0800

    [SPARK-45964][SQL] Remove private sql accessor in XML and JSON package under catalyst package

    ### What changes were proposed in this pull request?

    This PR removes the `private[sql]` modifier from the XML and JSON packages under the `catalyst` package.

    ### Why are the changes needed?

    `catalyst` is already a private package:
    https://github.com/apache/spark/blob/master/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/package.scala#L21-L22

    See also SPARK-16813.

    ### Does this PR introduce _any_ user-facing change?

    No.

    ### How was this patch tested?

    CI in this PR should test them out.

    ### Was this patch authored or co-authored using generative AI tooling?

    No.

    Closes #43856 from HyukjinKwon/SPARK-45964.

    Authored-by: Hyukjin Kwon <gurwls...@apache.org>
    Signed-off-by: Dongjoon Hyun <dh...@apple.com>
---
 .../org/apache/spark/sql/catalyst/json/CreateJacksonParser.scala     | 2 +-
 .../main/scala/org/apache/spark/sql/catalyst/json/JSONOptions.scala  | 6 +++---
 .../scala/org/apache/spark/sql/catalyst/json/JacksonGenerator.scala  | 2 +-
 .../scala/org/apache/spark/sql/catalyst/json/JsonInferSchema.scala   | 2 +-
 .../scala/org/apache/spark/sql/catalyst/xml/CreateXmlParser.scala    | 2 +-
 .../scala/org/apache/spark/sql/catalyst/xml/StaxXmlParser.scala      | 4 ++--
 .../org/apache/spark/sql/catalyst/xml/StaxXmlParserUtils.scala       | 2 +-
 .../scala/org/apache/spark/sql/catalyst/xml/ValidatorUtil.scala      | 2 +-
 .../scala/org/apache/spark/sql/catalyst/xml/XmlInferSchema.scala     | 2 +-
 .../main/scala/org/apache/spark/sql/catalyst/xml/XmlOptions.scala    | 4 ++--
 10 files changed, 14 insertions(+), 14 deletions(-)
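As background for the patch below, the following sketch illustrates what the `private[sql]` qualifier being deleted actually does in Scala. It uses hypothetical `com.example` packages and class names, not Spark's real sources: a `private[p]` definition is visible everywhere inside the enclosing package `p` and its subpackages, and invisible outside it, so removing the qualifier only changes what code outside the `sql` package can see.

// Minimal sketch of Scala's qualified access modifiers.
// All names below (com.example.*) are hypothetical, not Spark's sources.
package com.example.sql.catalyst {
  // Visible only inside com.example.sql and its subpackages.
  private[sql] class QualifiedOptions(val multiLine: Boolean)

  // Without the qualifier, the class is plain public Scala.
  class UnqualifiedOptions(val multiLine: Boolean)
}

package com.example.sql {
  object InsideSqlPackage {
    // Both forms compile here, so dropping the qualifier changes nothing
    // for callers that already live under the sql package.
    val a = new catalyst.QualifiedOptions(multiLine = true)
    val b = new catalyst.UnqualifiedOptions(multiLine = true)
  }
}

package com.example.app {
  object OutsideSqlPackage {
    // Only the unqualified class is visible from an unrelated package.
    val ok = new com.example.sql.catalyst.UnqualifiedOptions(multiLine = true)
    // new com.example.sql.catalyst.QualifiedOptions(true)  // would not compile
  }
}

Since, as the PR description notes, `catalyst` is already a private package, the per-class qualifier adds no practical protection, and the patch below is a mechanical deletion of it.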
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/CreateJacksonParser.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/CreateJacksonParser.scala
index 156c6b819f2..61ef14a3f10 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/CreateJacksonParser.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/CreateJacksonParser.scala
@@ -29,7 +29,7 @@ import sun.nio.cs.StreamDecoder
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.unsafe.types.UTF8String
 
-private[sql] object CreateJacksonParser extends Serializable {
+object CreateJacksonParser extends Serializable {
   def string(jsonFactory: JsonFactory, record: String): JsonParser = {
     jsonFactory.createParser(record)
   }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JSONOptions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JSONOptions.scala
index 596d9e39b94..e5aa0bb6d2c 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JSONOptions.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JSONOptions.scala
@@ -34,7 +34,7 @@ import org.apache.spark.sql.internal.{LegacyBehaviorPolicy, SQLConf}
  *
  * Most of these map directly to Jackson's internal options, specified in [[JsonReadFeature]].
  */
-private[sql] class JSONOptions(
+class JSONOptions(
     @transient val parameters: CaseInsensitiveMap[String],
     defaultTimeZoneId: String,
     defaultColumnNameOfCorruptRecord: String)
@@ -212,7 +212,7 @@ private[sql] class JSONOptions(
   }
 }
 
-private[sql] class JSONOptionsInRead(
+class JSONOptionsInRead(
     @transient override val parameters: CaseInsensitiveMap[String],
     defaultTimeZoneId: String,
     defaultColumnNameOfCorruptRecord: String)
@@ -242,7 +242,7 @@ private[sql] class JSONOptionsInRead(
   }
 }
 
-private[sql] object JSONOptionsInRead {
+object JSONOptionsInRead {
   // The following encodings are not supported in per-line mode (multiline is false)
   // because they cause some problems in reading files with BOM which is supposed to
   // present in the files with such encodings. After splitting input files by lines,
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JacksonGenerator.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JacksonGenerator.scala
index 0a243c63685..e02b2860618 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JacksonGenerator.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JacksonGenerator.scala
@@ -37,7 +37,7 @@ import org.apache.spark.util.ArrayImplicits._
  * of map. An exception will be thrown if trying to write out a struct if it is initialized with
  * a `MapType`, and vice verse.
  */
-private[sql] class JacksonGenerator(
+class JacksonGenerator(
     dataType: DataType,
     writer: Writer,
     options: JSONOptions) {
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JsonInferSchema.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JsonInferSchema.scala
index 4d04b34876c..7b8de7d84cf 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JsonInferSchema.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JsonInferSchema.scala
@@ -37,7 +37,7 @@ import org.apache.spark.sql.internal.{LegacyBehaviorPolicy, SQLConf}
 import org.apache.spark.sql.types._
 import org.apache.spark.util.Utils
 
-private[sql] class JsonInferSchema(options: JSONOptions) extends Serializable with Logging {
+class JsonInferSchema(options: JSONOptions) extends Serializable with Logging {
 
   private val decimalParser = ExprUtils.getDecimalParser(options.locale)
 
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/CreateXmlParser.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/CreateXmlParser.scala
index 3a7f0b4be28..553c09da332 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/CreateXmlParser.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/CreateXmlParser.scala
@@ -29,7 +29,7 @@ import sun.nio.cs.StreamDecoder
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.unsafe.types.UTF8String
 
-private[sql] object CreateXmlParser extends Serializable {
+object CreateXmlParser extends Serializable {
   val filter = new EventFilter {
     override def accept(event: XMLEvent): Boolean =
       // Ignore comments and processing instructions
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/StaxXmlParser.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/StaxXmlParser.scala
index 754b54ce157..b3174b70441 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/StaxXmlParser.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/StaxXmlParser.scala
@@ -587,7 +587,7 @@ class StaxXmlParser(
  *
  * This implementation is ultimately loosely based on LineRecordReader in Hadoop.
  */
-private[xml] class XmlTokenizer(
+class XmlTokenizer(
     inputStream: InputStream,
     options: XmlOptions) {
   private val reader = new InputStreamReader(inputStream, Charset.forName(options.charset))
@@ -742,7 +742,7 @@ private[xml] class XmlTokenizer(
   }
 }
 
-private[sql] object StaxXmlParser {
+object StaxXmlParser {
   /**
    * Parses a stream that contains CSV strings and turns it into an iterator of tokens.
    */
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/StaxXmlParserUtils.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/StaxXmlParserUtils.scala
index ad1c0b729c5..d3b90564a75 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/StaxXmlParserUtils.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/StaxXmlParserUtils.scala
@@ -24,7 +24,7 @@ import javax.xml.stream.events._
 import scala.annotation.tailrec
 import scala.jdk.CollectionConverters._
 
-private[sql] object StaxXmlParserUtils {
+object StaxXmlParserUtils {
 
   private[sql] val factory: XMLInputFactory = {
     val factory = XMLInputFactory.newInstance()
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/ValidatorUtil.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/ValidatorUtil.scala
index 0d85a512d7e..51140d01019 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/ValidatorUtil.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/ValidatorUtil.scala
@@ -31,7 +31,7 @@ import org.apache.spark.internal.Logging
 /**
  * Utilities for working with XSD validation.
  */
-private[sql] object ValidatorUtil extends Logging{
+object ValidatorUtil extends Logging {
   // Parsing XSDs may be slow, so cache them by path:
   private val cache = CacheBuilder.newBuilder().softValues().build(
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/XmlInferSchema.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/XmlInferSchema.scala
index 467fbe868be..eeb5a9de4ed 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/XmlInferSchema.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/XmlInferSchema.scala
@@ -36,7 +36,7 @@ import org.apache.spark.sql.catalyst.util.{DateFormatter, PermissiveMode, Timest
 import org.apache.spark.sql.catalyst.util.LegacyDateFormats.FAST_DATE_FORMAT
 import org.apache.spark.sql.types._
 
-private[sql] class XmlInferSchema(options: XmlOptions, caseSensitive: Boolean)
+class XmlInferSchema(options: XmlOptions, caseSensitive: Boolean)
   extends Serializable with Logging {
 
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/XmlOptions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/XmlOptions.scala
index 8f6cdbf360e..742506d6fdc 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/XmlOptions.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/xml/XmlOptions.scala
@@ -30,7 +30,7 @@ import org.apache.spark.sql.internal.{LegacyBehaviorPolicy, SQLConf}
 /**
  * Options for the XML data source.
  */
-private[sql] class XmlOptions(
+class XmlOptions(
     val parameters: CaseInsensitiveMap[String],
     defaultTimeZoneId: String,
     defaultColumnNameOfCorruptRecord: String,
@@ -172,7 +172,7 @@ private[sql] class XmlOptions(
   }
 }
 
-private[sql] object XmlOptions extends DataSourceOptions {
+object XmlOptions extends DataSourceOptions {
   val DEFAULT_ATTRIBUTE_PREFIX = "_"
   val DEFAULT_VALUE_TAG = "_VALUE"
   val DEFAULT_ROW_TAG = "ROW"

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org