Repository: spark
Updated Branches:
  refs/heads/branch-2.0 3ca0dc007 -> c9bd67e94


[SPARK-17561][DOCS] DataFrameWriter documentation formatting problems

Fix `<ul> / <li>` problems in SQL scaladoc.
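
In short, the nested <ul> that documents the parse modes now sits inside its parent
<li>, with the closing </li> after the sub-list, so Scaladoc emits valid nested HTML.
Abbreviated (option descriptions elided), the corrected pattern looks like:

   * <li>`mode` (default `PERMISSIVE`): allows a mode for dealing with corrupt records
   *   during parsing.
   *   <ul>
   *     <li>`PERMISSIVE` : ...</li>
   *     <li>`DROPMALFORMED` : ...</li>
   *     <li>`FAILFAST` : ...</li>
   *   </ul>
   * </li>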

Tested via a Scaladoc build and manual verification of the generated HTML.

Author: Sean Owen <so...@cloudera.com>

Closes #15117 from srowen/SPARK-17561.

(cherry picked from commit b9323fc9381a09af510f542fd5c86473e029caf6)
Signed-off-by: Sean Owen <so...@cloudera.com>


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/c9bd67e9
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/c9bd67e9
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/c9bd67e9

Branch: refs/heads/branch-2.0
Commit: c9bd67e94d9d9d2e1f2cb1e5c4bb71a69b1e1d4e
Parents: 3ca0dc0
Author: Sean Owen <so...@cloudera.com>
Authored: Fri Sep 16 13:43:05 2016 -0700
Committer: Sean Owen <so...@cloudera.com>
Committed: Sat Sep 17 12:43:30 2016 +0100

----------------------------------------------------------------------
 .../org/apache/spark/sql/DataFrameReader.scala  | 32 +++++++++--------
 .../org/apache/spark/sql/DataFrameWriter.scala  | 10 ++++++
 .../spark/sql/streaming/DataStreamReader.scala  | 38 ++++++++++++--------
 3 files changed, 51 insertions(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/c9bd67e9/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
index 083c2e2..410cb20 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
@@ -269,14 +269,15 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
    * <li>`allowBackslashEscapingAnyCharacter` (default `false`): allows accepting quoting of all
    * character using backslash quoting mechanism</li>
    * <li>`mode` (default `PERMISSIVE`): allows a mode for dealing with corrupt records
-   * during parsing.</li>
-   * <ul>
-   *  <li> - `PERMISSIVE` : sets other fields to `null` when it meets a corrupted record, and puts
-   *  the malformed string into a new field configured by `columnNameOfCorruptRecord`. When
-   *  a schema is set by user, it sets `null` for extra fields.</li>
-   *  <li> - `DROPMALFORMED` : ignores the whole corrupted records.</li>
-   *  <li> - `FAILFAST` : throws an exception when it meets corrupted records.</li>
-   * </ul>
+   * during parsing.
+   *   <ul>
+   *     <li>`PERMISSIVE` : sets other fields to `null` when it meets a corrupted record, and puts
+   *     the malformed string into a new field configured by `columnNameOfCorruptRecord`. When
+   *     a schema is set by user, it sets `null` for extra fields.</li>
+   *     <li>`DROPMALFORMED` : ignores the whole corrupted records.</li>
+   *     <li>`FAILFAST` : throws an exception when it meets corrupted records.</li>
+   *   </ul>
+   * </li>
    * <li>`columnNameOfCorruptRecord` (default is the value specified in
    * `spark.sql.columnNameOfCorruptRecord`): allows renaming the new field having malformed string
    * created by `PERMISSIVE` mode. This overrides `spark.sql.columnNameOfCorruptRecord`.</li>
@@ -396,13 +397,14 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
    * <li>`maxMalformedLogPerPartition` (default `10`): sets the maximum number of malformed rows
    * Spark will log for each partition. Malformed records beyond this number will be ignored.</li>
    * <li>`mode` (default `PERMISSIVE`): allows a mode for dealing with corrupt records
-   *    during parsing.</li>
-   * <ul>
-   *   <li> - `PERMISSIVE` : sets other fields to `null` when it meets a corrupted record. When
-   *     a schema is set by user, it sets `null` for extra fields.</li>
-   *   <li> - `DROPMALFORMED` : ignores the whole corrupted records.</li>
-   *   <li> - `FAILFAST` : throws an exception when it meets corrupted records.</li>
-   * </ul>
+   *    during parsing.
+   *   <ul>
+   *     <li>`PERMISSIVE` : sets other fields to `null` when it meets a corrupted record. When
+   *       a schema is set by user, it sets `null` for extra fields.</li>
+   *     <li>`DROPMALFORMED` : ignores the whole corrupted records.</li>
+   *     <li>`FAILFAST` : throws an exception when it meets corrupted records.</li>
+   *   </ul>
+   * </li>
    * </ul>
    * @since 2.0.0
    */
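
For reference, a minimal sketch of how the read options documented above are typically
passed; this is illustrative only, and the path and corrupt-record column name are
placeholders:

    // assumes an existing SparkSession named `spark`; "data.json" is a placeholder path
    val df = spark.read
      .option("mode", "PERMISSIVE")                     // or "DROPMALFORMED" / "FAILFAST"
      .option("columnNameOfCorruptRecord", "_corrupt")  // only consulted in PERMISSIVE mode
      .json("data.json")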

http://git-wip-us.apache.org/repos/asf/spark/blob/c9bd67e9/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
index 767af99..a4c4a5d 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
@@ -449,6 +449,7 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
    * }}}
    *
    * You can set the following JSON-specific option(s) for writing JSON files:
+   * <ul>
    * <li>`compression` (default `null`): compression codec to use when saving to file. This can be
    * one of the known case-insensitive shorten names (`none`, `bzip2`, `gzip`, `lz4`,
    * `snappy` and `deflate`). </li>
@@ -458,6 +459,7 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
    * <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSZZ`): sets the string that
    * indicates a timestamp format. Custom date formats follow the formats at
    * `java.text.SimpleDateFormat`. This applies to timestamp type.</li>
+   * </ul>
    *
    * @since 1.4.0
    */
@@ -473,10 +475,12 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
    * }}}
    *
    * You can set the following Parquet-specific option(s) for writing Parquet files:
+   * <ul>
    * <li>`compression` (default is the value specified in `spark.sql.parquet.compression.codec`):
    * compression codec to use when saving to file. This can be one of the known case-insensitive
    * shorten names(none, `snappy`, `gzip`, and `lzo`). This will override
    * `spark.sql.parquet.compression.codec`.</li>
+   * </ul>
    *
    * @since 1.4.0
    */
@@ -492,9 +496,11 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
    * }}}
    *
    * You can set the following ORC-specific option(s) for writing ORC files:
+   * <ul>
    * <li>`compression` (default `snappy`): compression codec to use when saving to file. This can be
    * one of the known case-insensitive shorten names(`none`, `snappy`, `zlib`, and `lzo`).
    * This will override `orc.compress`.</li>
+   * </ul>
    *
    * @since 1.5.0
    * @note Currently, this method can only be used after enabling Hive support
@@ -516,9 +522,11 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
    * }}}
    *
    * You can set the following option(s) for writing text files:
+   * <ul>
    * <li>`compression` (default `null`): compression codec to use when saving to file. This can be
    * one of the known case-insensitive shorten names (`none`, `bzip2`, `gzip`, `lz4`,
    * `snappy` and `deflate`). </li>
+   * </ul>
    *
    * @since 1.6.0
    */
@@ -534,6 +542,7 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
    * }}}
    *
    * You can set the following CSV-specific option(s) for writing CSV files:
+   * <ul>
    * <li>`sep` (default `,`): sets the single character as a separator for each
    * field and value.</li>
    * <li>`quote` (default `"`): sets the single character used for escaping quoted values where
@@ -556,6 +565,7 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
    * <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSZZ`): sets the string that
    * indicates a timestamp format. Custom date formats follow the formats at
    * `java.text.SimpleDateFormat`. This applies to timestamp type.</li>
+   * </ul>
    *
    * @since 2.0.0
    */
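
Likewise, a minimal sketch of supplying the writer options documented above; `df` and the
output paths are placeholders:

    // assumes `df` is an existing DataFrame; the "out/..." paths are placeholders
    df.write.option("compression", "gzip").json("out/json")
    df.write.option("compression", "snappy").parquet("out/parquet")
    df.write.option("sep", ";").csv("out/csv")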

http://git-wip-us.apache.org/repos/asf/spark/blob/c9bd67e9/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala
index e0a19b1..613177e 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala
@@ -161,6 +161,7 @@ final class DataStreamReader private[sql](sparkSession: SparkSession) extends Lo
    * schema in advance, use the version that specifies the schema to avoid the extra scan.
    *
    * You can set the following JSON-specific options to deal with non-standard JSON files:
+   * <ul>
    * <li>`maxFilesPerTrigger` (default: no max limit): sets the maximum number of new files to be
    * considered in every trigger.</li>
    * <li>`primitivesAsString` (default `false`): infers all primitive values as a string type</li>
@@ -175,14 +176,15 @@ final class DataStreamReader private[sql](sparkSession: SparkSession) extends Lo
    * <li>`allowBackslashEscapingAnyCharacter` (default `false`): allows accepting quoting of all
    * character using backslash quoting mechanism</li>
    * <li>`mode` (default `PERMISSIVE`): allows a mode for dealing with corrupt records
-   * during parsing.</li>
-   * <ul>
-   *  <li>`PERMISSIVE` : sets other fields to `null` when it meets a corrupted record, and puts the
-   *  malformed string into a new field configured by `columnNameOfCorruptRecord`. When
-   *  a schema is set by user, it sets `null` for extra fields.</li>
-   *  <li>`DROPMALFORMED` : ignores the whole corrupted records.</li>
-   *  <li>`FAILFAST` : throws an exception when it meets corrupted records.</li>
-   * </ul>
+   * during parsing.
+   *   <ul>
+   *     <li>`PERMISSIVE` : sets other fields to `null` when it meets a corrupted record, and puts
+   *     the malformed string into a new field configured by `columnNameOfCorruptRecord`. When
+   *     a schema is set by user, it sets `null` for extra fields.</li>
+   *     <li>`DROPMALFORMED` : ignores the whole corrupted records.</li>
+   *     <li>`FAILFAST` : throws an exception when it meets corrupted records.</li>
+   *   </ul>
+   * </li>
    * <li>`columnNameOfCorruptRecord` (default is the value specified in
    * `spark.sql.columnNameOfCorruptRecord`): allows renaming the new field having malformed string
    * created by `PERMISSIVE` mode. This overrides `spark.sql.columnNameOfCorruptRecord`.</li>
@@ -192,6 +194,7 @@ final class DataStreamReader private[sql](sparkSession: SparkSession) extends Lo
    * <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSZZ`): sets the string that
    * indicates a timestamp format. Custom date formats follow the formats at
    * `java.text.SimpleDateFormat`. This applies to timestamp type.</li>
+   * </ul>
    *
    * @since 2.0.0
    */
@@ -207,6 +210,7 @@ final class DataStreamReader private[sql](sparkSession: SparkSession) extends Lo
    * specify the schema explicitly using [[schema]].
    *
    * You can set the following CSV-specific options to deal with CSV files:
+   * <ul>
    * <li>`maxFilesPerTrigger` (default: no max limit): sets the maximum number of new files to be
    * considered in every trigger.</li>
    * <li>`sep` (default `,`): sets the single character as a separator for each
@@ -245,12 +249,14 @@ final class DataStreamReader private[sql](sparkSession: SparkSession) extends Lo
    * <li>`maxCharsPerColumn` (default `1000000`): defines the maximum number of characters allowed
    * for any given value being read.</li>
    * <li>`mode` (default `PERMISSIVE`): allows a mode for dealing with corrupt records
-   *    during parsing.</li>
-   * <ul>
-   *   <li>`PERMISSIVE` : sets other fields to `null` when it meets a corrupted record. When
-   *     a schema is set by user, it sets `null` for extra fields.</li>
-   *   <li>`DROPMALFORMED` : ignores the whole corrupted records.</li>
-   *   <li>`FAILFAST` : throws an exception when it meets corrupted records.</li>
+   *    during parsing.
+   *   <ul>
+   *     <li>`PERMISSIVE` : sets other fields to `null` when it meets a corrupted record. When
+   *       a schema is set by user, it sets `null` for extra fields.</li>
+   *     <li>`DROPMALFORMED` : ignores the whole corrupted records.</li>
+   *     <li>`FAILFAST` : throws an exception when it meets corrupted records.</li>
+   *   </ul>
+   * </li>
    * </ul>
    *
    * @since 2.0.0
@@ -263,11 +269,13 @@ final class DataStreamReader private[sql](sparkSession: SparkSession) extends Lo
    * Loads a Parquet file stream, returning the result as a [[DataFrame]].
    *
    * You can set the following Parquet-specific option(s) for reading Parquet files:
+   * <ul>
    * <li>`maxFilesPerTrigger` (default: no max limit): sets the maximum number of new files to be
    * considered in every trigger.</li>
    * <li>`mergeSchema` (default is the value specified in `spark.sql.parquet.mergeSchema`): sets
    * whether we should merge schemas collected from all Parquet part-files. This will override
    * `spark.sql.parquet.mergeSchema`.</li>
+   * </ul>
    *
    * @since 2.0.0
    */
@@ -291,8 +299,10 @@ final class DataStreamReader private[sql](sparkSession: SparkSession) extends Lo
    * }}}
    *
    * You can set the following text-specific options to deal with text files:
+   * <ul>
    * <li>`maxFilesPerTrigger` (default: no max limit): sets the maximum number of new files to be
    * considered in every trigger.</li>
+   * </ul>
    *
    * @since 2.0.0
    */
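
And a rough sketch of the streaming-reader options documented above; the session, schema,
and input directory are placeholders, and file-based streams expect the schema to be given
explicitly:

    // assumes an existing SparkSession `spark` and a user-defined StructType `schema`;
    // "in/json" is a placeholder directory watched for new files
    val streamingDf = spark.readStream
      .schema(schema)                       // file streams: specify the schema up front
      .option("maxFilesPerTrigger", "1")    // consider at most one new file per trigger
      .option("mode", "DROPMALFORMED")      // drop corrupt records during parsing
      .json("in/json")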


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
