Repository: spark
Updated Branches:
  refs/heads/branch-2.1 06a56df22 -> 84b2af229


http://git-wip-us.apache.org/repos/asf/spark/blob/84b2af22/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
index 83857c3..e328b86 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
@@ -40,8 +40,8 @@ case class JdbcType(databaseTypeDefinition : String, jdbcNullType : Int)
  * SQL dialect of a certain database or jdbc driver.
  * Lots of databases define types that aren't explicitly supported
  * by the JDBC spec.  Some JDBC drivers also report inaccurate
- * information---for instance, BIT(n>1) being reported as a BIT type is quite
- * common, even though BIT in JDBC is meant for single-bit values.  Also, there
+ * information---for instance, BIT(n{@literal >}1) being reported as a BIT type is quite
+ * common, even though BIT in JDBC is meant for single-bit values. Also, there
  * does not appear to be a standard name for an unbounded string or binary
  * type; we use BLOB and CLOB by default but override with database-specific
  * alternatives when these are absent or do not behave correctly.
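
For context, the JdbcDialect API that this Scaladoc documents can be extended to work around such driver quirks. A minimal sketch, not part of this change, assuming a made-up `jdbc:exampledb` driver and illustrative type mappings:

{{{
import java.sql.Types

import org.apache.spark.sql.jdbc.{JdbcDialect, JdbcDialects, JdbcType}
import org.apache.spark.sql.types._

// Hypothetical dialect for a database whose driver reports BIT(n) loosely and
// which prefers TEXT/BYTEA over the default CLOB/BLOB definitions.
case object ExampleDialect extends JdbcDialect {

  // Claim JDBC URLs for the (made-up) database this dialect targets.
  override def canHandle(url: String): Boolean = url.startsWith("jdbc:exampledb")

  // Map driver-reported types to Catalyst types, e.g. single-bit BIT to BooleanType.
  override def getCatalystType(
      sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] =
    if (sqlType == Types.BIT && size <= 1) Some(BooleanType) else None

  // Provide database-specific definitions for unbounded string/binary columns.
  override def getJDBCType(dt: DataType): Option[JdbcType] = dt match {
    case StringType => Some(JdbcType("TEXT", Types.CLOB))
    case BinaryType => Some(JdbcType("BYTEA", Types.BLOB))
    case _ => None
  }
}

// Picked up by the JDBC data source once registered:
// JdbcDialects.registerDialect(ExampleDialect)
}}}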

http://git-wip-us.apache.org/repos/asf/spark/blob/84b2af22/sql/core/src/main/scala/org/apache/spark/sql/sources/interfaces.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/sources/interfaces.scala b/sql/core/src/main/scala/org/apache/spark/sql/sources/interfaces.scala
index ff6dd8c..f288ad6 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/sources/interfaces.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/sources/interfaces.scala
@@ -112,7 +112,7 @@ trait SchemaRelationProvider {
 
 /**
  * ::Experimental::
- * Implemented by objects that can produce a streaming [[Source]] for a specific format or system.
+ * Implemented by objects that can produce a streaming `Source` for a specific format or system.
  *
  * @since 2.0.0
  */
@@ -143,7 +143,7 @@ trait StreamSourceProvider {
 
 /**
  * ::Experimental::
- * Implemented by objects that can produce a streaming [[Sink]] for a specific format or system.
+ * Implemented by objects that can produce a streaming `Sink` for a specific format or system.
  *
  * @since 2.0.0
  */
@@ -185,7 +185,7 @@ trait CreatableRelationProvider {
 
 /**
  * Represents a collection of tuples with a known schema. Classes that extend BaseRelation must
- * be able to produce the schema of their data in the form of a [[StructType]]. Concrete
+ * be able to produce the schema of their data in the form of a `StructType`. Concrete
  * implementation should inherit from one of the descendant `Scan` classes, which define various
  * abstract methods for execution.
  *
@@ -216,10 +216,10 @@ abstract class BaseRelation {
 
   /**
    * Whether does it need to convert the objects in Row to internal representation, for example:
-   *  java.lang.String -> UTF8String
-   *  java.lang.Decimal -> Decimal
+   *  java.lang.String to UTF8String
+   *  java.lang.Decimal to Decimal
    *
-   * If `needConversion` is `false`, buildScan() should return an [[RDD]] of [[InternalRow]]
+   * If `needConversion` is `false`, buildScan() should return an `RDD` of `InternalRow`
    *
    * @note The internal representation is not stable across releases and thus data sources outside
    * of Spark SQL should leave this as true.
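
For reference, a minimal sketch of a `BaseRelation` with `TableScan`, showing where `schema` and `needConversion` fit. The relation class and its hard-coded rows are illustrative assumptions, not part of this change:

{{{
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.sql.sources.{BaseRelation, TableScan}
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

// Hypothetical relation that serves two hard-coded rows.
class ExampleRelation(override val sqlContext: SQLContext) extends BaseRelation with TableScan {

  // The schema of the tuples this relation produces, as a StructType.
  override def schema: StructType =
    StructType(StructField("id", IntegerType) :: StructField("name", StringType) :: Nil)

  // Default is true: buildScan() returns Rows of external types (java.lang.String, ...)
  // and Spark SQL converts them to the internal representation.
  override def needConversion: Boolean = true

  override def buildScan(): RDD[Row] =
    sqlContext.sparkContext.parallelize(Seq(Row(1, "a"), Row(2, "b")))
}
}}}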

http://git-wip-us.apache.org/repos/asf/spark/blob/84b2af22/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala
index a2d64da..5f5c8e2 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala
@@ -57,9 +57,17 @@ import org.apache.spark.util.SerializableJobConf
  * @param partition a map from the partition key to the partition value (optional). If the partition
  *                  value is optional, dynamic partition insert will be performed.
  *                  As an example, `INSERT INTO tbl PARTITION (a=1, b=2) AS ...` would have
- *                  Map('a' -> Some('1'), 'b' -> Some('2')),
+ *
+ *                  {{{
+ *                  Map('a' -> Some('1'), 'b' -> Some('2'))
+ *                  }}}
+ *
  *                  and `INSERT INTO tbl PARTITION (a=1, b) AS ...`
- *                  would have Map('a' -> Some('1'), 'b' -> None).
+ *                  would have
+ *
+ *                  {{{
+ *                  Map('a' -> Some('1'), 'b' -> None)
+ *                  }}}.
  * @param child the logical plan representing data to write to.
  * @param overwrite overwrite existing table or partitions.
  * @param ifNotExists If true, only write if the table or partition does not exist.

http://git-wip-us.apache.org/repos/asf/spark/blob/84b2af22/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala
index 4203308..e30e0f9 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala
@@ -175,7 +175,7 @@ private[hive] case class HiveGenericUDF(
 
 /**
  * Converts a Hive Generic User Defined Table Generating Function (UDTF) to a
- * [[Generator]].  Note that the semantics of Generators do not allow
+ * `Generator`. Note that the semantics of Generators do not allow
  * Generators to maintain state in between input rows.  Thus UDTFs that rely on partitioning
  * dependent operations like calls to `close()` before producing output will not operate the same as
  * in Hive.  However, in practice this should not affect compatibility for most sane UDTFs

http://git-wip-us.apache.org/repos/asf/spark/blob/84b2af22/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveWriterContainers.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveWriterContainers.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveWriterContainers.scala
index e53c3e4..16cfa9d 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveWriterContainers.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveWriterContainers.scala
@@ -48,7 +48,7 @@ import org.apache.spark.util.collection.unsafe.sort.UnsafeExternalSorter
 
 /**
  * Internal helper class that saves an RDD using a Hive OutputFormat.
- * It is based on [[SparkHadoopWriter]].
+ * It is based on `SparkHadoopWriter`.
  */
 private[hive] class SparkHiveWriterContainer(
     @transient private val jobConf: JobConf,

http://git-wip-us.apache.org/repos/asf/spark/blob/84b2af22/streaming/src/main/scala/org/apache/spark/streaming/StateSpec.scala
----------------------------------------------------------------------
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/StateSpec.scala b/streaming/src/main/scala/org/apache/spark/streaming/StateSpec.scala
index 7c1ea2f..c3b28bd 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/StateSpec.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/StateSpec.scala
@@ -30,7 +30,7 @@ import org.apache.spark.util.ClosureCleaner
  * `mapWithState` operation of a
  * [[org.apache.spark.streaming.dstream.PairDStreamFunctions pair DStream]] (Scala) or a
  * [[org.apache.spark.streaming.api.java.JavaPairDStream JavaPairDStream]] (Java).
- * Use [[org.apache.spark.streaming.StateSpec.function() StateSpec.function]] factory methods
+ * Use `org.apache.spark.streaming.StateSpec.function()` factory methods
  * to create instances of this class.
  *
  * Example in Scala:
@@ -100,7 +100,7 @@ sealed abstract class StateSpec[KeyType, ValueType, StateType, MappedType] exten
 
 /**
  * :: Experimental ::
- * Builder object for creating instances of [[org.apache.spark.streaming.StateSpec StateSpec]]
+ * Builder object for creating instances of `org.apache.spark.streaming.StateSpec`
  * that is used for specifying the parameters of the DStream transformation `mapWithState`
  * that is used for specifying the parameters of the DStream transformation
  * `mapWithState` operation of a
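
As an illustration of the `StateSpec.function` factory and the `mapWithState` operation described here, a minimal Scala sketch that keeps a running count per key. The `trackingFunc` name and the assumed `wordCounts` stream are illustrative, not part of this change:

{{{
import org.apache.spark.streaming.{State, StateSpec}

// Mapping function: add the new value to the running count kept in State,
// update the state, and emit (key, updated count).
def trackingFunc(key: String, value: Option[Int], state: State[Int]): (String, Int) = {
  val sum = state.getOption.getOrElse(0) + value.getOrElse(0)
  state.update(sum)
  (key, sum)
}

// Assuming an existing pair DStream of (word, 1) tuples:
// val wordCounts: DStream[(String, Int)] = ...
// val stateStream = wordCounts.mapWithState(StateSpec.function(trackingFunc _))
}}}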

http://git-wip-us.apache.org/repos/asf/spark/blob/84b2af22/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaPairDStream.scala
----------------------------------------------------------------------
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaPairDStream.scala b/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaPairDStream.scala
index aa4003c..2ec907c 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaPairDStream.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaPairDStream.scala
@@ -434,8 +434,8 @@ class JavaPairDStream[K, V](val dstream: DStream[(K, V)])(
    * Return a [[JavaMapWithStateDStream]] by applying a function to every key-value element of
    * `this` stream, while maintaining some state data for each unique key. The mapping function
    * and other specification (e.g. partitioners, timeouts, initial state data, etc.) of this
-   * transformation can be specified using [[StateSpec]] class. The state data is accessible in
-   * as a parameter of type [[State]] in the mapping function.
+   * transformation can be specified using `StateSpec` class. The state data is accessible in
+   * as a parameter of type `State` in the mapping function.
    *
    * Example of using `mapWithState`:
    * {{{

http://git-wip-us.apache.org/repos/asf/spark/blob/84b2af22/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaStreamingContext.scala
----------------------------------------------------------------------
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaStreamingContext.scala b/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaStreamingContext.scala
index b43b940..982e72c 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaStreamingContext.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaStreamingContext.scala
@@ -44,7 +44,7 @@ import org.apache.spark.streaming.scheduler.StreamingListener
  * A Java-friendly version of [[org.apache.spark.streaming.StreamingContext]] which is the main
  * entry point for Spark Streaming functionality. It provides methods to create
  * [[org.apache.spark.streaming.api.java.JavaDStream]] and
- * [[org.apache.spark.streaming.api.java.JavaPairDStream.]] from input sources. The internal
+ * [[org.apache.spark.streaming.api.java.JavaPairDStream]] from input sources. The internal
  * org.apache.spark.api.java.JavaSparkContext (see core Spark documentation) can be accessed
  * using `context.sparkContext`. After creating and transforming DStreams, the streaming
  * computation can be started and stopped using `context.start()` and `context.stop()`,

http://git-wip-us.apache.org/repos/asf/spark/blob/84b2af22/streaming/src/main/scala/org/apache/spark/streaming/dstream/PairDStreamFunctions.scala
----------------------------------------------------------------------
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/dstream/PairDStreamFunctions.scala b/streaming/src/main/scala/org/apache/spark/streaming/dstream/PairDStreamFunctions.scala
index ac73941..f38c1e7 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/dstream/PairDStreamFunctions.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/dstream/PairDStreamFunctions.scala
@@ -356,8 +356,8 @@ class PairDStreamFunctions[K, V](self: DStream[(K, V)])
    * Return a [[MapWithStateDStream]] by applying a function to every key-value element of
    * `this` stream, while maintaining some state data for each unique key. The mapping function
    * and other specification (e.g. partitioners, timeouts, initial state data, etc.) of this
-   * transformation can be specified using [[StateSpec]] class. The state data is accessible in
-   * as a parameter of type [[State]] in the mapping function.
+   * transformation can be specified using `StateSpec` class. The state data is accessible in
+   * as a parameter of type `State` in the mapping function.
    *
    * Example of using `mapWithState`:
    * {{{

