Repository: spark
Updated Branches:
  refs/heads/master c68344400 -> df7974b8e


SPARK-3359 [DOCS] sbt/sbt unidoc doesn't work with Java 8

This follows https://github.com/apache/spark/pull/2893, but does not completely
fix SPARK-3359 either. It fixes minor scaladoc/javadoc issues that Javadoc 8
treats as errors.

Author: Sean Owen <[email protected]>

Closes #2909 from srowen/SPARK-3359 and squashes the following commits:

f62c347 [Sean Owen] Fix some javadoc issues that javadoc 8 considers errors.
This does not cover all of the errors turned up when javadoc 8 runs on the
output of genjavadoc.
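
For reference, the recurring pattern in the hunks below: javadoc 8's doclint
rejects bare angle brackets and self-closing tags such as <p/> in doc comments,
so generics and inequalities get HTML-escaped. A minimal sketch of the accepted
form (the interface name is illustrative, not part of this commit):

  /** A function that returns key-value pairs (Tuple2&lt;K, V&gt;). */
  interface EscapedJavadocExample {}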


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/df7974b8
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/df7974b8
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/df7974b8

Branch: refs/heads/master
Commit: df7974b8e59d00e8efbb61629418fa6265c1ddab
Parents: c683444
Author: Sean Owen <[email protected]>
Authored: Sat Oct 25 23:18:02 2014 -0700
Committer: Xiangrui Meng <[email protected]>
Committed: Sat Oct 25 23:18:02 2014 -0700

----------------------------------------------------------------------
 core/src/main/java/org/apache/spark/TaskContext.java         | 2 --
 .../org/apache/spark/api/java/function/PairFunction.java     | 3 ++-
 .../main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala | 6 +++---
 .../main/scala/org/apache/spark/api/java/JavaPairRDD.scala   | 4 ++--
 .../scala/org/apache/spark/api/java/JavaSparkContext.scala   | 5 ++++-
 .../scala/org/apache/spark/mllib/feature/Normalizer.scala    | 2 +-
 .../apache/spark/mllib/linalg/distributed/RowMatrix.scala    | 7 ++++---
 .../src/main/scala/org/apache/spark/mllib/util/MLUtils.scala | 8 ++++----
 .../scala/org/apache/spark/sql/api/java/JavaSchemaRDD.scala  | 2 +-
 9 files changed, 21 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/df7974b8/core/src/main/java/org/apache/spark/TaskContext.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/spark/TaskContext.java b/core/src/main/java/org/apache/spark/TaskContext.java
index 2d998d4..0d69732 100644
--- a/core/src/main/java/org/apache/spark/TaskContext.java
+++ b/core/src/main/java/org/apache/spark/TaskContext.java
@@ -71,7 +71,6 @@ public abstract class TaskContext implements Serializable {
   /**
    * Add a (Java friendly) listener to be executed on task completion.
    * This will be called in all situation - success, failure, or cancellation.
-   * <p/>
    * An example use is for HadoopRDD to register a callback to close the input stream.
    */
   public abstract TaskContext addTaskCompletionListener(TaskCompletionListener listener);
@@ -79,7 +78,6 @@ public abstract class TaskContext implements Serializable {
   /**
    * Add a listener in the form of a Scala closure to be executed on task completion.
    * This will be called in all situations - success, failure, or cancellation.
-   * <p/>
    * An example use is for HadoopRDD to register a callback to close the input stream.
    */
   public abstract TaskContext addTaskCompletionListener(final Function1<TaskContext, Unit> f);

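For context, a minimal Java sketch of the listener API documented above (the
class and method names are illustrative, not part of this commit):

  import org.apache.spark.TaskContext;
  import org.apache.spark.util.TaskCompletionListener;

  class CleanupExample {
    // Call from inside task code (e.g. within mapPartitions), much like HadoopRDD
    // registers a callback to close its input stream.
    static void registerCleanup(TaskContext context) {
      context.addTaskCompletionListener(new TaskCompletionListener() {
        @Override
        public void onTaskCompletion(TaskContext ctx) {
          // Runs on success, failure, or cancellation: close streams, release resources.
        }
      });
    }
  }
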
http://git-wip-us.apache.org/repos/asf/spark/blob/df7974b8/core/src/main/java/org/apache/spark/api/java/function/PairFunction.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/spark/api/java/function/PairFunction.java b/core/src/main/java/org/apache/spark/api/java/function/PairFunction.java
index abd9bcc..99bf240 100644
--- a/core/src/main/java/org/apache/spark/api/java/function/PairFunction.java
+++ b/core/src/main/java/org/apache/spark/api/java/function/PairFunction.java
@@ -22,7 +22,8 @@ import java.io.Serializable;
 import scala.Tuple2;
 
 /**
- * A function that returns key-value pairs (Tuple2<K, V>), and can be used to construct PairRDDs.
+ * A function that returns key-value pairs (Tuple2&lt;K, V&gt;), and can be used to
+ * construct PairRDDs.
  */
 public interface PairFunction<T, K, V> extends Serializable {
   public Tuple2<K, V> call(T t) throws Exception;

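A minimal sketch of how PairFunction is typically used with mapToPair (the
wrapper class name is illustrative, not part of this commit):

  import scala.Tuple2;
  import org.apache.spark.api.java.JavaPairRDD;
  import org.apache.spark.api.java.JavaRDD;
  import org.apache.spark.api.java.function.PairFunction;

  class PairFunctionExample {
    // Turn each line into a (line, 1) pair, producing a JavaPairRDD.
    static JavaPairRDD<String, Integer> toPairs(JavaRDD<String> lines) {
      return lines.mapToPair(new PairFunction<String, String, Integer>() {
        @Override
        public Tuple2<String, Integer> call(String s) {
          return new Tuple2<String, Integer>(s, 1);
        }
      });
    }
  }
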
http://git-wip-us.apache.org/repos/asf/spark/blob/df7974b8/core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala b/core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala
index a6123bd..8e8f7f6 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaDoubleRDD.scala
@@ -114,7 +114,7 @@ class JavaDoubleRDD(val srdd: RDD[scala.Double]) extends JavaRDDLike[JDouble, Ja
    * Return an RDD with the elements from `this` that are not in `other`.
    *
    * Uses `this` partitioner/partition size, because even if `other` is huge, the resulting
-   * RDD will be <= us.
+   * RDD will be &lt;= us.
    */
   def subtract(other: JavaDoubleRDD): JavaDoubleRDD =
     fromRDD(srdd.subtract(other))
@@ -233,11 +233,11 @@ class JavaDoubleRDD(val srdd: RDD[scala.Double]) extends JavaRDDLike[JDouble, Ja
    * to the left except for the last which is closed
    *  e.g. for the array
    *  [1,10,20,50] the buckets are [1,10) [10,20) [20,50]
-   *  e.g 1<=x<10 , 10<=x<20, 20<=x<50
+   *  e.g 1&lt;=x&lt;10 , 10&lt;=x&lt;20, 20&lt;=x&lt;50
    *  And on the input of 1 and 50 we would have a histogram of 1,0,0
    *
    * Note: if your histogram is evenly spaced (e.g. [0, 10, 20, 30]) this can be switched
-   * from an O(log n) inseration to O(1) per element. (where n = # buckets) if you set evenBuckets
+   * from an O(log n) insertion to O(1) per element. (where n = # buckets) if you set evenBuckets
    * to true.
    * buckets must be sorted and not contain any duplicates.
    * buckets array must be at least two elements

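A short sketch of the bucketed histogram described in the scaladoc above (the
JavaSparkContext and sample values are assumptions for illustration):

  import java.util.Arrays;
  import org.apache.spark.api.java.JavaDoubleRDD;
  import org.apache.spark.api.java.JavaSparkContext;

  class HistogramExample {
    static long[] bucketCounts(JavaSparkContext jsc) {
      JavaDoubleRDD values = jsc.parallelizeDoubles(Arrays.asList(1.0, 5.0, 15.0, 50.0));
      // Buckets [1,10), [10,20), [20,50]; with this input the histogram is {2, 1, 1}.
      return values.histogram(new double[]{1.0, 10.0, 20.0, 50.0});
    }
  }
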
http://git-wip-us.apache.org/repos/asf/spark/blob/df7974b8/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala b/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala
index c38b965..e37f3ac 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala
@@ -392,7 +392,7 @@ class JavaPairRDD[K, V](val rdd: RDD[(K, V)])
    * Return an RDD with the elements from `this` that are not in `other`.
    *
    * Uses `this` partitioner/partition size, because even if `other` is huge, the resulting
-   * RDD will be <= us.
+   * RDD will be &lt;= us.
    */
   def subtract(other: JavaPairRDD[K, V]): JavaPairRDD[K, V] =
     fromRDD(rdd.subtract(other))
@@ -413,7 +413,7 @@ class JavaPairRDD[K, V](val rdd: RDD[(K, V)])
    * Return an RDD with the pairs from `this` whose keys are not in `other`.
    *
    * Uses `this` partitioner/partition size, because even if `other` is huge, the resulting
-   * RDD will be <= us.
+   * RDD will be &lt;= us.
    */
   def subtractByKey[W](other: JavaPairRDD[K, W]): JavaPairRDD[K, V] = {
     implicit val ctag: ClassTag[W] = fakeClassTag

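A brief sketch of the subtract/subtractByKey semantics documented above (the
RDD names and element types are illustrative, not part of this commit):

  import org.apache.spark.api.java.JavaPairRDD;

  class SubtractExample {
    // Keeps the pairs of `big` that do not appear in `small`; the result reuses
    // `big`'s partitioner/partition size, so it is at most as large as `big`.
    static JavaPairRDD<String, Integer> difference(JavaPairRDD<String, Integer> big,
                                                   JavaPairRDD<String, Integer> small) {
      return big.subtract(small);
    }

    // Keeps only the pairs of `big` whose keys are not present in `small`.
    static JavaPairRDD<String, Integer> keyDifference(JavaPairRDD<String, Integer> big,
                                                      JavaPairRDD<String, String> small) {
      return big.subtractByKey(small);
    }
  }
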
http://git-wip-us.apache.org/repos/asf/spark/blob/df7974b8/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala b/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala
index 45168ba..0565adf 100644
--- a/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala
+++ b/core/src/main/scala/org/apache/spark/api/java/JavaSparkContext.scala
@@ -215,7 +215,10 @@ class JavaSparkContext(val sc: SparkContext)
    *   hdfs://a-hdfs-path/part-nnnnn
    * }}}
    *
-   * Do `JavaPairRDD<String, String> rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")`,
+   * Do
+   * {{{
+   *   JavaPairRDD<String, String> rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")
+   * }}}
    *
    * <p> then `rdd` contains
    * {{{

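A short sketch of the wholeTextFiles call shown in the updated scaladoc (the
path and class name are placeholders):

  import org.apache.spark.api.java.JavaPairRDD;
  import org.apache.spark.api.java.JavaSparkContext;

  class WholeTextFilesExample {
    static JavaPairRDD<String, String> readDirectory(JavaSparkContext jsc) {
      // Each element is (fileName, fileContent) for every part-nnnnn file in the directory.
      return jsc.wholeTextFiles("hdfs://a-hdfs-path");
    }
  }
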
http://git-wip-us.apache.org/repos/asf/spark/blob/df7974b8/mllib/src/main/scala/org/apache/spark/mllib/feature/Normalizer.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/feature/Normalizer.scala b/mllib/src/main/scala/org/apache/spark/mllib/feature/Normalizer.scala
index 4734251..dfad25d 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/feature/Normalizer.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/feature/Normalizer.scala
@@ -26,7 +26,7 @@ import org.apache.spark.mllib.linalg.{Vector, Vectors}
  * :: Experimental ::
  * Normalizes samples individually to unit L^p^ norm
  *
- * For any 1 <= p < Double.PositiveInfinity, normalizes samples using
+ * For any 1 &lt;= p &lt; Double.PositiveInfinity, normalizes samples using
  * sum(abs(vector).^p^)^(1/p)^ as norm.
  *
  * For p = Double.PositiveInfinity, max(abs(vector)) will be used as norm for normalization.

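A minimal sketch of using Normalizer with p = 1 from Java (the wrapper class is
illustrative, not part of this commit):

  import org.apache.spark.mllib.feature.Normalizer;
  import org.apache.spark.mllib.linalg.Vector;

  class NormalizerExample {
    static Vector toUnitL1(Vector v) {
      // p = 1: divides the vector by sum(abs(vector)), giving unit L^1 norm.
      return new Normalizer(1.0).transform(v);
    }
  }
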
http://git-wip-us.apache.org/repos/asf/spark/blob/df7974b8/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/RowMatrix.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/RowMatrix.scala b/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/RowMatrix.scala
index ec2d481..10a515a 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/RowMatrix.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/linalg/distributed/RowMatrix.scala
@@ -152,7 +152,7 @@ class RowMatrix(
    * storing the right singular vectors, is computed via matrix multiplication as
    * U = A * (V * S^-1^), if requested by user. The actual method to use is determined
    * automatically based on the cost:
-   *  - If n is small (n < 100) or k is large compared with n (k > n / 2), we compute the Gramian
+   *  - If n is small (n &lt; 100) or k is large compared with n (k > n / 2), we compute the Gramian
    *    matrix first and then compute its top eigenvalues and eigenvectors locally on the driver.
    *    This requires a single pass with O(n^2^) storage on each executor and on the driver, and
    *    O(n^2^ k) time on the driver.
@@ -169,7 +169,8 @@ class RowMatrix(
    * @note The conditions that decide which method to use internally and the default parameters are
    *       subject to change.
    *
-   * @param k number of leading singular values to keep (0 < k <= n). It might return less than k if
+   * @param k number of leading singular values to keep (0 &lt; k &lt;= n).
+   *          It might return less than k if
    *          there are numerically zero singular values or there are not enough Ritz values
    *          converged before the maximum number of Arnoldi update iterations is reached (in case
    *          that matrix A is ill-conditioned).
@@ -192,7 +193,7 @@ class RowMatrix(
   /**
    * The actual SVD implementation, visible for testing.
    *
-   * @param k number of leading singular values to keep (0 < k <= n)
+   * @param k number of leading singular values to keep (0 &lt; k &lt;= n)
    * @param computeU whether to compute U
    * @param rCond the reciprocal condition number
    * @param maxIter max number of iterations (if ARPACK is used)

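A compact sketch of the computeSVD call documented above, called from Java (the
input JavaRDD<Vector> and k = 5 are assumptions for illustration):

  import org.apache.spark.api.java.JavaRDD;
  import org.apache.spark.mllib.linalg.Matrix;
  import org.apache.spark.mllib.linalg.SingularValueDecomposition;
  import org.apache.spark.mllib.linalg.Vector;
  import org.apache.spark.mllib.linalg.distributed.RowMatrix;

  class SvdExample {
    static SingularValueDecomposition<RowMatrix, Matrix> topSingularValues(JavaRDD<Vector> rows) {
      RowMatrix mat = new RowMatrix(rows.rdd());
      // Keep the 5 leading singular values (0 < k <= n), compute U, default rCond.
      return mat.computeSVD(5, true, 1e-9);
    }
  }
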
http://git-wip-us.apache.org/repos/asf/spark/blob/df7974b8/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
index ca35100..dce0adf 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
@@ -196,8 +196,8 @@ object MLUtils {
 
   /**
    * Load labeled data from a file. The data format used here is
-   * <L>, <f1> <f2> ...
-   * where <f1>, <f2> are feature values in Double and <L> is the corresponding label as Double.
+   * L, f1 f2 ...
+   * where f1, f2 are feature values in Double and L is the corresponding label as Double.
    *
    * @param sc SparkContext
    * @param dir Directory to the input data files.
@@ -219,8 +219,8 @@ object MLUtils {
 
   /**
    * Save labeled data to a file. The data format used here is
-   * <L>, <f1> <f2> ...
-   * where <f1>, <f2> are feature values in Double and <L> is the corresponding label as Double.
+   * L, f1 f2 ...
+   * where f1, f2 are feature values in Double and L is the corresponding label as Double.
    *
    * @param data An RDD of LabeledPoints containing data to be saved.
    * @param dir Directory to save the data.

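A small sketch of the load/save round trip for the "L, f1 f2 ..." format
described above (paths and class name are placeholders):

  import org.apache.spark.api.java.JavaSparkContext;
  import org.apache.spark.mllib.regression.LabeledPoint;
  import org.apache.spark.mllib.util.MLUtils;
  import org.apache.spark.rdd.RDD;

  class LabeledDataExample {
    static void roundTrip(JavaSparkContext jsc) {
      // Input lines look like "1.0, 2.5 0.0 7.1": a Double label, then Double features.
      RDD<LabeledPoint> points = MLUtils.loadLabeledData(jsc.sc(), "hdfs://path/to/input");
      MLUtils.saveLabeledData(points, "hdfs://path/to/output");
    }
  }
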
http://git-wip-us.apache.org/repos/asf/spark/blob/df7974b8/sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSchemaRDD.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSchemaRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSchemaRDD.scala
index e7faba0..1e0ccb3 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSchemaRDD.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/api/java/JavaSchemaRDD.scala
@@ -193,7 +193,7 @@ class JavaSchemaRDD(
    * Return an RDD with the elements from `this` that are not in `other`.
    *
    * Uses `this` partitioner/partition size, because even if `other` is huge, the resulting
-   * RDD will be <= us.
+   * RDD will be &lt;= us.
    */
   def subtract(other: JavaSchemaRDD): JavaSchemaRDD =
     this.baseSchemaRDD.subtract(other.baseSchemaRDD).toJavaSchemaRDD

