Repository: spark
Updated Branches:
  refs/heads/branch-1.5 2273e7432 -> ef961ed48


[SPARK-8925] [MLLIB] Add @since tags to mllib.util

Went through the change history of MLUtils.scala and picked up the release
in which each change went in.
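
For context, `@since` is a plain Scaladoc tag recording the release in which
a public API first shipped. A minimal sketch of the convention on a
hypothetical method (not taken from this patch):

    object Example {
      /**
       * Parses a whitespace-separated line of doubles.
       * @since 1.0.0  // release in which this method first appeared
       */
      def parseLine(line: String): Array[Double] =
        line.trim.split("\\s+").map(_.toDouble)
    }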

Author: Sudhakar Thota <sudhakarth...@yahoo.com>
Author: Sudhakar Thota <sudhakarth...@sudhakars-mbp-2.usca.ibm.com>

Closes #7436 from sthota2014/SPARK-8925_thotas.

(cherry picked from commit 017b5de07ef6cff249e984a2ab781c520249ac76)
Signed-off-by: Xiangrui Meng <m...@databricks.com>


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/ef961ed4
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/ef961ed4
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/ef961ed4

Branch: refs/heads/branch-1.5
Commit: ef961ed48a4f45447f0e0ad256b040c7ab2d78d9
Parents: 2273e74
Author: Sudhakar Thota <sudhakarth...@yahoo.com>
Authored: Tue Aug 11 14:31:51 2015 -0700
Committer: Xiangrui Meng <m...@databricks.com>
Committed: Tue Aug 11 14:32:01 2015 -0700

----------------------------------------------------------------------
 .../org/apache/spark/mllib/util/MLUtils.scala   | 22 +++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/ef961ed4/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala b/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
index 7c5cfa7..26eb84a 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala
@@ -64,6 +64,7 @@ object MLUtils {
    *                    feature dimensions.
    * @param minPartitions min number of partitions
    * @return labeled data stored as an RDD[LabeledPoint]
+   * @since 1.0.0
    */
   def loadLibSVMFile(
       sc: SparkContext,
@@ -113,7 +114,10 @@ object MLUtils {
   }
 
   // Convenient methods for `loadLibSVMFile`.
-
+  
+  /**
+   * @since 1.0.0
+   */
   @deprecated("use method without multiclass argument, which no longer has 
effect", "1.1.0")
   def loadLibSVMFile(
       sc: SparkContext,
@@ -126,6 +130,7 @@ object MLUtils {
   /**
    * Loads labeled data in the LIBSVM format into an RDD[LabeledPoint], with the default number of
    * partitions.
+   * @since 1.0.0
    */
   def loadLibSVMFile(
       sc: SparkContext,
@@ -133,6 +138,9 @@ object MLUtils {
       numFeatures: Int): RDD[LabeledPoint] =
     loadLibSVMFile(sc, path, numFeatures, sc.defaultMinPartitions)
 
+  /**
+   * @since 1.0.0
+   */
   @deprecated("use method without multiclass argument, which no longer has 
effect", "1.1.0")
   def loadLibSVMFile(
       sc: SparkContext,
@@ -141,6 +149,9 @@ object MLUtils {
       numFeatures: Int): RDD[LabeledPoint] =
     loadLibSVMFile(sc, path, numFeatures)
 
+  /**
+   * @since 1.0.0
+   */
   @deprecated("use method without multiclass argument, which no longer has 
effect", "1.1.0")
   def loadLibSVMFile(
       sc: SparkContext,
@@ -151,6 +162,7 @@ object MLUtils {
   /**
    * Loads binary labeled data in the LIBSVM format into an RDD[LabeledPoint], with number of
    * features determined automatically and the default number of partitions.
+   * @since 1.0.0
    */
   def loadLibSVMFile(sc: SparkContext, path: String): RDD[LabeledPoint] =
     loadLibSVMFile(sc, path, -1)
@@ -181,12 +193,14 @@ object MLUtils {
    * @param path file or directory path in any Hadoop-supported file system URI
    * @param minPartitions min number of partitions
    * @return vectors stored as an RDD[Vector]
+   * @since 1.1.0
    */
  def loadVectors(sc: SparkContext, path: String, minPartitions: Int): RDD[Vector] =
     sc.textFile(path, minPartitions).map(Vectors.parse)
 
   /**
    * Loads vectors saved using `RDD[Vector].saveAsTextFile` with the default number of partitions.
+   * @since 1.1.0
    */
   def loadVectors(sc: SparkContext, path: String): RDD[Vector] =
     sc.textFile(path, sc.defaultMinPartitions).map(Vectors.parse)
@@ -197,6 +211,7 @@ object MLUtils {
    * @param path file or directory path in any Hadoop-supported file system URI
    * @param minPartitions min number of partitions
    * @return labeled points stored as an RDD[LabeledPoint]
+   * @since 1.1.0
    */
  def loadLabeledPoints(sc: SparkContext, path: String, minPartitions: Int): RDD[LabeledPoint] =
     sc.textFile(path, minPartitions).map(LabeledPoint.parse)
@@ -204,6 +219,7 @@ object MLUtils {
   /**
    * Loads labeled points saved using `RDD[LabeledPoint].saveAsTextFile` with the default number of
    * partitions.
+   * @since 1.1.0
    */
   def loadLabeledPoints(sc: SparkContext, dir: String): RDD[LabeledPoint] =
     loadLabeledPoints(sc, dir, sc.defaultMinPartitions)
@@ -220,6 +236,7 @@ object MLUtils {
    *
    * @deprecated Should use [[org.apache.spark.rdd.RDD#saveAsTextFile]] for saving and
    *            [[org.apache.spark.mllib.util.MLUtils#loadLabeledPoints]] for loading.
+   * @since 1.0.0
    */
   @deprecated("Should use MLUtils.loadLabeledPoints instead.", "1.0.1")
   def loadLabeledData(sc: SparkContext, dir: String): RDD[LabeledPoint] = {
@@ -241,6 +258,7 @@ object MLUtils {
    *
    * @deprecated Should use [[org.apache.spark.rdd.RDD#saveAsTextFile]] for saving and
    *            [[org.apache.spark.mllib.util.MLUtils#loadLabeledPoints]] for loading.
+   * @since 1.0.0
    */
   @deprecated("Should use RDD[LabeledPoint].saveAsTextFile instead.", "1.0.1")
   def saveLabeledData(data: RDD[LabeledPoint], dir: String) {
@@ -253,6 +271,7 @@ object MLUtils {
    * Return a k element array of pairs of RDDs with the first element of each pair
    * containing the training data, a complement of the validation data and the second
    * element, the validation data, containing a unique 1/kth of the data. Where k=numFolds.
+   * @since 1.0.0
    */
   @Experimental
  def kFold[T: ClassTag](rdd: RDD[T], numFolds: Int, seed: Int): Array[(RDD[T], RDD[T])] = {
@@ -268,6 +287,7 @@ object MLUtils {
 
   /**
    * Returns a new vector with `1.0` (bias) appended to the input vector.
+   * @since 1.0.0
    */
   def appendBias(vector: Vector): Vector = {
     vector match {

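For reference, a hedged usage sketch of the loaders and `appendBias` touched
above (the master URL and input path are placeholder assumptions, not part of
this commit):

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.mllib.linalg.Vectors
    import org.apache.spark.mllib.util.MLUtils

    object LoadExample {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(
          new SparkConf().setAppName("LoadExample").setMaster("local[*]"))
        // Feature count inferred automatically, default number of partitions.
        val points = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")
        println(s"loaded ${points.count()} labeled points")
        // Appends a 1.0 bias term to the end of the vector.
        val withBias = MLUtils.appendBias(Vectors.dense(1.0, 2.0))  // [1.0, 2.0, 1.0]
        println(withBias)
        sc.stop()
      }
    }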

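And a sketch of consuming `kFold`, whose hunk above documents the
train/validation split (same local-mode assumptions as the previous sketch):

    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.mllib.util.MLUtils

    object KFoldExample {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(
          new SparkConf().setAppName("KFoldExample").setMaster("local[*]"))
        val rdd = sc.parallelize(1 to 100)
        // Each pair is (training data, validation data); the validation sides
        // are disjoint and each holds roughly 1/numFolds of the records.
        val folds = MLUtils.kFold(rdd, numFolds = 5, seed = 42)
        folds.zipWithIndex.foreach { case ((train, valid), i) =>
          println(s"fold $i: train=${train.count()} validation=${valid.count()}")
        }
        sc.stop()
      }
    }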