Repository: spark
Updated Branches:
  refs/heads/master b431e6747 -> e45daf226


[SPARK-1766] sorted functions to meet pedantic requirements

Pedantry is underrated

Author: Chris Cope <[email protected]>

Closes #1859 from copester/master and squashes the following commits:

0fb4499 [Chris Cope] [SPARK-1766] sorted functions to meet pedantic requirements


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/e45daf22
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/e45daf22
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/e45daf22

Branch: refs/heads/master
Commit: e45daf226d780f4a7aaabc2de9f04367bee16f26
Parents: b431e67
Author: Chris Cope <[email protected]>
Authored: Sat Aug 9 20:58:56 2014 -0700
Committer: Patrick Wendell <[email protected]>
Committed: Sat Aug 9 20:58:56 2014 -0700

----------------------------------------------------------------------
 .../org/apache/spark/rdd/PairRDDFunctions.scala | 38 ++++++++++----------
 1 file changed, 19 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/e45daf22/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
index 93af50c..5dd6472 100644
--- a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
@@ -238,6 +238,25 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
   }
 
   /**
+   * Merge the values for each key using an associative reduce function. This will also perform
+   * the merging locally on each mapper before sending results to a reducer, similarly to a
+   * "combiner" in MapReduce. Output will be hash-partitioned with numPartitions partitions.
+   */
+  def reduceByKey(func: (V, V) => V, numPartitions: Int): RDD[(K, V)] = {
+    reduceByKey(new HashPartitioner(numPartitions), func)
+  }
+
+  /**
+   * Merge the values for each key using an associative reduce function. This will also perform
+   * the merging locally on each mapper before sending results to a reducer, similarly to a
+   * "combiner" in MapReduce. Output will be hash-partitioned with the existing partitioner/
+   * parallelism level.
+   */
+  def reduceByKey(func: (V, V) => V): RDD[(K, V)] = {
+    reduceByKey(defaultPartitioner(self), func)
+  }
+
+  /**
    * Merge the values for each key using an associative reduce function, but return the results
    * immediately to the master as a Map. This will also perform the merging locally on each mapper
    * before sending results to a reducer, similarly to a "combiner" in MapReduce.
@@ -375,15 +394,6 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
   }
 
   /**
-   * Merge the values for each key using an associative reduce function. This will also perform
-   * the merging locally on each mapper before sending results to a reducer, similarly to a
-   * "combiner" in MapReduce. Output will be hash-partitioned with numPartitions partitions.
-  def reduceByKey(func: (V, V) => V, numPartitions: Int): RDD[(K, V)] = {
-    reduceByKey(new HashPartitioner(numPartitions), func)
-  }
-
-  /**
    * Group the values for each key in the RDD into a single sequence. Allows controlling the
    * partitioning of the resulting key-value pair RDD by passing a Partitioner.
    *
@@ -483,16 +493,6 @@ class PairRDDFunctions[K, V](self: RDD[(K, V)])
   }
 
   /**
-   * Merge the values for each key using an associative reduce function. This will also perform
-   * the merging locally on each mapper before sending results to a reducer, similarly to a
-   * "combiner" in MapReduce. Output will be hash-partitioned with the existing partitioner/
-   * parallelism level.
-  def reduceByKey(func: (V, V) => V): RDD[(K, V)] = {
-    reduceByKey(defaultPartitioner(self), func)
-  }
-
-  /**
    * Group the values for each key in the RDD into a single sequence. Hash-partitions the
    * resulting RDD with the existing partitioner/parallelism level.
    *


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to