Remove unnecessary ClassTags
Project: http://git-wip-us.apache.org/repos/asf/incubator-spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-spark/commit/333d58df
Tree: http://git-wip-us.apache.org/repos/asf/incubator-spark/tree/333d58df
Diff: http://git-wip-us.apache.org/repos/asf/incubator-spark/diff/333d58df

Branch: refs/heads/master
Commit: 333d58df8676b30adc86e479579e2659e24d01a3
Parents: 838b0e7
Author: Andrew Or <andrewo...@gmail.com>
Authored: Fri Jan 3 17:55:26 2014 -0800
Committer: Andrew Or <andrewo...@gmail.com>
Committed: Fri Jan 3 17:55:26 2014 -0800

----------------------------------------------------------------------
 core/src/main/scala/org/apache/spark/Aggregator.scala          | 4 +---
 .../main/scala/org/apache/spark/rdd/PairRDDFunctions.scala     | 7 +++----
 2 files changed, 4 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-spark/blob/333d58df/core/src/main/scala/org/apache/spark/Aggregator.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/Aggregator.scala b/core/src/main/scala/org/apache/spark/Aggregator.scala
index c9e3e8e..bb488f4 100644
--- a/core/src/main/scala/org/apache/spark/Aggregator.scala
+++ b/core/src/main/scala/org/apache/spark/Aggregator.scala
@@ -17,8 +17,6 @@
 
 package org.apache.spark
 
-import scala.reflect.ClassTag
-
 import org.apache.spark.util.collection.{AppendOnlyMap, ExternalAppendOnlyMap}
 
 /**
@@ -28,7 +26,7 @@ import org.apache.spark.util.collection.{AppendOnlyMap, ExternalAppendOnlyMap}
  * @param mergeValue function to merge a new value into the aggregation result.
  * @param mergeCombiners function to merge outputs from multiple mergeValue function.
  */
-case class Aggregator[K, V, C: ClassTag] (
+case class Aggregator[K, V, C] (
     createCombiner: V => C,
     mergeValue: (C, V) => C,
     mergeCombiners: (C, C) => C) {

http://git-wip-us.apache.org/repos/asf/incubator-spark/blob/333d58df/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
index 7b1759e..f8cd362 100644
--- a/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/PairRDDFunctions.scala
@@ -72,7 +72,7 @@ class PairRDDFunctions[K: ClassTag, V: ClassTag](self: RDD[(K, V)])
    * In addition, users can control the partitioning of the output RDD, and whether to perform
    * map-side aggregation (if a mapper can produce multiple items with the same key).
    */
-  def combineByKey[C: ClassTag](createCombiner: V => C,
+  def combineByKey[C](createCombiner: V => C,
       mergeValue: (C, V) => C,
       mergeCombiners: (C, C) => C,
       partitioner: Partitioner,
@@ -110,7 +110,7 @@ class PairRDDFunctions[K: ClassTag, V: ClassTag](self: RDD[(K, V)])
   /**
    * Simplified version of combineByKey that hash-partitions the output RDD.
    */
-  def combineByKey[C: ClassTag](createCombiner: V => C,
+  def combineByKey[C](createCombiner: V => C,
       mergeValue: (C, V) => C,
       mergeCombiners: (C, C) => C,
       numPartitions: Int): RDD[(K, C)] = {
@@ -338,8 +338,7 @@ class PairRDDFunctions[K: ClassTag, V: ClassTag](self: RDD[(K, V)])
    * Simplified version of combineByKey that hash-partitions the resulting RDD using the
    * existing partitioner/parallelism level.
    */
-  def combineByKey[C: ClassTag](
-      createCombiner: V => C, mergeValue: (C, V) => C, mergeCombiners: (C, C) => C)
+  def combineByKey[C](createCombiner: V => C, mergeValue: (C, V) => C, mergeCombiners: (C, C) => C)
     : RDD[(K, C)] = {
     combineByKey(createCombiner, mergeValue, mergeCombiners, defaultPartitioner(self))
   }
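Why these bounds were removable: in Scala, a context bound like C: ClassTag desugars
into an extra implicit ClassTag[C] parameter, and a ClassTag is only needed when the
runtime class of C matters (most commonly to allocate an Array[C]). Neither Aggregator
nor these combineByKey overloads does that with C, and the bound is contagious: because
Aggregator demanded a ClassTag[C], every generic method that constructed one, such as
combineByKey, had to declare the same bound. The sketch below is a minimal, self-contained
illustration, not Spark code; SimpleAggregator, combineValuesByKey, and combineByKeyLocal
are hypothetical names.

import scala.collection.mutable

// Hypothetical stand-in for Spark's Aggregator. Note: no ClassTag bound
// on C; the three functions simply pass C values through.
case class SimpleAggregator[K, V, C](
    createCombiner: V => C,
    mergeValue: (C, V) => C,
    mergeCombiners: (C, C) => C) {

  // Map-side combine: fold each (K, V) pair into a per-key combiner.
  // The runtime class of C is never inspected, so no ClassTag is needed.
  def combineValuesByKey(iter: Iterator[(K, V)]): Iterator[(K, C)] = {
    val combiners = mutable.Map.empty[K, C]
    for ((k, v) <- iter) {
      combiners(k) = combiners.get(k) match {
        case Some(c) => mergeValue(c, v)
        case None    => createCombiner(v)
      }
    }
    combiners.iterator
  }
}

object Demo extends App {
  // A combineByKey-like helper that constructs an aggregator for an
  // abstract C. Had SimpleAggregator kept a C: ClassTag bound, this
  // method would be forced to declare one as well; that is the same
  // coupling the commit removes between combineByKey and Aggregator.
  def combineByKeyLocal[K, V, C](pairs: Iterator[(K, V)])(
      createCombiner: V => C,
      mergeValue: (C, V) => C,
      mergeCombiners: (C, C) => C): Map[K, C] =
    SimpleAggregator[K, V, C](createCombiner, mergeValue, mergeCombiners)
      .combineValuesByKey(pairs)
      .toMap

  // Prints (key order may vary): Map(a -> List(3, 1), b -> List(2))
  println(combineByKeyLocal(Iterator("a" -> 1, "b" -> 2, "a" -> 3))(
    v => List(v), (c, v) => v :: c, (c1, c2) => c1 ::: c2))
}

In the real API the effect is the same: after this commit, combineByKey can be invoked
from generic code without threading a ClassTag for the combiner type through the call chain.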