Repository: spark
Updated Branches:
  refs/heads/master e9b16e67c -> da738cffa
[MINOR] Renamed variables in SparkKMeans.scala, LocalKMeans.scala and kmeans.py to improve readability

With the previous names it could look as though the reduceByKey was separately summing the abscissas and ordinates of some 2D points. Renaming the variables should make the example easier to understand, especially for readers who, like me, are just getting started with functional programming.

Author: Niccolo Becchi <niccolo.bec...@gmail.com>
Author: pippobaudos <niccolo.bec...@gmail.com>

Closes #5875 from pippobaudos/patch-1 and squashes the following commits:

3bb3a47 [pippobaudos] renamed variables in LocalKMeans.scala and kmeans.py to simplify readability
2c2a7a2 [Niccolo Becchi] Update SparkKMeans.scala


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/da738cff
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/da738cff
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/da738cff

Branch: refs/heads/master
Commit: da738cffa8f7e12545b47f31dcb051f2927e4149
Parents: e9b16e6
Author: Niccolo Becchi <niccolo.bec...@gmail.com>
Authored: Tue May 5 08:54:42 2015 +0100
Committer: Sean Owen <so...@cloudera.com>
Committed: Tue May 5 08:54:42 2015 +0100

----------------------------------------------------------------------
 examples/src/main/python/kmeans.py                        | 10 +++++-----
 .../scala/org/apache/spark/examples/LocalKMeans.scala     |  2 +-
 .../scala/org/apache/spark/examples/SparkKMeans.scala     |  2 +-
 3 files changed, 7 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/da738cff/examples/src/main/python/kmeans.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/kmeans.py b/examples/src/main/python/kmeans.py
index 1939150..1456c87 100755
--- a/examples/src/main/python/kmeans.py
+++ b/examples/src/main/python/kmeans.py
@@ -68,14 +68,14 @@ if __name__ == "__main__":
         closest = data.map(
             lambda p: (closestPoint(p, kPoints), (p, 1)))
         pointStats = closest.reduceByKey(
-            lambda (x1, y1), (x2, y2): (x1 + x2, y1 + y2))
+            lambda (p1, c1), (p2, c2): (p1 + p2, c1 + c2))
         newPoints = pointStats.map(
-            lambda xy: (xy[0], xy[1][0] / xy[1][1])).collect()
+            lambda st: (st[0], st[1][0] / st[1][1])).collect()
 
-        tempDist = sum(np.sum((kPoints[x] - y) ** 2) for (x, y) in newPoints)
+        tempDist = sum(np.sum((kPoints[iK] - p) ** 2) for (iK, p) in newPoints)
 
-        for (x, y) in newPoints:
-            kPoints[x] = y
+        for (iK, p) in newPoints:
+            kPoints[iK] = p
 
     print("Final centers: " + str(kPoints))


http://git-wip-us.apache.org/repos/asf/spark/blob/da738cff/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala b/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala
index f73eac1..04fc0a0 100644
--- a/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/LocalKMeans.scala
@@ -99,7 +99,7 @@ object LocalKMeans {
 
       var pointStats = mappings.map { pair =>
         pair._2.reduceLeft [(Int, (Vector[Double], Int))] {
-          case ((id1, (x1, y1)), (id2, (x2, y2))) => (id1, (x1 + x2, y1 + y2))
+          case ((id1, (p1, c1)), (id2, (p2, c2))) => (id1, (p1 + p2, c1 + c2))
         }
       }


http://git-wip-us.apache.org/repos/asf/spark/blob/da738cff/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala b/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala
index 48e8d11..b514d91 100644
--- a/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/SparkKMeans.scala
@@ -79,7 +79,7 @@ object SparkKMeans {
     while(tempDist > convergeDist) {
       val closest = data.map (p => (closestPoint(p, kPoints), (p, 1)))
 
-      val pointStats = closest.reduceByKey{case ((x1, y1), (x2, y2)) => (x1 + x2, y1 + y2)}
+      val pointStats = closest.reduceByKey{case ((p1, c1), (p2, c2)) => (p1 + p2, c1 + c2)}
 
       val newPoints = pointStats.map {pair =>
         (pair._1, pair._2._1 * (1.0 / pair._2._2))}.collectAsMap()


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
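
For context on what the renamed (p1, c1) / (p2, c2) pairs represent: each record is keyed by the index of its closest center with a value of (point, 1), so the reduceByKey sums whole point vectors and their counts per center, and dividing the summed vector by the count yields the new center. The following standalone sketch is not part of the commit; it emulates that per-key aggregation with plain Scala collections instead of a Spark RDD, and the object name, helper, and sample data are illustrative only.

object KMeansStepSketch {
  // Element-wise vector addition for the summed points (stand-in for breeze's `+`).
  def add(a: Vector[Double], b: Vector[Double]): Vector[Double] =
    a.zip(b).map { case (x, y) => x + y }

  def main(args: Array[String]): Unit = {
    // Each element: (index of the closest center, (point, 1)) -- the shape SparkKMeans builds.
    val closest: Seq[(Int, (Vector[Double], Int))] = Seq(
      (0, (Vector(1.0, 2.0), 1)),
      (0, (Vector(3.0, 4.0), 1)),
      (1, (Vector(10.0, 10.0), 1))
    )

    // Local equivalent of reduceByKey: per center, sum the points and sum the counts.
    val pointStats: Map[Int, (Vector[Double], Int)] =
      closest.groupBy(_._1).map { case (k, vs) =>
        k -> vs.map(_._2).reduce { case ((p1, c1), (p2, c2)) => (add(p1, p2), c1 + c2) }
      }

    // New center = summed point / count, mirroring the newPoints step in the example.
    val newPoints = pointStats.map { case (k, (sumVec, count)) => k -> sumVec.map(_ / count) }
    println(newPoints) // Map(0 -> Vector(2.0, 3.0), 1 -> Vector(10.0, 10.0))
  }
}

The sketch makes the renaming's point concrete: nothing here is an x/y coordinate pair; the second tuple element is a count, and the first is a whole point vector, summed component-wise across all points assigned to the same center.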