Github user sethah commented on a diff in the pull request:

    https://github.com/apache/spark/pull/15342#discussion_r81862372
  
    --- Diff: mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala ---
    @@ -409,74 +361,39 @@ class KMeans private (
           bcNewCentersList += bcNewCenters
           val preCosts = costs
           costs = data.zip(preCosts).map { case (point, cost) =>
    -          Array.tabulate(runs) { r =>
    -            math.min(KMeans.pointCost(bcNewCenters.value(r), point), cost(r))
    -          }
    -        }.persist(StorageLevel.MEMORY_AND_DISK)
    -      val sumCosts = costs
    -        .aggregate(new Array[Double](runs))(
    -          seqOp = (s, v) => {
    -            // s += v
    -            var r = 0
    -            while (r < runs) {
    -              s(r) += v(r)
    -              r += 1
    -            }
    -            s
    -          },
    -          combOp = (s0, s1) => {
    -            // s0 += s1
    -            var r = 0
    -            while (r < runs) {
    -              s0(r) += s1(r)
    -              r += 1
    -            }
    -            s0
    -          }
    -        )
    +        math.min(KMeans.pointCost(bcNewCenters.value, point), cost)
    +      }.persist(StorageLevel.MEMORY_AND_DISK)
    +      val sumCosts = costs.sum()
     
           bcNewCenters.unpersist(blocking = false)
           preCosts.unpersist(blocking = false)
     
    -      val chosen = data.zip(costs).mapPartitionsWithIndex { (index, pointsWithCosts) =>
    +      val chosen = data.zip(costs).mapPartitionsWithIndex { (index, pointCosts) =>
             val rand = new XORShiftRandom(seed ^ (step << 16) ^ index)
    -        pointsWithCosts.flatMap { case (p, c) =>
    -          val rs = (0 until runs).filter { r =>
    -            rand.nextDouble() < 2.0 * c(r) * k / sumCosts(r)
    -          }
    -          if (rs.nonEmpty) Some((p, rs)) else None
    -        }
    +        pointCosts.filter { case (_, c) => rand.nextDouble() < 2.0 * c * k / sumCosts }.map(_._1)
           }.collect()
    -      mergeNewCenters()
    -      chosen.foreach { case (p, rs) =>
    -        rs.foreach(newCenters(_) += p.toDense)
    -      }
    +      newCenters = chosen.map(_.toDense)
    +      centers ++= newCenters
           step += 1
         }
     
    -    mergeNewCenters()
         costs.unpersist(blocking = false)
         bcNewCentersList.foreach(_.destroy(false))
     
    -    // Finally, we might have a set of more than k candidate centers for each run; weigh each
    +    if (centers.size <= k) {
    +      return centers.toArray
    --- End diff --
    
    I prefer to avoid the `return` keyword and just put the rest of the code
    under an `else` here. But it is a small preference.
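
    For illustration, a minimal standalone sketch of the shape I mean (the
    names and the else body are illustrative only, not the actual
    KMeans.scala code):

        // Return the result as the value of the if/else expression instead
        // of using an early `return`.
        def finalCenters(centers: Seq[Array[Double]], k: Int): Array[Array[Double]] = {
          if (centers.size <= k) {
            // Already have k or fewer candidates; use them as-is.
            centers.toArray
          } else {
            // Placeholder for the weighting / final-selection logic that
            // currently follows the early return.
            centers.take(k).toArray
          }
        }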

