albertusk95 commented on a change in pull request #14937: 
[SPARK-8519][SPARK-11560] [ML] [MLlib] Optimize KMeans implementation.
URL: https://github.com/apache/spark/pull/14937#discussion_r305282066
 
 

 ##########
 File path: mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala
 ##########
 @@ -211,17 +220,79 @@ class KMeans private (
         + " parent RDDs are also uncached.")
     }
 
-    // Compute squared norms and cache them.
-    val norms = data.map(Vectors.norm(_, 2.0))
-    norms.persist()
-    val zippedData = data.zip(norms).map { case (v, norm) =>
-      new VectorWithNorm(v, norm)
+    val zippedData = data.map { x => new VectorWithNorm(x) }
+
+    val initStartTime = System.nanoTime()
+
+    val centers = initialModel match {
+      case Some(kMeansCenters) =>
+        kMeansCenters.clusterCenters.map(new VectorWithNorm(_))
+      case None =>
+        if (initializationMode == KMeans.RANDOM) {
+          initRandom(zippedData)
+        } else {
+          initKMeansParallel(zippedData)
+        }
+    }
+
+    val initTimeInSeconds = (System.nanoTime() - initStartTime) / 1e9
+    logInfo(s"Initialization with $initializationMode took " + 
"%.3f".format(initTimeInSeconds) +
+      " seconds.")
+
+    val samplePoint = data.first()
+    val dim = samplePoint.size
+    val isSparse = samplePoint.isInstanceOf[SparseVector]
+    if (isSparse) {
+      logWarning("KMeans will be less efficient if the input data is Sparse 
Vector.")
+    }
+
 +    // Pack the points into per-partition blocks and cache them.
+    val blockData = zippedData.mapPartitions { iter =>
+      iter.grouped(blockSize).map { points =>
+        val realSize = points.size
+        val pointNormArray = new Array[Double](realSize)
+        var numRows = 0
+
+        val pointMatrix = if (isSparse) {
+          val colPtrs = new Array[Int](realSize + 1)
+          val rowIndices = mutable.ArrayBuilder.make[Int]
+          val values = mutable.ArrayBuilder.make[Double]
+          var nnz = 0
+
+          points.foreach { point =>
+            val sv = point.vector.asInstanceOf[SparseVector]
+            sv.foreachActive { (index, value) =>
+              rowIndices += index
+              values += value
+              nnz += 1
+            }
+
+            pointNormArray(numRows) = point.norm
+            numRows += 1
+            colPtrs(numRows) = nnz
+          }
 +          new SparseMatrix(numRows, dim, colPtrs, rowIndices.result(), values.result(), true)
+        } else {
+          val pointArray = new Array[Double](realSize * dim)
+          points.foreach { point =>
 +            System.arraycopy(point.vector.toArray, 0, pointArray, numRows * dim, dim)
+            pointNormArray(numRows) = point.norm
+            numRows += 1
+          }
+          new DenseMatrix(numRows, dim, pointArray, true)
+        }
+
+        (pointMatrix, pointNormArray)
+      }
     }
-    val model = runAlgorithm(zippedData, instr)
-    norms.unpersist()
+    blockData.persist()
 
 Review comment:
   A bare `blockData.persist()` defaults to `MEMORY_ONLY`, so blocks that don't fit in memory are recomputed from the lineage on every iteration. I think it'd be good if the storage level were specified explicitly as `MEMORY_AND_DISK`, so oversized blocks spill to disk instead.
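   A minimal sketch of what I mean (assuming `blockData` is the block RDD built above; `StorageLevel` comes from `org.apache.spark.storage`):

   ```scala
   import org.apache.spark.storage.StorageLevel

   // Persist with an explicit level so blocks that do not fit in memory
   // spill to disk instead of being recomputed each iteration.
   blockData.persist(StorageLevel.MEMORY_AND_DISK)
   ```

   Since each block packs many points into a single dense or sparse matrix, blocks can be large, and recomputing them from the lineage on every k-means iteration would be expensive.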
