Github user mengxr commented on a diff in the pull request:
https://github.com/apache/spark/pull/3319#discussion_r22070510
--- Diff: mllib/src/main/scala/org/apache/spark/mllib/linalg/Matrices.scala ---
@@ -197,6 +331,145 @@ class SparseMatrix(
}
override def copy = new SparseMatrix(numRows, numCols, colPtrs, rowIndices, values.clone())
+
+ private[mllib] def map(f: Double => Double) =
+ new SparseMatrix(numRows, numCols, colPtrs, rowIndices, values.map(f))
+
+ private[mllib] def update(f: Double => Double): SparseMatrix = {
+ val len = values.length
+ var i = 0
+ while (i < len) {
+ values(i) = f(values(i))
+ i += 1
+ }
+ this
+ }
+
+ /** Generate a `DenseMatrix` from the given `SparseMatrix`. */
+ def toDense(): DenseMatrix = {
+ new DenseMatrix(numRows, numCols, toArray)
+ }
+}
+
+/**
+ * Factory methods for [[org.apache.spark.mllib.linalg.SparseMatrix]].
+ */
+object SparseMatrix {
+
+ /**
+ * Generate a `SparseMatrix` from Coordinate List (COO) format. Input
must be an array of
+ * (row, column, value) tuples. Array must be sorted first by *column*
index and then by row
+ * index.
+ * @param numRows number of rows of the matrix
+ * @param numCols number of columns of the matrix
+ * @param entries Array of ((row, column), value) tuples
+ * @return The corresponding `SparseMatrix`
+ */
+ def fromCOO(numRows: Int, numCols: Int, entries: Array[((Int, Int), Double)]): SparseMatrix = {
+ val colPtrs = new ArrayBuffer[Int](numCols + 1)
+ colPtrs.append(0)
+ var nnz = 0
+ var lastCol = 0
+ val values = entries.map { case ((i, j), v) =>
+ while (j != lastCol) {
+ colPtrs.append(nnz)
+ lastCol += 1
+ if (lastCol > numCols) {
+ throw new IndexOutOfBoundsException("Please make sure that the entries array is " +
+ "sorted by COLUMN index first and then by row index.")
+ }
+ }
+ nnz += 1
+ v
+ }
+ while (numCols > lastCol) {
+ colPtrs.append(nnz)
+ lastCol += 1
+ }
+ new SparseMatrix(numRows, numCols, colPtrs.toArray, entries.map(_._1._1), values)
+ }
+
+ /**
+ * Generate an Identity Matrix in `SparseMatrix` format.
+ * @param n number of rows and columns of the matrix
+ * @return `SparseMatrix` with size `n` x `n` and values of ones on the diagonal
+ */
+ def speye(n: Int): SparseMatrix = {
+ new SparseMatrix(n, n, (0 to n).toArray, (0 until n).toArray, Array.fill(n)(1.0))
+ }
+
+ /** Generates a `SparseMatrix` with a given random number generator and `method`, which
+ * specifies the distribution. */
+ private def genRandMatrix(
+ numRows: Int,
+ numCols: Int,
+ density: Double,
+ rng: Random,
+ method: Random => Double): SparseMatrix = {
+ require(density >= 0.0 && density <= 1.0, "density must be a double in the range " +
+ s"0.0 <= d <= 1.0. Currently, density: $density")
+ val length = math.ceil(numRows * numCols * density).toInt
+ val entries = Map[(Int, Int), Double]()
+ var i = 0
+ while (i < length) {
+ var rowIndex = rng.nextInt(numRows)
+ var colIndex = rng.nextInt(numCols)
+ while (entries.contains((rowIndex, colIndex))) {
--- End diff --
If `density` is close to `1`, this while loop can take a very long time to terminate, because almost every freshly drawn (row, column) pair collides with an entry that was already generated. We can combine this approach with selection-rejection sampling to achieve `O(nnz)` complexity:
https://github.com/mengxr/spark-sampling/blob/master/src/main/scala/org/apache/spark/sampling/RDDSamplingFunctions.scala#L98
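
For illustration only, here is a minimal sketch of the selection-rejection idea (the helper name `sampleCells` and its signature are made up for this note; they are not taken from this PR or from the linked file). The point is that scanning the numRows * numCols cells once and keeping each cell with probability (positions still needed) / (cells left) selects exactly `nnz` distinct positions in a single pass, so there is no retry loop to get stuck in when `density` is close to `1`:

    import scala.util.Random

    // Hypothetical sketch: pick exactly `nnz` distinct cells out of
    // numRows * numCols in one scan (selection-rejection), so the cost is
    // O(numRows * numCols) with no rejection retries even at high density.
    def sampleCells(numRows: Int, numCols: Int, nnz: Int, rng: Random): Array[(Int, Int)] = {
      val total = numRows.toLong * numCols
      require(nnz >= 0 && nnz <= total, s"nnz must be in [0, $total]")
      val selected = new Array[(Int, Int)](nnz)
      var needed = nnz
      var cell = 0L
      while (needed > 0) {
        // Keep this cell with probability needed / (cells remaining).
        if (rng.nextDouble() * (total - cell) < needed) {
          val col = (cell / numRows).toInt  // column-major, matching the CSC layout
          val row = (cell % numRows).toInt
          selected(nnz - needed) = (row, col)
          needed -= 1
        }
        cell += 1
      }
      selected
    }

The low-density case can keep the existing draw-and-reject loop, which is expected O(nnz) when collisions are rare; switching to a scan like the one above once `density` crosses some threshold is what keeps the combined approach at expected O(nnz) overall.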