fix docs to use SparseMatrix
Project: http://git-wip-us.apache.org/repos/asf/incubator-spark/repo Commit: http://git-wip-us.apache.org/repos/asf/incubator-spark/commit/746148bc Tree: http://git-wip-us.apache.org/repos/asf/incubator-spark/tree/746148bc Diff: http://git-wip-us.apache.org/repos/asf/incubator-spark/diff/746148bc Branch: refs/heads/master Commit: 746148bc18d5e25ea93f5ff17a6cb4da9b671b75 Parents: 06c0f76 Author: Reza Zadeh <riz...@gmail.com> Authored: Sun Jan 5 18:03:57 2014 -0800 Committer: Reza Zadeh <riz...@gmail.com> Committed: Sun Jan 5 18:03:57 2014 -0800 ---------------------------------------------------------------------- docs/mllib-guide.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/incubator-spark/blob/746148bc/docs/mllib-guide.md ---------------------------------------------------------------------- diff --git a/docs/mllib-guide.md b/docs/mllib-guide.md index abeb55d..653848b 100644 --- a/docs/mllib-guide.md +++ b/docs/mllib-guide.md @@ -243,18 +243,21 @@ as tuples of the form ((i,j),value) all in RDDs. Below is example usage. import org.apache.spark.SparkContext import org.apache.spark.mllib.linalg.SVD +import org.apache.spark.mllib.linalg.SparseMatrix +import org.apache.spark.mllib.linalg.MatrixEntry // Load and parse the data file val data = sc.textFile("mllib/data/als/test.data").map { line => val parts = line.split(',') - ((parts(0).toInt, parts(1).toInt), parts(2).toDouble) + MatrixEntry(parts(0).toInt, parts(1).toInt, parts(2).toDouble) } val m = 4 val n = 4 val k = 1 // recover largest singular vector -val (u, s, v) = SVD.sparseSVD(data, m, n, 1) +val s = decomposed.S.data println("singular values = " + s.toArray.mkString)