Github user mengxr commented on a diff in the pull request:
https://github.com/apache/spark/pull/407#discussion_r11795397
--- Diff: mllib/src/test/scala/org/apache/spark/mllib/recommendation/ALSSuite.scala ---
@@ -128,6 +128,34 @@ class ALSSuite extends FunSuite with LocalSparkContext {
     assert(u11 != u2)
   }
+  test("custom partitioner") {
+    testALS(50, 50, 2, 15, 0.7, 0.3, false, false, false, 3, null)
+    testALS(50, 50, 2, 15, 0.7, 0.3, false, false, false, 3, new Partitioner {
+      def numPartitions(): Int = 3
+      def getPartition(x: Any): Int = x match {
+        case null => 0
+        case _ => x.hashCode % 2
+      }
+    })
+  }
+
+ test("negative ids") {
+ val data = ALSSuite.generateRatings(50, 50, 2, 0.7, false, false)
+ val ratings = sc.parallelize(data._1.map { case Rating(u,p,r) =>
Rating(u-25,p-25,r) })
+ val correct = data._2
+ val model = ALS.train(ratings, 5, 15)
+
+ val pairs = Array.tabulate(50, 50)((u,p) => (u-25,p-25)).flatten
+ val ans = model.predict(sc.parallelize(pairs)).collect
+ ans.foreach { r =>
+ val u = r.user + 25
+ val p = r.product + 25
+ val v = r.rating
+ val error = v - correct.get(u, p)
+ assert(math.abs(error) < 0.4)
--- End diff ---
We shouldn't assert on each prediction: a single outlier could fail the test.
Instead, compute the RMSE over all predictions and assert on that. Alternatively,
you could add a `negativeIds` argument to `testALS` and use it in this test.
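
For example, the per-prediction check could be replaced with a single assertion on the RMSE, roughly along these lines (a sketch only, reusing the names from the diff above; the `0.1` threshold is a placeholder rather than a value from this PR, and would need to be chosen to match whatever bound `testALS` uses):

```scala
// Sketch (not from the PR): accumulate squared errors over all predictions,
// then assert once on the RMSE instead of on every individual prediction.
val predictions = model.predict(sc.parallelize(pairs)).collect()
val squaredErrors = predictions.map { r =>
  val err = r.rating - correct.get(r.user + 25, r.product + 25)
  err * err
}
val rmse = math.sqrt(squaredErrors.sum / squaredErrors.length)
assert(rmse < 0.1) // placeholder threshold; align with the bound used in testALS
```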