Github user Yunni commented on a diff in the pull request:

    https://github.com/apache/spark/pull/16715#discussion_r100966541
  
    --- Diff: 
examples/src/main/scala/org/apache/spark/examples/ml/MinHashLSHExample.scala ---
    @@ -37,38 +38,44 @@ object MinHashLSHExample {
           (0, Vectors.sparse(6, Seq((0, 1.0), (1, 1.0), (2, 1.0)))),
           (1, Vectors.sparse(6, Seq((2, 1.0), (3, 1.0), (4, 1.0)))),
           (2, Vectors.sparse(6, Seq((0, 1.0), (2, 1.0), (4, 1.0))))
    -    )).toDF("id", "keys")
    +    )).toDF("id", "features")
     
         val dfB = spark.createDataFrame(Seq(
           (3, Vectors.sparse(6, Seq((1, 1.0), (3, 1.0), (5, 1.0)))),
           (4, Vectors.sparse(6, Seq((2, 1.0), (3, 1.0), (5, 1.0)))),
           (5, Vectors.sparse(6, Seq((1, 1.0), (2, 1.0), (4, 1.0))))
    -    )).toDF("id", "keys")
    +    )).toDF("id", "features")
     
         val key = Vectors.sparse(6, Seq((1, 1.0), (3, 1.0)))
     
         val mh = new MinHashLSH()
    -      .setNumHashTables(3)
    -      .setInputCol("keys")
    -      .setOutputCol("values")
    +      .setNumHashTables(5)
    +      .setInputCol("features")
    +      .setOutputCol("hashes")
     
         val model = mh.fit(dfA)
     
         // Feature Transformation
    +    println("The hashed dataset where hashed values are stored in the 
column 'hashes':")
         model.transform(dfA).show()
    -    // Cache the transformed columns
    -    val transformedA = model.transform(dfA).cache()
    -    val transformedB = model.transform(dfB).cache()
     
    -    // Approximate similarity join
    -    model.approxSimilarityJoin(dfA, dfB, 0.6).show()
    -    model.approxSimilarityJoin(transformedA, transformedB, 0.6).show()
    -    // Self Join
    -    model.approxSimilarityJoin(dfA, dfA, 0.6).filter("datasetA.id < 
datasetB.id").show()
    +    // Compute the locality sensitive hashes for the input rows, then 
perform approximate
    +    // similarity join.
    +    // We could avoid computing hashes by passing in the 
already-transformed dataset, e.g.
    +    // `model.approxSimilarityJoin(transformedA, transformedB, 0.6)`
    +    println("Approximately joining dfA and dfB on Jaccard distance smaller 
than 0.6:")
    +    model.approxSimilarityJoin(dfA, dfB, 0.6)
    +      .select(col("datasetA.id").alias("idA"),
    +        col("datasetB.id").alias("idB"),
    +        col("distCol").alias("JaccardDistance")).show()
     
    -    // Approximate nearest neighbor search
    +    // Compute the locality sensitive hashes for the input rows, then 
perform approximate nearest
    +    // neighbor search.
    +    // We could avoid computing hashes by passing in the 
already-transformed dataset, e.g.
    +    // `model.approxNearestNeighbors(transformedA, key, 2)`
    +    // It may return fewer than 2 rows because of a lack of elements in the 
hash buckets.
    --- End diff --
    
    Done.


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastruct...@apache.org or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to