Github user Yunni commented on a diff in the pull request:

    https://github.com/apache/spark/pull/16715#discussion_r100966561
  
    --- Diff: 
examples/src/main/scala/org/apache/spark/examples/ml/BucketedRandomProjectionLSHExample.scala
 ---
    @@ -38,40 +39,45 @@ object BucketedRandomProjectionLSHExample {
           (1, Vectors.dense(1.0, -1.0)),
           (2, Vectors.dense(-1.0, -1.0)),
           (3, Vectors.dense(-1.0, 1.0))
    -    )).toDF("id", "keys")
    +    )).toDF("id", "features")
     
         val dfB = spark.createDataFrame(Seq(
           (4, Vectors.dense(1.0, 0.0)),
           (5, Vectors.dense(-1.0, 0.0)),
           (6, Vectors.dense(0.0, 1.0)),
           (7, Vectors.dense(0.0, -1.0))
    -    )).toDF("id", "keys")
    +    )).toDF("id", "features")
     
         val key = Vectors.dense(1.0, 0.0)
     
         val brp = new BucketedRandomProjectionLSH()
           .setBucketLength(2.0)
           .setNumHashTables(3)
    -      .setInputCol("keys")
    -      .setOutputCol("values")
    +      .setInputCol("features")
    +      .setOutputCol("hashes")
     
         val model = brp.fit(dfA)
     
         // Feature Transformation
    +    println("The hashed dataset where hashed values are stored in the 
column 'hashes':")
         model.transform(dfA).show()
    -    // Cache the transformed columns
    -    val transformedA = model.transform(dfA).cache()
    -    val transformedB = model.transform(dfB).cache()
     
    -    // Approximate similarity join
    -    model.approxSimilarityJoin(dfA, dfB, 1.5).show()
    -    model.approxSimilarityJoin(transformedA, transformedB, 1.5).show()
    -    // Self Join
    -    model.approxSimilarityJoin(dfA, dfA, 2.5).filter("datasetA.id < 
datasetB.id").show()
    +    // Compute the locality sensitive hashes for the input rows, then 
perform approximate
    +    // similarity join.
    +    // We could avoid computing hashes by passing in the 
already-transformed dataset, e.g.
    +    // `model.approxSimilarityJoin(transformedA, transformedB, 1.5)`
    +    println("Approximately joining dfA and dfB on Euclidean distance 
smaller than 1.5:")
    +    model.approxSimilarityJoin(dfA, dfB, 1.5)
    +      .select(col("datasetA.id").alias("idA"),
    +        col("datasetB.id").alias("idB"),
    +        col("distCol").alias("EuclideanDistance")).show()
    --- End diff --
    
    Done in 6 places.


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastructure@apache.org or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org
For additional commands, e-mail: reviews-help@spark.apache.org

Reply via email to