GitHub user sethah commented on a diff in the pull request:
https://github.com/apache/spark/pull/16715#discussion_r100426821
--- Diff: examples/src/main/java/org/apache/spark/examples/ml/JavaMinHashLSHExample.java ---
@@ -44,25 +45,67 @@ public static void main(String[] args) {
.getOrCreate();
// $example on$
- List<Row> data = Arrays.asList(
+ List<Row> dataA = Arrays.asList(
RowFactory.create(0, Vectors.sparse(6, new int[]{0, 1, 2}, new double[]{1.0, 1.0, 1.0})),
RowFactory.create(1, Vectors.sparse(6, new int[]{2, 3, 4}, new double[]{1.0, 1.0, 1.0})),
RowFactory.create(2, Vectors.sparse(6, new int[]{0, 2, 4}, new double[]{1.0, 1.0, 1.0}))
);
+ List<Row> dataB = Arrays.asList(
+ RowFactory.create(0, Vectors.sparse(6, new int[]{1, 3, 5}, new double[]{1.0, 1.0, 1.0})),
+ RowFactory.create(1, Vectors.sparse(6, new int[]{2, 3, 5}, new double[]{1.0, 1.0, 1.0})),
+ RowFactory.create(2, Vectors.sparse(6, new int[]{1, 2, 4}, new double[]{1.0, 1.0, 1.0}))
+ );
+
StructType schema = new StructType(new StructField[]{
new StructField("id", DataTypes.IntegerType, false,
Metadata.empty()),
- new StructField("keys", new VectorUDT(), false, Metadata.empty())
+ new StructField("features", new VectorUDT(), false, Metadata.empty())
});
- Dataset<Row> dataFrame = spark.createDataFrame(data, schema);
+ Dataset<Row> dfA = spark.createDataFrame(dataA, schema);
+ Dataset<Row> dfB = spark.createDataFrame(dataB, schema);
+
+ int[] indices = {1, 3};
+ double[] values = {1.0, 1.0};
+ Vector key = Vectors.sparse(6, indices, values);
MinHashLSH mh = new MinHashLSH()
- .setNumHashTables(1)
- .setInputCol("keys")
- .setOutputCol("values");
+ .setNumHashTables(5)
+ .setInputCol("features")
+ .setOutputCol("hashes");
+
+ MinHashLSHModel model = mh.fit(dfA);
+
+ // Feature Transformation
+ System.out.println("The hashed dataset where hashed values are stored
in the column 'values':");
+ model.transform(dfA).show();
+ // Cache the transformed columns
+ Dataset<Row> transformedA = model.transform(dfA).cache();
+ Dataset<Row> transformedB = model.transform(dfB).cache();
+
+ // Approximate similarity join
+ System.out.println("Approximately joining dfA and dfB on distance
smaller than 0.6:");
+ model.approxSimilarityJoin(dfA, dfB, 0.6)
+ .select("datasetA.id", "datasetB.id", "distCol")
+ .show();
+ System.out.println("Joining cached datasets to avoid recomputing the
hash values:");
+ model.approxSimilarityJoin(transformedA, transformedB, 0.6)
+ .select("datasetA.id", "datasetB.id", "distCol")
+ .show();
+
+ // Self Join
+ System.out.println("Approximately self join of dfB on distance smaller
than 0.6:");
--- End diff --
same comments as above
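
Note: the quoted hunk is truncated right after the self-join println, and the `key` vector built above is never used in the lines shown. For context, here is a minimal sketch of how the example could continue, assuming the standard MinHashLSHModel API (approxSimilarityJoin and approxNearestNeighbors) and reusing the cached datasets; this is illustrative only, not necessarily the PR's actual code:

    // Self join on the cached dataset. Per the LSH docs, self-joining
    // produces some duplicate pairs, which a real example may filter out.
    model.approxSimilarityJoin(transformedB, transformedB, 0.6)
      .select("datasetA.id", "datasetB.id", "distCol")
      .show();

    // Approximate nearest neighbor search: find the 2 rows of dfA whose
    // Jaccard distance to `key` is smallest.
    System.out.println("Approximately searching dfA for 2 nearest neighbors of the key:");
    model.approxNearestNeighbors(transformedA, key, 2).show();

Both calls hash their inputs with the fitted model, so passing the pre-transformed (cached) datasets avoids recomputing the MinHash signatures.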