Github user Yunni commented on a diff in the pull request:

    https://github.com/apache/spark/pull/16715#discussion_r100193020
  
    --- Diff: python/pyspark/ml/feature.py ---
    @@ -755,6 +951,102 @@ def maxAbs(self):
     
     
     @inherit_doc
    +class MinHashLSH(JavaEstimator, LSHParams, HasInputCol, HasOutputCol, HasSeed,
    +                 JavaMLReadable, JavaMLWritable):
    +
    +    """
    +    .. note:: Experimental
    +
    +    LSH class for Jaccard distance.
    +    The input can be dense or sparse vectors, but it is more efficient if it is sparse.
    +    For example, `Vectors.sparse(10, [(2, 1.0), (3, 1.0), (5, 1.0)])`
    +    means there are 10 elements in the space. This set contains elem 2, elem 3 and elem 5.
    +    Also, any input vector must have at least 1 non-zero index, and all non-zero values
    +    are treated as binary "1" values.
    +
    +    .. seealso:: `MinHash <https://en.wikipedia.org/wiki/MinHash>`_
    +
    +    >>> from pyspark.ml.linalg import Vectors
    +    >>> data = [(Vectors.sparse(6, [0, 1, 2], [1.0, 1.0, 1.0]),),
    +    ...         (Vectors.sparse(6, [2, 3, 4], [1.0, 1.0, 1.0]),),
    +    ...         (Vectors.sparse(6, [0, 2, 4], [1.0, 1.0, 1.0]),)]
    +    >>> df = spark.createDataFrame(data, ["keys"])
    +    >>> mh = MinHashLSH(inputCol="keys", outputCol="values", seed=12345)
    +    >>> model = mh.fit(df)
    +    >>> model.transform(df).head()
    +    Row(keys=SparseVector(6, {0: 1.0, 1: 1.0, 2: 1.0}), values=[DenseVector([-1638925712.0])])
    +    >>> data2 = [(Vectors.sparse(6, [1, 3, 5], [1.0, 1.0, 1.0]),),
    +    ...          (Vectors.sparse(6, [2, 3, 5], [1.0, 1.0, 1.0]),),
    +    ...          (Vectors.sparse(6, [1, 2, 4], [1.0, 1.0, 1.0]),)]
    +    >>> df2 = spark.createDataFrame(data2, ["keys"])
    +    >>> key = Vectors.sparse(6, [1], [1.0])
    +    >>> model.approxNearestNeighbors(df2, key, 1).select("distCol").head()[0]
    +    0.66666...
    +    >>> model.approxSimilarityJoin(df, df2, 1.0).select("distCol").head()[0]
    +    0.5
    +    >>> mhPath = temp_path + "/mh"
    +    >>> mh.save(mhPath)
    +    >>> mh2 = MinHashLSH.load(mhPath)
    +    >>> mh2.getOutputCol() == mh.getOutputCol()
    +    True
    +    >>> modelPath = temp_path + "/mh-model"
    +    >>> model.save(modelPath)
    +    >>> model2 = MinHashLSHModel.load(modelPath)
    +
    +    .. versionadded:: 2.2.0
    +    """
    +
    +    @keyword_only
    +    def __init__(self, inputCol=None, outputCol=None, seed=None, numHashTables=1):
    +        """
    +        __init__(self, inputCol=None, outputCol=None, seed=None, numHashTables=1)
    +        """
    +        super(MinHashLSH, self).__init__()
    +        self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.MinHashLSH", self.uid)
    +        self._setDefault(numHashTables=1)
    +        kwargs = self.__init__._input_kwargs
    +        self.setParams(**kwargs)
    +
    +    @keyword_only
    +    @since("2.2.0")
    +    def setParams(self, inputCol=None, outputCol=None, seed=None, numHashTables=1):
    +        """
    +        setParams(self, inputCol=None, outputCol=None, seed=None, numHashTables=1)
    +        Sets params for this MinHashLSH.
    +        """
    +        kwargs = self.setParams._input_kwargs
    +        return self._set(**kwargs)
    +
    +    def _create_model(self, java_model):
    +        return MinHashLSHModel(java_model)
    +
    +
    +class MinHashLSHModel(JavaModel, LSHModel, JavaMLReadable, JavaMLWritable):
    +    """
    +    .. note:: Experimental
    +
    +    Model produced by :py:class:`MinHashLSH`, where multiple hash functions are stored. Each
    +    hash function is picked from the following family of hash functions, where :math:`a_i` and
    +    :math:`b_i` are randomly chosen integers less than prime:
    +    :math:`h_i(x) = ((x \cdot a_i + b_i) \mod prime)`. This hash family is approximately min-wise
    +    independent according to the reference.
    +
    +    .. seealso:: Tom Bohman, Colin Cooper, and Alan Frieze. "Min-wise independent linear \
    +    permutations." Electronic Journal of Combinatorics 7 (2000): R26.
    +
    +    .. versionadded:: 2.2.0
    +    """
    +
    +    @property
    +    @since("2.2.0")
    +    def randCoefficients(self):
    --- End diff --
    
    Removed
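
    For context, the hash family described in the quoted docstring, h_i(x) = ((x * a_i + b_i) mod prime), can be sketched in plain Python as below. This is only an illustration, not the pyspark API: the helper names (make_hash_functions, min_hash_signature) are made up, and the prime is just an arbitrary large prime (Spark's implementation keeps its own fixed prime constant internally). It shows how per-table MinHash values approximate Jaccard similarity.

    import random

    # Illustrative sketch only, not the pyspark API. Each hash function is
    # h_i(x) = ((x * a_i + b_i) mod PRIME), applied to every non-zero index of
    # the input set; the MinHash value for one hash table is the minimum over the set.
    PRIME = 2038074743  # an arbitrary large prime, well above the index range

    def make_hash_functions(num_hash_tables, seed=12345):
        # Hypothetical helper: draw (a_i, b_i) with 1 <= a_i < PRIME and 0 <= b_i < PRIME.
        rng = random.Random(seed)
        return [(rng.randint(1, PRIME - 1), rng.randint(0, PRIME - 1))
                for _ in range(num_hash_tables)]

    def min_hash_signature(nonzero_indices, hash_functions):
        # One MinHash value per hash table: min of h_i(x) over the set's elements.
        return [min((x * a + b) % PRIME for x in nonzero_indices)
                for (a, b) in hash_functions]

    hashes = make_hash_functions(num_hash_tables=3)
    sig_a = min_hash_signature({0, 1, 2}, hashes)  # like Vectors.sparse(6, [0, 1, 2], [1.0, 1.0, 1.0])
    sig_b = min_hash_signature({0, 2, 4}, hashes)  # like Vectors.sparse(6, [0, 2, 4], [1.0, 1.0, 1.0])
    # The fraction of matching signature entries estimates the Jaccard similarity
    # (here J = |{0, 2}| / |{0, 1, 2, 4}| = 0.5); more hash tables tighten the estimate.
    matches = sum(a == b for a, b in zip(sig_a, sig_b))
    print(matches / len(hashes))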

