Github user rxin commented on a diff in the pull request:
https://github.com/apache/spark/pull/5991#discussion_r29918612
--- Diff: python/pyspark/ml/feature.py ---
@@ -16,61 +16,73 @@
#
from pyspark.rdd import ignore_unicode_prefix
-from pyspark.ml.param.shared import HasInputCol, HasInputCols, HasOutputCol, HasNumFeatures
+from pyspark.ml.param.shared import *
from pyspark.ml.util import keyword_only
-from pyspark.ml.wrapper import JavaTransformer
+from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaTransformer
from pyspark.mllib.common import inherit_doc
-__all__ = ['Tokenizer', 'HashingTF', 'VectorAssembler']
+__all__ = ['Binarizer', 'HashingTF', 'IDF', 'IDFModel', 'Normalizer', 'OneHotEncoder',
+           'PolynomialExpansion', 'StandardScaler', 'StandardScalerModel', 'StringIndexer',
+           'StringIndexerModel', 'Tokenizer', 'VectorAssembler', 'VectorIndexer', 'Word2Vec',
+           'Word2VecModel']
@inherit_doc
-@ignore_unicode_prefix
-class Tokenizer(JavaTransformer, HasInputCol, HasOutputCol):
+class Binarizer(JavaTransformer, HasInputCol, HasOutputCol):
"""
- A tokenizer that converts the input string to lowercase and then
- splits it by white spaces.
+ Binarize a column of continuous features given a threshold.
>>> from pyspark.sql import Row
- >>> df = sc.parallelize([Row(text="a b c")]).toDF()
- >>> tokenizer = Tokenizer(inputCol="text", outputCol="words")
- >>> tokenizer.transform(df).head()
- Row(text=u'a b c', words=[u'a', u'b', u'c'])
- >>> # Change a parameter.
- >>> tokenizer.setParams(outputCol="tokens").transform(df).head()
- Row(text=u'a b c', tokens=[u'a', u'b', u'c'])
- >>> # Temporarily modify a parameter.
- >>> tokenizer.transform(df, {tokenizer.outputCol: "words"}).head()
- Row(text=u'a b c', words=[u'a', u'b', u'c'])
- >>> tokenizer.transform(df).head()
- Row(text=u'a b c', tokens=[u'a', u'b', u'c'])
- >>> # Must use keyword arguments to specify params.
- >>> tokenizer.setParams("text")
- Traceback (most recent call last):
- ...
- TypeError: Method setParams forces keyword arguments.
+ >>> df = sc.parallelize([Row(values=0.5)]).toDF()
--- End diff --
I prefer the 2nd approach
---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at [email protected] or file a JIRA ticket
with INFRA.
---
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]