zhengruifeng commented on pull request #31472:
URL: https://github.com/apache/spark/pull/31472#issuecomment-773203630
```
scala> val df = spark.read.format("libsvm").load("/d0/Dev/Opensource/spark/data/mllib/sample_multiclass_classification_data.txt").withColumn("probability", lit(0.0))
21/02/04 18:06:36 WARN LibSVMFileFormat: 'numFeatures' option not specified, determining the number of features by going though the input. If you know the number in advance, please specify it via 'numFeatures' option to avoid the extra scan.
df: org.apache.spark.sql.DataFrame = [label: double, features: vector ... 1 more field]

scala>

scala> val classifier = new LogisticRegression().setMaxIter(1).setTol(1E-6).setFitIntercept(true)
classifier: org.apache.spark.ml.classification.LogisticRegression = logreg_5900509aa825

scala> val ovr = new OneVsRest().setClassifier(classifier)
ovr: org.apache.spark.ml.classification.OneVsRest = oneVsRest_dd2b3e9da4e3

scala> val ovrm = ovr.fit(df)
ovrm: org.apache.spark.ml.classification.OneVsRestModel = OneVsRestModel: uid=oneVsRest_dd2b3e9da4e3, classifier=logreg_5900509aa825, numClasses=3, numFeatures=4

scala> ovrm.transform(df)
java.lang.IllegalArgumentException: requirement failed: Column probability already exists.
  at scala.Predef$.require(Predef.scala:281)
  at org.apache.spark.ml.util.SchemaUtils$.appendColumn(SchemaUtils.scala:106)
  at org.apache.spark.ml.util.SchemaUtils$.appendColumn(SchemaUtils.scala:96)
  at org.apache.spark.ml.classification.ProbabilisticClassifierParams.validateAndTransformSchema(ProbabilisticClassifier.scala:38)
  at org.apache.spark.ml.classification.ProbabilisticClassifierParams.validateAndTransformSchema$(ProbabilisticClassifier.scala:33)
  at org.apache.spark.ml.classification.LogisticRegressionModel.org$apache$spark$ml$classification$LogisticRegressionParams$$super$validateAndTransformSchema(LogisticRegression.scala:917)
  at org.apache.spark.ml.classification.LogisticRegressionParams.validateAndTransformSchema(LogisticRegression.scala:268)
  at org.apache.spark.ml.classification.LogisticRegressionParams.validateAndTransformSchema$(LogisticRegression.scala:255)
  at org.apache.spark.ml.classification.LogisticRegressionModel.validateAndTransformSchema(LogisticRegression.scala:917)
  at org.apache.spark.ml.PredictionModel.transformSchema(Predictor.scala:222)
  at org.apache.spark.ml.classification.ClassificationModel.transformSchema(Classifier.scala:182)
  at org.apache.spark.ml.classification.ProbabilisticClassificationModel.transformSchema(ProbabilisticClassifier.scala:88)
  at org.apache.spark.ml.PipelineStage.transformSchema(Pipeline.scala:71)
  at org.apache.spark.ml.classification.ProbabilisticClassificationModel.transform(ProbabilisticClassifier.scala:107)
  at org.apache.spark.ml.classification.OneVsRestModel.$anonfun$transform$4(OneVsRest.scala:215)
  at scala.collection.IndexedSeqOptimized.foldLeft(IndexedSeqOptimized.scala:60)
  at scala.collection.IndexedSeqOptimized.foldLeft$(IndexedSeqOptimized.scala:68)
  at scala.collection.mutable.ArrayOps$ofRef.foldLeft(ArrayOps.scala:198)
  at org.apache.spark.ml.classification.OneVsRestModel.transform(OneVsRest.scala:203)
  ... 49 elided

scala>
```
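For completeness, here is a minimal sketch of a possible caller-side workaround, assuming a spark-shell session like the one above (the `predicted` name is just for illustration): dropping or renaming the pre-existing `probability` column before calling `transform` should avoid the `appendColumn` conflict raised by the inner LogisticRegressionModel.
```
import org.apache.spark.ml.classification.{LogisticRegression, OneVsRest}
import org.apache.spark.sql.functions.lit

// Same setup as the repro: a libsvm DataFrame with a pre-existing "probability" column.
val df = spark.read.format("libsvm")
  .load("/d0/Dev/Opensource/spark/data/mllib/sample_multiclass_classification_data.txt")
  .withColumn("probability", lit(0.0))

val classifier = new LogisticRegression().setMaxIter(1).setTol(1e-6).setFitIntercept(true)
val ovrm = new OneVsRest().setClassifier(classifier).fit(df)

// Drop (or withColumnRenamed) the conflicting column before transform, so the inner
// LogisticRegressionModel can append its own temporary "probability" column.
val predicted = ovrm.transform(df.drop("probability"))
predicted.show(5)
```
This only sidesteps the conflict on the caller side, of course; the repro above is meant to show that `OneVsRestModel.transform` currently fails outright whenever the input already carries a `probability` column.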