Github user mgaido91 commented on a diff in the pull request: https://github.com/apache/spark/pull/20594#discussion_r167914538 --- Diff: mllib/src/main/scala/org/apache/spark/ml/feature/Bucketizer.scala --- @@ -290,6 +293,27 @@ object Bucketizer extends DefaultParamsReadable[Bucketizer] { } } + + private[Bucketizer] class BucketizerWriter(instance: Bucketizer) extends MLWriter { + + override protected def saveImpl(path: String): Unit = { + // SPARK-23377: The default params will be saved and loaded as user-supplied params. + // Once `inputCols` is set, the default value of `outputCol` param causes the error + // when checking exclusive params. As a temporary fix, we remove the default + // value of `outputCol` if `inputCols` is set before saving. + // TODO: If we modify the persistence mechanism later to better handle default params, + // we can get rid of this. + var removedOutputCol: Option[String] = None + if (instance.isSet(instance.inputCols)) { --- End diff -- this can create a lot of issues with the Python API. Please see #20410 for reference. Thus I am against this fix.
--- --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscribe@spark.apache.org For additional commands, e-mail: reviews-help@spark.apache.org