Github user jkbradley commented on a diff in the pull request:
https://github.com/apache/spark/pull/15770#discussion_r178987675
--- Diff: mllib/src/main/scala/org/apache/spark/ml/clustering/PowerIterationClustering.scala ---
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.ml.clustering
+
+import org.apache.spark.annotation.{Experimental, Since}
+import org.apache.spark.ml.Transformer
+import org.apache.spark.ml.linalg.{Vector}
+import org.apache.spark.ml.param._
+import org.apache.spark.ml.param.shared._
+import org.apache.spark.ml.util._
+import org.apache.spark.mllib.clustering.{PowerIterationClustering => MLlibPowerIterationClustering}
+import org.apache.spark.mllib.clustering.PowerIterationClustering.Assignment
+import org.apache.spark.rdd.RDD
+import org.apache.spark.sql.{DataFrame, Dataset, Row}
+import org.apache.spark.sql.functions.{col}
+import org.apache.spark.sql.types.{IntegerType, LongType, StructField, StructType}
+
+/**
+ * Common params for PowerIterationClustering
+ */
+private[clustering] trait PowerIterationClusteringParams extends Params with HasMaxIter
+ with HasFeaturesCol with HasPredictionCol {
+
+ /**
+ * The number of clusters to create (k). Must be > 1. Default: 2.
+ * @group param
+ */
+ @Since("2.2.0")
+  final val k = new IntParam(this, "k", "The number of clusters to create. " +
+    "Must be > 1.", ParamValidators.gt(1))
+
+ /** @group getParam */
+ @Since("2.2.0")
+ def getK: Int = $(k)
+
+ /**
+ * Param for the initialization algorithm. This can be either "random"
to use a random vector
+ * as vertex properties, or "degree" to use normalized sum similarities.
Default: random.
+ */
+ @Since("2.2.0")
+ final val initMode = new Param[String](this, "initMode", "The
initialization algorithm. " +
+ "Supported options: 'random' and 'degree'.",
+ (value: String) => validateInitMode(value))
+
+ private[spark] def validateInitMode(initMode: String): Boolean = {
+ initMode match {
+ case "random" => true
+ case "degree" => true
+ case _ => false
+ }
+ }
+
+ /** @group expertGetParam */
+ @Since("2.2.0")
+ def getInitMode: String = $(initMode)
+
+ /**
+   * Param for the column name for ids returned by [[PowerIterationClustering.transform()]].
+ * Default: "id"
+ * @group param
+ */
+ val idCol = new Param[String](this, "idCol", "column name for ids.")
+
+ /** @group getParam */
+ def getIdCol: String = $(idCol)
+
+ /**
+ * Validates the input schema
+ * @param schema input schema
+ */
+ protected def validateSchema(schema: StructType): Unit = {
--- End diff ---
+1
Also:
* This should check the other input columns to make sure they are defined.
* This should add predictionCol to the output schema, not check that it exists in the input. (A rough sketch covering both points is below.)
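
For concreteness, something along these lines would cover both points. This is just a sketch: the validateAndTransformSchema name follows the pattern other spark.ml params traits use, and the LongType/IntegerType choices are assumptions, not a prescription for this PR.

```scala
// SchemaUtils comes from org.apache.spark.ml.util (already covered by the _ import above).
protected def validateAndTransformSchema(schema: StructType): StructType = {
  // Require idCol (and any other input columns this transformer reads) to be
  // present with a compatible type, instead of validating only part of the input.
  SchemaUtils.checkColumnType(schema, $(idCol), LongType)
  // Append predictionCol to the output schema rather than requiring the caller
  // to supply it in the input.
  SchemaUtils.appendColumn(schema, $(predictionCol), IntegerType)
}
```

transform() could then call this from transformSchema() and rely on the appended column being reflected in the output schema.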