holdenk commented on a change in pull request #20146: [SPARK-11215][ML] Add multiple columns support to StringIndexer
URL: https://github.com/apache/spark/pull/20146#discussion_r241710382
 
 

 ##########
 File path: mllib/src/main/scala/org/apache/spark/ml/feature/StringIndexer.scala
 ##########
 @@ -130,21 +159,60 @@ class StringIndexer @Since("1.4.0") (
   @Since("1.4.0")
   def setOutputCol(value: String): this.type = set(outputCol, value)
 
+  /** @group setParam */
+  @Since("2.4.0")
+  def setInputCols(value: Array[String]): this.type = set(inputCols, value)
+
+  /** @group setParam */
+  @Since("2.4.0")
+  def setOutputCols(value: Array[String]): this.type = set(outputCols, value)
+
+  private def countByValue(
+      dataset: Dataset[_],
+      inputCols: Array[String]): Array[OpenHashMap[String, Long]] = {
+
+    val aggregator = new StringIndexerAggregator(inputCols.length)
+    implicit val encoder = Encoders.kryo[Array[OpenHashMap[String, Long]]]
+
+    dataset.select(inputCols.map(col(_).cast(StringType)): _*)
+      .toDF
+      .groupBy().agg(aggregator.toColumn)
+      .as[Array[OpenHashMap[String, Long]]]
+      .collect()(0)
+  }
+
   @Since("2.0.0")
   override def fit(dataset: Dataset[_]): StringIndexerModel = {
     transformSchema(dataset.schema, logging = true)
-    val values = dataset.na.drop(Array($(inputCol)))
-      .select(col($(inputCol)).cast(StringType))
-      .rdd.map(_.getString(0))
-    val labels = $(stringOrderType) match {
-      case StringIndexer.frequencyDesc => values.countByValue().toSeq.sortBy(-_._2)
-        .map(_._1).toArray
-      case StringIndexer.frequencyAsc => values.countByValue().toSeq.sortBy(_._2)
-        .map(_._1).toArray
-      case StringIndexer.alphabetDesc => values.distinct.collect.sortWith(_ > _)
-      case StringIndexer.alphabetAsc => values.distinct.collect.sortWith(_ < _)
-    }
-    copyValues(new StringIndexerModel(uid, labels).setParent(this))
+
+    val (inputCols, _) = getInOutCols()
+
+    val filteredDF = dataset.na.drop(inputCols)
+
+    // In case of equal frequency when frequencyDesc/Asc, we further sort the strings by alphabet.
+    val labelsArray = $(stringOrderType) match {
+      case StringIndexer.frequencyDesc =>
+        countByValue(filteredDF, inputCols).map { counts =>
+          counts.toSeq.sortBy(_._1).sortBy(-_._2).map(_._1).toArray
 
 Review comment:
   Yes, I think we can do this with a single `sortWith`, e.g. in the shell I did something like:
   
   > scala> def lt(a: (String, Int), b: (String, Int)) = {
   >      |   if (a._2 == b._2) {
   >      |     a._1 < b._1
   >      |   } else {
   >      |     a._2 < b._2
   >      |   }
   >      | }
   > lt: (a: (String, Int), b: (String, Int))Boolean
   > 
   > scala> seq.sortWith(lt)
   > res7: List[(String, Int)] = List((a,1), (c,1), (a,2))
   > 
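   To make the suggestion concrete, here is a minimal, self-contained sketch (plain Scala, outside Spark; the object and method names are made up for illustration) of how the frequencyDesc branch could use one comparator that orders by descending count and breaks ties alphabetically:

       object SortWithSketch {
         // Single-pass comparator: larger counts first; equal counts fall back to label order.
         def labelsByFreqDesc(counts: Seq[(String, Long)]): Array[String] =
           counts.sortWith { (a, b) =>
             if (a._2 == b._2) a._1 < b._1 else a._2 > b._2
           }.map(_._1).toArray

         def main(args: Array[String]): Unit = {
           val counts = Seq("b" -> 3L, "a" -> 3L, "c" -> 1L)
           // "a" and "b" tie on count 3 and are ordered alphabetically: a, b, c
           println(labelsByFreqDesc(counts).mkString(", "))
         }
       }

   For comparison, the two-step `sortBy(_._1).sortBy(-_._2)` in the diff yields the same ordering because Scala's `sortBy` is stable, so this is mostly a question of readability and of sorting once instead of twice.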

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services
