hvanhovell commented on code in PR #47742:
URL: https://github.com/apache/spark/pull/47742#discussion_r1715912924


##########
mllib/src/main/scala/org/apache/spark/ml/feature/StringIndexer.scala:
##########
@@ -195,31 +193,27 @@ class StringIndexer @Since("1.4.0") (
       } else {
         // We don't count for NaN values. Because `StringIndexerAggregator` 
only processes strings,
         // we replace NaNs with null in advance.
-        new Column(If(col.isNaN.expr, Literal(null), 
col.expr)).cast(StringType)
+        when(!col.isNaN, col.cast(StringType)).otherwise(lit(null))
       }
     }
   }
 
   private def countByValue(
       dataset: Dataset[_],
-      inputCols: Array[String]): Array[OpenHashMap[String, Long]] = {
+      inputCols: Array[String]): Seq[Map[String, Long]] = {
+    val selectedCols = getSelectedCols(dataset, 
inputCols.toImmutableArraySeq).map(grouped_count)
 
-    val aggregator = new StringIndexerAggregator(inputCols.length)
-    implicit val encoder = Encoders.kryo[Array[OpenHashMap[String, Long]]]
-
-    val selectedCols = getSelectedCols(dataset, inputCols.toImmutableArraySeq)
-    dataset.select(selectedCols: _*)
-      .toDF()
-      .agg(aggregator.toColumn)
-      .as[Array[OpenHashMap[String, Long]]]
-      .collect()(0)
+    dataset

Review Comment:
   Is this the only place where this is being used? We could also use a regular 
aggregate:
   ```scala
   private def getSelectedCols(dataset: Dataset[_], inputCols: Seq[String]): 
Column = {
       array(inputCols.map { colName =>
         val col = dataset.col(colName)
         val getter = if (col.expr.dataType == StringType) {
           col
         } else {
           // We don't count for NaN values. Because `StringIndexerAggregator` 
only processes strings,
           // we replace NaNs with null in advance.
           new Column(If(col.isNaN.expr, Literal(null), 
col.expr)).cast(StringType)
         }
        named_struct(lit("field"), lit(colName), lit("value"), getter)
       })
     }
   
     private def countByValue(
         dataset: Dataset[_],
         inputCols: Array[String]): Array[OpenHashMap[String, Long]] = {
   
       val selectedCols = getSelectedCols(dataset, 
inputCols.toImmutableArraySeq)
      dataset.select(inline(selectedCols)).groupBy($"field", $"value")
        .agg(count(lit(1)).as("count")).collect() // need to convert this to a map...
     }
   ```
   That should be much better in terms of memory usage.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to