Github user aray commented on a diff in the pull request:

    https://github.com/apache/spark/pull/9929#discussion_r45806267
  
    --- Diff: sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala ---
    @@ -282,74 +282,96 @@ class GroupedData protected[sql](
       }
     
       /**
    -    * (Scala-specific) Pivots a column of the current [[DataFrame]] and performs the specified
    -    * aggregation.
    -    * {{{
    -    *   // Compute the sum of earnings for each year by course with each course as a separate column
    -    *   df.groupBy($"year").pivot($"course", "dotNET", "Java").agg(sum($"earnings"))
    -    *   // Or without specifying column values
    -    *   df.groupBy($"year").pivot($"course").agg(sum($"earnings"))
    -    * }}}
    -    * @param pivotColumn Column to pivot
    -    * @param values Optional list of values of pivotColumn that will be translated to columns in the
    -    *               output data frame. If values are not provided, the method will do an immediate
    -    *               call to .distinct() on the pivot column.
    -    * @since 1.6.0
    -    */
    -  @scala.annotation.varargs
    -  def pivot(pivotColumn: Column, values: Column*): GroupedData = groupType match {
    -    case _: GroupedData.PivotType =>
    -      throw new UnsupportedOperationException("repeated pivots are not supported")
    -    case GroupedData.GroupByType =>
    -      val pivotValues = if (values.nonEmpty) {
    -        values.map {
    -          case Column(literal: Literal) => literal
    -          case other =>
    -            throw new UnsupportedOperationException(
    -              s"The values of a pivot must be literals, found $other")
    -        }
    -      } else {
    -        // This is to prevent unintended OOM errors when the number of distinct values is large
    -        val maxValues = df.sqlContext.conf.getConf(SQLConf.DATAFRAME_PIVOT_MAX_VALUES)
    -        // Get the distinct values of the column and sort them so it's consistent
    -        val values = df.select(pivotColumn)
    -          .distinct()
    -          .sort(pivotColumn)
    -          .map(_.get(0))
    -          .take(maxValues + 1)
    -          .map(Literal(_)).toSeq
    -        if (values.length > maxValues) {
    -          throw new RuntimeException(
    -            s"The pivot column $pivotColumn has more than $maxValues 
distinct values, " +
    -              "this could indicate an error. " +
    -              "If this was intended, set \"" + 
SQLConf.DATAFRAME_PIVOT_MAX_VALUES.key + "\" " +
    -              s"to at least the number of distinct values of the pivot 
column.")
    -        }
    -        values
    -      }
    -      new GroupedData(df, groupingExprs, GroupedData.PivotType(pivotColumn.expr, pivotValues))
    -    case _ =>
    -      throw new UnsupportedOperationException("pivot is only supported 
after a groupBy")
    +   * Pivots a column of the current [[DataFrame]] and performs the specified aggregation.
    +   * There are two versions of the pivot function: one that requires the caller to specify the
    +   * list of distinct values to pivot on, and one that does not. The latter is more concise but
    +   * less efficient, because Spark needs to first compute the list of distinct values internally.
    +   *
    +   * {{{
    +   *   // Compute the sum of earnings for each year by course with each course as a separate column
    +   *   df.groupBy("year").pivot("course", Seq("dotNET", "Java")).sum("earnings")
    +   *
    +   *   // Or without specifying column values (less efficient)
    +   *   df.groupBy("year").pivot("course").sum("earnings")
    +   * }}}
    +   *
    +   * @param pivotColumn Name of the column to pivot.
    +   * @since 1.6.0
    +   */
    +  def pivot(pivotColumn: String): GroupedData = {
    +    // This is to prevent unintended OOM errors when the number of distinct values is large
    +    val maxValues = df.sqlContext.conf.getConf(SQLConf.DATAFRAME_PIVOT_MAX_VALUES)
    +    // Get the distinct values of the column and sort them so it's consistent
    +    val values = df.select(pivotColumn)
    +      .distinct()
    +      .sort(pivotColumn)
    --- End diff --
    
    The sort is there to ensure that the output columns come out in a consistent logical order across runs; `distinct()` alone makes no ordering guarantee.
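    
    For anyone following along, here is a minimal self-contained sketch of the two variants side by side (not part of the diff; it assumes the Spark 1.6-era `SQLContext` API, a local master, and invented sample data):
    
    ```scala
    import org.apache.spark.{SparkConf, SparkContext}
    import org.apache.spark.sql.SQLContext
    
    object PivotSketch {
      def main(args: Array[String]): Unit = {
        val sc = new SparkContext(
          new SparkConf().setAppName("pivot-sketch").setMaster("local[2]"))
        val sqlContext = new SQLContext(sc)
        import sqlContext.implicits._
    
        // Hypothetical sample data matching the scaladoc example's schema.
        val df = Seq(
          (2012, "dotNET", 10000), (2012, "Java", 20000),
          (2013, "dotNET", 48000), (2013, "Java", 30000)
        ).toDF("year", "course", "earnings")
    
        // Explicit values: output columns follow the caller's order (dotNET, then Java).
        df.groupBy("year").pivot("course", Seq("dotNET", "Java")).sum("earnings").show()
    
        // No values given: Spark runs distinct() on "course" itself. Since distinct()
        // carries no ordering guarantee, the sort in the diff is what pins the output
        // columns to a deterministic order ("Java" before "dotNET" under string ordering).
        df.groupBy("year").pivot("course").sum("earnings").show()
    
        sc.stop()
      }
    }
    ```
    
    And if the pivot column legitimately has more distinct values than the default cap, the diff's error message already names the escape hatch: raise the setting behind `SQLConf.DATAFRAME_PIVOT_MAX_VALUES.key` to at least the number of distinct values.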

