sunchao commented on a change in pull request #34445:
URL: https://github.com/apache/spark/pull/34445#discussion_r748584416



##########
File path: 
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/AggregatePushDownUtils.scala
##########
@@ -138,4 +145,39 @@ object AggregatePushDownUtils {
     converter.convert(aggregatesAsRow, columnVectors.toArray)
     new ColumnarBatch(columnVectors.asInstanceOf[Array[ColumnVector]], 1)
   }
+
+  /**
+   * Return the schema for aggregates only (exclude group by columns)
+   */
+  def getSchemaWithoutGroupingExpression(
+      aggregation: Aggregation,
+      aggSchema: StructType): StructType = {
+    val numOfGroupByColumns = aggregation.groupByColumns.length
+    if (numOfGroupByColumns > 0) {
+      new StructType(aggSchema.fields.drop(numOfGroupByColumns))
+    } else {
+      aggSchema
+    }
+  }
+
+  /**
+   * Reorder partition cols if they are not in the same order as group by 
columns
+   */
+  def reOrderPartitionCol(
+      partitionSchema: StructType,
+      aggregation: Aggregation,
+      partitionValues: InternalRow): InternalRow = {
+    val groupByColNames = aggregation.groupByColumns.map(_.fieldNames.head)
+    var reorderedPartColValues = Array.empty[Any]
+    if (!partitionSchema.names.sameElements(groupByColNames)) {
+      groupByColNames.foreach { col =>
+        val index = partitionSchema.names.indexOf(col)
+        val v = partitionValues.asInstanceOf[GenericInternalRow].values(index)

Review comment:
       just curious: is this always guaranteed to be `GenericInternalRow`? 

##########
File path: 
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/AggregatePushDownUtils.scala
##########
@@ -138,4 +145,39 @@ object AggregatePushDownUtils {
     converter.convert(aggregatesAsRow, columnVectors.toArray)
     new ColumnarBatch(columnVectors.asInstanceOf[Array[ColumnVector]], 1)
   }
+
+  /**
+   * Return the schema for aggregates only (exclude group by columns)
+   */
+  def getSchemaWithoutGroupingExpression(
+      aggregation: Aggregation,
+      aggSchema: StructType): StructType = {
+    val numOfGroupByColumns = aggregation.groupByColumns.length
+    if (numOfGroupByColumns > 0) {
+      new StructType(aggSchema.fields.drop(numOfGroupByColumns))
+    } else {
+      aggSchema
+    }
+  }
+
+  /**
+   * Reorder partition cols if they are not in the same order as group by 
columns
+   */
+  def reOrderPartitionCol(
+      partitionSchema: StructType,
+      aggregation: Aggregation,
+      partitionValues: InternalRow): InternalRow = {
+    val groupByColNames = aggregation.groupByColumns.map(_.fieldNames.head)

Review comment:
       should we add an assertion here checking that 
`aggregation.groupByColumns` has the same length as `partitionSchema` and 
`partitionValues`?

##########
File path: 
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/parquet/ParquetPartitionReaderFactory.scala
##########
@@ -134,10 +134,14 @@ case class ParquetPartitionReaderFactory(
         private var hasNext = true
         private lazy val row: InternalRow = {
           val footer = getFooter(file)
+
+          val partitionValues = AggregatePushDownUtils.reOrderPartitionCol(

Review comment:
       Can we call this in `ParquetUtils.createAggInternalRowFromFooter`? It 
seems we always call `AggregatePushDownUtils.reOrderPartitionCol` followed 
by `ParquetUtils.createAggInternalRowFromFooter`.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to