sunchao commented on a change in pull request #34298:
URL: https://github.com/apache/spark/pull/34298#discussion_r737923928
##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcUtils.scala
##########
@@ -377,4 +381,106 @@ object OrcUtils extends Logging {
case _ => false
}
}
+
+  /**
+   * When the partial aggregates (Max/Min/Count) are pushed down to ORC, we don't need to read
+   * data from ORC and aggregate at the Spark layer. Instead we want to get the partial
+   * aggregates (Max/Min/Count) result using the statistics information from the ORC file
+   * footer, and then construct an InternalRow from these aggregate results.
+   *
+   * @return Aggregate results in the format of InternalRow
+   */
+  def createAggInternalRowFromFooter(
+      reader: Reader,
+      dataSchema: StructType,
+      partitionSchema: StructType,
+      aggregation: Aggregation,
+      aggSchema: StructType,
+      isCaseSensitive: Boolean): InternalRow = {
+    require(aggregation.groupByColumns.length == 0,
+      s"aggregate $aggregation with group-by column shouldn't be pushed down")
+    val columnsStatistics = OrcFooterReader.readStatistics(reader)
Review comment:
I see. Thanks @c21!
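
For anyone following along, the footer-only lookup this method relies on can be reproduced with the plain ORC reader API. Here is a minimal sketch, not the PR's code path (which goes through `OrcFooterReader.readStatistics`); the file path and the assumption that the first data column is a LONG column are hypothetical:

```scala
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.orc.{ColumnStatistics, IntegerColumnStatistics, OrcFile}

object OrcFooterStatsSketch {
  def main(args: Array[String]): Unit = {
    val conf = new Configuration()
    // Hypothetical file; any ORC file whose first column is a LONG works here.
    val reader = OrcFile.createReader(
      new Path("/tmp/example.orc"), OrcFile.readerOptions(conf))
    try {
      // COUNT(*) needs no data read: the footer records the total row count.
      val countStar: Long = reader.getNumberOfRows

      // Footer column statistics: index 0 is the root struct, data columns
      // follow in schema order.
      val stats: Array[ColumnStatistics] = reader.getStatistics

      stats(1) match {
        case s: IntegerColumnStatistics =>
          // MIN/MAX come straight from the column statistics. getNumberOfValues
          // is the non-null count, i.e. what COUNT(col), not COUNT(*), needs.
          println(s"count(*)=$countStar, count(col)=${s.getNumberOfValues}, " +
            s"min=${s.getMinimum}, max=${s.getMaximum}")
        case other =>
          println(s"first column is not a LONG column: ${other.getClass.getSimpleName}")
      }
    } finally {
      reader.close()
    }
  }
}
```

Because the footer holds only file-global statistics, a grouped aggregate cannot be answered from it, which is what the `require` on `groupByColumns` above guards against.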