huaxingao commented on a change in pull request #32049:
URL: https://github.com/apache/spark/pull/32049#discussion_r638476634



##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/parquet/ParquetPartitionReaderFactory.scala
##########
@@ -80,43 +90,108 @@ case class ParquetPartitionReaderFactory(
   private val datetimeRebaseModeInRead = parquetOptions.datetimeRebaseModeInRead
   private val int96RebaseModeInRead = parquetOptions.int96RebaseModeInRead
 
+  private def getFooter(file: PartitionedFile): ParquetMetadata = {
+    val conf = broadcastedConf.value.value
+
+    val filePath = new Path(new URI(file.filePath))
+
+    if (aggregation.aggregateExpressions.isEmpty) {
+      ParquetFooterReader.readFooter(conf, filePath, SKIP_ROW_GROUPS)
+    } else {
+      ParquetFooterReader.readFooter(conf, filePath, NO_FILTER)
+    }
+  }
+
+  // Define isCreatedByParquetMr as a function to avoid unnecessary parquet footer reads.
+  private def isCreatedByParquetMr(file: PartitionedFile): Boolean =
+    getFooter(file).getFileMetaData.getCreatedBy().startsWith("parquet-mr")
+
+  private def convertTz(isCreatedByParquetMr: Boolean): Option[ZoneId] =
+    if (timestampConversion && !isCreatedByParquetMr) {
+      Some(DateTimeUtils
+        .getZoneId(broadcastedConf.value.value.get(SQLConf.SESSION_LOCAL_TIMEZONE.key)))
+    } else {
+      None
+    }
+
   override def supportColumnarReads(partition: InputPartition): Boolean = {
     sqlConf.parquetVectorizedReaderEnabled && sqlConf.wholeStageEnabled &&
       resultSchema.length <= sqlConf.wholeStageMaxNumFields &&
       resultSchema.forall(_.dataType.isInstanceOf[AtomicType])
   }
 
   override def buildReader(file: PartitionedFile): PartitionReader[InternalRow] = {
-    val reader = if (enableVectorizedReader) {
-      createVectorizedReader(file)
+    val fileReader = if (aggregation.aggregateExpressions.isEmpty) {
+
+      val reader = if (enableVectorizedReader) {
+        createVectorizedReader(file)
+      } else {
+        createRowBaseReader(file)
+      }
+
+      new PartitionReader[InternalRow] {
+        override def next(): Boolean = reader.nextKeyValue()
+
+        override def get(): InternalRow = reader.getCurrentValue.asInstanceOf[InternalRow]
+
+        override def close(): Unit = reader.close()
+      }
     } else {
-      createRowBaseReader(file)
-    }
+      new PartitionReader[InternalRow] {
+        var count = 0
 
-    val fileReader = new PartitionReader[InternalRow] {
-      override def next(): Boolean = reader.nextKeyValue()
+        override def next(): Boolean = if (count == 0) true else false
 
-      override def get(): InternalRow = reader.getCurrentValue.asInstanceOf[InternalRow]
+        override def get(): InternalRow = {
+          count += 1
+          val footer = getFooter(file)
+          val (parquetTypes, values) =
+            ParquetUtils.getPushedDownAggResult(footer, dataSchema, aggregation)
+          ParquetUtils.aggResultToSparkInternalRows(footer, parquetTypes, values, aggSchema,
+            datetimeRebaseModeInRead, int96RebaseModeInRead, convertTz(isCreatedByParquetMr(file)))

Review comment:
       will change this.
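
For context, `getPushedDownAggResult` works because aggregates such as COUNT can be answered from row-group metadata alone; that is also why `getFooter` keeps row-group metadata (`NO_FILTER`) on the aggregate path but can use `SKIP_ROW_GROUPS` otherwise. A minimal sketch of the underlying idea, using only the parquet-mr footer API rather than the PR's `ParquetUtils` helpers:

```scala
import scala.jdk.CollectionConverters._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.parquet.format.converter.ParquetMetadataConverter.NO_FILTER
import org.apache.parquet.hadoop.ParquetFileReader

// Sketch only: COUNT(*) computed from footer metadata, no data pages read.
def countRowsFromFooter(conf: Configuration, path: Path): Long = {
  val footer = ParquetFileReader.readFooter(conf, path, NO_FILTER)
  // Every row group records its row count in the file footer.
  footer.getBlocks.asScala.map(_.getRowCount).sum
}
```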




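The non-aggregate branch above wraps a record reader, while the aggregate branch returns a reader that yields exactly one row per file. A stand-alone sketch of that single-row pattern (`computeAggRow` is a hypothetical thunk standing in for the footer-based computation in the diff):

```scala
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.connector.read.PartitionReader

// Sketch: emit one precomputed row, then report exhaustion, mirroring
// the count-based next()/get() logic in the diff above.
class SingleRowPartitionReader(computeAggRow: () => InternalRow)
  extends PartitionReader[InternalRow] {
  private var consumed = false

  override def next(): Boolean = !consumed

  override def get(): InternalRow = {
    consumed = true
    computeAggRow()
  }

  override def close(): Unit = ()
}
```

Deferring the aggregation into `get()`, as the diff does, also keeps the footer read lazy, consistent with the intent behind the `isCreatedByParquetMr` comment above.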
-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
