Yaohua628 commented on a change in pull request #35068:
URL: https://github.com/apache/spark/pull/35068#discussion_r778617794
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileScanRDD.scala
##########
@@ -130,67 +130,37 @@ class FileScanRDD(
UnsafeProjection.create(joinedExpressions)
}
+  // metadata constant column vectors, will only be updated when the current file is changed
+ val metadataVectors: Seq[ConstantColumnVector] =
+ metadataColumns.map(m => new ConstantColumnVector(m.dataType))
+
/**
 * For each partitioned file, metadata columns for each record in the file are exactly same.
- * Only update metadata row when `currentFile` is changed.
+ * Only update metadata row and vectors when `currentFile` is changed.
 */
- private def updateMetadataRow(): Unit = {
- if (metadataColumns.nonEmpty && currentFile != null) {
- val path = new Path(currentFile.filePath)
- metadataColumns.zipWithIndex.foreach { case (attr, i) =>
- attr.name match {
-          case FILE_PATH => metadataRow.update(i, UTF8String.fromString(path.toString))
-          case FILE_NAME => metadataRow.update(i, UTF8String.fromString(path.getName))
- case FILE_SIZE => metadataRow.update(i, currentFile.fileSize)
- case FILE_MODIFICATION_TIME =>
- // the modificationTime from the file is in millisecond,
- // while internally, the TimestampType is stored in microsecond
- metadataRow.update(i, currentFile.modificationTime * 1000L)
- }
+ private def updateMetadataData(): Unit = {
Review comment:
make sense!
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at: [email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]