cloud-fan commented on a change in pull request #34575:
URL: https://github.com/apache/spark/pull/34575#discussion_r772862245
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileScanRDD.scala
##########
@@ -103,6 +116,108 @@ class FileScanRDD(
context.killTaskIfInterrupted()
(currentIterator != null && currentIterator.hasNext) || nextIterator()
}
+
+ ///////////////////////////
+ // FILE METADATA METHODS //
+ ///////////////////////////
+
+  // Cached UnsafeRow holding the per-file metadata column values; refreshed
+  // only when the current file changes (all rows of a file share the same
+  // metadata).
+  @volatile private var metadataColumnsUnsafeRow: UnsafeRow = _
+  // Cached InternalRow counterpart of the metadata values; likewise refreshed
+  // only on a file change.
+  @volatile private var metadataColumnsInternalRow: InternalRow = _
+  // Generated joiner that appends the metadata unsafe row to each data
+  // unsafe row (schema: requiredSchema ++ metadata columns).
+  lazy private val metadataUnsafeRowJoiner =
+    GenerateUnsafeRowJoiner.create(requiredSchema,
metadataColumns.toStructType)
+  // Projection that converts the metadata InternalRow into an UnsafeRow.
+  lazy private val unsafeRowConverter = {
+    val metadataColumnsDataTypes = metadataColumns.map(_.dataType).toArray
+    val converter = UnsafeProjection.create(metadataColumnsDataTypes)
+    (row: InternalRow) => converter(row)
+  }
+
+  /**
+   * Refreshes the cached metadata rows. Every record in a partitioned file
+   * has exactly the same metadata values, so this only needs to run when
+   * `currentFile` changes; when the file is null both caches are cleared.
+   */
+  private def updateMetadataColumns(): Unit = {
+    if (metadataColumns.nonEmpty) {
+      if (currentFile == null) {
+        metadataColumnsUnsafeRow = null
+        metadataColumnsInternalRow = null
+      } else {
+        // construct an internal row, one field per requested metadata
+        // column, in declaration order
+        val path = new Path(currentFile.filePath)
+        metadataColumnsInternalRow = InternalRow.fromSeq(
+          metadataColumns.map(_.name).map {
+            case FILE_PATH => UTF8String.fromString(path.toString)
+            case FILE_NAME => UTF8String.fromString(path.getName)
+            case FILE_SIZE => currentFile.fileSize
+            case FILE_MODIFICATION_TIME => currentFile.modificationTime
+          }
+        )
+        // convert the internal row to an unsafe row for the row joiner
+        metadataColumnsUnsafeRow =
unsafeRowConverter(metadataColumnsInternalRow)
+      }
+    }
+  }
+
+  /**
+   * Builds one writable column vector per requested metadata column for the
+   * given batch `c`, repeating the constant per-file value for every row.
+   * Assumes `currentFile` is non-null (only called while a file is open).
+   */
+  private def createMetadataColumnVector(c: ColumnarBatch):
Array[WritableColumnVector] = {
+    val path = new Path(currentFile.filePath)
+    // NOTE(review): getBytes() uses the platform default charset here;
+    // UTF-8 is presumably intended for these string columns — confirm.
+    val filePathBytes = path.toString.getBytes
+    val fileNameBytes = path.getName.getBytes
+    var rowId = 0
+    metadataColumns.map(_.name).map {
+      case FILE_PATH =>
+        val columnVector = new OnHeapColumnVector(c.numRows(), StringType)
+        rowId = 0
+        // use a tight-loop for better performance
+        while (rowId < c.numRows()) {
+          columnVector.putByteArray(rowId, filePathBytes)
+          rowId += 1
+        }
+        columnVector
+      case FILE_NAME =>
+        val columnVector = new OnHeapColumnVector(c.numRows(), StringType)
+        rowId = 0
+        // use a tight-loop for better performance
+        while (rowId < c.numRows()) {
+          columnVector.putByteArray(rowId, fileNameBytes)
+          rowId += 1
+        }
+        columnVector
+      case FILE_SIZE =>
+        // fixed-width longs can be written with a single bulk put
+        val columnVector = new OnHeapColumnVector(c.numRows(), LongType)
+        columnVector.putLongs(0, c.numRows(), currentFile.fileSize)
+        columnVector
+      case FILE_MODIFICATION_TIME =>
+        val columnVector = new OnHeapColumnVector(c.numRows(), LongType)
+        columnVector.putLongs(0, c.numRows(), currentFile.modificationTime)
+        columnVector
+    }.toArray
+  }
+
+ /**
+ * Add metadata columns at the end of nextElement if needed.
+ * For different row implementations, use different methods to update
and append.
+ */
+ private def addMetadataColumnsIfNeeded(nextElement: Object): Object = {
+ if (metadataColumns.nonEmpty) {
+ nextElement match {
+ case c: ColumnarBatch =>
+ new ColumnarBatch(
+ Array.tabulate(c.numCols())(c.column) ++
createMetadataColumnVector(c),
Review comment:
hmm, is `Array(c.column(0), c.column(1), ... c.column(numCols - 1))` the
same as `c.column`?
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]