bart-samwel commented on a change in pull request #34575:
URL: https://github.com/apache/spark/pull/34575#discussion_r756403397



##########
File path: 
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileScanRDD.scala
##########
@@ -103,6 +116,135 @@ class FileScanRDD(
         context.killTaskIfInterrupted()
         (currentIterator != null && currentIterator.hasNext) || nextIterator()
       }
+
+      ///////////////////////////
+      // FILE METADATA METHODS //
+      ///////////////////////////
+
+      // whether a metadata column exists and is a `MetadataAttribute`
+      private lazy val hasMetadataAttribute: Boolean = {
+        metadataStruct.exists {
+          case MetadataAttribute(_) => true
+          case _ => false
+        }
+      }
+
+      // metadata struct unsafe row, only updated when the current file changes
+      @volatile private var metadataStructUnsafeRow: UnsafeRow = _
+      // metadata generic row, only updated when the current file changes
+      @volatile private var metadataStructGenericRow: Row = _
+      // an unsafe joiner to join a data unsafe row with the metadata unsafe row
+      private lazy val unsafeRowJoiner =
+        if (hasMetadataAttribute) {
+          GenerateUnsafeRowJoiner.create(requiredSchema, Seq(metadataStruct.get).toStructType)
+        }
+
+      // create an off-heap or on-heap WritableColumnVector, per config
+      private def createColumnVector(numRows: Int, dataType: DataType): WritableColumnVector = {
+        if (offHeapColumnVectorEnabled) {
+          new OffHeapColumnVector(numRows, dataType)
+        } else {
+          new OnHeapColumnVector(numRows, dataType)
+        }
+      }
+
+      /**
+       * Within a partitioned file, the metadata columns are exactly the same for every record,
+       * so they only need to be updated when `currentFile` changes.
+       */
+      private def updateMetadataStruct(): Unit = {
+        if (hasMetadataAttribute) {
+          val meta = metadataStruct.get
+          if (currentFile == null) {
+            metadataStructUnsafeRow = new UnsafeRow(1)
+            metadataStructGenericRow = new GenericRow(1)
+          } else {
+            // build a generic row holding the metadata values
+            assert(meta.dataType.isInstanceOf[StructType])
+            metadataStructGenericRow = Row.fromSeq(
+              meta.dataType.asInstanceOf[StructType].names.map {
+                case FILE_PATH =>
+                  UTF8String.fromString(new File(currentFile.filePath).toString)
+                case FILE_NAME => UTF8String.fromString(
+                  currentFile.filePath.split("/").last)
+                case FILE_SIZE => currentFile.fileSize
+                case FILE_MODIFICATION_TIME => currentFile.modificationTime
+                case _ => None // keep the match exhaustive; should not happen
+              }
+            )
+
+            // convert the generic row to an unsafe row
+            val unsafeRowConverter = {
+              val converter = UnsafeProjection.create(
+                Array(METADATA_STRUCT))
+              (row: Row) => {
+                converter(CatalystTypeConverters.convertToCatalyst(row)
+                  .asInstanceOf[InternalRow])
+              }
+            }
+            metadataStructUnsafeRow =
+              unsafeRowConverter(Row.fromSeq(Seq(metadataStructGenericRow)))
+          }
+        }
+      }
+
+      /**
+       * Create a writable column vector containing all required metadata fields
+       */
+      private def createMetadataStructColumnVector(
+          c: ColumnarBatch, meta: AttributeReference): WritableColumnVector = {
+        val columnVector = createColumnVector(c.numRows(), METADATA_STRUCT)
+        val filePathBytes = new File(currentFile.filePath).toString.getBytes
+        val fileNameBytes = currentFile.filePath.split("/").last.getBytes
+        var rowId = 0
+
+        assert(meta.dataType.isInstanceOf[StructType])
+        meta.dataType.asInstanceOf[StructType].names.zipWithIndex.foreach { case (name, ind) =>
+          name match {
+            case FILE_PATH =>
+              rowId = 0
+              // use a tight-loop for better performance
+              while (rowId < c.numRows()) {
+                columnVector.getChild(ind).putByteArray(rowId, filePathBytes)

Review comment:
       This is terribly inefficient -- it copies the file path into every row.
       Can we please add a method to the ColumnVector implementations that does
       the equivalent of `putLongs` but for byte arrays: copy the file path
       _once_ and then reference it from every row?
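
       For illustration, here is a minimal sketch of the kind of helper being
       asked for, written as a standalone method against the existing
       `WritableColumnVector` calls (`arrayData`, `appendBytes`, `putArray`);
       the name `putRepeatedByteArray` is hypothetical, not an actual Spark
       method:

       ```scala
       // Hypothetical sketch: copy `bytes` into the vector's shared child
       // buffer once, then point every row's (offset, length) entry at that
       // single copy instead of duplicating the payload per row.
       private def putRepeatedByteArray(
           vector: WritableColumnVector,
           numRows: Int,
           bytes: Array[Byte]): Unit = {
         // appendBytes writes the payload into the child data buffer once and
         // returns the start offset of the appended region
         val startOffset = vector.arrayData().appendBytes(bytes.length, bytes, 0)
         var rowId = 0
         while (rowId < numRows) {
           // every row references the same stored copy
           vector.putArray(rowId, startOffset, bytes.length)
           rowId += 1
         }
       }
       ```

       With a helper like this, the `FILE_PATH` loop above would collapse to a
       single call such as `putRepeatedByteArray(columnVector.getChild(ind),
       c.numRows(), filePathBytes)` (hypothetical usage).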




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org
