IonutBoicuAms commented on code in PR #37228:
URL: https://github.com/apache/spark/pull/37228#discussion_r946576926


##########
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala:
##########
@@ -239,10 +262,20 @@ object FileSourceStrategy extends Strategy with PredicateHelper with Logging {
 
       // extra Project node: wrap flat metadata columns to a metadata struct
       val withMetadataProjections = metadataStructOpt.map { metadataStruct =>
+        val structColumns = metadataColumns.map { col => col.name match {
+            case FileFormat.FILE_PATH | FileFormat.FILE_NAME | FileFormat.FILE_SIZE |
+                 FileFormat.FILE_MODIFICATION_TIME =>
+              col
+            case FileFormat.ROW_INDEX =>
+              fileFormatReaderGeneratedMetadataColumns
+                .filter(_.name == FileFormat.ROW_INDEX_TEMPORARY_COLUMN_NAME)
+                .head.withName(FileFormat.ROW_INDEX)

Review Comment:
   Done



##########
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala:
##########
@@ -223,8 +216,38 @@ object FileSourceStrategy extends Strategy with PredicateHelper with Logging {
         }.toSeq
       }.getOrElse(Seq.empty)
 
-      // outputAttributes should also include the metadata columns at the very end
-      val outputAttributes = readDataColumns ++ partitionColumns ++ metadataColumns
+      val fileConstantMetadataColumns: Seq[Attribute] =
+        metadataColumns.filter(_.name != FileFormat.ROW_INDEX)
+
+      val readDataColumns = dataColumns
+          .filter(requiredAttributes.contains)
+          .filterNot(partitionColumns.contains)
+
+      val fileFormatReaderGeneratedMetadataColumns: Seq[Attribute] =
+        metadataColumns.map(_.name).flatMap {
+          case FileFormat.ROW_INDEX =>
+            if ((readDataColumns ++ partitionColumns).map(_.name)
+                .contains(FileFormat.ROW_INDEX_TEMPORARY_COLUMN_NAME)) {
+              throw new AnalysisException(FileFormat.ROW_INDEX_TEMPORARY_COLUMN_NAME +
+                " is a reserved column name that cannot be read in combination with " +
+                s"${FileFormat.METADATA_NAME}.${FileFormat.ROW_INDEX} column.")
+            }
+            Some(AttributeReference(FileFormat.ROW_INDEX_TEMPORARY_COLUMN_NAME, LongType)())
+          case _ => None
+        }
+
+      val outputSchema = (readDataColumns ++ fileFormatReaderGeneratedMetadataColumns).toStructType

Review Comment:
   Done



##########
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala:
##########
@@ -223,8 +216,38 @@ object FileSourceStrategy extends Strategy with PredicateHelper with Logging {
         }.toSeq
       }.getOrElse(Seq.empty)
 
-      // outputAttributes should also include the metadata columns at the very end
-      val outputAttributes = readDataColumns ++ partitionColumns ++ metadataColumns
+      val fileConstantMetadataColumns: Seq[Attribute] =
+        metadataColumns.filter(_.name != FileFormat.ROW_INDEX)
+
+      val readDataColumns = dataColumns
+          .filter(requiredAttributes.contains)
+          .filterNot(partitionColumns.contains)
+
+      val fileFormatReaderGeneratedMetadataColumns: Seq[Attribute] =
+        metadataColumns.map(_.name).flatMap {
+          case FileFormat.ROW_INDEX =>
+            if ((readDataColumns ++ partitionColumns).map(_.name)

Review Comment:
   Done



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to