yihua commented on code in PR #10167:
URL: https://github.com/apache/hudi/pull/10167#discussion_r1410100073
##########
hudi-spark-datasource/hudi-spark3.5.x/src/main/scala/org/apache/spark/sql/adapter/Spark3_5Adapter.scala:
##########
@@ -127,4 +126,17 @@ class Spark3_5Adapter extends BaseSpark3Adapter {
case OFF_HEAP => "OFF_HEAP"
case _ => throw new IllegalArgumentException(s"Invalid StorageLevel: $level")
}
+
+  override def appendRowIndexColumnForParquetFileReader(requiredSchema: StructType, shouldUseRecordPosition: Boolean): StructType = {
+    if (shouldUseRecordPosition) StructType(requiredSchema.toArray :+ FileSourceGeneratedMetadataStructField(
+      ROW_INDEX_TEMPORARY_COLUMN_NAME, ROW_INDEX_TEMPORARY_COLUMN_NAME, LongType, nullable = false)) else requiredSchema
+  }
+
+  override def appendRowIndexColumnForFileGroupReader(requiredSchema: StructType, shouldUseRecordPosition: Boolean): StructType = {
+    if (shouldUseRecordPosition) StructType(requiredSchema.toArray :+ ROW_INDEX_FIELD) else requiredSchema
+  }
+
+  override def getDataFilters(requiredFilters: Seq[Filter], recordKeyRelatedFilters: Seq[Filter], shouldUseRecordPosition: Boolean): Seq[Filter] = {
+    requiredFilters ++ recordKeyRelatedFilters
+  }
Review Comment:
Can we directly use the Spark version as the criterion for fetching the row index with the Spark parquet reader, instead of adding new APIs here?
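
A minimal sketch of that suggestion, for illustration only: gate the row-index column on a runtime Spark version check rather than adding adapter-level methods. The `gteqSpark3_5` helper, the `appendRowIndexColumn` name, and the temporary column constant below are assumptions, not the PR's actual code; only `org.apache.spark.SPARK_VERSION` is a real Spark constant.

```scala
import org.apache.spark.sql.types.{LongType, StructField, StructType}

object RowIndexSchemaSketch {
  // Hypothetical gate: only append the row-index metadata column on Spark 3.5+.
  private def gteqSpark3_5: Boolean = {
    val Array(major, minor) = org.apache.spark.SPARK_VERSION.split("\\.").take(2).map(_.toInt)
    major > 3 || (major == 3 && minor >= 5)
  }

  // Stand-in for Spark's temporary row-index metadata column name.
  private val RowIndexTempColumn = "_tmp_metadata_row_index"

  // Append the row-index field only when record positions are requested and the
  // running Spark version can surface it through the Parquet reader.
  def appendRowIndexColumn(requiredSchema: StructType, shouldUseRecordPosition: Boolean): StructType = {
    if (shouldUseRecordPosition && gteqSpark3_5) {
      StructType(requiredSchema.fields :+ StructField(RowIndexTempColumn, LongType, nullable = false))
    } else {
      requiredSchema
    }
  }
}
```

This would keep the version-specific behavior out of the adapter API surface, at the cost of a runtime version check at the call site.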