This is an automated email from the ASF dual-hosted git repository.

danny0405 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git


The following commit(s) were added to refs/heads/master by this push:
     new 653f2ced8309 fix: push down pk filters to log file when spark enable 
`parquetFilterPushDown` (#14332)
653f2ced8309 is described below

commit 653f2ced83099d7060407db830c224438334317f
Author: TheR1sing3un <[email protected]>
AuthorDate: Wed Nov 26 16:20:28 2025 +0800

    fix: push down pk filters to log file when spark enable 
`parquetFilterPushDown` (#14332)
    
    1. Push down primary-key filters to the log file when Spark enables
    `parquetFilterPushDown`.
    
    The previous check read the wrong configuration key due to a typo.
    Whether filters are pushed down is controlled by `parquetFilterPushDown`,
    while whether record-level filtering is performed inside the Parquet
    reader is controlled by `parquetRecordFilterEnabled`.
    
    Signed-off-by: TheR1sing3un <[email protected]>
---
 .../main/java/org/apache/hudi/io/storage/HoodieSparkParquetReader.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/HoodieSparkParquetReader.java
 
b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/HoodieSparkParquetReader.java
index 1e808613864d..81a0004ab19a 100644
--- 
a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/HoodieSparkParquetReader.java
+++ 
b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/HoodieSparkParquetReader.java
@@ -155,7 +155,7 @@ public class HoodieSparkParquetReader implements 
HoodieSparkFileReader {
     storage.getConf().set(SQLConf.PARQUET_BINARY_AS_STRING().key(), 
sqlConf.getConf(SQLConf.PARQUET_BINARY_AS_STRING()).toString());
     storage.getConf().set(SQLConf.PARQUET_INT96_AS_TIMESTAMP().key(), 
sqlConf.getConf(SQLConf.PARQUET_INT96_AS_TIMESTAMP()).toString());
     RebaseDateTime.RebaseSpec rebaseDateSpec = 
SparkAdapterSupport$.MODULE$.sparkAdapter().getRebaseSpec("CORRECTED");
-    boolean parquetFilterPushDown = 
storage.getConf().getBoolean(SQLConf.PARQUET_RECORD_FILTER_ENABLED().key(), 
sqlConf.parquetRecordFilterEnabled());
+    boolean parquetFilterPushDown = 
storage.getConf().getBoolean(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED().key(), 
sqlConf.parquetFilterPushDown());
     if (parquetFilterPushDown && readFilters != null && 
!readFilters.isEmpty()) {
       ParquetMetadata parquetMetadataWithoutRowGroup = 
getParquetMetadataWithoutRowGroup();
       ParquetFilters parquetFilters = new ParquetFilters(

Reply via email to