This is an automated email from the ASF dual-hosted git repository.

yihua pushed a commit to branch release-1.1.0
in repository https://gitbox.apache.org/repos/asf/hudi.git

commit f11bef61e823040da9f5a5af0bfb714fef0e01df
Author: Lin Liu <[email protected]>
AuthorDate: Mon Nov 10 11:51:01 2025 -0500

    fix: Disable positional merging for Spark versions < 3.5 (#14241)
---
 .../org/apache/hudi/HoodieHadoopFsRelationFactory.scala  | 16 ++++++++++------
 .../hudi/functional/TestFiltersInFileGroupReader.java    |  6 ++----
 2 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/HoodieHadoopFsRelationFactory.scala b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/HoodieHadoopFsRelationFactory.scala
index 9ee1663fea65..242f29f2c89c 100644
--- a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/HoodieHadoopFsRelationFactory.scala
+++ b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/HoodieHadoopFsRelationFactory.scala
@@ -20,7 +20,7 @@ package org.apache.hudi
 
 import org.apache.hudi.HoodieBaseRelation.{convertToAvroSchema, isSchemaEvolutionEnabledOnRead}
 import org.apache.hudi.HoodieConversionUtils.toScalaOption
-import org.apache.hudi.common.config.{ConfigProperty, HoodieReaderConfig}
+import org.apache.hudi.common.config.HoodieReaderConfig
 import org.apache.hudi.common.model.HoodieRecord
 import org.apache.hudi.common.table.{HoodieTableConfig, HoodieTableMetaClient, TableSchemaResolver}
 import org.apache.hudi.common.table.log.InstantRange.RangeType
@@ -205,7 +205,7 @@ abstract class HoodieBaseHadoopFsRelationFactory(val sqlContext: SQLContext,
     shouldOmitPartitionColumns || shouldExtractPartitionValueFromPath || isBootstrap
   }
 
-  private lazy val shouldUseRecordPosition: Boolean = checkIfAConfigurationEnabled(HoodieReaderConfig.MERGE_USE_RECORD_POSITIONS)
+  private lazy val shouldUseRecordPosition: Boolean = checkIfPositionalMergingEnabled()
 
   private lazy val queryTimestamp: Option[String] =
     specifiedQueryTimestamp.orElse(toScalaOption(timeline.lastInstant()).map(_.requestedTime))
@@ -214,10 +214,14 @@ abstract class HoodieBaseHadoopFsRelationFactory(val sqlContext: SQLContext,
  // NOTE: We're including compaction here since it's not considered a "commit" operation
     metaClient.getCommitsAndCompactionTimeline.filterCompletedInstants
 
-  private def checkIfAConfigurationEnabled(config: ConfigProperty[java.lang.Boolean],
-                                           defaultValueOption: Option[String] = Option.empty): Boolean = {
-    optParams.getOrElse(config.key(),
-      sqlContext.getConf(config.key(), defaultValueOption.getOrElse(String.valueOf(config.defaultValue())))).toBoolean
+  private def checkIfPositionalMergingEnabled(): Boolean = {
+    if (!HoodieSparkUtils.gteqSpark3_5) {
+      false
+    } else {
+      val configKey = HoodieReaderConfig.MERGE_USE_RECORD_POSITIONS.key
+      optParams.getOrElse(configKey,
+        sqlContext.getConf(configKey, HoodieReaderConfig.MERGE_USE_RECORD_POSITIONS.defaultValue.toString)).toBoolean
+    }
   }
 
   protected lazy val fileStatusCache: FileStatusCache = FileStatusCache.getOrCreate(sparkSession)
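
For reference, the gate introduced above can be read in isolation. The sketch below is illustrative only: it inlines the same decision with a plain Map standing in for optParams and drops the SQLContext fallback; the object name is hypothetical, while HoodieSparkUtils.gteqSpark3_5 and the config constant are the ones used in the diff.

    import org.apache.hudi.HoodieSparkUtils
    import org.apache.hudi.common.config.HoodieReaderConfig

    object PositionalMergeGate {
      // Mirrors checkIfPositionalMergingEnabled(): on Spark < 3.5 the gate
      // always resolves to false, regardless of the configured value.
      def enabled(optParams: Map[String, String]): Boolean = {
        if (!HoodieSparkUtils.gteqSpark3_5) {
          false
        } else {
          val key = HoodieReaderConfig.MERGE_USE_RECORD_POSITIONS.key
          optParams.getOrElse(key,
            HoodieReaderConfig.MERGE_USE_RECORD_POSITIONS.defaultValue.toString).toBoolean
        }
      }
    }
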
diff --git a/hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/functional/TestFiltersInFileGroupReader.java b/hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/functional/TestFiltersInFileGroupReader.java
index 4d6f1f17efc7..c1221c7b4b32 100644
--- a/hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/functional/TestFiltersInFileGroupReader.java
+++ b/hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/functional/TestFiltersInFileGroupReader.java
@@ -27,7 +27,6 @@ import org.apache.spark.sql.Dataset;
 import org.apache.spark.sql.Row;
 import org.apache.spark.sql.SaveMode;
 import org.apache.spark.sql.internal.SQLConf;
-import org.junit.jupiter.api.Disabled;
 import org.junit.jupiter.api.Tag;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.ValueSource;
@@ -39,12 +38,11 @@ import java.util.Map;
  * Ensure that parquet filters are not being pushed down when they shouldn't be
  */
 @Tag("functional")
-public class TestFiltersInFileGroupReader extends TestBootstrapReadBase {
+class TestFiltersInFileGroupReader extends TestBootstrapReadBase {
 
-  @Disabled("issues/14222")
   @ParameterizedTest
   @ValueSource(booleans = {true, false})
-  public void testFiltersInFileFormat(boolean mergeUseRecordPositions) {
+  void testFiltersInFileFormat(boolean mergeUseRecordPositions) {
     this.bootstrapType = "mixed";
     this.dashPartitions = true;
     this.tableType = HoodieTableType.MERGE_ON_READ;

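As a usage sketch (session setup and table path are placeholders), the option this gate reads, hoodie.merge.use.record.positions, i.e. the key behind HoodieReaderConfig.MERGE_USE_RECORD_POSITIONS, can be passed on read; after this fix it only takes effect on Spark 3.5+:

    import org.apache.spark.sql.SparkSession

    val spark = SparkSession.builder()
      .appName("hudi-positional-merge-example") // hypothetical app name
      .getOrCreate()

    // Ask for positional merging; on Spark < 3.5 the reader now resolves
    // this to false rather than enabling position-based merging.
    val df = spark.read.format("hudi")
      .option("hoodie.merge.use.record.positions", "true")
      .load("/tmp/hudi/example_table") // placeholder table path

    df.show()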