This is an automated email from the ASF dual-hosted git repository.

schang pushed a commit to branch branch-0.x
in repository https://gitbox.apache.org/repos/asf/hudi.git


The following commit(s) were added to refs/heads/branch-0.x by this push:
     new 7f9b469b23cb fix: Databricks Spark 3.4 Runtime compatibility for reading Hudi tables (#18291)
7f9b469b23cb is described below

commit 7f9b469b23cb604531a5a04ac4225478aced6773
Author: Y Ethan Guo <[email protected]>
AuthorDate: Sat Mar 7 21:55:21 2026 -0800

    fix: Databricks Spark 3.4 Runtime compatibility for reading Hudi tables (#18291)
---
 .../org/apache/hudi/DatabricksRuntimeHelper.scala  | 75 ++++++++++++++++++++++
 .../HoodieSpark34PartitionedFileUtils.scala        |  3 +-
 2 files changed, 77 insertions(+), 1 deletion(-)

diff --git a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/DatabricksRuntimeHelper.scala b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/DatabricksRuntimeHelper.scala
new file mode 100644
index 000000000000..2ddd07ceb0bf
--- /dev/null
+++ b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/DatabricksRuntimeHelper.scala
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hudi
+
+import org.apache.hadoop.fs.FileStatus
+import org.apache.spark.sql.catalyst.InternalRow
+import org.apache.spark.sql.execution.datasources.PartitionDirectory
+
+import scala.util.Try
+
+/**
+ * Compatibility helper for Databricks Spark runtime.
+ */
+object DatabricksRuntimeHelper {
+  private lazy val fileStatusWithMetadataClass: Option[Class[_]] =
+    Try(Class.forName("org.apache.spark.sql.execution.datasources.FileStatusWithMetadata")).toOption
+  private lazy val fileStatusWithMetadataModuleClass =
+    Try(Class.forName("org.apache.spark.sql.execution.datasources.FileStatusWithMetadata$")).toOption
+  private lazy val fileStatusWithMetadataModule = fileStatusWithMetadataModuleClass.map { cls =>
+    cls.getField("MODULE$").get(null) // the singleton companion object instance
+  }
+  private lazy val fileStatusWithMetadataInstantiationMethod = fileStatusWithMetadataClass.flatMap { cls =>
+    Try(cls.getMethod("apply", classOf[FileStatus])).toOption
+  }
+
+  private lazy val partitionDirectoryConstructor: Option[java.lang.reflect.Constructor[_]] =
+    fileStatusWithMetadataClass.flatMap { _ =>
+      Try(classOf[PartitionDirectory].getConstructor(classOf[InternalRow], classOf[Seq[_]])).toOption
+    }
+
+  /**
+   * Creates a PartitionDirectory, using reflection on Databricks runtime where
+   * PartitionDirectory expects (InternalRow, Seq[FileStatusWithMetadata]) instead of
+   * (InternalRow, Seq[FileStatus]).
+   *
+   * Databricks Spark 3.4 runtime backports FileStatusWithMetadata from Spark 3.5.
+   * On Databricks Spark 3.4, PartitionDirectory expects FileStatusWithMetadata
+   * instead of plain FileStatus.
+   *
+   * FileStatusWithMetadata does NOT extend FileStatus (it wraps it via composition),
+   * so individual element casts are impossible. We use reflection to construct it.
+   *
+   * On Databricks, constructs FileStatusWithMetadata.apply(fileStatus) for each file
+   * status, then reflectively constructs PartitionDirectory.
+   * On standard Spark, falls back to normal PartitionDirectory construction.
+   */
+  def newPartitionDirectory(internalRow: InternalRow,
+                            statuses: Seq[FileStatus],
+                            fallback: (InternalRow, Seq[FileStatus]) => PartitionDirectory): PartitionDirectory = {
+    (fileStatusWithMetadataInstantiationMethod, fileStatusWithMetadataModule, partitionDirectoryConstructor) match {
+      case (Some(method), Some(module), Some(ctor)) =>
+        val wrappedStatuses = statuses.map(e => method.invoke(module, e))
+        ctor.newInstance(internalRow, wrappedStatuses).asInstanceOf[PartitionDirectory]
+      case _ =>
+        fallback(internalRow, statuses)
+    }
+  }
+}
diff --git a/hudi-spark-datasource/hudi-spark3.4.x/src/main/scala/org/apache/spark/sql/execution/datasources/HoodieSpark34PartitionedFileUtils.scala b/hudi-spark-datasource/hudi-spark3.4.x/src/main/scala/org/apache/spark/sql/execution/datasources/HoodieSpark34PartitionedFileUtils.scala
index c51e13763c76..fdc5acbc5fe7 100644
--- a/hudi-spark-datasource/hudi-spark3.4.x/src/main/scala/org/apache/spark/sql/execution/datasources/HoodieSpark34PartitionedFileUtils.scala
+++ b/hudi-spark-datasource/hudi-spark3.4.x/src/main/scala/org/apache/spark/sql/execution/datasources/HoodieSpark34PartitionedFileUtils.scala
@@ -19,6 +19,7 @@
 
 package org.apache.spark.sql.execution.datasources
 
+import org.apache.hudi.DatabricksRuntimeHelper
 import org.apache.hudi.storage.StoragePath
 
 import org.apache.hadoop.fs.FileStatus
@@ -49,6 +50,6 @@ object HoodieSpark34PartitionedFileUtils extends HoodieSparkPartitionedFileUtils
   }
 
   override def newPartitionDirectory(internalRow: InternalRow, statuses: Seq[FileStatus]): PartitionDirectory = {
-    PartitionDirectory(internalRow, statuses)
+    DatabricksRuntimeHelper.newPartitionDirectory(internalRow, statuses, (row, files) => PartitionDirectory(row, files))
   }
 }
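
For context, the helper introduced above follows a detect-and-fallback reflection pattern: probe once for a class that only some runtimes ship, cache the result in lazy vals, and take the reflective path only when every probe succeeds. The following is a minimal standalone sketch of that pattern, not code from this commit; ReflectionFallbackSketch and com.example.OptionalRuntimeClass are made-up names, and scala.collection.immutable.List$ merely stands in for a companion-object class like FileStatusWithMetadata$.

    import scala.util.Try

    object ReflectionFallbackSketch {
      // Probe for a class that may or may not be on the classpath. Try absorbs
      // the ClassNotFoundException, so absence becomes None instead of a crash.
      private lazy val optionalClass: Option[Class[_]] =
        Try(Class.forName("com.example.OptionalRuntimeClass")).toOption

      // A Scala companion object compiles to a class named `Foo$` whose singleton
      // instance lives in a static field called MODULE$; that is how the helper
      // above obtains the FileStatusWithMetadata companion to invoke apply on.
      private lazy val companionInstance: Option[AnyRef] =
        Try(Class.forName("scala.collection.immutable.List$"))
          .map(cls => cls.getField("MODULE$").get(null))
          .toOption

      def main(args: Array[String]): Unit = {
        println(optionalClass.isDefined)             // false: class absent, no exception thrown
        println(companionInstance.exists(_ eq List)) // true: same singleton as `List`
      }
    }

Because the probes live in lazy vals, each Class.forName runs at most once per JVM. On a standard (non-Databricks) Spark 3.4 classpath the FileStatusWithMetadata probes return None, so newPartitionDirectory always routes to the caller-supplied fallback and behavior is unchanged.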
