Copilot commented on code in PR #2031:
URL: https://github.com/apache/auron/pull/2031#discussion_r2852327072


##########
thirdparty/auron-hudi/src/main/scala/org/apache/spark/sql/auron/hudi/HudiConvertProvider.scala:
##########
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.auron.hudi
+
+import org.apache.spark.internal.Logging
+import org.apache.spark.sql.auron.{AuronConverters, AuronConvertProvider, NativeConverters, Shims}
+import org.apache.spark.sql.execution.FileSourceScanExec
+import org.apache.spark.sql.execution.SparkPlan
+
+import org.apache.auron.spark.configuration.SparkAuronConfiguration
+
+class HudiConvertProvider extends AuronConvertProvider with Logging {
+
+  override def isEnabled: Boolean = {
+    val sparkVersion = org.apache.spark.SPARK_VERSION
+    val major = sparkVersion.takeWhile(_.isDigit)
+    val supported = major.nonEmpty && major.toInt >= 3 && major.toInt < 4
+    SparkAuronConfiguration.ENABLE_HUDI_SCAN.get() && supported
+  }
+
+  override def isSupported(exec: SparkPlan): Boolean = {
+    exec match {
+      case scan: FileSourceScanExec =>
+        // Only handle Hudi-backed file scans; other scans fall through.
+        HudiScanSupport.isSupported(scan)
+      case _ => false
+    }
+  }
+
+  override def convert(exec: SparkPlan): SparkPlan = {
+    exec match {
+      case scan: FileSourceScanExec if HudiScanSupport.isSupported(scan) =>
+        HudiScanSupport.fileFormat(scan) match {
+          case Some(HudiScanSupport.ParquetFormat) =>
+            assert(SparkAuronConfiguration.ENABLE_SCAN_PARQUET.get())
+            // Hudi falls back to Spark when timestamp scanning is disabled.
+            if (!SparkAuronConfiguration.ENABLE_SCAN_PARQUET_TIMESTAMP.get()) {
+              if (scan.requiredSchema.exists(e =>
+                  NativeConverters.existTimestampType(e.dataType))) {
+                return exec
+              }

Review Comment:
   This conversion relies on `assert(ENABLE_SCAN_PARQUET.get())` to enforce the config, but JVM assertions are disabled by default (and typically off in production), which would allow native conversion even when Parquet scanning is configured off. Replace the assert with an explicit runtime check that returns the original exec when disabled, so the config is always respected without throwing or logging warnings.
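   A minimal sketch of the explicit gate, reusing the configuration accessor already referenced in this branch (surrounding match arms elided):
   ```scala
   case Some(HudiScanSupport.ParquetFormat) =>
     // Runtime check instead of assert: honored even when the JVM runs
     // without -ea; fall back to the original Spark plan.
     if (!SparkAuronConfiguration.ENABLE_SCAN_PARQUET.get()) {
       return exec
     }
   ```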



##########
dev/reformat:
##########
@@ -52,7 +52,7 @@ fi
 sparkver=spark-3.5
 for celebornver in celeborn-0.5 celeborn-0.6
 do
-  run_maven -P"${sparkver}" -Pceleborn,"${celebornver}" -Puniffle,uniffle-0.10 -Ppaimon,paimon-1.2 -Pflink,flink-1.18 -Piceberg,iceberg-1.9
+  run_maven -P"${sparkver}" -Pceleborn,"${celebornver}" -Puniffle,uniffle-0.10 -Ppaimon,paimon-1.2 -Phudi,hudi-0.15 -Pflink,flink-1.18 -Piceberg,iceberg-1.9

Review Comment:
   `-Phudi,hudi-0.15` enables a `hudi` profile that doesn't appear to exist in the root pom (only `hudi-0.15` is defined). This will at least emit Maven warnings during reformat and may hide real profile issues; consider dropping the nonexistent `hudi` profile and enabling only `-Phudi-0.15`, consistent with auron-build.sh.
   ```suggestion
  run_maven -P"${sparkver}" -Pceleborn,"${celebornver}" -Puniffle,uniffle-0.10 -Ppaimon,paimon-1.2 -Phudi-0.15 -Pflink,flink-1.18 -Piceberg,iceberg-1.9
   ```



##########
thirdparty/auron-hudi/pom.xml:
##########
@@ -0,0 +1,121 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one or more
+  ~ contributor license agreements.  See the NOTICE file distributed with
+  ~ this work for additional information regarding copyright ownership.
+  ~ The ASF licenses this file to You under the Apache License, Version 2.0
+  ~ (the "License"); you may not use this file except in compliance with
+  ~ the License.  You may obtain a copy of the License at
+  ~
+  ~    http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <groupId>org.apache.auron</groupId>
+    <artifactId>auron-parent_${scalaVersion}</artifactId>
+    <version>${project.version}</version>
+    <relativePath>../../pom.xml</relativePath>
+  </parent>
+
+  <artifactId>auron-hudi_${scalaVersion}</artifactId>
+  <packaging>jar</packaging>
+  <name>Apache Auron Hudi ${hudiVersion} ${scalaVersion}</name>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.auron</groupId>
+      <artifactId>spark-extension_${scalaVersion}</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hudi</groupId>
+      <artifactId>hudi-spark${shortSparkVersion}-bundle_${scalaVersion}</artifactId>
+      <version>${hudiVersion}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-sql_${scalaVersion}</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.scalatest</groupId>
+      <artifactId>scalatest_${scalaVersion}</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-core_${scalaVersion}</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-sql_${scalaVersion}</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-catalyst_${scalaVersion}</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-hive_${scalaVersion}</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.auron</groupId>
+      <artifactId>spark-extension-shims-spark_${scalaVersion}</artifactId>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-enforcer-plugin</artifactId>
+        <version>${maven-enforcer-plugin.version}</version>
+        <executions>
+          <execution>
+            <id>hudi-spark-version-compat</id>
+            <goals>
+              <goal>enforce</goal>
+            </goals>
+            <configuration>
+              <rules>
+                <requireProperty>
+                  <property>hudiEnabled</property>
+                  <regex>true</regex>
+                  <regexMessage>Hudi module requires hudiEnabled=true.</regexMessage>
+                </requireProperty>
+                <requireProperty>
+                  <property>shortSparkVersion</property>
+                  <regex>3\.0|3\.1|3\.2|3\.3|3\.4|3\.5</regex>
+                  <regexMessage>Hudi integration supports Spark 3.0-3.5 only. Current: ${shortSparkVersion}</regexMessage>
+                </requireProperty>
+                <requireProperty>
+                  <property>hudiVersion</property>
+                  <regex>0\.15\.0</regex>

Review Comment:
   The `hudiVersion` regex is not anchored, so a version like `0.15.0-SNAPSHOT` 
could still match `0\.15\.0` and pass. If the intent is to pin strictly to 
0.15.0, anchor the regex (e.g., `^0\.15\.0$`).
   ```suggestion
                     <regex>^0\.15\.0$</regex>
   ```



##########
thirdparty/auron-hudi/src/main/scala/org/apache/spark/sql/auron/hudi/HudiConvertProvider.scala:
##########
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.auron.hudi
+
+import org.apache.spark.internal.Logging
+import org.apache.spark.sql.auron.{AuronConverters, AuronConvertProvider, NativeConverters, Shims}
+import org.apache.spark.sql.execution.FileSourceScanExec
+import org.apache.spark.sql.execution.SparkPlan
+
+import org.apache.auron.spark.configuration.SparkAuronConfiguration
+
+class HudiConvertProvider extends AuronConvertProvider with Logging {
+
+  override def isEnabled: Boolean = {
+    val sparkVersion = org.apache.spark.SPARK_VERSION
+    val major = sparkVersion.takeWhile(_.isDigit)
+    val supported = major.nonEmpty && major.toInt >= 3 && major.toInt < 4

Review Comment:
   Spark version gating only checks the major version (>=3 && <4), which would 
also enable this provider on any future Spark 3.x beyond the supported range 
(the PR scope says 3.0–3.5). Consider parsing major+minor and explicitly 
restricting to 3.0–3.5 to avoid accidentally enabling on unsupported Spark 
versions.
   ```suggestion
   import scala.util.Try
   
   class HudiConvertProvider extends AuronConvertProvider with Logging {
   
     override def isEnabled: Boolean = {
       val sparkVersion = org.apache.spark.SPARK_VERSION
       val versionParts = sparkVersion.split("[\\.-]", 3)
   
      val maybeMajor = versionParts.headOption.flatMap(part => Try(part.toInt).toOption)
      val maybeMinor =
        if (versionParts.length >= 2) Try(versionParts(1).toInt).toOption else None
   
       val supported = (for {
         major <- maybeMajor
         minor <- maybeMinor
       } yield major == 3 && minor >= 0 && minor <= 5).getOrElse(false)
   ```



##########
thirdparty/auron-hudi/src/main/scala/org/apache/spark/sql/auron/hudi/HudiConvertProvider.scala:
##########
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.auron.hudi
+
+import org.apache.spark.internal.Logging
+import org.apache.spark.sql.auron.{AuronConverters, AuronConvertProvider, NativeConverters, Shims}
+import org.apache.spark.sql.execution.FileSourceScanExec
+import org.apache.spark.sql.execution.SparkPlan
+
+import org.apache.auron.spark.configuration.SparkAuronConfiguration
+
+class HudiConvertProvider extends AuronConvertProvider with Logging {
+
+  override def isEnabled: Boolean = {
+    val sparkVersion = org.apache.spark.SPARK_VERSION
+    val major = sparkVersion.takeWhile(_.isDigit)
+    val supported = major.nonEmpty && major.toInt >= 3 && major.toInt < 4
+    SparkAuronConfiguration.ENABLE_HUDI_SCAN.get() && supported
+  }
+
+  override def isSupported(exec: SparkPlan): Boolean = {
+    exec match {
+      case scan: FileSourceScanExec =>
+        // Only handle Hudi-backed file scans; other scans fall through.
+        HudiScanSupport.isSupported(scan)
+      case _ => false
+    }
+  }
+
+  override def convert(exec: SparkPlan): SparkPlan = {
+    exec match {
+      case scan: FileSourceScanExec if HudiScanSupport.isSupported(scan) =>
+        HudiScanSupport.fileFormat(scan) match {
+          case Some(HudiScanSupport.ParquetFormat) =>
+            assert(SparkAuronConfiguration.ENABLE_SCAN_PARQUET.get())
+            // Hudi falls back to Spark when timestamp scanning is disabled.
+            if (!SparkAuronConfiguration.ENABLE_SCAN_PARQUET_TIMESTAMP.get()) {
+              if (scan.requiredSchema.exists(e =>
+                  NativeConverters.existTimestampType(e.dataType))) {
+                return exec
+              }
+            }
+            logDebug(s"Applying native parquet scan for Hudi: ${scan.relation.location}")
+            AuronConverters.addRenameColumnsExec(Shims.get.createNativeParquetScanExec(scan))
+          case Some(HudiScanSupport.OrcFormat) =>
+            assert(SparkAuronConfiguration.ENABLE_SCAN_ORC.get())
+            // ORC follows the same timestamp fallback rule as Parquet.
+            if (!SparkAuronConfiguration.ENABLE_SCAN_ORC_TIMESTAMP.get()) {
+              if (scan.requiredSchema.exists(e =>
+                  NativeConverters.existTimestampType(e.dataType))) {
+                return exec
+              }

Review Comment:
   Same issue as the Parquet branch: using `assert(ENABLE_SCAN_ORC.get())` as a 
config gate is unsafe if assertions are disabled, and it forces fallback via 
exceptions/log warnings when ORC scanning is off. Use an explicit check and 
return the original Spark plan when disabled.
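   A minimal sketch mirroring the Parquet fix above (surrounding match arms elided):
   ```scala
   case Some(HudiScanSupport.OrcFormat) =>
     // Explicit runtime gate: return the original plan when native ORC
     // scanning is configured off, without relying on JVM assertions.
     if (!SparkAuronConfiguration.ENABLE_SCAN_ORC.get()) {
       return exec
     }
   ```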



##########
thirdparty/auron-hudi/src/main/scala/org/apache/spark/sql/auron/hudi/HudiScanSupport.scala:
##########
@@ -0,0 +1,257 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.auron.hudi
+
+import java.net.URI
+import java.util.{Locale, Properties}
+
+import org.apache.hadoop.fs.Path
+import org.apache.spark.internal.Logging
+import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.catalyst.catalog.CatalogTable
+import org.apache.spark.sql.execution.FileSourceScanExec
+import org.apache.spark.sql.execution.datasources.HadoopFsRelation
+
+object HudiScanSupport extends Logging {
+  sealed trait HudiFileFormat
+  case object ParquetFormat extends HudiFileFormat
+  case object OrcFormat extends HudiFileFormat
+
+  private val hudiParquetFileFormatSuffix = "HoodieParquetFileFormat"
+  private val newHudiParquetFileFormatSuffix = "NewHoodieParquetFileFormat"
+  private val hudiOrcFileFormatSuffix = "HoodieOrcFileFormat"
+  private val newHudiOrcFileFormatSuffix = "NewHoodieOrcFileFormat"
+  private val morTableTypes = Set("merge_on_read", "mor")
+  private val hudiTableTypeKeys = Seq(
+    "hoodie.datasource.write.table.type",
+    "hoodie.datasource.read.table.type",
+    "hoodie.table.type")
+  private val hudiBaseFileFormatKeys = Seq(
+    "hoodie.table.base.file.format",
+    "hoodie.datasource.write.base.file.format",
+    "hoodie.datasource.write.storage.type")
+
+  def fileFormat(scan: FileSourceScanExec): Option[HudiFileFormat] = {
+    val fileFormatName = scan.relation.fileFormat.getClass.getName
+    val fromClass = fileFormat(fileFormatName)
+    if (fromClass.nonEmpty) {
+      return fromClass
+    }
+    // Spark may report generic Orc/Parquet formats for Hudi; use metadata fallback
+    // only when the underlying file index indicates a Hudi table.
+    fileFormatFromMeta(scan, catalogTable(scan.relation), fileFormatName)
+  }
+
+  private[hudi] def fileFormat(fileFormatName: String): Option[HudiFileFormat] = {
+    logDebug(s"Hudi fileFormat resolved to: ${fileFormatName}")
+    if (fileFormatName.endsWith(newHudiParquetFileFormatSuffix) ||
+      fileFormatName.endsWith(newHudiOrcFileFormatSuffix)) {
+      return None
+    }
+    if (fileFormatName.endsWith(hudiParquetFileFormatSuffix)) {
+      return Some(ParquetFormat)
+    }
+    if (fileFormatName.endsWith(hudiOrcFileFormatSuffix)) {
+      return Some(OrcFormat)
+    }
+    None
+  }
+
+  def isSupported(scan: FileSourceScanExec): Boolean =
+    isSupported(fileFormat(scan), scan.relation.options, catalogTable(scan.relation))
+
+  private[hudi] def isSupported(fileFormatName: String, options: Map[String, String]): Boolean = {
+    isSupported(fileFormat(fileFormatName), options, None)
+  }
+
+  private[hudi] def isSupported(
+      fileFormat: Option[HudiFileFormat],
+      options: Map[String, String],
+      catalogTable: Option[CatalogTable]): Boolean = {
+    if (fileFormat.isEmpty) {
+      return false
+    }
+    if (hasTimeTravel(options)) {
+      return false
+    }
+
+    val tableType = tableTypeFromOptions(options)
+      .orElse(tableTypeFromCatalog(catalogTable))
+      .orElse(tableTypeFromMeta(options))
+      .map(_.toLowerCase(Locale.ROOT))
+
+    logDebug(s"Hudi tableType resolved to: ${tableType.getOrElse("unknown")}")
+
+    // Only support basic COW tables for the base version.
+    !tableType.exists(morTableTypes.contains)
+  }
+
+  private def tableTypeFromOptions(options: Map[String, String]): Option[String] = {
+    hudiTableTypeKeys
+      .flatMap(key => options.get(key))
+      .headOption
+  }
+
+  private def baseFileFormatFromOptions(options: Map[String, String]): Option[String] = {
+    hudiBaseFileFormatKeys
+      .flatMap(key => options.get(key))
+      .headOption
+  }
+
+  private def tableTypeFromMeta(options: Map[String, String]): Option[String] = {
+    val basePath = options.get("path").map(normalizePath)
+    basePath.flatMap { path =>
+      try {
+        val hadoopConf = SparkSession.active.sessionState.newHadoopConf()
+        val base = new Path(path)
+        val fs = base.getFileSystem(hadoopConf)
+        val propsPath = new Path(base, ".hoodie/hoodie.properties")
+        if (!fs.exists(propsPath)) {

Review Comment:
   tableTypeFromMeta/baseFileFormatFromMeta read and parse `.hoodie/hoodie.properties` via the Hadoop FS on every `isSupported`/`fileFormat` evaluation whenever the options/catalog don't contain the needed keys. Since conversion runs during planning, these repeated FS opens can add noticeable latency for queries with many scans; consider caching the resolved table type/base file format per base path (and possibly downgrading the exception logs to debug to avoid noise on transient FS issues).
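   One possible shape for such a cache; the object and method names here are hypothetical, not part of this PR:
   ```scala
   import java.util.concurrent.ConcurrentHashMap

   // Hypothetical helper: memoizes the resolved (tableType, baseFileFormat)
   // pair per normalized base path, so planning does not re-open
   // .hoodie/hoodie.properties for every scan of the same table.
   object HudiTablePropsCache {
     private val cache =
       new ConcurrentHashMap[String, (Option[String], Option[String])]()

     def getOrLoad(basePath: String)(
         load: String => (Option[String], Option[String])): (Option[String], Option[String]) =
       cache.computeIfAbsent(basePath, p => load(p))
   }
   ```
   An unbounded map is fine for a handful of tables per session; a size-bounded cache would be safer for long-lived sessions that touch many tables.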



##########
thirdparty/auron-hudi/pom.xml:
##########
@@ -0,0 +1,121 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one or more
+  ~ contributor license agreements.  See the NOTICE file distributed with
+  ~ this work for additional information regarding copyright ownership.
+  ~ The ASF licenses this file to You under the Apache License, Version 2.0
+  ~ (the "License"); you may not use this file except in compliance with
+  ~ the License.  You may obtain a copy of the License at
+  ~
+  ~    http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <groupId>org.apache.auron</groupId>
+    <artifactId>auron-parent_${scalaVersion}</artifactId>
+    <version>${project.version}</version>
+    <relativePath>../../pom.xml</relativePath>
+  </parent>
+
+  <artifactId>auron-hudi_${scalaVersion}</artifactId>
+  <packaging>jar</packaging>
+  <name>Apache Auron Hudi ${hudiVersion} ${scalaVersion}</name>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.auron</groupId>
+      <artifactId>spark-extension_${scalaVersion}</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hudi</groupId>
+      <artifactId>hudi-spark${shortSparkVersion}-bundle_${scalaVersion}</artifactId>
+      <version>${hudiVersion}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-sql_${scalaVersion}</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.scalatest</groupId>
+      <artifactId>scalatest_${scalaVersion}</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-core_${scalaVersion}</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-sql_${scalaVersion}</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-catalyst_${scalaVersion}</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-hive_${scalaVersion}</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.auron</groupId>
+      <artifactId>spark-extension-shims-spark_${scalaVersion}</artifactId>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-enforcer-plugin</artifactId>
+        <version>${maven-enforcer-plugin.version}</version>
+        <executions>
+          <execution>
+            <id>hudi-spark-version-compat</id>
+            <goals>
+              <goal>enforce</goal>
+            </goals>
+            <configuration>
+              <rules>
+                <requireProperty>
+                  <property>hudiEnabled</property>
+                  <regex>true</regex>
+                  <regexMessage>Hudi module requires hudiEnabled=true.</regexMessage>
+                </requireProperty>
+                <requireProperty>
+                  <property>shortSparkVersion</property>
+                  <regex>3\.0|3\.1|3\.2|3\.3|3\.4|3\.5</regex>

Review Comment:
   The `shortSparkVersion` regex is not anchored, so unsupported values like 
`3.10` would match the `3\.1` alternative and incorrectly pass. Anchor the 
regex to the full string (e.g., `^(3\.0|3\.1|3\.2|3\.3|3\.4|3\.5)$`).
   ```suggestion
                     <regex>^(3\.0|3\.1|3\.2|3\.3|3\.4|3\.5)$</regex>
   ```



##########
thirdparty/auron-hudi/pom.xml:
##########
@@ -0,0 +1,121 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one or more
+  ~ contributor license agreements.  See the NOTICE file distributed with
+  ~ this work for additional information regarding copyright ownership.
+  ~ The ASF licenses this file to You under the Apache License, Version 2.0
+  ~ (the "License"); you may not use this file except in compliance with
+  ~ the License.  You may obtain a copy of the License at
+  ~
+  ~    http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <groupId>org.apache.auron</groupId>
+    <artifactId>auron-parent_${scalaVersion}</artifactId>
+    <version>${project.version}</version>
+    <relativePath>../../pom.xml</relativePath>
+  </parent>
+
+  <artifactId>auron-hudi_${scalaVersion}</artifactId>
+  <packaging>jar</packaging>
+  <name>Apache Auron Hudi ${hudiVersion} ${scalaVersion}</name>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.auron</groupId>
+      <artifactId>spark-extension_${scalaVersion}</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hudi</groupId>
+      <artifactId>hudi-spark${shortSparkVersion}-bundle_${scalaVersion}</artifactId>
+      <version>${hudiVersion}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-sql_${scalaVersion}</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.scalatest</groupId>
+      <artifactId>scalatest_${scalaVersion}</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-core_${scalaVersion}</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-sql_${scalaVersion}</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-catalyst_${scalaVersion}</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-hive_${scalaVersion}</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.auron</groupId>
+      <artifactId>spark-extension-shims-spark_${scalaVersion}</artifactId>
+      <version>${project.version}</version>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-enforcer-plugin</artifactId>
+        <version>${maven-enforcer-plugin.version}</version>
+        <executions>
+          <execution>
+            <id>hudi-spark-version-compat</id>
+            <goals>
+              <goal>enforce</goal>
+            </goals>
+            <configuration>
+              <rules>
+                <requireProperty>
+                  <property>hudiEnabled</property>
+                  <regex>true</regex>

Review Comment:
   The enforcer rule `hudiEnabled` uses regex `true` without anchors, so values 
like `true1` would incorrectly pass. Anchor the regex (e.g., `^true$`) to 
ensure only the exact expected value is accepted.
   ```suggestion
                     <regex>^true$</regex>
   ```



##########
thirdparty/auron-hudi/src/main/scala/org/apache/spark/sql/auron/hudi/HudiScanSupport.scala:
##########
@@ -0,0 +1,257 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.auron.hudi
+
+import java.net.URI
+import java.util.{Locale, Properties}
+
+import org.apache.hadoop.fs.Path
+import org.apache.spark.internal.Logging
+import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.catalyst.catalog.CatalogTable
+import org.apache.spark.sql.execution.FileSourceScanExec
+import org.apache.spark.sql.execution.datasources.HadoopFsRelation
+
+object HudiScanSupport extends Logging {
+  sealed trait HudiFileFormat
+  case object ParquetFormat extends HudiFileFormat
+  case object OrcFormat extends HudiFileFormat
+
+  private val hudiParquetFileFormatSuffix = "HoodieParquetFileFormat"
+  private val newHudiParquetFileFormatSuffix = "NewHoodieParquetFileFormat"
+  private val hudiOrcFileFormatSuffix = "HoodieOrcFileFormat"
+  private val newHudiOrcFileFormatSuffix = "NewHoodieOrcFileFormat"
+  private val morTableTypes = Set("merge_on_read", "mor")
+  private val hudiTableTypeKeys = Seq(
+    "hoodie.datasource.write.table.type",
+    "hoodie.datasource.read.table.type",
+    "hoodie.table.type")
+  private val hudiBaseFileFormatKeys = Seq(
+    "hoodie.table.base.file.format",
+    "hoodie.datasource.write.base.file.format",
+    "hoodie.datasource.write.storage.type")
+
+  def fileFormat(scan: FileSourceScanExec): Option[HudiFileFormat] = {
+    val fileFormatName = scan.relation.fileFormat.getClass.getName
+    val fromClass = fileFormat(fileFormatName)
+    if (fromClass.nonEmpty) {
+      return fromClass
+    }
+    // Spark may report generic Orc/Parquet formats for Hudi; use metadata fallback
+    // only when the underlying file index indicates a Hudi table.
+    fileFormatFromMeta(scan, catalogTable(scan.relation), fileFormatName)
+  }
+
+  private[hudi] def fileFormat(fileFormatName: String): Option[HudiFileFormat] = {
+    logDebug(s"Hudi fileFormat resolved to: ${fileFormatName}")
+    if (fileFormatName.endsWith(newHudiParquetFileFormatSuffix) ||
+      fileFormatName.endsWith(newHudiOrcFileFormatSuffix)) {
+      return None
+    }
+    if (fileFormatName.endsWith(hudiParquetFileFormatSuffix)) {
+      return Some(ParquetFormat)
+    }
+    if (fileFormatName.endsWith(hudiOrcFileFormatSuffix)) {
+      return Some(OrcFormat)
+    }
+    None
+  }
+
+  def isSupported(scan: FileSourceScanExec): Boolean =
+    isSupported(fileFormat(scan), scan.relation.options, catalogTable(scan.relation))
+
+  private[hudi] def isSupported(fileFormatName: String, options: Map[String, String]): Boolean = {
+    isSupported(fileFormat(fileFormatName), options, None)
+  }
+
+  private[hudi] def isSupported(
+      fileFormat: Option[HudiFileFormat],
+      options: Map[String, String],
+      catalogTable: Option[CatalogTable]): Boolean = {
+    if (fileFormat.isEmpty) {
+      return false
+    }
+    if (hasTimeTravel(options)) {
+      return false
+    }
+
+    val tableType = tableTypeFromOptions(options)
+      .orElse(tableTypeFromCatalog(catalogTable))
+      .orElse(tableTypeFromMeta(options))
+      .map(_.toLowerCase(Locale.ROOT))
+
+    logDebug(s"Hudi tableType resolved to: ${tableType.getOrElse("unknown")}")
+
+    // Only support basic COW tables for the base version.
+    !tableType.exists(morTableTypes.contains)
+  }
+
+  private def tableTypeFromOptions(options: Map[String, String]): Option[String] = {
+    hudiTableTypeKeys
+      .flatMap(key => options.get(key))
+      .headOption
+  }
+
+  private def baseFileFormatFromOptions(options: Map[String, String]): Option[String] = {
+    hudiBaseFileFormatKeys
+      .flatMap(key => options.get(key))
+      .headOption
+  }
+
+  private def tableTypeFromMeta(options: Map[String, String]): Option[String] = {
+    val basePath = options.get("path").map(normalizePath)
+    basePath.flatMap { path =>
+      try {
+        val hadoopConf = SparkSession.active.sessionState.newHadoopConf()
+        val base = new Path(path)
+        val fs = base.getFileSystem(hadoopConf)
+        val propsPath = new Path(base, ".hoodie/hoodie.properties")
+        if (!fs.exists(propsPath)) {
+          if (log.isDebugEnabled()) {
+            logDebug(s"Hudi table properties not found at: $propsPath")
+          }
+          return None
+        }
+        val in = fs.open(propsPath)
+        try {
+          val props = new Properties()
+          props.load(in)
+          Option(props.getProperty("hoodie.table.type"))
+        } finally {
+          in.close()
+        }
+      } catch {
+        case t: Throwable =>
+          logInfo(s"Failed to load hudi table type from $path: ${t.getClass.getSimpleName}")
+          None
+      }
+    }
+  }
+
+  private def baseFileFormatFromMeta(options: Map[String, String]): Option[String] = {
+    val basePath = options.get("path").map(normalizePath)
+    basePath.flatMap { path =>
+      try {
+        val hadoopConf = SparkSession.active.sessionState.newHadoopConf()
+        val base = new Path(path)
+        val fs = base.getFileSystem(hadoopConf)
+        val propsPath = new Path(base, ".hoodie/hoodie.properties")
+        if (!fs.exists(propsPath)) {
+          if (log.isDebugEnabled()) {
+            logDebug(s"Hudi table properties not found at: $propsPath")
+          }
+          return None
+        }
+        val in = fs.open(propsPath)
+        try {
+          val props = new Properties()
+          props.load(in)
+          Option(props.getProperty("hoodie.table.base.file.format"))
+        } finally {
+          in.close()
+        }
+      } catch {
+        case t: Throwable =>
+          logInfo(s"Failed to load hudi base file format from $path: ${t.getClass.getSimpleName}")
+          None
+      }
+    }
+  }
+
+  private def baseFileFormatFromCatalog(catalogTable: Option[CatalogTable]): Option[String] = {
+    catalogTable.flatMap { table =>
+      val props = table.properties ++ table.storage.properties
+      hudiBaseFileFormatKeys.flatMap(props.get).headOption
+    }
+  }
+
+  private def fileFormatFromMeta(
+      scan: FileSourceScanExec,
+      catalogTable: Option[CatalogTable],
+      fileFormatName: String): Option[HudiFileFormat] = {
+    // Avoid treating non-Hudi tables as Hudi when Spark reports generic formats.
+    if (!isHudiFileIndex(scan.relation.location)) {
+      return None
+    }
+    val baseFormat = baseFileFormatFromOptions(scan.relation.options)
+      .orElse(baseFileFormatFromCatalog(catalogTable))
+      .orElse(baseFileFormatFromMeta(scan.relation.options))
+      .map(_.toLowerCase(Locale.ROOT))
+    baseFormat.flatMap {
+      case "orc" if fileFormatName.contains("OrcFileFormat") => Some(OrcFormat)
+      case "parquet" if fileFormatName.contains("ParquetFileFormat") => Some(ParquetFormat)
+      case _ => None
+    }
+  }
+
+  private def tableTypeFromCatalog(catalogTable: Option[CatalogTable]): Option[String] = {
+    catalogTable.flatMap { table =>
+      val props = table.properties ++ table.storage.properties
+      hudiTableTypeKeys.flatMap(props.get).headOption
+    }
+  }
+
+  private def catalogTable(relation: HadoopFsRelation): Option[CatalogTable] = {
+    val method = relation.getClass.getMethods.find(_.getName == "catalogTable")
+    method.flatMap { m =>
+      try {
+        m.invoke(relation) match {
+          case opt: Option[_] => opt.asInstanceOf[Option[CatalogTable]]
+          case table: CatalogTable => Some(table)
+          case _ => None
+        }
+      } catch {
+        case _: Throwable => None
+      }
+    }
+  }
+
+  private def isHudiFileIndex(fileIndex: AnyRef): Boolean = {
+    var current: Class[_] = fileIndex.getClass
+    while (current != null) {
+      if (current.getName.endsWith("HoodieFileIndex")) {
+        return true
+      }
+      current = current.getSuperclass
+    }
+    false
+  }
+
+  private def hasTimeTravel(options: Map[String, String]): Boolean = {
+    val keys = options.keys.map(_.toLowerCase(Locale.ROOT))
+    keys.exists {
+      case "as.of.instant" => true
+      case "as.of.timestamp" => true
+      case "hoodie.datasource.read.as.of.instant" => true
+      case "hoodie.datasource.read.as.of.timestamp" => true
+      case _ => false
+    }
+  }
+
+  private def normalizePath(rawPath: String): String = {
+    try {
+      val uri = new URI(rawPath)
+      if (uri.getScheme == null) {
+        rawPath
+      } else {
+        uri.getPath
+      }
+    } catch {
+      case _: Throwable => rawPath
+    }

Review Comment:
   normalizePath() drops URI scheme/authority by returning only uri.getPath for 
schemed paths (e.g., s3a://bucket/path becomes /path). That will cause Hadoop 
Path/FS resolution to use the wrong filesystem and fail to find 
`.hoodie/hoodie.properties` on non-local stores. Preserve the full URI (or use 
Hadoop Path parsing) instead of stripping to the path component.
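   A minimal sketch of one way to keep the full URI, letting Hadoop's `Path` do the parsing:
   ```scala
   import org.apache.hadoop.fs.Path

   // Path preserves scheme and authority (s3a://bucket/path stays intact);
   // fall back to the raw string only when the input cannot be parsed.
   private def normalizePath(rawPath: String): String =
     try new Path(rawPath).toString
     catch { case _: IllegalArgumentException => rawPath }
   ```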


