This is an automated email from the ASF dual-hosted git repository.
sivabalan pushed a commit to branch branch-0.x
in repository https://gitbox.apache.org/repos/asf/hudi.git
The following commit(s) were added to refs/heads/branch-0.x by this push:
new 286fdaa8989f [MINOR] Fix outdated api usage and libraries (#17556)
286fdaa8989f is described below
commit 286fdaa8989f49835accee935aec3135dce3d894
Author: Lin Liu <[email protected]>
AuthorDate: Mon Jan 5 20:06:19 2026 -0800
[MINOR] Fix outdated api usage and libraries (#17556)
---------
Co-authored-by: Tim Brown <[email protected]>
---
.github/workflows/bot.yml | 11 +++++++++++
.../compose/docker-compose_hadoop284_hive233_spark244.yml | 4 ++--
.../spark/sql/hudi/analysis/HoodieSpark32PlusAnalysis.scala | 8 +++++++-
.../apache/spark/sql/HoodieSpark35CatalystPlanUtils.scala | 13 ++++++++++++-
4 files changed, 32 insertions(+), 4 deletions(-)
diff --git a/.github/workflows/bot.yml b/.github/workflows/bot.yml
index 5d659123f133..a666aee75122 100644
--- a/.github/workflows/bot.yml
+++ b/.github/workflows/bot.yml
@@ -686,6 +686,17 @@ jobs:
distribution: 'temurin'
architecture: x64
cache: maven
+ - name: Check disk space
+ run: df -h
+ - name: 'Free space'
+ run: |
+ sudo rm -rf /usr/share/dotnet
+ sudo rm -rf /usr/local/lib/android
+ sudo rm -rf /opt/ghc
+ sudo rm -rf /usr/local/share/boost
+ docker system prune --all --force --volumes
+ - name: Check disk space after cleanup
+ run: df -h
- name: Build Project
env:
SPARK_PROFILE: ${{ matrix.sparkProfile }}
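
The new steps bracket the runner cleanup with a `df -h` report before and after, so the reclaimed space is visible in the CI log. For readers who want the same check programmatically, a minimal Scala sketch using java.nio.file follows; the object name is illustrative and not part of the commit:

    import java.nio.file.FileSystems

    object DiskSpaceCheck extends App {
      // Roughly equivalent in spirit to the workflow's `df -h` steps:
      // report total/usable space for every mounted file store.
      val gb = 1024L * 1024 * 1024
      FileSystems.getDefault.getFileStores.forEach { store =>
        println(f"${store.toString}%-40s total=${store.getTotalSpace / gb}%5d GiB usable=${store.getUsableSpace / gb}%5d GiB")
      }
    }
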
diff --git a/docker/compose/docker-compose_hadoop284_hive233_spark244.yml b/docker/compose/docker-compose_hadoop284_hive233_spark244.yml
index 1b711574f6a1..12a3452f5b43 100644
--- a/docker/compose/docker-compose_hadoop284_hive233_spark244.yml
+++ b/docker/compose/docker-compose_hadoop284_hive233_spark244.yml
@@ -175,7 +175,7 @@ services:
- "namenode"
zookeeper:
- image: 'bitnami/zookeeper:3.4.12-r68'
+ image: 'bitnamilegacy/zookeeper:3.4.12-r68'
hostname: zookeeper
container_name: zookeeper
ports:
@@ -184,7 +184,7 @@ services:
- ALLOW_ANONYMOUS_LOGIN=yes
kafka:
- image: 'bitnami/kafka:2.0.0'
+ image: 'bitnamilegacy/kafka:2.0.0'
hostname: kafkabroker
container_name: kafkabroker
ports:
diff --git a/hudi-spark-datasource/hudi-spark3.2plus-common/src/main/scala/org/apache/spark/sql/hudi/analysis/HoodieSpark32PlusAnalysis.scala b/hudi-spark-datasource/hudi-spark3.2plus-common/src/main/scala/org/apache/spark/sql/hudi/analysis/HoodieSpark32PlusAnalysis.scala
index 84fb3fd405b4..ccf991cb6930 100644
--- a/hudi-spark-datasource/hudi-spark3.2plus-common/src/main/scala/org/apache/spark/sql/hudi/analysis/HoodieSpark32PlusAnalysis.scala
+++ b/hudi-spark-datasource/hudi-spark3.2plus-common/src/main/scala/org/apache/spark/sql/hudi/analysis/HoodieSpark32PlusAnalysis.scala
@@ -111,7 +111,13 @@ case class HoodieSpark32PlusResolveReferences(spark: SparkSession) extends Rule[
lazy val analyzer = spark.sessionState.analyzer
val targetTable = if (targetTableO.resolved) targetTableO else analyzer.execute(targetTableO)
val sourceTable = if (sourceTableO.resolved) sourceTableO else analyzer.execute(sourceTableO)
- val m = mO.asInstanceOf[MergeIntoTable].copy(targetTable = targetTable, sourceTable = sourceTable)
+ val originalMergeInto = mO.asInstanceOf[MergeIntoTable]
+ val m = originalMergeInto.copy(
+ targetTable = targetTable,
+ sourceTable = sourceTable,
+ mergeCondition = originalMergeInto.mergeCondition,
+ matchedActions = originalMergeInto.matchedActions,
+ notMatchedActions = originalMergeInto.notMatchedActions)
// END: custom Hudi change
EliminateSubqueryAliases(targetTable) match {
case r: NamedRelation if r.skipSchemaResolution =>
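
The replacement above spells out every field of the MergeIntoTable copy instead of the two-argument shorthand: the resolved target and source plans are substituted while the merge condition and action lists are carried through explicitly. A minimal sketch of the same named-field copy pattern, using a hypothetical Merge case class standing in for Catalyst's MergeIntoTable node:

    // Hypothetical stand-in for Catalyst's MergeIntoTable node.
    case class Merge(
        targetTable: String,
        sourceTable: String,
        mergeCondition: String,
        matchedActions: Seq[String],
        notMatchedActions: Seq[String])

    object NamedCopyExample extends App {
      val original = Merge("t", "s", "t.id = s.id", Seq("update"), Seq("insert"))

      // Substitute the resolved target/source plans and forward every other
      // field explicitly, mirroring the commit's copy(...) call.
      val rewritten = original.copy(
        targetTable = "resolved_t",
        sourceTable = "resolved_s",
        mergeCondition = original.mergeCondition,
        matchedActions = original.matchedActions,
        notMatchedActions = original.notMatchedActions)

      println(rewritten)
    }
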
diff --git a/hudi-spark-datasource/hudi-spark3.5.x/src/main/scala/org/apache/spark/sql/HoodieSpark35CatalystPlanUtils.scala b/hudi-spark-datasource/hudi-spark3.5.x/src/main/scala/org/apache/spark/sql/HoodieSpark35CatalystPlanUtils.scala
index 1b4b86c4e421..463b6e820556 100644
--- a/hudi-spark-datasource/hudi-spark3.5.x/src/main/scala/org/apache/spark/sql/HoodieSpark35CatalystPlanUtils.scala
+++ b/hudi-spark-datasource/hudi-spark3.5.x/src/main/scala/org/apache/spark/sql/HoodieSpark35CatalystPlanUtils.scala
@@ -24,7 +24,7 @@ import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.{AnalysisErrorAt, ResolvedTable}
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeSet, Expression, ProjectionOverSchema}
import org.apache.spark.sql.catalyst.planning.ScanOperation
-import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, MergeIntoTable, Project}
+import org.apache.spark.sql.catalyst.plans.logical.{InsertIntoStatement, LogicalPlan, MergeIntoTable, Project}
import org.apache.spark.sql.connector.catalog.{Identifier, Table, TableCatalog}
import org.apache.spark.sql.execution.command.RepairTableCommand
import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, LogicalRelation}
@@ -80,4 +80,15 @@ object HoodieSpark35CatalystPlanUtils extends HoodieSpark3CatalystPlanUtils {
"sqlExpr" -> a.sql,
"cols" -> cols))
}
+
+ override def rebaseInsertIntoStatement(iis: LogicalPlan, targetTable: LogicalPlan, query: LogicalPlan): LogicalPlan = {
+ val insert = iis.asInstanceOf[InsertIntoStatement]
+ insert.copy(
+ table = targetTable,
+ partitionSpec = insert.partitionSpec,
+ userSpecifiedCols = insert.userSpecifiedCols,
+ query = query,
+ overwrite = insert.overwrite,
+ ifPartitionNotExists = insert.ifPartitionNotExists)
+ }
}
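
The new rebaseInsertIntoStatement rebuilds an InsertIntoStatement around a new target table and query while forwarding the remaining fields unchanged. A minimal sketch of the same rebase, again with a hypothetical case class standing in for the Catalyst node:

    // Hypothetical stand-in for Catalyst's InsertIntoStatement node.
    case class Insert(
        table: String,
        partitionSpec: Map[String, Option[String]],
        userSpecifiedCols: Seq[String],
        query: String,
        overwrite: Boolean,
        ifPartitionNotExists: Boolean)

    object RebaseExample extends App {
      // Swap in the new table and query; carry everything else through,
      // mirroring the commit's insert.copy(...) call.
      def rebase(insert: Insert, targetTable: String, query: String): Insert =
        insert.copy(
          table = targetTable,
          partitionSpec = insert.partitionSpec,
          userSpecifiedCols = insert.userSpecifiedCols,
          query = query,
          overwrite = insert.overwrite,
          ifPartitionNotExists = insert.ifPartitionNotExists)

      val original = Insert("old_table", Map("dt" -> Some("2026-01-05")),
        Seq("id", "dt"), "old_query", overwrite = false, ifPartitionNotExists = false)
      println(rebase(original, "new_table", "new_query"))
    }
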