This is an automated email from the ASF dual-hosted git repository.
changchen pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-gluten.git
The following commit(s) were added to refs/heads/main by this push:
new 1c2ffcae14 Fix deprecated Delta API usages in velox test code (#11594)
1c2ffcae14 is described below
commit 1c2ffcae149045443fb4b13bbeafb11c3fe5e584
Author: Chang Chen <[email protected]>
AuthorDate: Tue Feb 10 18:20:13 2026 +0800
Fix deprecated Delta API usages in velox test code (#11594)
Replace deprecated Delta Lake APIs with their recommended alternatives:
- deltaLog.snapshot -> deltaLog.unsafeVolatileSnapshot
- log.startTransaction() -> log.startTransaction(catalogTableOpt = None)
These APIs were deprecated in Delta Lake 3.3 (delta33):
- snapshot: deprecated since 12.0, use unsafeVolatileSnapshot
- startTransaction(): deprecated since 3.0, use CatalogTable overload
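A minimal sketch of the migration pattern described above, for illustration only
(the SparkSession "spark" and the table path "tablePath" are assumptions, not names
taken from this patch):

    import org.apache.spark.sql.delta.DeltaLog

    val deltaLog = DeltaLog.forTable(spark, tablePath)

    // Deprecated:  val snapshot = deltaLog.snapshot
    val snapshot = deltaLog.unsafeVolatileSnapshot

    // Deprecated:  val txn = deltaLog.startTransaction()
    // The tests touched here use path-based tables with no catalog entry, hence None.
    val txn = deltaLog.startTransaction(catalogTableOpt = None)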
---
.../spark/sql/delta/DeletionVectorsTestUtils.scala | 4 ++--
.../scala/org/apache/spark/sql/delta/DeltaSuite.scala | 18 +++++++++---------
.../perf/OptimizeMetadataOnlyDeltaQuerySuite.scala | 2 +-
3 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/backends-velox/src-delta33/test/scala/org/apache/spark/sql/delta/DeletionVectorsTestUtils.scala b/backends-velox/src-delta33/test/scala/org/apache/spark/sql/delta/DeletionVectorsTestUtils.scala
index 5bb022c12d..fdfa516bac 100644
--- a/backends-velox/src-delta33/test/scala/org/apache/spark/sql/delta/DeletionVectorsTestUtils.scala
+++ b/backends-velox/src-delta33/test/scala/org/apache/spark/sql/delta/DeletionVectorsTestUtils.scala
@@ -190,7 +190,7 @@ trait DeletionVectorsTestUtils extends QueryTest with SharedSparkSession with De
/** Utility method to remove the given rows from the given file using DVs */
protected def removeRowsFromFile(
log: DeltaLog, addFile: AddFile, rowIndexesToRemove: Seq[Long]): Unit = {
- val txn = log.startTransaction()
+ val txn = log.startTransaction(catalogTableOpt = None)
val actions = removeRowsFromFileUsingDV(log, addFile, rowIndexesToRemove)
txn.commit(actions, Truncate())
}
@@ -306,7 +306,7 @@ trait DeletionVectorsTestUtils extends QueryTest with SharedSparkSession with De
// This is needed to make the manual commit work correctly, since we are not actually
// running a command that produces metrics.
withSQLConf(DeltaSQLConf.DELTA_HISTORY_METRICS_ENABLED.key -> "false") {
- val txn = log.startTransaction()
+ val txn = log.startTransaction(catalogTableOpt = None)
val allAddFiles = txn.snapshot.allFiles.collect()
numFiles = Some(allAddFiles.length)
val bitmap = RoaringBitmapArray(0L until numRowsToRemovePerFile: _*)
diff --git a/backends-velox/src-delta33/test/scala/org/apache/spark/sql/delta/DeltaSuite.scala b/backends-velox/src-delta33/test/scala/org/apache/spark/sql/delta/DeltaSuite.scala
index 564485b405..f265168ddb 100644
--- a/backends-velox/src-delta33/test/scala/org/apache/spark/sql/delta/DeltaSuite.scala
+++ b/backends-velox/src-delta33/test/scala/org/apache/spark/sql/delta/DeltaSuite.scala
@@ -1322,7 +1322,7 @@ class DeltaSuite
.save(tempDir.toString)
val deltaLog = DeltaLog.forTable(spark, tempDir)
- assert(deltaLog.snapshot.metadata.partitionColumns === Seq("by4"))
+ assert(deltaLog.unsafeVolatileSnapshot.metadata.partitionColumns === Seq("by4"))
spark.read
.format("delta")
@@ -1333,7 +1333,7 @@ class DeltaSuite
.mode(SaveMode.Overwrite)
.save(tempDir.toString)
- assert(deltaLog.snapshot.metadata.partitionColumns === Nil)
+ assert(deltaLog.unsafeVolatileSnapshot.metadata.partitionColumns === Nil)
}
}
@@ -1529,7 +1529,7 @@ class DeltaSuite
spark.range(10).write.format("delta").save(tempDir.toString)
val deltaLog = DeltaLog.forTable(spark, tempDir)
val numParts =
spark.sessionState.conf.getConf(DeltaSQLConf.DELTA_SNAPSHOT_PARTITIONS).get
- assert(deltaLog.snapshot.stateDS.rdd.getNumPartitions == numParts)
+ assert(deltaLog.unsafeVolatileSnapshot.stateDS.rdd.getNumPartitions == numParts)
}
}
@@ -1550,7 +1550,7 @@ class DeltaSuite
withSQLConf(("spark.databricks.delta.snapshotPartitions", "410")) {
spark.range(10).write.format("delta").save(tempDir.toString)
val deltaLog = DeltaLog.forTable(spark, tempDir)
- assert(deltaLog.snapshot.stateDS.rdd.getNumPartitions == 410)
+ assert(deltaLog.unsafeVolatileSnapshot.stateDS.rdd.getNumPartitions == 410)
}
}
}
@@ -1954,7 +1954,7 @@ class DeltaSuite
withTempDir {
tempDir =>
val deltaLog = DeltaLog.forTable(spark, tempDir)
- assert(deltaLog.snapshot.stateDS.rdd.getNumPartitions == 0)
+ assert(deltaLog.unsafeVolatileSnapshot.stateDS.rdd.getNumPartitions == 0)
}
}
@@ -1973,7 +1973,7 @@ class DeltaSuite
try {
withTempDir {
tempDir =>
- val files = DeltaLog.forTable(spark, tempDir).snapshot.stateDS.collect()
+ val files = DeltaLog.forTable(spark, tempDir).unsafeVolatileSnapshot.stateDS.collect()
assert(files.isEmpty)
}
sparkContext.listenerBus.waitUntilEmpty(15000)
@@ -2186,7 +2186,7 @@ class DeltaSuite
val deltaLog = DeltaLog.forTable(spark, tempDir)
val hadoopConf = deltaLog.newDeltaHadoopConf()
- val snapshot = deltaLog.snapshot
+ val snapshot = deltaLog.unsafeVolatileSnapshot
val files = snapshot.allFiles.collect()
// assign physical name to new schema
@@ -2428,7 +2428,7 @@ class DeltaSuite
val deltaLog = DeltaLog.forTable(spark, testPath)
// We need to drop default properties set by subclasses to make this test pass in them
assert(
- deltaLog.snapshot.metadata.configuration
+ deltaLog.unsafeVolatileSnapshot.metadata.configuration
.filterKeys(!_.startsWith("delta.columnMapping."))
.toMap ===
Map("delta.logRetentionDuration" -> "123 days"))
@@ -3248,7 +3248,7 @@ class DeltaNameColumnMappingSuite extends DeltaSuite with DeltaColumnMappingEnab
.mode("append")
.save(tempDir.getCanonicalPath)
- val protocol = DeltaLog.forTable(spark, tempDir).snapshot.protocol
+ val protocol = DeltaLog.forTable(spark, tempDir).unsafeVolatileSnapshot.protocol
val (r, w) = if (protocol.supportsReaderFeatures ||
protocol.supportsWriterFeatures) {
(
TableFeatureProtocolUtils.TABLE_FEATURES_MIN_READER_VERSION,
diff --git a/backends-velox/src-delta33/test/scala/org/apache/spark/sql/delta/perf/OptimizeMetadataOnlyDeltaQuerySuite.scala b/backends-velox/src-delta33/test/scala/org/apache/spark/sql/delta/perf/OptimizeMetadataOnlyDeltaQuerySuite.scala
index d2265c8a15..834f164308 100644
--- a/backends-velox/src-delta33/test/scala/org/apache/spark/sql/delta/perf/OptimizeMetadataOnlyDeltaQuerySuite.scala
+++ b/backends-velox/src-delta33/test/scala/org/apache/spark/sql/delta/perf/OptimizeMetadataOnlyDeltaQuerySuite.scala
@@ -831,7 +831,7 @@ class OptimizeMetadataOnlyDeltaQuerySuite
// Creates AddFile entries with non-existing files
// The query should read only the delta log and not the parquet files
val log = DeltaLog.forTable(spark, tempPath)
- val txn = log.startTransaction()
+ val txn = log.startTransaction(catalogTableOpt = None)
txn.commitManually(
DeltaTestUtils.createTestAddFile(
encodedPath = "1.parquet",
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]