This is an automated email from the ASF dual-hosted git repository.
kejia pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-gluten.git
The following commit(s) were added to refs/heads/main by this push:
new c5febd8d62 [GLUTEN-9382][VL] Support bucket write with non-partition
table (#9575)
c5febd8d62 is described below
commit c5febd8d625a4dc1a7821b54c778513bfb182a5b
Author: JiaKe <[email protected]>
AuthorDate: Thu Jun 5 13:17:29 2025 +0800
[GLUTEN-9382][VL] Support bucket write with non-partition table (#9575)
---
.../scala/org/apache/gluten/backendsapi/velox/VeloxBackend.scala | 2 +-
.../spark/sql/execution/VeloxParquetWriteForHiveSuite.scala | 8 ++++----
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git
a/backends-velox/src/main/scala/org/apache/gluten/backendsapi/velox/VeloxBackend.scala
b/backends-velox/src/main/scala/org/apache/gluten/backendsapi/velox/VeloxBackend.scala
index 034a5d5f17..f6ed91055e 100644
---
a/backends-velox/src/main/scala/org/apache/gluten/backendsapi/velox/VeloxBackend.scala
+++
b/backends-velox/src/main/scala/org/apache/gluten/backendsapi/velox/VeloxBackend.scala
@@ -351,7 +351,7 @@ object VeloxBackendSettings extends BackendSettingsApi {
// is limited to partitioned tables. Therefore, we should add this
condition restriction.
// After velox supports bucketed non-partitioned tables, we can remove
the restriction on
// partitioned tables.
- if (bucketSpec.isEmpty || (isHiveCompatibleBucketTable &&
isPartitionedTable)) {
+ if (bucketSpec.isEmpty || isHiveCompatibleBucketTable) {
None
} else {
Some("Unsupported native write: non-compatible hive bucket write is
not supported.")
diff --git
a/backends-velox/src/test/scala/org/apache/spark/sql/execution/VeloxParquetWriteForHiveSuite.scala
b/backends-velox/src/test/scala/org/apache/spark/sql/execution/VeloxParquetWriteForHiveSuite.scala
index e77a98df35..71b965cd29 100644
---
a/backends-velox/src/test/scala/org/apache/spark/sql/execution/VeloxParquetWriteForHiveSuite.scala
+++
b/backends-velox/src/test/scala/org/apache/spark/sql/execution/VeloxParquetWriteForHiveSuite.scala
@@ -271,7 +271,7 @@ class VeloxParquetWriteForHiveSuite
}
}
- test("bucket writer with non-dynamic partition should fallback") {
+ test("bucket writer with non-dynamic partition") {
if (isSparkVersionGE("3.4")) {
Seq("true", "false").foreach {
enableConvertMetastore =>
@@ -293,7 +293,7 @@ class VeloxParquetWriteForHiveSuite
// hive relation convert always use dynamic, so it will offload
to native.
checkNativeWrite(
s"INSERT INTO $target PARTITION(k='0') SELECT i, j FROM
$source",
- checkNative = enableConvertMetastore.toBoolean)
+ checkNative = true)
val files = tableDir(target)
.listFiles()
.filterNot(f => f.getName.startsWith(".") ||
f.getName.startsWith("_"))
@@ -305,7 +305,7 @@ class VeloxParquetWriteForHiveSuite
}
}
- test("bucket writer with non-partition table should fallback") {
+ test("bucket writer with non-partition table") {
if (isSparkVersionGE("3.4")) {
Seq("true", "false").foreach {
enableConvertMetastore =>
@@ -323,7 +323,7 @@ class VeloxParquetWriteForHiveSuite
(0 until 50).map(i => (i % 13, i.toString)).toDF("i", "j")
df.write.mode(SaveMode.Overwrite).saveAsTable(source)
- checkNativeWrite(s"INSERT INTO $target SELECT i, j FROM
$source", checkNative = false)
+ checkNativeWrite(s"INSERT INTO $target SELECT i, j FROM
$source", checkNative = true)
checkAnswer(spark.table(target), df)
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]