This is an automated email from the ASF dual-hosted git repository.
danny0405 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git
The following commit(s) were added to refs/heads/master by this push:
new 4c528029796 [HUDI-7282] Avoid verification failure due to append
writing of the cow table with cluster configuration when the index is bucket.
(#10475)
4c528029796 is described below
commit 4c5280297964c5aa3bd4bd7abe893ac36b8ebbcf
Author: akido <[email protected]>
AuthorDate: Fri Jan 12 09:11:30 2024 +0800
[HUDI-7282] Avoid verification failure due to append writing of the cow
table with cluster configuration when the index is bucket. (#10475)
---
.../src/main/java/org/apache/hudi/util/ClusteringUtil.java | 2 +-
.../test/java/org/apache/hudi/utils/TestClusteringUtil.java | 11 +++++++++++
2 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/util/ClusteringUtil.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/util/ClusteringUtil.java
index 75d4ea79815..ac81b4e7af4 100644
--- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/util/ClusteringUtil.java
+++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/util/ClusteringUtil.java
@@ -49,7 +49,7 @@ public class ClusteringUtil {
private static final Logger LOG = LoggerFactory.getLogger(ClusteringUtil.class);
public static void validateClusteringScheduling(Configuration conf) {
- if (OptionsResolver.isBucketIndexType(conf)) {
+ if (!OptionsResolver.isAppendMode(conf) && OptionsResolver.isBucketIndexType(conf)) {
HoodieIndex.BucketIndexEngineType bucketIndexEngineType = OptionsResolver.getBucketEngineType(conf);
switch (bucketIndexEngineType) {
case SIMPLE:
diff --git a/hudi-flink-datasource/hudi-flink/src/test/java/org/apache/hudi/utils/TestClusteringUtil.java b/hudi-flink-datasource/hudi-flink/src/test/java/org/apache/hudi/utils/TestClusteringUtil.java
index e9433d036ca..ca8718289d9 100644
--- a/hudi-flink-datasource/hudi-flink/src/test/java/org/apache/hudi/utils/TestClusteringUtil.java
+++ b/hudi-flink-datasource/hudi-flink/src/test/java/org/apache/hudi/utils/TestClusteringUtil.java
@@ -32,6 +32,7 @@ import org.apache.hudi.common.util.ClusteringUtils;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.configuration.FlinkOptions;
import org.apache.hudi.exception.HoodieIOException;
+import org.apache.hudi.index.HoodieIndex;
import org.apache.hudi.table.HoodieFlinkTable;
import org.apache.hudi.util.ClusteringUtil;
import org.apache.hudi.util.FlinkTables;
@@ -113,6 +114,16 @@ public class TestClusteringUtil {
.stream().map(HoodieInstant::getTimestamp).collect(Collectors.toList());
assertThat(actualInstants, is(oriInstants));
}
+
+ @Test
+ void validateClusteringScheduling() throws Exception {
+ beforeEach();
+ ClusteringUtil.validateClusteringScheduling(this.conf);
+
+ // validate bucket index
+ this.conf.setString(FlinkOptions.INDEX_TYPE, HoodieIndex.IndexType.BUCKET.name());
+ ClusteringUtil.validateClusteringScheduling(this.conf);
+ }
/**
* Generates a clustering plan on the timeline and returns its instant time.