This is an automated email from the ASF dual-hosted git repository.
vhs pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git
The following commit(s) were added to refs/heads/master by this push:
new 245db2ca5cbb test: Fix flaky test in TestHoodieClientMultiWriter (#17793)
245db2ca5cbb is described below
commit 245db2ca5cbb7ed5e7801e8817404a732d183b72
Author: Y Ethan Guo <[email protected]>
AuthorDate: Tue Jan 6 23:42:38 2026 -0800
test: Fix flaky test in TestHoodieClientMultiWriter (#17793)
---
.../java/org/apache/hudi/client/TestHoodieClientMultiWriter.java | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/client/TestHoodieClientMultiWriter.java b/hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/client/TestHoodieClientMultiWriter.java
index a4e051d6f23e..72d0db584ae8 100644
--- a/hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/client/TestHoodieClientMultiWriter.java
+++ b/hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/client/TestHoodieClientMultiWriter.java
@@ -49,6 +49,7 @@ import org.apache.hudi.common.table.view.FileSystemViewStorageConfig;
import org.apache.hudi.common.table.view.FileSystemViewStorageType;
import org.apache.hudi.common.testutils.HoodieTestTable;
import org.apache.hudi.common.testutils.HoodieTestUtils;
+import org.apache.hudi.common.testutils.InProcessTimeGenerator;
import org.apache.hudi.common.util.CommitUtils;
import org.apache.hudi.io.util.FileIOUtils;
import org.apache.hudi.common.util.Option;
@@ -703,19 +704,19 @@ public class TestHoodieClientMultiWriter extends HoodieClientTestBase {
// Create the first commit
SparkRDDWriteClient<?> client = getHoodieWriteClient(cfg);
- createCommitWithInsertsForPartition(cfg, client, "000", "001", 100, "2016/03/01");
+ String firstCommitTime = InProcessTimeGenerator.createNewInstantTime();
+ createCommitWithInsertsForPartition(cfg, client, "000", firstCommitTime, 100, "2016/03/01");
client.close();
int numConcurrentWriters = 5;
ExecutorService executors = Executors.newFixedThreadPool(numConcurrentWriters);
List<Future<?>> futures = new ArrayList<>(numConcurrentWriters);
for (int loop = 0; loop < numConcurrentWriters; loop++) {
- String newCommitTime = "00" + (loop + 2);
String partition = "2016/03/0" + (loop + 2);
futures.add(executors.submit(() -> {
try {
SparkRDDWriteClient<?> writeClient = getHoodieWriteClient(cfg);
- createCommitWithInsertsForPartition(cfg, writeClient, "001", newCommitTime, 100, partition);
+ createCommitWithInsertsForPartition(cfg, writeClient, "001", InProcessTimeGenerator.createNewInstantTime(), 100, partition);
writeClient.close();
} catch (Exception e) {
throw new RuntimeException(e);