This is an automated email from the ASF dual-hosted git repository.
wombatukun pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git
The following commit(s) were added to refs/heads/master by this push:
new ddbe1b2a0bd [MINOR] Fix of flaky
`TestHoodieClientMultiWriter::testMultiWriterWithAsyncTableServicesWithConflict`
(#12137)
ddbe1b2a0bd is described below
commit ddbe1b2a0bd4bdf35d886d09ef335597d65aa71d
Author: Geser Dugarov <[email protected]>
AuthorDate: Tue Oct 22 19:05:17 2024 +0700
[MINOR] Fix of flaky
`TestHoodieClientMultiWriter::testMultiWriterWithAsyncTableServicesWithConflict`
(#12137)
---
.../hudi/client/TestHoodieClientMultiWriter.java | 20 +++++++++++++-------
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git
a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/TestHoodieClientMultiWriter.java
b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/TestHoodieClientMultiWriter.java
index a20efbaf54b..ba6fb446a89 100644
---
a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/TestHoodieClientMultiWriter.java
+++
b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/TestHoodieClientMultiWriter.java
@@ -27,6 +27,7 @@ import
org.apache.hudi.client.transaction.lock.ZookeeperBasedLockProvider;
import org.apache.hudi.common.config.HoodieMetadataConfig;
import org.apache.hudi.common.config.HoodieStorageConfig;
import org.apache.hudi.common.config.LockConfiguration;
+import org.apache.hudi.common.lock.LockProvider;
import org.apache.hudi.common.model.HoodieCleaningPolicy;
import org.apache.hudi.common.model.HoodieFailedWritesCleaningPolicy;
import org.apache.hudi.common.model.HoodieFileFormat;
@@ -461,7 +462,7 @@ public class TestHoodieClientMultiWriter extends
HoodieClientTestBase {
@ParameterizedTest
@MethodSource("providerClassResolutionStrategyAndTableType")
- public void
testMultiWriterWithAsyncTableServicesWithConflict(HoodieTableType tableType,
Class providerClass,
+ public void
testMultiWriterWithAsyncTableServicesWithConflict(HoodieTableType tableType,
Class<? extends LockProvider<?>> providerClass,
ConflictResolutionStrategy resolutionStrategy) throws Exception {
// create inserts X 1
if (tableType == HoodieTableType.MERGE_ON_READ) {
@@ -526,6 +527,11 @@ public class TestHoodieClientMultiWriter extends
HoodieClientTestBase {
final SparkRDDWriteClient client2 = getHoodieWriteClient(cfg);
final SparkRDDWriteClient client3 = getHoodieWriteClient(cfg);
+ // Tests with concurrent operations can be flaky; to reduce the possibility
of wrong ordering, some queuing (via waits) is added
+ // For InProcessLockProvider we could wait less
+ final int waitAndRunFirst =
providerClass.isAssignableFrom(InProcessLockProvider.class) ? 2000 : 20000;
+ final int waitAndRunSecond =
providerClass.isAssignableFrom(InProcessLockProvider.class) ? 3000 : 30000;
+
// Create upserts, schedule cleaning, schedule compaction in parallel
Future future1 = executors.submit(() -> {
final String newCommitTime = client1.createNewInstantTime();
@@ -534,7 +540,7 @@ public class TestHoodieClientMultiWriter extends
HoodieClientTestBase {
// We want the upsert to go through only after the compaction
// and cleaning schedule completion. So, waiting on latch here.
- latchCountDownAndWait(scheduleCountDownLatch, 30000);
+ latchCountDownAndWait(scheduleCountDownLatch, waitAndRunSecond);
if (tableType == HoodieTableType.MERGE_ON_READ && !(resolutionStrategy
instanceof PreferWriterConflictResolutionStrategy)) {
// HUDI-6897: Improve
SimpleConcurrentFileWritesConflictResolutionStrategy for NB-CC
// There is no need to throw concurrent modification exception for the
simple strategy under NB-CC, because the compactor would finally resolve the
conflicts instead.
@@ -561,12 +567,12 @@ public class TestHoodieClientMultiWriter extends
HoodieClientTestBase {
client2.scheduleTableService(compactionTimeStamp, Option.empty(),
TableServiceType.COMPACT);
});
}
- latchCountDownAndWait(scheduleCountDownLatch, 30000);
+ latchCountDownAndWait(scheduleCountDownLatch, waitAndRunFirst);
});
Future future3 = executors.submit(() -> {
assertDoesNotThrow(() -> {
- latchCountDownAndWait(scheduleCountDownLatch, 30000);
+ latchCountDownAndWait(scheduleCountDownLatch, waitAndRunFirst);
String cleanCommitTime = client3.createNewInstantTime();
client3.scheduleTableService(cleanCommitTime, Option.empty(),
TableServiceType.CLEAN);
});
@@ -590,7 +596,7 @@ public class TestHoodieClientMultiWriter extends
HoodieClientTestBase {
future1 = executors.submit(() -> {
final String newCommitTime = client1.createNewInstantTime();
final int numRecords = 100;
- latchCountDownAndWait(runCountDownLatch, 30000);
+ latchCountDownAndWait(runCountDownLatch, waitAndRunSecond);
assertDoesNotThrow(() -> {
createCommitWithInserts(cfg, client1, thirdCommitTime, newCommitTime,
numRecords, true);
validInstants.add(newCommitTime);
@@ -598,7 +604,7 @@ public class TestHoodieClientMultiWriter extends
HoodieClientTestBase {
});
future2 = executors.submit(() -> {
- latchCountDownAndWait(runCountDownLatch, 30000);
+ latchCountDownAndWait(runCountDownLatch, waitAndRunFirst);
if (tableType == HoodieTableType.MERGE_ON_READ) {
assertDoesNotThrow(() -> {
HoodieWriteMetadata<JavaRDD<WriteStatus>> compactionMetadata =
client2.compact(pendingCompactionTime);
@@ -609,7 +615,7 @@ public class TestHoodieClientMultiWriter extends
HoodieClientTestBase {
});
future3 = executors.submit(() -> {
- latchCountDownAndWait(runCountDownLatch, 30000);
+ latchCountDownAndWait(runCountDownLatch, waitAndRunFirst);
assertDoesNotThrow(() -> {
client3.clean(pendingCleanTime, false);
validInstants.add(pendingCleanTime);