This is an automated email from the ASF dual-hosted git repository.
danny0405 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git
The following commit(s) were added to refs/heads/master by this push:
new ae40574ef6d [MINOR] Avoid repeated write behavior in ut (#11755)
ae40574ef6d is described below
commit ae40574ef6db52099650aabe8416cb6e20e7bae4
Author: TheR1sing3un <[email protected]>
AuthorDate: Tue Aug 13 08:02:31 2024 +0800
[MINOR] Avoid repeated write behavior in ut (#11755)
1. set stats in metadata after compaction
2. fix the wrong usage of HoodieWriteMetadata, which causes the repeated
write behavior in ut
Signed-off-by: ther1sing3un <[email protected]>
---
.../compact/RunCompactionActionExecutor.java | 1 +
.../table/action/compact/TestHoodieCompactor.java | 36 +++++++++++-----------
2 files changed, 19 insertions(+), 18 deletions(-)
diff --git
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/RunCompactionActionExecutor.java
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/RunCompactionActionExecutor.java
index f8ce1fcdc2a..066776e1743 100644
---
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/RunCompactionActionExecutor.java
+++
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/RunCompactionActionExecutor.java
@@ -125,6 +125,7 @@ public class RunCompactionActionExecutor<T> extends
compactionMetadata.setWriteStatuses(statuses);
compactionMetadata.setCommitted(false);
compactionMetadata.setCommitMetadata(Option.of(metadata));
+ compactionMetadata.setWriteStats(updateStatusMap);
} catch (Exception e) {
throw new HoodieCompactionException("Could not compact " +
config.getBasePath(), e);
}
diff --git
a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/action/compact/TestHoodieCompactor.java
b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/action/compact/TestHoodieCompactor.java
index 78086a64359..167a00babe6 100644
---
a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/action/compact/TestHoodieCompactor.java
+++
b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/action/compact/TestHoodieCompactor.java
@@ -20,11 +20,8 @@ package org.apache.hudi.table.action.compact;
import org.apache.hudi.avro.model.HoodieCompactionPlan;
import org.apache.hudi.client.SparkRDDWriteClient;
-import org.apache.hudi.client.WriteStatus;
import org.apache.hudi.common.config.HoodieMemoryConfig;
import org.apache.hudi.common.config.HoodieStorageConfig;
-import org.apache.hudi.common.data.HoodieData;
-import org.apache.hudi.common.data.HoodieListData;
import org.apache.hudi.common.model.FileSlice;
import org.apache.hudi.common.model.HoodieRecord;
import org.apache.hudi.common.model.HoodieTableType;
@@ -48,6 +45,7 @@ import org.apache.hudi.metrics.HoodieMetrics;
import org.apache.hudi.storage.HoodieStorageUtils;
import org.apache.hudi.table.HoodieSparkTable;
import org.apache.hudi.table.HoodieTable;
+import org.apache.hudi.table.action.HoodieWriteMetadata;
import org.apache.hudi.testutils.HoodieSparkClientTestHarness;
import com.codahale.metrics.Counter;
@@ -235,7 +233,7 @@ public class TestHoodieCompactor extends
HoodieSparkClientTestHarness {
updateRecords(config, newCommitTime, records);
assertLogFilesNumEqualsTo(config, i);
}
- HoodieData<WriteStatus> result = compact(writeClient,
String.format("10%s", i));
+ HoodieWriteMetadata result = compact(writeClient, String.format("10%s",
i));
verifyCompaction(result);
// Verify compaction.requested, compaction.completed metrics counts.
@@ -270,7 +268,7 @@ public class TestHoodieCompactor extends
HoodieSparkClientTestHarness {
assertLogFilesNumEqualsTo(config, 1);
- HoodieData<WriteStatus> result = compact(writeClient, "10" + (i + 1));
+ HoodieWriteMetadata result = compact(writeClient, "10" + (i + 1));
verifyCompaction(result);
// Verify compaction.requested, compaction.completed metrics counts.
@@ -317,26 +315,28 @@ public class TestHoodieCompactor extends
HoodieSparkClientTestHarness {
/**
* Do a compaction.
*/
- private HoodieData<WriteStatus> compact(SparkRDDWriteClient writeClient,
String compactionInstantTime) {
+ private HoodieWriteMetadata compact(SparkRDDWriteClient writeClient, String
compactionInstantTime) {
writeClient.scheduleCompactionAtInstant(compactionInstantTime,
Option.empty());
- JavaRDD<WriteStatus> writeStatusJavaRDD = (JavaRDD<WriteStatus>)
writeClient.compact(compactionInstantTime).getWriteStatuses();
- return HoodieListData.eager(writeStatusJavaRDD.collect());
+ HoodieWriteMetadata compactMetadata =
writeClient.compact(compactionInstantTime);
+ return compactMetadata;
}
/**
- * Verify that all partition paths are present in the WriteStatus result.
+ * Verify that all partition paths are present in the HoodieWriteMetadata
result.
*/
- private void verifyCompaction(HoodieData<WriteStatus> result) {
- List<WriteStatus> writeStatuses = result.collectAsList();
+ private void verifyCompaction(HoodieWriteMetadata compactionMetadata) {
+ assertTrue(compactionMetadata.getWriteStats().isPresent());
+ List<HoodieWriteStat> stats = (List<HoodieWriteStat>)
compactionMetadata.getWriteStats().get();
+ assertEquals(dataGen.getPartitionPaths().length, stats.size());
for (String partitionPath : dataGen.getPartitionPaths()) {
- assertTrue(writeStatuses.stream().anyMatch(writeStatus ->
writeStatus.getStat().getPartitionPath().contentEquals(partitionPath)));
+ assertTrue(stats.stream().anyMatch(stat ->
stat.getPartitionPath().contentEquals(partitionPath)));
}
- writeStatuses.forEach(writeStatus -> {
- final HoodieWriteStat.RuntimeStats stats =
writeStatus.getStat().getRuntimeStats();
- assertNotNull(stats);
- assertEquals(stats.getTotalCreateTime(), 0);
- assertTrue(stats.getTotalUpsertTime() > 0);
- assertTrue(stats.getTotalScanTime() > 0);
+ stats.forEach(stat -> {
+ HoodieWriteStat.RuntimeStats runtimeStats = stat.getRuntimeStats();
+ assertNotNull(runtimeStats);
+ assertEquals(0, runtimeStats.getTotalCreateTime());
+ assertTrue(runtimeStats.getTotalUpsertTime() > 0);
+ assertTrue(runtimeStats.getTotalScanTime() > 0);
});
}
}