This is an automated email from the ASF dual-hosted git repository.
xushiyan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git
The following commit(s) were added to refs/heads/master by this push:
new f57248abb46 [HUDI-6448] Improve upgrade/downgrade for table ver. 6
(#9063)
f57248abb46 is described below
commit f57248abb465a923418129c18801ec1d64a15a5d
Author: Shiyan Xu <[email protected]>
AuthorDate: Fri Jun 30 02:18:13 2023 -0700
[HUDI-6448] Improve upgrade/downgrade for table ver. 6 (#9063)
---------
Co-authored-by: sivabalan <[email protected]>
---
.../table/upgrade/FiveToFourDowngradeHandler.java | 4 +-
.../table/upgrade/FiveToSixUpgradeHandler.java | 20 ++++--
.../table/upgrade/FourToFiveUpgradeHandler.java | 4 +-
.../table/upgrade/OneToZeroDowngradeHandler.java | 2 +-
.../table/upgrade/SixToFiveDowngradeHandler.java | 44 ++++++++++--
.../table/upgrade/TwoToOneDowngradeHandler.java | 2 +-
.../functional/TestHoodieBackedMetadata.java | 4 +-
.../hudi/table/upgrade/TestUpgradeDowngrade.java | 78 ++++++++++++++++++++--
.../hudi/common/table/HoodieTableConfig.java | 4 ++
.../hudi/common/table/HoodieTableVersion.java | 2 +-
.../TestUpgradeOrDowngradeProcedure.scala | 4 +-
11 files changed, 143 insertions(+), 25 deletions(-)
diff --git
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/FiveToFourDowngradeHandler.java
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/FiveToFourDowngradeHandler.java
index 51da9810f6a..e51f5496c2d 100644
---
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/FiveToFourDowngradeHandler.java
+++
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/FiveToFourDowngradeHandler.java
@@ -23,13 +23,13 @@ import org.apache.hudi.common.config.ConfigProperty;
import org.apache.hudi.common.engine.HoodieEngineContext;
import org.apache.hudi.config.HoodieWriteConfig;
-import java.util.HashMap;
+import java.util.Collections;
import java.util.Map;
public class FiveToFourDowngradeHandler implements DowngradeHandler {
@Override
public Map<ConfigProperty, String> downgrade(HoodieWriteConfig config,
HoodieEngineContext context, String instantTime, SupportsUpgradeDowngrade
upgradeDowngradeHelper) {
- return new HashMap<>();
+ return Collections.emptyMap();
}
}
diff --git
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/FiveToSixUpgradeHandler.java
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/FiveToSixUpgradeHandler.java
index e3346c2f455..69086b394bf 100644
---
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/FiveToSixUpgradeHandler.java
+++
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/FiveToSixUpgradeHandler.java
@@ -18,7 +18,6 @@
package org.apache.hudi.table.upgrade;
-import org.apache.hadoop.fs.Path;
import org.apache.hudi.common.config.ConfigProperty;
import org.apache.hudi.common.engine.HoodieEngineContext;
import org.apache.hudi.common.table.HoodieTableMetaClient;
@@ -28,11 +27,13 @@ import org.apache.hudi.common.table.timeline.HoodieTimeline;
import org.apache.hudi.config.HoodieWriteConfig;
import org.apache.hudi.exception.HoodieUpgradeDowngradeException;
import org.apache.hudi.table.HoodieTable;
+
+import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
-import java.util.HashMap;
+import java.util.Collections;
import java.util.Map;
/**
@@ -46,9 +47,18 @@ public class FiveToSixUpgradeHandler implements
UpgradeHandler {
@Override
public Map<ConfigProperty, String> upgrade(HoodieWriteConfig config,
HoodieEngineContext context, String instantTime, SupportsUpgradeDowngrade
upgradeDowngradeHelper) {
- HoodieTable table = upgradeDowngradeHelper.getTable(config, context);
+ final HoodieTable table = upgradeDowngradeHelper.getTable(config, context);
+
+ deleteCompactionRequestedFileFromAuxiliaryFolder(table);
+
+ return Collections.emptyMap();
+ }
+
+ /**
+ * See HUDI-6040.
+ */
+ private void deleteCompactionRequestedFileFromAuxiliaryFolder(HoodieTable
table) {
HoodieTableMetaClient metaClient = table.getMetaClient();
- // delete compaction file from .aux
HoodieTimeline compactionTimeline =
metaClient.getActiveTimeline().filterPendingCompactionTimeline()
.filter(instant -> instant.getState() ==
HoodieInstant.State.REQUESTED);
compactionTimeline.getInstantsAsStream().forEach(
@@ -65,6 +75,6 @@ public class FiveToSixUpgradeHandler implements
UpgradeHandler {
}
}
);
- return new HashMap<>();
}
+
}
diff --git
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/FourToFiveUpgradeHandler.java
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/FourToFiveUpgradeHandler.java
index 01183357e7a..4d7c5b8b6df 100644
---
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/FourToFiveUpgradeHandler.java
+++
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/FourToFiveUpgradeHandler.java
@@ -32,7 +32,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
-import java.util.HashMap;
+import java.util.Collections;
import java.util.Map;
import static
org.apache.hudi.common.util.PartitionPathEncodeUtils.DEFAULT_PARTITION_PATH;
@@ -68,11 +68,11 @@ public class FourToFiveUpgradeHandler implements
UpgradeHandler {
throw new HoodieException(String.format("Old deprecated \"%s\"
partition found in hudi table. This needs a migration step before we can
upgrade ",
DEPRECATED_DEFAULT_PARTITION_PATH));
}
+ return Collections.emptyMap();
} catch (IOException e) {
LOG.error("Fetching file system instance failed", e);
throw new HoodieException("Fetching FileSystem instance failed ", e);
}
- return new HashMap<>();
}
private boolean hasDefaultPartitionPath(HoodieWriteConfig config,
HoodieTable table) throws IOException {
diff --git
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/OneToZeroDowngradeHandler.java
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/OneToZeroDowngradeHandler.java
index f70fd4ade09..d4b2bafc9ab 100644
---
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/OneToZeroDowngradeHandler.java
+++
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/OneToZeroDowngradeHandler.java
@@ -50,6 +50,6 @@ public class OneToZeroDowngradeHandler implements
DowngradeHandler {
WriteMarkers writeMarkers =
WriteMarkersFactory.get(config.getMarkersType(), table,
inflightInstant.getTimestamp());
writeMarkers.quietDeleteMarkerDir(context,
config.getMarkersDeleteParallelism());
}
- return Collections.EMPTY_MAP;
+ return Collections.emptyMap();
}
}
diff --git
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/SixToFiveDowngradeHandler.java
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/SixToFiveDowngradeHandler.java
index 24d8ee697ed..228c0f710a8 100644
---
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/SixToFiveDowngradeHandler.java
+++
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/SixToFiveDowngradeHandler.java
@@ -18,20 +18,29 @@
package org.apache.hudi.table.upgrade;
-import org.apache.hadoop.fs.Path;
import org.apache.hudi.common.config.ConfigProperty;
import org.apache.hudi.common.engine.HoodieEngineContext;
+import org.apache.hudi.common.table.HoodieTableConfig;
import org.apache.hudi.common.table.HoodieTableMetaClient;
+import org.apache.hudi.common.table.HoodieTableVersion;
import org.apache.hudi.common.table.timeline.HoodieActiveTimeline;
import org.apache.hudi.common.table.timeline.HoodieInstant;
import org.apache.hudi.common.table.timeline.HoodieTimeline;
import org.apache.hudi.common.util.FileIOUtils;
+import org.apache.hudi.common.util.Option;
import org.apache.hudi.config.HoodieWriteConfig;
+import org.apache.hudi.metadata.MetadataPartitionType;
import org.apache.hudi.table.HoodieTable;
-import java.util.Collections;
+import org.apache.hadoop.fs.Path;
+
+import java.util.HashMap;
import java.util.Map;
+import static
org.apache.hudi.common.table.HoodieTableConfig.TABLE_METADATA_PARTITIONS;
+import static
org.apache.hudi.common.table.HoodieTableConfig.TABLE_METADATA_PARTITIONS_INFLIGHT;
+import static
org.apache.hudi.metadata.HoodieTableMetadataUtil.deleteMetadataTablePartition;
+
/**
 * Downgrade handler to assist in downgrading hoodie table from version 6 to 5.
 * To ensure compatibility, we need to recreate the compaction requested file to
@@ -41,9 +50,35 @@ public class SixToFiveDowngradeHandler implements
DowngradeHandler {
@Override
public Map<ConfigProperty, String> downgrade(HoodieWriteConfig config,
HoodieEngineContext context, String instantTime, SupportsUpgradeDowngrade
upgradeDowngradeHelper) {
- HoodieTable table = upgradeDowngradeHelper.getTable(config, context);
+ final HoodieTable table = upgradeDowngradeHelper.getTable(config, context);
+
+ removeRecordIndexIfNeeded(table, context);
+ syncCompactionRequestedFileToAuxiliaryFolder(table);
+
+ Map<ConfigProperty, String> updatedTableProps = new HashMap<>();
+ HoodieTableConfig tableConfig = table.getMetaClient().getTableConfig();
+ Option.ofNullable(tableConfig.getString(TABLE_METADATA_PARTITIONS))
+ .ifPresent(v -> updatedTableProps.put(TABLE_METADATA_PARTITIONS, v));
+
Option.ofNullable(tableConfig.getString(TABLE_METADATA_PARTITIONS_INFLIGHT))
+ .ifPresent(v ->
updatedTableProps.put(TABLE_METADATA_PARTITIONS_INFLIGHT, v));
+ return updatedTableProps;
+ }
+
+ /**
+ * Record-level index, a new partition in metadata table, was first added in
+ * 0.14.0 ({@link HoodieTableVersion#SIX}). Any downgrade from this version
+ * should remove this partition.
+ */
+ private static void removeRecordIndexIfNeeded(HoodieTable table,
HoodieEngineContext context) {
+ HoodieTableMetaClient metaClient = table.getMetaClient();
+ deleteMetadataTablePartition(metaClient, context,
MetadataPartitionType.RECORD_INDEX, false);
+ }
+
+ /**
+ * See HUDI-6040.
+ */
+ private static void syncCompactionRequestedFileToAuxiliaryFolder(HoodieTable
table) {
HoodieTableMetaClient metaClient = table.getMetaClient();
- // sync compaction requested file to .aux
HoodieTimeline compactionTimeline = new HoodieActiveTimeline(metaClient,
false).filterPendingCompactionTimeline()
.filter(instant -> instant.getState() ==
HoodieInstant.State.REQUESTED);
compactionTimeline.getInstantsAsStream().forEach(instant -> {
@@ -52,6 +87,5 @@ public class SixToFiveDowngradeHandler implements
DowngradeHandler {
new Path(metaClient.getMetaPath(), fileName),
new Path(metaClient.getMetaAuxiliaryPath(), fileName));
});
- return Collections.emptyMap();
}
}
diff --git
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/TwoToOneDowngradeHandler.java
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/TwoToOneDowngradeHandler.java
index 5cde65e2572..cb0fca5ffee 100644
---
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/TwoToOneDowngradeHandler.java
+++
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/TwoToOneDowngradeHandler.java
@@ -70,7 +70,7 @@ public class TwoToOneDowngradeHandler implements
DowngradeHandler {
throw new HoodieException("Converting marker files to DIRECT style
failed during downgrade", e);
}
}
- return Collections.EMPTY_MAP;
+ return Collections.emptyMap();
}
/**
diff --git
a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieBackedMetadata.java
b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieBackedMetadata.java
index 84417fce958..075afd61eb1 100644
---
a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieBackedMetadata.java
+++
b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieBackedMetadata.java
@@ -2345,7 +2345,7 @@ public class TestHoodieBackedMetadata extends
TestHoodieMetadataBase {
assertTrue(currentStatus.getModificationTime() >
prevStatus.getModificationTime());
initMetaClient();
- assertEquals(metaClient.getTableConfig().getTableVersion().versionCode(),
HoodieTableVersion.FIVE.versionCode());
+ assertEquals(metaClient.getTableConfig().getTableVersion().versionCode(),
HoodieTableVersion.current().versionCode());
assertTrue(fs.exists(new Path(metadataTableBasePath)), "Metadata table
should exist");
FileStatus newStatus = fs.getFileStatus(new Path(metadataTableBasePath));
assertTrue(oldStatus.getModificationTime() <
newStatus.getModificationTime());
@@ -2423,7 +2423,7 @@ public class TestHoodieBackedMetadata extends
TestHoodieMetadataBase {
}
initMetaClient();
- assertEquals(metaClient.getTableConfig().getTableVersion().versionCode(),
HoodieTableVersion.FIVE.versionCode());
+ assertEquals(metaClient.getTableConfig().getTableVersion().versionCode(),
HoodieTableVersion.current().versionCode());
assertTrue(fs.exists(new Path(metadataTableBasePath)), "Metadata table
should exist");
FileStatus newStatus = fs.getFileStatus(new Path(metadataTableBasePath));
assertTrue(oldStatus.getModificationTime() <
newStatus.getModificationTime());
diff --git
a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/upgrade/TestUpgradeDowngrade.java
b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/upgrade/TestUpgradeDowngrade.java
index 9d837565363..60b87020c11 100644
---
a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/upgrade/TestUpgradeDowngrade.java
+++
b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/upgrade/TestUpgradeDowngrade.java
@@ -36,8 +36,10 @@ import org.apache.hudi.common.table.marker.MarkerType;
import org.apache.hudi.common.table.timeline.HoodieInstant;
import org.apache.hudi.common.table.timeline.versioning.TimelineLayoutVersion;
import org.apache.hudi.common.table.view.SyncableFileSystemView;
+import org.apache.hudi.common.testutils.HoodieMetadataTestTable;
import org.apache.hudi.common.testutils.HoodieTestDataGenerator;
import org.apache.hudi.common.testutils.HoodieTestUtils;
+import org.apache.hudi.common.util.CollectionUtils;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.collection.Pair;
import org.apache.hudi.config.HoodieWriteConfig;
@@ -46,10 +48,10 @@ import org.apache.hudi.keygen.SimpleKeyGenerator;
import org.apache.hudi.keygen.TimestampBasedKeyGenerator;
import org.apache.hudi.keygen.constant.KeyGeneratorOptions;
import org.apache.hudi.metadata.HoodieTableMetadata;
+import org.apache.hudi.metadata.MetadataPartitionType;
import org.apache.hudi.table.HoodieTable;
import org.apache.hudi.table.marker.WriteMarkers;
import org.apache.hudi.table.marker.WriteMarkersFactory;
-import org.apache.hudi.testutils.Assertions;
import org.apache.hudi.testutils.HoodieClientTestBase;
import org.apache.hudi.testutils.HoodieClientTestUtils;
@@ -71,6 +73,8 @@ import org.junit.jupiter.params.provider.EnumSource;
import org.junit.jupiter.params.provider.MethodSource;
import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
@@ -82,13 +86,23 @@ import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
+import static org.apache.hudi.common.model.HoodieTableType.MERGE_ON_READ;
import static org.apache.hudi.common.table.HoodieTableConfig.BASE_FILE_FORMAT;
import static
org.apache.hudi.common.table.HoodieTableConfig.HIVE_STYLE_PARTITIONING_ENABLE;
+import static
org.apache.hudi.common.table.HoodieTableConfig.TABLE_METADATA_PARTITIONS;
+import static
org.apache.hudi.common.table.HoodieTableConfig.TABLE_METADATA_PARTITIONS_INFLIGHT;
import static org.apache.hudi.common.table.HoodieTableConfig.TYPE;
+import static
org.apache.hudi.common.table.HoodieTableMetaClient.METADATA_TABLE_FOLDER_PATH;
import static
org.apache.hudi.common.testutils.HoodieTestDataGenerator.DEFAULT_FIRST_PARTITION_PATH;
import static
org.apache.hudi.common.testutils.HoodieTestDataGenerator.DEFAULT_SECOND_PARTITION_PATH;
import static org.apache.hudi.common.util.MarkerUtils.MARKERS_FILENAME_PREFIX;
import static
org.apache.hudi.common.util.PartitionPathEncodeUtils.DEPRECATED_DEFAULT_PARTITION_PATH;
+import static
org.apache.hudi.metadata.HoodieTableMetadataUtil.PARTITION_NAME_BLOOM_FILTERS;
+import static
org.apache.hudi.metadata.HoodieTableMetadataUtil.PARTITION_NAME_COLUMN_STATS;
+import static
org.apache.hudi.metadata.HoodieTableMetadataUtil.PARTITION_NAME_FILES;
+import static
org.apache.hudi.metadata.HoodieTableMetadataUtil.PARTITION_NAME_RECORD_INDEX;
+import static org.apache.hudi.metadata.MetadataPartitionType.RECORD_INDEX;
+import static org.apache.hudi.testutils.Assertions.assertNoWriteErrors;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -115,6 +129,8 @@ public class TestUpgradeDowngrade extends
HoodieClientTestBase {
public static Stream<Arguments> downGradeConfigParams() {
Object[][] data = new Object[][] {
+ {true, HoodieTableType.COPY_ON_WRITE, true, HoodieTableVersion.SIX,
HoodieTableVersion.FIVE},
+ {false, HoodieTableType.COPY_ON_WRITE, false, HoodieTableVersion.SIX,
HoodieTableVersion.FIVE},
{true, HoodieTableType.COPY_ON_WRITE, true, HoodieTableVersion.FIVE,
HoodieTableVersion.FOUR},
{false, HoodieTableType.COPY_ON_WRITE, false, HoodieTableVersion.FIVE,
HoodieTableVersion.FOUR},
{true, HoodieTableType.MERGE_ON_READ, true, HoodieTableVersion.FIVE,
HoodieTableVersion.FOUR},
@@ -511,6 +527,60 @@ public class TestUpgradeDowngrade extends
HoodieClientTestBase {
assertEquals(tableConfig.getBaseFileFormat().name(),
originalProps.getProperty(BASE_FILE_FORMAT.key()));
}
+ @Test
+ public void testDowngradeSixToFiveShouldDeleteRecordIndexPartition() throws
Exception {
+ HoodieWriteConfig config = getConfigBuilder()
+ .withMetadataConfig(HoodieMetadataConfig.newBuilder()
+ .enable(true)
+ .withMetadataIndexColumnStats(true)
+ .withMetadataIndexBloomFilter(true)
+ .withEnableRecordIndex(true).build())
+ .build();
+ HoodieTable table = getHoodieTable(metaClient, config);
+ for (MetadataPartitionType partitionType : MetadataPartitionType.values())
{
+ metaClient.getTableConfig().setMetadataPartitionState(metaClient,
partitionType, true);
+ }
+ metaClient.getTableConfig().setMetadataPartitionsInflight(metaClient,
MetadataPartitionType.values());
+ String metadataTableBasePath = Paths.get(basePath,
METADATA_TABLE_FOLDER_PATH).toString();
+ HoodieTableMetaClient metadataTableMetaClient =
HoodieTestUtils.init(metadataTableBasePath, MERGE_ON_READ);
+ HoodieMetadataTestTable.of(metadataTableMetaClient)
+ .addCommit("000")
+ .withBaseFilesInPartition(RECORD_INDEX.getPartitionPath(), 0);
+
+ // validate the relevant table states before downgrade
+ java.nio.file.Path recordIndexPartitionPath = Paths.get(basePath,
+ METADATA_TABLE_FOLDER_PATH, RECORD_INDEX.getPartitionPath());
+ Set<String> allPartitions = CollectionUtils.createImmutableSet(
+ PARTITION_NAME_FILES,
+ PARTITION_NAME_COLUMN_STATS,
+ PARTITION_NAME_BLOOM_FILTERS,
+ PARTITION_NAME_RECORD_INDEX
+ );
+ Set<String> allPartitionsExceptRecordIndex =
CollectionUtils.createImmutableSet(
+ PARTITION_NAME_FILES,
+ PARTITION_NAME_COLUMN_STATS,
+ PARTITION_NAME_BLOOM_FILTERS
+ );
+ assertTrue(Files.exists(recordIndexPartitionPath), "record index partition
should exist.");
+ assertEquals(allPartitions,
metaClient.getTableConfig().getMetadataPartitions(),
+ TABLE_METADATA_PARTITIONS.key() + " should contain all partitions.");
+ assertEquals(allPartitions,
metaClient.getTableConfig().getMetadataPartitionsInflight(),
+ TABLE_METADATA_PARTITIONS_INFLIGHT.key() + " should contain all
partitions.");
+
+ // perform downgrade
+ prepForDowngradeFromVersion(HoodieTableVersion.SIX);
+ new UpgradeDowngrade(metaClient, config, context,
SparkUpgradeDowngradeHelper.getInstance())
+ .run(HoodieTableVersion.FIVE, null);
+
+ // validate the relevant table states after downgrade
+ assertFalse(Files.exists(recordIndexPartitionPath), "record index
partition should be deleted.");
+ assertEquals(allPartitionsExceptRecordIndex,
metaClient.getTableConfig().getMetadataPartitions(),
+ TABLE_METADATA_PARTITIONS.key() + " should contain all partitions
except record_index.");
+ assertEquals(allPartitionsExceptRecordIndex,
metaClient.getTableConfig().getMetadataPartitionsInflight(),
+ TABLE_METADATA_PARTITIONS_INFLIGHT.key() + " should contain all
partitions except record_index.");
+
+ }
+
@ParameterizedTest(name = TEST_NAME_WITH_DOWNGRADE_PARAMS)
@MethodSource("downGradeConfigParams")
public void testDowngrade(
@@ -685,7 +755,7 @@ public class TestUpgradeDowngrade extends
HoodieClientTestBase {
List<HoodieRecord> records =
dataGen.generateInsertsContainsAllPartitions(newCommitTime, 2);
JavaRDD<HoodieRecord> writeRecords = jsc.parallelize(records, 1);
JavaRDD<WriteStatus> statuses = client.upsert(writeRecords, newCommitTime);
- Assertions.assertNoWriteErrors(statuses.collect());
+ assertNoWriteErrors(statuses.collect());
client.commit(newCommitTime, statuses);
return records;
}
@@ -743,7 +813,7 @@ public class TestUpgradeDowngrade extends
HoodieClientTestBase {
List<HoodieRecord> records =
dataGen.generateInsertsContainsAllPartitions(newCommitTime, 2);
JavaRDD<HoodieRecord> writeRecords = jsc.parallelize(records, 1);
JavaRDD<WriteStatus> statuses = client.upsert(writeRecords, newCommitTime);
- Assertions.assertNoWriteErrors(statuses.collect());
+ assertNoWriteErrors(statuses.collect());
client.commit(newCommitTime, statuses);
/**
* Write 2 (updates)
@@ -753,7 +823,7 @@ public class TestUpgradeDowngrade extends
HoodieClientTestBase {
List<HoodieRecord> records2 = dataGen.generateUpdates(newCommitTime,
records);
statuses = client.upsert(jsc.parallelize(records2, 1), newCommitTime);
- Assertions.assertNoWriteErrors(statuses.collect());
+ assertNoWriteErrors(statuses.collect());
if (commitSecondUpsert) {
client.commit(newCommitTime, statuses);
}
diff --git
a/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableConfig.java
b/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableConfig.java
index e609fb85595..5963e199ea9 100644
---
a/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableConfig.java
+++
b/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableConfig.java
@@ -799,6 +799,10 @@ public class HoodieTableConfig extends HoodieConfig {
LOG.info(String.format("MDT %s partitions %s have been set to inflight",
metaClient.getBasePathV2(), partitionTypes));
}
+ public void setMetadataPartitionsInflight(HoodieTableMetaClient metaClient,
MetadataPartitionType... partitionTypes) {
+ setMetadataPartitionsInflight(metaClient,
Arrays.stream(partitionTypes).collect(Collectors.toList()));
+ }
+
/**
* Clear {@link HoodieTableConfig#TABLE_METADATA_PARTITIONS}
* {@link HoodieTableConfig#TABLE_METADATA_PARTITIONS_INFLIGHT}.
diff --git
a/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableVersion.java
b/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableVersion.java
index 63d823b6ecd..a0ec92473ca 100644
---
a/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableVersion.java
+++
b/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableVersion.java
@@ -53,7 +53,7 @@ public enum HoodieTableVersion {
}
public static HoodieTableVersion current() {
- return FIVE;
+ return SIX;
}
public static HoodieTableVersion versionFromCode(int versionCode) {
diff --git
a/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/hudi/procedure/TestUpgradeOrDowngradeProcedure.scala
b/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/hudi/procedure/TestUpgradeOrDowngradeProcedure.scala
index a664a4a32cb..ff4b5aa92ea 100644
---
a/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/hudi/procedure/TestUpgradeOrDowngradeProcedure.scala
+++
b/hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/spark/sql/hudi/procedure/TestUpgradeOrDowngradeProcedure.scala
@@ -55,10 +55,10 @@ class TestUpgradeOrDowngradeProcedure extends
HoodieSparkProcedureTestBase {
.build
// verify hoodie.table.version of the original table
- assertResult(HoodieTableVersion.FIVE.versionCode) {
+ assertResult(HoodieTableVersion.SIX.versionCode) {
metaClient.getTableConfig.getTableVersion.versionCode()
}
- assertTableVersionFromPropertyFile(metaClient,
HoodieTableVersion.FIVE.versionCode)
+ assertTableVersionFromPropertyFile(metaClient,
HoodieTableVersion.SIX.versionCode)
// downgrade table to ZERO
checkAnswer(s"""call downgrade_table(table => '$tableName', to_version
=> 'ZERO')""")(Seq(true))