yihua commented on code in PR #13948:
URL: https://github.com/apache/hudi/pull/13948#discussion_r2366431455
##########
hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/table/upgrade/TestUpgradeDowngrade.java:
##########
@@ -113,6 +119,7 @@ public void testUpgradeOrDowngrade(HoodieTableVersion
fromVersion, HoodieTableVe
validateVersionSpecificProperties(resultMetaClient, toVersion);
validateDataConsistency(originalData, resultMetaClient, "after " +
operation);
+
Review Comment:
nit: remove empty line
##########
hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/table/upgrade/TestUpgradeDowngrade.java:
##########
@@ -134,29 +141,6 @@ public void testUpgradeOrDowngrade(HoodieTableVersion
fromVersion, HoodieTableVe
LOG.info("Successfully completed {} test for version {} -> {}", operation,
fromVersion, toVersion);
}
- @ParameterizedTest
- @MethodSource("versionsBelowSix")
- public void testUpgradeForVersionsStartingBelowSixBlocked(HoodieTableVersion
originalVersion) throws Exception {
Review Comment:
Let's still keep this test and move `hudi-v4-table.zip` and
`hudi-v5-table.zip` to a new folder `unsupported-upgrade-tables` and keep
`generate-fixture.scala` for these tables.
##########
hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/table/upgrade/TestUpgradeDowngrade.java:
##########
@@ -71,13 +71,19 @@
public class TestUpgradeDowngrade extends SparkClientFunctionalTestHarness {
private static final Logger LOG =
LoggerFactory.getLogger(TestUpgradeDowngrade.class);
- private static final String FIXTURES_BASE_PATH =
"/upgrade-downgrade-fixtures/mor-tables/";
@TempDir
java.nio.file.Path tempDir;
private HoodieTableMetaClient metaClient;
+ private static String getFixturesBasePath(String suffix) {
+ if (suffix.contains("complex-keygen")) {
+ return "/upgrade-downgrade-fixtures/complex-keygen-tables/";
+ }
+ return "/upgrade-downgrade-fixtures/maintenance-tables/";
Review Comment:
nit: use `mor-tables` since Hudi does not use the term "maintenance".
##########
hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/table/upgrade/TestUpgradeDowngrade.java:
##########
@@ -71,13 +71,19 @@
public class TestUpgradeDowngrade extends SparkClientFunctionalTestHarness {
private static final Logger LOG =
LoggerFactory.getLogger(TestUpgradeDowngrade.class);
- private static final String FIXTURES_BASE_PATH =
"/upgrade-downgrade-fixtures/mor-tables/";
@TempDir
java.nio.file.Path tempDir;
private HoodieTableMetaClient metaClient;
+ private static String getFixturesBasePath(String suffix) {
+ if (suffix.contains("complex-keygen")) {
+ return "/upgrade-downgrade-fixtures/complex-keygen-tables/";
+ }
+ return "/upgrade-downgrade-fixtures/maintenance-tables/";
Review Comment:
And use `hudi-v6-table.zip` as the naming convention on tables with table
services and archived timeline.
##########
hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/table/upgrade/TestUpgradeDowngrade.java:
##########
@@ -308,61 +292,20 @@ public void
testDowngradeToVersionsBelowSixBlocked(HoodieTableVersion fromVersio
"Exception message should match expected blocked downgrade format");
}
- @Disabled
- @ParameterizedTest
- @MethodSource("metadataTableCorruptionTestVersionPairs")
- public void testMetadataTableUpgradeDowngradeFailure(HoodieTableVersion
fromVersion, HoodieTableVersion toVersion) throws Exception {
- boolean isUpgrade = fromVersion.lesserThan(toVersion);
- String operation = isUpgrade ? "upgrade" : "downgrade";
- LOG.info("Testing metadata table failure during {} from version {} to {}",
operation, fromVersion, toVersion);
-
- HoodieTableMetaClient originalMetaClient = loadFixtureTable(fromVersion);
- assertEquals(fromVersion,
originalMetaClient.getTableConfig().getTableVersion(),
- "Fixture table should be at expected version");
-
- HoodieWriteConfig cfg = createWriteConfig(originalMetaClient, true);
-
- String metadataTablePath = HoodieTableMetadata.getMetadataTableBasePath(
- originalMetaClient.getBasePath().toString());
- StoragePath metadataHoodiePath = new StoragePath(metadataTablePath,
HoodieTableMetaClient.METAFOLDER_NAME);
- StoragePath propsPath = new StoragePath(metadataHoodiePath,
HoodieTableConfig.HOODIE_PROPERTIES_FILE);
- StoragePath backupPropsPath = new StoragePath(metadataHoodiePath,
HoodieTableConfig.HOODIE_PROPERTIES_FILE_BACKUP);
-
- String corruptedContent =
"CORRUPTED_INVALID_CONTENT\n\nTHIS_IS_NOT_VALID_PROPERTIES_FORMAT";
- try (OutputStream propsOut =
originalMetaClient.getStorage().create(propsPath, true);
- OutputStream backupOut =
originalMetaClient.getStorage().create(backupPropsPath, true)) {
- propsOut.write(corruptedContent.getBytes());
- backupOut.write(corruptedContent.getBytes());
- }
-
- HoodieUpgradeDowngradeException exception = assertThrows(
- HoodieUpgradeDowngradeException.class,
- () -> new UpgradeDowngrade(originalMetaClient, cfg, context(),
SparkUpgradeDowngradeHelper.getInstance())
- .run(toVersion, null)
- );
-
- // Verify the specific exception message for metadata table failures
- String expectedMessage = "Upgrade/downgrade for the Hudi metadata table
failed. "
- + "Please try again. If the failure repeats for metadata table, it is
recommended to disable "
- + "the metadata table so that the upgrade and downgrade can continue
for the data table.";
- assertTrue(exception.getMessage().contains(expectedMessage),
- "Exception message should contain metadata table failure message");
- }
-
private static Stream<Arguments>
testComplexKeygenValidationDuringUpgradeDowngrade() {
return Stream.of(
- Arguments.of(HoodieTableVersion.SIX, HoodieTableVersion.NINE, true),
- Arguments.of(HoodieTableVersion.SIX, HoodieTableVersion.NINE, false),
- Arguments.of(HoodieTableVersion.SIX, HoodieTableVersion.EIGHT, true),
- Arguments.of(HoodieTableVersion.SIX, HoodieTableVersion.EIGHT, false),
- Arguments.of(HoodieTableVersion.EIGHT, HoodieTableVersion.NINE, true),
- Arguments.of(HoodieTableVersion.EIGHT, HoodieTableVersion.NINE, false),
- Arguments.of(HoodieTableVersion.NINE, HoodieTableVersion.SIX, true),
- Arguments.of(HoodieTableVersion.NINE, HoodieTableVersion.SIX, false),
- Arguments.of(HoodieTableVersion.NINE, HoodieTableVersion.EIGHT, true),
- Arguments.of(HoodieTableVersion.NINE, HoodieTableVersion.EIGHT, false),
- Arguments.of(HoodieTableVersion.EIGHT, HoodieTableVersion.SIX, true),
- Arguments.of(HoodieTableVersion.EIGHT, HoodieTableVersion.SIX, false)
+ Arguments.of(HoodieTableVersion.SIX, HoodieTableVersion.NINE,
true),
Review Comment:
nit: check the indentation here — it should not change from the original.
##########
hudi-spark-datasource/hudi-spark/src/test/resources/upgrade-downgrade-fixtures/README.md:
##########
@@ -37,15 +43,6 @@ All fixture tables use a consistent simple schema:
- `ts` (long) - Timestamp
- `partition` (string) - Partition value
-## Table Structure
-
-Each fixture table contains:
-- 2-3 base files (parquet)
-- 2-3 log files
-- Multiple committed instants
-- 1 pending/failed write (for rollback testing)
-- Basic .hoodie metadata structure
Review Comment:
Let's add a one-sentence general description of what different folders of
tables cover.
##########
hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/table/upgrade/TestUpgradeDowngrade.java:
##########
@@ -467,40 +413,44 @@ private Option<HoodieTableVersion>
getNextVersion(HoodieTableVersion current) {
* Get fixture zip file name for a given table version.
*/
public static String getFixtureName(HoodieTableVersion version, String
suffix) {
- String zipSuffix = suffix + ".zip";
+ String baseName;
switch (version) {
case FOUR:
- return "hudi-v4-table" + zipSuffix;
+ baseName = "hudi-v4";
+ break;
case FIVE:
- return "hudi-v5-table" + zipSuffix;
+ baseName = "hudi-v5";
+ break;
case SIX:
- return "hudi-v6-table" + zipSuffix;
+ baseName = "hudi-v6";
+ break;
case EIGHT:
- return "hudi-v8-table" + zipSuffix;
+ baseName = "hudi-v8";
+ break;
case NINE:
- return "hudi-v9-table" + zipSuffix;
+ baseName = "hudi-v9";
+ break;
default:
throw new IllegalArgumentException("Unsupported fixture version: " +
version);
}
+
+ // Handle different naming patterns based on suffix
+ if (suffix.contains("complex-keygen")) {
+ return baseName + "-table" + suffix + ".zip";
+ } else {
+ // Default to maintenance tables
+ return baseName + "-table-maintenance.zip";
+ }
}
private static Stream<Arguments> tableVersions() {
return Stream.of(
- Arguments.of(HoodieTableVersion.FOUR), // Hudi 0.11.1
- Arguments.of(HoodieTableVersion.FIVE), // Hudi 0.12.2
Arguments.of(HoodieTableVersion.SIX), // Hudi 0.14
Arguments.of(HoodieTableVersion.EIGHT), // Hudi 1.0.2
Arguments.of(HoodieTableVersion.NINE) // Hudi 1.1
);
}
- private static Stream<Arguments> versionsBelowSix() {
- return Stream.of(
- Arguments.of(HoodieTableVersion.FOUR), // Hudi 0.11.1
- Arguments.of(HoodieTableVersion.FIVE) // Hudi 0.12.2
- );
Review Comment:
Will keep this.
##########
hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/table/upgrade/TestUpgradeDowngrade.java:
##########
@@ -134,29 +141,6 @@ public void testUpgradeOrDowngrade(HoodieTableVersion
fromVersion, HoodieTableVe
LOG.info("Successfully completed {} test for version {} -> {}", operation,
fromVersion, toVersion);
}
- @ParameterizedTest
- @MethodSource("versionsBelowSix")
- public void testUpgradeForVersionsStartingBelowSixBlocked(HoodieTableVersion
originalVersion) throws Exception {
Review Comment:
This way we can save time, without having to generate the tables using the new
script.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]