rahil-c commented on code in PR #13642:
URL: https://github.com/apache/hudi/pull/13642#discussion_r2254789679


##########
hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/upgrade/TestUpgradeDowngrade.java:
##########
@@ -394,6 +395,115 @@ public void 
testUpgradeFourtoFiveWithHiveStyleDefaultPartitionWithSkipValidation
     testUpgradeFourToFiveInternal(true, true, true);
   }
 
+  @Test
+  public void testUpgradeFourToFiveWithMetadataTableFailure() throws Exception 
{
+    // Follow same setup of testUpgradeFourToFiveInternal
+    String tableName = metaClient.getTableConfig().getTableName();
+    cleanUp();
+    initSparkContexts();
+    initPath();
+    initTestDataGenerator();
+
+    Map<String, String> params = new HashMap<>();
+    addNewTableParamsToProps(params, tableName);
+    Properties properties = new Properties();
+    params.forEach((k, v) -> properties.setProperty(k, v));
+
+    initMetaClient(getTableType(), properties);
+    HoodieWriteConfig cfg = getConfigBuilder()
+        .withRollbackUsingMarkers(false)
+        .withWriteTableVersion(6)
+        
.withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(true).build())
+        .withProps(params)
+        .build();
+    SparkRDDWriteClient client = getHoodieWriteClient(cfg);
+    // Write inserts
+    doInsert(client);
+
+    downgradeTableConfigsFromFiveToFour(cfg);
+    // end of setup of testUpgradeFourToFiveInternal
+
+    // Now test specific case of Metadata table failure during upgrade process
+    HoodieTableVersion originalVersion = 
metaClient.getTableConfig().getTableVersion();
+    
+    // Get metadata table path and corrupt its properties files, to trigger 
exception
+    String metadataTablePath = HoodieTableMetadata.getMetadataTableBasePath(
+        metaClient.getBasePath().toString());
+    StoragePath metadataHoodiePath = new StoragePath(metadataTablePath, 
HoodieTableMetaClient.METAFOLDER_NAME);
+    StoragePath propsPath = new StoragePath(metadataHoodiePath, 
HoodieTableConfig.HOODIE_PROPERTIES_FILE);
+    StoragePath backupPropsPath = new StoragePath(metadataHoodiePath, 
HoodieTableConfig.HOODIE_PROPERTIES_FILE_BACKUP);
+    
+    // Corrupt both properties files with invalid content
+    String corruptedContent = 
"CORRUPTED_INVALID_CONTENT\n\nTHIS_IS_NOT_VALID_PROPERTIES_FORMAT";
+    try (OutputStream propsOut = metaClient.getStorage().create(propsPath, 
true);
+         OutputStream backupOut = 
metaClient.getStorage().create(backupPropsPath, true)) {
+      propsOut.write(corruptedContent.getBytes());
+      backupOut.write(corruptedContent.getBytes());
+    }
+
+    // Attempt upgrade from version 4 to 5 - should fail with metadata table 
exception
+    HoodieUpgradeDowngradeException exception = assertThrows(
+        HoodieUpgradeDowngradeException.class,
+        () -> new UpgradeDowngrade(metaClient, cfg, context, 
SparkUpgradeDowngradeHelper.getInstance())
+            .run(HoodieTableVersion.FIVE, null)
+    );
+
+    // Verify the exception message contains the expected error message
+    assertTrue(exception.getMessage().contains("Upgrade/downgrade for the Hudi 
metadata table failed. "
+        + "Please try again. If the failure repeats for metadata table, it is 
recommended to disable "
+        + "the metadata table so that the upgrade and downgrade can continue 
for the data table."),
+        "Exception message should match the exact error message from 
UpgradeDowngrade.java");
+    
+    // Verify main table version unchanged (upgrade process was stopped)
+    metaClient = HoodieTableMetaClient.reload(metaClient);
+    assertEquals(originalVersion, 
metaClient.getTableConfig().getTableVersion(),
+        "Main table version should remain unchanged when metadata table 
upgrade fails");
+  }
+
+  @Test
+  public void testAutoUpgradeDisabledDuringUpgrade() throws Exception {
+    // Follow same setup of testUpgradeFourToFiveInternal
+    String tableName = metaClient.getTableConfig().getTableName();
+    cleanUp();
+    initSparkContexts();
+    initPath();
+    initTestDataGenerator();
+
+    Map<String, String> params = new HashMap<>();
+    addNewTableParamsToProps(params, tableName);
+    Properties properties = new Properties();
+    params.forEach((k, v) -> properties.setProperty(k, v));
+
+    initMetaClient(getTableType(), properties);
+    // Create config with auto-upgrade disabled
+    HoodieWriteConfig cfg = getConfigBuilder()
+        .withRollbackUsingMarkers(false)
+        .withWriteTableVersion(6)
+        .withAutoUpgradeVersion(false)  // Disable auto-upgrade
+        .withProps(params)
+        .build();
+    
+    SparkRDDWriteClient client = getHoodieWriteClient(cfg);
+    // Write inserts to establish table
+    doInsert(client);
+    
+    // Downgrade to version 4 first
+    downgradeTableConfigsFromFiveToFour(cfg);
+
+    metaClient = HoodieTableMetaClient.reload(metaClient);
+    HoodieTableVersion originalVersion = 
metaClient.getTableConfig().getTableVersion();
+    assertEquals(HoodieTableVersion.FOUR, originalVersion, "Table should be at 
version 4 before test");
+    
+    // Attempt upgrade from version 4 to 5 with auto-upgrade disabled
+    new UpgradeDowngrade(metaClient, cfg, context, 
SparkUpgradeDowngradeHelper.getInstance())
+        .run(HoodieTableVersion.FIVE, null);
+    
+    // Verify that upgrade was skipped and table version remains unchanged
+    metaClient = HoodieTableMetaClient.reload(metaClient);
+    assertEquals(HoodieTableVersion.FOUR, 
metaClient.getTableConfig().getTableVersion(),

Review Comment:
   @nsivabalan 
   We have the current test for `autoUpgrade` disabled 
https://github.com/apache/hudi/blob/master/hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/table/upgrade/TestUpgradeDowngrade.java#L125.
   
   In terms of testing with this flag enabled, and also with a specific 
write version set, I can try adding that case if needed.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to