This is an automated email from the ASF dual-hosted git repository.

codope pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git


The following commit(s) were added to refs/heads/master by this push:
     new 35be9bbbc7e [HUDI-6324] Fixing deleting of MDT index (#9248)
35be9bbbc7e is described below

commit 35be9bbbc7ef7ae6ad0a4955da78da4c0463074f
Author: Sivabalan Narayanan <[email protected]>
AuthorDate: Fri Aug 4 13:25:17 2023 -0400

    [HUDI-6324] Fixing deleting of MDT index (#9248)
---
 .../src/main/java/org/apache/hudi/table/HoodieSparkTable.java    | 9 +++++----
 .../apache/hudi/client/functional/TestHoodieBackedMetadata.java  | 1 -
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git 
a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/HoodieSparkTable.java
 
b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/HoodieSparkTable.java
index f7d314aa8bb..a5202fb7bbe 100644
--- 
a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/HoodieSparkTable.java
+++ 
b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/table/HoodieSparkTable.java
@@ -92,15 +92,16 @@ public abstract class HoodieSparkTable<T>
       String triggeringInstantTimestamp,
       HoodieFailedWritesCleaningPolicy failedWritesCleaningPolicy) {
     if (config.isMetadataTableEnabled() || 
metaClient.getTableConfig().isMetadataTableAvailable()) {
+      // if any partition is deleted, we need to reload the metadata table 
writer so that new table configs are picked up
+      // to reflect the deleted MDT partitions.
+      deleteMetadataIndexIfNecessary();
+
       // Create the metadata table writer. First time after the upgrade this 
creation might trigger
       // metadata table bootstrapping. Bootstrapping process could fail and 
checking the table
       // existence after the creation is needed.
-      final HoodieTableMetadataWriter metadataWriter = 
SparkHoodieBackedTableMetadataWriter.create(
+      HoodieTableMetadataWriter metadataWriter = 
SparkHoodieBackedTableMetadataWriter.create(
           context.getHadoopConf().get(), config, failedWritesCleaningPolicy, 
context,
           Option.of(triggeringInstantTimestamp));
-      // even with metadata enabled, some index could have been disabled
-      // delete metadata partitions corresponding to such indexes
-      deleteMetadataIndexIfNecessary();
       try {
         if (isMetadataTableExists || metaClient.getFs().exists(new Path(
             
HoodieTableMetadata.getMetadataTableBasePath(metaClient.getBasePath())))) {
diff --git 
a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieBackedMetadata.java
 
b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieBackedMetadata.java
index 5288947bb19..b31ecd3d268 100644
--- 
a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieBackedMetadata.java
+++ 
b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieBackedMetadata.java
@@ -239,7 +239,6 @@ public class TestHoodieBackedMetadata extends 
TestHoodieMetadataBase {
   }
 
   @Test
-  @Disabled("HUDI-6324") // Disabling of MDT partitions might have to be 
revisited. Might only be an admin operation.
   public void testTurnOffMetadataIndexAfterEnable() throws Exception {
     initPath();
     HoodieWriteConfig cfg = getConfigBuilder(TRIP_EXAMPLE_SCHEMA, 
HoodieIndex.IndexType.BLOOM, HoodieFailedWritesCleaningPolicy.EAGER)

Reply via email to