xushiyan commented on code in PR #8963:
URL: https://github.com/apache/hudi/pull/8963#discussion_r1233101215


##########
hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/io/TestHoodieTimelineArchiver.java:
##########
@@ -322,6 +323,7 @@ public void testArchiveTableWithReplaceCommits() throws 
Exception {
     }
   }
 
+  @Disabled("HUDI-6385")
   @ParameterizedTest
   @ValueSource(strings = {"KEEP_LATEST_BY_HOURS", "KEEP_LATEST_COMMITS"})
   public void testArchivalWithAutoAdjustmentBasedOnCleanConfigs(String 
cleaningPolicy) throws Exception {

Review Comment:
   This test is still disabled (`@Disabled("HUDI-6385")`); please re-enable it here, or confirm that HUDI-6385 tracks re-enabling it, before merging.



##########
hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/io/TestHoodieTimelineArchiver.java:
##########
@@ -650,6 +683,7 @@ public void 
testLoadArchiveTimelineWithDamagedPlanFile(boolean enableArchiveMerg
     assertThrows(HoodieException.class, () -> 
metaClient.getArchivedTimeline().reload());
   }
 
+  @Disabled("HUDI-6386")
   @ParameterizedTest
   @ValueSource(booleans = {true, false})
   public void testArchivalWithMultiWriters(boolean enableMetadata) throws 
Exception {

Review Comment:
   This test is still disabled (`@Disabled("HUDI-6386")`); please re-enable it here, or confirm that HUDI-6386 tracks re-enabling it, before merging.



##########
hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/functional/TestCleanPlanExecutor.java:
##########
@@ -492,37 +484,42 @@ public void testKeepLatestCommitsMOR() throws Exception {
 
     HoodieWriteConfig config = 
HoodieWriteConfig.newBuilder().withPath(basePath)
         
.withMetadataConfig(HoodieMetadataConfig.newBuilder().withAssumeDatePartitioning(true)
-                            // Column Stats Index is disabled, since these 
tests construct tables which are
-                            // not valid (empty commit metadata, invalid 
parquet files)
-                            .withMetadataIndexColumnStats(false).build())
+            // Column Stats Index is disabled, since these tests construct 
tables which are
+            // not valid (empty commit metadata, invalid parquet files)
+            .withMetadataIndexColumnStats(false).build())
         .withCleanConfig(HoodieCleanConfig.newBuilder()
             
.withCleanerPolicy(HoodieCleaningPolicy.KEEP_LATEST_COMMITS).retainCommits(1).build())
         .build();
 
     HoodieTableMetaClient metaClient = HoodieTestUtils.init(hadoopConf, 
basePath, HoodieTableType.MERGE_ON_READ);
+    HoodieTableMetadataWriter metadataWriter = 
SparkHoodieBackedTableMetadataWriter.create(hadoopConf, config, context);
     HoodieTestTable testTable = HoodieTestTable.of(metaClient);
     String p0 = "2020/01/01";
-
     // Make 3 files, one base file and 2 log files associated with base file
     String file1P0 = 
testTable.addDeltaCommit("000").getFileIdsWithBaseFilesInPartitions(p0).get(p0);
-    testTable.forDeltaCommit("000")
-                .withLogFile(p0, file1P0, 1)
-                .withLogFile(p0, file1P0, 2);
+    Map<String, List<String>> part1ToFileId = Collections.unmodifiableMap(new 
HashMap<String, List<String>>() {
+      {
+        put(p0, CollectionUtils.createImmutableList(file1P0));
+      }
+    });

Review Comment:
   Consider moving this partition-to-file-IDs map construction into a shared test utility method — the anonymous-subclass `HashMap` initializer pattern is repeated across tests and is hard to read.



##########
hudi-common/src/main/java/org/apache/hudi/metadata/HoodieMetadataPayload.java:
##########
@@ -756,7 +756,7 @@ public HoodieRecordGlobalLocation getRecordGlobalLocation() 
{
     if (recordIndexMetadata.getFileIndex() != RECORD_INDEX_MISSING_FILEINDEX) {
       fileId += "-" + recordIndexMetadata.getFileIndex();
     }
-    final Date instantDate = new Date(recordIndexMetadata.getInstantTime());
+    final java.util.Date instantDate = new 
java.util.Date(recordIndexMetadata.getInstantTime());

Review Comment:
   This change to fully-qualified `java.util.Date` should be reverted before merging — restore the original import-based `Date` usage.



##########
hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/DeleteMetadataTableProcedure.scala:
##########
@@ -51,6 +53,11 @@ class DeleteMetadataTableProcedure extends BaseProcedure 
with ProcedureBuilder w
     try {
       val statuses = metaClient.getFs.listStatus(metadataPath)
       if (statuses.nonEmpty) metaClient.getFs.delete(metadataPath, true)
+
+      val removingPropKeys = 
removeMetadataPartitionsProps(metaClient.getTableConfig.getProps)
+        
.getRight.keySet().asInstanceOf[java.util.Set[String]].stream().collect(Collectors.toSet[String])
+      val metaPathDir = new Path(metaClient.getBasePathV2, METAFOLDER_NAME)
+      HoodieTableConfig.delete(metaClient.getFs, metaPathDir, removingPropKeys)

Review Comment:
   This metadata-partition property-removal logic is duplicated across call sites; please extract it into a shared helper method.



##########
hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableMetaClient.java:
##########
@@ -465,6 +466,31 @@ public void validateTableProperties(Properties properties) 
{
     }
   }
 
+  /**
+   * Use this method to remove {@link 
HoodieTableConfig#TABLE_METADATA_PARTITIONS}
+   * and {@link HoodieTableConfig#TABLE_METADATA_PARTITIONS_INFLIGHT} during 
table
+   * init process as the metadata table is not initialized yet.
+   *
+   * @param props the original properties
+   * @return a {@link Pair} of which left is the updated {@link Properties} 
and right
+   * is removed {@link Properties}.
+   */
+  public static Pair<Properties, Properties> 
removeMetadataPartitionsProps(Properties props) {

Review Comment:
   This method should be extracted to a shared utility location rather than living on `HoodieTableMetaClient`, so all call sites can reuse it consistently.



##########
hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/spark/sql/hudi/command/TruncateHoodieTableCommand.scala:
##########
@@ -72,7 +73,7 @@ case class TruncateHoodieTableCommand(
 
       // ReInit hoodie.properties
       HoodieTableMetaClient.withPropertyBuilder()
-        .fromProperties(properties)
+        .fromProperties(removeMetadataPartitionsProps(properties).getLeft)

Review Comment:
   Same as the other call sites: this property-removal call should go through the extracted shared helper instead of being inlined here.



##########
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/BaseHoodieWriteClient.java:
##########
@@ -685,13 +688,19 @@ public void restoreToSavepoint(String savepointTime) {
       try {
         // Delete metadata table directly when users trigger savepoint 
rollback if mdt existed and beforeTimelineStarts
         String metadataTableBasePathStr = 
HoodieTableMetadata.getMetadataTableBasePath(config.getBasePath());
-        HoodieTableMetaClient mdtClient = 
HoodieTableMetaClient.builder().setConf(hadoopConf).setBasePath(metadataTableBasePathStr).build();
+        HoodieTableMetaClient dataMetaClient = 
HoodieTableMetaClient.builder().setConf(hadoopConf).setBasePath(config.getBasePath()).build();
+        HoodieTableMetaClient metadataMetaClient = 
HoodieTableMetaClient.builder().setConf(hadoopConf).setBasePath(metadataTableBasePathStr).build();
         // Same as HoodieTableMetadataUtil#processRollbackMetadata
         HoodieInstant syncedInstant = new HoodieInstant(false, 
HoodieTimeline.DELTA_COMMIT_ACTION, savepointTime);
         // The instant required to sync rollback to MDT has been archived and 
the mdt syncing will be failed
         // So that we need to delete the whole MDT here.
-        if 
(mdtClient.getCommitsTimeline().isBeforeTimelineStarts(syncedInstant.getTimestamp()))
 {
-          mdtClient.getFs().delete(new Path(metadataTableBasePathStr), true);
+        if 
(metadataMetaClient.getCommitsTimeline().isBeforeTimelineStarts(syncedInstant.getTimestamp()))
 {
+          metadataMetaClient.getFs().delete(new 
Path(metadataTableBasePathStr), true);
+          // Remove metadata partitions properties from hoodie.properties
+          Set<String> removingPropKeys = 
removeMetadataPartitionsProps(dataMetaClient.getTableConfig().getProps())
+              
.getRight().keySet().stream().map(Object::toString).collect(Collectors.toSet());
+          Path metaPathDir = new Path(dataMetaClient.getBasePathV2(), 
METAFOLDER_NAME);
+          HoodieTableConfig.delete(fs, metaPathDir, removingPropKeys);

Review Comment:
   Same duplication as in `DeleteMetadataTableProcedure` and `TruncateHoodieTableCommand` — please extract this property-removal block into a shared helper method.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to