manojpec commented on a change in pull request #4194:
URL: https://github.com/apache/hudi/pull/4194#discussion_r761768414



##########
File path: 
hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieBackedMetadata.java
##########
@@ -346,6 +363,56 @@ public void testMetadataTableServices() throws Exception {
     assertEquals(tableMetadata.getLatestCompactionTime().get(), "0000004001");
   }
 
+
+  /**
+   * Tests that virtual key configs are honored in base files after compaction 
in metadata table.
+   *
+   * @throws Exception
+   */
+  @ParameterizedTest
+  @ValueSource(booleans = {true, false})
+  public void testVirtualKeysInBaseFiles(boolean populateMetaFields) throws 
Exception {
+    HoodieTableType tableType = MERGE_ON_READ;
+    init(tableType, false);
+    writeConfig = getWriteConfigBuilder(true, true, false)
+        .withMetadataConfig(HoodieMetadataConfig.newBuilder()
+            .enable(true)
+            .enableFullScan(true)
+            .enableMetrics(false)
+            .withPopulateMetaFields(populateMetaFields)
+            .withMaxNumDeltaCommitsBeforeCompaction(2)
+            .build()).build();
+    initWriteConfigAndMetatableWriter(writeConfig, true);
+
+    doWriteOperation(testTable, "0000001", INSERT);
+    doClean(testTable, "0000003", Arrays.asList("0000001"));
+    // this should have triggered compaction in metadata table
+    doWriteOperation(testTable, "0000004", UPSERT);
+
+    HoodieTableMetadata tableMetadata = metadata(writeConfig, context);
+    assertTrue(tableMetadata.getLatestCompactionTime().isPresent());
+    assertEquals(tableMetadata.getLatestCompactionTime().get(), "0000004001");
+
+    HoodieTableMetaClient metadataMetaClient = 
HoodieTableMetaClient.builder().setConf(hadoopConf).setBasePath(metadataTableBasePath).build();
+    HoodieWriteConfig metadataTableWriteConfig = 
getMetadataWriteConfig(writeConfig);
+    metadataMetaClient.reloadActiveTimeline();
+
+    HoodieTable table = HoodieSparkTable.create(metadataTableWriteConfig, 
context, metadataMetaClient);
+    table.getHoodieView().sync();
+    List<FileSlice> fileSlices = 
table.getSliceView().getLatestFileSlices("files").collect(Collectors.toList());
+    HoodieBaseFile baseFile = fileSlices.get(0).getBaseFile().get();
+    HoodieHFileReader hoodieHFileReader = new 
HoodieHFileReader(context.getHadoopConf().get(), new Path(baseFile.getPath()),
+        new CacheConfig(context.getHadoopConf().get()));
+    List<Pair<String, IndexedRecord>> records = 
hoodieHFileReader.readAllRecords();

Review comment:
       Good test.

##########
File path: 
hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieMetadataBase.java
##########
@@ -292,7 +292,7 @@ protected HoodieWriteConfig getWriteConfig(boolean 
autoCommit, boolean useFileLi
             .enable(useFileListingMetadata)
             .enableFullScan(enableFullScan)
             .enableMetrics(enableMetrics)
-            .withPopulateMetaFields(false)
+            
.withPopulateMetaFields(HoodieMetadataConfig.POPULATE_META_FIELDS.defaultValue())

Review comment:
       Maybe we should remove this line, since we are just setting the default
value here?




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to