This is an automated email from the ASF dual-hosted git repository.

sivabalan pushed a commit to branch release-0.10.1-rc1
in repository https://gitbox.apache.org/repos/asf/hudi.git
commit 872d984641d7d151fb196b496953869f71724439
Author: YueZhang <[email protected]>
AuthorDate: Mon Jan 3 11:43:30 2022 +0800

    [HUDI-3138] Fix broken UT test for TestHiveSyncTool.testDropPartitions (#4493)

    Co-authored-by: yuezhang <[email protected]>
---
 .../src/test/java/org/apache/hudi/hive/TestHiveSyncTool.java   | 8 +++++---
 .../test/java/org/apache/hudi/hive/testutils/HiveTestUtil.java | 3 ++-
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/hudi-sync/hudi-hive-sync/src/test/java/org/apache/hudi/hive/TestHiveSyncTool.java b/hudi-sync/hudi-hive-sync/src/test/java/org/apache/hudi/hive/TestHiveSyncTool.java
index eaca521..c515c9d 100644
--- a/hudi-sync/hudi-hive-sync/src/test/java/org/apache/hudi/hive/TestHiveSyncTool.java
+++ b/hudi-sync/hudi-hive-sync/src/test/java/org/apache/hudi/hive/TestHiveSyncTool.java
@@ -812,12 +812,14 @@ public class TestHiveSyncTool {
     assertEquals(hiveClient.getTableSchema(HiveTestUtil.hiveSyncConfig.tableName).size(),
         hiveClient.getDataSchema().getColumns().size() + 1,
         "Hive Schema should match the table schema + partition field");
-    assertEquals(1, hiveClient.scanTablePartitions(hiveSyncConfig.tableName).size(),
+    List<Partition> partitions = hiveClient.scanTablePartitions(hiveSyncConfig.tableName);
+    assertEquals(1, partitions.size(),
         "Table partitions should match the number of partitions we wrote");
     assertEquals(instantTime, hiveClient.getLastCommitTimeSynced(hiveSyncConfig.tableName).get(),
         "The last commit that was synced should be updated in the TBLPROPERTIES");
-    // create a replace commit to delete current partitions
-    HiveTestUtil.createReplaceCommit("101", "2021/12/28", WriteOperationType.DELETE_PARTITION);
+    String partitiontoDelete = partitions.get(0).getValues().get(0).replace("-","/");
+    // create a replace commit to delete current partitions+
+    HiveTestUtil.createReplaceCommit("101", partitiontoDelete, WriteOperationType.DELETE_PARTITION, true, true);

     // sync drop partitins
     tool = new HiveSyncTool(hiveSyncConfig, HiveTestUtil.getHiveConf(), fileSystem);
diff --git a/hudi-sync/hudi-hive-sync/src/test/java/org/apache/hudi/hive/testutils/HiveTestUtil.java b/hudi-sync/hudi-hive-sync/src/test/java/org/apache/hudi/hive/testutils/HiveTestUtil.java
index b54005b..34158d4 100644
--- a/hudi-sync/hudi-hive-sync/src/test/java/org/apache/hudi/hive/testutils/HiveTestUtil.java
+++ b/hudi-sync/hudi-hive-sync/src/test/java/org/apache/hudi/hive/testutils/HiveTestUtil.java
@@ -178,9 +178,10 @@ public class HiveTestUtil {
     createCommitFile(commitMetadata, instantTime);
   }

-  public static void createReplaceCommit(String instantTime, String partitions, WriteOperationType type)
+  public static void createReplaceCommit(String instantTime, String partitions, WriteOperationType type, boolean isParquetSchemaSimple, boolean useSchemaFromCommitMetadata)
       throws IOException {
     HoodieReplaceCommitMetadata replaceCommitMetadata = new HoodieReplaceCommitMetadata();
+    addSchemaToCommitMetadata(replaceCommitMetadata, isParquetSchemaSimple, useSchemaFromCommitMetadata);
     replaceCommitMetadata.setOperationType(type);
     Map<String, List<String>> partitionToReplaceFileIds = new HashMap<>();
     partitionToReplaceFileIds.put(partitions, new ArrayList<>());
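
For readers skimming the diff: the test previously passed a hard-coded partition path ("2021/12/28") to the replace commit, presumably a value that no longer matches the partitions the test fixture writes. The fix derives the partition to delete from the partitions actually synced to Hive, converting the Hive partition value (date with "-" separators) back into the storage-path form (with "/" separators). Below is a minimal, self-contained sketch of that conversion only; the class and variable names are illustrative and not part of the Hudi API.

    // Sketch (not part of the commit): converting a Hive partition value back
    // into the storage-path form the test passes to createReplaceCommit.
    public class PartitionPathSketch {
      public static void main(String[] args) {
        // e.g. the value returned by Partition#getValues().get(0) in the test
        String hivePartitionValue = "2022-01-03";
        // "-" -> "/" yields the partition path form, e.g. "2022/01/03"
        String partitionPath = hivePartitionValue.replace("-", "/");
        System.out.println(partitionPath);
      }
    }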
