This is an automated email from the ASF dual-hosted git repository.

yihua pushed a commit to branch branch-0.x
in repository https://gitbox.apache.org/repos/asf/hudi.git
commit 541b4dcf2441d8bc9233d415b5de9622faa01e80
Author: Vova Kolmakov <[email protected]>
AuthorDate: Wed May 15 00:48:27 2024 -0700

    [HUDI-7628] Rename FSUtils.getPartitionPath to constructAbsolutePath (#11054)

    Co-authored-by: Vova Kolmakov <[email protected]>
---
 .../hudi/aws/sync/AWSGlueCatalogSyncClient.java    |  4 +--
 .../apache/hudi/cli/commands/RepairsCommand.java   |  4 +--
 .../apache/hudi/client/CompactionAdminClient.java  |  4 +--
 .../index/bucket/ConsistentBucketIndexUtils.java   |  8 +++---
 .../org/apache/hudi/io/HoodieAppendHandle.java     |  2 +-
 .../org/apache/hudi/io/HoodieCreateHandle.java     |  2 +-
 .../java/org/apache/hudi/io/HoodieMergeHandle.java |  2 +-
 .../java/org/apache/hudi/io/HoodieWriteHandle.java |  4 +--
 .../metadata/HoodieBackedTableMetadataWriter.java  |  2 +-
 .../hudi/table/action/compact/HoodieCompactor.java |  2 +-
 .../table/action/rollback/BaseRollbackHelper.java  |  4 +--
 .../rollback/ListingBasedRollbackStrategy.java     |  6 ++--
 .../rollback/MarkerBasedRollbackStrategy.java      |  2 +-
 .../marker/TimelineServerBasedWriteMarkers.java    |  4 +--
 .../org/apache/hudi/table/marker/WriteMarkers.java |  2 +-
 .../io/storage/row/HoodieRowDataCreateHandle.java  |  4 +--
 .../hudi/io/storage/row/HoodieRowCreateHandle.java |  4 +--
 .../TestSavepointRestoreMergeOnRead.java           |  8 +++---
 .../java/org/apache/hudi/table/TestCleaner.java    |  4 +--
 .../hudi/table/marker/TestWriteMarkersBase.java    |  2 +-
 .../java/org/apache/hudi/common/fs/FSUtils.java    | 32 +++++++++++-----------
 .../hudi/common/model/CompactionOperation.java     |  2 +-
 .../hudi/common/model/HoodieCommitMetadata.java    |  8 +++---
 .../hudi/common/table/cdc/HoodieCDCExtractor.java  |  4 +--
 .../clean/CleanMetadataV1MigrationHandler.java     |  2 +-
 .../clean/CleanPlanV2MigrationHandler.java         |  2 +-
 .../compaction/CompactionV1MigrationHandler.java   |  2 +-
 .../table/view/AbstractTableFileSystemView.java    |  4 +--
 .../IncrementalTimelineSyncFileSystemView.java     |  2 +-
 .../sink/compact/ITTestHoodieFlinkCompactor.java   |  2 +-
 .../org/apache/hudi/IncrementalRelation.scala      |  2 +-
 .../RepairAddpartitionmetaProcedure.scala          |  2 +-
 .../RepairMigratePartitionMetaProcedure.scala      |  2 +-
 .../procedures/ShowInvalidParquetProcedure.scala   |  2 +-
 .../TestSparkConsistentBucketClustering.java       |  2 +-
 .../apache/hudi/sync/adb/HoodieAdbJdbcClient.java  | 10 +++----
 .../org/apache/hudi/hive/ddl/HMSDDLExecutor.java   |  4 +--
 .../hudi/hive/ddl/QueryBasedDDLExecutor.java       |  4 +--
 .../org/apache/hudi/hive/TestHiveSyncTool.java     |  2 +-
 .../apache/hudi/sync/common/HoodieSyncClient.java  |  4 +--
 .../hudi/utilities/HoodieDataTableUtils.java       |  2 +-
 .../utilities/HoodieMetadataTableValidator.java    |  8 +++---
 .../hudi/utilities/HoodieSnapshotCopier.java       |  4 +--
 .../hudi/utilities/HoodieSnapshotExporter.java     |  4 +--
 44 files changed, 93 insertions(+), 93 deletions(-)

diff --git a/hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogSyncClient.java b/hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogSyncClient.java
index 9e3c088f8b0..11e3eaea1c0 100644
--- a/hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogSyncClient.java
+++ b/hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogSyncClient.java
@@ -301,7 +301,7 @@ public class AWSGlueCatalogSyncClient extends HoodieSyncClient {
     try {
       StorageDescriptor sd = table.storageDescriptor();
       List<PartitionInput> partitionInputList = partitionsToAdd.stream().map(partition -> {
-        String fullPartitionPath = FSUtils.getPartitionPathInHadoopPath(s3aToS3(getBasePath()), partition).toString();
+        String fullPartitionPath =
FSUtils.constructAbsolutePathInHadoopPath(s3aToS3(getBasePath()), partition).toString(); List<String> partitionValues = partitionValueExtractor.extractPartitionValuesInPath(partition); StorageDescriptor partitionSD = sd.copy(copySd -> copySd.location(fullPartitionPath)); return PartitionInput.builder().values(partitionValues).storageDescriptor(partitionSD).build(); @@ -345,7 +345,7 @@ public class AWSGlueCatalogSyncClient extends HoodieSyncClient { try { StorageDescriptor sd = table.storageDescriptor(); List<BatchUpdatePartitionRequestEntry> updatePartitionEntries = changedPartitions.stream().map(partition -> { - String fullPartitionPath = FSUtils.getPartitionPathInHadoopPath(s3aToS3(getBasePath()), partition).toString(); + String fullPartitionPath = FSUtils.constructAbsolutePathInHadoopPath(s3aToS3(getBasePath()), partition).toString(); List<String> partitionValues = partitionValueExtractor.extractPartitionValuesInPath(partition); StorageDescriptor partitionSD = sd.copy(copySd -> copySd.location(fullPartitionPath)); PartitionInput partitionInput = PartitionInput.builder().values(partitionValues).storageDescriptor(partitionSD).build(); diff --git a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/RepairsCommand.java b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/RepairsCommand.java index 28e1a0d39ba..0eedbf964fe 100644 --- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/RepairsCommand.java +++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/RepairsCommand.java @@ -128,7 +128,7 @@ public class RepairsCommand { int ind = 0; for (String partition : partitionPaths) { - StoragePath partitionPath = FSUtils.getPartitionPath(basePath, partition); + StoragePath partitionPath = FSUtils.constructAbsolutePath(basePath, partition); String[] row = new String[3]; row[0] = partition; row[1] = "Yes"; @@ -236,7 +236,7 @@ public class RepairsCommand { int ind = 0; for (String partitionPath : partitionPaths) { StoragePath partition = - FSUtils.getPartitionPath(client.getBasePath(), partitionPath); + FSUtils.constructAbsolutePath(client.getBasePath(), partitionPath); Option<StoragePath> textFormatFile = HoodiePartitionMetadata.textFormatMetaPathIfExists(HoodieCLI.storage, partition); Option<StoragePath> baseFormatFile = diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/CompactionAdminClient.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/CompactionAdminClient.java index a63524dfbb5..dbe07b7d0f3 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/CompactionAdminClient.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/CompactionAdminClient.java @@ -296,7 +296,7 @@ public class CompactionAdminClient extends BaseHoodieClient { if (operation.getDataFileName().isPresent()) { String expPath = metaClient.getStorage() .getPathInfo(new StoragePath( - FSUtils.getPartitionPath(metaClient.getBasePath(), operation.getPartitionPath()), + FSUtils.constructAbsolutePath(metaClient.getBasePath(), operation.getPartitionPath()), operation.getDataFileName().get())) .getPath().toString(); ValidationUtils.checkArgument(df.isPresent(), @@ -309,7 +309,7 @@ public class CompactionAdminClient extends BaseHoodieClient { try { List<StoragePathInfo> pathInfoList = metaClient.getStorage() .listDirectEntries(new StoragePath( - FSUtils.getPartitionPath(metaClient.getBasePath(), operation.getPartitionPath()), dp)); + FSUtils.constructAbsolutePath(metaClient.getBasePath(), operation.getPartitionPath()), dp)); 
ValidationUtils.checkArgument(pathInfoList.size() == 1, "Expect only 1 file-status"); return new HoodieLogFile(pathInfoList.get(0)); } catch (FileNotFoundException fe) { diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/bucket/ConsistentBucketIndexUtils.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/bucket/ConsistentBucketIndexUtils.java index 7a124d25ee9..a90e0db6a06 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/bucket/ConsistentBucketIndexUtils.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/bucket/ConsistentBucketIndexUtils.java @@ -108,8 +108,8 @@ public class ConsistentBucketIndexUtils { */ public static Option<HoodieConsistentHashingMetadata> loadMetadata(HoodieTable table, String partition) { HoodieTableMetaClient metaClient = table.getMetaClient(); - Path metadataPath = FSUtils.getPartitionPathInHadoopPath(metaClient.getHashingMetadataPath(), partition); - Path partitionPath = FSUtils.getPartitionPathInHadoopPath(metaClient.getBasePathV2().toString(), partition); + Path metadataPath = FSUtils.constructAbsolutePathInHadoopPath(metaClient.getHashingMetadataPath(), partition); + Path partitionPath = FSUtils.constructAbsolutePathInHadoopPath(metaClient.getBasePathV2().toString(), partition); try { Predicate<FileStatus> hashingMetaCommitFilePredicate = fileStatus -> { String filename = fileStatus.getPath().getName(); @@ -186,7 +186,7 @@ public class ConsistentBucketIndexUtils { */ public static boolean saveMetadata(HoodieTable table, HoodieConsistentHashingMetadata metadata, boolean overwrite) { HoodieStorage storage = table.getMetaClient().getStorage(); - StoragePath dir = FSUtils.getPartitionPath( + StoragePath dir = FSUtils.constructAbsolutePath( table.getMetaClient().getHashingMetadataPath(), metadata.getPartitionPath()); StoragePath fullPath = new StoragePath(dir, metadata.getFilename()); try (OutputStream out = storage.create(fullPath, overwrite)) { @@ -267,7 +267,7 @@ public class ConsistentBucketIndexUtils { * @return true if hashing metadata file is latest else false */ private static boolean recommitMetadataFile(HoodieTable table, FileStatus metaFile, String partition) { - Path partitionPath = new Path(FSUtils.getPartitionPath(table.getMetaClient().getBasePathV2(), partition).toUri()); + Path partitionPath = new Path(FSUtils.constructAbsolutePath(table.getMetaClient().getBasePathV2(), partition).toUri()); String timestamp = getTimestampFromFile(metaFile.getPath().getName()); if (table.getPendingCommitTimeline().containsInstant(timestamp)) { return false; diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieAppendHandle.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieAppendHandle.java index 2bac318fc81..5b414c79b53 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieAppendHandle.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieAppendHandle.java @@ -206,7 +206,7 @@ public class HoodieAppendHandle<T, I, K, O> extends HoodieWriteHandle<T, I, K, O try { // Save hoodie partition meta in the partition path HoodiePartitionMetadata partitionMetadata = new HoodiePartitionMetadata(storage, baseInstantTime, - new StoragePath(config.getBasePath()), FSUtils.getPartitionPath(config.getBasePath(), partitionPath), + new StoragePath(config.getBasePath()), FSUtils.constructAbsolutePath(config.getBasePath(), partitionPath), 
hoodieTable.getPartitionMetafileFormat()); partitionMetadata.trySave(); this.writer = createLogWriter(fileSlice, baseInstantTime); diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieCreateHandle.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieCreateHandle.java index 0ad4e212a1a..ce908f89bb6 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieCreateHandle.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieCreateHandle.java @@ -98,7 +98,7 @@ public class HoodieCreateHandle<T, I, K, O> extends HoodieWriteHandle<T, I, K, O try { HoodiePartitionMetadata partitionMetadata = new HoodiePartitionMetadata(storage, instantTime, new StoragePath(config.getBasePath()), - FSUtils.getPartitionPath(config.getBasePath(), partitionPath), + FSUtils.constructAbsolutePath(config.getBasePath(), partitionPath), hoodieTable.getPartitionMetafileFormat()); partitionMetadata.trySave(); createMarkerFile(partitionPath, diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieMergeHandle.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieMergeHandle.java index afae82fd13f..797684b71af 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieMergeHandle.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieMergeHandle.java @@ -174,7 +174,7 @@ public class HoodieMergeHandle<T, I, K, O> extends HoodieWriteHandle<T, I, K, O> HoodiePartitionMetadata partitionMetadata = new HoodiePartitionMetadata(storage, instantTime, new StoragePath(config.getBasePath()), - FSUtils.getPartitionPath(config.getBasePath(), partitionPath), + FSUtils.constructAbsolutePath(config.getBasePath(), partitionPath), hoodieTable.getPartitionMetafileFormat()); partitionMetadata.trySave(); diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieWriteHandle.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieWriteHandle.java index de45c51ecf1..486102b5222 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieWriteHandle.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieWriteHandle.java @@ -114,7 +114,7 @@ public abstract class HoodieWriteHandle<T, I, K, O> extends HoodieIOHandle<T, I, } public StoragePath makeNewPath(String partitionPath) { - StoragePath path = FSUtils.getPartitionPath(config.getBasePath(), partitionPath); + StoragePath path = FSUtils.constructAbsolutePath(config.getBasePath(), partitionPath); try { if (!storage.exists(path)) { storage.createDirectory(path); // create a new partition as needed. 
@@ -247,7 +247,7 @@ public abstract class HoodieWriteHandle<T, I, K, O> extends HoodieIOHandle<T, I, : Option.empty(); return HoodieLogFormat.newWriterBuilder() - .onParentPath(FSUtils.getPartitionPath(hoodieTable.getMetaClient().getBasePath(), partitionPath)) + .onParentPath(FSUtils.constructAbsolutePath(hoodieTable.getMetaClient().getBasePath(), partitionPath)) .withFileId(fileId) .overBaseCommit(baseCommitTime) .withLogVersion(latestLogFile.map(HoodieLogFile::getLogVersion).orElse(HoodieLogFile.LOGFILE_BASE_VERSION)) diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java index 7a084aba52c..0714f27d0e8 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java @@ -713,7 +713,7 @@ public abstract class HoodieBackedTableMetadataWriter<I> implements HoodieTableM final HoodieDeleteBlock block = new HoodieDeleteBlock(new DeleteRecord[0], blockHeader); try (HoodieLogFormat.Writer writer = HoodieLogFormat.newWriterBuilder() - .onParentPath(FSUtils.getPartitionPath(metadataWriteConfig.getBasePath(), metadataPartition.getPartitionPath())) + .onParentPath(FSUtils.constructAbsolutePath(metadataWriteConfig.getBasePath(), metadataPartition.getPartitionPath())) .withFileId(fileGroupFileId) .overBaseCommit(instantTime) .withLogVersion(HoodieLogFile.LOGFILE_BASE_VERSION) diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/HoodieCompactor.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/HoodieCompactor.java index 9ede03b12cd..9e38410fed9 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/HoodieCompactor.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/HoodieCompactor.java @@ -186,7 +186,7 @@ public abstract class HoodieCompactor<T, I, K, O> implements Serializable { LOG.info("MaxMemoryPerCompaction => " + maxMemoryPerCompaction); List<String> logFiles = operation.getDeltaFileNames().stream().map(p -> - new StoragePath(FSUtils.getPartitionPath( + new StoragePath(FSUtils.constructAbsolutePath( metaClient.getBasePath(), operation.getPartitionPath()), p).toString()) .collect(toList()); HoodieMergedLogRecordScanner scanner = HoodieMergedLogRecordScanner.newBuilder() diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/BaseRollbackHelper.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/BaseRollbackHelper.java index 7d16726c20d..f9cff041e9a 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/BaseRollbackHelper.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/BaseRollbackHelper.java @@ -165,7 +165,7 @@ public class BaseRollbackHelper implements Serializable { WriteMarkers writeMarkers = WriteMarkersFactory.get(config.getMarkersType(), table, instantTime); writer = HoodieLogFormat.newWriterBuilder() - .onParentPath(FSUtils.getPartitionPath(metaClient.getBasePathV2().toString(), partitionPath)) + .onParentPath(FSUtils.constructAbsolutePath(metaClient.getBasePathV2().toString(), partitionPath)) .withFileId(fileId) 
.overBaseCommit(latestBaseInstant) .withStorage(metaClient.getStorage()) @@ -203,7 +203,7 @@ public class BaseRollbackHelper implements Serializable { // With listing based rollback, sometimes we only get the fileID of interest(so that we can add rollback command block) w/o the actual file name. // So, we want to ignore such invalid files from this list before we add it to the rollback stats. - String partitionFullPath = FSUtils.getPartitionPath(metaClient.getBasePathV2().toString(), rollbackRequest.getPartitionPath()).toString(); + String partitionFullPath = FSUtils.constructAbsolutePath(metaClient.getBasePathV2().toString(), rollbackRequest.getPartitionPath()).toString(); Map<String, Long> validLogBlocksToDelete = new HashMap<>(); rollbackRequest.getLogBlocksToBeDeleted().entrySet().stream().forEach((kv) -> { String logFileFullPath = kv.getKey(); diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/ListingBasedRollbackStrategy.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/ListingBasedRollbackStrategy.java index 83d5d88c28f..1fd054b9407 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/ListingBasedRollbackStrategy.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/ListingBasedRollbackStrategy.java @@ -225,7 +225,7 @@ public class ListingBasedRollbackStrategy implements BaseRollbackPlanActionExecu } return false; }; - return fs.listStatus(FSUtils.getPartitionPathInHadoopPath(config.getBasePath(), partitionPath), filter); + return fs.listStatus(FSUtils.constructAbsolutePathInHadoopPath(config.getBasePath(), partitionPath), filter); } private FileStatus[] fetchFilesFromInstant(HoodieInstant instantToRollback, String partitionPath, String basePath, @@ -286,7 +286,7 @@ public class ListingBasedRollbackStrategy implements BaseRollbackPlanActionExecu } private static Path[] listFilesToBeDeleted(String basePath, String partitionPath) { - return new Path[] {FSUtils.getPartitionPathInHadoopPath(basePath, partitionPath)}; + return new Path[] {FSUtils.constructAbsolutePathInHadoopPath(basePath, partitionPath)}; } private static Path[] getFilesFromCommitMetadata(String basePath, HoodieCommitMetadata commitMetadata, String partitionPath) { @@ -356,7 +356,7 @@ public class ListingBasedRollbackStrategy implements BaseRollbackPlanActionExecu FileSlice latestFileSlice = latestFileSlices.get(writeStat.getFileId()); String fileId = writeStat.getFileId(); String latestBaseInstant = latestFileSlice.getBaseInstantTime(); - Path fullLogFilePath = FSUtils.getPartitionPathInHadoopPath(table.getConfig().getBasePath(), writeStat.getPath()); + Path fullLogFilePath = FSUtils.constructAbsolutePathInHadoopPath(table.getConfig().getBasePath(), writeStat.getPath()); Map<String, Long> logFilesWithBlocksToRollback = Collections.singletonMap( fullLogFilePath.toString(), writeStat.getTotalWriteBytes() > 0 ? 
writeStat.getTotalWriteBytes() : 1L); hoodieRollbackRequests.add(new HoodieRollbackRequest(partitionPath, fileId, latestBaseInstant, diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/MarkerBasedRollbackStrategy.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/MarkerBasedRollbackStrategy.java index 648d05da61f..5ba61b38803 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/MarkerBasedRollbackStrategy.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/MarkerBasedRollbackStrategy.java @@ -121,7 +121,7 @@ public class MarkerBasedRollbackStrategy<T, I, K, O> implements BaseRollbackPlan LOG.warn("Find old marker type for log file: " + fileNameWithPartitionToRollback); fileId = FSUtils.getFileIdFromFilePath(fullLogFilePath); baseCommitTime = FSUtils.getCommitTime(fullLogFilePath.getName()); - StoragePath partitionPath = FSUtils.getPartitionPath(config.getBasePath(), relativePartitionPath); + StoragePath partitionPath = FSUtils.constructAbsolutePath(config.getBasePath(), relativePartitionPath); // NOTE: Since we're rolling back incomplete Delta Commit, it only could have appended its // block to the latest log-file diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/TimelineServerBasedWriteMarkers.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/TimelineServerBasedWriteMarkers.java index 7b0fda4ea47..f738449d7dc 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/TimelineServerBasedWriteMarkers.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/TimelineServerBasedWriteMarkers.java @@ -158,7 +158,7 @@ public class TimelineServerBasedWriteMarkers extends WriteMarkers { LOG.info("[timeline-server-based] Created marker file " + partitionPath + "/" + markerFileName + " in " + timer.endTimer() + " ms"); if (success) { - return Option.of(new StoragePath(FSUtils.getPartitionPath(markerDirPath, partitionPath), markerFileName)); + return Option.of(new StoragePath(FSUtils.constructAbsolutePath(markerDirPath, partitionPath), markerFileName)); } else { return Option.empty(); } @@ -177,7 +177,7 @@ public class TimelineServerBasedWriteMarkers extends WriteMarkers { + " in " + timer.endTimer() + " ms"); if (success) { - return Option.of(new StoragePath(FSUtils.getPartitionPath(markerDirPath, partitionPath), markerFileName)); + return Option.of(new StoragePath(FSUtils.constructAbsolutePath(markerDirPath, partitionPath), markerFileName)); } else { // this failed may due to early conflict detection, so we need to throw out. 
throw new HoodieEarlyConflictDetectionException(new ConcurrentModificationException("Early conflict detected but cannot resolve conflicts for overlapping writes")); diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/WriteMarkers.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/WriteMarkers.java index e481d0b9e4b..cd9f67b5b20 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/WriteMarkers.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/WriteMarkers.java @@ -182,7 +182,7 @@ public abstract class WriteMarkers implements Serializable { * @return path of the marker file */ protected StoragePath getMarkerPath(String partitionPath, String fileName, IOType type) { - StoragePath path = FSUtils.getPartitionPath(markerDirPath, partitionPath); + StoragePath path = FSUtils.constructAbsolutePath(markerDirPath, partitionPath); String markerFileName = getMarkerFileName(fileName, type); return new StoragePath(path, markerFileName); } diff --git a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowDataCreateHandle.java b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowDataCreateHandle.java index 19455773153..4227e14165f 100644 --- a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowDataCreateHandle.java +++ b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowDataCreateHandle.java @@ -104,7 +104,7 @@ public class HoodieRowDataCreateHandle implements Serializable { storage, instantTime, new StoragePath(writeConfig.getBasePath()), - FSUtils.getPartitionPath(writeConfig.getBasePath(), partitionPath), + FSUtils.constructAbsolutePath(writeConfig.getBasePath(), partitionPath), table.getPartitionMetafileFormat()); partitionMetadata.trySave(); createMarkerFile(partitionPath, FSUtils.makeBaseFileName(this.instantTime, getWriteToken(), this.fileId, table.getBaseFileExtension())); @@ -190,7 +190,7 @@ public class HoodieRowDataCreateHandle implements Serializable { private Path makeNewPath(String partitionPath) { StoragePath path = - FSUtils.getPartitionPath(writeConfig.getBasePath(), partitionPath); + FSUtils.constructAbsolutePath(writeConfig.getBasePath(), partitionPath); try { if (!storage.exists(path)) { storage.createDirectory(path); // create a new partition as needed. 
diff --git a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowCreateHandle.java b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowCreateHandle.java index 890b12899f1..0d164f379fe 100644 --- a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowCreateHandle.java +++ b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowCreateHandle.java @@ -141,7 +141,7 @@ public class HoodieRowCreateHandle implements Serializable { storage, instantTime, new StoragePath(writeConfig.getBasePath()), - FSUtils.getPartitionPath(writeConfig.getBasePath(), partitionPath), + FSUtils.constructAbsolutePath(writeConfig.getBasePath(), partitionPath), table.getPartitionMetafileFormat()); partitionMetadata.trySave(); @@ -262,7 +262,7 @@ public class HoodieRowCreateHandle implements Serializable { } private static Path makeNewPath(FileSystem fs, String partitionPath, String fileName, HoodieWriteConfig writeConfig) { - Path path = FSUtils.getPartitionPathInHadoopPath(writeConfig.getBasePath(), partitionPath); + Path path = FSUtils.constructAbsolutePathInHadoopPath(writeConfig.getBasePath(), partitionPath); try { if (!fs.exists(path)) { fs.mkdirs(path); // create a new partition as needed. diff --git a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestSavepointRestoreMergeOnRead.java b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestSavepointRestoreMergeOnRead.java index 04f931904bd..5027170cca7 100644 --- a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestSavepointRestoreMergeOnRead.java +++ b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestSavepointRestoreMergeOnRead.java @@ -119,7 +119,7 @@ public class TestSavepointRestoreMergeOnRead extends HoodieClientTestBase { StoragePathFilter filter = (path) -> path.toString().contains(finalCompactionCommit); for (String pPath : dataGen.getPartitionPaths()) { assertEquals(0, storage.listDirectEntries( - FSUtils.getPartitionPath(hoodieWriteConfig.getBasePath(), pPath), + FSUtils.constructAbsolutePath(hoodieWriteConfig.getBasePath(), pPath), filter).size()); } } @@ -164,7 +164,7 @@ public class TestSavepointRestoreMergeOnRead extends HoodieClientTestBase { StoragePathFilter filter = (path) -> path.toString().contains(secondCommit); for (String pPath : dataGen.getPartitionPaths()) { assertEquals(1, storage.listDirectEntries( - FSUtils.getPartitionPath(hoodieWriteConfig.getBasePath(), pPath), filter) + FSUtils.constructAbsolutePath(hoodieWriteConfig.getBasePath(), pPath), filter) .size()); } @@ -203,7 +203,7 @@ public class TestSavepointRestoreMergeOnRead extends HoodieClientTestBase { filter = (path) -> path.toString().contains(secondCommit); for (String pPath : dataGen.getPartitionPaths()) { assertEquals(0, storage.listDirectEntries( - FSUtils.getPartitionPath(hoodieWriteConfig.getBasePath(), pPath), filter) + FSUtils.constructAbsolutePath(hoodieWriteConfig.getBasePath(), pPath), filter) .size()); } // ensure files matching 1st commit is intact @@ -211,7 +211,7 @@ public class TestSavepointRestoreMergeOnRead extends HoodieClientTestBase { for (String pPath : dataGen.getPartitionPaths()) { assertEquals(1, storage.listDirectEntries( - FSUtils.getPartitionPath(hoodieWriteConfig.getBasePath(), pPath), + FSUtils.constructAbsolutePath(hoodieWriteConfig.getBasePath(), pPath), filter).size()); } } diff --git 
a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/TestCleaner.java b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/TestCleaner.java
index b9a289ec5e4..a41b76387a6 100644
--- a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/TestCleaner.java
+++ b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/TestCleaner.java
@@ -861,9 +861,9 @@ public class TestCleaner extends HoodieCleanerTestBase {
         version2Plan.getFilePathsToBeDeletedPerPartition().get(partition1).size());
     assertEquals(version1Plan.getFilesToBeDeletedPerPartition().get(partition2).size(),
         version2Plan.getFilePathsToBeDeletedPerPartition().get(partition2).size());
-    assertEquals(new Path(FSUtils.getPartitionPathInHadoopPath(metaClient.getBasePath(), partition1), fileName1).toString(),
+    assertEquals(new Path(FSUtils.constructAbsolutePathInHadoopPath(metaClient.getBasePath(), partition1), fileName1).toString(),
         version2Plan.getFilePathsToBeDeletedPerPartition().get(partition1).get(0).getFilePath());
-    assertEquals(new Path(FSUtils.getPartitionPathInHadoopPath(metaClient.getBasePath(), partition2), fileName2).toString(),
+    assertEquals(new Path(FSUtils.constructAbsolutePathInHadoopPath(metaClient.getBasePath(), partition2), fileName2).toString(),
         version2Plan.getFilePathsToBeDeletedPerPartition().get(partition2).get(0).getFilePath());

     // Downgrade and verify version 1 plan
diff --git a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/marker/TestWriteMarkersBase.java b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/marker/TestWriteMarkersBase.java
index 037613eaa5a..7eba0f31ca8 100644
--- a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/marker/TestWriteMarkersBase.java
+++ b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/marker/TestWriteMarkersBase.java
@@ -58,7 +58,7 @@ public abstract class TestWriteMarkersBase extends HoodieCommonTestHarness {
   }

   private void createInvalidFile(String partitionPath, String invalidFileName) {
-    StoragePath path = FSUtils.getPartitionPath(markerFolderPath, partitionPath);
+    StoragePath path = FSUtils.constructAbsolutePath(markerFolderPath, partitionPath);
     StoragePath invalidFilePath = new StoragePath(path, invalidFileName);
     try {
       storage.create(invalidFilePath, false).close();
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/fs/FSUtils.java b/hudi-common/src/main/java/org/apache/hudi/common/fs/FSUtils.java
index ebc71aa2ac0..0685d8d4a88 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/fs/FSUtils.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/fs/FSUtils.java
@@ -704,40 +704,40 @@ public class FSUtils {
     return sizeInBytes / (1024 * 1024);
   }

-  public static Path getPartitionPathInHadoopPath(String basePath, String partitionPath) {
-    if (StringUtils.isNullOrEmpty(partitionPath)) {
+  public static Path constructAbsolutePathInHadoopPath(String basePath, String relativePartitionPath) {
+    if (StringUtils.isNullOrEmpty(relativePartitionPath)) {
       return new Path(basePath);
     }

     // NOTE: We have to chop leading "/" to make sure Hadoop does not treat it like
     //       absolute path
-    String properPartitionPath = partitionPath.startsWith("/")
-        ? partitionPath.substring(1)
-        : partitionPath;
-    return getPartitionPath(new CachingPath(basePath), properPartitionPath);
+    String properPartitionPath = relativePartitionPath.startsWith(PATH_SEPARATOR)
+        ? relativePartitionPath.substring(1)
+        : relativePartitionPath;
+    return constructAbsolutePath(new CachingPath(basePath), properPartitionPath);
   }

-  public static StoragePath getPartitionPath(String basePath, String partitionPath) {
-    if (StringUtils.isNullOrEmpty(partitionPath)) {
+  public static StoragePath constructAbsolutePath(String basePath, String relativePartitionPath) {
+    if (StringUtils.isNullOrEmpty(relativePartitionPath)) {
       return new StoragePath(basePath);
     }

     // NOTE: We have to chop leading "/" to make sure Hadoop does not treat it like
     //       absolute path
-    String properPartitionPath = partitionPath.startsWith("/")
-        ? partitionPath.substring(1)
-        : partitionPath;
-    return getPartitionPath(new StoragePath(basePath), properPartitionPath);
+    String properPartitionPath = relativePartitionPath.startsWith(PATH_SEPARATOR)
+        ? relativePartitionPath.substring(1)
+        : relativePartitionPath;
+    return constructAbsolutePath(new StoragePath(basePath), properPartitionPath);
   }

-  public static Path getPartitionPath(Path basePath, String partitionPath) {
+  public static Path constructAbsolutePath(Path basePath, String relativePartitionPath) {
     // For non-partitioned table, return only base-path
-    return StringUtils.isNullOrEmpty(partitionPath) ? basePath : new CachingPath(basePath, partitionPath);
+    return StringUtils.isNullOrEmpty(relativePartitionPath) ? basePath : new CachingPath(basePath, relativePartitionPath);
   }

-  public static StoragePath getPartitionPath(StoragePath basePath, String partitionPath) {
+  public static StoragePath constructAbsolutePath(StoragePath basePath, String relativePartitionPath) {
     // For non-partitioned table, return only base-path
-    return StringUtils.isNullOrEmpty(partitionPath) ? basePath : new StoragePath(basePath, partitionPath);
+    return StringUtils.isNullOrEmpty(relativePartitionPath) ? basePath : new StoragePath(basePath, relativePartitionPath);
   }

   /**
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/model/CompactionOperation.java b/hudi-common/src/main/java/org/apache/hudi/common/model/CompactionOperation.java
index 04aceb336f9..15accbd49c2 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/model/CompactionOperation.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/model/CompactionOperation.java
@@ -119,7 +119,7 @@ public class CompactionOperation implements Serializable {

   public Option<HoodieBaseFile> getBaseFile(String basePath, String partitionPath) {
     Option<BaseFile> externalBaseFile = bootstrapFilePath.map(BaseFile::new);
-    StoragePath dirPath = FSUtils.getPartitionPath(basePath, partitionPath);
+    StoragePath dirPath = FSUtils.constructAbsolutePath(basePath, partitionPath);
     return dataFileName.map(df -> {
       return externalBaseFile.map(ext -> new HoodieBaseFile(new StoragePath(dirPath, df).toString(), ext))
           .orElseGet(() -> new HoodieBaseFile(new StoragePath(dirPath, df).toString()));
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieCommitMetadata.java b/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieCommitMetadata.java
index eeb16cf12af..b371c6acad1 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieCommitMetadata.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieCommitMetadata.java
@@ -135,7 +135,7 @@ public class HoodieCommitMetadata implements Serializable {
     HashMap<String, String> fullPaths = new HashMap<>();
     for (Map.Entry<String, String> entry : getFileIdAndRelativePaths().entrySet()) {
       String fullPath = entry.getValue() != null
-          ?
FSUtils.getPartitionPath(basePath, entry.getValue()).toString() + ? FSUtils.constructAbsolutePath(basePath, entry.getValue()).toString() : null; fullPaths.put(entry.getKey(), fullPath); } @@ -147,7 +147,7 @@ public class HoodieCommitMetadata implements Serializable { if (getPartitionToWriteStats().get(partitionPath) != null) { for (HoodieWriteStat stat : getPartitionToWriteStats().get(partitionPath)) { if ((stat.getFileId() != null)) { - String fullPath = FSUtils.getPartitionPathInHadoopPath(basePath, stat.getPath()).toString(); + String fullPath = FSUtils.constructAbsolutePathInHadoopPath(basePath, stat.getPath()).toString(); fullPaths.add(fullPath); } } @@ -184,7 +184,7 @@ public class HoodieCommitMetadata implements Serializable { for (HoodieWriteStat stat : stats) { String relativeFilePath = stat.getPath(); StoragePath fullPath = relativeFilePath != null - ? FSUtils.getPartitionPath(basePath, relativeFilePath) : null; + ? FSUtils.constructAbsolutePath(basePath, relativeFilePath) : null; if (fullPath != null) { long blockSize = HoodieStorageUtils.getStorage(fullPath.toString(), hadoopConf).getDefaultBlockSize(fullPath); @@ -218,7 +218,7 @@ public class HoodieCommitMetadata implements Serializable { for (HoodieWriteStat stat : stats) { String relativeFilePath = stat.getPath(); StoragePath fullPath = - relativeFilePath != null ? FSUtils.getPartitionPath(basePath, + relativeFilePath != null ? FSUtils.constructAbsolutePath(basePath, relativeFilePath) : null; if (fullPath != null) { StoragePathInfo pathInfo = diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/cdc/HoodieCDCExtractor.java b/hudi-common/src/main/java/org/apache/hudi/common/table/cdc/HoodieCDCExtractor.java index eea2ebbbc81..fc838bcc1e5 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/cdc/HoodieCDCExtractor.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/cdc/HoodieCDCExtractor.java @@ -184,7 +184,7 @@ public class HoodieCDCExtractor { try { List<StoragePathInfo> touchedFiles = new ArrayList<>(); for (String touchedPartition : touchedPartitions) { - StoragePath partitionPath = FSUtils.getPartitionPath(basePath, touchedPartition); + StoragePath partitionPath = FSUtils.constructAbsolutePath(basePath, touchedPartition); touchedFiles.addAll(storage.listDirectEntries(partitionPath)); } return new HoodieTableFileSystemView( @@ -313,7 +313,7 @@ public class HoodieCDCExtractor { HoodieFileGroupId fgId, HoodieInstant instant, String currentLogFile) { - StoragePath partitionPath = FSUtils.getPartitionPath(basePath, fgId.getPartitionPath()); + StoragePath partitionPath = FSUtils.constructAbsolutePath(basePath, fgId.getPartitionPath()); if (instant.getAction().equals(DELTA_COMMIT_ACTION)) { String currentLogFileName = new StoragePath(currentLogFile).getName(); Option<Pair<String, List<String>>> fileSliceOpt = diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/clean/CleanMetadataV1MigrationHandler.java b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/clean/CleanMetadataV1MigrationHandler.java index 1f7b5792eb0..41e3dc79399 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/clean/CleanMetadataV1MigrationHandler.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/clean/CleanMetadataV1MigrationHandler.java @@ -99,6 +99,6 @@ public class CleanMetadataV1MigrationHandler extends AbstractMigratorBase<Hoodie return fileName; } - return new 
Path(FSUtils.getPartitionPath(basePath, partitionPath), fileName).toString(); + return new Path(FSUtils.constructAbsolutePath(basePath, partitionPath), fileName).toString(); } } diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/clean/CleanPlanV2MigrationHandler.java b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/clean/CleanPlanV2MigrationHandler.java index 7317991af37..99b5185ba73 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/clean/CleanPlanV2MigrationHandler.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/clean/CleanPlanV2MigrationHandler.java @@ -55,7 +55,7 @@ public class CleanPlanV2MigrationHandler extends AbstractMigratorBase<HoodieClea Map<String, List<HoodieCleanFileInfo>> filePathsPerPartition = plan.getFilesToBeDeletedPerPartition().entrySet().stream().map(e -> Pair.of(e.getKey(), e.getValue().stream() .map(v -> new HoodieCleanFileInfo( - new Path(FSUtils.getPartitionPathInHadoopPath(metaClient.getBasePath(), e.getKey()), v).toString(), false)) + new Path(FSUtils.constructAbsolutePathInHadoopPath(metaClient.getBasePath(), e.getKey()), v).toString(), false)) .collect(Collectors.toList()))).collect(Collectors.toMap(Pair::getKey, Pair::getValue)); return new HoodieCleanerPlan(plan.getEarliestInstantToRetain(), plan.getLastCompletedCommitTimestamp(), plan.getPolicy(), new HashMap<>(), VERSION, filePathsPerPartition, new ArrayList<>(), Collections.emptyMap()); diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/compaction/CompactionV1MigrationHandler.java b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/compaction/CompactionV1MigrationHandler.java index 17488a637ce..31905b1ad4b 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/compaction/CompactionV1MigrationHandler.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/compaction/CompactionV1MigrationHandler.java @@ -78,6 +78,6 @@ public class CompactionV1MigrationHandler extends AbstractMigratorBase<HoodieCom return fileName; } - return new Path(FSUtils.getPartitionPath(basePath, partitionPath), fileName).toString(); + return new Path(FSUtils.constructAbsolutePath(basePath, partitionPath), fileName).toString(); } } diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java b/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java index d7097aed170..049af4f420c 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java @@ -348,7 +348,7 @@ public abstract class AbstractTableFileSystemView implements SyncableFileSystemV // Pairs of relative partition path and absolute partition path List<Pair<String, StoragePath>> absolutePartitionPathList = partitionSet.stream() .map(partition -> Pair.of( - partition, FSUtils.getPartitionPath(metaClient.getBasePathV2(), partition))) + partition, FSUtils.constructAbsolutePath(metaClient.getBasePathV2(), partition))) .collect(Collectors.toList()); long beginLsTs = System.currentTimeMillis(); Map<Pair<String, StoragePath>, List<StoragePathInfo>> pathInfoMap = @@ -420,7 +420,7 @@ public abstract class AbstractTableFileSystemView implements SyncableFileSystemV */ private List<StoragePathInfo> 
getAllFilesInPartition(String relativePartitionPath) throws IOException { - StoragePath partitionPath = FSUtils.getPartitionPath(metaClient.getBasePathV2(), + StoragePath partitionPath = FSUtils.constructAbsolutePath(metaClient.getBasePathV2(), relativePartitionPath); long beginLsTs = System.currentTimeMillis(); List<StoragePathInfo> pathInfoList = listPartition(partitionPath); diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/view/IncrementalTimelineSyncFileSystemView.java b/hudi-common/src/main/java/org/apache/hudi/common/table/view/IncrementalTimelineSyncFileSystemView.java index 410f13b2b29..42888e2ad8a 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/table/view/IncrementalTimelineSyncFileSystemView.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/table/view/IncrementalTimelineSyncFileSystemView.java @@ -364,7 +364,7 @@ public abstract class IncrementalTimelineSyncFileSystemView extends AbstractTabl final String partitionPath = entry.getValue().getPartitionPath(); List<String> fullPathList = entry.getValue().getSuccessDeleteFiles() .stream().map(fileName -> new StoragePath(FSUtils - .getPartitionPathInHadoopPath(basePath, partitionPath).toString(), fileName).toString()) + .constructAbsolutePathInHadoopPath(basePath, partitionPath).toString(), fileName).toString()) .collect(Collectors.toList()); removeFileSlicesForPartition(timeline, instant, entry.getKey(), fullPathList); }); diff --git a/hudi-flink-datasource/hudi-flink/src/test/java/org/apache/hudi/sink/compact/ITTestHoodieFlinkCompactor.java b/hudi-flink-datasource/hudi-flink/src/test/java/org/apache/hudi/sink/compact/ITTestHoodieFlinkCompactor.java index f8091d8dc36..ac4d2ea7783 100644 --- a/hudi-flink-datasource/hudi-flink/src/test/java/org/apache/hudi/sink/compact/ITTestHoodieFlinkCompactor.java +++ b/hudi-flink-datasource/hudi-flink/src/test/java/org/apache/hudi/sink/compact/ITTestHoodieFlinkCompactor.java @@ -427,7 +427,7 @@ public class ITTestHoodieFlinkCompactor { FSUtils.getAllPartitionPaths(HoodieFlinkEngineContext.DEFAULT, metaClient.getBasePath(), false, false).forEach( partition -> { try { - storage.listDirectEntries(FSUtils.getPartitionPath(metaClient.getBasePathV2(), partition)) + storage.listDirectEntries(FSUtils.constructAbsolutePath(metaClient.getBasePathV2(), partition)) .stream() .filter(f -> FSUtils.isBaseFile(new Path(f.getPath().toUri()))) .forEach(f -> { diff --git a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/IncrementalRelation.scala b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/IncrementalRelation.scala index d83e4172556..cb5803dfe5e 100644 --- a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/IncrementalRelation.scala +++ b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/IncrementalRelation.scala @@ -161,7 +161,7 @@ class IncrementalRelation(val sqlContext: SQLContext, fromBytes(metaClient.getActiveTimeline.getInstantDetails(instant).get, classOf[HoodieReplaceCommitMetadata]) replaceMetadata.getPartitionToReplaceFileIds.entrySet().flatMap { entry => entry.getValue.map { e => - val fullPath = FSUtils.getPartitionPath(basePath, entry.getKey).toString + val fullPath = FSUtils.constructAbsolutePath(basePath, entry.getKey).toString (e, fullPath) } } diff --git a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RepairAddpartitionmetaProcedure.scala 
b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RepairAddpartitionmetaProcedure.scala index 03ef6cc3f54..3ae183101e8 100644 --- a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RepairAddpartitionmetaProcedure.scala +++ b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RepairAddpartitionmetaProcedure.scala @@ -62,7 +62,7 @@ class RepairAddpartitionmetaProcedure extends BaseProcedure with ProcedureBuilde val rows = new util.ArrayList[Row](partitionPaths.size) for (partition <- partitionPaths) { - val partitionPath: StoragePath = FSUtils.getPartitionPath(basePath, partition) + val partitionPath: StoragePath = FSUtils.constructAbsolutePath(basePath, partition) var isPresent = "Yes" var action = "None" if (!HoodiePartitionMetadata.hasPartitionMetadata(metaClient.getStorage, partitionPath)) { diff --git a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RepairMigratePartitionMetaProcedure.scala b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RepairMigratePartitionMetaProcedure.scala index 07fd7c92a68..4edb95c0cfc 100644 --- a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RepairMigratePartitionMetaProcedure.scala +++ b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RepairMigratePartitionMetaProcedure.scala @@ -67,7 +67,7 @@ class RepairMigratePartitionMetaProcedure extends BaseProcedure with ProcedureBu val rows = new util.ArrayList[Row](partitionPaths.size) for (partitionPath <- partitionPaths) { - val partition: StoragePath = FSUtils.getPartitionPath(tablePath, partitionPath) + val partition: StoragePath = FSUtils.constructAbsolutePath(tablePath, partitionPath) val textFormatFile: Option[StoragePath] = HoodiePartitionMetadata.textFormatMetaPathIfExists( metaClient.getStorage, partition) val baseFormatFile: Option[StoragePath] = HoodiePartitionMetadata.baseFormatMetaPathIfExists( diff --git a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ShowInvalidParquetProcedure.scala b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ShowInvalidParquetProcedure.scala index 0abb050ca2b..8758537a800 100644 --- a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ShowInvalidParquetProcedure.scala +++ b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ShowInvalidParquetProcedure.scala @@ -53,7 +53,7 @@ class ShowInvalidParquetProcedure extends BaseProcedure with ProcedureBuilder { val serHadoopConf = new SerializableConfiguration(jsc.hadoopConfiguration()) javaRdd.rdd.map(part => { val fs = HadoopFSUtils.getFs(new Path(srcPath), serHadoopConf.get()) - FSUtils.getAllDataFilesInPartition(fs, FSUtils.getPartitionPathInHadoopPath(srcPath, part)) + FSUtils.getAllDataFilesInPartition(fs, FSUtils.constructAbsolutePathInHadoopPath(srcPath, part)) }).flatMap(_.toList) .filter(status => { val filePath = status.getPath diff --git a/hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/functional/TestSparkConsistentBucketClustering.java b/hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/functional/TestSparkConsistentBucketClustering.java index 5910bcb0899..96e4a8f0ce4 100644 --- 
a/hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/functional/TestSparkConsistentBucketClustering.java +++ b/hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/functional/TestSparkConsistentBucketClustering.java @@ -189,7 +189,7 @@ public class TestSparkConsistentBucketClustering extends HoodieSparkClientTestHa Arrays.stream(dataGen.getPartitionPaths()).forEach(p -> { if (!isCommitFilePresent) { StoragePath metadataPath = - FSUtils.getPartitionPath(table.getMetaClient().getHashingMetadataPath(), p); + FSUtils.constructAbsolutePath(table.getMetaClient().getHashingMetadataPath(), p); try { table.getMetaClient().getStorage().listDirectEntries(metadataPath).forEach(fl -> { if (fl.getPath().getName() diff --git a/hudi-sync/hudi-adb-sync/src/main/java/org/apache/hudi/sync/adb/HoodieAdbJdbcClient.java b/hudi-sync/hudi-adb-sync/src/main/java/org/apache/hudi/sync/adb/HoodieAdbJdbcClient.java index 2c557c35f76..0c4305017f1 100644 --- a/hudi-sync/hudi-adb-sync/src/main/java/org/apache/hudi/sync/adb/HoodieAdbJdbcClient.java +++ b/hudi-sync/hudi-adb-sync/src/main/java/org/apache/hudi/sync/adb/HoodieAdbJdbcClient.java @@ -323,7 +323,7 @@ public class HoodieAdbJdbcClient extends HoodieSyncClient { if (!StringUtils.isNullOrEmpty(str)) { List<String> values = partitionValueExtractor.extractPartitionValuesInPath(str); Path storagePartitionPath = - FSUtils.getPartitionPathInHadoopPath(config.getString(META_SYNC_BASE_PATH), String.join("/", values)); + FSUtils.constructAbsolutePathInHadoopPath(config.getString(META_SYNC_BASE_PATH), String.join("/", values)); String fullStoragePartitionPath = Path.getPathWithoutSchemeAndAuthority(storagePartitionPath).toUri().getPath(); partitions.put(values, fullStoragePartitionPath); @@ -359,7 +359,7 @@ public class HoodieAdbJdbcClient extends HoodieSyncClient { .append(tableName).append("`").append(" add if not exists "); for (String partition : partitions) { String partitionClause = getPartitionClause(partition); - Path partitionPath = FSUtils.getPartitionPathInHadoopPath(config.getString(META_SYNC_BASE_PATH), partition); + Path partitionPath = FSUtils.constructAbsolutePathInHadoopPath(config.getString(META_SYNC_BASE_PATH), partition); String fullPartitionPathStr = config.generateAbsolutePathStr(partitionPath); sqlBuilder.append(" partition (").append(partitionClause).append(") location '") .append(fullPartitionPathStr).append("' "); @@ -376,7 +376,7 @@ public class HoodieAdbJdbcClient extends HoodieSyncClient { String alterTable = "alter table `" + tableName + "`"; for (String partition : partitions) { String partitionClause = getPartitionClause(partition); - Path partitionPath = FSUtils.getPartitionPathInHadoopPath(config.getString(META_SYNC_BASE_PATH), partition); + Path partitionPath = FSUtils.constructAbsolutePathInHadoopPath(config.getString(META_SYNC_BASE_PATH), partition); String fullPartitionPathStr = config.generateAbsolutePathStr(partitionPath); String changePartition = alterTable + " add if not exists partition (" + partitionClause + ") location '" + fullPartitionPathStr + "'"; @@ -455,13 +455,13 @@ public class HoodieAdbJdbcClient extends HoodieSyncClient { List<PartitionEvent> events = new ArrayList<>(); for (String storagePartition : partitionStoragePartitions) { Path storagePartitionPath = - FSUtils.getPartitionPathInHadoopPath(config.getString(META_SYNC_BASE_PATH), storagePartition); + FSUtils.constructAbsolutePathInHadoopPath(config.getString(META_SYNC_BASE_PATH), storagePartition); String fullStoragePartitionPath = 
Path.getPathWithoutSchemeAndAuthority(storagePartitionPath).toUri().getPath();
       // Check if the partition values or if hdfs path is the same
       List<String> storagePartitionValues = partitionValueExtractor.extractPartitionValuesInPath(storagePartition);
       if (config.getBoolean(ADB_SYNC_USE_HIVE_STYLE_PARTITIONING)) {
         String partition = String.join("/", storagePartitionValues);
-        storagePartitionPath = FSUtils.getPartitionPathInHadoopPath(config.getString(META_SYNC_BASE_PATH), partition);
+        storagePartitionPath = FSUtils.constructAbsolutePathInHadoopPath(config.getString(META_SYNC_BASE_PATH), partition);
         fullStoragePartitionPath = Path.getPathWithoutSchemeAndAuthority(storagePartitionPath).toUri().getPath();
       }
       if (!storagePartitionValues.isEmpty()) {
diff --git a/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/ddl/HMSDDLExecutor.java b/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/ddl/HMSDDLExecutor.java
index 2f82aa2c006..b5471079524 100644
--- a/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/ddl/HMSDDLExecutor.java
+++ b/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/ddl/HMSDDLExecutor.java
@@ -205,7 +205,7 @@ public class HMSDDLExecutor implements DDLExecutor {
         partitionSd.setOutputFormat(sd.getOutputFormat());
         partitionSd.setSerdeInfo(sd.getSerdeInfo());
         String fullPartitionPath =
-            FSUtils.getPartitionPathInHadoopPath(syncConfig.getString(META_SYNC_BASE_PATH), x).toString();
+            FSUtils.constructAbsolutePathInHadoopPath(syncConfig.getString(META_SYNC_BASE_PATH), x).toString();
         List<String> partitionValues = partitionValueExtractor.extractPartitionValuesInPath(x);
         partitionSd.setLocation(fullPartitionPath);
         partitionList.add(new Partition(partitionValues, databaseName, tableName, 0, 0, partitionSd, null));
@@ -229,7 +229,7 @@ public class HMSDDLExecutor implements DDLExecutor {
     try {
       StorageDescriptor sd = client.getTable(databaseName, tableName).getSd();
       List<Partition> partitionList = changedPartitions.stream().map(partition -> {
-        Path partitionPath = FSUtils.getPartitionPathInHadoopPath(syncConfig.getString(META_SYNC_BASE_PATH), partition);
+        Path partitionPath = FSUtils.constructAbsolutePathInHadoopPath(syncConfig.getString(META_SYNC_BASE_PATH), partition);
         String partitionScheme = partitionPath.toUri().getScheme();
         String fullPartitionPath = StorageSchemes.HDFS.getScheme().equals(partitionScheme) ?
             FSUtils.getDFSFullPartitionPath(syncConfig.getHadoopFileSystem(), partitionPath) : partitionPath.toString();
diff --git a/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/ddl/QueryBasedDDLExecutor.java b/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/ddl/QueryBasedDDLExecutor.java
index e3b2b913944..194f99705bf 100644
--- a/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/ddl/QueryBasedDDLExecutor.java
+++ b/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/ddl/QueryBasedDDLExecutor.java
@@ -162,7 +162,7 @@ public abstract class QueryBasedDDLExecutor implements DDLExecutor {
     for (int i = 0; i < partitions.size(); i++) {
       String partitionClause = getPartitionClause(partitions.get(i));
       String fullPartitionPath =
-          FSUtils.getPartitionPathInHadoopPath(config.getString(META_SYNC_BASE_PATH), partitions.get(i)).toString();
+          FSUtils.constructAbsolutePathInHadoopPath(config.getString(META_SYNC_BASE_PATH), partitions.get(i)).toString();
       alterSQL.append(" PARTITION (").append(partitionClause).append(") LOCATION '").append(fullPartitionPath)
           .append("' ");
       if ((i + 1) % batchSyncPartitionNum == 0) {
@@ -211,7 +211,7 @@ public abstract class QueryBasedDDLExecutor implements DDLExecutor {
     String alterTable = "ALTER TABLE " + HIVE_ESCAPE_CHARACTER + tableName + HIVE_ESCAPE_CHARACTER;
     for (String partition : partitions) {
       String partitionClause = getPartitionClause(partition);
-      Path partitionPath = FSUtils.getPartitionPathInHadoopPath(config.getString(META_SYNC_BASE_PATH), partition);
+      Path partitionPath = FSUtils.constructAbsolutePathInHadoopPath(config.getString(META_SYNC_BASE_PATH), partition);
       String partitionScheme = partitionPath.toUri().getScheme();
       String fullPartitionPath = StorageSchemes.HDFS.getScheme().equals(partitionScheme) ?
           FSUtils.getDFSFullPartitionPath(config.getHadoopFileSystem(), partitionPath) : partitionPath.toString();
diff --git a/hudi-sync/hudi-hive-sync/src/test/java/org/apache/hudi/hive/TestHiveSyncTool.java b/hudi-sync/hudi-hive-sync/src/test/java/org/apache/hudi/hive/TestHiveSyncTool.java
index ef9d43794d6..a755c5ba4f2 100644
--- a/hudi-sync/hudi-hive-sync/src/test/java/org/apache/hudi/hive/TestHiveSyncTool.java
+++ b/hudi-sync/hudi-hive-sync/src/test/java/org/apache/hudi/hive/TestHiveSyncTool.java
@@ -357,7 +357,7 @@ public class TestHiveSyncTool {
     // it and generate a partition update event for it.
     ddlExecutor.runSQL("ALTER TABLE `" + HiveTestUtil.TABLE_NAME
         + "` PARTITION (`datestr`='2050-01-01') SET LOCATION '"
-        + FSUtils.getPartitionPathInHadoopPath(basePath, "2050/1/1").toString() + "'");
+        + FSUtils.constructAbsolutePathInHadoopPath(basePath, "2050/1/1").toString() + "'");

     hivePartitions = hiveClient.getAllPartitions(HiveTestUtil.TABLE_NAME);
     List<String> writtenPartitionsSince = hiveClient.getWrittenPartitionsSince(Option.empty(), Option.empty());
diff --git a/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncClient.java b/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncClient.java
index 582f8ec2999..b2c26781d21 100644
--- a/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncClient.java
+++ b/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncClient.java
@@ -161,7 +161,7 @@ public abstract class HoodieSyncClient implements HoodieMetaSyncOperations, Auto
     List<PartitionEvent> events = new ArrayList<>();
     for (String storagePartition : allPartitionsOnStorage) {
       Path storagePartitionPath =
-          FSUtils.getPartitionPathInHadoopPath(config.getString(META_SYNC_BASE_PATH), storagePartition);
+          FSUtils.constructAbsolutePathInHadoopPath(config.getString(META_SYNC_BASE_PATH), storagePartition);
       String fullStoragePartitionPath = Path.getPathWithoutSchemeAndAuthority(storagePartitionPath).toUri().getPath();
       // Check if the partition values or if hdfs path is the same
       List<String> storagePartitionValues = partitionValueExtractor.extractPartitionValuesInPath(storagePartition);
@@ -205,7 +205,7 @@ public abstract class HoodieSyncClient implements HoodieMetaSyncOperations, Auto
     List<PartitionEvent> events = new ArrayList<>();
     for (String storagePartition : writtenPartitionsOnStorage) {
       Path storagePartitionPath =
-          FSUtils.getPartitionPathInHadoopPath(config.getString(META_SYNC_BASE_PATH), storagePartition);
+          FSUtils.constructAbsolutePathInHadoopPath(config.getString(META_SYNC_BASE_PATH), storagePartition);
       String fullStoragePartitionPath = Path.getPathWithoutSchemeAndAuthority(storagePartitionPath).toUri().getPath();
       // Check if the partition values or if hdfs path is the same
       List<String> storagePartitionValues = partitionValueExtractor.extractPartitionValuesInPath(storagePartition);
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDataTableUtils.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDataTableUtils.java
index 64079f18380..7647f93c899 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDataTableUtils.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDataTableUtils.java
@@ -38,7 +38,7 @@ public class HoodieDataTableUtils {
                                       String basePath) throws IOException {
     List<String> allPartitionPaths = tableMetadata.getAllPartitionPaths()
         .stream().map(partitionPath ->
-            FSUtils.getPartitionPathInHadoopPath(basePath, partitionPath).toString())
+            FSUtils.constructAbsolutePathInHadoopPath(basePath, partitionPath).toString())
         .collect(Collectors.toList());
     return tableMetadata.getAllFilesInPartitions(allPartitionPaths).values().stream()
         .map(fileStatuses ->
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java
index 992d3e0fd16..8a2ded37fd5 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java
@@ -664,7 +664,7 @@ public class HoodieMetadataTableValidator implements Serializable {
   @VisibleForTesting
   Option<String> getPartitionCreationInstant(HoodieStorage storage, String basePath, String partition) {
     HoodiePartitionMetadata hoodiePartitionMetadata =
-        new HoodiePartitionMetadata(storage, FSUtils.getPartitionPath(basePath, partition));
+        new HoodiePartitionMetadata(storage, FSUtils.constructAbsolutePath(basePath, partition));
     return hoodiePartitionMetadata.readPartitionCreatedCommitTime();
   }
@@ -681,7 +681,7 @@ public class HoodieMetadataTableValidator implements Serializable {
     // ignore partitions created by uncommitted ingestion.
     return allPartitionPathsFromFS.stream().parallel().filter(part -> {
       HoodiePartitionMetadata hoodiePartitionMetadata =
-          new HoodiePartitionMetadata(storage, FSUtils.getPartitionPath(basePath, part));
+          new HoodiePartitionMetadata(storage, FSUtils.constructAbsolutePath(basePath, part));
       Option<String> instantOption = hoodiePartitionMetadata.readPartitionCreatedCommitTime();
       if (instantOption.isPresent()) {
         String instantTime = instantOption.get();
@@ -1403,7 +1403,7 @@ public class HoodieMetadataTableValidator implements Serializable {
     return baseFileNameList.stream().flatMap(filename ->
             new ParquetUtils().readRangeFromParquetMetadata(
                 metaClient.getHadoopConf(),
-                new StoragePath(FSUtils.getPartitionPath(metaClient.getBasePathV2(), partitionPath), filename),
+                new StoragePath(FSUtils.constructAbsolutePath(metaClient.getBasePathV2(), partitionPath), filename),
                 allColumnNameList).stream())
         .sorted(new HoodieColumnRangeMetadataComparator())
         .collect(Collectors.toList());
@@ -1445,7 +1445,7 @@ public class HoodieMetadataTableValidator implements Serializable {
   private Option<BloomFilterData> readBloomFilterFromFile(String partitionPath, String filename) {
     StoragePath path = new StoragePath(
-        FSUtils.getPartitionPath(metaClient.getBasePathV2(), partitionPath).toString(), filename);
+        FSUtils.constructAbsolutePath(metaClient.getBasePathV2(), partitionPath).toString(), filename);
     BloomFilter bloomFilter;
     HoodieConfig hoodieConfig = new HoodieConfig();
     hoodieConfig.setValue(HoodieReaderConfig.USE_NATIVE_HFILE_READER,
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieSnapshotCopier.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieSnapshotCopier.java
index 68567b290fd..b7dcacb97e3 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieSnapshotCopier.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieSnapshotCopier.java
@@ -125,7 +125,7 @@ public class HoodieSnapshotCopier implements Serializable {
         // also need to copy over partition metadata
         StoragePath partitionMetaFile = HoodiePartitionMetadata.getPartitionMetafilePath(storage1,
-            FSUtils.getPartitionPath(baseDir, partition)).get();
+            FSUtils.constructAbsolutePath(baseDir, partition)).get();
         if (storage1.exists(partitionMetaFile)) {
           filePaths.add(new Tuple2<>(partition, partitionMetaFile.toString()));
         }
@@ -136,7 +136,7 @@ public class HoodieSnapshotCopier implements Serializable {
       context.foreach(filesToCopy, tuple -> {
         String partition = tuple._1();
         Path sourceFilePath = new Path(tuple._2());
-        Path toPartitionPath = FSUtils.getPartitionPathInHadoopPath(outputDir, partition);
+        Path toPartitionPath = FSUtils.constructAbsolutePathInHadoopPath(outputDir, partition);
         FileSystem ifs = HadoopFSUtils.getFs(baseDir, serConf.newCopy());
         if (!ifs.exists(toPartitionPath)) {
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieSnapshotExporter.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieSnapshotExporter.java
index c3bedcfc46a..ca94de1ff44 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieSnapshotExporter.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieSnapshotExporter.java
@@ -216,7 +216,7 @@ public class HoodieSnapshotExporter {
       // also need to copy over partition metadata
      HoodieStorage storage = HoodieStorageUtils.getStorage(cfg.sourceBasePath, serConf.newCopy());
       StoragePath partitionMetaFile = HoodiePartitionMetadata.getPartitionMetafilePath(storage,
-          FSUtils.getPartitionPath(cfg.sourceBasePath, partition)).get();
+          FSUtils.constructAbsolutePath(cfg.sourceBasePath, partition)).get();
       if (storage.exists(partitionMetaFile)) {
         filePaths.add(Pair.of(partition, partitionMetaFile.toString()));
       }
@@ -226,7 +226,7 @@ public class HoodieSnapshotExporter {
     context.foreach(partitionAndFileList, partitionAndFile -> {
       String partition = partitionAndFile.getLeft();
       Path sourceFilePath = new Path(partitionAndFile.getRight());
-      Path toPartitionPath = FSUtils.getPartitionPathInHadoopPath(cfg.targetOutputPath, partition);
+      Path toPartitionPath = FSUtils.constructAbsolutePathInHadoopPath(cfg.targetOutputPath, partition);
       FileSystem executorSourceFs = HadoopFSUtils.getFs(cfg.sourceBasePath, serConf.newCopy());
       FileSystem executorOutputFs = HadoopFSUtils.getFs(cfg.targetOutputPath, serConf.newCopy());
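
Note: the sketch below is illustrative only and is not part of this commit. It shows the two renamed helpers side by side with made-up inputs; as the call sites in the diff confirm, constructAbsolutePath returns a StoragePath while constructAbsolutePathInHadoopPath returns a Hadoop Path, and both simply join a table base path with a relative partition path. The class name, base path, and partition value here are hypothetical.

    import org.apache.hadoop.fs.Path;
    import org.apache.hudi.common.fs.FSUtils;
    import org.apache.hudi.storage.StoragePath;

    public class ConstructAbsolutePathSketch {
      public static void main(String[] args) {
        String basePath = "s3a://bucket/warehouse/hudi_table"; // hypothetical table base path
        String partition = "2050/01/01";                       // hypothetical relative partition path

        // Previously FSUtils.getPartitionPath(basePath, partition)
        StoragePath storagePath = FSUtils.constructAbsolutePath(basePath, partition);

        // Previously FSUtils.getPartitionPathInHadoopPath(basePath, partition)
        Path hadoopPath = FSUtils.constructAbsolutePathInHadoopPath(basePath, partition);

        // Both resolve to <basePath>/<partition>
        System.out.println(storagePath); // s3a://bucket/warehouse/hudi_table/2050/01/01
        System.out.println(hadoopPath);  // s3a://bucket/warehouse/hudi_table/2050/01/01
      }
    }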
