This is an automated email from the ASF dual-hosted git repository.
yihua pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git
The following commit(s) were added to refs/heads/master by this push:
new 683c4998d6d [HUDI-7628] Rename FSUtils.getPartitionPath to
constructAbsolutePath (#11054)
683c4998d6d is described below
commit 683c4998d6de28605f9e94c05972258a22f2e5b9
Author: Vova Kolmakov <[email protected]>
AuthorDate: Sat Apr 20 08:11:07 2024 +0700
[HUDI-7628] Rename FSUtils.getPartitionPath to constructAbsolutePath
(#11054)
Co-authored-by: Vova Kolmakov <[email protected]>
---
.../hudi/aws/sync/AWSGlueCatalogSyncClient.java | 4 +--
.../apache/hudi/cli/commands/RepairsCommand.java | 4 +--
.../apache/hudi/client/CompactionAdminClient.java | 4 +--
.../index/bucket/ConsistentBucketIndexUtils.java | 8 +++---
.../org/apache/hudi/io/HoodieAppendHandle.java | 2 +-
.../org/apache/hudi/io/HoodieCreateHandle.java | 2 +-
.../java/org/apache/hudi/io/HoodieMergeHandle.java | 2 +-
.../java/org/apache/hudi/io/HoodieWriteHandle.java | 4 +--
.../metadata/HoodieBackedTableMetadataWriter.java | 2 +-
.../hudi/table/action/compact/HoodieCompactor.java | 2 +-
.../table/action/rollback/BaseRollbackHelper.java | 2 +-
.../rollback/ListingBasedRollbackStrategy.java | 6 ++--
.../ttl/strategy/KeepByCreationTimeStrategy.java | 2 +-
.../marker/TimelineServerBasedWriteMarkers.java | 4 +--
.../org/apache/hudi/table/marker/WriteMarkers.java | 2 +-
.../io/storage/row/HoodieRowDataCreateHandle.java | 4 +--
.../hudi/io/storage/row/HoodieRowCreateHandle.java | 4 +--
.../TestSavepointRestoreMergeOnRead.java | 8 +++---
.../java/org/apache/hudi/table/TestCleaner.java | 4 +--
...dieSparkMergeOnReadTableInsertUpdateDelete.java | 2 +-
.../hudi/table/marker/TestWriteMarkersBase.java | 2 +-
.../java/org/apache/hudi/common/fs/FSUtils.java | 32 +++++++++++-----------
.../hudi/common/model/CompactionOperation.java | 2 +-
.../hudi/common/model/HoodieCommitMetadata.java | 8 +++---
.../hudi/common/table/cdc/HoodieCDCExtractor.java | 4 +--
.../clean/CleanMetadataV1MigrationHandler.java | 2 +-
.../clean/CleanPlanV2MigrationHandler.java | 2 +-
.../compaction/CompactionV1MigrationHandler.java | 2 +-
.../table/view/AbstractTableFileSystemView.java | 4 +--
.../IncrementalTimelineSyncFileSystemView.java | 2 +-
.../sink/compact/ITTestHoodieFlinkCompactor.java | 2 +-
.../org/apache/hudi/IncrementalRelation.scala | 2 +-
.../AlterHoodieTableAddPartitionCommand.scala | 2 +-
.../RepairAddpartitionmetaProcedure.scala | 2 +-
.../RepairMigratePartitionMetaProcedure.scala | 2 +-
.../procedures/ShowInvalidParquetProcedure.scala | 2 +-
.../TestSparkConsistentBucketClustering.java | 2 +-
.../apache/hudi/sync/adb/HoodieAdbJdbcClient.java | 10 +++----
.../org/apache/hudi/hive/ddl/HMSDDLExecutor.java | 4 +--
.../hudi/hive/ddl/QueryBasedDDLExecutor.java | 4 +--
.../org/apache/hudi/hive/TestHiveSyncTool.java | 2 +-
.../apache/hudi/sync/common/HoodieSyncClient.java | 4 +--
.../hudi/utilities/HoodieDataTableUtils.java | 2 +-
.../utilities/HoodieMetadataTableValidator.java | 8 +++---
.../hudi/utilities/HoodieSnapshotCopier.java | 4 +--
.../hudi/utilities/HoodieSnapshotExporter.java | 4 +--
46 files changed, 94 insertions(+), 94 deletions(-)
diff --git
a/hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogSyncClient.java
b/hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogSyncClient.java
index e06db9f2ba4..6dda51fd134 100644
---
a/hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogSyncClient.java
+++
b/hudi-aws/src/main/java/org/apache/hudi/aws/sync/AWSGlueCatalogSyncClient.java
@@ -303,7 +303,7 @@ public class AWSGlueCatalogSyncClient extends
HoodieSyncClient {
try {
StorageDescriptor sd = table.storageDescriptor();
List<PartitionInput> partitionInputList =
partitionsToAdd.stream().map(partition -> {
- String fullPartitionPath =
FSUtils.getPartitionPathInHadoopPath(s3aToS3(getBasePath()),
partition).toString();
+ String fullPartitionPath =
FSUtils.constructAbsolutePathInHadoopPath(s3aToS3(getBasePath()),
partition).toString();
List<String> partitionValues =
partitionValueExtractor.extractPartitionValuesInPath(partition);
StorageDescriptor partitionSD = sd.copy(copySd ->
copySd.location(fullPartitionPath));
return
PartitionInput.builder().values(partitionValues).storageDescriptor(partitionSD).build();
@@ -347,7 +347,7 @@ public class AWSGlueCatalogSyncClient extends
HoodieSyncClient {
try {
StorageDescriptor sd = table.storageDescriptor();
List<BatchUpdatePartitionRequestEntry> updatePartitionEntries =
changedPartitions.stream().map(partition -> {
- String fullPartitionPath =
FSUtils.getPartitionPathInHadoopPath(s3aToS3(getBasePath()),
partition).toString();
+ String fullPartitionPath =
FSUtils.constructAbsolutePathInHadoopPath(s3aToS3(getBasePath()),
partition).toString();
List<String> partitionValues =
partitionValueExtractor.extractPartitionValuesInPath(partition);
StorageDescriptor partitionSD = sd.copy(copySd ->
copySd.location(fullPartitionPath));
PartitionInput partitionInput =
PartitionInput.builder().values(partitionValues).storageDescriptor(partitionSD).build();
diff --git
a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/RepairsCommand.java
b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/RepairsCommand.java
index 2bc0f3bb59f..41a801c2f2a 100644
--- a/hudi-cli/src/main/java/org/apache/hudi/cli/commands/RepairsCommand.java
+++ b/hudi-cli/src/main/java/org/apache/hudi/cli/commands/RepairsCommand.java
@@ -128,7 +128,7 @@ public class RepairsCommand {
int ind = 0;
for (String partition : partitionPaths) {
- StoragePath partitionPath = FSUtils.getPartitionPath(basePath,
partition);
+ StoragePath partitionPath = FSUtils.constructAbsolutePath(basePath,
partition);
String[] row = new String[3];
row[0] = partition;
row[1] = "Yes";
@@ -237,7 +237,7 @@ public class RepairsCommand {
int ind = 0;
for (String partitionPath : partitionPaths) {
StoragePath partition =
- FSUtils.getPartitionPath(client.getBasePath(), partitionPath);
+ FSUtils.constructAbsolutePath(client.getBasePath(), partitionPath);
Option<StoragePath> textFormatFile =
HoodiePartitionMetadata.textFormatMetaPathIfExists(HoodieCLI.storage,
partition);
Option<StoragePath> baseFormatFile =
diff --git
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/CompactionAdminClient.java
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/CompactionAdminClient.java
index 85b09a0c516..e3390a91533 100644
---
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/CompactionAdminClient.java
+++
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/CompactionAdminClient.java
@@ -241,7 +241,7 @@ public class CompactionAdminClient extends BaseHoodieClient
{
if (operation.getDataFileName().isPresent()) {
String expPath = metaClient.getStorage()
.getPathInfo(new StoragePath(
- FSUtils.getPartitionPath(metaClient.getBasePath(),
operation.getPartitionPath()),
+ FSUtils.constructAbsolutePath(metaClient.getBasePath(),
operation.getPartitionPath()),
operation.getDataFileName().get()))
.getPath().toString();
ValidationUtils.checkArgument(df.isPresent(),
@@ -254,7 +254,7 @@ public class CompactionAdminClient extends BaseHoodieClient
{
try {
List<StoragePathInfo> pathInfoList = metaClient.getStorage()
.listDirectEntries(new StoragePath(
- FSUtils.getPartitionPath(metaClient.getBasePath(),
operation.getPartitionPath()), dp));
+ FSUtils.constructAbsolutePath(metaClient.getBasePath(),
operation.getPartitionPath()), dp));
ValidationUtils.checkArgument(pathInfoList.size() == 1, "Expect
only 1 file-status");
return new HoodieLogFile(pathInfoList.get(0));
} catch (FileNotFoundException fe) {
diff --git
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/bucket/ConsistentBucketIndexUtils.java
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/bucket/ConsistentBucketIndexUtils.java
index 7a124d25ee9..a90e0db6a06 100644
---
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/bucket/ConsistentBucketIndexUtils.java
+++
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/bucket/ConsistentBucketIndexUtils.java
@@ -108,8 +108,8 @@ public class ConsistentBucketIndexUtils {
*/
public static Option<HoodieConsistentHashingMetadata>
loadMetadata(HoodieTable table, String partition) {
HoodieTableMetaClient metaClient = table.getMetaClient();
- Path metadataPath =
FSUtils.getPartitionPathInHadoopPath(metaClient.getHashingMetadataPath(),
partition);
- Path partitionPath =
FSUtils.getPartitionPathInHadoopPath(metaClient.getBasePathV2().toString(),
partition);
+ Path metadataPath =
FSUtils.constructAbsolutePathInHadoopPath(metaClient.getHashingMetadataPath(),
partition);
+ Path partitionPath =
FSUtils.constructAbsolutePathInHadoopPath(metaClient.getBasePathV2().toString(),
partition);
try {
Predicate<FileStatus> hashingMetaCommitFilePredicate = fileStatus -> {
String filename = fileStatus.getPath().getName();
@@ -186,7 +186,7 @@ public class ConsistentBucketIndexUtils {
*/
public static boolean saveMetadata(HoodieTable table,
HoodieConsistentHashingMetadata metadata, boolean overwrite) {
HoodieStorage storage = table.getMetaClient().getStorage();
- StoragePath dir = FSUtils.getPartitionPath(
+ StoragePath dir = FSUtils.constructAbsolutePath(
table.getMetaClient().getHashingMetadataPath(),
metadata.getPartitionPath());
StoragePath fullPath = new StoragePath(dir, metadata.getFilename());
try (OutputStream out = storage.create(fullPath, overwrite)) {
@@ -267,7 +267,7 @@ public class ConsistentBucketIndexUtils {
* @return true if hashing metadata file is latest else false
*/
private static boolean recommitMetadataFile(HoodieTable table, FileStatus
metaFile, String partition) {
- Path partitionPath = new
Path(FSUtils.getPartitionPath(table.getMetaClient().getBasePathV2(),
partition).toUri());
+ Path partitionPath = new
Path(FSUtils.constructAbsolutePath(table.getMetaClient().getBasePathV2(),
partition).toUri());
String timestamp = getTimestampFromFile(metaFile.getPath().getName());
if (table.getPendingCommitTimeline().containsInstant(timestamp)) {
return false;
diff --git
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieAppendHandle.java
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieAppendHandle.java
index 80303fef953..667cae44afd 100644
---
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieAppendHandle.java
+++
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieAppendHandle.java
@@ -207,7 +207,7 @@ public class HoodieAppendHandle<T, I, K, O> extends
HoodieWriteHandle<T, I, K, O
// Save hoodie partition meta in the partition path
HoodiePartitionMetadata partitionMetadata = new
HoodiePartitionMetadata(storage, instantTime,
new StoragePath(config.getBasePath()),
- FSUtils.getPartitionPath(config.getBasePath(), partitionPath),
+ FSUtils.constructAbsolutePath(config.getBasePath(), partitionPath),
hoodieTable.getPartitionMetafileFormat());
partitionMetadata.trySave();
diff --git
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieCreateHandle.java
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieCreateHandle.java
index 0ad4e212a1a..ce908f89bb6 100644
---
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieCreateHandle.java
+++
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieCreateHandle.java
@@ -98,7 +98,7 @@ public class HoodieCreateHandle<T, I, K, O> extends
HoodieWriteHandle<T, I, K, O
try {
HoodiePartitionMetadata partitionMetadata = new
HoodiePartitionMetadata(storage, instantTime,
new StoragePath(config.getBasePath()),
- FSUtils.getPartitionPath(config.getBasePath(), partitionPath),
+ FSUtils.constructAbsolutePath(config.getBasePath(), partitionPath),
hoodieTable.getPartitionMetafileFormat());
partitionMetadata.trySave();
createMarkerFile(partitionPath,
diff --git
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieMergeHandle.java
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieMergeHandle.java
index 48e25e44ab7..bcf2e8c648a 100644
---
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieMergeHandle.java
+++
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieMergeHandle.java
@@ -175,7 +175,7 @@ public class HoodieMergeHandle<T, I, K, O> extends
HoodieWriteHandle<T, I, K, O>
HoodiePartitionMetadata partitionMetadata = new
HoodiePartitionMetadata(storage, instantTime,
new StoragePath(config.getBasePath()),
- FSUtils.getPartitionPath(config.getBasePath(), partitionPath),
+ FSUtils.constructAbsolutePath(config.getBasePath(), partitionPath),
hoodieTable.getPartitionMetafileFormat());
partitionMetadata.trySave();
diff --git
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieWriteHandle.java
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieWriteHandle.java
index aff264deba2..1feeca51578 100644
---
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieWriteHandle.java
+++
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/HoodieWriteHandle.java
@@ -113,7 +113,7 @@ public abstract class HoodieWriteHandle<T, I, K, O> extends
HoodieIOHandle<T, I,
}
public StoragePath makeNewPath(String partitionPath) {
- StoragePath path = FSUtils.getPartitionPath(config.getBasePath(),
partitionPath);
+ StoragePath path = FSUtils.constructAbsolutePath(config.getBasePath(),
partitionPath);
try {
if (!storage.exists(path)) {
storage.createDirectory(path); // create a new partition as needed.
@@ -241,7 +241,7 @@ public abstract class HoodieWriteHandle<T, I, K, O> extends
HoodieIOHandle<T, I,
protected HoodieLogFormat.Writer createLogWriter(String deltaCommitTime,
String fileSuffix) {
try {
return HoodieLogFormat.newWriterBuilder()
-
.onParentPath(FSUtils.getPartitionPath(hoodieTable.getMetaClient().getBasePath(),
partitionPath))
+
.onParentPath(FSUtils.constructAbsolutePath(hoodieTable.getMetaClient().getBasePath(),
partitionPath))
.withFileId(fileId)
.withDeltaCommit(deltaCommitTime)
.withFileSize(0L)
diff --git
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java
index d270a143a96..1fa5877c353 100644
---
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java
+++
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java
@@ -792,7 +792,7 @@ public abstract class HoodieBackedTableMetadataWriter<I>
implements HoodieTableM
final HoodieDeleteBlock block = new
HoodieDeleteBlock(Collections.emptyList(), false, blockHeader);
try (HoodieLogFormat.Writer writer = HoodieLogFormat.newWriterBuilder()
-
.onParentPath(FSUtils.getPartitionPath(metadataWriteConfig.getBasePath(),
partitionName))
+
.onParentPath(FSUtils.constructAbsolutePath(metadataWriteConfig.getBasePath(),
partitionName))
.withFileId(fileGroupFileId)
.withDeltaCommit(instantTime)
.withLogVersion(HoodieLogFile.LOGFILE_BASE_VERSION)
diff --git
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/HoodieCompactor.java
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/HoodieCompactor.java
index 9ede03b12cd..9e38410fed9 100644
---
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/HoodieCompactor.java
+++
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/compact/HoodieCompactor.java
@@ -186,7 +186,7 @@ public abstract class HoodieCompactor<T, I, K, O>
implements Serializable {
LOG.info("MaxMemoryPerCompaction => " + maxMemoryPerCompaction);
List<String> logFiles = operation.getDeltaFileNames().stream().map(p ->
- new StoragePath(FSUtils.getPartitionPath(
+ new StoragePath(FSUtils.constructAbsolutePath(
metaClient.getBasePath(), operation.getPartitionPath()),
p).toString())
.collect(toList());
HoodieMergedLogRecordScanner scanner =
HoodieMergedLogRecordScanner.newBuilder()
diff --git
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/BaseRollbackHelper.java
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/BaseRollbackHelper.java
index 66a877d5ee5..af2635f7603 100644
---
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/BaseRollbackHelper.java
+++
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/BaseRollbackHelper.java
@@ -129,7 +129,7 @@ public class BaseRollbackHelper implements Serializable {
String fileId = rollbackRequest.getFileId();
writer = HoodieLogFormat.newWriterBuilder()
- .onParentPath(FSUtils.getPartitionPath(metaClient.getBasePath(),
rollbackRequest.getPartitionPath()))
+
.onParentPath(FSUtils.constructAbsolutePath(metaClient.getBasePath(),
rollbackRequest.getPartitionPath()))
.withFileId(fileId)
.withDeltaCommit(instantToRollback.getTimestamp())
.withStorage(metaClient.getStorage())
diff --git
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/ListingBasedRollbackStrategy.java
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/ListingBasedRollbackStrategy.java
index fd32b83587d..035b1ded888 100644
---
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/ListingBasedRollbackStrategy.java
+++
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/ListingBasedRollbackStrategy.java
@@ -191,7 +191,7 @@ public class ListingBasedRollbackStrategy implements
BaseRollbackPlanActionExecu
return false;
};
return ((FileSystem) metaClient.getStorage().getFileSystem())
- .listStatus(FSUtils.getPartitionPathInHadoopPath(config.getBasePath(),
partitionPath),
+
.listStatus(FSUtils.constructAbsolutePathInHadoopPath(config.getBasePath(),
partitionPath),
filter);
}
@@ -221,7 +221,7 @@ public class ListingBasedRollbackStrategy implements
BaseRollbackPlanActionExecu
}
return false;
};
- return
fs.listStatus(FSUtils.getPartitionPathInHadoopPath(config.getBasePath(),
partitionPath), filter);
+ return
fs.listStatus(FSUtils.constructAbsolutePathInHadoopPath(config.getBasePath(),
partitionPath), filter);
}
private FileStatus[] fetchFilesFromInstant(HoodieInstant instantToRollback,
String partitionPath, String basePath,
@@ -282,7 +282,7 @@ public class ListingBasedRollbackStrategy implements
BaseRollbackPlanActionExecu
}
private static Path[] listFilesToBeDeleted(String basePath, String
partitionPath) {
- return new Path[] {FSUtils.getPartitionPathInHadoopPath(basePath,
partitionPath)};
+ return new Path[] {FSUtils.constructAbsolutePathInHadoopPath(basePath,
partitionPath)};
}
private static Path[] getFilesFromCommitMetadata(String basePath,
HoodieCommitMetadata commitMetadata, String partitionPath) {
diff --git
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/ttl/strategy/KeepByCreationTimeStrategy.java
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/ttl/strategy/KeepByCreationTimeStrategy.java
index 4abf292da1e..038b82f6dc5 100644
---
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/ttl/strategy/KeepByCreationTimeStrategy.java
+++
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/ttl/strategy/KeepByCreationTimeStrategy.java
@@ -41,7 +41,7 @@ public class KeepByCreationTimeStrategy extends
KeepByTimeStrategy {
HoodieTableMetaClient metaClient = hoodieTable.getMetaClient();
return partitionPathsForTTL.stream().parallel().filter(part -> {
HoodiePartitionMetadata hoodiePartitionMetadata =
- new HoodiePartitionMetadata(metaClient.getStorage(),
FSUtils.getPartitionPath(metaClient.getBasePath(), part));
+ new HoodiePartitionMetadata(metaClient.getStorage(),
FSUtils.constructAbsolutePath(metaClient.getBasePath(), part));
Option<String> instantOption =
hoodiePartitionMetadata.readPartitionCreatedCommitTime();
if (instantOption.isPresent()) {
String instantTime = instantOption.get();
diff --git
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/TimelineServerBasedWriteMarkers.java
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/TimelineServerBasedWriteMarkers.java
index c1cbc6f6573..0a502c3bbfa 100644
---
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/TimelineServerBasedWriteMarkers.java
+++
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/TimelineServerBasedWriteMarkers.java
@@ -137,7 +137,7 @@ public class TimelineServerBasedWriteMarkers extends
WriteMarkers {
LOG.info("[timeline-server-based] Created marker file " + partitionPath +
"/" + markerFileName
+ " in " + timer.endTimer() + " ms");
if (success) {
- return Option.of(new StoragePath(FSUtils.getPartitionPath(markerDirPath,
partitionPath), markerFileName));
+ return Option.of(new
StoragePath(FSUtils.constructAbsolutePath(markerDirPath, partitionPath),
markerFileName));
} else {
return Option.empty();
}
@@ -156,7 +156,7 @@ public class TimelineServerBasedWriteMarkers extends
WriteMarkers {
+ " in " + timer.endTimer() + " ms");
if (success) {
- return Option.of(new StoragePath(FSUtils.getPartitionPath(markerDirPath,
partitionPath), markerFileName));
+ return Option.of(new
StoragePath(FSUtils.constructAbsolutePath(markerDirPath, partitionPath),
markerFileName));
} else {
// this failed may due to early conflict detection, so we need to throw
out.
throw new HoodieEarlyConflictDetectionException(new
ConcurrentModificationException("Early conflict detected but cannot resolve
conflicts for overlapping writes"));
diff --git
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/WriteMarkers.java
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/WriteMarkers.java
index 58abde613a5..65131116dad 100644
---
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/WriteMarkers.java
+++
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/marker/WriteMarkers.java
@@ -181,7 +181,7 @@ public abstract class WriteMarkers implements Serializable {
* @return path of the marker file
*/
protected StoragePath getMarkerPath(String partitionPath, String fileName,
IOType type) {
- StoragePath path = FSUtils.getPartitionPath(markerDirPath, partitionPath);
+ StoragePath path = FSUtils.constructAbsolutePath(markerDirPath,
partitionPath);
String markerFileName = getMarkerFileName(fileName, type);
return new StoragePath(path, markerFileName);
}
diff --git
a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowDataCreateHandle.java
b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowDataCreateHandle.java
index 9da48260d01..683282853de 100644
---
a/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowDataCreateHandle.java
+++
b/hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowDataCreateHandle.java
@@ -103,7 +103,7 @@ public class HoodieRowDataCreateHandle implements
Serializable {
storage,
instantTime,
new StoragePath(writeConfig.getBasePath()),
- FSUtils.getPartitionPath(writeConfig.getBasePath(),
partitionPath),
+ FSUtils.constructAbsolutePath(writeConfig.getBasePath(),
partitionPath),
table.getPartitionMetafileFormat());
partitionMetadata.trySave();
createMarkerFile(partitionPath,
FSUtils.makeBaseFileName(this.instantTime, getWriteToken(), this.fileId,
table.getBaseFileExtension()));
@@ -189,7 +189,7 @@ public class HoodieRowDataCreateHandle implements
Serializable {
private Path makeNewPath(String partitionPath) {
StoragePath path =
- FSUtils.getPartitionPath(writeConfig.getBasePath(), partitionPath);
+ FSUtils.constructAbsolutePath(writeConfig.getBasePath(),
partitionPath);
try {
if (!storage.exists(path)) {
storage.createDirectory(path); // create a new partition as needed.
diff --git
a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowCreateHandle.java
b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowCreateHandle.java
index 890b12899f1..0d164f379fe 100644
---
a/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowCreateHandle.java
+++
b/hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowCreateHandle.java
@@ -141,7 +141,7 @@ public class HoodieRowCreateHandle implements Serializable {
storage,
instantTime,
new StoragePath(writeConfig.getBasePath()),
- FSUtils.getPartitionPath(writeConfig.getBasePath(),
partitionPath),
+ FSUtils.constructAbsolutePath(writeConfig.getBasePath(),
partitionPath),
table.getPartitionMetafileFormat());
partitionMetadata.trySave();
@@ -262,7 +262,7 @@ public class HoodieRowCreateHandle implements Serializable {
}
private static Path makeNewPath(FileSystem fs, String partitionPath, String
fileName, HoodieWriteConfig writeConfig) {
- Path path =
FSUtils.getPartitionPathInHadoopPath(writeConfig.getBasePath(), partitionPath);
+ Path path =
FSUtils.constructAbsolutePathInHadoopPath(writeConfig.getBasePath(),
partitionPath);
try {
if (!fs.exists(path)) {
fs.mkdirs(path); // create a new partition as needed.
diff --git
a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestSavepointRestoreMergeOnRead.java
b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestSavepointRestoreMergeOnRead.java
index 34c1398ba5e..8f57567cbb8 100644
---
a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestSavepointRestoreMergeOnRead.java
+++
b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestSavepointRestoreMergeOnRead.java
@@ -118,7 +118,7 @@ public class TestSavepointRestoreMergeOnRead extends
HoodieClientTestBase {
StoragePathFilter filter = (path) ->
path.toString().contains(finalCompactionCommit);
for (String pPath : dataGen.getPartitionPaths()) {
assertEquals(0, storage.listDirectEntries(
- FSUtils.getPartitionPath(hoodieWriteConfig.getBasePath(), pPath),
+ FSUtils.constructAbsolutePath(hoodieWriteConfig.getBasePath(),
pPath),
filter).size());
}
}
@@ -163,7 +163,7 @@ public class TestSavepointRestoreMergeOnRead extends
HoodieClientTestBase {
StoragePathFilter filter = (path) ->
path.toString().contains(secondCommit);
for (String pPath : dataGen.getPartitionPaths()) {
assertEquals(1, storage.listDirectEntries(
- FSUtils.getPartitionPath(hoodieWriteConfig.getBasePath(),
pPath), filter)
+ FSUtils.constructAbsolutePath(hoodieWriteConfig.getBasePath(),
pPath), filter)
.size());
}
@@ -202,7 +202,7 @@ public class TestSavepointRestoreMergeOnRead extends
HoodieClientTestBase {
filter = (path) -> path.toString().contains(secondCommit);
for (String pPath : dataGen.getPartitionPaths()) {
assertEquals(0, storage.listDirectEntries(
- FSUtils.getPartitionPath(hoodieWriteConfig.getBasePath(),
pPath), filter)
+ FSUtils.constructAbsolutePath(hoodieWriteConfig.getBasePath(),
pPath), filter)
.size());
}
// ensure files matching 1st commit is intact
@@ -210,7 +210,7 @@ public class TestSavepointRestoreMergeOnRead extends
HoodieClientTestBase {
for (String pPath : dataGen.getPartitionPaths()) {
assertEquals(1,
storage.listDirectEntries(
- FSUtils.getPartitionPath(hoodieWriteConfig.getBasePath(), pPath),
+ FSUtils.constructAbsolutePath(hoodieWriteConfig.getBasePath(),
pPath),
filter).size());
}
}
diff --git
a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/TestCleaner.java
b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/TestCleaner.java
index 5c82d8c3bfd..4f344963122 100644
---
a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/TestCleaner.java
+++
b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/TestCleaner.java
@@ -858,9 +858,9 @@ public class TestCleaner extends HoodieCleanerTestBase {
version2Plan.getFilePathsToBeDeletedPerPartition().get(partition1).size());
assertEquals(version1Plan.getFilesToBeDeletedPerPartition().get(partition2).size(),
version2Plan.getFilePathsToBeDeletedPerPartition().get(partition2).size());
- assertEquals(new
Path(FSUtils.getPartitionPathInHadoopPath(metaClient.getBasePath(),
partition1), fileName1).toString(),
+ assertEquals(new
Path(FSUtils.constructAbsolutePathInHadoopPath(metaClient.getBasePath(),
partition1), fileName1).toString(),
version2Plan.getFilePathsToBeDeletedPerPartition().get(partition1).get(0).getFilePath());
- assertEquals(new
Path(FSUtils.getPartitionPathInHadoopPath(metaClient.getBasePath(),
partition2), fileName2).toString(),
+ assertEquals(new
Path(FSUtils.constructAbsolutePathInHadoopPath(metaClient.getBasePath(),
partition2), fileName2).toString(),
version2Plan.getFilePathsToBeDeletedPerPartition().get(partition2).get(0).getFilePath());
// Downgrade and verify version 1 plan
diff --git
a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/functional/TestHoodieSparkMergeOnReadTableInsertUpdateDelete.java
b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/functional/TestHoodieSparkMergeOnReadTableInsertUpdateDelete.java
index 86aa26ab261..880d6e55069 100644
---
a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/functional/TestHoodieSparkMergeOnReadTableInsertUpdateDelete.java
+++
b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/functional/TestHoodieSparkMergeOnReadTableInsertUpdateDelete.java
@@ -385,7 +385,7 @@ public class
TestHoodieSparkMergeOnReadTableInsertUpdateDelete extends SparkClie
HoodieSparkTable.create(config, context()), newCommitTime);
HoodieLogFormat.Writer fakeLogWriter = HoodieLogFormat.newWriterBuilder()
.onParentPath(
- FSUtils.getPartitionPath(config.getBasePath(),
+ FSUtils.constructAbsolutePath(config.getBasePath(),
correctWriteStat.getPartitionPath()))
.withFileId(correctWriteStat.getFileId())
.withDeltaCommit(newCommitTime)
diff --git
a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/marker/TestWriteMarkersBase.java
b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/marker/TestWriteMarkersBase.java
index 7951583ff09..882bfb2f8f8 100644
---
a/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/marker/TestWriteMarkersBase.java
+++
b/hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/table/marker/TestWriteMarkersBase.java
@@ -58,7 +58,7 @@ public abstract class TestWriteMarkersBase extends
HoodieCommonTestHarness {
}
private void createInvalidFile(String partitionPath, String invalidFileName)
{
- StoragePath path = FSUtils.getPartitionPath(markerFolderPath,
partitionPath);
+ StoragePath path = FSUtils.constructAbsolutePath(markerFolderPath,
partitionPath);
StoragePath invalidFilePath = new StoragePath(path, invalidFileName);
try {
storage.create(invalidFilePath, false).close();
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/fs/FSUtils.java
b/hudi-common/src/main/java/org/apache/hudi/common/fs/FSUtils.java
index b875e07032a..4eeee2d8ba5 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/fs/FSUtils.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/fs/FSUtils.java
@@ -634,40 +634,40 @@ public class FSUtils {
return sizeInBytes / (1024 * 1024);
}
- public static Path getPartitionPathInHadoopPath(String basePath, String
partitionPath) {
- if (StringUtils.isNullOrEmpty(partitionPath)) {
+ public static Path constructAbsolutePathInHadoopPath(String basePath, String
relativePartitionPath) {
+ if (StringUtils.isNullOrEmpty(relativePartitionPath)) {
return new Path(basePath);
}
// NOTE: We have to chop leading "/" to make sure Hadoop does not treat it
like
// absolute path
- String properPartitionPath = partitionPath.startsWith("/")
- ? partitionPath.substring(1)
- : partitionPath;
- return getPartitionPath(new CachingPath(basePath), properPartitionPath);
+ String properPartitionPath =
relativePartitionPath.startsWith(PATH_SEPARATOR)
+ ? relativePartitionPath.substring(1)
+ : relativePartitionPath;
+ return constructAbsolutePath(new CachingPath(basePath),
properPartitionPath);
}
- public static StoragePath getPartitionPath(String basePath, String
partitionPath) {
- if (StringUtils.isNullOrEmpty(partitionPath)) {
+ public static StoragePath constructAbsolutePath(String basePath, String
relativePartitionPath) {
+ if (StringUtils.isNullOrEmpty(relativePartitionPath)) {
return new StoragePath(basePath);
}
// NOTE: We have to chop leading "/" to make sure Hadoop does not treat it
like
// absolute path
- String properPartitionPath = partitionPath.startsWith("/")
- ? partitionPath.substring(1)
- : partitionPath;
- return getPartitionPath(new StoragePath(basePath), properPartitionPath);
+ String properPartitionPath =
relativePartitionPath.startsWith(PATH_SEPARATOR)
+ ? relativePartitionPath.substring(1)
+ : relativePartitionPath;
+ return constructAbsolutePath(new StoragePath(basePath),
properPartitionPath);
}
- public static Path getPartitionPath(Path basePath, String partitionPath) {
+ public static Path constructAbsolutePath(Path basePath, String
relativePartitionPath) {
// For non-partitioned table, return only base-path
- return StringUtils.isNullOrEmpty(partitionPath) ? basePath : new
CachingPath(basePath, partitionPath);
+ return StringUtils.isNullOrEmpty(relativePartitionPath) ? basePath : new
CachingPath(basePath, relativePartitionPath);
}
- public static StoragePath getPartitionPath(StoragePath basePath, String
partitionPath) {
+ public static StoragePath constructAbsolutePath(StoragePath basePath, String
relativePartitionPath) {
// For non-partitioned table, return only base-path
- return StringUtils.isNullOrEmpty(partitionPath) ? basePath : new
StoragePath(basePath, partitionPath);
+ return StringUtils.isNullOrEmpty(relativePartitionPath) ? basePath : new
StoragePath(basePath, relativePartitionPath);
}
/**
diff --git
a/hudi-common/src/main/java/org/apache/hudi/common/model/CompactionOperation.java
b/hudi-common/src/main/java/org/apache/hudi/common/model/CompactionOperation.java
index b94d75fe5a4..483fb61d497 100644
---
a/hudi-common/src/main/java/org/apache/hudi/common/model/CompactionOperation.java
+++
b/hudi-common/src/main/java/org/apache/hudi/common/model/CompactionOperation.java
@@ -119,7 +119,7 @@ public class CompactionOperation implements Serializable {
public Option<HoodieBaseFile> getBaseFile(String basePath, String
partitionPath) {
Option<BaseFile> externalBaseFile = bootstrapFilePath.map(BaseFile::new);
- StoragePath dirPath = FSUtils.getPartitionPath(basePath, partitionPath);
+ StoragePath dirPath = FSUtils.constructAbsolutePath(basePath,
partitionPath);
return dataFileName.map(df -> {
return externalBaseFile.map(ext -> new HoodieBaseFile(new
StoragePath(dirPath, df).toString(), ext))
.orElseGet(() -> new HoodieBaseFile(new StoragePath(dirPath,
df).toString()));
diff --git
a/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieCommitMetadata.java
b/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieCommitMetadata.java
index aaf20c689f3..ba41835da5b 100644
---
a/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieCommitMetadata.java
+++
b/hudi-common/src/main/java/org/apache/hudi/common/model/HoodieCommitMetadata.java
@@ -137,7 +137,7 @@ public class HoodieCommitMetadata implements Serializable {
HashMap<String, String> fullPaths = new HashMap<>();
for (Map.Entry<String, String> entry :
getFileIdAndRelativePaths().entrySet()) {
String fullPath = entry.getValue() != null
- ? FSUtils.getPartitionPath(basePath, entry.getValue()).toString()
+ ? FSUtils.constructAbsolutePath(basePath,
entry.getValue()).toString()
: null;
fullPaths.put(entry.getKey(), fullPath);
}
@@ -149,7 +149,7 @@ public class HoodieCommitMetadata implements Serializable {
if (getPartitionToWriteStats().get(partitionPath) != null) {
for (HoodieWriteStat stat :
getPartitionToWriteStats().get(partitionPath)) {
if ((stat.getFileId() != null)) {
- String fullPath = FSUtils.getPartitionPathInHadoopPath(basePath,
stat.getPath()).toString();
+ String fullPath =
FSUtils.constructAbsolutePathInHadoopPath(basePath, stat.getPath()).toString();
fullPaths.add(fullPath);
}
}
@@ -186,7 +186,7 @@ public class HoodieCommitMetadata implements Serializable {
for (HoodieWriteStat stat : stats) {
String relativeFilePath = stat.getPath();
StoragePath fullPath = relativeFilePath != null
- ? FSUtils.getPartitionPath(basePath, relativeFilePath) : null;
+ ? FSUtils.constructAbsolutePath(basePath, relativeFilePath) : null;
if (fullPath != null) {
long blockSize =
HoodieStorageUtils.getStorage(fullPath.toString(),
hadoopConf).getDefaultBlockSize(fullPath);
@@ -220,7 +220,7 @@ public class HoodieCommitMetadata implements Serializable {
for (HoodieWriteStat stat : stats) {
String relativeFilePath = stat.getPath();
StoragePath fullPath =
- relativeFilePath != null ? FSUtils.getPartitionPath(basePath,
+ relativeFilePath != null ? FSUtils.constructAbsolutePath(basePath,
relativeFilePath) : null;
if (fullPath != null) {
StoragePathInfo pathInfo =
diff --git
a/hudi-common/src/main/java/org/apache/hudi/common/table/cdc/HoodieCDCExtractor.java
b/hudi-common/src/main/java/org/apache/hudi/common/table/cdc/HoodieCDCExtractor.java
index eea2ebbbc81..fc838bcc1e5 100644
---
a/hudi-common/src/main/java/org/apache/hudi/common/table/cdc/HoodieCDCExtractor.java
+++
b/hudi-common/src/main/java/org/apache/hudi/common/table/cdc/HoodieCDCExtractor.java
@@ -184,7 +184,7 @@ public class HoodieCDCExtractor {
try {
List<StoragePathInfo> touchedFiles = new ArrayList<>();
for (String touchedPartition : touchedPartitions) {
- StoragePath partitionPath = FSUtils.getPartitionPath(basePath,
touchedPartition);
+ StoragePath partitionPath = FSUtils.constructAbsolutePath(basePath,
touchedPartition);
touchedFiles.addAll(storage.listDirectEntries(partitionPath));
}
return new HoodieTableFileSystemView(
@@ -313,7 +313,7 @@ public class HoodieCDCExtractor {
HoodieFileGroupId fgId,
HoodieInstant instant,
String currentLogFile) {
- StoragePath partitionPath = FSUtils.getPartitionPath(basePath,
fgId.getPartitionPath());
+ StoragePath partitionPath = FSUtils.constructAbsolutePath(basePath,
fgId.getPartitionPath());
if (instant.getAction().equals(DELTA_COMMIT_ACTION)) {
String currentLogFileName = new StoragePath(currentLogFile).getName();
Option<Pair<String, List<String>>> fileSliceOpt =
diff --git
a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/clean/CleanMetadataV1MigrationHandler.java
b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/clean/CleanMetadataV1MigrationHandler.java
index 1f7b5792eb0..41e3dc79399 100644
---
a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/clean/CleanMetadataV1MigrationHandler.java
+++
b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/clean/CleanMetadataV1MigrationHandler.java
@@ -99,6 +99,6 @@ public class CleanMetadataV1MigrationHandler extends
AbstractMigratorBase<Hoodie
return fileName;
}
- return new Path(FSUtils.getPartitionPath(basePath, partitionPath),
fileName).toString();
+ return new Path(FSUtils.constructAbsolutePath(basePath, partitionPath),
fileName).toString();
}
}
diff --git
a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/clean/CleanPlanV2MigrationHandler.java
b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/clean/CleanPlanV2MigrationHandler.java
index 7317991af37..99b5185ba73 100644
---
a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/clean/CleanPlanV2MigrationHandler.java
+++
b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/clean/CleanPlanV2MigrationHandler.java
@@ -55,7 +55,7 @@ public class CleanPlanV2MigrationHandler extends
AbstractMigratorBase<HoodieClea
Map<String, List<HoodieCleanFileInfo>> filePathsPerPartition =
plan.getFilesToBeDeletedPerPartition().entrySet().stream().map(e ->
Pair.of(e.getKey(), e.getValue().stream()
.map(v -> new HoodieCleanFileInfo(
- new
Path(FSUtils.getPartitionPathInHadoopPath(metaClient.getBasePath(),
e.getKey()), v).toString(), false))
+ new
Path(FSUtils.constructAbsolutePathInHadoopPath(metaClient.getBasePath(),
e.getKey()), v).toString(), false))
.collect(Collectors.toList()))).collect(Collectors.toMap(Pair::getKey,
Pair::getValue));
return new HoodieCleanerPlan(plan.getEarliestInstantToRetain(),
plan.getLastCompletedCommitTimestamp(),
plan.getPolicy(), new HashMap<>(), VERSION, filePathsPerPartition, new
ArrayList<>(), Collections.emptyMap());
diff --git
a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/compaction/CompactionV1MigrationHandler.java
b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/compaction/CompactionV1MigrationHandler.java
index 17488a637ce..31905b1ad4b 100644
---
a/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/compaction/CompactionV1MigrationHandler.java
+++
b/hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/compaction/CompactionV1MigrationHandler.java
@@ -78,6 +78,6 @@ public class CompactionV1MigrationHandler extends
AbstractMigratorBase<HoodieCom
return fileName;
}
- return new Path(FSUtils.getPartitionPath(basePath, partitionPath),
fileName).toString();
+ return new Path(FSUtils.constructAbsolutePath(basePath, partitionPath),
fileName).toString();
}
}
diff --git
a/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java
b/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java
index 0ef2a602b76..c3a7906ee9f 100644
---
a/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java
+++
b/hudi-common/src/main/java/org/apache/hudi/common/table/view/AbstractTableFileSystemView.java
@@ -370,7 +370,7 @@ public abstract class AbstractTableFileSystemView
implements SyncableFileSystemV
// Pairs of relative partition path and absolute partition path
List<Pair<String, StoragePath>> absolutePartitionPathList =
partitionSet.stream()
.map(partition -> Pair.of(
- partition,
FSUtils.getPartitionPath(metaClient.getBasePathV2(), partition)))
+ partition,
FSUtils.constructAbsolutePath(metaClient.getBasePathV2(), partition)))
.collect(Collectors.toList());
long beginLsTs = System.currentTimeMillis();
Map<Pair<String, StoragePath>, List<StoragePathInfo>> pathInfoMap =
@@ -442,7 +442,7 @@ public abstract class AbstractTableFileSystemView
implements SyncableFileSystemV
*/
private List<StoragePathInfo> getAllFilesInPartition(String
relativePartitionPath)
throws IOException {
- StoragePath partitionPath =
FSUtils.getPartitionPath(metaClient.getBasePathV2(),
+ StoragePath partitionPath =
FSUtils.constructAbsolutePath(metaClient.getBasePathV2(),
relativePartitionPath);
long beginLsTs = System.currentTimeMillis();
List<StoragePathInfo> pathInfoList = listPartition(partitionPath);
diff --git
a/hudi-common/src/main/java/org/apache/hudi/common/table/view/IncrementalTimelineSyncFileSystemView.java
b/hudi-common/src/main/java/org/apache/hudi/common/table/view/IncrementalTimelineSyncFileSystemView.java
index 83de15ae1ad..d4e7fbfd1fd 100644
---
a/hudi-common/src/main/java/org/apache/hudi/common/table/view/IncrementalTimelineSyncFileSystemView.java
+++
b/hudi-common/src/main/java/org/apache/hudi/common/table/view/IncrementalTimelineSyncFileSystemView.java
@@ -367,7 +367,7 @@ public abstract class IncrementalTimelineSyncFileSystemView
extends AbstractTabl
final String partitionPath = entry.getValue().getPartitionPath();
List<String> fullPathList = entry.getValue().getSuccessDeleteFiles()
.stream().map(fileName -> new StoragePath(FSUtils
- .getPartitionPathInHadoopPath(basePath,
partitionPath).toString(), fileName).toString())
+ .constructAbsolutePathInHadoopPath(basePath,
partitionPath).toString(), fileName).toString())
.collect(Collectors.toList());
removeFileSlicesForPartition(timeline, instant, entry.getKey(),
fullPathList);
});
diff --git
a/hudi-flink-datasource/hudi-flink/src/test/java/org/apache/hudi/sink/compact/ITTestHoodieFlinkCompactor.java
b/hudi-flink-datasource/hudi-flink/src/test/java/org/apache/hudi/sink/compact/ITTestHoodieFlinkCompactor.java
index 4f3f6445c59..11bc1112c3f 100644
---
a/hudi-flink-datasource/hudi-flink/src/test/java/org/apache/hudi/sink/compact/ITTestHoodieFlinkCompactor.java
+++
b/hudi-flink-datasource/hudi-flink/src/test/java/org/apache/hudi/sink/compact/ITTestHoodieFlinkCompactor.java
@@ -424,7 +424,7 @@ public class ITTestHoodieFlinkCompactor {
FSUtils.getAllPartitionPaths(HoodieFlinkEngineContext.DEFAULT,
metaClient.getBasePath(), false).forEach(
partition -> {
try {
-
storage.listDirectEntries(FSUtils.getPartitionPath(metaClient.getBasePathV2(),
partition))
+
storage.listDirectEntries(FSUtils.constructAbsolutePath(metaClient.getBasePathV2(),
partition))
.stream()
.filter(f -> FSUtils.isBaseFile(new Path(f.getPath().toUri())))
.forEach(f -> {
diff --git
a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/IncrementalRelation.scala
b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/IncrementalRelation.scala
index 8616bbe7e66..92864ee9df5 100644
---
a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/IncrementalRelation.scala
+++
b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/IncrementalRelation.scala
@@ -161,7 +161,7 @@ class IncrementalRelation(val sqlContext: SQLContext,
fromBytes(metaClient.getActiveTimeline.getInstantDetails(instant).get,
classOf[HoodieReplaceCommitMetadata])
replaceMetadata.getPartitionToReplaceFileIds.entrySet().flatMap {
entry =>
entry.getValue.map { e =>
- val fullPath = FSUtils.getPartitionPath(basePath,
entry.getKey).toString
+ val fullPath = FSUtils.constructAbsolutePath(basePath,
entry.getKey).toString
(e, fullPath)
}
}
diff --git
a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/spark/sql/hudi/command/AlterHoodieTableAddPartitionCommand.scala
b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/spark/sql/hudi/command/AlterHoodieTableAddPartitionCommand.scala
index a5868e30d4c..8e45ff094aa 100644
---
a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/spark/sql/hudi/command/AlterHoodieTableAddPartitionCommand.scala
+++
b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/spark/sql/hudi/command/AlterHoodieTableAddPartitionCommand.scala
@@ -66,7 +66,7 @@ case class AlterHoodieTableAddPartitionCommand(
val format = hoodieCatalogTable.tableConfig.getPartitionMetafileFormat
val (partitionMetadata, parts) = normalizedSpecs.map { spec =>
val partitionPath = makePartitionPath(hoodieCatalogTable, spec)
- val fullPartitionPath: StoragePath = FSUtils.getPartitionPath(basePath,
partitionPath)
+ val fullPartitionPath: StoragePath =
FSUtils.constructAbsolutePath(basePath, partitionPath)
val metadata = if (HoodiePartitionMetadata.hasPartitionMetadata(storage,
fullPartitionPath)) {
if (!ifNotExists) {
throw new AnalysisException(s"Partition metadata already exists for
path: $fullPartitionPath")
diff --git
a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RepairAddpartitionmetaProcedure.scala
b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RepairAddpartitionmetaProcedure.scala
index 03ef6cc3f54..3ae183101e8 100644
---
a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RepairAddpartitionmetaProcedure.scala
+++
b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RepairAddpartitionmetaProcedure.scala
@@ -62,7 +62,7 @@ class RepairAddpartitionmetaProcedure extends BaseProcedure
with ProcedureBuilde
val rows = new util.ArrayList[Row](partitionPaths.size)
for (partition <- partitionPaths) {
- val partitionPath: StoragePath = FSUtils.getPartitionPath(basePath,
partition)
+ val partitionPath: StoragePath = FSUtils.constructAbsolutePath(basePath,
partition)
var isPresent = "Yes"
var action = "None"
if (!HoodiePartitionMetadata.hasPartitionMetadata(metaClient.getStorage,
partitionPath)) {
diff --git
a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RepairMigratePartitionMetaProcedure.scala
b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RepairMigratePartitionMetaProcedure.scala
index eef95d8f4b5..4ab1a5438bd 100644
---
a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RepairMigratePartitionMetaProcedure.scala
+++
b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/RepairMigratePartitionMetaProcedure.scala
@@ -67,7 +67,7 @@ class RepairMigratePartitionMetaProcedure extends
BaseProcedure with ProcedureBu
val rows = new util.ArrayList[Row](partitionPaths.size)
for (partitionPath <- partitionPaths) {
- val partition: StoragePath = FSUtils.getPartitionPath(tablePath,
partitionPath)
+ val partition: StoragePath = FSUtils.constructAbsolutePath(tablePath,
partitionPath)
val textFormatFile: Option[StoragePath] =
HoodiePartitionMetadata.textFormatMetaPathIfExists(
metaClient.getStorage, partition)
val baseFormatFile: Option[StoragePath] =
HoodiePartitionMetadata.baseFormatMetaPathIfExists(
diff --git
a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ShowInvalidParquetProcedure.scala
b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ShowInvalidParquetProcedure.scala
index 8a720bd7886..7c7a62c8ab1 100644
---
a/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ShowInvalidParquetProcedure.scala
+++
b/hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/procedures/ShowInvalidParquetProcedure.scala
@@ -55,7 +55,7 @@ class ShowInvalidParquetProcedure extends BaseProcedure with
ProcedureBuilder {
val serHadoopConf = new
SerializableConfiguration(jsc.hadoopConfiguration())
val parquetRdd = javaRdd.rdd.map(part => {
val fs = HadoopFSUtils.getFs(new Path(srcPath), serHadoopConf.get())
- FSUtils.getAllDataFilesInPartition(fs,
FSUtils.getPartitionPathInHadoopPath(srcPath, part))
+ FSUtils.getAllDataFilesInPartition(fs,
FSUtils.constructAbsolutePathInHadoopPath(srcPath, part))
}).flatMap(_.toList)
.filter(status => {
val filePath = status.getPath
diff --git
a/hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/functional/TestSparkConsistentBucketClustering.java
b/hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/functional/TestSparkConsistentBucketClustering.java
index ee10fc8a606..7a9fb1d911c 100644
---
a/hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/functional/TestSparkConsistentBucketClustering.java
+++
b/hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/functional/TestSparkConsistentBucketClustering.java
@@ -186,7 +186,7 @@ public class TestSparkConsistentBucketClustering extends
HoodieSparkClientTestHa
Arrays.stream(dataGen.getPartitionPaths()).forEach(p -> {
if (!isCommitFilePresent) {
StoragePath metadataPath =
-
FSUtils.getPartitionPath(table.getMetaClient().getHashingMetadataPath(), p);
+
FSUtils.constructAbsolutePath(table.getMetaClient().getHashingMetadataPath(),
p);
try {
table.getMetaClient().getStorage().listDirectEntries(metadataPath).forEach(fl
-> {
if (fl.getPath().getName()
diff --git
a/hudi-sync/hudi-adb-sync/src/main/java/org/apache/hudi/sync/adb/HoodieAdbJdbcClient.java
b/hudi-sync/hudi-adb-sync/src/main/java/org/apache/hudi/sync/adb/HoodieAdbJdbcClient.java
index 2c557c35f76..0c4305017f1 100644
---
a/hudi-sync/hudi-adb-sync/src/main/java/org/apache/hudi/sync/adb/HoodieAdbJdbcClient.java
+++
b/hudi-sync/hudi-adb-sync/src/main/java/org/apache/hudi/sync/adb/HoodieAdbJdbcClient.java
@@ -323,7 +323,7 @@ public class HoodieAdbJdbcClient extends HoodieSyncClient {
if (!StringUtils.isNullOrEmpty(str)) {
List<String> values =
partitionValueExtractor.extractPartitionValuesInPath(str);
Path storagePartitionPath =
-
FSUtils.getPartitionPathInHadoopPath(config.getString(META_SYNC_BASE_PATH),
String.join("/", values));
+
FSUtils.constructAbsolutePathInHadoopPath(config.getString(META_SYNC_BASE_PATH),
String.join("/", values));
String fullStoragePartitionPath =
Path.getPathWithoutSchemeAndAuthority(storagePartitionPath).toUri().getPath();
partitions.put(values, fullStoragePartitionPath);
@@ -359,7 +359,7 @@ public class HoodieAdbJdbcClient extends HoodieSyncClient {
.append(tableName).append("`").append(" add if not exists ");
for (String partition : partitions) {
String partitionClause = getPartitionClause(partition);
- Path partitionPath =
FSUtils.getPartitionPathInHadoopPath(config.getString(META_SYNC_BASE_PATH),
partition);
+ Path partitionPath =
FSUtils.constructAbsolutePathInHadoopPath(config.getString(META_SYNC_BASE_PATH),
partition);
String fullPartitionPathStr =
config.generateAbsolutePathStr(partitionPath);
sqlBuilder.append(" partition (").append(partitionClause).append(")
location '")
.append(fullPartitionPathStr).append("' ");
@@ -376,7 +376,7 @@ public class HoodieAdbJdbcClient extends HoodieSyncClient {
String alterTable = "alter table `" + tableName + "`";
for (String partition : partitions) {
String partitionClause = getPartitionClause(partition);
- Path partitionPath =
FSUtils.getPartitionPathInHadoopPath(config.getString(META_SYNC_BASE_PATH),
partition);
+ Path partitionPath =
FSUtils.constructAbsolutePathInHadoopPath(config.getString(META_SYNC_BASE_PATH),
partition);
String fullPartitionPathStr =
config.generateAbsolutePathStr(partitionPath);
String changePartition = alterTable + " add if not exists partition (" +
partitionClause
+ ") location '" + fullPartitionPathStr + "'";
@@ -455,13 +455,13 @@ public class HoodieAdbJdbcClient extends HoodieSyncClient
{
List<PartitionEvent> events = new ArrayList<>();
for (String storagePartition : partitionStoragePartitions) {
Path storagePartitionPath =
-
FSUtils.getPartitionPathInHadoopPath(config.getString(META_SYNC_BASE_PATH),
storagePartition);
+
FSUtils.constructAbsolutePathInHadoopPath(config.getString(META_SYNC_BASE_PATH),
storagePartition);
String fullStoragePartitionPath =
Path.getPathWithoutSchemeAndAuthority(storagePartitionPath).toUri().getPath();
// Check if the partition values or if hdfs path is the same
List<String> storagePartitionValues =
partitionValueExtractor.extractPartitionValuesInPath(storagePartition);
if (config.getBoolean(ADB_SYNC_USE_HIVE_STYLE_PARTITIONING)) {
String partition = String.join("/", storagePartitionValues);
- storagePartitionPath =
FSUtils.getPartitionPathInHadoopPath(config.getString(META_SYNC_BASE_PATH),
partition);
+ storagePartitionPath =
FSUtils.constructAbsolutePathInHadoopPath(config.getString(META_SYNC_BASE_PATH),
partition);
fullStoragePartitionPath =
Path.getPathWithoutSchemeAndAuthority(storagePartitionPath).toUri().getPath();
}
if (!storagePartitionValues.isEmpty()) {
diff --git
a/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/ddl/HMSDDLExecutor.java
b/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/ddl/HMSDDLExecutor.java
index 2f82aa2c006..b5471079524 100644
---
a/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/ddl/HMSDDLExecutor.java
+++
b/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/ddl/HMSDDLExecutor.java
@@ -205,7 +205,7 @@ public class HMSDDLExecutor implements DDLExecutor {
partitionSd.setOutputFormat(sd.getOutputFormat());
partitionSd.setSerdeInfo(sd.getSerdeInfo());
String fullPartitionPath =
-
FSUtils.getPartitionPathInHadoopPath(syncConfig.getString(META_SYNC_BASE_PATH),
x).toString();
+
FSUtils.constructAbsolutePathInHadoopPath(syncConfig.getString(META_SYNC_BASE_PATH),
x).toString();
List<String> partitionValues =
partitionValueExtractor.extractPartitionValuesInPath(x);
partitionSd.setLocation(fullPartitionPath);
partitionList.add(new Partition(partitionValues, databaseName,
tableName, 0, 0, partitionSd, null));
@@ -229,7 +229,7 @@ public class HMSDDLExecutor implements DDLExecutor {
try {
StorageDescriptor sd = client.getTable(databaseName, tableName).getSd();
List<Partition> partitionList = changedPartitions.stream().map(partition
-> {
- Path partitionPath =
FSUtils.getPartitionPathInHadoopPath(syncConfig.getString(META_SYNC_BASE_PATH),
partition);
+ Path partitionPath =
FSUtils.constructAbsolutePathInHadoopPath(syncConfig.getString(META_SYNC_BASE_PATH),
partition);
String partitionScheme = partitionPath.toUri().getScheme();
String fullPartitionPath =
StorageSchemes.HDFS.getScheme().equals(partitionScheme)
?
FSUtils.getDFSFullPartitionPath(syncConfig.getHadoopFileSystem(),
partitionPath) : partitionPath.toString();
diff --git
a/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/ddl/QueryBasedDDLExecutor.java
b/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/ddl/QueryBasedDDLExecutor.java
index e3b2b913944..194f99705bf 100644
---
a/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/ddl/QueryBasedDDLExecutor.java
+++
b/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/ddl/QueryBasedDDLExecutor.java
@@ -162,7 +162,7 @@ public abstract class QueryBasedDDLExecutor implements
DDLExecutor {
for (int i = 0; i < partitions.size(); i++) {
String partitionClause = getPartitionClause(partitions.get(i));
String fullPartitionPath =
-
FSUtils.getPartitionPathInHadoopPath(config.getString(META_SYNC_BASE_PATH),
partitions.get(i)).toString();
+
FSUtils.constructAbsolutePathInHadoopPath(config.getString(META_SYNC_BASE_PATH),
partitions.get(i)).toString();
alterSQL.append(" PARTITION (").append(partitionClause).append(")
LOCATION '").append(fullPartitionPath)
.append("' ");
if ((i + 1) % batchSyncPartitionNum == 0) {
@@ -211,7 +211,7 @@ public abstract class QueryBasedDDLExecutor implements
DDLExecutor {
String alterTable = "ALTER TABLE " + HIVE_ESCAPE_CHARACTER + tableName +
HIVE_ESCAPE_CHARACTER;
for (String partition : partitions) {
String partitionClause = getPartitionClause(partition);
- Path partitionPath =
FSUtils.getPartitionPathInHadoopPath(config.getString(META_SYNC_BASE_PATH),
partition);
+ Path partitionPath =
FSUtils.constructAbsolutePathInHadoopPath(config.getString(META_SYNC_BASE_PATH),
partition);
String partitionScheme = partitionPath.toUri().getScheme();
String fullPartitionPath =
StorageSchemes.HDFS.getScheme().equals(partitionScheme)
? FSUtils.getDFSFullPartitionPath(config.getHadoopFileSystem(),
partitionPath) : partitionPath.toString();
diff --git
a/hudi-sync/hudi-hive-sync/src/test/java/org/apache/hudi/hive/TestHiveSyncTool.java
b/hudi-sync/hudi-hive-sync/src/test/java/org/apache/hudi/hive/TestHiveSyncTool.java
index f54da832d64..87032fd2179 100644
---
a/hudi-sync/hudi-hive-sync/src/test/java/org/apache/hudi/hive/TestHiveSyncTool.java
+++
b/hudi-sync/hudi-hive-sync/src/test/java/org/apache/hudi/hive/TestHiveSyncTool.java
@@ -360,7 +360,7 @@ public class TestHiveSyncTool {
// it and generate a partition update event for it.
ddlExecutor.runSQL("ALTER TABLE `" + HiveTestUtil.TABLE_NAME
+ "` PARTITION (`datestr`='2050-01-01') SET LOCATION '"
- + FSUtils.getPartitionPathInHadoopPath(basePath,
"2050/1/1").toString() + "'");
+ + FSUtils.constructAbsolutePathInHadoopPath(basePath,
"2050/1/1").toString() + "'");
hivePartitions = hiveClient.getAllPartitions(HiveTestUtil.TABLE_NAME);
List<String> writtenPartitionsSince =
hiveClient.getWrittenPartitionsSince(Option.empty(), Option.empty());
diff --git
a/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncClient.java
b/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncClient.java
index 7f399c9e787..687f8b497b1 100644
---
a/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncClient.java
+++
b/hudi-sync/hudi-sync-common/src/main/java/org/apache/hudi/sync/common/HoodieSyncClient.java
@@ -159,7 +159,7 @@ public abstract class HoodieSyncClient implements
HoodieMetaSyncOperations, Auto
List<PartitionEvent> events = new ArrayList<>();
for (String storagePartition : allPartitionsOnStorage) {
Path storagePartitionPath =
-
FSUtils.getPartitionPathInHadoopPath(config.getString(META_SYNC_BASE_PATH),
storagePartition);
+
FSUtils.constructAbsolutePathInHadoopPath(config.getString(META_SYNC_BASE_PATH),
storagePartition);
String fullStoragePartitionPath =
Path.getPathWithoutSchemeAndAuthority(storagePartitionPath).toUri().getPath();
// Check if the partition values or if hdfs path is the same
List<String> storagePartitionValues =
partitionValueExtractor.extractPartitionValuesInPath(storagePartition);
@@ -203,7 +203,7 @@ public abstract class HoodieSyncClient implements
HoodieMetaSyncOperations, Auto
List<PartitionEvent> events = new ArrayList<>();
for (String storagePartition : writtenPartitionsOnStorage) {
Path storagePartitionPath =
-
FSUtils.getPartitionPathInHadoopPath(config.getString(META_SYNC_BASE_PATH),
storagePartition);
+
FSUtils.constructAbsolutePathInHadoopPath(config.getString(META_SYNC_BASE_PATH),
storagePartition);
String fullStoragePartitionPath =
Path.getPathWithoutSchemeAndAuthority(storagePartitionPath).toUri().getPath();
// Check if the partition values or if hdfs path is the same
List<String> storagePartitionValues =
partitionValueExtractor.extractPartitionValuesInPath(storagePartition);
diff --git
a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDataTableUtils.java
b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDataTableUtils.java
index 64079f18380..7647f93c899 100644
---
a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDataTableUtils.java
+++
b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieDataTableUtils.java
@@ -38,7 +38,7 @@ public class HoodieDataTableUtils {
String basePath) throws IOException {
List<String> allPartitionPaths = tableMetadata.getAllPartitionPaths()
.stream().map(partitionPath ->
- FSUtils.getPartitionPathInHadoopPath(basePath,
partitionPath).toString())
+ FSUtils.constructAbsolutePathInHadoopPath(basePath,
partitionPath).toString())
.collect(Collectors.toList());
return
tableMetadata.getAllFilesInPartitions(allPartitionPaths).values().stream()
.map(fileStatuses ->
diff --git
a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java
b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java
index eff6185d37f..b5b2d7c0485 100644
---
a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java
+++
b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java
@@ -664,7 +664,7 @@ public class HoodieMetadataTableValidator implements
Serializable {
@VisibleForTesting
Option<String> getPartitionCreationInstant(HoodieStorage storage, String
basePath, String partition) {
HoodiePartitionMetadata hoodiePartitionMetadata =
- new HoodiePartitionMetadata(storage,
FSUtils.getPartitionPath(basePath, partition));
+ new HoodiePartitionMetadata(storage,
FSUtils.constructAbsolutePath(basePath, partition));
return hoodiePartitionMetadata.readPartitionCreatedCommitTime();
}
@@ -681,7 +681,7 @@ public class HoodieMetadataTableValidator implements
Serializable {
// ignore partitions created by uncommitted ingestion.
return allPartitionPathsFromFS.stream().parallel().filter(part -> {
HoodiePartitionMetadata hoodiePartitionMetadata =
- new HoodiePartitionMetadata(storage,
FSUtils.getPartitionPath(basePath, part));
+ new HoodiePartitionMetadata(storage,
FSUtils.constructAbsolutePath(basePath, part));
Option<String> instantOption =
hoodiePartitionMetadata.readPartitionCreatedCommitTime();
if (instantOption.isPresent()) {
String instantTime = instantOption.get();
@@ -1402,7 +1402,7 @@ public class HoodieMetadataTableValidator implements Serializable {
return baseFileNameList.stream().flatMap(filename ->
new ParquetUtils().readRangeFromParquetMetadata(
metaClient.getHadoopConf(),
-            new StoragePath(FSUtils.getPartitionPath(metaClient.getBasePathV2(), partitionPath), filename),
+            new StoragePath(FSUtils.constructAbsolutePath(metaClient.getBasePathV2(), partitionPath), filename),
allColumnNameList).stream())
.sorted(new HoodieColumnRangeMetadataComparator())
.collect(Collectors.toList());
@@ -1444,7 +1444,7 @@ public class HoodieMetadataTableValidator implements Serializable {
   private Option<BloomFilterData> readBloomFilterFromFile(String partitionPath, String filename) {
StoragePath path = new StoragePath(
-        FSUtils.getPartitionPath(metaClient.getBasePathV2(), partitionPath).toString(), filename);
+        FSUtils.constructAbsolutePath(metaClient.getBasePathV2(), partitionPath).toString(), filename);
BloomFilter bloomFilter;
HoodieConfig hoodieConfig = new HoodieConfig();
hoodieConfig.setValue(HoodieReaderConfig.USE_NATIVE_HFILE_READER,
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieSnapshotCopier.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieSnapshotCopier.java
index 54034b38fad..195829b01fa 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieSnapshotCopier.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieSnapshotCopier.java
@@ -122,7 +122,7 @@ public class HoodieSnapshotCopier implements Serializable {
// also need to copy over partition metadata
       StoragePath partitionMetaFile = HoodiePartitionMetadata.getPartitionMetafilePath(storage1,
- FSUtils.getPartitionPath(baseDir, partition)).get();
+ FSUtils.constructAbsolutePath(baseDir, partition)).get();
if (storage1.exists(partitionMetaFile)) {
filePaths.add(new Tuple2<>(partition, partitionMetaFile.toString()));
}
@@ -133,7 +133,7 @@ public class HoodieSnapshotCopier implements Serializable {
context.foreach(filesToCopy, tuple -> {
String partition = tuple._1();
Path sourceFilePath = new Path(tuple._2());
-      Path toPartitionPath = FSUtils.getPartitionPathInHadoopPath(outputDir, partition);
+      Path toPartitionPath = FSUtils.constructAbsolutePathInHadoopPath(outputDir, partition);
FileSystem ifs = HadoopFSUtils.getFs(baseDir, serConf.newCopy());
if (!ifs.exists(toPartitionPath)) {
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieSnapshotExporter.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieSnapshotExporter.java
index 4e2ef7e33e9..3cf78803699 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieSnapshotExporter.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieSnapshotExporter.java
@@ -216,7 +216,7 @@ public class HoodieSnapshotExporter {
// also need to copy over partition metadata
     HoodieStorage storage = HoodieStorageUtils.getStorage(cfg.sourceBasePath, serConf.newCopy());
     StoragePath partitionMetaFile = HoodiePartitionMetadata.getPartitionMetafilePath(storage,
- FSUtils.getPartitionPath(cfg.sourceBasePath, partition)).get();
+ FSUtils.constructAbsolutePath(cfg.sourceBasePath, partition)).get();
if (storage.exists(partitionMetaFile)) {
filePaths.add(Pair.of(partition, partitionMetaFile.toString()));
}
@@ -226,7 +226,7 @@ public class HoodieSnapshotExporter {
context.foreach(partitionAndFileList, partitionAndFile -> {
String partition = partitionAndFile.getLeft();
Path sourceFilePath = new Path(partitionAndFile.getRight());
-      Path toPartitionPath = FSUtils.getPartitionPathInHadoopPath(cfg.targetOutputPath, partition);
+      Path toPartitionPath = FSUtils.constructAbsolutePathInHadoopPath(cfg.targetOutputPath, partition);
       FileSystem executorSourceFs = HadoopFSUtils.getFs(cfg.sourceBasePath, serConf.newCopy());
       FileSystem executorOutputFs = HadoopFSUtils.getFs(cfg.targetOutputPath, serConf.newCopy());