xuzifu666 commented on code in PR #9875:
URL: https://github.com/apache/hudi/pull/9875#discussion_r1362323212
##########
hudi-common/src/main/java/org/apache/hudi/metadata/FileSystemBackedTableMetadata.java:
##########
@@ -157,52 +156,66 @@ private List<String>
getPartitionPathWithPathPrefixUsingFilterExpression(String
// TODO: Get the parallelism from HoodieWriteConfig
int listingParallelism = Math.min(DEFAULT_LISTING_PARALLELISM,
pathsToList.size());
- // List all directories in parallel:
- // if current dictionary contains PartitionMetadata, add it to result
- // if current dictionary does not contain PartitionMetadata, add its
subdirectory to queue to be processed.
+ // List all directories in parallel
engineContext.setJobStatus(this.getClass().getSimpleName(), "Listing all
partitions with prefix " + relativePathPrefix);
- // result below holds a list of pair. first entry in the pair optionally
holds the deduced list of partitions.
- // and second entry holds optionally a directory path to be processed
further.
- List<Pair<Option<String>, Option<Path>>> result =
engineContext.flatMap(pathsToList, path -> {
+ List<FileStatus> dirToFileListing = engineContext.flatMap(pathsToList,
path -> {
FileSystem fileSystem = path.getFileSystem(hadoopConf.get());
- if (HoodiePartitionMetadata.hasPartitionMetadata(fileSystem, path)) {
- return
Stream.of(Pair.of(Option.of(FSUtils.getRelativePartitionPath(dataBasePath.get(),
path)), Option.empty()));
- }
- return Arrays.stream(fileSystem.listStatus(path))
- .filter(status -> status.isDirectory() &&
!status.getPath().getName().equals(HoodieTableMetaClient.METAFOLDER_NAME))
- .map(status -> Pair.of(Option.empty(),
Option.of(status.getPath())));
+ return Arrays.stream(fileSystem.listStatus(path));
}, listingParallelism);
pathsToList.clear();
- partitionPaths.addAll(result.stream().filter(entry ->
entry.getKey().isPresent())
- .map(entry -> entry.getKey().get())
- .filter(relativePartitionPath -> fullBoundExpr instanceof
Predicates.TrueExpression
- || (Boolean) fullBoundExpr.eval(
- extractPartitionValues(partitionFields, relativePartitionPath,
urlEncodePartitioningEnabled)))
- .collect(Collectors.toList()));
-
- Expression partialBoundExpr;
- // If partitionPaths is nonEmpty, we're already at the last path level,
and all paths
- // are filtered already.
- if (needPushDownExpressions && partitionPaths.isEmpty()) {
- // Here we assume the path level matches the number of partition
columns, so we'll rebuild
- // new schema based on current path level.
- // e.g. partition columns are <region, date, hh>, if we're listing the
second level, then
- // currentSchema would be <region, date>
- // `PartialBindVisitor` will bind reference if it can be found from
`currentSchema`, otherwise
- // will change the expression to `alwaysTrue`. Can see
`PartialBindVisitor` for details.
- Types.RecordType currentSchema =
Types.RecordType.get(partitionFields.fields().subList(0,
++currentPartitionLevel));
- PartialBindVisitor partialBindVisitor = new
PartialBindVisitor(currentSchema, caseSensitive);
- partialBoundExpr = pushedExpr.accept(partialBindVisitor);
- } else {
- partialBoundExpr = Predicates.alwaysTrue();
- }
+ // if current dictionary contains PartitionMetadata, add it to result
+ // if current dictionary does not contain PartitionMetadata, add it to
queue to be processed.
+ int fileListingParallelism = Math.min(DEFAULT_LISTING_PARALLELISM,
dirToFileListing.size());
+ if (!dirToFileListing.isEmpty()) {
+ // result below holds a list of pair. first entry in the pair
optionally holds the deduced list of partitions.
+ // and second entry holds optionally a directory path to be processed
further.
+ engineContext.setJobStatus(this.getClass().getSimpleName(),
"Processing listed partitions");
+ List<Pair<Option<String>, Option<Path>>> result =
engineContext.map(dirToFileListing, fileStatus -> {
+ FileSystem fileSystem =
fileStatus.getPath().getFileSystem(hadoopConf.get());
+ if (fileStatus.isDirectory()) {
+ if (HoodiePartitionMetadata.hasPartitionMetadata(fileSystem,
fileStatus.getPath())) {
+ return
Pair.of(Option.of(FSUtils.getRelativePartitionPath(dataBasePath.get(),
fileStatus.getPath())), Option.empty());
+ } else if
(!fileStatus.getPath().getName().equals(HoodieTableMetaClient.METAFOLDER_NAME))
{
+ return Pair.of(Option.empty(), Option.of(fileStatus.getPath()));
Review Comment:
OK. Consider a case where the /2023-10-13 partition holds 200,000,000,000 records (1 KB
per record), driver memory is 4 GB, and the sub-partition hour ranges from 1 to 24. Then a query
such as `select count(1) from table where day='2023-10-13'` or `select * from table where
day='2023-10-13'` would cause the driver to OOM directly. At the same time, reverting
https://github.com/apache/hudi/pull/9366 lets the query succeed in 1 min. @wecharyu
@danny0405
##########
hudi-common/src/main/java/org/apache/hudi/metadata/FileSystemBackedTableMetadata.java:
##########
@@ -157,52 +156,66 @@ private List<String>
getPartitionPathWithPathPrefixUsingFilterExpression(String
// TODO: Get the parallelism from HoodieWriteConfig
int listingParallelism = Math.min(DEFAULT_LISTING_PARALLELISM,
pathsToList.size());
- // List all directories in parallel:
- // if current dictionary contains PartitionMetadata, add it to result
- // if current dictionary does not contain PartitionMetadata, add its
subdirectory to queue to be processed.
+ // List all directories in parallel
engineContext.setJobStatus(this.getClass().getSimpleName(), "Listing all
partitions with prefix " + relativePathPrefix);
- // result below holds a list of pair. first entry in the pair optionally
holds the deduced list of partitions.
- // and second entry holds optionally a directory path to be processed
further.
- List<Pair<Option<String>, Option<Path>>> result =
engineContext.flatMap(pathsToList, path -> {
+ List<FileStatus> dirToFileListing = engineContext.flatMap(pathsToList,
path -> {
FileSystem fileSystem = path.getFileSystem(hadoopConf.get());
- if (HoodiePartitionMetadata.hasPartitionMetadata(fileSystem, path)) {
- return
Stream.of(Pair.of(Option.of(FSUtils.getRelativePartitionPath(dataBasePath.get(),
path)), Option.empty()));
- }
- return Arrays.stream(fileSystem.listStatus(path))
- .filter(status -> status.isDirectory() &&
!status.getPath().getName().equals(HoodieTableMetaClient.METAFOLDER_NAME))
- .map(status -> Pair.of(Option.empty(),
Option.of(status.getPath())));
+ return Arrays.stream(fileSystem.listStatus(path));
}, listingParallelism);
pathsToList.clear();
- partitionPaths.addAll(result.stream().filter(entry ->
entry.getKey().isPresent())
- .map(entry -> entry.getKey().get())
- .filter(relativePartitionPath -> fullBoundExpr instanceof
Predicates.TrueExpression
- || (Boolean) fullBoundExpr.eval(
- extractPartitionValues(partitionFields, relativePartitionPath,
urlEncodePartitioningEnabled)))
- .collect(Collectors.toList()));
-
- Expression partialBoundExpr;
- // If partitionPaths is nonEmpty, we're already at the last path level,
and all paths
- // are filtered already.
- if (needPushDownExpressions && partitionPaths.isEmpty()) {
- // Here we assume the path level matches the number of partition
columns, so we'll rebuild
- // new schema based on current path level.
- // e.g. partition columns are <region, date, hh>, if we're listing the
second level, then
- // currentSchema would be <region, date>
- // `PartialBindVisitor` will bind reference if it can be found from
`currentSchema`, otherwise
- // will change the expression to `alwaysTrue`. Can see
`PartialBindVisitor` for details.
- Types.RecordType currentSchema =
Types.RecordType.get(partitionFields.fields().subList(0,
++currentPartitionLevel));
- PartialBindVisitor partialBindVisitor = new
PartialBindVisitor(currentSchema, caseSensitive);
- partialBoundExpr = pushedExpr.accept(partialBindVisitor);
- } else {
- partialBoundExpr = Predicates.alwaysTrue();
- }
+ // if current dictionary contains PartitionMetadata, add it to result
+ // if current dictionary does not contain PartitionMetadata, add it to
queue to be processed.
+ int fileListingParallelism = Math.min(DEFAULT_LISTING_PARALLELISM,
dirToFileListing.size());
+ if (!dirToFileListing.isEmpty()) {
+ // result below holds a list of pair. first entry in the pair
optionally holds the deduced list of partitions.
+ // and second entry holds optionally a directory path to be processed
further.
+ engineContext.setJobStatus(this.getClass().getSimpleName(),
"Processing listed partitions");
+ List<Pair<Option<String>, Option<Path>>> result =
engineContext.map(dirToFileListing, fileStatus -> {
+ FileSystem fileSystem =
fileStatus.getPath().getFileSystem(hadoopConf.get());
+ if (fileStatus.isDirectory()) {
+ if (HoodiePartitionMetadata.hasPartitionMetadata(fileSystem,
fileStatus.getPath())) {
+ return
Pair.of(Option.of(FSUtils.getRelativePartitionPath(dataBasePath.get(),
fileStatus.getPath())), Option.empty());
+ } else if
(!fileStatus.getPath().getName().equals(HoodieTableMetaClient.METAFOLDER_NAME))
{
+ return Pair.of(Option.empty(), Option.of(fileStatus.getPath()));
Review Comment:
OK. Consider a case where the /2023-10-13 partition holds 200,000,000,000 records (1 KB
per record), driver memory is 4 GB, and the sub-partition hour ranges from 1 to 24. Then a query
such as `select count(1) from table where day='2023-10-13'` or `select * from table where
day='2023-10-13'` would cause the driver to OOM directly. At the same time, reverting
https://github.com/apache/hudi/pull/9366 lets the query succeed in 1 min. @wecharyu
@danny0405
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]