yihua commented on code in PR #12395:
URL: https://github.com/apache/hudi/pull/12395#discussion_r1866521960
##########
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/clustering/run/strategy/MultipleSparkJobExecutionStrategy.java:
##########
@@ -420,64 +449,131 @@ writeConfig, new StoragePath(bootstrapFilePath)),
partitionFields,
*/
   private Dataset<Row> readRecordsForGroupAsRow(JavaSparkContext jsc,
                                                 HoodieClusteringGroup clusteringGroup,
-                                                String instantTime) {
+                                                String instantTime,
+                                                Schema tableSchemaWithMetaFields) {
     List<ClusteringOperation> clusteringOps = clusteringGroup.getSlices().stream()
         .map(ClusteringOperation::create).collect(Collectors.toList());
-    boolean hasLogFiles = clusteringOps.stream().anyMatch(op -> op.getDeltaFilePaths().size() > 0);
-    SQLContext sqlContext = new SQLContext(jsc.sc());
-
-    StoragePath[] baseFilePaths = clusteringOps
-        .stream()
-        .map(op -> {
-          ArrayList<String> readPaths = new ArrayList<>();
-          // NOTE: for bootstrap tables, only need to handle data file path (which is the skeleton file) because
-          // HoodieBootstrapRelation takes care of stitching if there is bootstrap path for the skeleton file.
-          if (op.getDataFilePath() != null) {
-            readPaths.add(op.getDataFilePath());
+
+    if (getWriteConfig().getBooleanOrDefault(HoodieReaderConfig.FILE_GROUP_READER_ENABLED)) {
+      String basePath = getWriteConfig().getBasePath();
+      // construct supporting cast that executors might need
+      boolean usePosition = getWriteConfig().getBooleanOrDefault(MERGE_USE_RECORD_POSITIONS);
+      String internalSchemaStr = getWriteConfig().getInternalSchema();
+      boolean isInternalSchemaPresent = !StringUtils.isNullOrEmpty(internalSchemaStr);
+      SerializableSchema serializableTableSchemaWithMetaFields = new SerializableSchema(tableSchemaWithMetaFields);
+
+      // broadcast reader context.
+      SparkFileGroupReaderBroadcastManager broadcastManager = new SparkFileGroupReaderBroadcastManager(getEngineContext());
+      broadcastManager.prepareAndBroadcast();
+      StructType sparkSchemaWithMetaFields = AvroConversionUtils.convertAvroSchemaToStructType(tableSchemaWithMetaFields);
+
+      RDD<InternalRow> internalRowRDD = jsc.parallelize(clusteringOps, clusteringOps.size()).flatMap(new FlatMapFunction<ClusteringOperation, InternalRow>() {
+        @Override
+        public Iterator<InternalRow> call(ClusteringOperation clusteringOperation) throws Exception {
+          // construct FileSlice to pass into FileGroupReader
+          String partitionPath = clusteringOperation.getPartitionPath();
+          HoodieBaseFile baseFile = new HoodieBaseFile(new StoragePath(basePath, clusteringOperation.getDataFilePath()).toString());
+          List<HoodieLogFile> logFiles = clusteringOperation.getDeltaFilePaths().stream().map(p ->
+                  new HoodieLogFile(new StoragePath(FSUtils.constructAbsolutePath(
+                      basePath, partitionPath), p)))
+              .collect(Collectors.toList());
+          FileSlice fileSlice = new FileSlice(new HoodieFileGroupId(partitionPath, clusteringOperation.getFileId()), baseFile.getCommitTime(), baseFile, logFiles);
+
+          // instantiate other supporting cast
+          Schema readerSchema = serializableTableSchemaWithMetaFields.get();
+          Option<InternalSchema> internalSchemaOption = Option.empty();
+          if (isInternalSchemaPresent) {
+            internalSchemaOption = SerDeHelper.fromJson(internalSchemaStr);
           }
-          return readPaths;
-        })
-        .flatMap(Collection::stream)
-        .filter(path -> !path.isEmpty())
-        .map(StoragePath::new)
-        .toArray(StoragePath[]::new);
-
-    HashMap<String, String> params = new HashMap<>();
-    if (hasLogFiles) {
-      params.put("hoodie.datasource.query.type", "snapshot");
-    } else {
-      params.put("hoodie.datasource.query.type", "read_optimized");
-    }
+          Option<HoodieReaderContext> readerContextOpt = broadcastManager.retrieveFileGroupReaderContext(new StoragePath(basePath));
+          Configuration conf = broadcastManager.retrieveStorageConfig().get();
+          HoodieTableMetaClient localMetaClient = HoodieTableMetaClient.builder()
+              .setBasePath(basePath)
+              .setConf(new HadoopStorageConfiguration(conf))
+              .build();
-    StoragePath[] paths;
-    if (hasLogFiles) {
-      String rawFractionConfig = getWriteConfig().getString(HoodieMemoryConfig.MAX_MEMORY_FRACTION_FOR_COMPACTION);
-      String compactionFractor = rawFractionConfig != null
-          ? rawFractionConfig : HoodieMemoryConfig.DEFAULT_MR_COMPACTION_MEMORY_FRACTION;
-      params.put(HoodieMemoryConfig.MAX_MEMORY_FRACTION_FOR_COMPACTION.key(), compactionFractor);
+          // instantiate FG reader
+          HoodieFileGroupReader<T> fileGroupReader = new HoodieFileGroupReader<>(
+              readerContextOpt.get(),
+              localMetaClient.getStorage().newInstance(new StoragePath(basePath), new HadoopStorageConfiguration(conf)),
+              basePath,
+              instantTime,
+              fileSlice,
+              readerSchema,
+              readerSchema,
+              internalSchemaOption,
+              localMetaClient,
+              localMetaClient.getTableConfig().getProps(),
+              0,
+              Long.MAX_VALUE,
+              usePosition);
+          fileGroupReader.initRecordIterators();
+          // read records from the FG reader
+          HoodieFileGroupReader.HoodieFileGroupReaderIterator<InternalRow> recordIterator
+              = (HoodieFileGroupReader.HoodieFileGroupReaderIterator<InternalRow>) fileGroupReader.getClosableIterator();
+          return recordIterator;
+        }
+      }).rdd();
+
+      return HoodieUnsafeUtils.createDataFrameFromRDD(((HoodieSparkEngineContext)getEngineContext()).getSqlContext().sparkSession(),
+          internalRowRDD, sparkSchemaWithMetaFields);
+    } else {
+      boolean hasLogFiles = clusteringOps.stream().anyMatch(op -> op.getDeltaFilePaths().size() > 0);
Review Comment:
   Similar to other review comments (https://github.com/apache/hudi/pull/12390/files#r1865562106), let's extract the `if` and `else` branches into two separate methods for large blocks like this. That makes the review easier (the indentation stays the same) and the code more maintainable.
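   For illustration, a rough sketch of the split (the helper method names here are just placeholders, and the exact parameters each helper needs may differ):
   ```java
   private Dataset<Row> readRecordsForGroupAsRow(JavaSparkContext jsc,
                                                 HoodieClusteringGroup clusteringGroup,
                                                 String instantTime,
                                                 Schema tableSchemaWithMetaFields) {
     List<ClusteringOperation> clusteringOps = clusteringGroup.getSlices().stream()
         .map(ClusteringOperation::create).collect(Collectors.toList());
     if (getWriteConfig().getBooleanOrDefault(HoodieReaderConfig.FILE_GROUP_READER_ENABLED)) {
       // new code path: read each file slice through the file group reader
       return readRecordsForGroupAsRowWithFileGroupReader(jsc, clusteringOps, instantTime, tableSchemaWithMetaFields);
     } else {
       // existing code path: read base/log files through the datasource relation
       return readRecordsForGroupAsRowLegacy(jsc, clusteringOps, instantTime);
     }
   }
   ```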
##########
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/clustering/run/strategy/MultipleSparkJobExecutionStrategy.java:
##########
@@ -420,64 +449,131 @@ writeConfig, new StoragePath(bootstrapFilePath)),
partitionFields,
*/
   private Dataset<Row> readRecordsForGroupAsRow(JavaSparkContext jsc,
                                                 HoodieClusteringGroup clusteringGroup,
-                                                String instantTime) {
+                                                String instantTime,
+                                                Schema tableSchemaWithMetaFields) {
     List<ClusteringOperation> clusteringOps = clusteringGroup.getSlices().stream()
         .map(ClusteringOperation::create).collect(Collectors.toList());
-    boolean hasLogFiles = clusteringOps.stream().anyMatch(op -> op.getDeltaFilePaths().size() > 0);
-    SQLContext sqlContext = new SQLContext(jsc.sc());
-
-    StoragePath[] baseFilePaths = clusteringOps
-        .stream()
-        .map(op -> {
-          ArrayList<String> readPaths = new ArrayList<>();
-          // NOTE: for bootstrap tables, only need to handle data file path (which is the skeleton file) because
-          // HoodieBootstrapRelation takes care of stitching if there is bootstrap path for the skeleton file.
-          if (op.getDataFilePath() != null) {
-            readPaths.add(op.getDataFilePath());
+
+    if (getWriteConfig().getBooleanOrDefault(HoodieReaderConfig.FILE_GROUP_READER_ENABLED)) {
+      String basePath = getWriteConfig().getBasePath();
+      // construct supporting cast that executors might need
+      boolean usePosition = getWriteConfig().getBooleanOrDefault(MERGE_USE_RECORD_POSITIONS);
+      String internalSchemaStr = getWriteConfig().getInternalSchema();
+      boolean isInternalSchemaPresent = !StringUtils.isNullOrEmpty(internalSchemaStr);
+      SerializableSchema serializableTableSchemaWithMetaFields = new SerializableSchema(tableSchemaWithMetaFields);
+
+      // broadcast reader context.
+      SparkFileGroupReaderBroadcastManager broadcastManager = new SparkFileGroupReaderBroadcastManager(getEngineContext());
+      broadcastManager.prepareAndBroadcast();
+      StructType sparkSchemaWithMetaFields = AvroConversionUtils.convertAvroSchemaToStructType(tableSchemaWithMetaFields);
+
+      RDD<InternalRow> internalRowRDD = jsc.parallelize(clusteringOps, clusteringOps.size()).flatMap(new FlatMapFunction<ClusteringOperation, InternalRow>() {
+        @Override
+        public Iterator<InternalRow> call(ClusteringOperation clusteringOperation) throws Exception {
+          // construct FileSlice to pass into FileGroupReader
+          String partitionPath = clusteringOperation.getPartitionPath();
+          HoodieBaseFile baseFile = new HoodieBaseFile(new StoragePath(basePath, clusteringOperation.getDataFilePath()).toString());
+          List<HoodieLogFile> logFiles = clusteringOperation.getDeltaFilePaths().stream().map(p ->
+                  new HoodieLogFile(new StoragePath(FSUtils.constructAbsolutePath(
+                      basePath, partitionPath), p)))
+              .collect(Collectors.toList());
+          FileSlice fileSlice = new FileSlice(new HoodieFileGroupId(partitionPath, clusteringOperation.getFileId()), baseFile.getCommitTime(), baseFile, logFiles);
+
+          // instantiate other supporting cast
+          Schema readerSchema = serializableTableSchemaWithMetaFields.get();
+          Option<InternalSchema> internalSchemaOption = Option.empty();
+          if (isInternalSchemaPresent) {
+            internalSchemaOption = SerDeHelper.fromJson(internalSchemaStr);
           }
-          return readPaths;
-        })
-        .flatMap(Collection::stream)
-        .filter(path -> !path.isEmpty())
-        .map(StoragePath::new)
-        .toArray(StoragePath[]::new);
-
-    HashMap<String, String> params = new HashMap<>();
-    if (hasLogFiles) {
-      params.put("hoodie.datasource.query.type", "snapshot");
-    } else {
-      params.put("hoodie.datasource.query.type", "read_optimized");
-    }
+          Option<HoodieReaderContext> readerContextOpt = broadcastManager.retrieveFileGroupReaderContext(new StoragePath(basePath));
+          Configuration conf = broadcastManager.retrieveStorageConfig().get();
+          HoodieTableMetaClient localMetaClient = HoodieTableMetaClient.builder()
+              .setBasePath(basePath)
+              .setConf(new HadoopStorageConfiguration(conf))
+              .build();
-    StoragePath[] paths;
-    if (hasLogFiles) {
-      String rawFractionConfig = getWriteConfig().getString(HoodieMemoryConfig.MAX_MEMORY_FRACTION_FOR_COMPACTION);
-      String compactionFractor = rawFractionConfig != null
-          ? rawFractionConfig : HoodieMemoryConfig.DEFAULT_MR_COMPACTION_MEMORY_FRACTION;
-      params.put(HoodieMemoryConfig.MAX_MEMORY_FRACTION_FOR_COMPACTION.key(), compactionFractor);
+          // instantiate FG reader
+          HoodieFileGroupReader<T> fileGroupReader = new HoodieFileGroupReader<>(
+              readerContextOpt.get(),
+              localMetaClient.getStorage().newInstance(new StoragePath(basePath), new HadoopStorageConfiguration(conf)),
+              basePath,
+              instantTime,
+              fileSlice,
+              readerSchema,
+              readerSchema,
+              internalSchemaOption,
+              localMetaClient,
+              localMetaClient.getTableConfig().getProps(),
+              0,
+              Long.MAX_VALUE,
+              usePosition);
+          fileGroupReader.initRecordIterators();
+          // read records from the FG reader
+          HoodieFileGroupReader.HoodieFileGroupReaderIterator<InternalRow> recordIterator
+              = (HoodieFileGroupReader.HoodieFileGroupReaderIterator<InternalRow>) fileGroupReader.getClosableIterator();
+          return recordIterator;
+        }
+      }).rdd();
+
+      return HoodieUnsafeUtils.createDataFrameFromRDD(((HoodieSparkEngineContext)getEngineContext()).getSqlContext().sparkSession(),
Review Comment:
```suggestion
      return HoodieUnsafeUtils.createDataFrameFromRDD(((HoodieSparkEngineContext) getEngineContext()).getSqlContext().sparkSession(),
```
##########
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/clustering/run/strategy/MultipleSparkJobExecutionStrategy.java:
##########
@@ -420,64 +449,131 @@ writeConfig, new StoragePath(bootstrapFilePath)),
partitionFields,
*/
   private Dataset<Row> readRecordsForGroupAsRow(JavaSparkContext jsc,
                                                 HoodieClusteringGroup clusteringGroup,
-                                                String instantTime) {
+                                                String instantTime,
+                                                Schema tableSchemaWithMetaFields) {
     List<ClusteringOperation> clusteringOps = clusteringGroup.getSlices().stream()
         .map(ClusteringOperation::create).collect(Collectors.toList());
-    boolean hasLogFiles = clusteringOps.stream().anyMatch(op -> op.getDeltaFilePaths().size() > 0);
-    SQLContext sqlContext = new SQLContext(jsc.sc());
-
-    StoragePath[] baseFilePaths = clusteringOps
-        .stream()
-        .map(op -> {
-          ArrayList<String> readPaths = new ArrayList<>();
-          // NOTE: for bootstrap tables, only need to handle data file path (which is the skeleton file) because
-          // HoodieBootstrapRelation takes care of stitching if there is bootstrap path for the skeleton file.
-          if (op.getDataFilePath() != null) {
-            readPaths.add(op.getDataFilePath());
+
+    if (getWriteConfig().getBooleanOrDefault(HoodieReaderConfig.FILE_GROUP_READER_ENABLED)) {
+      String basePath = getWriteConfig().getBasePath();
+      // construct supporting cast that executors might need
+      boolean usePosition = getWriteConfig().getBooleanOrDefault(MERGE_USE_RECORD_POSITIONS);
+      String internalSchemaStr = getWriteConfig().getInternalSchema();
+      boolean isInternalSchemaPresent = !StringUtils.isNullOrEmpty(internalSchemaStr);
+      SerializableSchema serializableTableSchemaWithMetaFields = new SerializableSchema(tableSchemaWithMetaFields);
+
+      // broadcast reader context.
+      SparkFileGroupReaderBroadcastManager broadcastManager = new SparkFileGroupReaderBroadcastManager(getEngineContext());
+      broadcastManager.prepareAndBroadcast();
+      StructType sparkSchemaWithMetaFields = AvroConversionUtils.convertAvroSchemaToStructType(tableSchemaWithMetaFields);
+
+      RDD<InternalRow> internalRowRDD = jsc.parallelize(clusteringOps, clusteringOps.size()).flatMap(new FlatMapFunction<ClusteringOperation, InternalRow>() {
+        @Override
+        public Iterator<InternalRow> call(ClusteringOperation clusteringOperation) throws Exception {
+          // construct FileSlice to pass into FileGroupReader
+          String partitionPath = clusteringOperation.getPartitionPath();
+          HoodieBaseFile baseFile = new HoodieBaseFile(new StoragePath(basePath, clusteringOperation.getDataFilePath()).toString());
+          List<HoodieLogFile> logFiles = clusteringOperation.getDeltaFilePaths().stream().map(p ->
+                  new HoodieLogFile(new StoragePath(FSUtils.constructAbsolutePath(
+                      basePath, partitionPath), p)))
+              .collect(Collectors.toList());
+          FileSlice fileSlice = new FileSlice(new HoodieFileGroupId(partitionPath, clusteringOperation.getFileId()), baseFile.getCommitTime(), baseFile, logFiles);
+
+          // instantiate other supporting cast
+          Schema readerSchema = serializableTableSchemaWithMetaFields.get();
+          Option<InternalSchema> internalSchemaOption = Option.empty();
+          if (isInternalSchemaPresent) {
+            internalSchemaOption = SerDeHelper.fromJson(internalSchemaStr);
           }
-          return readPaths;
-        })
-        .flatMap(Collection::stream)
-        .filter(path -> !path.isEmpty())
-        .map(StoragePath::new)
-        .toArray(StoragePath[]::new);
-
-    HashMap<String, String> params = new HashMap<>();
-    if (hasLogFiles) {
-      params.put("hoodie.datasource.query.type", "snapshot");
-    } else {
-      params.put("hoodie.datasource.query.type", "read_optimized");
-    }
+          Option<HoodieReaderContext> readerContextOpt = broadcastManager.retrieveFileGroupReaderContext(new StoragePath(basePath));
+          Configuration conf = broadcastManager.retrieveStorageConfig().get();
+          HoodieTableMetaClient localMetaClient = HoodieTableMetaClient.builder()
+              .setBasePath(basePath)
+              .setConf(new HadoopStorageConfiguration(conf))
+              .build();
-    StoragePath[] paths;
-    if (hasLogFiles) {
-      String rawFractionConfig = getWriteConfig().getString(HoodieMemoryConfig.MAX_MEMORY_FRACTION_FOR_COMPACTION);
-      String compactionFractor = rawFractionConfig != null
-          ? rawFractionConfig : HoodieMemoryConfig.DEFAULT_MR_COMPACTION_MEMORY_FRACTION;
-      params.put(HoodieMemoryConfig.MAX_MEMORY_FRACTION_FOR_COMPACTION.key(), compactionFractor);
+          // instantiate FG reader
+          HoodieFileGroupReader<T> fileGroupReader = new HoodieFileGroupReader<>(
+              readerContextOpt.get(),
+              localMetaClient.getStorage().newInstance(new StoragePath(basePath), new HadoopStorageConfiguration(conf)),
Review Comment:
   This is unnecessary now. You can directly use `localMetaClient`, which contains the correct `Configuration` instance.
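   For example, something along these lines (just a sketch, assuming the storage returned by `localMetaClient.getStorage()` already carries the broadcast `Configuration`):
   ```java
   // sketch: reuse the storage held by the meta client instead of creating a new instance
   HoodieFileGroupReader<T> fileGroupReader = new HoodieFileGroupReader<>(
       readerContextOpt.get(),
       localMetaClient.getStorage(),
       basePath,
       instantTime,
       fileSlice,
       readerSchema,
       readerSchema,
       internalSchemaOption,
       localMetaClient,
       localMetaClient.getTableConfig().getProps(),
       0,
       Long.MAX_VALUE,
       usePosition);
   ```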