yihua commented on code in PR #12395:
URL: https://github.com/apache/hudi/pull/12395#discussion_r1866503512


##########
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/clustering/run/strategy/MultipleSparkJobExecutionStrategy.java:
##########
@@ -420,64 +449,131 @@ writeConfig, new StoragePath(bootstrapFilePath)), partitionFields,
    */
   private Dataset<Row> readRecordsForGroupAsRow(JavaSparkContext jsc,
                                                 HoodieClusteringGroup clusteringGroup,
-                                                String instantTime) {
+                                                String instantTime,
+                                                Schema tableSchemaWithMetaFields) {
     List<ClusteringOperation> clusteringOps = clusteringGroup.getSlices().stream()
         .map(ClusteringOperation::create).collect(Collectors.toList());
-    boolean hasLogFiles = clusteringOps.stream().anyMatch(op -> op.getDeltaFilePaths().size() > 0);
-    SQLContext sqlContext = new SQLContext(jsc.sc());
-
-    StoragePath[] baseFilePaths = clusteringOps
-        .stream()
-        .map(op -> {
-          ArrayList<String> readPaths = new ArrayList<>();
-          // NOTE: for bootstrap tables, only need to handle data file path (which is the skeleton file) because
-          // HoodieBootstrapRelation takes care of stitching if there is bootstrap path for the skeleton file.
-          if (op.getDataFilePath() != null) {
-            readPaths.add(op.getDataFilePath());
+
+    if (getWriteConfig().getBooleanOrDefault(HoodieReaderConfig.FILE_GROUP_READER_ENABLED)) {
+      String basePath = getWriteConfig().getBasePath();
+      // construct supporting cast that executors might need
+      boolean usePosition = getWriteConfig().getBooleanOrDefault(MERGE_USE_RECORD_POSITIONS);
+      String internalSchemaStr = getWriteConfig().getInternalSchema();
+      boolean isInternalSchemaPresent = !StringUtils.isNullOrEmpty(internalSchemaStr);
+      SerializableSchema serializableTableSchemaWithMetaFields = new SerializableSchema(tableSchemaWithMetaFields);
+
+      // broadcast reader context.
+      SparkFileGroupReaderBroadcastManager broadcastManager = new SparkFileGroupReaderBroadcastManager(getEngineContext());
+      broadcastManager.prepareAndBroadcast();
+      StructType sparkSchemaWithMetaFields = AvroConversionUtils.convertAvroSchemaToStructType(tableSchemaWithMetaFields);
+
+      RDD<InternalRow> internalRowRDD = jsc.parallelize(clusteringOps, clusteringOps.size()).flatMap(new FlatMapFunction<ClusteringOperation, InternalRow>() {
+        @Override
+        public Iterator<InternalRow> call(ClusteringOperation clusteringOperation) throws Exception {
+          // construct FileSlice to pass into FileGroupReader
+          String partitionPath = clusteringOperation.getPartitionPath();
+          HoodieBaseFile baseFile = new HoodieBaseFile(new StoragePath(basePath, clusteringOperation.getDataFilePath()).toString());
+          List<HoodieLogFile> logFiles = clusteringOperation.getDeltaFilePaths().stream().map(p ->
+                  new HoodieLogFile(new StoragePath(FSUtils.constructAbsolutePath(
+                      basePath, partitionPath), p)))
+              .collect(Collectors.toList());
+          FileSlice fileSlice = new FileSlice(new HoodieFileGroupId(partitionPath, clusteringOperation.getFileId()), baseFile.getCommitTime(), baseFile, logFiles);
+
+          // instantiate other supporting cast
+          Schema readerSchema = serializableTableSchemaWithMetaFields.get();
+          Option<InternalSchema> internalSchemaOption = Option.empty();
+          if (isInternalSchemaPresent) {
+            internalSchemaOption = SerDeHelper.fromJson(internalSchemaStr);
           }
-          return readPaths;
-        })
-        .flatMap(Collection::stream)
-        .filter(path -> !path.isEmpty())
-        .map(StoragePath::new)
-        .toArray(StoragePath[]::new);
-
-    HashMap<String, String> params = new HashMap<>();
-    if (hasLogFiles) {
-      params.put("hoodie.datasource.query.type", "snapshot");
-    } else {
-      params.put("hoodie.datasource.query.type", "read_optimized");
-    }
+          Option<HoodieReaderContext> readerContextOpt = broadcastManager.retrieveFileGroupReaderContext(new StoragePath(basePath));
+          Configuration conf = broadcastManager.retrieveStorageConfig().get();
+          HoodieTableMetaClient localMetaClient = HoodieTableMetaClient.builder()
+              .setBasePath(basePath)
+              .setConf(new HadoopStorageConfiguration(conf))
+              .build();

Review Comment:
   Is there any chance that the meta client can be reused?
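   
   For example, a rough sketch of the reuse I have in mind (the `MetaClientCache` holder below is hypothetical, not in this PR): build one meta client per base path per executor JVM and let every task reuse it, instead of rebuilding it for each `ClusteringOperation`.
   
   ```java
   import java.util.Map;
   import java.util.concurrent.ConcurrentHashMap;
   
   import org.apache.hadoop.conf.Configuration;
   import org.apache.hudi.common.table.HoodieTableMetaClient;
   import org.apache.hudi.storage.hadoop.HadoopStorageConfiguration;
   
   public class MetaClientCache {
     // one meta client per base path, shared by all tasks in this executor JVM
     private static final Map<String, HoodieTableMetaClient> CACHE = new ConcurrentHashMap<>();
   
     public static HoodieTableMetaClient getOrCreate(String basePath, Configuration conf) {
       return CACHE.computeIfAbsent(basePath, path ->
           HoodieTableMetaClient.builder()
               .setBasePath(path)
               .setConf(new HadoopStorageConfiguration(conf))
               .build());
     }
   }
   ```
   
   The `call(...)` body would then shrink to `HoodieTableMetaClient localMetaClient = MetaClientCache.getOrCreate(basePath, conf);`.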



##########
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/clustering/run/strategy/MultipleSparkJobExecutionStrategy.java:
##########
@@ -420,64 +449,131 @@ writeConfig, new StoragePath(bootstrapFilePath)), partitionFields,
    */
   private Dataset<Row> readRecordsForGroupAsRow(JavaSparkContext jsc,
                                                 HoodieClusteringGroup clusteringGroup,
-                                                String instantTime) {
+                                                String instantTime,
+                                                Schema tableSchemaWithMetaFields) {
     List<ClusteringOperation> clusteringOps = clusteringGroup.getSlices().stream()
         .map(ClusteringOperation::create).collect(Collectors.toList());
-    boolean hasLogFiles = clusteringOps.stream().anyMatch(op -> op.getDeltaFilePaths().size() > 0);
-    SQLContext sqlContext = new SQLContext(jsc.sc());
-
-    StoragePath[] baseFilePaths = clusteringOps
-        .stream()
-        .map(op -> {
-          ArrayList<String> readPaths = new ArrayList<>();
-          // NOTE: for bootstrap tables, only need to handle data file path (which is the skeleton file) because
-          // HoodieBootstrapRelation takes care of stitching if there is bootstrap path for the skeleton file.
-          if (op.getDataFilePath() != null) {
-            readPaths.add(op.getDataFilePath());
+
+    if (getWriteConfig().getBooleanOrDefault(HoodieReaderConfig.FILE_GROUP_READER_ENABLED)) {
+      String basePath = getWriteConfig().getBasePath();
+      // construct supporting cast that executors might need
+      boolean usePosition = getWriteConfig().getBooleanOrDefault(MERGE_USE_RECORD_POSITIONS);
+      String internalSchemaStr = getWriteConfig().getInternalSchema();
+      boolean isInternalSchemaPresent = !StringUtils.isNullOrEmpty(internalSchemaStr);
+      SerializableSchema serializableTableSchemaWithMetaFields = new SerializableSchema(tableSchemaWithMetaFields);
+
+      // broadcast reader context.
+      SparkFileGroupReaderBroadcastManager broadcastManager = new SparkFileGroupReaderBroadcastManager(getEngineContext());
+      broadcastManager.prepareAndBroadcast();
+      StructType sparkSchemaWithMetaFields = AvroConversionUtils.convertAvroSchemaToStructType(tableSchemaWithMetaFields);
+
+      RDD<InternalRow> internalRowRDD = jsc.parallelize(clusteringOps, clusteringOps.size()).flatMap(new FlatMapFunction<ClusteringOperation, InternalRow>() {
+        @Override
+        public Iterator<InternalRow> call(ClusteringOperation clusteringOperation) throws Exception {
+          // construct FileSlice to pass into FileGroupReader
+          String partitionPath = clusteringOperation.getPartitionPath();
+          HoodieBaseFile baseFile = new HoodieBaseFile(new StoragePath(basePath, clusteringOperation.getDataFilePath()).toString());

Review Comment:
   We should also check these cases to make sure they work:
   (1) no base file and log files only;
   (2) bootstrap file groups.
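   
   For (1), roughly the guard I would expect, as a sketch (assuming `getDataFilePath()` can be null or empty for a log-only slice; using `getDeltaCommitTime()` of the first log file as the base instant fallback is my assumption, the real source may differ):
   
   ```java
   // hypothetical helper mirroring the construction in the diff,
   // but tolerating a missing base file
   static FileSlice toFileSlice(String basePath, ClusteringOperation op) {
     String partitionPath = op.getPartitionPath();
     List<HoodieLogFile> logFiles = op.getDeltaFilePaths().stream()
         .map(p -> new HoodieLogFile(new StoragePath(
             FSUtils.constructAbsolutePath(basePath, partitionPath), p)))
         .collect(Collectors.toList());
     String dataFilePath = op.getDataFilePath();
     // a log-only slice has no base file, so baseFile must be nullable
     HoodieBaseFile baseFile = StringUtils.isNullOrEmpty(dataFilePath)
         ? null
         : new HoodieBaseFile(new StoragePath(basePath, dataFilePath).toString());
     // baseFile.getCommitTime() would NPE for log-only slices
     String baseInstantTime = baseFile != null
         ? baseFile.getCommitTime()
         : logFiles.get(0).getDeltaCommitTime();
     return new FileSlice(new HoodieFileGroupId(partitionPath, op.getFileId()),
         baseInstantTime, baseFile, logFiles);
   }
   ```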



##########
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/clustering/run/strategy/MultipleSparkJobExecutionStrategy.java:
##########
@@ -420,64 +449,131 @@ writeConfig, new StoragePath(bootstrapFilePath)), partitionFields,
    */
   private Dataset<Row> readRecordsForGroupAsRow(JavaSparkContext jsc,
                                                 HoodieClusteringGroup clusteringGroup,
-                                                String instantTime) {
+                                                String instantTime,
+                                                Schema tableSchemaWithMetaFields) {
     List<ClusteringOperation> clusteringOps = clusteringGroup.getSlices().stream()
         .map(ClusteringOperation::create).collect(Collectors.toList());
-    boolean hasLogFiles = clusteringOps.stream().anyMatch(op -> op.getDeltaFilePaths().size() > 0);
-    SQLContext sqlContext = new SQLContext(jsc.sc());
-
-    StoragePath[] baseFilePaths = clusteringOps
-        .stream()
-        .map(op -> {
-          ArrayList<String> readPaths = new ArrayList<>();
-          // NOTE: for bootstrap tables, only need to handle data file path (which is the skeleton file) because
-          // HoodieBootstrapRelation takes care of stitching if there is bootstrap path for the skeleton file.
-          if (op.getDataFilePath() != null) {
-            readPaths.add(op.getDataFilePath());
+
+    if (getWriteConfig().getBooleanOrDefault(HoodieReaderConfig.FILE_GROUP_READER_ENABLED)) {
+      String basePath = getWriteConfig().getBasePath();
+      // construct supporting cast that executors might need
+      boolean usePosition = getWriteConfig().getBooleanOrDefault(MERGE_USE_RECORD_POSITIONS);
+      String internalSchemaStr = getWriteConfig().getInternalSchema();
+      boolean isInternalSchemaPresent = !StringUtils.isNullOrEmpty(internalSchemaStr);
+      SerializableSchema serializableTableSchemaWithMetaFields = new SerializableSchema(tableSchemaWithMetaFields);
+
+      // broadcast reader context.
+      SparkFileGroupReaderBroadcastManager broadcastManager = new SparkFileGroupReaderBroadcastManager(getEngineContext());
+      broadcastManager.prepareAndBroadcast();
+      StructType sparkSchemaWithMetaFields = AvroConversionUtils.convertAvroSchemaToStructType(tableSchemaWithMetaFields);
+
+      RDD<InternalRow> internalRowRDD = jsc.parallelize(clusteringOps, clusteringOps.size()).flatMap(new FlatMapFunction<ClusteringOperation, InternalRow>() {
+        @Override
+        public Iterator<InternalRow> call(ClusteringOperation clusteringOperation) throws Exception {
+          // construct FileSlice to pass into FileGroupReader
+          String partitionPath = clusteringOperation.getPartitionPath();
+          HoodieBaseFile baseFile = new HoodieBaseFile(new StoragePath(basePath, clusteringOperation.getDataFilePath()).toString());

Review Comment:
   Does `clusteringOperation.getDataFilePath()` contain the partition path, or is it just the file name? The log-file paths above are built with `FSUtils.constructAbsolutePath(basePath, partitionPath)`, but the base file is joined with `basePath` only.
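   
   If it turns out to be just the file name, the base file path would need the same treatment as the log files; a sketch of that variant (assuming the file-name-only case):
   
   ```java
   // hypothetical variant: prepend the partition directory,
   // mirroring the log-file construction above
   HoodieBaseFile baseFile = new HoodieBaseFile(
       new StoragePath(FSUtils.constructAbsolutePath(basePath, partitionPath),
           clusteringOperation.getDataFilePath()).toString());
   ```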


