nsivabalan commented on code in PR #12395:
URL: https://github.com/apache/hudi/pull/12395#discussion_r1865593660


##########
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/clustering/run/strategy/MultipleSparkJobExecutionStrategy.java:
##########
@@ -423,61 +443,133 @@ private Dataset<Row> readRecordsForGroupAsRow(JavaSparkContext jsc,
                                                  String instantTime) {
     List<ClusteringOperation> clusteringOps = clusteringGroup.getSlices().stream()
         .map(ClusteringOperation::create).collect(Collectors.toList());
-    boolean hasLogFiles = clusteringOps.stream().anyMatch(op -> op.getDeltaFilePaths().size() > 0);
-    SQLContext sqlContext = new SQLContext(jsc.sc());
-
-    StoragePath[] baseFilePaths = clusteringOps
-        .stream()
-        .map(op -> {
-          ArrayList<String> readPaths = new ArrayList<>();
-          // NOTE: for bootstrap tables, only need to handle data file path (which is the skeleton file) because
-          // HoodieBootstrapRelation takes care of stitching if there is bootstrap path for the skeleton file.
-          if (op.getDataFilePath() != null) {
-            readPaths.add(op.getDataFilePath());
+
+    if (getWriteConfig().getBooleanOrDefault(HoodieReaderConfig.FILE_GROUP_READER_ENABLED)) {
+      String basePath = getWriteConfig().getBasePath();
+
+      List<FileSlice> fileSlices = clusteringOps.stream().map(new Function<ClusteringOperation, FileSlice>() {
+        @Override
+        public FileSlice apply(ClusteringOperation clusteringOperation) {
+          String partitionPath = clusteringOperation.getPartitionPath();
+          HoodieBaseFile baseFile = new HoodieBaseFile(new StoragePath(basePath, clusteringOperation.getDataFilePath()).toString());
+          List<HoodieLogFile> logFiles = clusteringOperation.getDeltaFilePaths().stream().map(p ->
+                  new HoodieLogFile(new StoragePath(FSUtils.constructAbsolutePath(
+                      basePath, partitionPath), p)))
+              .collect(Collectors.toList());
+          return new FileSlice(new HoodieFileGroupId(partitionPath, clusteringOperation.getFileId()), baseFile.getCommitTime(), baseFile, logFiles);
+        }
+      }).collect(Collectors.toList());
+
+      boolean usePosition = getWriteConfig().getBooleanOrDefault(MERGE_USE_RECORD_POSITIONS);
+      String internalSchema = getWriteConfig().getInternalSchema();
+      boolean isInternalSchemaPresent = !StringUtils.isNullOrEmpty(internalSchema);
+      String schema = getWriteConfig().getSchema();
+      boolean allowOperationMetaField = getWriteConfig().allowOperationMetadataField();
+
+      // broadcast reader context.
+      SparkFileGroupReaderBroadcastManager broadcastManager = new SparkFileGroupReaderBroadcastManager(getEngineContext());
+      broadcastManager.prepareAndBroadcast();
+      StructType sparkSchema = AvroConversionUtils.convertAvroSchemaToStructType(new Schema.Parser().parse(schema));
+
+      RDD<InternalRow> internalRowRDD = jsc.parallelize(fileSlices, fileSlices.size()).flatMap(new FlatMapFunction<FileSlice, InternalRow>() {
+        @Override
+        public Iterator<InternalRow> call(FileSlice fileSlice) throws Exception {
+          Schema readerSchema;
+          Option<InternalSchema> internalSchemaOption = Option.empty();
+          if (isInternalSchemaPresent) {
+            readerSchema = HoodieAvroUtils.addMetadataFields(
+                new Schema.Parser().parse(schema), allowOperationMetaField);
+            internalSchemaOption = SerDeHelper.fromJson(internalSchema);
+          } else {
+            readerSchema = HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(schema), allowOperationMetaField);
           }
-          return readPaths;
-        })
-        .flatMap(Collection::stream)
-        .filter(path -> !path.isEmpty())
-        .map(StoragePath::new)
-        .toArray(StoragePath[]::new);
-
-    HashMap<String, String> params = new HashMap<>();
-    if (hasLogFiles) {
-      params.put("hoodie.datasource.query.type", "snapshot");
-    } else {
-      params.put("hoodie.datasource.query.type", "read_optimized");
-    }
 
-    StoragePath[] paths;
-    if (hasLogFiles) {
-      String rawFractionConfig = getWriteConfig().getString(HoodieMemoryConfig.MAX_MEMORY_FRACTION_FOR_COMPACTION);
-      String compactionFractor = rawFractionConfig != null
-          ? rawFractionConfig : HoodieMemoryConfig.DEFAULT_MR_COMPACTION_MEMORY_FRACTION;
-      params.put(HoodieMemoryConfig.MAX_MEMORY_FRACTION_FOR_COMPACTION.key(), compactionFractor);
+          Option<HoodieReaderContext> readerContextOpt = broadcastManager.retrieveFileGroupReaderContext(new StoragePath(basePath));
+          Configuration conf = broadcastManager.retrieveStorageConfig().get();
+          HoodieTableMetaClient newMetaClient = HoodieTableMetaClient.builder()
+              .setBasePath(basePath)
+              .setConf(new HadoopStorageConfiguration(conf))
+              .build();
+          HoodieFileGroupReader<T> fileGroupReader = new HoodieFileGroupReader<>(
+              readerContextOpt.get(),
+              newMetaClient.getStorage().newInstance(new StoragePath(basePath), new HadoopStorageConfiguration(conf)),
+              basePath,
+              instantTime,
+              fileSlice,
+              readerSchema,
+              readerSchema,
+              internalSchemaOption,
+              newMetaClient,
+              newMetaClient.getTableConfig().getProps(),
+              0,
+              Long.MAX_VALUE,
+              usePosition);
+          fileGroupReader.initRecordIterators();
+          HoodieFileGroupReader.HoodieFileGroupReaderIterator<InternalRow> recordIterator
+              = (HoodieFileGroupReader.HoodieFileGroupReaderIterator<InternalRow>) fileGroupReader.getClosableIterator();
+          return recordIterator;
+        }
+      }).rdd();
 
-      StoragePath[] deltaPaths = clusteringOps
+      return HoodieUnsafeUtils.createDataFrameFromRDD(((HoodieSparkEngineContext)getEngineContext()).getSqlContext().sparkSession(),
+          internalRowRDD, sparkSchema);
+    } else {
+      boolean hasLogFiles = clusteringOps.stream().anyMatch(op -> op.getDeltaFilePaths().size() > 0);

Review Comment:
   No changes in the "else" block; the entire "if" block is new code.
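
   For readers skimming the diff: the new "if" branch follows a broadcast-once, read-per-slice pattern. It parallelizes one Spark task per file slice, opens a HoodieFileGroupReader on the executor to merge the base file with its log files into an iterator of InternalRows, and wraps the resulting RDD into a DataFrame. Below is a minimal, self-contained sketch of that parallelize -> flatMap -> DataFrame shape using only plain Spark APIs; the class name, stand-in "slices", and fabricated rows are illustrative and not from the PR.

   ```java
   import org.apache.spark.api.java.JavaRDD;
   import org.apache.spark.api.java.JavaSparkContext;
   import org.apache.spark.sql.Dataset;
   import org.apache.spark.sql.Row;
   import org.apache.spark.sql.RowFactory;
   import org.apache.spark.sql.SparkSession;
   import org.apache.spark.sql.types.DataTypes;
   import org.apache.spark.sql.types.StructType;

   import java.util.Arrays;
   import java.util.List;

   // Hypothetical class, for illustration only.
   public class FlatMapToDataFrameSketch {
     public static void main(String[] args) {
       SparkSession spark = SparkSession.builder().master("local[*]").appName("sketch").getOrCreate();
       JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());

       // Stand-ins for file slices; in the PR each element is a FileSlice.
       List<String> slices = Arrays.asList("slice-1", "slice-2");

       // One partition per slice, mirroring jsc.parallelize(fileSlices, fileSlices.size()).
       JavaRDD<Row> rows = jsc.parallelize(slices, slices.size())
           .flatMap(slice -> {
             // In the PR, this is where HoodieFileGroupReader merges base + log files
             // and returns a closeable iterator of InternalRows; here we just
             // fabricate two rows per slice.
             return Arrays.asList(
                 RowFactory.create(slice, 1),
                 RowFactory.create(slice, 2)).iterator();
           });

       StructType schema = DataTypes.createStructType(Arrays.asList(
           DataTypes.createStructField("slice", DataTypes.StringType, false),
           DataTypes.createStructField("seq", DataTypes.IntegerType, false)));

       // The PR builds the frame from InternalRows via HoodieUnsafeUtils.createDataFrameFromRDD,
       // presumably to skip the Row-to-InternalRow conversion; with external Rows,
       // the plain SparkSession API is the equivalent step.
       Dataset<Row> df = spark.createDataFrame(rows, schema);
       df.show();
       spark.stop();
     }
   }
   ```

   The one-partition-per-slice parallelism keeps each clustering operation's read in its own task, matching how the diff sizes the RDD to the number of file slices.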



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
