This is an automated email from the ASF dual-hosted git repository.

sivabalan pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git


    from ac9f46c1ebb [HUDI-9532] Add sort columns in plan generated by Flink (#13455)
     add f8219428f35 [HUDI-8286] Standardize compaction and log compaction to use FileGroupReader (#13411)

No new revisions were added by this update.

Summary of changes:
 .../hudi/io/FileGroupReaderBasedAppendHandle.java  | 118 +++++++++++++++
 ...e.java => FileGroupReaderBasedMergeHandle.java} | 129 ++++++++++------
 .../java/org/apache/hudi/io/HoodieMergeHandle.java |  27 ++--
 .../apache/hudi/table/HoodieCompactionHandler.java |  10 --
 .../hudi/table/action/compact/HoodieCompactor.java |  71 ++++++---
 .../client/common/HoodieFlinkEngineContext.java    |  13 ++
 .../hudi/execution/FlinkLazyInsertIterable.java    |   3 +-
 .../apache/hudi/io/FlinkMergeAndReplaceHandle.java |   8 -
 .../java/org/apache/hudi/io/FlinkMergeHandle.java  |   8 -
 .../v2/FlinkFileGroupReaderBasedMergeHandle.java   | 141 ------------------
 .../hudi/table/HoodieFlinkCopyOnWriteTable.java    |  15 --
 .../HoodieFlinkMergeOnReadTableCompactor.java      |  20 ++-
 .../hudi/execution/JavaLazyInsertIterable.java     |   3 +-
 .../HoodieJavaMergeOnReadTableCompactor.java       |   5 +
 .../client/common/HoodieSparkEngineContext.java    |  12 +-
 .../hudi/common/model/HoodieSparkRecord.java       |   8 +
 .../hudi/execution/SparkLazyInsertIterable.java    |   2 +
 ...HoodieSparkFileGroupReaderBasedMergeHandle.java | 139 ------------------
 .../hudi/table/HoodieSparkCopyOnWriteTable.java    |  16 --
 .../HoodieSparkMergeOnReadTableCompactor.java      |   5 +
 .../java/org/apache/hudi/avro/AvroSchemaUtils.java |   6 +-
 .../apache/hudi/avro/HoodieAvroReaderContext.java  |   5 +-
 .../hudi/common/engine/HoodieEngineContext.java    |   8 +-
 .../hudi/common/model/HoodieAvroIndexedRecord.java |   6 +
 .../apache/hudi/common/model/HoodieAvroRecord.java |  16 ++
 .../hudi/common/table/HoodieTableMetaClient.java   |   2 +-
 .../common/table/read/FileGroupRecordBuffer.java   |   6 +-
 .../common/table/read/HoodieFileGroupReader.java   |  26 +++-
 .../read/SortedKeyBasedFileGroupRecordBuffer.java  | 106 ++++++++++++++
 .../hudi/metadata/HoodieBackedTableMetadata.java   |   3 +-
 .../hudi/metadata/HoodieMetadataPayload.java       |  49 ++++++-
 .../table/read/TestFileGroupRecordBuffer.java      |   9 +-
 .../table/read/TestHoodieFileGroupReaderBase.java  |  35 +++--
 .../TestSortedKeyBasedFileGroupRecordBuffer.java   | 163 +++++++++++++++++++++
 .../apache/hudi/sink/compact/CompactOperator.java  |   4 +-
 .../table/format/FlinkReaderContextFactory.java    |  25 +++-
 .../table/TestHoodieFileGroupReaderOnFlink.java    |   4 +-
 .../TestMetadataUtilRLIandSIRecordGeneration.java  |   9 +-
 .../TestColStatsRecordWithMetadataRecord.java      |   3 +-
 .../hudi/table/TestHoodieMergeOnReadTable.java     |   2 +
 ...dieSparkMergeOnReadTableInsertUpdateDelete.java |   6 +-
 41 files changed, 749 insertions(+), 497 deletions(-)
 create mode 100644 
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/FileGroupReaderBasedAppendHandle.java
 rename 
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/{BaseFileGroupReaderBasedMergeHandle.java
 => FileGroupReaderBasedMergeHandle.java} (55%)
 delete mode 100644 
hudi-client/hudi-flink-client/src/main/java/org/apache/hudi/io/v2/FlinkFileGroupReaderBasedMergeHandle.java
 delete mode 100644 
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/HoodieSparkFileGroupReaderBasedMergeHandle.java
 create mode 100644 
hudi-common/src/main/java/org/apache/hudi/common/table/read/SortedKeyBasedFileGroupRecordBuffer.java
 create mode 100644 
hudi-common/src/test/java/org/apache/hudi/common/table/read/TestSortedKeyBasedFileGroupRecordBuffer.java
 copy 
hudi-common/src/main/java/org/apache/hudi/common/engine/AvroReaderContextFactory.java
 => 
hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/table/format/FlinkReaderContextFactory.java
 (50%)

Reply via email to