yihua commented on code in PR #11210:
URL: https://github.com/apache/hudi/pull/11210#discussion_r1599351065


##########
hudi-common/src/main/java/org/apache/hudi/common/table/log/block/HoodieParquetDataBlock.java:
##########
@@ -99,29 +90,17 @@ public HoodieLogBlockType getBlockType() {
 
   @Override
   protected byte[] serializeRecords(List<HoodieRecord> records, StorageConfiguration<?> storageConf) throws IOException {
-    if (records.size() == 0) {
-      return new byte[0];
-    }
-
-    Schema writerSchema = new Schema.Parser().parse(super.getLogBlockHeader().get(HeaderMetadataType.SCHEMA));
-    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
-    HoodieConfig config = new HoodieConfig();
-    config.setValue(PARQUET_COMPRESSION_CODEC_NAME.key(), compressionCodecName.get().name());
-    config.setValue(PARQUET_BLOCK_SIZE.key(), String.valueOf(ParquetWriter.DEFAULT_BLOCK_SIZE));
-    config.setValue(PARQUET_PAGE_SIZE.key(), String.valueOf(ParquetWriter.DEFAULT_PAGE_SIZE));
-    config.setValue(PARQUET_MAX_FILE_SIZE.key(), String.valueOf(1024 * 1024 * 1024));
-    config.setValue(PARQUET_COMPRESSION_RATIO_FRACTION.key(), String.valueOf(expectedCompressionRatio.get()));
-    config.setValue(PARQUET_DICTIONARY_ENABLED, String.valueOf(useDictionaryEncoding.get()));
-    HoodieRecordType recordType = records.iterator().next().getRecordType();
-    try (HoodieFileWriter parquetWriter = HoodieFileWriterFactory.getFileWriter(
-        HoodieFileFormat.PARQUET, outputStream, storageConf, config, writerSchema, recordType)) {
-      for (HoodieRecord<?> record : records) {
-        String recordKey = getRecordKey(record).orElse(null);
-        parquetWriter.write(recordKey, record, writerSchema);
-      }
-      outputStream.flush();
-    }
-    return outputStream.toByteArray();
+    Map<String, String> paramsMap = new HashMap<>();
+    paramsMap.put(PARQUET_COMPRESSION_CODEC_NAME.key(), compressionCodecName.get());
+    paramsMap.put(PARQUET_COMPRESSION_RATIO_FRACTION.key(), String.valueOf(expectedCompressionRatio.get()));
+    paramsMap.put(PARQUET_DICTIONARY_ENABLED.key(), String.valueOf(useDictionaryEncoding.get()));
+
+    return FileFormatUtils.getInstance(PARQUET).serializeRecordsToLogBlock(
+        storageConf, records,
+        new Schema.Parser().parse(super.getLogBlockHeader().get(HoodieLogBlock.HeaderMetadataType.SCHEMA)),

Review Comment:
   Fixed.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to