yihua commented on code in PR #12866:
URL: https://github.com/apache/hudi/pull/12866#discussion_r2106128064
##########
hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileBlock.java:
##########
@@ -187,34 +159,244 @@ public int getOnDiskSizeWithHeader() {
* @throws IOException upon decoding and decompression error.
*/
public void unpack() throws IOException {
- if (!isUnpacked) {
+ if (!readAttributesOpt.get().isUnpacked) {
// Should only be called for compressed blocks
CompressionCodec compression = context.getCompressionCodec();
if (compression != CompressionCodec.NONE) {
// Copy the block header which is not compressed
System.arraycopy(
- compressedByteBuff, startOffsetInCompressedBuff, byteBuff, 0, HFILEBLOCK_HEADER_SIZE);
+ readAttributesOpt.get().compressedByteBuff,
+ readAttributesOpt.get().startOffsetInCompressedBuff,
+ readAttributesOpt.get().byteBuff,
+ 0,
+ HFILEBLOCK_HEADER_SIZE);
try (InputStream byteBuffInputStream = new ByteArrayInputStream(
- compressedByteBuff, startOffsetInCompressedBuff + HFILEBLOCK_HEADER_SIZE, onDiskSizeWithoutHeader)) {
- context.getDecompressor().decompress(
+ readAttributesOpt.get().compressedByteBuff,
+ readAttributesOpt.get().startOffsetInCompressedBuff + HFILEBLOCK_HEADER_SIZE,
+ readAttributesOpt.get().onDiskSizeWithoutHeader)) {
+ context.getCompressor().decompress(
byteBuffInputStream,
- byteBuff,
+ readAttributesOpt.get().byteBuff,
HFILEBLOCK_HEADER_SIZE,
- byteBuff.length - HFILEBLOCK_HEADER_SIZE);
+ readAttributesOpt.get().byteBuff.length - HFILEBLOCK_HEADER_SIZE);
}
}
- isUnpacked = true;
+ readAttributesOpt.get().isUnpacked = true;
}
}
+ // ================ Below: write-path methods ================
+
/**
- * Allocates new byte buffer for the uncompressed bytes.
- *
- * @return a new byte array based on the size of uncompressed data, holding the same header
- * bytes.
+ * Returns the serialized "data" part of the block.
+ * This function must be implemented by each block type separately.
+ */
+ protected abstract ByteBuffer getUncompressedBlockDataToWrite();
+
+ /**
+ * Returns the serialized block, including header, data, and checksum.
+ */
+ public ByteBuffer serialize() throws IOException {
+ // Block payload.
+ ByteBuffer uncompressedBlockData = getUncompressedBlockDataToWrite();
+ // Compress if specified.
+ ByteBuffer compressedBlockData = compress(uncompressedBlockData);
+ // Buffer for building block.
+ ByteBuffer buf = ByteBuffer.allocate(Math.max(
+ context.getBlockSize() * 2,
+ compressedBlockData.limit() + HFILEBLOCK_HEADER_SIZE * 2));
+
+ // Block header
+ // 1. Magic is always 8 bytes.
+ buf.put(blockType.getMagic(), 0, 8);
+ // 2. onDiskSizeWithoutHeader.
+ buf.putInt(compressedBlockData.limit());
+ // 3. uncompressedSizeWithoutHeader.
+ buf.putInt(uncompressedBlockData.limit());
+ // 4. Previous block offset.
+ buf.putLong(previousBlockOffset);
+ // 5. Checksum type.
+ buf.put(context.getChecksumType().getCode());
+ // 6. Bytes covered per checksum.
+ buf.putInt(DEFAULT_BYTES_PER_CHECKSUM);
+ // 7. onDiskDataSizeWithHeader
+ int onDiskDataSizeWithHeader =
+ HFileBlock.HFILEBLOCK_HEADER_SIZE + uncompressedBlockData.limit();
+ buf.putInt(onDiskDataSizeWithHeader);
+ // 8. Payload.
+ buf.put(compressedBlockData);
+ // 9. Checksum.
+ buf.put(generateChecksumBytes(context.getChecksumType()));
+
+ // Flip so position/limit expose the written bytes for reading.
+ buf.flip();
+ return buf;
+ }
+
+ /**
+ * Compresses the block data, excluding header and checksum.
+ */
+ protected ByteBuffer compress(ByteBuffer payload) throws IOException {
Review Comment:
Fixed.
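
Side note for reviewers of the new write path: steps 1-7 in serialize() add up to a 33-byte header (8 + 4 + 4 + 8 + 1 + 4 + 4). Here is a minimal, standalone sketch of just that layout; the magic string, checksum code, and bytes-per-checksum values are illustrative assumptions, not taken from this PR:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class HFileBlockHeaderSketch {
  // 8 (magic) + 4 + 4 + 8 + 1 + 4 + 4 = 33 bytes, matching steps 1-7 above.
  static final int HEADER_SIZE = 33;

  static ByteBuffer writeHeader(byte[] magic,
                                int onDiskSizeWithoutHeader,
                                int uncompressedSizeWithoutHeader,
                                long previousBlockOffset,
                                byte checksumTypeCode,
                                int bytesPerChecksum,
                                int onDiskDataSizeWithHeader) {
    ByteBuffer buf = ByteBuffer.allocate(HEADER_SIZE); // big-endian by default
    buf.put(magic, 0, 8);                      // 1. block magic, always 8 bytes
    buf.putInt(onDiskSizeWithoutHeader);       // 2. on-disk (compressed) payload size
    buf.putInt(uncompressedSizeWithoutHeader); // 3. uncompressed payload size
    buf.putLong(previousBlockOffset);          // 4. offset of the previous block
    buf.put(checksumTypeCode);                 // 5. checksum type
    buf.putInt(bytesPerChecksum);              // 6. bytes covered per checksum
    buf.putInt(onDiskDataSizeWithHeader);      // 7. header + data size on disk
    buf.flip();
    return buf;
  }

  public static void main(String[] args) {
    // "DATABLK*" is the HBase-format data block magic; all values illustrative.
    ByteBuffer header = writeHeader(
        "DATABLK*".getBytes(StandardCharsets.US_ASCII),
        1024, 4096, -1L, (byte) 1, 16384, HEADER_SIZE + 1024);
    System.out.println("header bytes: " + header.remaining()); // prints 33
  }
}

Writing the uncompressed size as field 3 is what lets the read side allocate its destination buffer before decompressing.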
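
And the read side: unpack() above reduces to copying the uncompressed header through and decompressing only the payload behind it. A sketch under the assumption of a DEFLATE codec, using java.util.zip's InflaterInputStream as a stand-in for context.getDecompressor() (the PR's codec plumbing differs):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.InflaterInputStream;

public class UnpackSketch {
  static final int HEADER_SIZE = 33;

  // onDiskBlock holds [header][compressed payload] starting at offset.
  static byte[] unpack(byte[] onDiskBlock, int offset,
                       int onDiskSizeWithoutHeader,
                       int uncompressedSizeWithoutHeader) throws IOException {
    byte[] unpacked = new byte[HEADER_SIZE + uncompressedSizeWithoutHeader];
    // The header is stored uncompressed, so it is copied through as-is.
    System.arraycopy(onDiskBlock, offset, unpacked, 0, HEADER_SIZE);
    try (InputStream in = new InflaterInputStream(new ByteArrayInputStream(
        onDiskBlock, offset + HEADER_SIZE, onDiskSizeWithoutHeader))) {
      // Fill the rest of the destination buffer with decompressed bytes.
      int pos = HEADER_SIZE;
      while (pos < unpacked.length) {
        int n = in.read(unpacked, pos, unpacked.length - pos);
        if (n < 0) {
          throw new IOException("Premature end of compressed payload");
        }
        pos += n;
      }
    }
    return unpacked;
  }

  public static void main(String[] args) throws IOException {
    byte[] payload = "hello hfile".getBytes(StandardCharsets.US_ASCII);
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    bos.write(new byte[HEADER_SIZE]); // stand-in header bytes
    try (DeflaterOutputStream dos = new DeflaterOutputStream(bos)) {
      dos.write(payload);
    }
    byte[] onDisk = bos.toByteArray();
    byte[] unpacked = unpack(onDisk, 0, onDisk.length - HEADER_SIZE, payload.length);
    System.out.println(new String(unpacked, HEADER_SIZE, payload.length,
        StandardCharsets.US_ASCII)); // prints "hello hfile"
  }
}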