yihua commented on code in PR #12866:
URL: https://github.com/apache/hudi/pull/12866#discussion_r2105468290


##########
hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileBlock.java:
##########
@@ -68,43 +77,38 @@ static class Header {
   }
 
   protected final HFileContext context;
-  protected final byte[] byteBuff;
-  protected final int startOffsetInBuff;
-  protected final int sizeCheckSum;
-  protected final int uncompressedEndOffset;
   private final HFileBlockType blockType;
-  protected final int onDiskSizeWithoutHeader;
-  protected final int uncompressedSizeWithoutHeader;
-  protected final int bytesPerChecksum;
-  private boolean isUnpacked = false;
-  protected byte[] compressedByteBuff;
-  protected int startOffsetInCompressedBuff;
 
+  protected Option<HFileBlockReadAttributes> readAttributesOpt;
+  protected Option<HFileBlockWriteAttributes> writeAttributesOpt;
+
+  /**
+   * Initialize HFileBlock for read.
+   */
   protected HFileBlock(HFileContext context,
                        HFileBlockType blockType,
                        byte[] byteBuff,
                        int startOffsetInBuff) {
     this.context = context;
     this.blockType = blockType;
-    this.onDiskSizeWithoutHeader = readInt(
-        byteBuff, startOffsetInBuff + Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX);
-    this.uncompressedSizeWithoutHeader = readInt(
-        byteBuff, startOffsetInBuff + Header.UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX);
-    this.bytesPerChecksum = readInt(
-        byteBuff, startOffsetInBuff + Header.BYTES_PER_CHECKSUM_INDEX);
-    this.sizeCheckSum = numChecksumBytes(getOnDiskSizeWithHeader(), bytesPerChecksum);
-    if (CompressionCodec.NONE.equals(context.getCompressionCodec())) {
-      isUnpacked = true;
-      this.startOffsetInBuff = startOffsetInBuff;
-      this.byteBuff = byteBuff;
-    } else {
-      this.startOffsetInCompressedBuff = startOffsetInBuff;
-      this.compressedByteBuff = byteBuff;
-      this.startOffsetInBuff = 0;
-      this.byteBuff = allocateBufferForUnpacking();
-    }
-    this.uncompressedEndOffset =
-        this.startOffsetInBuff + HFILEBLOCK_HEADER_SIZE + uncompressedSizeWithoutHeader;
+    HFileBlockReadAttributes readAttributes =
+        new HFileBlockReadAttributes(this.context, byteBuff, startOffsetInBuff);
+    this.readAttributesOpt = Option.of(readAttributes);
+  }
+
+  /**
+   * Initialize HFileBlock for write.
+   */
+  protected HFileBlock(HFileContext context,
+                       HFileBlockType blockType,
+                       long previousBlockOffset) {
+    this.context = context;
+    this.blockType = blockType;
+    HFileBlockWriteAttributes writeAttributes = new HFileBlockWriteAttributes.Builder()
+        .blockSize(context.getBlockSize())
+        .previousBlockOffset(previousBlockOffset)
+        .build();
+    writeAttributesOpt = Option.of(writeAttributes);

Review Comment:
   ```suggestion
       writeAttributesOpt = Option.of(new HFileBlockWriteAttributes.Builder()
           .blockSize(context.getBlockSize())
           .previousBlockOffset(previousBlockOffset)
           .build());
   ```



##########
hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileBlock.java:
##########
@@ -187,34 +162,161 @@ public int getOnDiskSizeWithHeader() {
    * @throws IOException upon decoding and decompression error.
    */
   public void unpack() throws IOException {
-    if (!isUnpacked) {
+    if (!readAttributesOpt.get().isUnpacked) {
       // Should only be called for compressed blocks
       CompressionCodec compression = context.getCompressionCodec();
       if (compression != CompressionCodec.NONE) {
         // Copy the block header which is not compressed
         System.arraycopy(
-            compressedByteBuff, startOffsetInCompressedBuff, byteBuff, 0, HFILEBLOCK_HEADER_SIZE);
+            readAttributesOpt.get().compressedByteBuff,
+            readAttributesOpt.get().startOffsetInCompressedBuff,
+            readAttributesOpt.get().byteBuff,
+            0,
+            HFILEBLOCK_HEADER_SIZE);
         try (InputStream byteBuffInputStream = new ByteArrayInputStream(
-            compressedByteBuff, startOffsetInCompressedBuff + HFILEBLOCK_HEADER_SIZE, onDiskSizeWithoutHeader)) {
+            readAttributesOpt.get().compressedByteBuff,
+            readAttributesOpt.get().startOffsetInCompressedBuff + HFILEBLOCK_HEADER_SIZE,
+            readAttributesOpt.get().onDiskSizeWithoutHeader)) {
           context.getDecompressor().decompress(
               byteBuffInputStream,
-              byteBuff,
+              readAttributesOpt.get().byteBuff,
               HFILEBLOCK_HEADER_SIZE,
-              byteBuff.length - HFILEBLOCK_HEADER_SIZE);
+              readAttributesOpt.get().byteBuff.length - HFILEBLOCK_HEADER_SIZE);
         }
       }
-      isUnpacked = true;
+      readAttributesOpt.get().isUnpacked = true;
     }
   }
 
+  // ================ Below are for Write ================
+
   /**
-   * Allocates new byte buffer for the uncompressed bytes.
-   *
-   * @return a new byte array based on the size of uncompressed data, holding the same header
-   * bytes.
+   * Returns the serialized "data" part of the block.
+   * This function should be implemented by each block type separately.
+   * By default, it returns an empty payload.
+   */
+  public ByteBuffer getPayload() {
+    return ByteBuffer.allocate(0);
+  }
+
+  /**
+   * Returns the serialized block including header, data, and checksum.
+   */
+  public ByteBuffer serialize() throws IOException {
+    // Block payload.
+    ByteBuffer payloadBuff = getPayload();
+    // Compress if specified.
+    ByteBuffer compressedPayload = compress(payloadBuff);
+    // Buffer for building block.
+    ByteBuffer buf = ByteBuffer.allocate(Math.max(
+        context.getBlockSize() * 2,
+        compressedPayload.limit() + HFILEBLOCK_HEADER_SIZE * 2));
+    // Block header
+    // 1. Magic is always 8 bytes.
+    buf.put(blockType.getMagic(), 0, 8);
+    // 2. onDiskSizeWithoutHeader.
+    buf.putInt(compressedPayload.limit());
+    // 3. uncompressedSizeWithoutHeader.
+    buf.putInt(payloadBuff.limit());
+    // 4. previous block offset.
+    buf.putLong(writeAttributesOpt.get().previousBlockOffset);
+    // TODO: set checksum type properly.
+    // 5. checksum type.
+    buf.put(CHECKSUM_TYPE.getCode());
+    // TODO: verify whether Hudi always uses 4 bytes for the checksum.
+    // 6. bytes covered per checksum.
+    buf.putInt(DEFAULT_BYTES_PER_CHECKSUM);
+    // 7. onDiskDataSizeWithHeader
+    int onDiskDataSizeWithHeader =
+        HFileBlock.HFILEBLOCK_HEADER_SIZE + payloadBuff.limit();
+    buf.putInt(onDiskDataSizeWithHeader);
+    // 8. payload.
+    buf.put(compressedPayload);
+    // 9. Checksum
+    buf.put(calcChecksumBytes(CHECKSUM_TYPE));
+
+    // Update sizes
+    buf.flip();
+    return buf;
+  }
+
+  protected ByteBuffer compress(ByteBuffer payload) throws IOException {
+    if (context.getCompressionCodec() == GZIP) {
+      byte[] temp = new byte[payload.remaining()];
+      payload.get(temp);
+      return ByteBuffer.wrap(new HoodieAirliftGzipDecompressor().compress(temp));
+    } else {
+      return payload;
+    }
+  }
+
+  // TODO: support non-NULL checksum types.
+  /**
+   * Returns checksum bytes if checksum type is not NULL.
+   */
+  private byte[] calcChecksumBytes(ChecksumType type) {
+    if (type == ChecksumType.NULL) {
+      return EMPTY_BYTE_ARRAY;
+    } else if (type == ChecksumType.CRC32) {
+      return EMPTY_BYTE_ARRAY;
+    } else {
+      return EMPTY_BYTE_ARRAY;
+    }

Review Comment:
   Return `EMPTY_BYTE_ARRAY` only for the `NULL` checksum type; for all other types, throw `HoodieNotSupportedException`.
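
   A minimal sketch of what that could look like (this assumes `HoodieNotSupportedException` is accessible from the `hudi-io` module; adjust the exception type if it is not):
   ```java
   private byte[] calcChecksumBytes(ChecksumType type) {
     if (type == ChecksumType.NULL) {
       // The NULL checksum type writes no checksum bytes.
       return EMPTY_BYTE_ARRAY;
     }
     // All other checksum types are not implemented yet.
     throw new HoodieNotSupportedException("Checksum type not supported: " + type);
   }
   ```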



##########
hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileBlock.java:
##########
@@ -187,34 +162,161 @@ public int getOnDiskSizeWithHeader() {
    * @throws IOException upon decoding and decompression error.
    */
   public void unpack() throws IOException {
-    if (!isUnpacked) {
+    if (!readAttributesOpt.get().isUnpacked) {
       // Should only be called for compressed blocks
       CompressionCodec compression = context.getCompressionCodec();
       if (compression != CompressionCodec.NONE) {
         // Copy the block header which is not compressed
         System.arraycopy(
-            compressedByteBuff, startOffsetInCompressedBuff, byteBuff, 0, HFILEBLOCK_HEADER_SIZE);
+            readAttributesOpt.get().compressedByteBuff,
+            readAttributesOpt.get().startOffsetInCompressedBuff,
+            readAttributesOpt.get().byteBuff,
+            0,
+            HFILEBLOCK_HEADER_SIZE);
         try (InputStream byteBuffInputStream = new ByteArrayInputStream(
-            compressedByteBuff, startOffsetInCompressedBuff + HFILEBLOCK_HEADER_SIZE, onDiskSizeWithoutHeader)) {
+            readAttributesOpt.get().compressedByteBuff,
+            readAttributesOpt.get().startOffsetInCompressedBuff + HFILEBLOCK_HEADER_SIZE,
+            readAttributesOpt.get().onDiskSizeWithoutHeader)) {
           context.getDecompressor().decompress(
               byteBuffInputStream,
-              byteBuff,
+              readAttributesOpt.get().byteBuff,
               HFILEBLOCK_HEADER_SIZE,
-              byteBuff.length - HFILEBLOCK_HEADER_SIZE);
+              readAttributesOpt.get().byteBuff.length - HFILEBLOCK_HEADER_SIZE);
         }
       }
-      isUnpacked = true;
+      readAttributesOpt.get().isUnpacked = true;
     }
   }
 
+  // ================ Below are for Write ================
+
   /**
-   * Allocates new byte buffer for the uncompressed bytes.
-   *
-   * @return a new byte array based on the size of uncompressed data, holding the same header
-   * bytes.
+   * Returns the serialized "data" part of the block.
+   * This function should be implemented by each block type separately.
+   * By default, it returns an empty payload.
+   */
+  public ByteBuffer getPayload() {
+    return ByteBuffer.allocate(0);
+  }
+
+  /**
+   * Returns the serialized block including header, data, and checksum.
+   */
+  public ByteBuffer serialize() throws IOException {
+    // Block payload.
+    ByteBuffer payloadBuff = getPayload();
+    // Compress if specified.
+    ByteBuffer compressedPayload = compress(payloadBuff);
+    // Buffer for building block.
+    ByteBuffer buf = ByteBuffer.allocate(Math.max(
+        context.getBlockSize() * 2,
+        compressedPayload.limit() + HFILEBLOCK_HEADER_SIZE * 2));
+    // Block header
+    // 1. Magic is always 8 bytes.
+    buf.put(blockType.getMagic(), 0, 8);
+    // 2. onDiskSizeWithoutHeader.
+    buf.putInt(compressedPayload.limit());
+    // 3. uncompressedSizeWithoutHeader.
+    buf.putInt(payloadBuff.limit());
+    // 4. previous block offset.
+    buf.putLong(writeAttributesOpt.get().previousBlockOffset);
+    // TODO: set checksum type properly.
+    // 5. checksum type.
+    buf.put(CHECKSUM_TYPE.getCode());
+    // TODO: verify whether Hudi always uses 4 bytes for the checksum.
+    // 6. bytes covered per checksum.
+    buf.putInt(DEFAULT_BYTES_PER_CHECKSUM);
+    // 7. onDiskDataSizeWithHeader
+    int onDiskDataSizeWithHeader =
+        HFileBlock.HFILEBLOCK_HEADER_SIZE + payloadBuff.limit();
+    buf.putInt(onDiskDataSizeWithHeader);
+    // 8. payload.
+    buf.put(compressedPayload);
+    // 9. Checksum
+    buf.put(calcChecksumBytes(CHECKSUM_TYPE));
+
+    // Update sizes
+    buf.flip();
+    return buf;
+  }
+
+  protected ByteBuffer compress(ByteBuffer payload) throws IOException {
+    if (context.getCompressionCodec() == GZIP) {
+      byte[] temp = new byte[payload.remaining()];
+      payload.get(temp);
+      return ByteBuffer.wrap(new HoodieAirliftGzipDecompressor().compress(temp));
+    } else {
+      return payload;
+    }

Review Comment:
   Do not hardcode the compressor class here; use `context.getDecompressor().compress` instead, and let the `context` throw an exception on an unsupported compression codec.
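
   A rough sketch of that direction (this presumes the object returned by `context.getDecompressor()` also exposes a `compress(byte[])` method, as suggested above):
   ```java
   protected ByteBuffer compress(ByteBuffer payload) throws IOException {
     if (context.getCompressionCodec() == CompressionCodec.NONE) {
       // Nothing to compress.
       return payload;
     }
     byte[] temp = new byte[payload.remaining()];
     payload.get(temp);
     // The context-provided implementation picks the codec and is
     // responsible for throwing on unsupported codecs.
     return ByteBuffer.wrap(context.getDecompressor().compress(temp));
   }
   ```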



##########
hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileBlock.java:
##########
@@ -187,34 +162,161 @@ public int getOnDiskSizeWithHeader() {
    * @throws IOException upon decoding and decompression error.
    */
   public void unpack() throws IOException {
-    if (!isUnpacked) {
+    if (!readAttributesOpt.get().isUnpacked) {
       // Should only be called for compressed blocks
       CompressionCodec compression = context.getCompressionCodec();
       if (compression != CompressionCodec.NONE) {
         // Copy the block header which is not compressed
         System.arraycopy(
-            compressedByteBuff, startOffsetInCompressedBuff, byteBuff, 0, HFILEBLOCK_HEADER_SIZE);
+            readAttributesOpt.get().compressedByteBuff,
+            readAttributesOpt.get().startOffsetInCompressedBuff,
+            readAttributesOpt.get().byteBuff,
+            0,
+            HFILEBLOCK_HEADER_SIZE);
         try (InputStream byteBuffInputStream = new ByteArrayInputStream(
-            compressedByteBuff, startOffsetInCompressedBuff + HFILEBLOCK_HEADER_SIZE, onDiskSizeWithoutHeader)) {
+            readAttributesOpt.get().compressedByteBuff,
+            readAttributesOpt.get().startOffsetInCompressedBuff + HFILEBLOCK_HEADER_SIZE,
+            readAttributesOpt.get().onDiskSizeWithoutHeader)) {
           context.getDecompressor().decompress(
               byteBuffInputStream,
-              byteBuff,
+              readAttributesOpt.get().byteBuff,
               HFILEBLOCK_HEADER_SIZE,
-              byteBuff.length - HFILEBLOCK_HEADER_SIZE);
+              readAttributesOpt.get().byteBuff.length - HFILEBLOCK_HEADER_SIZE);
         }
       }
-      isUnpacked = true;
+      readAttributesOpt.get().isUnpacked = true;
     }
   }
 
+  // ================ Below are for Write ================
+
   /**
-   * Allocates new byte buffer for the uncompressed bytes.
-   *
-   * @return a new byte array based on the size of uncompressed data, holding the same header
-   * bytes.
+   * Returns the serialized "data" part of the block.
+   * This function should be implemented by each block type separately.
+   * By default, it returns an empty payload.
+   */
+  public ByteBuffer getPayload() {
+    return ByteBuffer.allocate(0);
+  }
+
+  /**
+   * Returns the serialized block including header, data, and checksum.
+   */
+  public ByteBuffer serialize() throws IOException {
+    // Block payload.
+    ByteBuffer payloadBuff = getPayload();
+    // Compress if specified.
+    ByteBuffer compressedPayload = compress(payloadBuff);
+    // Buffer for building block.
+    ByteBuffer buf = ByteBuffer.allocate(Math.max(
+        context.getBlockSize() * 2,
+        compressedPayload.limit() + HFILEBLOCK_HEADER_SIZE * 2));
+    // Block header
+    // 1. Magic is always 8 bytes.
+    buf.put(blockType.getMagic(), 0, 8);
+    // 2. onDiskSizeWithoutHeader.
+    buf.putInt(compressedPayload.limit());
+    // 3. uncompressedSizeWithoutHeader.
+    buf.putInt(payloadBuff.limit());
+    // 4. previous block offset.
+    buf.putLong(writeAttributesOpt.get().previousBlockOffset);
+    // TODO: set checksum type properly.
+    // 5. checksum type.
+    buf.put(CHECKSUM_TYPE.getCode());
+    // TODO: verify whether Hudi always uses 4 bytes for the checksum.
+    // 6. bytes covered per checksum.
+    buf.putInt(DEFAULT_BYTES_PER_CHECKSUM);
+    // 7. onDiskDataSizeWithHeader
+    int onDiskDataSizeWithHeader =
+        HFileBlock.HFILEBLOCK_HEADER_SIZE + payloadBuff.limit();
+    buf.putInt(onDiskDataSizeWithHeader);
+    // 8. payload.
+    buf.put(compressedPayload);
+    // 9. Checksum
+    buf.put(calcChecksumBytes(CHECKSUM_TYPE));
+
+    // Update sizes
+    buf.flip();
+    return buf;
+  }
+
+  protected ByteBuffer compress(ByteBuffer payload) throws IOException {
+    if (context.getCompressionCodec() == GZIP) {
+      byte[] temp = new byte[payload.remaining()];
+      payload.get(temp);
+      return ByteBuffer.wrap(new HoodieAirliftGzipDecompressor().compress(temp));
+    } else {
+      return payload;
+    }
+  }
+
+  // TODO: support non-NULL checksum types.

Review Comment:
   Add the JIRA ticket number.
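
   For example (`HUDI-XXXX` below is a placeholder, not the actual ticket number):
   ```java
   // TODO(HUDI-XXXX): support non-NULL checksum types.
   ```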



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
