yihua commented on code in PR #12866:
URL: https://github.com/apache/hudi/pull/12866#discussion_r2105648093


##########
hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileWriterImpl.java:
##########
@@ -0,0 +1,264 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hudi.io.hfile;
+
+import org.apache.hudi.common.util.StringUtils;
+import org.apache.hudi.common.util.io.ByteBufferBackedInputStream;
+import org.apache.hudi.io.ByteArraySeekableDataInputStream;
+import org.apache.hudi.io.compress.CompressionCodec;
+import org.apache.hudi.io.hfile.protobuf.generated.HFileProtos;
+
+import com.google.protobuf.ByteString;
+import com.google.protobuf.CodedOutputStream;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.apache.hudi.io.hfile.DataSize.SIZEOF_INT16;
+
+/**
+ * Pure Java implementation of HFile writer (HFile v3 format) for Hudi.
+ */
+public class HFileWriterImpl implements HFileWriter {
+  private final OutputStream outputStream;
+  private final HFileContext context;
+  // Meta Info map.
+  private final Map<String, byte[]> metaInfo = new HashMap<>();
+  // Data block under construction.
+  private HFileDataBlock currentDataBlock;
+  // Index blocks and file info block.
+  private final HFileRootIndexBlock rootIndexBlock;
+  private final HFileMetaIndexBlock metaIndexBlock;
+  private final HFileFileInfoBlock fileInfoBlock;
+  private long uncompressedBytes;
+  private long totalUncompressedBytes;
+  private long currentOffset;
+  private long loadOnOpenSectionOffset;
+  private final int blockSize;
+
+  // Bookkeeping fields recorded incrementally to keep
+  // memory usage low.
+  private byte[] lastKey = new byte[0];
+  private long firstDataBlockOffset = -1;
+  private long lastDataBlockOffset;
+  private long totalNumberOfRecords = 0;
+
+  public HFileWriterImpl(HFileContext context, OutputStream outputStream) {
+    this.outputStream = outputStream;
+    this.context = context;
+    this.blockSize = this.context.getBlockSize();
+    this.uncompressedBytes = 0L;
+    this.totalUncompressedBytes = 0L;
+    this.currentOffset = 0L;
+    this.currentDataBlock = new HFileDataBlock(context);
+    this.rootIndexBlock = new HFileRootIndexBlock(context);
+    this.metaIndexBlock = new HFileMetaIndexBlock(context);
+    this.fileInfoBlock = new HFileFileInfoBlock(context);
+    initFileInfo();
+  }
+
+  // Append a data kv pair.
+  public void append(String key, byte[] value) throws IOException {
+    byte[] keyBytes = StringUtils.getUTF8Bytes(key);
+    lastKey = keyBytes;
+    // Records with the same key must be put into the same block.
+    if (!Arrays.equals(currentDataBlock.getLastKeyContent(), keyBytes)
+        && uncompressedBytes + keyBytes.length + value.length + 9 > blockSize) {
+      flushCurrentDataBlock();
+      uncompressedBytes = 0;
+    }
+    currentDataBlock.add(keyBytes, value);
+    int uncompressedKeyValueSize = keyBytes.length + value.length;
+    uncompressedBytes += uncompressedKeyValueSize + 9;
+    totalUncompressedBytes += uncompressedKeyValueSize + 9;
+  }
+
+  // Append a metadata kv pair.
+  public void appendMetaInfo(String name, byte[] value) {
+    metaInfo.put(name, value);
+  }
+
+  // Append a file info kv pair.
+  public void appendFileInfo(String name, byte[] value) {
+    fileInfoBlock.add(name, value);
+  }
+
+  @Override
+  public void close() throws IOException {
+    flushCurrentDataBlock();
+    flushMetaBlocks();
+    writeLoadOnOpenSection();
+    writeTrailer();
+    outputStream.flush();
+    outputStream.close();
+  }
+
+  private void flushCurrentDataBlock() throws IOException {
+    // 0. Skip flush if no data.
+    if (currentDataBlock.isEmpty()) {
+      return;
+    }
+    // 1. Update metrics.
+    if (firstDataBlockOffset < 0) {
+      firstDataBlockOffset = currentOffset;
+    }
+    lastDataBlockOffset = currentOffset;
+    totalNumberOfRecords += currentDataBlock.getNumOfEntries();
+    // 2. Flush data block.
+    ByteBuffer blockBuffer = currentDataBlock.serialize();
+    writeBuffer(blockBuffer);
+    // 3. Create an index entry.
+    rootIndexBlock.add(
+        currentDataBlock.getFirstKey(), lastDataBlockOffset, blockBuffer.limit());
+    // 4. Create a new data block.
+    currentDataBlock = new HFileDataBlock(context, currentOffset);
+  }
+
+  // NOTE: the reader assumes that every meta info entry
+  // is written as a separate meta block.
+  private void flushMetaBlocks() throws IOException {
+    for (Map.Entry<String, byte[]> e : metaInfo.entrySet()) {
+      HFileMetaBlock currentMetaBlock = new HFileMetaBlock(context);
+      byte[] key = StringUtils.getUTF8Bytes(e.getKey());
+      currentMetaBlock.add(key, e.getValue());
+      ByteBuffer blockBuffer = currentMetaBlock.serialize();
+      long blockOffset = currentOffset;
+      currentMetaBlock.setStartOffsetInBuff(currentOffset);
+      writeBuffer(blockBuffer);
+      metaIndexBlock.add(
+          currentMetaBlock.getFirstKey(), blockOffset, blockBuffer.limit());
+    }
+  }
+
+  private void writeLoadOnOpenSection() throws IOException {
+    loadOnOpenSectionOffset = currentOffset;
+    // Write Root Data Index
+    ByteBuffer dataIndexBuffer = rootIndexBlock.serialize();
+    rootIndexBlock.setStartOffsetInBuff(currentOffset);
+    writeBuffer(dataIndexBuffer);
+    // Write Meta Data Index.
+    // Note: Even if this block is empty, it has to be present
+    //  due to the behavior of the reader.
+    ByteBuffer metaIndexBuffer = metaIndexBlock.serialize();
+    metaIndexBlock.setStartOffsetInBuff(currentOffset);
+    writeBuffer(metaIndexBuffer);
+    // Write File Info.
+    fileInfoBlock.add("hfile.LASTKEY", addKeyLength(lastKey));
+    fileInfoBlock.setStartOffsetInBuff(currentOffset);
+    writeBuffer(fileInfoBlock.serialize());
+  }
+
+  private void writeTrailer() throws IOException {
+    HFileProtos.TrailerProto.Builder builder = HFileProtos.TrailerProto.newBuilder();
+    builder.setFileInfoOffset(fileInfoBlock.getStartOffsetInBuff());
+    builder.setLoadOnOpenDataOffset(loadOnOpenSectionOffset);
+    builder.setUncompressedDataIndexSize(totalUncompressedBytes);
+    builder.setDataIndexCount(rootIndexBlock.getNumOfEntries());
+    builder.setMetaIndexCount(metaIndexBlock.getNumOfEntries());
+    builder.setEntryCount(totalNumberOfRecords);
+    // TODO: support multiple levels.
+    builder.setNumDataIndexLevels(1);
+    builder.setFirstDataBlockOffset(firstDataBlockOffset);
+    builder.setLastDataBlockOffset(lastDataBlockOffset);
+    builder.setComparatorClassName("NA");
+    // Set codec.
+    if (context.getCompressionCodec() == CompressionCodec.GZIP) {
+      builder.setCompressionCodec(1);
+    } else {
+      builder.setCompressionCodec(2);
+    }
+    builder.setEncryptionKey(ByteString.EMPTY);
+    HFileProtos.TrailerProto trailerProto = builder.build();
+
+    // Encode the varint size into a ByteBuffer
+    // This is necessary to make the parsing work.
+    ByteArrayOutputStream varintBuffer = new ByteArrayOutputStream();
+    CodedOutputStream varintOutput = CodedOutputStream.newInstance(varintBuffer);
+    varintOutput.writeUInt32NoTag(trailerProto.getSerializedSize());
+    varintOutput.flush();
+
+    ByteBuffer trailer = ByteBuffer.allocate(4096);

Review Comment:
   Add a variable to define trailer size (`4096`)
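
   A minimal sketch of that suggestion, assuming the size becomes a class-level
   constant (the name `TRAILER_SIZE` is illustrative, not from the PR):

   ```java
   import java.nio.ByteBuffer;

   class TrailerSizeSketch {
     // Hypothetical constant replacing the hardcoded 4096 used for the trailer.
     static final int TRAILER_SIZE = 4096;

     static ByteBuffer allocateTrailer() {
       // Same behavior as the PR's allocate(4096)/limit(4096), without the magic number.
       ByteBuffer trailer = ByteBuffer.allocate(TRAILER_SIZE);
       trailer.limit(TRAILER_SIZE);
       return trailer;
     }
   }
   ```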



##########
hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileWriterImpl.java:
##########
@@ -0,0 +1,264 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hudi.io.hfile;
+
+import org.apache.hudi.common.util.StringUtils;
+import org.apache.hudi.common.util.io.ByteBufferBackedInputStream;
+import org.apache.hudi.io.ByteArraySeekableDataInputStream;
+import org.apache.hudi.io.compress.CompressionCodec;
+import org.apache.hudi.io.hfile.protobuf.generated.HFileProtos;
+
+import com.google.protobuf.ByteString;
+import com.google.protobuf.CodedOutputStream;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.apache.hudi.io.hfile.DataSize.SIZEOF_INT16;
+
+/**
+ * Pure Java implementation of HFile writer (HFile v3 format) for Hudi.
+ */
+public class HFileWriterImpl implements HFileWriter {
+  private final OutputStream outputStream;
+  private final HFileContext context;
+  // Meta Info map.
+  private final Map<String, byte[]> metaInfo = new HashMap<>();
+  // Data block under construction.
+  private HFileDataBlock currentDataBlock;
+  // Index blocks and file info block.
+  private final HFileRootIndexBlock rootIndexBlock;
+  private final HFileMetaIndexBlock metaIndexBlock;
+  private final HFileFileInfoBlock fileInfoBlock;
+  private long uncompressedBytes;
+  private long totalUncompressedBytes;
+  private long currentOffset;
+  private long loadOnOpenSectionOffset;
+  private final int blockSize;
+
+  // Bookkeeping fields recorded incrementally to keep
+  // memory usage low.
+  private byte[] lastKey = new byte[0];
+  private long firstDataBlockOffset = -1;
+  private long lastDataBlockOffset;
+  private long totalNumberOfRecords = 0;
+
+  public HFileWriterImpl(HFileContext context, OutputStream outputStream) {
+    this.outputStream = outputStream;
+    this.context = context;
+    this.blockSize = this.context.getBlockSize();
+    this.uncompressedBytes = 0L;
+    this.totalUncompressedBytes = 0L;
+    this.currentOffset = 0L;
+    this.currentDataBlock = new HFileDataBlock(context);
+    this.rootIndexBlock = new HFileRootIndexBlock(context);
+    this.metaIndexBlock = new HFileMetaIndexBlock(context);
+    this.fileInfoBlock = new HFileFileInfoBlock(context);
+    initFileInfo();
+  }
+
+  // Append a data kv pair.
+  public void append(String key, byte[] value) throws IOException {
+    byte[] keyBytes = StringUtils.getUTF8Bytes(key);
+    lastKey = keyBytes;
+    // Records with the same key must be put into the same block.
+    if (!Arrays.equals(currentDataBlock.getLastKeyContent(), keyBytes)
+        && uncompressedBytes + keyBytes.length + value.length + 9 > blockSize) {
+      flushCurrentDataBlock();
+      uncompressedBytes = 0;
+    }
+    currentDataBlock.add(keyBytes, value);
+    int uncompressedKeyValueSize = keyBytes.length + value.length;
+    uncompressedBytes += uncompressedKeyValueSize + 9;
+    totalUncompressedBytes += uncompressedKeyValueSize + 9;
+  }
+
+  // Append a metadata kv pair.
+  public void appendMetaInfo(String name, byte[] value) {
+    metaInfo.put(name, value);
+  }
+
+  // Append a file info kv pair.
+  public void appendFileInfo(String name, byte[] value) {
+    fileInfoBlock.add(name, value);
+  }
+
+  @Override
+  public void close() throws IOException {
+    flushCurrentDataBlock();
+    flushMetaBlocks();
+    writeLoadOnOpenSection();
+    writeTrailer();
+    outputStream.flush();
+    outputStream.close();
+  }
+
+  private void flushCurrentDataBlock() throws IOException {
+    // 0. Skip flush if no data.
+    if (currentDataBlock.isEmpty()) {
+      return;
+    }
+    // 1. Update metrics.
+    if (firstDataBlockOffset < 0) {
+      firstDataBlockOffset = currentOffset;
+    }
+    lastDataBlockOffset = currentOffset;
+    totalNumberOfRecords += currentDataBlock.getNumOfEntries();
+    // 2. Flush data block.
+    ByteBuffer blockBuffer = currentDataBlock.serialize();
+    writeBuffer(blockBuffer);
+    // 3. Create an index entry.
+    rootIndexBlock.add(
+        currentDataBlock.getFirstKey(), lastDataBlockOffset, blockBuffer.limit());
+    // 4. Create a new data block.
+    currentDataBlock = new HFileDataBlock(context, currentOffset);
+  }
+
+  // NOTE: the reader assumes that every meta info entry
+  // is written as a separate meta block.
+  private void flushMetaBlocks() throws IOException {
+    for (Map.Entry<String, byte[]> e : metaInfo.entrySet()) {
+      HFileMetaBlock currentMetaBlock = new HFileMetaBlock(context);
+      byte[] key = StringUtils.getUTF8Bytes(e.getKey());
+      currentMetaBlock.add(key, e.getValue());
+      ByteBuffer blockBuffer = currentMetaBlock.serialize();
+      long blockOffset = currentOffset;
+      currentMetaBlock.setStartOffsetInBuff(currentOffset);
+      writeBuffer(blockBuffer);
+      metaIndexBlock.add(
+          currentMetaBlock.getFirstKey(), blockOffset, blockBuffer.limit());
+    }
+  }
+
+  private void writeLoadOnOpenSection() throws IOException {
+    loadOnOpenSectionOffset = currentOffset;
+    // Write Root Data Index
+    ByteBuffer dataIndexBuffer = rootIndexBlock.serialize();
+    rootIndexBlock.setStartOffsetInBuff(currentOffset);
+    writeBuffer(dataIndexBuffer);
+    // Write Meta Data Index.
+    // Note: Even if this block is empty, it has to be present
+    //  due to the behavior of the reader.
+    ByteBuffer metaIndexBuffer = metaIndexBlock.serialize();
+    metaIndexBlock.setStartOffsetInBuff(currentOffset);
+    writeBuffer(metaIndexBuffer);
+    // Write File Info.
+    fileInfoBlock.add("hfile.LASTKEY", addKeyLength(lastKey));
+    fileInfoBlock.setStartOffsetInBuff(currentOffset);
+    writeBuffer(fileInfoBlock.serialize());
+  }
+
+  private void writeTrailer() throws IOException {
+    HFileProtos.TrailerProto.Builder builder = HFileProtos.TrailerProto.newBuilder();
+    builder.setFileInfoOffset(fileInfoBlock.getStartOffsetInBuff());
+    builder.setLoadOnOpenDataOffset(loadOnOpenSectionOffset);
+    builder.setUncompressedDataIndexSize(totalUncompressedBytes);
+    builder.setDataIndexCount(rootIndexBlock.getNumOfEntries());
+    builder.setMetaIndexCount(metaIndexBlock.getNumOfEntries());
+    builder.setEntryCount(totalNumberOfRecords);
+    // TODO: support multiple levels.
+    builder.setNumDataIndexLevels(1);
+    builder.setFirstDataBlockOffset(firstDataBlockOffset);
+    builder.setLastDataBlockOffset(lastDataBlockOffset);
+    builder.setComparatorClassName("NA");
+    // Set codec.
+    if (context.getCompressionCodec() == CompressionCodec.GZIP) {
+      builder.setCompressionCodec(1);
+    } else {
+      builder.setCompressionCodec(2);
+    }
+    builder.setEncryptionKey(ByteString.EMPTY);
+    HFileProtos.TrailerProto trailerProto = builder.build();
+
+    // Encode the varint size into a ByteBuffer
+    // This is necessary to make the parsing work.
+    ByteArrayOutputStream varintBuffer = new ByteArrayOutputStream();
+    CodedOutputStream varintOutput = CodedOutputStream.newInstance(varintBuffer);
+    varintOutput.writeUInt32NoTag(trailerProto.getSerializedSize());
+    varintOutput.flush();
+
+    ByteBuffer trailer = ByteBuffer.allocate(4096);
+    trailer.limit(4096);
+    trailer.put(StringUtils.getUTF8Bytes("TRABLK\"$"));

Review Comment:
   Use `HFileBlockType.TRAILER` to get the magic?
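
   For illustration, the put could read the magic bytes off the enum instead of
   re-encoding the string literal; the `getMagic()` accessor below is an
   assumption about `HFileBlockType`'s API, mirrored here as a stub:

   ```java
   import java.nio.ByteBuffer;
   import java.nio.charset.StandardCharsets;

   class TrailerMagicSketch {
     // Stub mirroring the presumed shape of HFileBlockType's TRAILER entry.
     enum HFileBlockType {
       TRAILER("TRABLK\"$");

       private final byte[] magic;

       HFileBlockType(String magic) {
         this.magic = magic.getBytes(StandardCharsets.UTF_8);
       }

       byte[] getMagic() {
         return magic;
       }
     }

     static void writeMagic(ByteBuffer trailer) {
       // Replaces trailer.put(StringUtils.getUTF8Bytes("TRABLK\"$")).
       trailer.put(HFileBlockType.TRAILER.getMagic());
     }
   }
   ```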



##########
hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileWriterImpl.java:
##########
@@ -0,0 +1,264 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hudi.io.hfile;
+
+import org.apache.hudi.common.util.StringUtils;
+import org.apache.hudi.common.util.io.ByteBufferBackedInputStream;
+import org.apache.hudi.io.ByteArraySeekableDataInputStream;
+import org.apache.hudi.io.compress.CompressionCodec;
+import org.apache.hudi.io.hfile.protobuf.generated.HFileProtos;
+
+import com.google.protobuf.ByteString;
+import com.google.protobuf.CodedOutputStream;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.apache.hudi.io.hfile.DataSize.SIZEOF_INT16;
+
+/**
+ * Pure Java implementation of HFile writer (HFile v3 format) for Hudi.
+ */
+public class HFileWriterImpl implements HFileWriter {
+  private final OutputStream outputStream;
+  private final HFileContext context;
+  // Meta Info map.
+  private final Map<String, byte[]> metaInfo = new HashMap<>();
+  // Data block under construction.
+  private HFileDataBlock currentDataBlock;
+  // Index blocks and file info block.
+  private final HFileRootIndexBlock rootIndexBlock;
+  private final HFileMetaIndexBlock metaIndexBlock;
+  private final HFileFileInfoBlock fileInfoBlock;
+  private long uncompressedBytes;
+  private long totalUncompressedBytes;
+  private long currentOffset;
+  private long loadOnOpenSectionOffset;
+  private final int blockSize;
+
+  // Bookkeeping fields recorded incrementally to keep
+  // memory usage low.
+  private byte[] lastKey = new byte[0];
+  private long firstDataBlockOffset = -1;
+  private long lastDataBlockOffset;
+  private long totalNumberOfRecords = 0;
+
+  public HFileWriterImpl(HFileContext context, OutputStream outputStream) {
+    this.outputStream = outputStream;
+    this.context = context;
+    this.blockSize = this.context.getBlockSize();
+    this.uncompressedBytes = 0L;
+    this.totalUncompressedBytes = 0L;
+    this.currentOffset = 0L;
+    this.currentDataBlock = new HFileDataBlock(context);
+    this.rootIndexBlock = new HFileRootIndexBlock(context);
+    this.metaIndexBlock = new HFileMetaIndexBlock(context);
+    this.fileInfoBlock = new HFileFileInfoBlock(context);
+    initFileInfo();
+  }
+
+  // Append a data kv pair.
+  public void append(String key, byte[] value) throws IOException {
+    byte[] keyBytes = StringUtils.getUTF8Bytes(key);
+    lastKey = keyBytes;
+    // Records with the same key must be put into the same block.
+    if (!Arrays.equals(currentDataBlock.getLastKeyContent(), keyBytes)
+        && uncompressedBytes + keyBytes.length + value.length + 9 > blockSize) {
+      flushCurrentDataBlock();
+      uncompressedBytes = 0;
+    }
+    currentDataBlock.add(keyBytes, value);
+    int uncompressedKeyValueSize = keyBytes.length + value.length;
+    uncompressedBytes += uncompressedKeyValueSize + 9;
+    totalUncompressedBytes += uncompressedKeyValueSize + 9;
+  }
+
+  // Append a metadata kv pair.
+  public void appendMetaInfo(String name, byte[] value) {
+    metaInfo.put(name, value);
+  }
+
+  // Append a file info kv pair.
+  public void appendFileInfo(String name, byte[] value) {
+    fileInfoBlock.add(name, value);
+  }
+
+  @Override
+  public void close() throws IOException {
+    flushCurrentDataBlock();
+    flushMetaBlocks();
+    writeLoadOnOpenSection();
+    writeTrailer();
+    outputStream.flush();
+    outputStream.close();
+  }
+
+  private void flushCurrentDataBlock() throws IOException {
+    // 0. Skip flush if no data.
+    if (currentDataBlock.isEmpty()) {
+      return;
+    }
+    // 1. Update metrics.
+    if (firstDataBlockOffset < 0) {
+      firstDataBlockOffset = currentOffset;
+    }
+    lastDataBlockOffset = currentOffset;
+    totalNumberOfRecords += currentDataBlock.getNumOfEntries();
+    // 2. Flush data block.
+    ByteBuffer blockBuffer = currentDataBlock.serialize();
+    writeBuffer(blockBuffer);
+    // 3. Create an index entry.
+    rootIndexBlock.add(
+        currentDataBlock.getFirstKey(), lastDataBlockOffset, blockBuffer.limit());
+    // 4. Create a new data block.
+    currentDataBlock = new HFileDataBlock(context, currentOffset);
+  }
+
+  // NOTE: the reader assumes that every meta info entry
+  // is written as a separate meta block.
+  private void flushMetaBlocks() throws IOException {
+    for (Map.Entry<String, byte[]> e : metaInfo.entrySet()) {
+      HFileMetaBlock currentMetaBlock = new HFileMetaBlock(context);
+      byte[] key = StringUtils.getUTF8Bytes(e.getKey());
+      currentMetaBlock.add(key, e.getValue());
+      ByteBuffer blockBuffer = currentMetaBlock.serialize();
+      long blockOffset = currentOffset;
+      currentMetaBlock.setStartOffsetInBuff(currentOffset);
+      writeBuffer(blockBuffer);
+      metaIndexBlock.add(
+          currentMetaBlock.getFirstKey(), blockOffset, blockBuffer.limit());
+    }
+  }
+
+  private void writeLoadOnOpenSection() throws IOException {
+    loadOnOpenSectionOffset = currentOffset;
+    // Write Root Data Index
+    ByteBuffer dataIndexBuffer = rootIndexBlock.serialize();
+    rootIndexBlock.setStartOffsetInBuff(currentOffset);
+    writeBuffer(dataIndexBuffer);
+    // Write Meta Data Index.
+    // Note: Even if this block is empty, it has to be present
+    //  due to the behavior of the reader.
+    ByteBuffer metaIndexBuffer = metaIndexBlock.serialize();
+    metaIndexBlock.setStartOffsetInBuff(currentOffset);
+    writeBuffer(metaIndexBuffer);
+    // Write File Info.
+    fileInfoBlock.add("hfile.LASTKEY", addKeyLength(lastKey));
+    fileInfoBlock.setStartOffsetInBuff(currentOffset);
+    writeBuffer(fileInfoBlock.serialize());
+  }
+
+  private void writeTrailer() throws IOException {
+    HFileProtos.TrailerProto.Builder builder = HFileProtos.TrailerProto.newBuilder();
+    builder.setFileInfoOffset(fileInfoBlock.getStartOffsetInBuff());
+    builder.setLoadOnOpenDataOffset(loadOnOpenSectionOffset);
+    builder.setUncompressedDataIndexSize(totalUncompressedBytes);
+    builder.setDataIndexCount(rootIndexBlock.getNumOfEntries());
+    builder.setMetaIndexCount(metaIndexBlock.getNumOfEntries());
+    builder.setEntryCount(totalNumberOfRecords);
+    // TODO: support multiple levels.
+    builder.setNumDataIndexLevels(1);
+    builder.setFirstDataBlockOffset(firstDataBlockOffset);
+    builder.setLastDataBlockOffset(lastDataBlockOffset);
+    builder.setComparatorClassName("NA");
+    // Set codec.
+    if (context.getCompressionCodec() == CompressionCodec.GZIP) {
+      builder.setCompressionCodec(1);
+    } else {
+      builder.setCompressionCodec(2);
+    }
+    builder.setEncryptionKey(ByteString.EMPTY);
+    HFileProtos.TrailerProto trailerProto = builder.build();
+
+    // Encode the varint size into a ByteBuffer
+    // This is necessary to make the parsing work.
+    ByteArrayOutputStream varintBuffer = new ByteArrayOutputStream();
+    CodedOutputStream varintOutput = CodedOutputStream.newInstance(varintBuffer);
+    varintOutput.writeUInt32NoTag(trailerProto.getSerializedSize());
+    varintOutput.flush();
+
+    ByteBuffer trailer = ByteBuffer.allocate(4096);
+    trailer.limit(4096);
+    trailer.put(StringUtils.getUTF8Bytes("TRABLK\"$"));
+    trailer.put(varintBuffer.toByteArray());
+    trailer.put(trailerProto.toByteArray());
+    // Force trailer to have fixed length.
+    trailer.position(4095);
+    trailer.put((byte)3);
+
+    trailer.flip();
+    writeBuffer(trailer);
+  }
+
+  private void writeBuffer(ByteBuffer buffer) throws IOException {
+    // Note: use `write(byte[], off, len)` instead of `write(byte[])`.
+    outputStream.write(buffer.array(), 0, buffer.limit());
+    currentOffset += buffer.limit();
+  }
+
+  private void initFileInfo() {
+    fileInfoBlock.add("hfile.MAX_MEMSTORE_TS_KEY", new byte[]{0});
+  }
+
+  // Note: HFileReaderImpl assumes that the last key
+  //   is prefixed with its two-byte content length.
+  public byte[] addKeyLength(byte[] key) {
+    if (0 == key.length) {
+      return new byte[0];
+    }
+    ByteBuffer byteBuffer = ByteBuffer.allocate(key.length + SIZEOF_INT16);
+    byteBuffer.putShort((short) key.length);
+    byteBuffer.put(key);
+    return byteBuffer.array();
+  }
+
+  // Example to demonstrate the code is runnable.
+  public static void main(String[] args) throws Exception {
+    String fileName = "test.hfile";
+    HFileContext context = HFileContext.builder().build();
+    try (OutputStream outputStream = new DataOutputStream(
+        Files.newOutputStream(Paths.get(fileName)));
+         HFileWriterImpl writer = new HFileWriterImpl(context, outputStream)) {
+      writer.append("key1", "value1".getBytes());
+      writer.append("key2", "value2".getBytes());
+    }
+
+    Path file = Paths.get("test.hfile");
+    byte[] content = Files.readAllBytes(file);
+    try (HFileReader reader = new HFileReaderImpl(
+        new ByteArraySeekableDataInputStream(
+            new ByteBufferBackedInputStream(content)), content.length)) {
+      reader.initializeMetadata();
+      reader.getNumKeyValueEntries();
+    }
+  }

Review Comment:
   Let's add these to a test class instead?
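
   A rough sketch of what such a test could look like, assuming JUnit 5 and that
   the test lives in `org.apache.hudi.io.hfile` (class name and assertion are
   illustrative):

   ```java
   package org.apache.hudi.io.hfile;

   import org.apache.hudi.common.util.io.ByteBufferBackedInputStream;
   import org.apache.hudi.io.ByteArraySeekableDataInputStream;

   import org.junit.jupiter.api.Test;
   import org.junit.jupiter.api.io.TempDir;

   import java.io.OutputStream;
   import java.nio.file.Files;
   import java.nio.file.Path;

   import static org.junit.jupiter.api.Assertions.assertEquals;

   class TestHFileWriterImpl {

     @Test
     void writeAndReadBack(@TempDir Path tempDir) throws Exception {
       Path file = tempDir.resolve("test.hfile");
       HFileContext context = HFileContext.builder().build();
       // Write two entries with the writer under review.
       try (OutputStream out = Files.newOutputStream(file);
            HFileWriterImpl writer = new HFileWriterImpl(context, out)) {
         writer.append("key1", "value1".getBytes());
         writer.append("key2", "value2".getBytes());
       }

       // Read the file back with the existing reader and check the entry count.
       byte[] content = Files.readAllBytes(file);
       try (HFileReader reader = new HFileReaderImpl(
           new ByteArraySeekableDataInputStream(
               new ByteBufferBackedInputStream(content)), content.length)) {
         reader.initializeMetadata();
         assertEquals(2, reader.getNumKeyValueEntries());
       }
     }
   }
   ```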



##########
hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileWriterImpl.java:
##########
@@ -0,0 +1,264 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hudi.io.hfile;
+
+import org.apache.hudi.common.util.StringUtils;
+import org.apache.hudi.common.util.io.ByteBufferBackedInputStream;
+import org.apache.hudi.io.ByteArraySeekableDataInputStream;
+import org.apache.hudi.io.compress.CompressionCodec;
+import org.apache.hudi.io.hfile.protobuf.generated.HFileProtos;
+
+import com.google.protobuf.ByteString;
+import com.google.protobuf.CodedOutputStream;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.apache.hudi.io.hfile.DataSize.SIZEOF_INT16;
+
+/**
+ * Pure Java implementation of HFile writer (HFile v3 format) for Hudi.
+ */
+public class HFileWriterImpl implements HFileWriter {

Review Comment:
   Let's use this native HFile writer and the HBase HFile writer to write the
   same key-value pairs and compare the HFile content, verifying that the two
   files are identical, as the final validation.
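
   A sketch of that validation; the HBase writer setup is elided since its
   factory API varies by HBase version, so `hbaseWriterOutput` stands in for
   bytes produced by `org.apache.hadoop.hbase.io.hfile.HFile`'s writer with the
   same block size and codec:

   ```java
   package org.apache.hudi.io.hfile;

   import java.io.ByteArrayOutputStream;
   import java.util.Arrays;
   import java.util.Map;

   class HFileWriterParitySketch {

     static byte[] writeWithNativeWriter(Map<String, byte[]> kvs) throws Exception {
       // kvs is assumed to be sorted by key, as HFile requires ordered appends.
       ByteArrayOutputStream out = new ByteArrayOutputStream();
       try (HFileWriterImpl writer =
                new HFileWriterImpl(HFileContext.builder().build(), out)) {
         for (Map.Entry<String, byte[]> e : kvs.entrySet()) {
           writer.append(e.getKey(), e.getValue());
         }
       }
       return out.toByteArray();
     }

     static void assertSameContent(Map<String, byte[]> kvs, byte[] hbaseWriterOutput)
         throws Exception {
       // Byte-for-byte comparison of the two writers' output.
       byte[] nativeOutput = writeWithNativeWriter(kvs);
       if (!Arrays.equals(nativeOutput, hbaseWriterOutput)) {
         throw new AssertionError("native and HBase HFile outputs differ");
       }
     }
   }
   ```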



##########
hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileWriterImpl.java:
##########
@@ -0,0 +1,264 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hudi.io.hfile;
+
+import org.apache.hudi.common.util.StringUtils;
+import org.apache.hudi.common.util.io.ByteBufferBackedInputStream;
+import org.apache.hudi.io.ByteArraySeekableDataInputStream;
+import org.apache.hudi.io.compress.CompressionCodec;
+import org.apache.hudi.io.hfile.protobuf.generated.HFileProtos;
+
+import com.google.protobuf.ByteString;
+import com.google.protobuf.CodedOutputStream;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.apache.hudi.io.hfile.DataSize.SIZEOF_INT16;
+
+/**
+ * Pure Java implementation of HFile writer (HFile v3 format) for Hudi.
+ */
+public class HFileWriterImpl implements HFileWriter {
+  private final OutputStream outputStream;
+  private final HFileContext context;
+  // Meta Info map.
+  private final Map<String, byte[]> metaInfo = new HashMap<>();
+  // Data block under construction.
+  private HFileDataBlock currentDataBlock;
+  // Index blocks and file info block.
+  private final HFileRootIndexBlock rootIndexBlock;
+  private final HFileMetaIndexBlock metaIndexBlock;
+  private final HFileFileInfoBlock fileInfoBlock;
+  private long uncompressedBytes;
+  private long totalUncompressedBytes;
+  private long currentOffset;
+  private long loadOnOpenSectionOffset;
+  private final int blockSize;
+
+  // Bookkeeping fields recorded incrementally to keep
+  // memory usage low.
+  private byte[] lastKey = new byte[0];
+  private long firstDataBlockOffset = -1;
+  private long lastDataBlockOffset;
+  private long totalNumberOfRecords = 0;
+
+  public HFileWriterImpl(HFileContext context, OutputStream outputStream) {
+    this.outputStream = outputStream;
+    this.context = context;
+    this.blockSize = this.context.getBlockSize();
+    this.uncompressedBytes = 0L;
+    this.totalUncompressedBytes = 0L;
+    this.currentOffset = 0L;
+    this.currentDataBlock = new HFileDataBlock(context);
+    this.rootIndexBlock = new HFileRootIndexBlock(context);
+    this.metaIndexBlock = new HFileMetaIndexBlock(context);
+    this.fileInfoBlock = new HFileFileInfoBlock(context);
+    initFileInfo();
+  }
+
+  // Append a data kv pair.
+  public void append(String key, byte[] value) throws IOException {
+    byte[] keyBytes = StringUtils.getUTF8Bytes(key);
+    lastKey = keyBytes;
+    // Records with the same key must be put into the same block.
+    if (!Arrays.equals(currentDataBlock.getLastKeyContent(), keyBytes)
+        && uncompressedBytes + keyBytes.length + value.length + 9 > blockSize) {
+      flushCurrentDataBlock();
+      uncompressedBytes = 0;
+    }
+    currentDataBlock.add(keyBytes, value);
+    int uncompressedKeyValueSize = keyBytes.length + value.length;
+    uncompressedBytes += uncompressedKeyValueSize + 9;
+    totalUncompressedBytes += uncompressedKeyValueSize + 9;
+  }
+
+  // Append a metadata kv pair.
+  public void appendMetaInfo(String name, byte[] value) {
+    metaInfo.put(name, value);
+  }
+
+  // Append a file info kv pair.
+  public void appendFileInfo(String name, byte[] value) {
+    fileInfoBlock.add(name, value);
+  }
+
+  @Override
+  public void close() throws IOException {
+    flushCurrentDataBlock();
+    flushMetaBlocks();
+    writeLoadOnOpenSection();
+    writeTrailer();
+    outputStream.flush();
+    outputStream.close();
+  }
+
+  private void flushCurrentDataBlock() throws IOException {
+    // 0. Skip flush if no data.
+    if (currentDataBlock.isEmpty()) {
+      return;
+    }
+    // 1. Update metrics.
+    if (firstDataBlockOffset < 0) {
+      firstDataBlockOffset = currentOffset;
+    }
+    lastDataBlockOffset = currentOffset;
+    totalNumberOfRecords += currentDataBlock.getNumOfEntries();
+    // 2. Flush data block.
+    ByteBuffer blockBuffer = currentDataBlock.serialize();
+    writeBuffer(blockBuffer);
+    // 3. Create an index entry.
+    rootIndexBlock.add(
+        currentDataBlock.getFirstKey(), lastDataBlockOffset, blockBuffer.limit());
+    // 4. Create a new data block.
+    currentDataBlock = new HFileDataBlock(context, currentOffset);
+  }
+
+  // NOTE: the reader assumes that every meta info entry
+  // is written as a separate meta block.
+  private void flushMetaBlocks() throws IOException {
+    for (Map.Entry<String, byte[]> e : metaInfo.entrySet()) {
+      HFileMetaBlock currentMetaBlock = new HFileMetaBlock(context);
+      byte[] key = StringUtils.getUTF8Bytes(e.getKey());
+      currentMetaBlock.add(key, e.getValue());
+      ByteBuffer blockBuffer = currentMetaBlock.serialize();
+      long blockOffset = currentOffset;
+      currentMetaBlock.setStartOffsetInBuff(currentOffset);
+      writeBuffer(blockBuffer);
+      metaIndexBlock.add(
+          currentMetaBlock.getFirstKey(), blockOffset, blockBuffer.limit());
+    }
+  }
+
+  private void writeLoadOnOpenSection() throws IOException {
+    loadOnOpenSectionOffset = currentOffset;
+    // Write Root Data Index
+    ByteBuffer dataIndexBuffer = rootIndexBlock.serialize();
+    rootIndexBlock.setStartOffsetInBuff(currentOffset);
+    writeBuffer(dataIndexBuffer);
+    // Write Meta Data Index.
+    // Note: Even if this block is empty, it has to be present
+    //  due to the behavior of the reader.
+    ByteBuffer metaIndexBuffer = metaIndexBlock.serialize();
+    metaIndexBlock.setStartOffsetInBuff(currentOffset);
+    writeBuffer(metaIndexBuffer);
+    // Write File Info.
+    fileInfoBlock.add("hfile.LASTKEY", addKeyLength(lastKey));
+    fileInfoBlock.setStartOffsetInBuff(currentOffset);
+    writeBuffer(fileInfoBlock.serialize());
+  }
+
+  private void writeTrailer() throws IOException {
+    HFileProtos.TrailerProto.Builder builder = HFileProtos.TrailerProto.newBuilder();
+    builder.setFileInfoOffset(fileInfoBlock.getStartOffsetInBuff());
+    builder.setLoadOnOpenDataOffset(loadOnOpenSectionOffset);
+    builder.setUncompressedDataIndexSize(totalUncompressedBytes);
+    builder.setDataIndexCount(rootIndexBlock.getNumOfEntries());
+    builder.setMetaIndexCount(metaIndexBlock.getNumOfEntries());
+    builder.setEntryCount(totalNumberOfRecords);
+    // TODO: support multiple levels.
+    builder.setNumDataIndexLevels(1);
+    builder.setFirstDataBlockOffset(firstDataBlockOffset);
+    builder.setLastDataBlockOffset(lastDataBlockOffset);
+    builder.setComparatorClassName("NA");
+    // Set codec.
+    if (context.getCompressionCodec() == CompressionCodec.GZIP) {
+      builder.setCompressionCodec(1);
+    } else {
+      builder.setCompressionCodec(2);
+    }
+    builder.setEncryptionKey(ByteString.EMPTY);
+    HFileProtos.TrailerProto trailerProto = builder.build();
+
+    // Encode the varint size into a ByteBuffer
+    // This is necessary to make the parsing work.
+    ByteArrayOutputStream varintBuffer = new ByteArrayOutputStream();
+    CodedOutputStream varintOutput = CodedOutputStream.newInstance(varintBuffer);
+    varintOutput.writeUInt32NoTag(trailerProto.getSerializedSize());
+    varintOutput.flush();
+
+    ByteBuffer trailer = ByteBuffer.allocate(4096);
+    trailer.limit(4096);
+    trailer.put(StringUtils.getUTF8Bytes("TRABLK\"$"));
+    trailer.put(varintBuffer.toByteArray());
+    trailer.put(trailerProto.toByteArray());
+    // Force trailer to have fixed length.
+    trailer.position(4095);
+    trailer.put((byte)3);

Review Comment:
   Similarly, define named constants for these values here.
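
   A minimal sketch under that suggestion (names are illustrative; `3` is
   presumably the HFile major version stamped as the trailer's last byte):

   ```java
   import java.nio.ByteBuffer;

   class TrailerConstantsSketch {
     // Hypothetical constants for the magic values hardcoded in the PR.
     static final int TRAILER_SIZE = 4096;
     static final byte HFILE_MAJOR_VERSION = 3;

     static void padAndStampVersion(ByteBuffer trailer) {
       // Force the trailer to its fixed length and write the version byte last,
       // mirroring trailer.position(4095); trailer.put((byte) 3).
       trailer.position(TRAILER_SIZE - 1);
       trailer.put(HFILE_MAJOR_VERSION);
     }
   }
   ```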



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

