yihua commented on code in PR #12866:
URL: https://github.com/apache/hudi/pull/12866#discussion_r2105647137
##########
hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileWriterImpl.java:
##########
@@ -0,0 +1,264 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hudi.io.hfile;
+
+import org.apache.hudi.common.util.StringUtils;
+import org.apache.hudi.common.util.io.ByteBufferBackedInputStream;
+import org.apache.hudi.io.ByteArraySeekableDataInputStream;
+import org.apache.hudi.io.compress.CompressionCodec;
+import org.apache.hudi.io.hfile.protobuf.generated.HFileProtos;
+
+import com.google.protobuf.ByteString;
+import com.google.protobuf.CodedOutputStream;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.apache.hudi.io.hfile.DataSize.SIZEOF_INT16;
+
+/**
+ * Pure Java implementation of HFile writer (HFile v3 format) for Hudi.
+ */
+public class HFileWriterImpl implements HFileWriter {
+  private final OutputStream outputStream;
+  private final HFileContext context;
+  // Meta Info map.
+  private final Map<String, byte[]> metaInfo = new HashMap<>();
+  // Data block under construction.
+  private HFileDataBlock currentDataBlock;
+  // Meta block under construction.
+  private final HFileRootIndexBlock rootIndexBlock;
+  private final HFileMetaIndexBlock metaIndexBlock;
+  private final HFileFileInfoBlock fileInfoBlock;
+  private long uncompressedBytes;
+  private long totalUncompressedBytes;
+  private long currentOffset;
+  private long loadOnOpenSectionOffset;
+  private final int blockSize;
+
+  // Variables used to record necessary information to reduce
+  // the memory usage.
+  private byte[] lastKey = new byte[0];
+  private long firstDataBlockOffset = -1;
+  private long lastDataBlockOffset;
+  private long totalNumberOfRecords = 0;
+
+  public HFileWriterImpl(HFileContext context, OutputStream outputStream) {
+    this.outputStream = outputStream;
+    this.context = context;
+    this.blockSize = this.context.getBlockSize();
+    this.uncompressedBytes = 0L;
+    this.totalUncompressedBytes = 0L;
+    this.currentOffset = 0L;
+    this.currentDataBlock = new HFileDataBlock(context);
+    this.rootIndexBlock = new HFileRootIndexBlock(context);
+    this.metaIndexBlock = new HFileMetaIndexBlock(context);
+    this.fileInfoBlock = new HFileFileInfoBlock(context);
+    initFileInfo();
+  }
+
+  // Append a data kv pair.
+  public void append(String key, byte[] value) throws IOException {
+    byte[] keyBytes = StringUtils.getUTF8Bytes(key);
+    lastKey = keyBytes;
+    // Records with the same key must be put into the same block.
+    if (!Arrays.equals(currentDataBlock.getLastKeyContent(), keyBytes)
+        && uncompressedBytes + keyBytes.length + value.length + 9 > blockSize) {
+      flushCurrentDataBlock();
+      uncompressedBytes = 0;
+    }
+    currentDataBlock.add(keyBytes, value);
+    int uncompressedKeyValueSize = keyBytes.length + value.length;
+    uncompressedBytes += uncompressedKeyValueSize + 9;
+    totalUncompressedBytes += uncompressedKeyValueSize + 9;
+  }
+
+  // Append a metadata kv pair.
+  public void appendMetaInfo(String name, byte[] value) {
+    metaInfo.put(name, value);
+  }
+
+  // Append a file info kv pair.
+  public void appendFileInfo(String name, byte[] value) {
+    fileInfoBlock.add(name, value);
+  }
+
+  @Override
+  public void close() throws IOException {
+    flushCurrentDataBlock();
+    flushMetaBlocks();
+    writeLoadOnOpenSection();
+    writeTrailer();
+    outputStream.flush();
+    outputStream.close();
+  }
+
+  private void flushCurrentDataBlock() throws IOException {
+    // 0. Skip flush if no data.
+    if (currentDataBlock.isEmpty()) {
+      return;
+    }
+    // 1. Update metrics.
+    if (firstDataBlockOffset < 0) {
+      firstDataBlockOffset = currentOffset;
+    }
+    lastDataBlockOffset = currentOffset;
+    totalNumberOfRecords += currentDataBlock.getNumOfEntries();
+    // 2. Flush data block.
+    ByteBuffer blockBuffer = currentDataBlock.serialize();
+    writeBuffer(blockBuffer);
+    // 3. Create an index entry.
+    rootIndexBlock.add(
+        currentDataBlock.getFirstKey(), lastDataBlockOffset, blockBuffer.limit());
+    // 4. Create a new data block.
+    currentDataBlock = new HFileDataBlock(context, currentOffset);
+  }
+
+  // NOTE that the reader assumes that every meta info piece
+  // should be a separate meta block.
+  private void flushMetaBlocks() throws IOException {
+    for (Map.Entry<String, byte[]> e : metaInfo.entrySet()) {
+      HFileMetaBlock currentMetaBlock = new HFileMetaBlock(context);
+      byte[] key = StringUtils.getUTF8Bytes(e.getKey());
+      currentMetaBlock.add(key, e.getValue());
+      ByteBuffer blockBuffer = currentMetaBlock.serialize();
+      long blockOffset = currentOffset;
+      currentMetaBlock.setStartOffsetInBuff(currentOffset);
+      writeBuffer(blockBuffer);
+      metaIndexBlock.add(
+          currentMetaBlock.getFirstKey(), blockOffset, blockBuffer.limit());
+    }
+  }
+
+  private void writeLoadOnOpenSection() throws IOException {
+    loadOnOpenSectionOffset = currentOffset;
+    // Write Root Data Index.
+    ByteBuffer dataIndexBuffer = rootIndexBlock.serialize();
+    rootIndexBlock.setStartOffsetInBuff(currentOffset);
+    writeBuffer(dataIndexBuffer);
+    // Write Meta Data Index.
+    // Note: Even if this block is empty, it has to be there
+    // due to the behavior of the reader.
+    ByteBuffer metaIndexBuffer = metaIndexBlock.serialize();
+    metaIndexBlock.setStartOffsetInBuff(currentOffset);
+    writeBuffer(metaIndexBuffer);
+    // Write File Info.
+    fileInfoBlock.add("hfile.LASTKEY", addKeyLength(lastKey));
+    fileInfoBlock.setStartOffsetInBuff(currentOffset);
+    writeBuffer(fileInfoBlock.serialize());
+  }
+
+  private void writeTrailer() throws IOException {
+    HFileProtos.TrailerProto.Builder builder = HFileProtos.TrailerProto.newBuilder();
+    builder.setFileInfoOffset(fileInfoBlock.getStartOffsetInBuff());
+    builder.setLoadOnOpenDataOffset(loadOnOpenSectionOffset);
+    builder.setUncompressedDataIndexSize(totalUncompressedBytes);
+    builder.setDataIndexCount(rootIndexBlock.getNumOfEntries());
+    builder.setMetaIndexCount(metaIndexBlock.getNumOfEntries());
+    builder.setEntryCount(totalNumberOfRecords);
+    // TODO: support multiple levels.
+    builder.setNumDataIndexLevels(1);
+    builder.setFirstDataBlockOffset(firstDataBlockOffset);
+    builder.setLastDataBlockOffset(lastDataBlockOffset);
+    builder.setComparatorClassName("NA");
+    // Set codec.
+    if (context.getCompressionCodec() == CompressionCodec.GZIP) {
+      builder.setCompressionCodec(1);
+    } else {
+      builder.setCompressionCodec(2);
+    }
+    builder.setEncryptionKey(ByteString.EMPTY);
+    HFileProtos.TrailerProto trailerProto = builder.build();
+
+    // Encode the varint size into a ByteBuffer
+    // This is necessary to make the parsing work.
+    ByteArrayOutputStream varintBuffer = new ByteArrayOutputStream();
+    CodedOutputStream varintOutput = CodedOutputStream.newInstance(varintBuffer);
+    varintOutput.writeUInt32NoTag(trailerProto.getSerializedSize());
+    varintOutput.flush();

Review Comment:
   Reuse the `getVariableLengthEncodes`?

##########
hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileBlock.java:
##########
@@ -187,34 +162,161 @@ public int getOnDiskSizeWithHeader() {
    * @throws IOException upon decoding and decompression error.
    */
   public void unpack() throws IOException {
-    if (!isUnpacked) {
+    if (!readAttributesOpt.get().isUnpacked) {
       // Should only be called for compressed blocks
       CompressionCodec compression = context.getCompressionCodec();
       if (compression != CompressionCodec.NONE) {
         // Copy the block header which is not compressed
         System.arraycopy(
-            compressedByteBuff, startOffsetInCompressedBuff, byteBuff, 0, HFILEBLOCK_HEADER_SIZE);
+            readAttributesOpt.get().compressedByteBuff,
+            readAttributesOpt.get().startOffsetInCompressedBuff,
+            readAttributesOpt.get().byteBuff,
+            0,
+            HFILEBLOCK_HEADER_SIZE);
         try (InputStream byteBuffInputStream = new ByteArrayInputStream(
-            compressedByteBuff, startOffsetInCompressedBuff + HFILEBLOCK_HEADER_SIZE, onDiskSizeWithoutHeader)) {
+            readAttributesOpt.get().compressedByteBuff,
+            readAttributesOpt.get().startOffsetInCompressedBuff + HFILEBLOCK_HEADER_SIZE,
+            readAttributesOpt.get().onDiskSizeWithoutHeader)) {
           context.getDecompressor().decompress(
               byteBuffInputStream,
-              byteBuff,
+              readAttributesOpt.get().byteBuff,
               HFILEBLOCK_HEADER_SIZE,
-              byteBuff.length - HFILEBLOCK_HEADER_SIZE);
+              readAttributesOpt.get().byteBuff.length - HFILEBLOCK_HEADER_SIZE);
         }
       }
-      isUnpacked = true;
+      readAttributesOpt.get().isUnpacked = true;
     }
   }
 
+  // ================ Below are for Write ================
+
   /**
-   * Allocates new byte buffer for the uncompressed bytes.
-   *
-   * @return a new byte array based on the size of uncompressed data, holding the same header
-   * bytes.
+   * Returns serialized "data" part of the block.
+   * This function should be implemented by each block type separately.
+   * By default, it does nothing.
+   */
+  public ByteBuffer getPayload() {
+    return ByteBuffer.allocate(0);
+  }
+
+  /**
+   * Return serialized block including header, data, checksum.
+   */
+  public ByteBuffer serialize() throws IOException {
+    // Block payload.
+    ByteBuffer payloadBuff = getPayload();
+    // Compress if specified.
+    ByteBuffer compressedPayload = compress(payloadBuff);
+    // Buffer for building block.
+    ByteBuffer buf = ByteBuffer.allocate(Math.max(
+        context.getBlockSize() * 2,
+        compressedPayload.limit() + HFILEBLOCK_HEADER_SIZE * 2));
+    // Block header
+    // 1. Magic is always 8 bytes.
+    buf.put(blockType.getMagic(), 0, 8);
+    // 2. onDiskSizeWithoutHeader.
+    buf.putInt(compressedPayload.limit());
+    // 3. uncompressedSizeWithoutHeader.
+    buf.putInt(payloadBuff.limit());
+    // 4. previous block offset.
+    buf.putLong(writeAttributesOpt.get().previousBlockOffset);
+    // TODO: set checksum type properly.
+    // 5. checksum type.
+    buf.put(CHECKSUM_TYPE.getCode());
+    // TODO: verify whether hudi always uses 4 bytes for the checksum.
+    // 6. bytes covered per checksum.
+    buf.putInt(DEFAULT_BYTES_PER_CHECKSUM);
+    // 7. onDiskDataSizeWithHeader
+    int onDiskDataSizeWithHeader =
+        HFileBlock.HFILEBLOCK_HEADER_SIZE + payloadBuff.limit();
+    buf.putInt(onDiskDataSizeWithHeader);
+    // 8. payload.
+    buf.put(compressedPayload);
+    // 9. Checksum
+    buf.put(calcChecksumBytes(CHECKSUM_TYPE));
+
+    // Update sizes
+    buf.flip();
+    return buf;
+  }
+
+  protected ByteBuffer compress(ByteBuffer payload) throws IOException {
+    if (context.getCompressionCodec() == GZIP) {
+      byte[] temp = new byte[payload.remaining()];
+      payload.get(temp);
+      return ByteBuffer.wrap(new HoodieAirliftGzipDecompressor().compress(temp));
+    } else {
+      return payload;
+    }
+  }
+
+  // TODO: support non-NULL checksum types.
+  /**
+   * Returns checksum bytes if checksum type is not NULL.
+   */
+  private byte[] calcChecksumBytes(ChecksumType type) {
+    if (type == ChecksumType.NULL) {
+      return EMPTY_BYTE_ARRAY;
+    } else if (type == ChecksumType.CRC32) {
+      return EMPTY_BYTE_ARRAY;
+    } else {
+      return EMPTY_BYTE_ARRAY;
+    }
+  }
+
+  /**
+   * Sets start offset of the block in the buffer.
+   */
+  public void setStartOffsetInBuff(long startOffsetInBuff) {
+    this.writeAttributesOpt.get().startOffsetInBuff = startOffsetInBuff;
+  }
+
+  /**
+   * Gets start offset of the block in the buffer.
+   */
+  public long getStartOffsetInBuff() {
+    return this.writeAttributesOpt.get().startOffsetInBuff;
+  }
+
+  /**
+   * Returns the number of bytes that should be used by checksum.
+   * @param onDiskBlockBytesWithHeaderSize
+   * @param bytesPerChecksum
+   * @return
+   */
+  private long calcNumChecksumBytes(int onDiskBlockBytesWithHeaderSize, int bytesPerChecksum) {
+    return numBytes(onDiskBlockBytesWithHeaderSize, bytesPerChecksum);
+  }
+
+  /**
+   * Returns the number of bytes needed to store the checksums for a specified data size
+   * @param datasize number of bytes of data
+   * @param bytesPerChecksum number of bytes in a checksum chunk
+   * @return The number of bytes needed to store the checksum values
   */
-  protected byte[] allocateBufferForUnpacking() {
-    int capacity = HFILEBLOCK_HEADER_SIZE + uncompressedSizeWithoutHeader + sizeCheckSum;
-    return new byte[capacity];
+  static long numBytes(long datasize, int bytesPerChecksum) {
+    return numChunks(datasize, bytesPerChecksum) * HFileBlock.CHECKSUM_SIZE;
+  }
+
+  /**
+   * Returns the number of checksum chunks needed to store the checksums for a specified data size
+   * @param datasize number of bytes of data
+   * @param bytesPerChecksum number of bytes in a checksum chunk
+   * @return The number of checksum chunks
+   */
+  static long numChunks(long datasize, int bytesPerChecksum) {
+    long numChunks = datasize / bytesPerChecksum;
+    if (datasize % bytesPerChecksum != 0) {
+      numChunks++;
+    }
+    return numChunks;
+  }
+
+  static byte[] getVariableLengthEncodes(int length) throws IOException {

Review Comment:
   ```suggestion
     static byte[] getVariableLengthEncodedBytes(int length) throws IOException {
   ```
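
Editor's note: for readers following the two review comments above, here is a minimal, hypothetical sketch of what a shared varint helper could look like and how `writeTrailer` might reuse it. The body of `getVariableLengthEncodes` is not shown in the hunk (it is cut off at the signature), so this is only an illustration built on the protobuf `CodedOutputStream` calls already used in `writeTrailer`, using the renamed signature suggested by the reviewer; the actual implementation in the PR may differ.

```java
import com.google.protobuf.CodedOutputStream;

import java.io.ByteArrayOutputStream;
import java.io.IOException;

final class VarintEncodingSketch {

  // Hypothetical shared helper using the reviewer's suggested name; the real
  // method in HFileBlock may be implemented differently.
  static byte[] getVariableLengthEncodedBytes(int length) throws IOException {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    CodedOutputStream output = CodedOutputStream.newInstance(buffer);
    // Protobuf unsigned varint: 1 to 5 bytes for a non-negative int.
    output.writeUInt32NoTag(length);
    output.flush();
    return buffer.toByteArray();
  }

  public static void main(String[] args) throws IOException {
    // 300 encodes to two bytes: 0xAC 0x02.
    for (byte b : getVariableLengthEncodedBytes(300)) {
      System.out.printf("0x%02X ", b);
    }
  }
}
```

With a helper like this, the inline `varintBuffer`/`varintOutput` block in `writeTrailer` collapses to a single call before the trailer bytes are written out, which appears to be the intent of the first review comment.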

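Editor's note: as a side note on the checksum sizing helpers in the `HFileBlock` hunk, here is a small standalone restatement of the `numChunks`/`numBytes` arithmetic. The constants are assumptions for illustration only: `CHECKSUM_SIZE` is taken as 4 bytes (the size of one CRC32 value) and the bytes-per-checksum setting as 16 KB, matching HBase conventions rather than values shown in this diff.

```java
final class ChecksumSizingExample {

  // Assumed values for illustration; the actual constants live in HFileBlock.
  static final int CHECKSUM_SIZE = 4;
  static final int DEFAULT_BYTES_PER_CHECKSUM = 16 * 1024;

  // Same arithmetic as numChunks in the hunk above, written as ceiling division.
  static long numChunks(long datasize, int bytesPerChecksum) {
    return (datasize + bytesPerChecksum - 1) / bytesPerChecksum;
  }

  public static void main(String[] args) {
    long datasize = 40_000;
    long chunks = numChunks(datasize, DEFAULT_BYTES_PER_CHECKSUM); // 3 chunks
    long checksumBytes = chunks * CHECKSUM_SIZE;                   // 12 bytes
    System.out.println(chunks + " chunks, " + checksumBytes + " checksum bytes");
  }
}
```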