vinothchandar commented on code in PR #10241: URL: https://github.com/apache/hudi/pull/10241#discussion_r1420435335
########## hudi-io/src/main/java/org/apache/hudi/io/compress/CompressionCodec.java: ########## @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.io.compress; Review Comment: Are there other parts of the code where we can use these codecs, instead of Hadoop codec names? Might be good to replace and clean up. ########## hudi-io/src/main/java/org/apache/hudi/io/hfile/KeyOnlyKeyValue.java: ########## @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.io.hfile; + +import static org.apache.hudi.io.hfile.DataSize.SIZEOF_SHORT; + +/** + * Represents the key part only. + */ +public class KeyOnlyKeyValue extends KeyValue { Review Comment: IDK if we need this, as is. Can't we just have a `Key` class? ########## hudi-io/src/main/java/org/apache/hudi/io/compress/CodecPool.java: ########## @@ -0,0 +1,263 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hudi.io.compress; + +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import org.apache.hadoop.conf.Configuration; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * A global compressor/decompressor pool used to save and reuse + * (possibly native) compression/decompression codecs. + * <p> + * This class is copied from + * {@code org.apache.hadoop.io.compress.CodecPool} + */ +public class CodecPool { Review Comment: Can we use something like https://github.com/airlift/aircompressor or better, and kill this code? Basically, a well-used library that can do decompression in Java. ########## hudi-io/src/main/java/org/apache/hudi/io/hfile/DataSize.java: ########## @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.io.hfile; + +/** + * Sizes of different primitive data structures used by HFile. + */ Review Comment: Are these JVM-specific? They should just be fixed constants? ########## hudi-io/src/main/java/org/apache/hudi/io/hfile/ByteUtils.java: ########## @@ -0,0 +1,244 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.io.hfile; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; + +public class ByteUtils { Review Comment: Unit test all methods? None of these methods are HFile-specific? Have such generic things in an io.util package to encourage reuse. ########## hudi-io/src/main/java/org/apache/hudi/io/hfile/DataSize.java: ########## @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.io.hfile; + +/** + * Sizes of different primitive data structures used by HFile. + */ +public class DataSize { + /** + * Size of boolean in bytes + */ + public static final int SIZEOF_BOOLEAN = 1; + + /** + * Size of byte in bytes + */ + public static final int SIZEOF_BYTE = SIZEOF_BOOLEAN; + + /** + * Size of int in bytes + */ + public static final int SIZEOF_INT = Integer.SIZE / Byte.SIZE; Review Comment: 4 ########## hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileBlock.java: ########## @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.io.hfile; + +import org.apache.hudi.io.compress.CompressionCodec; + +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOException; +import java.io.InputStream; + +import static org.apache.hudi.io.hfile.ByteUtils.readInt; +import static org.apache.hudi.io.hfile.DataSize.MAGIC_LENGTH; +import static org.apache.hudi.io.hfile.DataSize.SIZEOF_BYTE; +import static org.apache.hudi.io.hfile.DataSize.SIZEOF_INT; +import static org.apache.hudi.io.hfile.DataSize.SIZEOF_LONG; + +/** + * Represents a block in a HFile. The types of blocks are defined in {@link HFileBlockType}. + */ +public abstract class HFileBlock { + /** + * The HFile block header size without checksum + */ + public static final int HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM = + MAGIC_LENGTH + 2 * SIZEOF_INT + SIZEOF_LONG; + /** + * The HFile block header size with checksum + * There is a 1 byte checksum type, followed by a 4 byte bytesPerChecksum + * followed by another 4 byte value to store sizeofDataOnDisk. + */ + public static final int HFILEBLOCK_HEADER_SIZE = + HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM + SIZEOF_BYTE + 2 * SIZEOF_INT; + + /** + * Each checksum value is an integer that can be stored in 4 bytes. 
+ */ + static final int CHECKSUM_SIZE = SIZEOF_INT; + + static class Header { + // Format of header is: + // 8 bytes - block magic + // 4 bytes int - onDiskSizeWithoutHeader + // 4 bytes int - uncompressedSizeWithoutHeader + // 8 bytes long - prevBlockOffset + // The following 3 are only present if header contains checksum information + // 1 byte - checksum type + // 4 byte int - bytes per checksum + // 4 byte int - onDiskDataSizeWithHeader + static int BLOCK_MAGIC_INDEX = 0; + static int ON_DISK_SIZE_WITHOUT_HEADER_INDEX = 8; + static int UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX = 12; + static int PREV_BLOCK_OFFSET_INDEX = 16; + static int CHECKSUM_TYPE_INDEX = 24; + static int BYTES_PER_CHECKSUM_INDEX = 25; + static int ON_DISK_DATA_SIZE_WITH_HEADER_INDEX = 29; + } + + protected final HFileContext context; + protected final byte[] byteBuff; + protected final int startOffsetInBuff; + private final HFileBlockType blockType; + protected int onDiskSizeWithoutHeader; + protected int uncompressedSizeWithoutHeader; + + protected HFileBlock(HFileContext context, + HFileBlockType blockType, + byte[] byteBuff, + int startOffsetInBuff) { + this.context = context; + this.byteBuff = byteBuff; + this.startOffsetInBuff = startOffsetInBuff; + this.blockType = blockType; + this.onDiskSizeWithoutHeader = readInt( + byteBuff, startOffsetInBuff + Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX); + this.uncompressedSizeWithoutHeader = readInt( + byteBuff, startOffsetInBuff + Header.UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX); + } + + /** + * Parses the HFile block header and returns the {@link HFileBlock} instance based on the input. + * + * @param context HFile context. + * @param byteBuff Input data. + * @param startOffsetInBuff Offset to start parsing. + * @return The {@link HFileBlock} instance based on the input; {@code null} if not able to parse. + * @throws IOException + */ + public static HFileBlock parse(HFileContext context, byte[] byteBuff, int startOffsetInBuff) throws IOException { + HFileBlockType blockType = HFileBlockType.parse(byteBuff, startOffsetInBuff); + switch (blockType) { + case ROOT_INDEX: + return new HFileRootIndexBlock(context, byteBuff, startOffsetInBuff); + case FILE_INFO: + return new HFileFileInfoBlock(context, byteBuff, startOffsetInBuff); + case DATA: + return new HFileDataBlock(context, byteBuff, startOffsetInBuff); + default: + return null; + } + } + + /** + * Allocates a new byte buffer for uncompressed data and returns a new {@link HFileBlock} + * instance for the decompressed content. + * + * @return + */ + public abstract HFileBlock cloneForUnpack(); + + public HFileBlockType getBlockType() { + return blockType; + } + + public byte[] getByteBuff() { + return byteBuff; + } + + public int getOnDiskSizeWithHeader() { + return onDiskSizeWithoutHeader + HFILEBLOCK_HEADER_SIZE; + } + + /** + * Decodes and decompresses the block content if the block content is compressed. + * + * @return {@link HFileBlock} instance + * @throws IOException + */ + public HFileBlock unpack() throws IOException { + // Should only be called for compressed blocks + CompressionCodec compression = context.getCompressionCodec(); Review Comment: Just a reminder to think through if resources opened here are closed correctly. ########## hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileBlock.java: ########## @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.io.hfile; + +import org.apache.hudi.io.compress.CompressionCodec; + +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOException; +import java.io.InputStream; + +import static org.apache.hudi.io.hfile.ByteUtils.readInt; +import static org.apache.hudi.io.hfile.DataSize.MAGIC_LENGTH; +import static org.apache.hudi.io.hfile.DataSize.SIZEOF_BYTE; +import static org.apache.hudi.io.hfile.DataSize.SIZEOF_INT; +import static org.apache.hudi.io.hfile.DataSize.SIZEOF_LONG; + +/** + * Represents a block in a HFile. The types of blocks are defined in {@link HFileBlockType}. + */ +public abstract class HFileBlock { + /** + * The HFile block header size without checksum + */ + public static final int HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM = Review Comment: Instead of dealing in ints and longs, better to think in terms of uint32, uint64, etc., which makes the length explicit. ########## hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileBlockReader.java: ########## @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.io.hfile; + +import org.apache.hadoop.fs.FSDataInputStream; + +import java.io.IOException; + +/** + * A reader to read one or more HFile blocks based on the start and end offsets. + */ +public class HFileBlockReader { + private final HFileContext context; + private final byte[] byteBuff; + private int offset; + + /** + * Instantiates the {@link HFileBlockReader}. + * + * @param context HFile context. + * @param stream Input data. + * @param startOffset Start offset to read from. + * @param endOffset End offset to stop at. + * @throws IOException + */ + public HFileBlockReader(HFileContext context, + FSDataInputStream stream, + long startOffset, + long endOffset) throws IOException { + this.context = context; + this.offset = 0; + stream.seek(startOffset); + this.byteBuff = new byte[(int) (endOffset - startOffset)]; Review Comment: Can we just assert that this will be a valid int? 
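For illustration, a guard along these lines would make the narrowing cast explicit — a minimal sketch only, not code from the PR; the exception message is a placeholder:

```java
// Sketch: validate that the block range fits in a Java byte[] before the narrowing cast.
long length = endOffset - startOffset;
if (length < 0 || length > Integer.MAX_VALUE) {
  throw new IOException("Block range does not fit in a byte array: startOffset="
      + startOffset + ", endOffset=" + endOffset);
}
this.byteBuff = new byte[(int) length];
```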
########## hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileReader.java: ########## @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.io.hfile; + +import org.apache.hadoop.fs.FSDataInputStream; Review Comment: Are you going to replace this? ########## hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileBlockType.java: ########## @@ -0,0 +1,186 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.io.hfile; + +import java.io.DataInputStream; +import java.io.IOException; + +import static java.nio.charset.StandardCharsets.UTF_8; Review Comment: Do we need a class that enforces that the string is UTF-8? ########## hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileReader.java: ########## @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.io.hfile; + +import org.apache.hadoop.fs.FSDataInputStream; + +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +/** + * A reader reading a HFile. 
+ */ +public class HFileReader { + private final FSDataInputStream stream; + private final long fileSize; + private boolean isMetadataInitialized = false; + private HFileContext context; + private List<BlockIndexEntry> blockIndexEntryList; + private HFileBlock metaIndexBlock; + private HFileBlock fileInfoBlock; + + public HFileReader(FSDataInputStream stream, long fileSize) { + this.stream = stream; + this.fileSize = fileSize; + } + + /** + * Initializes the metadata by reading the "Load-on-open" section. + * + * @throws IOException upon error. + */ + public void initializeMetadata() throws IOException { + assert !this.isMetadataInitialized; + + // Read Trailer (serialized in Proto) + HFileTrailer trailer = readTrailer(stream, fileSize); + this.context = HFileContext.builder() + .compressAlgo(trailer.getCompressionCodec()) + .build(); + // At this point, HFileContext is not known yet until we parse the trailer + HFileBlockReader blockReader = new HFileBlockReader( + context, stream, trailer.getLoadOnOpenDataOffset(), fileSize - HFileTrailer.getTrailerSize()); + HFileRootIndexBlock dataIndexBlock = + (HFileRootIndexBlock) blockReader.nextBlock(HFileBlockType.ROOT_INDEX); + this.blockIndexEntryList = dataIndexBlock.readDataIndex(trailer.getDataIndexCount()); + this.metaIndexBlock = blockReader.nextBlock(HFileBlockType.ROOT_INDEX); + this.fileInfoBlock = blockReader.nextBlock(HFileBlockType.FILE_INFO); + + this.isMetadataInitialized = true; + } + + /** + * Seeks to the key to look up. + * + * @param key Key to look up. + * @return The {@link KeyValue} instance in the block that contains the exact same key as the + * lookup key; or {@link null} if the lookup key does not exist. + * @throws IOException upon error. + */ + public KeyValue seekTo(KeyValue key) throws IOException { + BlockIndexEntry lookUpKey = new BlockIndexEntry(key, -1, -1); + int rootLevelBlockIndex = searchBlockByKey(lookUpKey); + if (rootLevelBlockIndex < 0) { + // Key smaller than the start key of the first block Review Comment: Other conditions to check? Like being greater than the end of the file ########## hudi-io/src/main/java/org/apache/hudi/io/compress/airlift/HoodieAirliftGzipDecompressor.java: ########## @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.io.compress.airlift; + +import org.apache.hudi.io.compress.CompressionCodec; +import org.apache.hudi.io.compress.HoodieDecompressor; + +import io.airlift.compress.gzip.JdkGzipHadoopStreams; +import io.airlift.compress.hadoop.HadoopInputStream; + +import java.io.IOException; +import java.io.InputStream; + +/** + * Implementation of {@link HoodieDecompressor} for {@link CompressionCodec#GZIP} compression + * codec using airlift aircompressor's GZIP decompressor. 
+ */ +public class HoodieAirliftGzipDecompressor implements HoodieDecompressor { + + private final JdkGzipHadoopStreams gzipStreams; + + public HoodieAirliftGzipDecompressor() { + gzipStreams = new JdkGzipHadoopStreams(); + } + + @Override + public void decompress(InputStream compressedInput, byte[] targetByteArray, int offset, int length) throws IOException { Review Comment: Let's unit test all subclasses of the decompressor? ########## hudi-io/src/main/java/org/apache/hudi/io/hfile/ByteUtils.java: ########## @@ -0,0 +1,244 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.io.hfile; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; + +public class ByteUtils { Review Comment: Any other utility classes that need to be consolidated here? IOUtils? ########## hudi-io/src/main/java/org/apache/hudi/io/hfile/ByteUtils.java: ########## @@ -0,0 +1,244 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.io.hfile; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; + +public class ByteUtils { + /** + * Reads four bytes starting from the offset in the input and returns {@code int} value. + * + * @param bytes Input byte array. + * @param offset Offset to start reading. + * @return The {@code int} value. + */ + public static int readInt(byte[] bytes, int offset) { + return (((bytes[offset] & 0xff) << 24) + | ((bytes[offset + 1] & 0xff) << 16) + | ((bytes[offset + 2] & 0xff) << 8) + | (bytes[offset + 3] & 0xff)); + } + + /** + * Reads eight bytes starting from the offset in the input and returns {@code long} value. + * + * @param bytes Input byte array. + * @param offset Offset to start reading. + * @return The {@code long} value. 
+ */ + public static long readLong(byte[] bytes, int offset) { + return (((long) (bytes[offset] & 0xff) << 56) + | ((long) (bytes[offset + 1] & 0xff) << 48) + | ((long) (bytes[offset + 2] & 0xff) << 40) + | ((long) (bytes[offset + 3] & 0xff) << 32) + | ((long) (bytes[offset + 4] & 0xff) << 24) + | ((long) (bytes[offset + 5] & 0xff) << 16) + | ((long) (bytes[offset + 6] & 0xff) << 8) + | (long) (bytes[offset + 7] & 0xff)); + } + + /** + * Reads two bytes starting from the offset in the input and returns {@code short} value. + * + * @param bytes Input byte array. + * @param offset Offset to start reading. + * @return The {@code short} value. + */ + public static short readShort(byte[] bytes, int offset) { + short n = 0; + n = (short) ((n ^ bytes[offset]) & 0xFF); + n = (short) (n << 8); + n ^= (short) (bytes[offset + 1] & 0xFF); + return n; + } + + /** + * Parses the first byte of a vint/vlong to determine the number of bytes on disk. + * + * @param bytes Input byte array. + * @param offset Offset to start reading. + * @return The total number of bytes (1 to 9) on disk. + */ + public static int decodeVLongSizeOnDisk(byte[] bytes, int offset) { Review Comment: What's a v int? Rename for clarity? ########## hudi-io/src/main/java/org/apache/hudi/io/hfile/DataSize.java: ########## @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.io.hfile; + +/** + * Sizes of different primitive data structures used by HFile. + */ +public class DataSize { + /** + * Size of boolean in bytes + */ + public static final int SIZEOF_BOOLEAN = 1; + + /** + * Size of byte in bytes + */ + public static final int SIZEOF_BYTE = SIZEOF_BOOLEAN; + + /** + * Size of int in bytes + */ + public static final int SIZEOF_INT = Integer.SIZE / Byte.SIZE; + + /** + * Size of short in bytes + */ + public static final int SIZEOF_SHORT = Short.SIZE / Byte.SIZE; + + /** + * Size of long in bytes + */ + public static final int SIZEOF_LONG = Long.SIZE / Byte.SIZE; Review Comment: 8? ########## hudi-io/src/main/java/org/apache/hudi/io/hfile/BlockIndexEntry.java: ########## @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.io.hfile; + +/** + * Represents the index entry of a data block in the Data Index stored in the + * {@link HFileBlockType#ROOT_INDEX} block in the "Load-on-open" section. + * <p> + * This is completely in-memory representation and does not involve byte parsing. + * <p> + * When comparing two {@link BlockIndexEntry} instances, the underlying bytes of the keys + * are compared in lexicographical order. + */ +public class BlockIndexEntry implements Comparable<BlockIndexEntry> { + private final KeyValue firstKey; Review Comment: Can we just have a Key class instead? ########## hudi-io/src/main/java/org/apache/hudi/io/hfile/DataSize.java: ########## @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.io.hfile; + +/** + * Sizes of different primitive data structures used by HFile. + */ +public class DataSize { + /** + * Size of boolean in bytes + */ + public static final int SIZEOF_BOOLEAN = 1; + + /** + * Size of byte in bytes + */ + public static final int SIZEOF_BYTE = SIZEOF_BOOLEAN; + + /** + * Size of int in bytes + */ + public static final int SIZEOF_INT = Integer.SIZE / Byte.SIZE; + + /** + * Size of short in bytes + */ + public static final int SIZEOF_SHORT = Short.SIZE / Byte.SIZE; Review Comment: 2? ########## hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileBlock.java: ########## @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hudi.io.hfile; + +import org.apache.hudi.io.compress.CompressionCodec; + +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOException; +import java.io.InputStream; + +import static org.apache.hudi.io.hfile.ByteUtils.readInt; +import static org.apache.hudi.io.hfile.DataSize.MAGIC_LENGTH; +import static org.apache.hudi.io.hfile.DataSize.SIZEOF_BYTE; +import static org.apache.hudi.io.hfile.DataSize.SIZEOF_INT; +import static org.apache.hudi.io.hfile.DataSize.SIZEOF_LONG; + +/** + * Represents a block in a HFile. The types of blocks are defined in {@link HFileBlockType}. + */ +public abstract class HFileBlock { + /** + * The HFile block header size without checksum + */ + public static final int HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM = + MAGIC_LENGTH + 2 * SIZEOF_INT + SIZEOF_LONG; + /** + * The HFile block header size with checksum + * There is a 1 byte checksum type, followed by a 4 byte bytesPerChecksum + * followed by another 4 byte value to store sizeofDataOnDisk. + */ + public static final int HFILEBLOCK_HEADER_SIZE = + HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM + SIZEOF_BYTE + 2 * SIZEOF_INT; + + /** + * Each checksum value is an integer that can be stored in 4 bytes. + */ + static final int CHECKSUM_SIZE = SIZEOF_INT; + + static class Header { + // Format of header is: + // 8 bytes - block magic + // 4 bytes int - onDiskSizeWithoutHeader + // 4 bytes int - uncompressedSizeWithoutHeader + // 8 bytes long - prevBlockOffset + // The following 3 are only present if header contains checksum information + // 1 byte - checksum type + // 4 byte int - bytes per checksum + // 4 byte int - onDiskDataSizeWithHeader + static int BLOCK_MAGIC_INDEX = 0; + static int ON_DISK_SIZE_WITHOUT_HEADER_INDEX = 8; + static int UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX = 12; + static int PREV_BLOCK_OFFSET_INDEX = 16; + static int CHECKSUM_TYPE_INDEX = 24; + static int BYTES_PER_CHECKSUM_INDEX = 25; + static int ON_DISK_DATA_SIZE_WITH_HEADER_INDEX = 29; + } + + protected final HFileContext context; + protected final byte[] byteBuff; + protected final int startOffsetInBuff; + private final HFileBlockType blockType; + protected int onDiskSizeWithoutHeader; + protected int uncompressedSizeWithoutHeader; + + protected HFileBlock(HFileContext context, + HFileBlockType blockType, + byte[] byteBuff, + int startOffsetInBuff) { + this.context = context; + this.byteBuff = byteBuff; + this.startOffsetInBuff = startOffsetInBuff; + this.blockType = blockType; + this.onDiskSizeWithoutHeader = readInt( + byteBuff, startOffsetInBuff + Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX); + this.uncompressedSizeWithoutHeader = readInt( + byteBuff, startOffsetInBuff + Header.UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX); + } + + /** + * Parses the HFile block header and returns the {@link HFileBlock} instance based on the input. + * + * @param context HFile context. + * @param byteBuff Input data. + * @param startOffsetInBuff Offset to start parsing. + * @return The {@link HFileBlock} instance based on the input; {@code null} if not able to parse. 
+ * @throws IOException + */ + public static HFileBlock parse(HFileContext context, byte[] byteBuff, int startOffsetInBuff) throws IOException { + HFileBlockType blockType = HFileBlockType.parse(byteBuff, startOffsetInBuff); + switch (blockType) { + case ROOT_INDEX: + return new HFileRootIndexBlock(context, byteBuff, startOffsetInBuff); + case FILE_INFO: + return new HFileFileInfoBlock(context, byteBuff, startOffsetInBuff); + case DATA: + return new HFileDataBlock(context, byteBuff, startOffsetInBuff); + default: + return null; Review Comment: Do you wanna just throw an exception here? ########## hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileCompressionCodec.java: ########## @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.io.hfile; + +import org.apache.hudi.io.compress.CompressionCodec; + +/** + * Compression codec supported by HFile. + * <p> + * The ordinal of these cannot change or else that breaks all existing HFiles out there, + * even the ones that are not compressed! (They use the NONE algorithm) + * This is because HFile stores the ordinal to indicate which compression codec is used. + */ +public enum HFileCompressionCodec { Review Comment: Can you eliminate this class? It just feels like indirection. ########## hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileBlock.java: ########## @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.io.hfile; + +import org.apache.hudi.io.compress.CompressionCodec; + +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOException; +import java.io.InputStream; + +import static org.apache.hudi.io.hfile.ByteUtils.readInt; +import static org.apache.hudi.io.hfile.DataSize.MAGIC_LENGTH; +import static org.apache.hudi.io.hfile.DataSize.SIZEOF_BYTE; +import static org.apache.hudi.io.hfile.DataSize.SIZEOF_INT; +import static org.apache.hudi.io.hfile.DataSize.SIZEOF_LONG; + +/** + * Represents a block in a HFile. 
The types of blocks are defined in {@link HFileBlockType}. + */ +public abstract class HFileBlock { + /** + * The HFile block header size without checksum + */ + public static final int HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM = + MAGIC_LENGTH + 2 * SIZEOF_INT + SIZEOF_LONG; + /** + * The HFile block header size with checksum + * There is a 1 byte checksum type, followed by a 4 byte bytesPerChecksum + * followed by another 4 byte value to store sizeofDataOnDisk. + */ + public static final int HFILEBLOCK_HEADER_SIZE = + HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM + SIZEOF_BYTE + 2 * SIZEOF_INT; + + /** + * Each checksum value is an integer that can be stored in 4 bytes. + */ + static final int CHECKSUM_SIZE = SIZEOF_INT; + + static class Header { + // Format of header is: + // 8 bytes - block magic + // 4 bytes int - onDiskSizeWithoutHeader + // 4 bytes int - uncompressedSizeWithoutHeader + // 8 bytes long - prevBlockOffset + // The following 3 are only present if header contains checksum information + // 1 byte - checksum type + // 4 byte int - bytes per checksum + // 4 byte int - onDiskDataSizeWithHeader + static int BLOCK_MAGIC_INDEX = 0; + static int ON_DISK_SIZE_WITHOUT_HEADER_INDEX = 8; + static int UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX = 12; + static int PREV_BLOCK_OFFSET_INDEX = 16; + static int CHECKSUM_TYPE_INDEX = 24; + static int BYTES_PER_CHECKSUM_INDEX = 25; + static int ON_DISK_DATA_SIZE_WITH_HEADER_INDEX = 29; + } + + protected final HFileContext context; + protected final byte[] byteBuff; + protected final int startOffsetInBuff; + private final HFileBlockType blockType; + protected int onDiskSizeWithoutHeader; + protected int uncompressedSizeWithoutHeader; + + protected HFileBlock(HFileContext context, + HFileBlockType blockType, + byte[] byteBuff, + int startOffsetInBuff) { + this.context = context; + this.byteBuff = byteBuff; + this.startOffsetInBuff = startOffsetInBuff; + this.blockType = blockType; + this.onDiskSizeWithoutHeader = readInt( + byteBuff, startOffsetInBuff + Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX); + this.uncompressedSizeWithoutHeader = readInt( + byteBuff, startOffsetInBuff + Header.UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX); + } + + /** + * Parses the HFile block header and returns the {@link HFileBlock} instance based on the input. + * + * @param context HFile context. + * @param byteBuff Input data. + * @param startOffsetInBuff Offset to start parsing. + * @return The {@link HFileBlock} instance based on the input; {@code null} if not able to parse. + * @throws IOException + */ + public static HFileBlock parse(HFileContext context, byte[] byteBuff, int startOffsetInBuff) throws IOException { + HFileBlockType blockType = HFileBlockType.parse(byteBuff, startOffsetInBuff); + switch (blockType) { + case ROOT_INDEX: + return new HFileRootIndexBlock(context, byteBuff, startOffsetInBuff); + case FILE_INFO: + return new HFileFileInfoBlock(context, byteBuff, startOffsetInBuff); + case DATA: + return new HFileDataBlock(context, byteBuff, startOffsetInBuff); + default: + return null; + } + } + + /** + * Allocates a new byte buffer for uncompressed data and returns a new {@link HFileBlock} + * instance for the decompressed content. 
+ * + * @return + */ + public abstract HFileBlock cloneForUnpack(); + + public HFileBlockType getBlockType() { + return blockType; + } + + public byte[] getByteBuff() { + return byteBuff; + } + + public int getOnDiskSizeWithHeader() { + return onDiskSizeWithoutHeader + HFILEBLOCK_HEADER_SIZE; + } + + /** + * Decodes and decompresses the block content if the block content is compressed. + * + * @return {@link HFileBlock} instance + * @throws IOException + */ + public HFileBlock unpack() throws IOException { + // Should only be called for compressed blocks + CompressionCodec compression = context.getCompressionCodec(); + if (compression != CompressionCodec.NONE) { + HFileBlock unpacked = this.cloneForUnpack(); + final InputStream byteBuffInputStream = new ByteArrayInputStream( + byteBuff, startOffsetInBuff + HFILEBLOCK_HEADER_SIZE, onDiskSizeWithoutHeader); + InputStream dataInputStream = new DataInputStream(byteBuffInputStream); Review Comment: Does reading this out of a byte buffer treat this without any JVM-specific assumptions on sizes? ########## hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileBlockReader.java: ########## @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.io.hfile; + +import org.apache.hadoop.fs.FSDataInputStream; + +import java.io.IOException; + +/** + * A reader to read one or more HFile blocks based on the start and end offsets. + */ +public class HFileBlockReader { + private final HFileContext context; + private final byte[] byteBuff; + private int offset; + + /** + * Instantiates the {@link HFileBlockReader}. + * + * @param context HFile context. + * @param stream Input data. + * @param startOffset Start offset to read from. + * @param endOffset End offset to stop at. + * @throws IOException + */ + public HFileBlockReader(HFileContext context, + FSDataInputStream stream, + long startOffset, + long endOffset) throws IOException { + this.context = context; + this.offset = 0; + stream.seek(startOffset); + this.byteBuff = new byte[(int) (endOffset - startOffset)]; + stream.readFully(byteBuff); + } + + /** + * Reads the next block based on the expected block type. + * + * @param expectedBlockType Expected block type. + * @return {@link HFileBlock} instance matching the expected block type. + * @throws IOException if the type of the next block does not match the expected type. + */ + public HFileBlock nextBlock(HFileBlockType expectedBlockType) throws IOException { + if (offset >= byteBuff.length) { + return null; Review Comment: Can we avoid returning nulls? ########## hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileReader.java: ########## @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.io.hfile; + +import org.apache.hadoop.fs.FSDataInputStream; + +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +/** + * A reader reading a HFile. + */ +public class HFileReader { + private final FSDataInputStream stream; + private final long fileSize; + private boolean isMetadataInitialized = false; + private HFileContext context; + private List<BlockIndexEntry> blockIndexEntryList; + private HFileBlock metaIndexBlock; + private HFileBlock fileInfoBlock; + + public HFileReader(FSDataInputStream stream, long fileSize) { + this.stream = stream; + this.fileSize = fileSize; + } + + /** + * Initializes the metadata by reading the "Load-on-open" section. + * + * @throws IOException upon error. + */ + public void initializeMetadata() throws IOException { + assert !this.isMetadataInitialized; + + // Read Trailer (serialized in Proto) + HFileTrailer trailer = readTrailer(stream, fileSize); + this.context = HFileContext.builder() + .compressAlgo(trailer.getCompressionCodec()) + .build(); + // At this point, HFileContext is not known yet until we parse the trailer + HFileBlockReader blockReader = new HFileBlockReader( + context, stream, trailer.getLoadOnOpenDataOffset(), fileSize - HFileTrailer.getTrailerSize()); + HFileRootIndexBlock dataIndexBlock = + (HFileRootIndexBlock) blockReader.nextBlock(HFileBlockType.ROOT_INDEX); + this.blockIndexEntryList = dataIndexBlock.readDataIndex(trailer.getDataIndexCount()); + this.metaIndexBlock = blockReader.nextBlock(HFileBlockType.ROOT_INDEX); + this.fileInfoBlock = blockReader.nextBlock(HFileBlockType.FILE_INFO); + + this.isMetadataInitialized = true; + } + + /** + * Seeks to the key to look up. + * + * @param key Key to look up. + * @return The {@link KeyValue} instance in the block that contains the exact same key as the + * lookup key; or {@link null} if the lookup key does not exist. + * @throws IOException upon error. + */ + public KeyValue seekTo(KeyValue key) throws IOException { + BlockIndexEntry lookUpKey = new BlockIndexEntry(key, -1, -1); + int rootLevelBlockIndex = searchBlockByKey(lookUpKey); + if (rootLevelBlockIndex < 0) { + // Key smaller than the start key of the first block + return null; + } + BlockIndexEntry blockToRead = blockIndexEntryList.get(rootLevelBlockIndex); + HFileBlockReader blockReader = new HFileBlockReader( + context, stream, blockToRead.getOffset(), blockToRead.getOffset() + (long) blockToRead.getSize()); + HFileDataBlock dataBlock = (HFileDataBlock) blockReader.nextBlock(HFileBlockType.DATA); + return seekToKeyInBlock(dataBlock, key); + } + + /** + * Reads the HFile major version from the input. + * + * @param bytes Input data. + * @param offset Offset to start reading. 
+ * @return Major version of the file. + */ + public static int readMajorVersion(byte[] bytes, int offset) { + int ch1 = bytes[offset] & 0xFF; + int ch2 = bytes[offset + 1] & 0xFF; + int ch3 = bytes[offset + 2] & 0xFF; + return ((ch1 << 16) + (ch2 << 8) + ch3); + } + + /** + * Reads and parses the HFile trailer. + * + * @param stream HFile input. + * @param fileSize HFile size. + * @return {@link HFileTrailer} instance. + * @throws IOException upon error. + */ + private static HFileTrailer readTrailer(FSDataInputStream stream, + long fileSize) throws IOException { + int bufferSize = HFileTrailer.getTrailerSize(); + long seekPos = fileSize - bufferSize; + if (seekPos < 0) { + // It is hard to imagine such a small HFile. + seekPos = 0; + bufferSize = (int) fileSize; + } + stream.seek(seekPos); + + byte[] byteBuff = new byte[bufferSize]; + stream.readFully(byteBuff); + + int majorVersion = readMajorVersion(byteBuff, bufferSize - 3); + int minorVersion = byteBuff[bufferSize - 3]; + + HFileTrailer trailer = new HFileTrailer(majorVersion, minorVersion); + trailer.deserialize(new DataInputStream(new ByteArrayInputStream(byteBuff))); + return trailer; + } + + /** + * Searches the block that may contain the lookup key based on the starting keys + * of all blocks (sorted in the input list), using binary search. + * + * @param lookUpKey The key to lookup. + * @return Block index in the input. An index outside the range of input means the key does not + * exist in the HFile. + */ + private int searchBlockByKey(BlockIndexEntry lookUpKey) { Review Comment: Is this code mostly reused, or written from scratch? ########## hudi-io/pom.xml: ########## @@ -0,0 +1,126 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!-- + ~ Licensed to the Apache Software Foundation (ASF) under one + ~ or more contributor license agreements. See the NOTICE file + ~ distributed with this work for additional information + ~ regarding copyright ownership. The ASF licenses this file + ~ to you under the Apache License, Version 2.0 (the + ~ "License"); you may not use this file except in compliance + ~ with the License. You may obtain a copy of the License at + ~ + ~ http://www.apache.org/licenses/LICENSE-2.0 + ~ + ~ Unless required by applicable law or agreed to in writing, + ~ software distributed under the License is distributed on an + ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + ~ KIND, either express or implied. See the License for the + ~ specific language governing permissions and limitations + ~ under the License. 
+ --> +<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + <parent> + <artifactId>hudi</artifactId> + <groupId>org.apache.hudi</groupId> + <version>1.0.0-SNAPSHOT</version> + </parent> + <modelVersion>4.0.0</modelVersion> + + <artifactId>hudi-io</artifactId> + + <properties> + <main.basedir>${project.parent.basedir}</main.basedir> + <protobuf.plugin.version>0.6.1</protobuf.plugin.version> + <os.maven.version>1.5.0.Final</os.maven.version> + </properties> + + <build> + <resources> + <resource> + <directory>src/main/resources</directory> + </resource> + </resources> + + <extensions> + <extension> + <groupId>kr.motd.maven</groupId> + <artifactId>os-maven-plugin</artifactId> + <version>${os.maven.version}</version> + </extension> + </extensions> + + <plugins> + <plugin> + <groupId>org.xolstice.maven.plugins</groupId> + <artifactId>protobuf-maven-plugin</artifactId> + <version>${protobuf.plugin.version}</version> + <configuration> + <protocArtifact> + com.google.protobuf:protoc:${protoc.version}:exe:${os.detected.classifier} + </protocArtifact> + <protoSourceRoot>${basedir}/src/main/protobuf/</protoSourceRoot> + <clearOutputDirectory>false</clearOutputDirectory> + <checkStaleness>true</checkStaleness> + </configuration> + <executions> + <execution> + <id>compile-protoc</id> + <phase>generate-sources</phase> + <goals> + <goal>compile</goal> + </goals> + </execution> + </executions> + </plugin> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-jar-plugin</artifactId> + <version>${maven-jar-plugin.version}</version> + <executions> + <execution> + <goals> + <goal>test-jar</goal> + </goals> + <phase>test-compile</phase> + </execution> + </executions> + <configuration> + <skip>false</skip> + </configuration> + </plugin> + <plugin> + <groupId>org.apache.rat</groupId> + <artifactId>apache-rat-plugin</artifactId> + </plugin> + <plugin> + <groupId>org.jacoco</groupId> + <artifactId>jacoco-maven-plugin</artifactId> + </plugin> + </plugins> + </build> + + <dependencies> + <dependency> + <groupId>com.google.protobuf</groupId> + <artifactId>protobuf-java</artifactId> + </dependency> + + <dependency> + <groupId>io.airlift</groupId> + <artifactId>aircompressor</artifactId> + </dependency> + + <dependency> Review Comment: Is this going to be removed? ########## hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileRootIndexBlock.java: ########## @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hudi.io.hfile; + +import java.util.ArrayList; +import java.util.List; + +import static org.apache.hudi.io.hfile.ByteUtils.copy; +import static org.apache.hudi.io.hfile.ByteUtils.decodeVLongSizeOnDisk; +import static org.apache.hudi.io.hfile.ByteUtils.readInt; +import static org.apache.hudi.io.hfile.ByteUtils.readLong; +import static org.apache.hudi.io.hfile.ByteUtils.readVLong; + +/** + * Represents a {@link HFileBlockType#ROOT_INDEX} block in the "Load-on-open" section. Review Comment: Revisit all comments to make sure we don't have anything out of context. ########## hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileBlockType.java: ########## @@ -0,0 +1,186 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.io.hfile; + +import java.io.DataInputStream; +import java.io.IOException; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hudi.io.hfile.DataSize.MAGIC_LENGTH; + +/** + * Represents the HFile block type. + */ +public enum HFileBlockType { + /** + * Data block, both versions + */ + DATA("DATABLK*", BlockCategory.DATA), + + /** + * An encoded data block (e.g. with prefix compression), version 2 + */ + ENCODED_DATA("DATABLKE", BlockCategory.DATA) { + @Override + public int getId() { + return DATA.ordinal(); + } + }, + + /** + * Version 2 leaf index block. Appears in the data block section + */ + LEAF_INDEX("IDXLEAF2", BlockCategory.INDEX), + + /** + * Bloom filter block, version 2 + */ + BLOOM_CHUNK("BLMFBLK2", BlockCategory.BLOOM), Review Comment: Did you get around to reading these? Writing and reading bloom filters? ########## hudi-io/src/main/java/org/apache/hudi/io/hfile/HFileReader.java: ########## @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hudi.io.hfile; + +import org.apache.hadoop.fs.FSDataInputStream; + +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +/** + * A reader reading a HFile. + */ +public class HFileReader { + private final FSDataInputStream stream; + private final long fileSize; + private boolean isMetadataInitialized = false; + private HFileContext context; + private List<BlockIndexEntry> blockIndexEntryList; + private HFileBlock metaIndexBlock; + private HFileBlock fileInfoBlock; + + public HFileReader(FSDataInputStream stream, long fileSize) { + this.stream = stream; + this.fileSize = fileSize; + } + + /** + * Initializes the metadata by reading the "Load-on-open" section. + * + * @throws IOException upon error. + */ + public void initializeMetadata() throws IOException { + assert !this.isMetadataInitialized; + + // Read Trailer (serialized in Proto) + HFileTrailer trailer = readTrailer(stream, fileSize); + this.context = HFileContext.builder() + .compressAlgo(trailer.getCompressionCodec()) + .build(); + // At this point, HFileContext is not known yet until we parse the trailer Review Comment: Revise comment? -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected]
