sunchao commented on a change in pull request #34471:
URL: https://github.com/apache/spark/pull/34471#discussion_r766251193
########## File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java ##########

@@ -0,0 +1,315 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.io.IOException;
+import java.math.BigInteger;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.spark.sql.catalyst.util.RebaseDateTime;
+import org.apache.spark.sql.execution.datasources.DataSourceUtils;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports the vectorized
+ * interface. DELTA_BINARY_PACKED is a delta encoding for integer and long types that stores values
+ * as a delta between consecutive values. Delta values are themselves bit packed. Similar to RLE but
+ * is more effective in the case of large variation of values in the encoded column. <br/>
+ * DELTA_BINARY_PACKED is the default encoding for integer and long columns in Parquet V2. <br/>
+ * Supported Types: INT32, INT64 <br/>
+ *
+ * @see <a href="https://github.com/apache/parquet-format/blob/master/Encodings.md#delta-encoding-delta_binary_packed--5">
+ *   Parquet format encodings: DELTA_BINARY_PACKED</a>
+ */
+public class VectorizedDeltaBinaryPackedReader extends VectorizedReaderBase {
+
+  // header data
+  private int blockSizeInValues;
+  private int miniBlockNumInABlock;
+  private int totalValueCount;
+  private long firstValue;
+
+  private int miniBlockSizeInValues;
+
+  // values read by the caller
+  private int valuesRead = 0;
+
+  // variables to keep state of the current block and miniblock
+  private long lastValueRead; // needed to compute the next value
+  private long minDeltaInCurrentBlock; // needed to compute the next value
+  private int currentMiniBlock = 0; // keep track of the mini block within the current block that we
+                                    // we read and decoded most recently. Only used as an index into
+                                    // bitWidths array
+  private int[] bitWidths; // bit widths for each miniBlock in the current block
+  private int remainingInBlock = 0; // values in current block still to be read
+  private int remainingInMiniBlock = 0; // values in current mini block still to be read
+  private long[] unpackedValuesBuffer;
+
+  private ByteBufferInputStream in;
+
+  // temporary buffers used by readByte, readShort, readInteger, and readLong
+  byte byteVal;
+  short shortVal;
+  int intVal;
+  long longVal;
+
+  @Override
+  public void initFromPage(int valueCount, ByteBufferInputStream in) throws IOException {
+    Preconditions.checkArgument(valueCount >= 1,
+        "Page must have at least one value, but it has " + valueCount);
+    this.in = in;
+    // Read the header
+    this.blockSizeInValues = BytesUtils.readUnsignedVarInt(in);
+    this.miniBlockNumInABlock = BytesUtils.readUnsignedVarInt(in);
+    double miniSize = (double) blockSizeInValues / miniBlockNumInABlock;
+    Preconditions.checkArgument(miniSize % 8 == 0,
+        "miniBlockSize must be multiple of 8, but it's " + miniSize);
+    this.miniBlockSizeInValues = (int) miniSize;
+    this.totalValueCount = BytesUtils.readUnsignedVarInt(in);
+    this.bitWidths = new int[miniBlockNumInABlock];
+    this.unpackedValuesBuffer = new long[miniBlockSizeInValues];
+    // read the first value
+    firstValue = BytesUtils.readZigZagVarLong(in);
+  }
+
+  @Override
+  public byte readByte() {
+    readValues(1, null, 0, (w, r, v) -> byteVal = (byte) v);
+    return byteVal;
+  }
+
+  @Override
+  public short readShort() {
+    readValues(1, null, 0, (w, r, v) -> shortVal = (short) v);
+    return shortVal;
+  }
+
+  @Override
+  public int readInteger() {
+    readValues(1, null, 0, (w, r, v) -> intVal = (int) v);
+    return intVal;
+  }
+
+  @Override
+  public long readLong() {
+    readValues(1, null, 0, (w, r, v) -> longVal = v);
+    return longVal;
+  }
+
+
+  @Override
+  public void readBytes(int total, WritableColumnVector c, int rowId) {
+    readValues(total, c, rowId, (w, r, v) -> w.putByte(r, (byte) v));
+  }
+
+  @Override
+  public void readShorts(int total, WritableColumnVector c, int rowId) {
+    readValues(total, c, rowId, (w, r, v) -> w.putShort(r, (short) v));
+  }
+
+  @Override
+  public void readIntegers(int total, WritableColumnVector c, int rowId) {
+    readValues(total, c, rowId, (w, r, v) -> w.putInt(r, (int) v));
+  }
+
+  // Based on VectorizedPlainValuesReader.readIntegersWithRebase
+  @Override
+  public final void readIntegersWithRebase(
+      int total, WritableColumnVector c, int rowId, boolean failIfRebase) {
+    readValues(total, c, rowId, (w, r, v) -> {
+      if (v < RebaseDateTime.lastSwitchJulianDay()) {
+        if (failIfRebase) {
+          throw DataSourceUtils.newRebaseExceptionInRead("Parquet");
+        } else {
+          w.putInt(r, RebaseDateTime.rebaseJulianToGregorianDays((int) v));
+        }
+      } else {
+        w.putInt(r, (int) v);
+      }
+    });
+  }
+
+  @Override
+  public void readUnsignedIntegers(int total, WritableColumnVector c, int rowId) {
+    readValues(total, c, rowId, (w, r, v) -> {
+      w.putLong(r, Integer.toUnsignedLong((int) v));
+    });
+  }
+
+  @Override
+  public void readUnsignedLongs(int total, WritableColumnVector c, int rowId) {
+    readValues(total, c, rowId, (w, r, v) -> {
+      w.putByteArray(r, new BigInteger(Long.toUnsignedString(v)).toByteArray());
+    });
+  }
+
+  @Override
+  public void readLongs(int total, WritableColumnVector c, int rowId) {
+    readValues(total, c, rowId, WritableColumnVector::putLong);
+  }
+
+  @Override
+  public final void readLongsWithRebase(
+      int total, WritableColumnVector c, int rowId, boolean failIfRebase) {
+    readValues(total, c, rowId, (w, r, v) -> {
+      if (v < RebaseDateTime.lastSwitchJulianDay()) {

Review comment:
   should be `v < RebaseDateTime.lastSwitchJulianTs()`?
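If the reviewer is right, the fix is a one-token change: the values flowing through this method are microseconds, so the cutoff should be the Julian timestamp switch point rather than the switch day. A minimal sketch of the corrected method (this is the reviewer's suggestion, not necessarily the merged code; imports and `readValues` come from the quoted class):

    // Sketch: same body as the quoted method, with the day-based cutoff
    // swapped for the microsecond-based one.
    @Override
    public final void readLongsWithRebase(
        int total, WritableColumnVector c, int rowId, boolean failIfRebase) {
      readValues(total, c, rowId, (w, r, v) -> {
        if (v < RebaseDateTime.lastSwitchJulianTs()) { // was lastSwitchJulianDay()
          if (failIfRebase) {
            throw DataSourceUtils.newRebaseExceptionInRead("Parquet");
          } else {
            w.putLong(r, RebaseDateTime.rebaseJulianToGregorianMicros(v));
          }
        } else {
          w.putLong(r, v);
        }
      });
    }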
########## File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java ##########

@@ -0,0 +1,315 @@
+  private void readBlockHeader() {
+    try {
+      minDeltaInCurrentBlock = BytesUtils.readZigZagVarLong(in);
+    } catch (IOException e) {
+      throw new ParquetDecodingException("can not read min delta in current block", e);

Review comment:
   ditto: "Can not read .."
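Applying the capitalization nit, the message would read (suggested wording only):

    throw new ParquetDecodingException("Can not read min delta in current block", e);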
########## File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java ##########

@@ -0,0 +1,315 @@
+  @Override
+  public long readLong() {
+    readValues(1, null, 0, (w, r, v) -> longVal = v);
+    return longVal;
+  }
+

Review comment:
   nit: extra empty line
########## File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java ##########

@@ -0,0 +1,315 @@
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports the vectorized
+ * interface. DELTA_BINARY_PACKED is a delta encoding for integer and long types that stores values
+ * as a delta between consecutive values. Delta values are themselves bit packed. Similar to RLE but
+ * is more effective in the case of large variation of values in the encoded column. <br/>
+ * DELTA_BINARY_PACKED is the default encoding for integer and long columns in Parquet V2. <br/>
+ * Supported Types: INT32, INT64 <br/>
+ *

Review comment:
   nit: maybe use `<p>` instead of `<br>`. It looks better after rendered :)
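For illustration, the class comment with the suggested `<p>` paragraph breaks (wording unchanged):

    /**
     * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports the vectorized
     * interface. DELTA_BINARY_PACKED is a delta encoding for integer and long types that stores
     * values as a delta between consecutive values. Delta values are themselves bit packed.
     * Similar to RLE but is more effective in the case of large variation of values in the
     * encoded column.
     * <p>
     * DELTA_BINARY_PACKED is the default encoding for integer and long columns in Parquet V2.
     * <p>
     * Supported Types: INT32, INT64
     */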
<br/> + * Supported Types: INT32, INT64 <br/> + * + * @see <a href="https://github.com/apache/parquet-format/blob/master/Encodings.md#delta-encoding-delta_binary_packed--5"> + * Parquet format encodings: DELTA_BINARY_PACKED</a> + */ +public class VectorizedDeltaBinaryPackedReader extends VectorizedReaderBase { + + // header data + private int blockSizeInValues; + private int miniBlockNumInABlock; + private int totalValueCount; + private long firstValue; + + private int miniBlockSizeInValues; + + // values read by the caller + private int valuesRead = 0; + + // variables to keep state of the current block and miniblock + private long lastValueRead; // needed to compute the next value + private long minDeltaInCurrentBlock; // needed to compute the next value + private int currentMiniBlock = 0; // keep track of the mini block within the current block that we + // we read and decoded most recently. Only used as an index into + // bitWidths array + private int[] bitWidths; // bit widths for each miniBlock in the current block + private int remainingInBlock = 0; // values in current block still to be read + private int remainingInMiniBlock = 0; // values in current mini block still to be read + private long[] unpackedValuesBuffer; + + private ByteBufferInputStream in; + + // temporary buffers used by readByte, readShort, readInteger, and readLong + byte byteVal; + short shortVal; + int intVal; + long longVal; + + @Override + public void initFromPage(int valueCount, ByteBufferInputStream in) throws IOException { + Preconditions.checkArgument(valueCount >= 1, + "Page must have at least one value, but it has " + valueCount); + this.in = in; + // Read the header + this.blockSizeInValues = BytesUtils.readUnsignedVarInt(in); + this.miniBlockNumInABlock = BytesUtils.readUnsignedVarInt(in); + double miniSize = (double) blockSizeInValues / miniBlockNumInABlock; + Preconditions.checkArgument(miniSize % 8 == 0, + "miniBlockSize must be multiple of 8, but it's " + miniSize); + this.miniBlockSizeInValues = (int) miniSize; + this.totalValueCount = BytesUtils.readUnsignedVarInt(in); + this.bitWidths = new int[miniBlockNumInABlock]; + this.unpackedValuesBuffer = new long[miniBlockSizeInValues]; + // read the first value + firstValue = BytesUtils.readZigZagVarLong(in); + } + + @Override + public byte readByte() { + readValues(1, null, 0, (w, r, v) -> byteVal = (byte) v); + return byteVal; + } + + @Override + public short readShort() { + readValues(1, null, 0, (w, r, v) -> shortVal = (short) v); + return shortVal; + } + + @Override + public int readInteger() { + readValues(1, null, 0, (w, r, v) -> intVal = (int) v); + return intVal; + } + + @Override + public long readLong() { + readValues(1, null, 0, (w, r, v) -> longVal = v); + return longVal; + } + + + @Override + public void readBytes(int total, WritableColumnVector c, int rowId) { + readValues(total, c, rowId, (w, r, v) -> w.putByte(r, (byte) v)); + } + + @Override + public void readShorts(int total, WritableColumnVector c, int rowId) { + readValues(total, c, rowId, (w, r, v) -> w.putShort(r, (short) v)); + } + + @Override + public void readIntegers(int total, WritableColumnVector c, int rowId) { + readValues(total, c, rowId, (w, r, v) -> w.putInt(r, (int) v)); + } + + // Based on VectorizedPlainValuesReader.readIntegersWithRebase + @Override + public final void readIntegersWithRebase( + int total, WritableColumnVector c, int rowId, boolean failIfRebase) { + readValues(total, c, rowId, (w, r, v) -> { + if (v < RebaseDateTime.lastSwitchJulianDay()) { + if 
(failIfRebase) { + throw DataSourceUtils.newRebaseExceptionInRead("Parquet"); + } else { + w.putInt(r, RebaseDateTime.rebaseJulianToGregorianDays((int) v)); + } + } else { + w.putInt(r, (int) v); + } + }); + } + + @Override + public void readUnsignedIntegers(int total, WritableColumnVector c, int rowId) { + readValues(total, c, rowId, (w, r, v) -> { + w.putLong(r, Integer.toUnsignedLong((int) v)); + }); + } + + @Override + public void readUnsignedLongs(int total, WritableColumnVector c, int rowId) { + readValues(total, c, rowId, (w, r, v) -> { + w.putByteArray(r, new BigInteger(Long.toUnsignedString(v)).toByteArray()); + }); + } + + @Override + public void readLongs(int total, WritableColumnVector c, int rowId) { + readValues(total, c, rowId, WritableColumnVector::putLong); + } + + @Override + public final void readLongsWithRebase( + int total, WritableColumnVector c, int rowId, boolean failIfRebase) { + readValues(total, c, rowId, (w, r, v) -> { + if (v < RebaseDateTime.lastSwitchJulianDay()) { + if (failIfRebase) { + throw DataSourceUtils.newRebaseExceptionInRead("Parquet"); + } else { + w.putLong(r, RebaseDateTime.rebaseJulianToGregorianMicros(v)); + } + } else { + w.putLong(r, v); + } + }); + } + + @Override + public void skipBytes(int total) { + skipValues(total); + } + + @Override + public void skipShorts(int total) { + skipValues(total); + } + + @Override + public void skipIntegers(int total) { + skipValues(total); + } + + @Override + public void skipLongs(int total) { + skipValues(total); + } + + private void readValues(int total, WritableColumnVector c, int rowId, + IntegerOutputWriter outputWriter) { + if (valuesRead + total > totalValueCount) { + throw new ParquetDecodingException( + "no more values to read, total value count is " + valuesRead); Review comment: nit: let's all use capital letter for the first word to be consistent: "No more values to read ..." ########## File path: sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetEncodingSuite.scala ########## @@ -16,14 +16,22 @@ */ package org.apache.spark.sql.execution.datasources.parquet +import java.math.BigDecimal +import java.sql.{Date, Timestamp} import java.time.{Duration, Period} import scala.collection.JavaConverters._ +import org.apache.hadoop.fs.Path +import org.apache.parquet.column.{Encoding, ParquetProperties} import org.apache.parquet.hadoop.ParquetOutputFormat +import org.apache.spark.sql.Row +import org.apache.spark.sql.catalyst.util.DateTimeUtils +import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SharedSparkSession + Review comment: nit: unnecessary change. ########## File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java ########## @@ -0,0 +1,315 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
########## File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java ##########

@@ -0,0 +1,315 @@
+  /**
+   * mini block has a size of 8*n, unpack 32 value each time
+   *
+   * see org.apache.parquet.column.values.delta.DeltaBinaryPackingValuesReader#unpackMiniBlock
+   */
+  private void unpackMiniBlock() throws IOException {
+    Arrays.fill(this.unpackedValuesBuffer, 0);

Review comment:
   Is this necessary? since we are overwriting it anyway.

########## File path: sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala ##########

@@ -147,6 +159,16 @@
     }
   }
 
+  sqlBenchmark.addCase("SQL Parquet Vectorized (Delta Binary)") { _ =>

Review comment:
   Can we:
   - move this up to be adjacent to the Parquet cases
   - change the name to "SQL Parquet Vectorized Format v2" or something?

   You also need to rebase and handle the newly added `BooleanType` here.
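To make the `Arrays.fill` question above concrete, here is a sketch of a parquet-mr style unpacking loop with the fill dropped. The method body is illustrative, not the PR's actual code, and it is only safe under the stated assumption that the packer writes every output slot, including when the mini block's bit width is 0:

    private void unpackMiniBlock() throws IOException {
      // Arrays.fill(unpackedValuesBuffer, 0) omitted: every slot below is
      // overwritten, 8 values per unpack8Values call. ASSUMPTION: the width-0
      // packer also writes zeros rather than skipping its output slots.
      BytePackerForLong packer =
          Packer.LITTLE_ENDIAN.newBytePackerForLong(bitWidths[currentMiniBlock]);
      for (int j = 0; j < miniBlockSizeInValues; j += 8) {
        // 8 values at bitWidth bits each is exactly bitWidth bytes of input
        ByteBuffer buffer = in.slice(packer.getBitWidth());
        packer.unpack8Values(buffer, buffer.position(), unpackedValuesBuffer, j);
      }
      remainingInMiniBlock = miniBlockSizeInValues;
      currentMiniBlock++;
    }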
########## File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java ##########

@@ -0,0 +1,315 @@
+    // new miniblock, unpack the miniblock
+    if (remainingInMiniBlock == 0) {
+      unpackMiniBlock();
+    }
+
+    //read values from miniblock

Review comment:
   nit: space after `//`
########## File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java ##########

@@ -0,0 +1,315 @@
+    //read values from miniblock
+    int valuesRead = 0;
+    for (int i = miniBlockSizeInValues - remainingInMiniBlock;
+        i < miniBlockSizeInValues && valuesRead < remaining; i++) {
+      //calculate values from deltas unpacked for current block

Review comment:
   ditto
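As context for the loop these two nits touch, a tiny worked example of the reconstruction it performs (numbers invented for illustration):

    // Column values:  7, 5, 3, 1, 2, 3, 4, 5   (7 is firstValue, from the page header)
    // Deltas:             -2, -2, -2, 1, 1, 1, 1        -> minDeltaInCurrentBlock = -2
    // Stored (delta - minDelta): 0, 0, 0, 3, 3, 3, 3    -> bit width 2
    // Decode step: outValue = lastValueRead + minDeltaInCurrentBlock + unpackedValuesBuffer[i]
    //   e.g. the fifth value: 1 + (-2) + 3 = 2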
########## File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java ##########

@@ -0,0 +1,315 @@
+  // variables to keep state of the current block and miniblock
+  private long lastValueRead; // needed to compute the next value
+  private long minDeltaInCurrentBlock; // needed to compute the next value
+  private int currentMiniBlock = 0; // keep track of the mini block within the current block that we

Review comment:
   nit: the comment seems broken at the end.

-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
