sunchao commented on a change in pull request #34471:
URL: https://github.com/apache/spark/pull/34471#discussion_r743047058
##########
File path: .gitignore
##########
@@ -33,6 +33,7 @@ R/pkg/tests/fulltests/Rplots.pdf
build/*.jar
build/apache-maven*
build/scala*
+build/zinc*
Review comment:
nit: unrelated change?
##########
File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedValuesReader.java
##########
@@ -63,4 +64,36 @@
void skipDoubles(int total);
void skipBinary(int total);
void skipFixedLenByteArray(int total, int len);
+
+ /**
+ * An interface to write columnar output in various ways
+ */
+ @FunctionalInterface
+ interface IntegerOutputWriter {
+ void write(WritableColumnVector c, int rowId, long val);
+ }
+
+ @FunctionalInterface
+ interface ByteBufferOutputWriter {
Review comment:
this doesn't seem to be used anywhere
##########
File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports the vectorized
+ * interface.
+ */
+public class VectorizedDeltaBinaryPackedReader extends ValuesReader
+ implements VectorizedValuesReader {
+
+ // header data
+ private int blockSizeInValues;
+ private int miniBlockNumInABlock;
+ private int totalValueCount;
+ private long firstValue;
+
+ private int miniBlockSizeInValues;
+
+ // values read by the caller
+ private int valuesRead = 0;
+
+ //variables to keep state of the current block and miniblock
+ private long lastValueRead;
+ private long minDeltaInCurrentBlock;
+ private int currentMiniBlock = 0;
Review comment:
maybe add some comments to these fields - it's a bit confusing that
`currentMiniBlock` is an integer
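A sketch of the kind of field comments being asked for (wording is illustrative, not from the PR):

```java
// Index of the mini block currently being decoded within the current block
// (a position counter, not the mini block's contents).
private int currentMiniBlock = 0;
// The last value decoded; each new value is reconstructed as
// lastValueRead + minDeltaInCurrentBlock + unpackedDelta.
private long lastValueRead;
// The minimum delta of the current block, added back to every unpacked delta.
private long minDeltaInCurrentBlock;
```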
##########
File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaByteArrayReader.java
##########
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.deltastrings.DeltaByteArrayReader;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * An implementation of the Parquet DELTA_BYTE_ARRAY decoder that supports the vectorized interface.
+ */
+public class VectorizedDeltaByteArrayReader extends ValuesReader implements VectorizedValuesReader {
+ private final DeltaByteArrayReader deltaByteArrayReader = new DeltaByteArrayReader();
+
+ @Override
+ public void initFromPage(int valueCount, ByteBufferInputStream in) throws IOException {
+ deltaByteArrayReader.initFromPage(valueCount, in);
+ }
+
+ @Override
+ public void skip() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public byte readByte() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public short readShort() {
+ return 0;
+ }
+
+ @Override
+ public Binary readBinary(int len) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBooleans(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBytes(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readShorts(int total, WritableColumnVector c, int rowId) {
+
+ }
+
+ @Override
+ public void readIntegers(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readIntegersWithRebase(int total, WritableColumnVector c, int rowId,
+ boolean failIfRebase) {
+
+ }
+
+ @Override
+ public void readUnsignedIntegers(int total, WritableColumnVector c, int rowId) {
+
+ }
+
+ @Override
+ public void readUnsignedLongs(int total, WritableColumnVector c, int rowId) {
+
+ }
+
+ @Override
+ public void readLongs(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readLongsWithRebase(int total, WritableColumnVector c, int rowId,
+ boolean failIfRebase) {
+
+ }
+
+ @Override
+ public void readFloats(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readDoubles(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBinary(int total, WritableColumnVector c, int rowId) {
+ for (int i = 0; i < total; i++) {
+ Binary binary = deltaByteArrayReader.readBytes();
+ ByteBuffer buffer = binary.toByteBuffer();
+ if (buffer.hasArray()) {
+ c.putByteArray(rowId + i, buffer.array(), buffer.arrayOffset() + buffer.position(),
+ binary.length());
+ } else {
+ byte[] bytes = new byte[binary.length()];
+ buffer.get(bytes);
+ c.putByteArray(rowId + i, bytes);
+ }
+ }
+ }
+
+ @Override
+ public void skipBooleans(int total) {
+
+ }
+
+ @Override
+ public void skipBytes(int total) {
+
+ }
+
+ @Override
+ public void skipShorts(int total) {
+
+ }
+
+ @Override
+ public void skipIntegers(int total) {
+
+ }
+
+ @Override
+ public void skipLongs(int total) {
+
+ }
+
+ @Override
+ public void skipFloats(int total) {
+
+ }
+
+ @Override
+ public void skipDoubles(int total) {
+
+ }
+
+ @Override
+ public void skipBinary(int total) {
Review comment:
we need this too
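A minimal sketch of the missing method, assuming the wrapped `DeltaByteArrayReader` can only advance by decoding (each value is a shared prefix of the previous value plus a suffix, so there is no cheaper way to skip):

```java
@Override
public void skipBinary(int total) {
  // Decode and discard `total` values to keep the reader's prefix state consistent.
  for (int i = 0; i < total; i++) {
    deltaByteArrayReader.readBytes();
  }
}
```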
##########
File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java
##########
@@ -166,7 +166,7 @@ void readBatch(int total, WritableColumnVector column) throws IOException {
readState.resetForNewPage(pageValueCount, pageFirstRowIndex);
}
PrimitiveType.PrimitiveTypeName typeName =
- descriptor.getPrimitiveType().getPrimitiveTypeName();
+ descriptor.getPrimitiveType().getPrimitiveTypeName();
Review comment:
ditto
##########
File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaByteArrayReader.java
##########
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.deltastrings.DeltaByteArrayReader;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * An implementation of the Parquet DELTA_BYTE_ARRAY decoder that supports the vectorized interface.
+ */
+public class VectorizedDeltaByteArrayReader extends ValuesReader implements VectorizedValuesReader {
+ private final DeltaByteArrayReader deltaByteArrayReader = new DeltaByteArrayReader();
+
+ @Override
+ public void initFromPage(int valueCount, ByteBufferInputStream in) throws IOException {
+ deltaByteArrayReader.initFromPage(valueCount, in);
+ }
+
+ @Override
+ public void skip() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public byte readByte() {
Review comment:
seems it's better to have an abstract class inheriting `ValuesReader`
and `VectorizedValuesReader` with this default behavior defined, rather than
repeating the same thing in all the different value readers.
This can be done separately though.
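A rough sketch of the suggested base class (the name `VectorizedReaderBase` is hypothetical):

```java
public abstract class VectorizedReaderBase extends ValuesReader
    implements VectorizedValuesReader {

  @Override
  public void skip() {
    throw new UnsupportedOperationException();
  }

  @Override
  public byte readByte() {
    throw new UnsupportedOperationException();
  }

  // ... the same UnsupportedOperationException default for every other
  // read*/skip* method, so each concrete reader overrides only what it supports.
}
```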
##########
File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports the vectorized
+ * interface.
+ */
+public class VectorizedDeltaBinaryPackedReader extends ValuesReader
+ implements VectorizedValuesReader {
+
+ // header data
+ private int blockSizeInValues;
+ private int miniBlockNumInABlock;
+ private int totalValueCount;
+ private long firstValue;
+
+ private int miniBlockSizeInValues;
+
+ // values read by the caller
+ private int valuesRead = 0;
+
+ //variables to keep state of the current block and miniblock
+ private long lastValueRead;
+ private long minDeltaInCurrentBlock;
+ private int currentMiniBlock = 0;
+ private int[] bitWidths; // bit widths for each miniblock in the current block
+ private int remainingInBlock = 0; // values in current block still to be read
+ private int remainingInMiniBlock = 0; // values in current mini block still to be read
+ private long[] unpackedValuesBuffer;
+
+ private ByteBufferInputStream in;
+
+ @SuppressWarnings("unused")
+ @Override
+ public void initFromPage(/*unused*/int valueCount, ByteBufferInputStream in) throws IOException {
+ Preconditions.checkArgument(valueCount >= 1,
+ "Page must have at least one value, but it has " + valueCount);
+ this.in = in;
+
+ // Read the header
+ this.blockSizeInValues = BytesUtils.readUnsignedVarInt(in);
+ this.miniBlockNumInABlock = BytesUtils.readUnsignedVarInt(in);
+ double miniSize = (double) blockSizeInValues / miniBlockNumInABlock;
+ Preconditions.checkArgument(miniSize % 8 == 0,
+ "miniBlockSize must be multiple of 8, but it's " + miniSize);
+ this.miniBlockSizeInValues = (int) miniSize;
+ this.totalValueCount = BytesUtils.readUnsignedVarInt(in);
+ this.bitWidths = new int[miniBlockNumInABlock];
+
+ // read the first value
+ firstValue = BytesUtils.readZigZagVarLong(in);
+
+ }
+
+ @Override
+ public void skip() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public byte readByte() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public short readShort() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Binary readBinary(int len) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBooleans(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBytes(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readShorts(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readIntegers(int total, WritableColumnVector c, int rowId) {
+ readValues(total, c, rowId, (w, r, v) -> {
+ c.putInt(r, (int) v);
+ });
+ }
+
+ @Override
+ public void readIntegersWithRebase(int total, WritableColumnVector c, int rowId,
+ boolean failIfRebase) {
+ throw new UnsupportedOperationException("Only readIntegers is valid.");
+ }
+
+ @Override
+ public void readUnsignedIntegers(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readUnsignedLongs(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readLongs(int total, WritableColumnVector c, int rowId) {
+ readValues(total, c, rowId, WritableColumnVector::putLong);
+ }
+
+ @Override
+ public void readLongsWithRebase(int total, WritableColumnVector c, int rowId,
+ boolean failIfRebase) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readFloats(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readDoubles(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBinary(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipBooleans(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipBytes(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipShorts(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipIntegers(int total) {
+ // Read the values but don't write them out (the writer output method is a no-op)
+ readValues(total, null, -1, (w, r, v) -> {
+ });
+ }
+
+ @Override
+ public void skipLongs(int total) {
+ // Read the values but don't write them out (the writer output method is a no-op)
+ readValues(total, null, -1, (w, r, v) -> {
+ });
+ }
+
+ @Override
+ public void skipFloats(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipDoubles(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipBinary(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipFixedLenByteArray(int total, int len) {
+ throw new UnsupportedOperationException();
+ }
+
+ private void readValues(int total, WritableColumnVector c, int rowId,
+ IntegerOutputWriter outputWriter) {
+ int remaining = total;
+ if (valuesRead + total > totalValueCount) {
+ throw new ParquetDecodingException(
+ "no more values to read, total value count is " + valuesRead);
+ }
+ // First value
+ if (valuesRead == 0) {
+ //c.putInt(rowId, (int)firstValue);
+ outputWriter.write(c, rowId, firstValue);
+ lastValueRead = firstValue;
+ rowId++;
+ remaining--;
+ }
+ while (remaining > 0) {
+ int n;
+ try {
+ n = loadMiniBlockToOutput(remaining, c, rowId, outputWriter);
+ } catch (IOException e) {
+ throw new ParquetDecodingException("Error reading mini block.", e);
+ }
+ rowId += n;
+ remaining -= n;
+ }
+ valuesRead = total - remaining;
+ }
+
+
+ /**
+ * Read from a mini block. Read at most 'remaining' values into output.
+ *
+ * @return the number of values read into output
+ */
+ private int loadMiniBlockToOutput(int remaining, WritableColumnVector c, int rowId,
+ IntegerOutputWriter outputWriter) throws IOException {
+
+ // new block; read the block header
+ if (remainingInBlock == 0) {
+ readBlockHeader();
+ }
+
+ // new miniblock, unpack the miniblock
+ if (remainingInMiniBlock == 0) {
+ unpackMiniBlock();
+ }
+
+ //read values from miniblock
+ int valuesRead = 0;
+ for (int i = miniBlockSizeInValues - remainingInMiniBlock;
+ i < miniBlockSizeInValues && valuesRead < remaining; i++) {
+ //calculate values from deltas unpacked for current block
+ long outValue = lastValueRead + minDeltaInCurrentBlock + unpackedValuesBuffer[i];
+ lastValueRead = outValue;
+ outputWriter.write(c, rowId + valuesRead, outValue);
+ remaining--;
Review comment:
hmm I wonder why we decrement `remaining` here.
##########
File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports the vectorized
+ * interface.
+ */
+public class VectorizedDeltaBinaryPackedReader extends ValuesReader
+ implements VectorizedValuesReader {
+
+ // header data
+ private int blockSizeInValues;
+ private int miniBlockNumInABlock;
+ private int totalValueCount;
+ private long firstValue;
+
+ private int miniBlockSizeInValues;
+
+ // values read by the caller
+ private int valuesRead = 0;
+
+ //variables to keep state of the current block and miniblock
+ private long lastValueRead;
+ private long minDeltaInCurrentBlock;
+ private int currentMiniBlock = 0;
+ private int[] bitWidths; // bit widths for each miniblock in the current block
+ private int remainingInBlock = 0; // values in current block still to be read
+ private int remainingInMiniBlock = 0; // values in current mini block still to be read
+ private long[] unpackedValuesBuffer;
+
+ private ByteBufferInputStream in;
+
+ @SuppressWarnings("unused")
+ @Override
+ public void initFromPage(/*unused*/int valueCount, ByteBufferInputStream in) throws IOException {
+ Preconditions.checkArgument(valueCount >= 1,
+ "Page must have at least one value, but it has " + valueCount);
+ this.in = in;
+
+ // Read the header
+ this.blockSizeInValues = BytesUtils.readUnsignedVarInt(in);
+ this.miniBlockNumInABlock = BytesUtils.readUnsignedVarInt(in);
+ double miniSize = (double) blockSizeInValues / miniBlockNumInABlock;
+ Preconditions.checkArgument(miniSize % 8 == 0,
+ "miniBlockSize must be multiple of 8, but it's " + miniSize);
+ this.miniBlockSizeInValues = (int) miniSize;
+ this.totalValueCount = BytesUtils.readUnsignedVarInt(in);
+ this.bitWidths = new int[miniBlockNumInABlock];
+
+ // read the first value
+ firstValue = BytesUtils.readZigZagVarLong(in);
+
+ }
+
+ @Override
+ public void skip() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public byte readByte() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public short readShort() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Binary readBinary(int len) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBooleans(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBytes(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readShorts(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readIntegers(int total, WritableColumnVector c, int rowId) {
+ readValues(total, c, rowId, (w, r, v) -> {
+ c.putInt(r, (int) v);
Review comment:
should this be `w.putInt`?
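That is, the presumed fix is to write through the lambda's writer parameter rather than capturing the enclosing `c`:

```java
readValues(total, c, rowId, (w, r, v) -> w.putInt(r, (int) v));
```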
##########
File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedValuesReader.java
##########
@@ -63,4 +64,36 @@
void skipDoubles(int total);
void skipBinary(int total);
void skipFixedLenByteArray(int total, int len);
+
+ /**
+ * An interface to write columnar output in various ways
+ */
+ @FunctionalInterface
+ interface IntegerOutputWriter {
+ void write(WritableColumnVector c, int rowId, long val);
Review comment:
maybe add some comments for this? what are `c`, `rowId` and `val` for?
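A sketch of the kind of doc comment being asked for (wording is illustrative):

```java
/**
 * Writes a decoded value into a column vector.
 *
 * @param c the column vector to write into
 * @param rowId the row index within the vector to write at
 * @param val the decoded value, widened to a long
 */
void write(WritableColumnVector c, int rowId, long val);
```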
##########
File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaByteArrayReader.java
##########
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.deltastrings.DeltaByteArrayReader;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * An implementation of the Parquet DELTA_BYTE_ARRAY decoder that supports the vectorized interface.
+ */
+public class VectorizedDeltaByteArrayReader extends ValuesReader implements VectorizedValuesReader {
+ private final DeltaByteArrayReader deltaByteArrayReader = new DeltaByteArrayReader();
+
+ @Override
+ public void initFromPage(int valueCount, ByteBufferInputStream in) throws IOException {
+ deltaByteArrayReader.initFromPage(valueCount, in);
+ }
+
+ @Override
+ public void skip() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public byte readByte() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public short readShort() {
+ return 0;
+ }
+
+ @Override
+ public Binary readBinary(int len) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBooleans(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBytes(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readShorts(int total, WritableColumnVector c, int rowId) {
+
Review comment:
ditto
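i.e. presumably the empty body should throw like the surrounding methods:

```java
@Override
public void readShorts(int total, WritableColumnVector c, int rowId) {
  throw new UnsupportedOperationException();
}
```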
##########
File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports the vectorized
+ * interface.
+ */
+public class VectorizedDeltaBinaryPackedReader extends ValuesReader
+ implements VectorizedValuesReader {
+
+ // header data
+ private int blockSizeInValues;
+ private int miniBlockNumInABlock;
+ private int totalValueCount;
+ private long firstValue;
+
+ private int miniBlockSizeInValues;
+
+ // values read by the caller
+ private int valuesRead = 0;
+
+ //variables to keep state of the current block and miniblock
Review comment:
nit: add space after `//`
##########
File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports the vectorized
+ * interface.
+ */
+public class VectorizedDeltaBinaryPackedReader extends ValuesReader
+ implements VectorizedValuesReader {
+
+ // header data
+ private int blockSizeInValues;
+ private int miniBlockNumInABlock;
+ private int totalValueCount;
+ private long firstValue;
+
+ private int miniBlockSizeInValues;
+
+ // values read by the caller
+ private int valuesRead = 0;
+
+ //variables to keep state of the current block and miniblock
+ private long lastValueRead;
+ private long minDeltaInCurrentBlock;
+ private int currentMiniBlock = 0;
+ private int[] bitWidths; // bit widths for each miniblock in the current block
+ private int remainingInBlock = 0; // values in current block still to be read
+ private int remainingInMiniBlock = 0; // values in current mini block still to be read
+ private long[] unpackedValuesBuffer;
+
+ private ByteBufferInputStream in;
+
+ @SuppressWarnings("unused")
+ @Override
+ public void initFromPage(/*unused*/int valueCount, ByteBufferInputStream in) throws IOException {
+ Preconditions.checkArgument(valueCount >= 1,
+ "Page must have at least one value, but it has " + valueCount);
+ this.in = in;
+
+ // Read the header
+ this.blockSizeInValues = BytesUtils.readUnsignedVarInt(in);
+ this.miniBlockNumInABlock = BytesUtils.readUnsignedVarInt(in);
+ double miniSize = (double) blockSizeInValues / miniBlockNumInABlock;
+ Preconditions.checkArgument(miniSize % 8 == 0,
+ "miniBlockSize must be multiple of 8, but it's " + miniSize);
+ this.miniBlockSizeInValues = (int) miniSize;
+ this.totalValueCount = BytesUtils.readUnsignedVarInt(in);
+ this.bitWidths = new int[miniBlockNumInABlock];
+
+ // read the first value
+ firstValue = BytesUtils.readZigZagVarLong(in);
+
+ }
+
+ @Override
+ public void skip() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public byte readByte() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public short readShort() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Binary readBinary(int len) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBooleans(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBytes(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readShorts(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readIntegers(int total, WritableColumnVector c, int rowId) {
+ readValues(total, c, rowId, (w, r, v) -> {
+ c.putInt(r, (int) v);
+ });
+ }
+
+ @Override
+ public void readIntegersWithRebase(int total, WritableColumnVector c, int rowId,
+ boolean failIfRebase) {
+ throw new UnsupportedOperationException("Only readIntegers is valid.");
+ }
+
+ @Override
+ public void readUnsignedIntegers(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readUnsignedLongs(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readLongs(int total, WritableColumnVector c, int rowId) {
+ readValues(total, c, rowId, WritableColumnVector::putLong);
+ }
+
+ @Override
+ public void readLongsWithRebase(int total, WritableColumnVector c, int rowId,
+ boolean failIfRebase) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readFloats(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readDoubles(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBinary(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipBooleans(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipBytes(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipShorts(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipIntegers(int total) {
+ // Read the values but don't write them out (the writer output method is a no-op)
+ readValues(total, null, -1, (w, r, v) -> {
+ });
+ }
+
+ @Override
+ public void skipLongs(int total) {
+ // Read the values but don't write them out (the writer output method is a no-op)
+ readValues(total, null, -1, (w, r, v) -> {
+ });
+ }
+
+ @Override
+ public void skipFloats(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipDoubles(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipBinary(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipFixedLenByteArray(int total, int len) {
+ throw new UnsupportedOperationException();
+ }
+
+ private void readValues(int total, WritableColumnVector c, int rowId,
+ IntegerOutputWriter outputWriter) {
+ int remaining = total;
+ if (valuesRead + total > totalValueCount) {
+ throw new ParquetDecodingException(
+ "no more values to read, total value count is " + valuesRead);
+ }
+ // First value
+ if (valuesRead == 0) {
+ //c.putInt(rowId, (int)firstValue);
+ outputWriter.write(c, rowId, firstValue);
+ lastValueRead = firstValue;
+ rowId++;
+ remaining--;
+ }
+ while (remaining > 0) {
+ int n;
+ try {
+ n = loadMiniBlockToOutput(remaining, c, rowId, outputWriter);
+ } catch (IOException e) {
+ throw new ParquetDecodingException("Error reading mini block.", e);
+ }
+ rowId += n;
+ remaining -= n;
+ }
+ valuesRead = total - remaining;
+ }
+
+
+ /**
+ * Read from a mini block. Read at most 'remaining' values into output.
+ *
+ * @return the number of values read into output
+ */
+ private int loadMiniBlockToOutput(int remaining, WritableColumnVector c, int rowId,
+ IntegerOutputWriter outputWriter) throws IOException {
+
+ // new block; read the block header
+ if (remainingInBlock == 0) {
+ readBlockHeader();
+ }
+
+ // new miniblock, unpack the miniblock
+ if (remainingInMiniBlock == 0) {
+ unpackMiniBlock();
+ }
+
+ //read values from miniblock
+ int valuesRead = 0;
+ for (int i = miniBlockSizeInValues - remainingInMiniBlock;
+ i < miniBlockSizeInValues && valuesRead < remaining; i++) {
+ //calculate values from deltas unpacked for current block
+ long outValue = lastValueRead + minDeltaInCurrentBlock + unpackedValuesBuffer[i];
+ lastValueRead = outValue;
+ outputWriter.write(c, rowId + valuesRead, outValue);
+ remaining--;
+ remainingInBlock--;
+ remainingInMiniBlock--;
+ valuesRead++;
+ }
+
+ return valuesRead;
+ }
+
+ private void readBlockHeader() {
+ try {
+ minDeltaInCurrentBlock = BytesUtils.readZigZagVarLong(in);
+ } catch (IOException e) {
+ throw new ParquetDecodingException("can not read min delta in current block", e);
+ }
+ readBitWidthsForMiniBlocks();
+ remainingInBlock = blockSizeInValues;
+ currentMiniBlock = 0;
+ remainingInMiniBlock = 0;
+ }
+
+ /**
+ * mini block has a size of 8*n, unpack 8 value each time
+ * @see org.apache.parquet.column.values.delta.DeltaBinaryPackingValuesReader#unpackMiniBlock
+ */
+ private void unpackMiniBlock() throws IOException {
+ this.unpackedValuesBuffer = new long[miniBlockSizeInValues];
+ BytePackerForLong packer = Packer.LITTLE_ENDIAN.newBytePackerForLong(
+ bitWidths[currentMiniBlock]);
+ for (int j = 0; j < miniBlockSizeInValues; j += 8) {
+ ByteBuffer buffer = in.slice(packer.getBitWidth());
+ packer.unpack8Values(buffer, buffer.position(), unpackedValuesBuffer, j);
Review comment:
wonder if we can unpack 32 values at once, since the
[spec](https://github.com/apache/parquet-format/blob/master/Encodings.md#delta-encoding-delta_binary_packed--5)
says the number of values in a mini block is a multiple of 32.
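A sketch of what that could look like, assuming `BytePackerForLong` also exposes an `unpack32Values` variant with the same shape as `unpack8Values`:

```java
for (int j = 0; j < miniBlockSizeInValues; j += 32) {
  // 32 values of bitWidth bits each occupy bitWidth * 4 bytes.
  ByteBuffer buffer = in.slice(packer.getBitWidth() * 4);
  packer.unpack32Values(buffer, buffer.position(), unpackedValuesBuffer, j);
}
```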
##########
File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports the vectorized
+ * interface.
+ */
+public class VectorizedDeltaBinaryPackedReader extends ValuesReader
+ implements VectorizedValuesReader {
+
+ // header data
+ private int blockSizeInValues;
+ private int miniBlockNumInABlock;
+ private int totalValueCount;
+ private long firstValue;
+
+ private int miniBlockSizeInValues;
+
+ // values read by the caller
+ private int valuesRead = 0;
+
+ //variables to keep state of the current block and miniblock
+ private long lastValueRead;
+ private long minDeltaInCurrentBlock;
+ private int currentMiniBlock = 0;
+ private int[] bitWidths; // bit widths for each miniblock in the current block
+ private int remainingInBlock = 0; // values in current block still to be read
+ private int remainingInMiniBlock = 0; // values in current mini block still to be read
+ private long[] unpackedValuesBuffer;
+
+ private ByteBufferInputStream in;
+
+ @SuppressWarnings("unused")
+ @Override
+ public void initFromPage(/*unused*/int valueCount, ByteBufferInputStream in) throws IOException {
Review comment:
nit: remove `/*unused*/`: the parameter is actually used, if only in the precondition check
##########
File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports the vectorized
+ * interface.
Review comment:
maybe add a bit more information, like when it will be used, what data types it supports, etc.
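One way the suggestion could be addressed; the added sentences are a sketch inferred from the rest of the PR, not taken from it:

```java
/**
 * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports the vectorized
 * interface. DELTA_BINARY_PACKED is a delta encoding for INT32/INT64 data, commonly produced
 * by the Parquet v2 writer, so this reader decodes into int and long column vectors only.
 */
```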
##########
File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports the vectorized
+ * interface.
+ */
+public class VectorizedDeltaBinaryPackedReader extends ValuesReader
+ implements VectorizedValuesReader {
+
+ // header data
+ private int blockSizeInValues;
+ private int miniBlockNumInABlock;
+ private int totalValueCount;
+ private long firstValue;
+
+ private int miniBlockSizeInValues;
+
+ // values read by the caller
+ private int valuesRead = 0;
+
+ //variables to keep state of the current block and miniblock
+ private long lastValueRead;
+ private long minDeltaInCurrentBlock;
+ private int currentMiniBlock = 0;
+ private int[] bitWidths; // bit widths for each miniblock in the current block
+ private int remainingInBlock = 0; // values in current block still to be read
+ private int remainingInMiniBlock = 0; // values in current mini block still to be read
+ private long[] unpackedValuesBuffer;
+
+ private ByteBufferInputStream in;
+
+ @SuppressWarnings("unused")
+ @Override
+ public void initFromPage(/*unused*/int valueCount, ByteBufferInputStream in) throws IOException {
+ Preconditions.checkArgument(valueCount >= 1,
+ "Page must have at least one value, but it has " + valueCount);
+ this.in = in;
+
+ // Read the header
+ this.blockSizeInValues = BytesUtils.readUnsignedVarInt(in);
+ this.miniBlockNumInABlock = BytesUtils.readUnsignedVarInt(in);
+ double miniSize = (double) blockSizeInValues / miniBlockNumInABlock;
+ Preconditions.checkArgument(miniSize % 8 == 0,
+ "miniBlockSize must be multiple of 8, but it's " + miniSize);
+ this.miniBlockSizeInValues = (int) miniSize;
+ this.totalValueCount = BytesUtils.readUnsignedVarInt(in);
+ this.bitWidths = new int[miniBlockNumInABlock];
+
+ // read the first value
+ firstValue = BytesUtils.readZigZagVarLong(in);
+
+ }
+
+ @Override
+ public void skip() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public byte readByte() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public short readShort() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Binary readBinary(int len) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBooleans(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBytes(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readShorts(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readIntegers(int total, WritableColumnVector c, int rowId) {
+ readValues(total, c, rowId, (w, r, v) -> {
+ c.putInt(r, (int) v);
+ });
+ }
+
+ @Override
+ public void readIntegersWithRebase(int total, WritableColumnVector c, int rowId,
+ boolean failIfRebase) {
+ throw new UnsupportedOperationException("Only readIntegers is valid.");
+ }
+
+ @Override
+ public void readUnsignedIntegers(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readUnsignedLongs(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readLongs(int total, WritableColumnVector c, int rowId) {
+ readValues(total, c, rowId, WritableColumnVector::putLong);
+ }
+
+ @Override
+ public void readLongsWithRebase(int total, WritableColumnVector c, int rowId,
+ boolean failIfRebase) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readFloats(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readDoubles(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBinary(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipBooleans(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipBytes(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipShorts(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipIntegers(int total) {
+ // Read the values but don't write them out (the writer output method is a no-op)
+ readValues(total, null, -1, (w, r, v) -> {
+ });
+ }
+
+ @Override
+ public void skipLongs(int total) {
+ // Read the values but don't write them out (the writer output method is a no-op)
+ readValues(total, null, -1, (w, r, v) -> {
+ });
+ }
+
+ @Override
+ public void skipFloats(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipDoubles(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipBinary(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipFixedLenByteArray(int total, int len) {
+ throw new UnsupportedOperationException();
+ }
+
+ private void readValues(int total, WritableColumnVector c, int rowId,
+ IntegerOutputWriter outputWriter) {
+ int remaining = total;
+ if (valuesRead + total > totalValueCount) {
+ throw new ParquetDecodingException(
+ "no more values to read, total value count is " + valuesRead);
+ }
+ // First value
+ if (valuesRead == 0) {
+ //c.putInt(rowId, (int)firstValue);
+ outputWriter.write(c, rowId, firstValue);
+ lastValueRead = firstValue;
+ rowId++;
+ remaining--;
+ }
+ while (remaining > 0) {
+ int n;
+ try {
+ n = loadMiniBlockToOutput(remaining, c, rowId, outputWriter);
+ } catch (IOException e) {
+ throw new ParquetDecodingException("Error reading mini block.", e);
+ }
+ rowId += n;
+ remaining -= n;
+ }
+ valuesRead = total - remaining;
+ }
+
+
+ /**
+ * Read from a mini block. Read at most 'remaining' values into output.
+ *
+ * @return the number of values read into output
+ */
+ private int loadMiniBlockToOutput(int remaining, WritableColumnVector c, int rowId,
+ IntegerOutputWriter outputWriter) throws IOException {
+
+ // new block; read the block header
+ if (remainingInBlock == 0) {
+ readBlockHeader();
+ }
+
+ // new miniblock, unpack the miniblock
+ if (remainingInMiniBlock == 0) {
+ unpackMiniBlock();
+ }
+
+ //read values from miniblock
+ int valuesRead = 0;
+ for (int i = miniBlockSizeInValues - remainingInMiniBlock;
+ i < miniBlockSizeInValues && valuesRead < remaining; i++) {
+ //calculate values from deltas unpacked for current block
+ long outValue = lastValueRead + minDeltaInCurrentBlock + unpackedValuesBuffer[i];
+ lastValueRead = outValue;
+ outputWriter.write(c, rowId + valuesRead, outValue);
+ remaining--;
+ remainingInBlock--;
+ remainingInMiniBlock--;
+ valuesRead++;
+ }
+
+ return valuesRead;
+ }
+
+ private void readBlockHeader() {
+ try {
+ minDeltaInCurrentBlock = BytesUtils.readZigZagVarLong(in);
+ } catch (IOException e) {
+ throw new ParquetDecodingException("can not read min delta in current block", e);
+ }
+ readBitWidthsForMiniBlocks();
+ remainingInBlock = blockSizeInValues;
+ currentMiniBlock = 0;
+ remainingInMiniBlock = 0;
+ }
+
+ /**
+ * mini block has a size of 8*n, unpack 8 value each time
+ * @see org.apache.parquet.column.values.delta.DeltaBinaryPackingValuesReader#unpackMiniBlock
+ */
+ private void unpackMiniBlock() throws IOException {
+ this.unpackedValuesBuffer = new long[miniBlockSizeInValues];
Review comment:
maybe we can reuse `this.unpackedValuesBuffer`?
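One way to reuse it, given that `miniBlockSizeInValues` is fixed once `initFromPage` has run (a sketch):

```java
// Allocate lazily on the first mini block and reuse the buffer afterwards.
if (unpackedValuesBuffer == null) {
  unpackedValuesBuffer = new long[miniBlockSizeInValues];
}
```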
##########
File path: sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetEncodingSuite.scala
##########
@@ -122,4 +126,40 @@ class ParquetEncodingSuite extends ParquetCompatibilityTest with SharedSparkSess
}
}
}
+
+ test("parquet v2 pages - delta encoding") {
+ val extraOptions = Map[String, String](
+ ParquetOutputFormat.WRITER_VERSION -> ParquetProperties.WriterVersion.PARQUET_2_0.toString,
+ ParquetOutputFormat.ENABLE_DICTIONARY -> "false"
+ )
+
+ val hadoopConf = spark.sessionState.newHadoopConfWithOptions(extraOptions)
+ withSQLConf(
+ SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "true",
+ ParquetOutputFormat.JOB_SUMMARY_LEVEL -> "ALL") {
Review comment:
why do we need this?
##########
File path: sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetDeltaEncodingIntegerSuite.scala
##########
@@ -0,0 +1,234 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet
+
+import java.io.IOException
+import java.nio.ByteBuffer
+import java.util.Random
+
+import org.apache.parquet.bytes.{ByteBufferInputStream, DirectByteBufferAllocator}
+import org.apache.parquet.column.values.ValuesWriter
+import org.apache.parquet.column.values.delta.DeltaBinaryPackingValuesWriterForInteger
+import org.apache.parquet.io.ParquetDecodingException
+
+import org.apache.spark.sql.execution.vectorized.{OnHeapColumnVector, WritableColumnVector}
+import org.apache.spark.sql.test.SharedSparkSession
+import org.apache.spark.sql.types.IntegerType
+
+/**
+ * Read tests for vectorized Delta binary packed Integer reader.
+ * Translated from
+ * org.apache.parquet.column.values.delta.DeltaBinaryPackingValuesWriterForIntegerTest
+ */
+class ParquetDeltaEncodingIntegerSuite extends ParquetCompatibilityTest with SharedSparkSession {
Review comment:
can we combine this with `ParquetDeltaEncodingLongSuite`? we can have a base class with the common code (we'd need to abstract a few things, like how to allocate an array, how to generate a random int/long, Parquet's value writer class, etc.)
##########
File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaByteArrayReader.java
##########
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.deltastrings.DeltaByteArrayReader;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * An implementation of the Parquet DELTA_BYTE_ARRAY decoder that supports the vectorized interface.
+ */
+public class VectorizedDeltaByteArrayReader extends ValuesReader implements VectorizedValuesReader {
+ private final DeltaByteArrayReader deltaByteArrayReader = new DeltaByteArrayReader();
+
+ @Override
+ public void initFromPage(int valueCount, ByteBufferInputStream in) throws IOException {
+ deltaByteArrayReader.initFromPage(valueCount, in);
+ }
+
+ @Override
+ public void skip() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public byte readByte() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public short readShort() {
+ return 0;
Review comment:
why return 0 here?
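Presumably the fix is to follow the same convention as the other unsupported
operations in this reader, rather than silently returning 0; a minimal sketch:

    @Override
    public short readShort() {
      throw new UnsupportedOperationException();
    }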
##########
File path:
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports the vectorized
+ * interface.
+ */
+public class VectorizedDeltaBinaryPackedReader extends ValuesReader
+ implements VectorizedValuesReader {
+
+ // header data
+ private int blockSizeInValues;
+ private int miniBlockNumInABlock;
+ private int totalValueCount;
+ private long firstValue;
+
+ private int miniBlockSizeInValues;
+
+ // values read by the caller
+ private int valuesRead = 0;
+
+ //variables to keep state of the current block and miniblock
+ private long lastValueRead;
+ private long minDeltaInCurrentBlock;
+ private int currentMiniBlock = 0;
+ private int[] bitWidths; // bit widths for each miniblock in the current block
+ private int remainingInBlock = 0; // values in current block still to be read
+ private int remainingInMiniBlock = 0; // values in current mini block still to be read
+ private long[] unpackedValuesBuffer;
+
+ private ByteBufferInputStream in;
+
+ @SuppressWarnings("unused")
+ @Override
+ public void initFromPage(/*unused*/int valueCount, ByteBufferInputStream in) throws IOException {
+ Preconditions.checkArgument(valueCount >= 1,
+ "Page must have at least one value, but it has " + valueCount);
+ this.in = in;
+
+ // Read the header
+ this.blockSizeInValues = BytesUtils.readUnsignedVarInt(in);
+ this.miniBlockNumInABlock = BytesUtils.readUnsignedVarInt(in);
+ double miniSize = (double) blockSizeInValues / miniBlockNumInABlock;
+ Preconditions.checkArgument(miniSize % 8 == 0,
+ "miniBlockSize must be multiple of 8, but it's " + miniSize);
+ this.miniBlockSizeInValues = (int) miniSize;
+ this.totalValueCount = BytesUtils.readUnsignedVarInt(in);
+ this.bitWidths = new int[miniBlockNumInABlock];
+
+ // read the first value
+ firstValue = BytesUtils.readZigZagVarLong(in);
+
+ }
+
+ @Override
+ public void skip() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public byte readByte() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public short readShort() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Binary readBinary(int len) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBooleans(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBytes(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readShorts(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readIntegers(int total, WritableColumnVector c, int rowId) {
+ readValues(total, c, rowId, (w, r, v) -> {
+ c.putInt(r, (int) v);
+ });
+ }
+
+ @Override
+ public void readIntegersWithRebase(int total, WritableColumnVector c, int rowId,
+ boolean failIfRebase) {
+ throw new UnsupportedOperationException("Only readIntegers is valid.");
Review comment:
I think we'll need to implement these too.
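For illustration, a hedged sketch of what a rebase-aware variant might look
like, modeled on the Julian-to-Proleptic-Gregorian handling in Spark's other
vectorized readers; whether the `RebaseDateTime`/`DataSourceUtils` helpers
carry over unchanged to this decoder is an assumption:

    import org.apache.spark.sql.catalyst.util.RebaseDateTime;
    import org.apache.spark.sql.execution.datasources.DataSourceUtils;

    @Override
    public void readIntegersWithRebase(int total, WritableColumnVector c, int rowId,
        boolean failIfRebase) {
      readValues(total, c, rowId, (w, r, v) -> {
        int days = (int) v;
        // Dates before the calendar switch need rebasing (same check as the
        // plain-encoding reader; assumed to apply here as well).
        if (days < RebaseDateTime.lastSwitchJulianDay()) {
          if (failIfRebase) {
            throw DataSourceUtils.newRebaseExceptionInRead("Parquet");
          }
          days = RebaseDateTime.rebaseJulianToGregorianDays(days);
        }
        w.putInt(r, days);
      });
    }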
##########
File path:
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports the vectorized
+ * interface.
+ */
+public class VectorizedDeltaBinaryPackedReader extends ValuesReader
+ implements VectorizedValuesReader {
+
+ // header data
+ private int blockSizeInValues;
+ private int miniBlockNumInABlock;
+ private int totalValueCount;
+ private long firstValue;
+
+ private int miniBlockSizeInValues;
+
+ // values read by the caller
+ private int valuesRead = 0;
+
+ //variables to keep state of the current block and miniblock
+ private long lastValueRead;
+ private long minDeltaInCurrentBlock;
+ private int currentMiniBlock = 0;
+ private int[] bitWidths; // bit widths for each miniblock in the current block
+ private int remainingInBlock = 0; // values in current block still to be read
+ private int remainingInMiniBlock = 0; // values in current mini block still to be read
+ private long[] unpackedValuesBuffer;
+
+ private ByteBufferInputStream in;
+
+ @SuppressWarnings("unused")
+ @Override
+ public void initFromPage(/*unused*/int valueCount, ByteBufferInputStream in) throws IOException {
+ Preconditions.checkArgument(valueCount >= 1,
+ "Page must have at least one value, but it has " + valueCount);
+ this.in = in;
+
+ // Read the header
+ this.blockSizeInValues = BytesUtils.readUnsignedVarInt(in);
+ this.miniBlockNumInABlock = BytesUtils.readUnsignedVarInt(in);
+ double miniSize = (double) blockSizeInValues / miniBlockNumInABlock;
+ Preconditions.checkArgument(miniSize % 8 == 0,
+ "miniBlockSize must be multiple of 8, but it's " + miniSize);
+ this.miniBlockSizeInValues = (int) miniSize;
+ this.totalValueCount = BytesUtils.readUnsignedVarInt(in);
+ this.bitWidths = new int[miniBlockNumInABlock];
+
+ // read the first value
+ firstValue = BytesUtils.readZigZagVarLong(in);
+
+ }
+
+ @Override
+ public void skip() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public byte readByte() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public short readShort() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Binary readBinary(int len) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBooleans(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBytes(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readShorts(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readIntegers(int total, WritableColumnVector c, int rowId) {
+ readValues(total, c, rowId, (w, r, v) -> {
+ c.putInt(r, (int) v);
+ });
+ }
+
+ @Override
+ public void readIntegersWithRebase(int total, WritableColumnVector c, int rowId,
+ boolean failIfRebase) {
+ throw new UnsupportedOperationException("Only readIntegers is valid.");
+ }
+
+ @Override
+ public void readUnsignedIntegers(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readUnsignedLongs(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readLongs(int total, WritableColumnVector c, int rowId) {
+ readValues(total, c, rowId, WritableColumnVector::putLong);
+ }
+
+ @Override
+ public void readLongsWithRebase(int total, WritableColumnVector c, int rowId,
+ boolean failIfRebase) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readFloats(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readDoubles(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBinary(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipBooleans(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipBytes(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipShorts(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipIntegers(int total) {
+ // Read the values but don't write them out (the writer output method is a no-op)
+ readValues(total, null, -1, (w, r, v) -> {
Review comment:
I think this can be done more efficiently; for instance, we wouldn't need to
unpack the bits anymore, or compute the original values from the deltas, etc.
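One cheap win either way: the no-op lambda is re-allocated on every skip call
and could be hoisted into a shared constant; a minimal sketch (the
`SKIP_WRITER` name is hypothetical). Note that since each decoded value is a
running sum (value[i] = value[i-1] + minDelta + packed[i]), the deltas still
have to be decoded to keep `lastValueRead` correct for subsequent reads, so
skipping the unpacking entirely would need extra bookkeeping:

    // Shared no-op writer: avoids allocating a fresh lambda per skip call.
    private static final IntegerOutputWriter SKIP_WRITER = (c, rowId, val) -> { };

    @Override
    public void skipIntegers(int total) {
      readValues(total, null, -1, SKIP_WRITER);
    }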
##########
File path:
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports
the vectorized
+ * interface.
+ */
+public class VectorizedDeltaBinaryPackedReader extends ValuesReader
+ implements VectorizedValuesReader {
+
+ // header data
+ private int blockSizeInValues;
+ private int miniBlockNumInABlock;
+ private int totalValueCount;
+ private long firstValue;
+
+ private int miniBlockSizeInValues;
+
+ // values read by the caller
+ private int valuesRead = 0;
+
+ //variables to keep state of the current block and miniblock
+ private long lastValueRead;
+ private long minDeltaInCurrentBlock;
+ private int currentMiniBlock = 0;
+ private int[] bitWidths; // bit widths for each miniblock in the current block
+ private int remainingInBlock = 0; // values in current block still to be read
+ private int remainingInMiniBlock = 0; // values in current mini block still to be read
+ private long[] unpackedValuesBuffer;
+
+ private ByteBufferInputStream in;
+
+ @SuppressWarnings("unused")
+ @Override
+ public void initFromPage(/*unused*/int valueCount, ByteBufferInputStream in) throws IOException {
+ Preconditions.checkArgument(valueCount >= 1,
+ "Page must have at least one value, but it has " + valueCount);
+ this.in = in;
+
+ // Read the header
+ this.blockSizeInValues = BytesUtils.readUnsignedVarInt(in);
+ this.miniBlockNumInABlock = BytesUtils.readUnsignedVarInt(in);
+ double miniSize = (double) blockSizeInValues / miniBlockNumInABlock;
+ Preconditions.checkArgument(miniSize % 8 == 0,
+ "miniBlockSize must be multiple of 8, but it's " + miniSize);
+ this.miniBlockSizeInValues = (int) miniSize;
+ this.totalValueCount = BytesUtils.readUnsignedVarInt(in);
+ this.bitWidths = new int[miniBlockNumInABlock];
+
+ // read the first value
+ firstValue = BytesUtils.readZigZagVarLong(in);
+
+ }
+
+ @Override
+ public void skip() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public byte readByte() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public short readShort() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Binary readBinary(int len) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBooleans(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBytes(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readShorts(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readIntegers(int total, WritableColumnVector c, int rowId) {
+ readValues(total, c, rowId, (w, r, v) -> {
+ c.putInt(r, (int) v);
+ });
+ }
+
+ @Override
+ public void readIntegersWithRebase(int total, WritableColumnVector c, int rowId,
+ boolean failIfRebase) {
+ throw new UnsupportedOperationException("Only readIntegers is valid.");
+ }
+
+ @Override
+ public void readUnsignedIntegers(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readUnsignedLongs(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readLongs(int total, WritableColumnVector c, int rowId) {
+ readValues(total, c, rowId, WritableColumnVector::putLong);
+ }
+
+ @Override
+ public void readLongsWithRebase(int total, WritableColumnVector c, int rowId,
+ boolean failIfRebase) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readFloats(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readDoubles(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void readBinary(int total, WritableColumnVector c, int rowId) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipBooleans(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipBytes(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipShorts(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipIntegers(int total) {
+ // Read the values but don't write them out (the writer output method is a no-op)
+ readValues(total, null, -1, (w, r, v) -> {
+ });
+ }
+
+ @Override
+ public void skipLongs(int total) {
+ // Read the values but don't write them out (the writer output method is a no-op)
+ readValues(total, null, -1, (w, r, v) -> {
+ });
+ }
+
+ @Override
+ public void skipFloats(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipDoubles(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipBinary(int total) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void skipFixedLenByteArray(int total, int len) {
+ throw new UnsupportedOperationException();
+ }
+
+ private void readValues(int total, WritableColumnVector c, int rowId,
+ IntegerOutputWriter outputWriter) {
+ int remaining = total;
+ if (valuesRead + total > totalValueCount) {
+ throw new ParquetDecodingException(
+ "no more values to read, total value count is " + valuesRead);
+ }
+ // First value
+ if (valuesRead == 0) {
+ //c.putInt(rowId, (int)firstValue);
Review comment:
nit: remove
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]