parthchandra commented on a change in pull request #34471:
URL: https://github.com/apache/spark/pull/34471#discussion_r744973480



##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedValuesReader.java
##########
@@ -63,4 +64,36 @@
    void skipDoubles(int total);
    void skipBinary(int total);
    void skipFixedLenByteArray(int total, int len);
+
+  /**
+   * An interface to write columnar output in various ways
+   */
+  @FunctionalInterface
+  interface IntegerOutputWriter {
+    void write(WritableColumnVector c, int rowId, long val);

Review comment:
       done

##########
File path: 
sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetEncodingSuite.scala
##########
@@ -122,4 +126,40 @@ class ParquetEncodingSuite extends 
ParquetCompatibilityTest with SharedSparkSess
       }
     }
   }
+
+  test("parquet v2 pages - delta encoding") {
+    val extraOptions = Map[String, String](
+      ParquetOutputFormat.WRITER_VERSION -> 
ParquetProperties.WriterVersion.PARQUET_2_0.toString,
+      ParquetOutputFormat.ENABLE_DICTIONARY -> "false"
+    )
+
+    val hadoopConf = spark.sessionState.newHadoopConfWithOptions(extraOptions)
+    withSQLConf(
+      SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "true",
+      ParquetOutputFormat.JOB_SUMMARY_LEVEL -> "ALL") {

Review comment:
       It does seem unnecessary, but I removed it and the test broke. It turns out 
that this writes a metadata summary file that is read by `readFooter`.

##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java
##########
@@ -280,6 +277,20 @@ private void initDataReader(
     }
   }
 
+    private ValuesReader getValuesReader(Encoding encoding) {
+        switch (encoding) {

Review comment:
       Done

##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports 
the vectorized
+ * interface.
+ */
+public class VectorizedDeltaBinaryPackedReader extends ValuesReader
+    implements VectorizedValuesReader {
+
+  // header data
+  private int blockSizeInValues;
+  private int miniBlockNumInABlock;
+  private int totalValueCount;
+  private long firstValue;
+
+  private int miniBlockSizeInValues;
+
+  // values read by the caller
+  private int valuesRead = 0;
+
+  //variables to keep state of the current block and miniblock
+  private long lastValueRead;
+  private long minDeltaInCurrentBlock;
+  private int currentMiniBlock = 0;
+  private int[] bitWidths; // bit widths for each miniblock in the current 
block
+  private int remainingInBlock = 0; // values in current block still to be read
+  private int remainingInMiniBlock = 0; // values in current mini block still 
to be read
+  private long[] unpackedValuesBuffer;
+
+  private ByteBufferInputStream in;
+
+  @SuppressWarnings("unused")
+  @Override
+  public void initFromPage(/*unused*/int valueCount, ByteBufferInputStream in) 
throws IOException {
+    Preconditions.checkArgument(valueCount >= 1,
+        "Page must have at least one value, but it has " + valueCount);
+    this.in = in;
+
+    // Read the header
+    this.blockSizeInValues = BytesUtils.readUnsignedVarInt(in);
+    this.miniBlockNumInABlock = BytesUtils.readUnsignedVarInt(in);
+    double miniSize = (double) blockSizeInValues / miniBlockNumInABlock;
+    Preconditions.checkArgument(miniSize % 8 == 0,
+        "miniBlockSize must be multiple of 8, but it's " + miniSize);
+    this.miniBlockSizeInValues = (int) miniSize;
+    this.totalValueCount = BytesUtils.readUnsignedVarInt(in);
+    this.bitWidths = new int[miniBlockNumInABlock];
+
+    // read the first value
+    firstValue = BytesUtils.readZigZagVarLong(in);
+
+  }
+
+  @Override
+  public void skip() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public byte readByte() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public short readShort() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Binary readBinary(int len) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBooleans(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBytes(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readShorts(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readIntegers(int total, WritableColumnVector c, int rowId) {
+    readValues(total, c, rowId, (w, r, v) -> {
+      c.putInt(r, (int) v);
+    });
+  }
+
+  @Override
+  public void readIntegersWithRebase(int total, WritableColumnVector c, int 
rowId,
+      boolean failIfRebase) {
+    throw new UnsupportedOperationException("Only readIntegers is valid.");
+  }
+
+  @Override
+  public void readUnsignedIntegers(int total, WritableColumnVector c, int 
rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readUnsignedLongs(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readLongs(int total, WritableColumnVector c, int rowId) {
+    readValues(total, c, rowId, WritableColumnVector::putLong);
+  }
+
+  @Override
+  public void readLongsWithRebase(int total, WritableColumnVector c, int rowId,
+      boolean failIfRebase) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readFloats(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readDoubles(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBinary(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipBooleans(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipBytes(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipShorts(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipIntegers(int total) {
+    // Read the values but don't write them out (the writer output method is a 
no-op)
+    readValues(total, null, -1, (w, r, v) -> {
+    });
+  }
+
+  @Override
+  public void skipLongs(int total) {
+    // Read the values but don't write them out (the writer output method is a 
no-op)
+    readValues(total, null, -1, (w, r, v) -> {
+    });
+  }
+
+  @Override
+  public void skipFloats(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipDoubles(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipBinary(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipFixedLenByteArray(int total, int len) {
+    throw new UnsupportedOperationException();
+  }
+
+  private void readValues(int total, WritableColumnVector c, int rowId,
+      IntegerOutputWriter outputWriter) {
+    int remaining = total;
+    if (valuesRead + total > totalValueCount) {
+      throw new ParquetDecodingException(
+          "no more values to read, total value count is " + valuesRead);
+    }
+    // First value
+    if (valuesRead == 0) {
+      //c.putInt(rowId, (int)firstValue);
+      outputWriter.write(c, rowId, firstValue);
+      lastValueRead = firstValue;
+      rowId++;
+      remaining--;
+    }
+    while (remaining > 0) {
+      int n;
+      try {
+        n = loadMiniBlockToOutput(remaining, c, rowId, outputWriter);
+      } catch (IOException e) {
+        throw new ParquetDecodingException("Error reading mini block.", e);
+      }
+      rowId += n;
+      remaining -= n;
+    }
+    valuesRead = total - remaining;
+  }
+
+
+  /**
+   * Read from a mini block.  Read at most 'remaining' values into output.
+   *
+   * @return the number of values read into output
+   */
+  private int loadMiniBlockToOutput(int remaining, WritableColumnVector c, int 
rowId,
+      IntegerOutputWriter outputWriter) throws IOException {
+
+    // new block; read the block header
+    if (remainingInBlock == 0) {
+      readBlockHeader();
+    }
+
+    // new miniblock, unpack the miniblock
+    if (remainingInMiniBlock == 0) {
+      unpackMiniBlock();
+    }
+
+    //read values from miniblock
+    int valuesRead = 0;
+    for (int i = miniBlockSizeInValues - remainingInMiniBlock;
+        i < miniBlockSizeInValues && valuesRead < remaining; i++) {
+      //calculate values from deltas unpacked for current block
+      long outValue = lastValueRead + minDeltaInCurrentBlock + 
unpackedValuesBuffer[i];
+      lastValueRead = outValue;
+      outputWriter.write(c, rowId + valuesRead, outValue);
+      remaining--;
+      remainingInBlock--;
+      remainingInMiniBlock--;
+      valuesRead++;
+    }
+
+    return valuesRead;
+  }
+
+  private void readBlockHeader() {
+    try {
+      minDeltaInCurrentBlock = BytesUtils.readZigZagVarLong(in);
+    } catch (IOException e) {
+      throw new ParquetDecodingException("can not read min delta in current 
block", e);
+    }
+    readBitWidthsForMiniBlocks();
+    remainingInBlock = blockSizeInValues;
+    currentMiniBlock = 0;
+    remainingInMiniBlock = 0;
+  }
+
+  /**
+   * mini block has a size of 8*n, unpack 8 value each time
+   * @see 
org.apache.parquet.column.values.delta.DeltaBinaryPackingValuesReader#unpackMiniBlock
+   */
+  private void unpackMiniBlock() throws IOException {
+    this.unpackedValuesBuffer = new long[miniBlockSizeInValues];
+    BytePackerForLong packer = Packer.LITTLE_ENDIAN.newBytePackerForLong(
+        bitWidths[currentMiniBlock]);
+    for (int j = 0; j < miniBlockSizeInValues; j += 8) {
+      ByteBuffer buffer = in.slice(packer.getBitWidth());
+      packer.unpack8Values(buffer, buffer.position(), unpackedValuesBuffer, j);

Review comment:
       I had thought about that too. I had looked at 
`ByteBitPackingForLongLE.unpack8Values` and my head hurt, so I left it. But on 
a second look, it does appear to be a simple change. I made the change and 
everything worked (i.e. all the unit tests pass).
   However, looking at the implementation of the DeltaBinaryPackedWriter, it 
turns out that the number of values in a mini block is assumed to be a multiple 
of 8 (not 32). (Also see: https://issues.apache.org/jira/browse/PARQUET-2077)
   So it is probably safer to leave it as it is.
   

##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports 
the vectorized
+ * interface.
+ */
+public class VectorizedDeltaBinaryPackedReader extends ValuesReader
+    implements VectorizedValuesReader {
+
+  // header data
+  private int blockSizeInValues;
+  private int miniBlockNumInABlock;
+  private int totalValueCount;
+  private long firstValue;
+
+  private int miniBlockSizeInValues;
+
+  // values read by the caller
+  private int valuesRead = 0;
+
+  //variables to keep state of the current block and miniblock
+  private long lastValueRead;
+  private long minDeltaInCurrentBlock;
+  private int currentMiniBlock = 0;

Review comment:
       Done

##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports 
the vectorized
+ * interface.
+ */
+public class VectorizedDeltaBinaryPackedReader extends ValuesReader
+    implements VectorizedValuesReader {
+
+  // header data
+  private int blockSizeInValues;
+  private int miniBlockNumInABlock;
+  private int totalValueCount;
+  private long firstValue;
+
+  private int miniBlockSizeInValues;
+
+  // values read by the caller
+  private int valuesRead = 0;
+
+  //variables to keep state of the current block and miniblock
+  private long lastValueRead;
+  private long minDeltaInCurrentBlock;
+  private int currentMiniBlock = 0;
+  private int[] bitWidths; // bit widths for each miniblock in the current 
block
+  private int remainingInBlock = 0; // values in current block still to be read
+  private int remainingInMiniBlock = 0; // values in current mini block still 
to be read
+  private long[] unpackedValuesBuffer;
+
+  private ByteBufferInputStream in;
+
+  @SuppressWarnings("unused")
+  @Override
+  public void initFromPage(/*unused*/int valueCount, ByteBufferInputStream in) 
throws IOException {
+    Preconditions.checkArgument(valueCount >= 1,
+        "Page must have at least one value, but it has " + valueCount);
+    this.in = in;
+
+    // Read the header
+    this.blockSizeInValues = BytesUtils.readUnsignedVarInt(in);
+    this.miniBlockNumInABlock = BytesUtils.readUnsignedVarInt(in);
+    double miniSize = (double) blockSizeInValues / miniBlockNumInABlock;
+    Preconditions.checkArgument(miniSize % 8 == 0,
+        "miniBlockSize must be multiple of 8, but it's " + miniSize);
+    this.miniBlockSizeInValues = (int) miniSize;
+    this.totalValueCount = BytesUtils.readUnsignedVarInt(in);
+    this.bitWidths = new int[miniBlockNumInABlock];
+
+    // read the first value
+    firstValue = BytesUtils.readZigZagVarLong(in);
+
+  }
+
+  @Override
+  public void skip() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public byte readByte() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public short readShort() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Binary readBinary(int len) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBooleans(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBytes(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readShorts(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readIntegers(int total, WritableColumnVector c, int rowId) {
+    readValues(total, c, rowId, (w, r, v) -> {
+      c.putInt(r, (int) v);
+    });
+  }
+
+  @Override
+  public void readIntegersWithRebase(int total, WritableColumnVector c, int 
rowId,
+      boolean failIfRebase) {
+    throw new UnsupportedOperationException("Only readIntegers is valid.");
+  }
+
+  @Override
+  public void readUnsignedIntegers(int total, WritableColumnVector c, int 
rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readUnsignedLongs(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readLongs(int total, WritableColumnVector c, int rowId) {
+    readValues(total, c, rowId, WritableColumnVector::putLong);
+  }
+
+  @Override
+  public void readLongsWithRebase(int total, WritableColumnVector c, int rowId,
+      boolean failIfRebase) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readFloats(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readDoubles(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBinary(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipBooleans(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipBytes(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipShorts(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipIntegers(int total) {
+    // Read the values but don't write them out (the writer output method is a 
no-op)
+    readValues(total, null, -1, (w, r, v) -> {
+    });
+  }
+
+  @Override
+  public void skipLongs(int total) {
+    // Read the values but don't write them out (the writer output method is a 
no-op)
+    readValues(total, null, -1, (w, r, v) -> {
+    });
+  }
+
+  @Override
+  public void skipFloats(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipDoubles(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipBinary(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipFixedLenByteArray(int total, int len) {
+    throw new UnsupportedOperationException();
+  }
+
+  private void readValues(int total, WritableColumnVector c, int rowId,
+      IntegerOutputWriter outputWriter) {
+    int remaining = total;
+    if (valuesRead + total > totalValueCount) {
+      throw new ParquetDecodingException(
+          "no more values to read, total value count is " + valuesRead);
+    }
+    // First value
+    if (valuesRead == 0) {
+      //c.putInt(rowId, (int)firstValue);

Review comment:
       Done

##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports 
the vectorized
+ * interface.
+ */
+public class VectorizedDeltaBinaryPackedReader extends ValuesReader
+    implements VectorizedValuesReader {
+
+  // header data
+  private int blockSizeInValues;
+  private int miniBlockNumInABlock;
+  private int totalValueCount;
+  private long firstValue;
+
+  private int miniBlockSizeInValues;
+
+  // values read by the caller
+  private int valuesRead = 0;
+
+  //variables to keep state of the current block and miniblock
+  private long lastValueRead;
+  private long minDeltaInCurrentBlock;
+  private int currentMiniBlock = 0;
+  private int[] bitWidths; // bit widths for each miniblock in the current 
block
+  private int remainingInBlock = 0; // values in current block still to be read
+  private int remainingInMiniBlock = 0; // values in current mini block still 
to be read
+  private long[] unpackedValuesBuffer;
+
+  private ByteBufferInputStream in;
+
+  @SuppressWarnings("unused")
+  @Override
+  public void initFromPage(/*unused*/int valueCount, ByteBufferInputStream in) 
throws IOException {
+    Preconditions.checkArgument(valueCount >= 1,
+        "Page must have at least one value, but it has " + valueCount);
+    this.in = in;
+
+    // Read the header
+    this.blockSizeInValues = BytesUtils.readUnsignedVarInt(in);
+    this.miniBlockNumInABlock = BytesUtils.readUnsignedVarInt(in);
+    double miniSize = (double) blockSizeInValues / miniBlockNumInABlock;
+    Preconditions.checkArgument(miniSize % 8 == 0,
+        "miniBlockSize must be multiple of 8, but it's " + miniSize);
+    this.miniBlockSizeInValues = (int) miniSize;
+    this.totalValueCount = BytesUtils.readUnsignedVarInt(in);
+    this.bitWidths = new int[miniBlockNumInABlock];
+
+    // read the first value
+    firstValue = BytesUtils.readZigZagVarLong(in);
+
+  }
+
+  @Override
+  public void skip() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public byte readByte() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public short readShort() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Binary readBinary(int len) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBooleans(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBytes(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readShorts(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readIntegers(int total, WritableColumnVector c, int rowId) {
+    readValues(total, c, rowId, (w, r, v) -> {
+      c.putInt(r, (int) v);
+    });
+  }
+
+  @Override
+  public void readIntegersWithRebase(int total, WritableColumnVector c, int 
rowId,
+      boolean failIfRebase) {
+    throw new UnsupportedOperationException("Only readIntegers is valid.");
+  }
+
+  @Override
+  public void readUnsignedIntegers(int total, WritableColumnVector c, int 
rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readUnsignedLongs(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readLongs(int total, WritableColumnVector c, int rowId) {
+    readValues(total, c, rowId, WritableColumnVector::putLong);
+  }
+
+  @Override
+  public void readLongsWithRebase(int total, WritableColumnVector c, int rowId,
+      boolean failIfRebase) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readFloats(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readDoubles(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBinary(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipBooleans(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipBytes(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipShorts(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipIntegers(int total) {
+    // Read the values but don't write them out (the writer output method is a 
no-op)
+    readValues(total, null, -1, (w, r, v) -> {
+    });
+  }
+
+  @Override
+  public void skipLongs(int total) {
+    // Read the values but don't write them out (the writer output method is a 
no-op)
+    readValues(total, null, -1, (w, r, v) -> {
+    });
+  }
+
+  @Override
+  public void skipFloats(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipDoubles(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipBinary(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipFixedLenByteArray(int total, int len) {
+    throw new UnsupportedOperationException();
+  }
+
+  private void readValues(int total, WritableColumnVector c, int rowId,
+      IntegerOutputWriter outputWriter) {
+    int remaining = total;
+    if (valuesRead + total > totalValueCount) {
+      throw new ParquetDecodingException(
+          "no more values to read, total value count is " + valuesRead);
+    }
+    // First value
+    if (valuesRead == 0) {
+      //c.putInt(rowId, (int)firstValue);
+      outputWriter.write(c, rowId, firstValue);
+      lastValueRead = firstValue;
+      rowId++;
+      remaining--;
+    }
+    while (remaining > 0) {
+      int n;
+      try {
+        n = loadMiniBlockToOutput(remaining, c, rowId, outputWriter);
+      } catch (IOException e) {
+        throw new ParquetDecodingException("Error reading mini block.", e);
+      }
+      rowId += n;
+      remaining -= n;
+    }
+    valuesRead = total - remaining;
+  }
+
+
+  /**
+   * Read from a mini block.  Read at most 'remaining' values into output.
+   *
+   * @return the number of values read into output
+   */
+  private int loadMiniBlockToOutput(int remaining, WritableColumnVector c, int 
rowId,
+      IntegerOutputWriter outputWriter) throws IOException {
+
+    // new block; read the block header
+    if (remainingInBlock == 0) {
+      readBlockHeader();
+    }
+
+    // new miniblock, unpack the miniblock
+    if (remainingInMiniBlock == 0) {
+      unpackMiniBlock();
+    }
+
+    //read values from miniblock
+    int valuesRead = 0;
+    for (int i = miniBlockSizeInValues - remainingInMiniBlock;
+        i < miniBlockSizeInValues && valuesRead < remaining; i++) {
+      //calculate values from deltas unpacked for current block
+      long outValue = lastValueRead + minDeltaInCurrentBlock + 
unpackedValuesBuffer[i];
+      lastValueRead = outValue;
+      outputWriter.write(c, rowId + valuesRead, outValue);
+      remaining--;
+      remainingInBlock--;
+      remainingInMiniBlock--;
+      valuesRead++;
+    }
+
+    return valuesRead;
+  }
+
+  private void readBlockHeader() {
+    try {
+      minDeltaInCurrentBlock = BytesUtils.readZigZagVarLong(in);
+    } catch (IOException e) {
+      throw new ParquetDecodingException("can not read min delta in current 
block", e);
+    }
+    readBitWidthsForMiniBlocks();
+    remainingInBlock = blockSizeInValues;
+    currentMiniBlock = 0;
+    remainingInMiniBlock = 0;
+  }
+
+  /**
+   * mini block has a size of 8*n, unpack 8 value each time
+   * @see 
org.apache.parquet.column.values.delta.DeltaBinaryPackingValuesReader#unpackMiniBlock
+   */
+  private void unpackMiniBlock() throws IOException {
+    this.unpackedValuesBuffer = new long[miniBlockSizeInValues];

Review comment:
       I guess we could. Essentially, we need an initialized array of values 
and I was lazy and just allocated a new one. 
   TL;DR: In case of a run of deltas that are zero, the bitwidth for the 
miniblock is zero and the `Packer` used to decode is a `Packer0`, which 
basically does nothing, leaving the output buffer untouched. If we reuse the 
buffer without re-initializing it, the old incorrect value gets used.
   I replaced it with an array initialized to zero on each use.

##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports 
the vectorized
+ * interface.

Review comment:
       Done

##########
File path: .gitignore
##########
@@ -33,6 +33,7 @@ R/pkg/tests/fulltests/Rplots.pdf
 build/*.jar
 build/apache-maven*
 build/scala*
+build/zinc*

Review comment:
       Removed

##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaByteArrayReader.java
##########
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.deltastrings.DeltaByteArrayReader;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * An implementation of the Parquet DELTA_BYTE_ARRAY decoder that supports the 
vectorized interface.
+ */
+public class VectorizedDeltaByteArrayReader extends ValuesReader implements 
VectorizedValuesReader {
+  private final DeltaByteArrayReader deltaByteArrayReader = new 
DeltaByteArrayReader();
+
+  @Override
+  public void initFromPage(int valueCount, ByteBufferInputStream in) throws 
IOException {
+    deltaByteArrayReader.initFromPage(valueCount, in);
+  }
+
+  @Override
+  public void skip() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public byte readByte() {

Review comment:
       Done

##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports 
the vectorized
+ * interface.
+ */
+public class VectorizedDeltaBinaryPackedReader extends ValuesReader
+    implements VectorizedValuesReader {
+
+  // header data
+  private int blockSizeInValues;
+  private int miniBlockNumInABlock;
+  private int totalValueCount;
+  private long firstValue;
+
+  private int miniBlockSizeInValues;
+
+  // values read by the caller
+  private int valuesRead = 0;
+
+  //variables to keep state of the current block and miniblock
+  private long lastValueRead;
+  private long minDeltaInCurrentBlock;
+  private int currentMiniBlock = 0;
+  private int[] bitWidths; // bit widths for each miniblock in the current 
block
+  private int remainingInBlock = 0; // values in current block still to be read
+  private int remainingInMiniBlock = 0; // values in current mini block still 
to be read
+  private long[] unpackedValuesBuffer;
+
+  private ByteBufferInputStream in;
+
+  @SuppressWarnings("unused")
+  @Override
+  public void initFromPage(/*unused*/int valueCount, ByteBufferInputStream in) 
throws IOException {
+    Preconditions.checkArgument(valueCount >= 1,
+        "Page must have at least one value, but it has " + valueCount);
+    this.in = in;
+
+    // Read the header
+    this.blockSizeInValues = BytesUtils.readUnsignedVarInt(in);
+    this.miniBlockNumInABlock = BytesUtils.readUnsignedVarInt(in);
+    double miniSize = (double) blockSizeInValues / miniBlockNumInABlock;
+    Preconditions.checkArgument(miniSize % 8 == 0,
+        "miniBlockSize must be multiple of 8, but it's " + miniSize);
+    this.miniBlockSizeInValues = (int) miniSize;
+    this.totalValueCount = BytesUtils.readUnsignedVarInt(in);
+    this.bitWidths = new int[miniBlockNumInABlock];
+
+    // read the first value
+    firstValue = BytesUtils.readZigZagVarLong(in);
+
+  }
+
+  @Override
+  public void skip() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public byte readByte() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public short readShort() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Binary readBinary(int len) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBooleans(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBytes(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readShorts(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readIntegers(int total, WritableColumnVector c, int rowId) {
+    readValues(total, c, rowId, (w, r, v) -> {
+      c.putInt(r, (int) v);
+    });
+  }
+
+  @Override
+  public void readIntegersWithRebase(int total, WritableColumnVector c, int 
rowId,
+      boolean failIfRebase) {
+    throw new UnsupportedOperationException("Only readIntegers is valid.");
+  }
+
+  @Override
+  public void readUnsignedIntegers(int total, WritableColumnVector c, int 
rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readUnsignedLongs(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readLongs(int total, WritableColumnVector c, int rowId) {
+    readValues(total, c, rowId, WritableColumnVector::putLong);
+  }
+
+  @Override
+  public void readLongsWithRebase(int total, WritableColumnVector c, int rowId,
+      boolean failIfRebase) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readFloats(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readDoubles(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBinary(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipBooleans(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipBytes(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipShorts(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipIntegers(int total) {
+    // Read the values but don't write them out (the writer output method is a 
no-op)
+    readValues(total, null, -1, (w, r, v) -> {
+    });
+  }
+
+  @Override
+  public void skipLongs(int total) {
+    // Read the values but don't write them out (the writer output method is a 
no-op)
+    readValues(total, null, -1, (w, r, v) -> {
+    });
+  }
+
+  @Override
+  public void skipFloats(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipDoubles(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipBinary(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipFixedLenByteArray(int total, int len) {
+    throw new UnsupportedOperationException();
+  }
+
+  private void readValues(int total, WritableColumnVector c, int rowId,
+      IntegerOutputWriter outputWriter) {
+    int remaining = total;
+    if (valuesRead + total > totalValueCount) {
+      throw new ParquetDecodingException(
+          "no more values to read, total value count is " + valuesRead);
+    }
+    // First value
+    if (valuesRead == 0) {
+      //c.putInt(rowId, (int)firstValue);
+      outputWriter.write(c, rowId, firstValue);
+      lastValueRead = firstValue;
+      rowId++;
+      remaining--;
+    }
+    while (remaining > 0) {
+      int n;
+      try {
+        n = loadMiniBlockToOutput(remaining, c, rowId, outputWriter);
+      } catch (IOException e) {
+        throw new ParquetDecodingException("Error reading mini block.", e);
+      }
+      rowId += n;
+      remaining -= n;
+    }
+    valuesRead = total - remaining;
+  }
+
+
+  /**
+   * Read from a mini block.  Read at most 'remaining' values into output.
+   *
+   * @return the number of values read into output
+   */
+  private int loadMiniBlockToOutput(int remaining, WritableColumnVector c, int 
rowId,
+      IntegerOutputWriter outputWriter) throws IOException {
+
+    // new block; read the block header
+    if (remainingInBlock == 0) {
+      readBlockHeader();
+    }
+
+    // new miniblock, unpack the miniblock
+    if (remainingInMiniBlock == 0) {
+      unpackMiniBlock();
+    }
+
+    //read values from miniblock
+    int valuesRead = 0;
+    for (int i = miniBlockSizeInValues - remainingInMiniBlock;
+        i < miniBlockSizeInValues && valuesRead < remaining; i++) {
+      //calculate values from deltas unpacked for current block
+      long outValue = lastValueRead + minDeltaInCurrentBlock + 
unpackedValuesBuffer[i];
+      lastValueRead = outValue;
+      outputWriter.write(c, rowId + valuesRead, outValue);
+      remaining--;

Review comment:
       `remaining` is the number of values left to be written. We've just written 
the first value here, so `remaining` is now `total - 1`.

##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedColumnReader.java
##########
@@ -166,7 +166,7 @@ void readBatch(int total, WritableColumnVector column) 
throws IOException {
         readState.resetForNewPage(pageValueCount, pageFirstRowIndex);
       }
       PrimitiveType.PrimitiveTypeName typeName =
-          descriptor.getPrimitiveType().getPrimitiveTypeName();
+        descriptor.getPrimitiveType().getPrimitiveTypeName();

Review comment:
       Done

##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports 
the vectorized
+ * interface.
+ */
+public class VectorizedDeltaBinaryPackedReader extends ValuesReader
+    implements VectorizedValuesReader {
+
+  // header data
+  private int blockSizeInValues;
+  private int miniBlockNumInABlock;
+  private int totalValueCount;
+  private long firstValue;
+
+  private int miniBlockSizeInValues;
+
+  // values read by the caller
+  private int valuesRead = 0;
+
+  //variables to keep state of the current block and miniblock
+  private long lastValueRead;
+  private long minDeltaInCurrentBlock;
+  private int currentMiniBlock = 0;
+  private int[] bitWidths; // bit widths for each miniblock in the current 
block
+  private int remainingInBlock = 0; // values in current block still to be read
+  private int remainingInMiniBlock = 0; // values in current mini block still 
to be read
+  private long[] unpackedValuesBuffer;
+
+  private ByteBufferInputStream in;
+
+  @SuppressWarnings("unused")
+  @Override
+  public void initFromPage(/*unused*/int valueCount, ByteBufferInputStream in) 
throws IOException {
+    Preconditions.checkArgument(valueCount >= 1,
+        "Page must have at least one value, but it has " + valueCount);
+    this.in = in;
+
+    // Read the header
+    this.blockSizeInValues = BytesUtils.readUnsignedVarInt(in);
+    this.miniBlockNumInABlock = BytesUtils.readUnsignedVarInt(in);
+    double miniSize = (double) blockSizeInValues / miniBlockNumInABlock;
+    Preconditions.checkArgument(miniSize % 8 == 0,
+        "miniBlockSize must be multiple of 8, but it's " + miniSize);
+    this.miniBlockSizeInValues = (int) miniSize;
+    this.totalValueCount = BytesUtils.readUnsignedVarInt(in);
+    this.bitWidths = new int[miniBlockNumInABlock];
+
+    // read the first value
+    firstValue = BytesUtils.readZigZagVarLong(in);
+
+  }
+
+  @Override
+  public void skip() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public byte readByte() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public short readShort() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Binary readBinary(int len) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBooleans(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBytes(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readShorts(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readIntegers(int total, WritableColumnVector c, int rowId) {
+    readValues(total, c, rowId, (w, r, v) -> {
+      c.putInt(r, (int) v);

Review comment:
       yes. Good catch. Corrected.

##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports 
the vectorized
+ * interface.
+ */
+public class VectorizedDeltaBinaryPackedReader extends ValuesReader
+    implements VectorizedValuesReader {
+
+  // header data
+  private int blockSizeInValues;
+  private int miniBlockNumInABlock;
+  private int totalValueCount;
+  private long firstValue;
+
+  private int miniBlockSizeInValues;
+
+  // values read by the caller
+  private int valuesRead = 0;
+
+  //variables to keep state of the current block and miniblock
+  private long lastValueRead;
+  private long minDeltaInCurrentBlock;
+  private int currentMiniBlock = 0;
+  private int[] bitWidths; // bit widths for each miniblock in the current 
block
+  private int remainingInBlock = 0; // values in current block still to be read
+  private int remainingInMiniBlock = 0; // values in current mini block still 
to be read
+  private long[] unpackedValuesBuffer;
+
+  private ByteBufferInputStream in;
+
+  @SuppressWarnings("unused")
+  @Override
+  public void initFromPage(/*unused*/int valueCount, ByteBufferInputStream in) 
throws IOException {

Review comment:
       Done

##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaByteArrayReader.java
##########
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.deltastrings.DeltaByteArrayReader;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * An implementation of the Parquet DELTA_BYTE_ARRAY decoder that supports the 
vectorized interface.
+ */
+public class VectorizedDeltaByteArrayReader extends ValuesReader implements 
VectorizedValuesReader {
+  private final DeltaByteArrayReader deltaByteArrayReader = new 
DeltaByteArrayReader();
+
+  @Override
+  public void initFromPage(int valueCount, ByteBufferInputStream in) throws 
IOException {
+    deltaByteArrayReader.initFromPage(valueCount, in);
+  }
+
+  @Override
+  public void skip() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public byte readByte() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public short readShort() {
+    return 0;

Review comment:
       Fixed

##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaByteArrayReader.java
##########
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.deltastrings.DeltaByteArrayReader;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * An implementation of the Parquet DELTA_BYTE_ARRAY decoder that supports the 
vectorized interface.
+ */
+public class VectorizedDeltaByteArrayReader extends ValuesReader implements 
VectorizedValuesReader {
+  private final DeltaByteArrayReader deltaByteArrayReader = new 
DeltaByteArrayReader();
+
+  @Override
+  public void initFromPage(int valueCount, ByteBufferInputStream in) throws 
IOException {
+    deltaByteArrayReader.initFromPage(valueCount, in);
+  }
+
+  @Override
+  public void skip() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public byte readByte() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public short readShort() {
+    return 0;

Review comment:
       Why indeed.  (Intellij generated code for unimplemented methods)

##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaByteArrayReader.java
##########
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.deltastrings.DeltaByteArrayReader;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * An implementation of the Parquet DELTA_BYTE_ARRAY decoder that supports the 
vectorized interface.
+ */
+public class VectorizedDeltaByteArrayReader extends ValuesReader implements 
VectorizedValuesReader {
+  private final DeltaByteArrayReader deltaByteArrayReader = new 
DeltaByteArrayReader();
+
+  @Override
+  public void initFromPage(int valueCount, ByteBufferInputStream in) throws 
IOException {
+    deltaByteArrayReader.initFromPage(valueCount, in);
+  }
+
+  @Override
+  public void skip() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public byte readByte() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public short readShort() {
+    return 0;
+  }
+
+  @Override
+  public Binary readBinary(int len) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBooleans(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBytes(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readShorts(int total, WritableColumnVector c, int rowId) {
+
+  }
+
+  @Override
+  public void readIntegers(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readIntegersWithRebase(int total, WritableColumnVector c, int 
rowId,
+      boolean failIfRebase) {
+
+  }
+
+  @Override
+  public void readUnsignedIntegers(int total, WritableColumnVector c, int 
rowId) {
+
+  }
+
+  @Override
+  public void readUnsignedLongs(int total, WritableColumnVector c, int rowId) {
+
+  }
+
+  @Override
+  public void readLongs(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readLongsWithRebase(int total, WritableColumnVector c, int rowId,
+      boolean failIfRebase) {
+
+  }
+
+  @Override
+  public void readFloats(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readDoubles(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBinary(int total, WritableColumnVector c, int rowId) {
+    for (int i = 0; i < total; i++) {
+      Binary binary = deltaByteArrayReader.readBytes();
+      ByteBuffer buffer = binary.toByteBuffer();
+      if (buffer.hasArray()) {
+        c.putByteArray(rowId + i, buffer.array(), buffer.arrayOffset() + 
buffer.position(),
+          binary.length());
+      } else {
+        byte[] bytes = new byte[binary.length()];
+        buffer.get(bytes);
+        c.putByteArray(rowId + i, bytes);
+      }
+    }
+  }
+
+  @Override
+  public void skipBooleans(int total) {
+
+  }
+
+  @Override
+  public void skipBytes(int total) {
+
+  }
+
+  @Override
+  public void skipShorts(int total) {
+
+  }
+
+  @Override
+  public void skipIntegers(int total) {
+
+  }
+
+  @Override
+  public void skipLongs(int total) {
+
+  }
+
+  @Override
+  public void skipFloats(int total) {
+
+  }
+
+  @Override
+  public void skipDoubles(int total) {
+
+  }
+
+  @Override
+  public void skipBinary(int total) {

Review comment:
       I left this file as is from the previous PR. For the moment I'll just 
call skip from the underlying non-vectorized version, then implement the 
vectorized version in a subsequent PR.

##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;

Review comment:
       Done

##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaByteArrayReader.java
##########
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.deltastrings.DeltaByteArrayReader;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * An implementation of the Parquet DELTA_BYTE_ARRAY decoder that supports the 
vectorized interface.
+ */
+public class VectorizedDeltaByteArrayReader extends ValuesReader implements 
VectorizedValuesReader {
+  private final DeltaByteArrayReader deltaByteArrayReader = new 
DeltaByteArrayReader();
+
+  @Override
+  public void initFromPage(int valueCount, ByteBufferInputStream in) throws 
IOException {
+    deltaByteArrayReader.initFromPage(valueCount, in);
+  }
+
+  @Override
+  public void skip() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public byte readByte() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public short readShort() {
+    // Single-value short reads are not supported by this vectorized reader.
+    // Fail loudly like the other unsupported operations instead of silently
+    // returning 0, which would mask an unimplemented decode path as real data.
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Binary readBinary(int len) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBooleans(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBytes(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readShorts(int total, WritableColumnVector c, int rowId) {
+

Review comment:
       Fixed

##########
File path: 
sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetDeltaEncodingIntegerSuite.scala
##########
@@ -0,0 +1,234 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet
+
+import java.io.IOException
+import java.nio.ByteBuffer
+import java.util.Random
+
+import org.apache.parquet.bytes.{ByteBufferInputStream, 
DirectByteBufferAllocator}
+import org.apache.parquet.column.values.ValuesWriter
+import 
org.apache.parquet.column.values.delta.DeltaBinaryPackingValuesWriterForInteger
+import org.apache.parquet.io.ParquetDecodingException
+
+import org.apache.spark.sql.execution.vectorized.{OnHeapColumnVector, 
WritableColumnVector}
+import org.apache.spark.sql.test.SharedSparkSession
+import org.apache.spark.sql.types.IntegerType
+
+/**
+ * Read tests for vectorized Delta binary packed Integer reader.
+ * Translated from
+ *  
org.apache.parquet.column.values.delta.DeltaBinaryPackingValuesWriterForIntegerTest
+ */
+class ParquetDeltaEncodingIntegerSuite extends ParquetCompatibilityTest with 
SharedSparkSession {

Review comment:
       I made an attempt to do so earlier, but I couldn't get it right :(. 
Also, this way it mirrors the Parquet library more exactly. 
   Anyway, I managed to get these merged. The result may not be pretty :( 

##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports 
the vectorized
+ * interface.
+ */
+public class VectorizedDeltaBinaryPackedReader extends ValuesReader
+    implements VectorizedValuesReader {
+
+  // header data
+  private int blockSizeInValues;
+  private int miniBlockNumInABlock;
+  private int totalValueCount;
+  private long firstValue;
+
+  private int miniBlockSizeInValues;
+
+  // values read by the caller
+  private int valuesRead = 0;
+
+  //variables to keep state of the current block and miniblock
+  private long lastValueRead;
+  private long minDeltaInCurrentBlock;
+  private int currentMiniBlock = 0;
+  private int[] bitWidths; // bit widths for each miniblock in the current 
block
+  private int remainingInBlock = 0; // values in current block still to be read
+  private int remainingInMiniBlock = 0; // values in current mini block still 
to be read
+  private long[] unpackedValuesBuffer;
+
+  private ByteBufferInputStream in;
+
+  @SuppressWarnings("unused")
+  @Override
+  public void initFromPage(/*unused*/int valueCount, ByteBufferInputStream in) 
throws IOException {
+    Preconditions.checkArgument(valueCount >= 1,
+        "Page must have at least one value, but it has " + valueCount);
+    this.in = in;
+
+    // Read the header
+    this.blockSizeInValues = BytesUtils.readUnsignedVarInt(in);
+    this.miniBlockNumInABlock = BytesUtils.readUnsignedVarInt(in);
+    double miniSize = (double) blockSizeInValues / miniBlockNumInABlock;
+    Preconditions.checkArgument(miniSize % 8 == 0,
+        "miniBlockSize must be multiple of 8, but it's " + miniSize);
+    this.miniBlockSizeInValues = (int) miniSize;
+    this.totalValueCount = BytesUtils.readUnsignedVarInt(in);
+    this.bitWidths = new int[miniBlockNumInABlock];
+
+    // read the first value
+    firstValue = BytesUtils.readZigZagVarLong(in);
+
+  }
+
+  @Override
+  public void skip() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public byte readByte() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public short readShort() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Binary readBinary(int len) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBooleans(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBytes(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readShorts(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readIntegers(int total, WritableColumnVector c, int rowId) {
+    readValues(total, c, rowId, (w, r, v) -> {
+      c.putInt(r, (int) v);
+    });
+  }
+
+  @Override
+  public void readIntegersWithRebase(int total, WritableColumnVector c, int 
rowId,
+      boolean failIfRebase) {
+    throw new UnsupportedOperationException("Only readIntegers is valid.");
+  }
+
+  @Override
+  public void readUnsignedIntegers(int total, WritableColumnVector c, int 
rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readUnsignedLongs(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readLongs(int total, WritableColumnVector c, int rowId) {
+    readValues(total, c, rowId, WritableColumnVector::putLong);
+  }
+
+  @Override
+  public void readLongsWithRebase(int total, WritableColumnVector c, int rowId,
+      boolean failIfRebase) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readFloats(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readDoubles(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBinary(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipBooleans(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipBytes(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipShorts(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipIntegers(int total) {
+    // Read the values but don't write them out (the writer output method is a 
no-op)
+    readValues(total, null, -1, (w, r, v) -> {
+    });
+  }
+
+  @Override
+  public void skipLongs(int total) {
+    // Read the values but don't write them out (the writer output method is a 
no-op)
+    readValues(total, null, -1, (w, r, v) -> {
+    });
+  }
+
+  @Override
+  public void skipFloats(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipDoubles(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipBinary(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipFixedLenByteArray(int total, int len) {
+    throw new UnsupportedOperationException();
+  }
+
+  private void readValues(int total, WritableColumnVector c, int rowId,
+      IntegerOutputWriter outputWriter) {
+    int remaining = total;
+    if (valuesRead + total > totalValueCount) {
+      throw new ParquetDecodingException(
+          "no more values to read, total value count is " + valuesRead);
+    }

Review comment:
       Done

##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports 
the vectorized
+ * interface.
+ */
+public class VectorizedDeltaBinaryPackedReader extends ValuesReader
+    implements VectorizedValuesReader {
+
+  // header data
+  private int blockSizeInValues;
+  private int miniBlockNumInABlock;
+  private int totalValueCount;
+  private long firstValue;
+
+  private int miniBlockSizeInValues;
+
+  // values read by the caller
+  private int valuesRead = 0;
+
+  //variables to keep state of the current block and miniblock
+  private long lastValueRead;
+  private long minDeltaInCurrentBlock;
+  private int currentMiniBlock = 0;
+  private int[] bitWidths; // bit widths for each miniblock in the current 
block
+  private int remainingInBlock = 0; // values in current block still to be read
+  private int remainingInMiniBlock = 0; // values in current mini block still 
to be read
+  private long[] unpackedValuesBuffer;
+
+  private ByteBufferInputStream in;
+
+  @SuppressWarnings("unused")
+  @Override
+  public void initFromPage(/*unused*/int valueCount, ByteBufferInputStream in) 
throws IOException {
+    Preconditions.checkArgument(valueCount >= 1,
+        "Page must have at least one value, but it has " + valueCount);
+    this.in = in;
+
+    // Read the header
+    this.blockSizeInValues = BytesUtils.readUnsignedVarInt(in);
+    this.miniBlockNumInABlock = BytesUtils.readUnsignedVarInt(in);
+    double miniSize = (double) blockSizeInValues / miniBlockNumInABlock;
+    Preconditions.checkArgument(miniSize % 8 == 0,
+        "miniBlockSize must be multiple of 8, but it's " + miniSize);
+    this.miniBlockSizeInValues = (int) miniSize;
+    this.totalValueCount = BytesUtils.readUnsignedVarInt(in);
+    this.bitWidths = new int[miniBlockNumInABlock];
+
+    // read the first value
+    firstValue = BytesUtils.readZigZagVarLong(in);
+

Review comment:
       Done

##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports 
the vectorized
+ * interface.
+ */
+public class VectorizedDeltaBinaryPackedReader extends ValuesReader
+    implements VectorizedValuesReader {
+
+  // header data
+  private int blockSizeInValues;
+  private int miniBlockNumInABlock;
+  private int totalValueCount;
+  private long firstValue;
+
+  private int miniBlockSizeInValues;
+
+  // values read by the caller
+  private int valuesRead = 0;
+
+  //variables to keep state of the current block and miniblock
+  private long lastValueRead;
+  private long minDeltaInCurrentBlock;
+  private int currentMiniBlock = 0;
+  private int[] bitWidths; // bit widths for each miniblock in the current 
block
+  private int remainingInBlock = 0; // values in current block still to be read
+  private int remainingInMiniBlock = 0; // values in current mini block still 
to be read
+  private long[] unpackedValuesBuffer;
+
+  private ByteBufferInputStream in;
+
+  @SuppressWarnings("unused")
+  @Override
+  public void initFromPage(/*unused*/int valueCount, ByteBufferInputStream in) 
throws IOException {
+    Preconditions.checkArgument(valueCount >= 1,
+        "Page must have at least one value, but it has " + valueCount);
+    this.in = in;
+
+    // Read the header
+    this.blockSizeInValues = BytesUtils.readUnsignedVarInt(in);
+    this.miniBlockNumInABlock = BytesUtils.readUnsignedVarInt(in);
+    double miniSize = (double) blockSizeInValues / miniBlockNumInABlock;
+    Preconditions.checkArgument(miniSize % 8 == 0,
+        "miniBlockSize must be multiple of 8, but it's " + miniSize);
+    this.miniBlockSizeInValues = (int) miniSize;
+    this.totalValueCount = BytesUtils.readUnsignedVarInt(in);
+    this.bitWidths = new int[miniBlockNumInABlock];
+
+    // read the first value
+    firstValue = BytesUtils.readZigZagVarLong(in);
+
+  }
+
+  @Override
+  public void skip() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public byte readByte() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public short readShort() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Binary readBinary(int len) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBooleans(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBytes(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readShorts(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readIntegers(int total, WritableColumnVector c, int rowId) {
+    readValues(total, c, rowId, (w, r, v) -> {
+      c.putInt(r, (int) v);
+    });
+  }
+
+  @Override
+  public void readIntegersWithRebase(int total, WritableColumnVector c, int 
rowId,
+      boolean failIfRebase) {
+    throw new UnsupportedOperationException("Only readIntegers is valid.");
+  }
+
+  @Override
+  public void readUnsignedIntegers(int total, WritableColumnVector c, int 
rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readUnsignedLongs(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readLongs(int total, WritableColumnVector c, int rowId) {
+    readValues(total, c, rowId, WritableColumnVector::putLong);
+  }
+
+  @Override
+  public void readLongsWithRebase(int total, WritableColumnVector c, int rowId,
+      boolean failIfRebase) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readFloats(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readDoubles(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBinary(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipBooleans(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipBytes(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipShorts(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void skipIntegers(int total) {
+    // Read the values but don't write them out (the writer output method is a 
no-op)
+    readValues(total, null, -1, (w, r, v) -> {

Review comment:
       I think we do. The original unit tests have interleaving read and skip. 
To continue to read after a skip, we need to have read the previous value. 

##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports 
the vectorized
+ * interface.
+ */
+public class VectorizedDeltaBinaryPackedReader extends ValuesReader
+    implements VectorizedValuesReader {
+
+  // header data
+  private int blockSizeInValues;
+  private int miniBlockNumInABlock;
+  private int totalValueCount;
+  private long firstValue;
+
+  private int miniBlockSizeInValues;
+
+  // values read by the caller
+  private int valuesRead = 0;
+
+  //variables to keep state of the current block and miniblock
+  private long lastValueRead;
+  private long minDeltaInCurrentBlock;
+  private int currentMiniBlock = 0;
+  private int[] bitWidths; // bit widths for each miniblock in the current 
block
+  private int remainingInBlock = 0; // values in current block still to be read
+  private int remainingInMiniBlock = 0; // values in current mini block still 
to be read
+  private long[] unpackedValuesBuffer;
+
+  private ByteBufferInputStream in;
+
+  @SuppressWarnings("unused")
+  @Override
+  public void initFromPage(/*unused*/int valueCount, ByteBufferInputStream in) 
throws IOException {
+    Preconditions.checkArgument(valueCount >= 1,
+        "Page must have at least one value, but it has " + valueCount);
+    this.in = in;
+
+    // Read the header
+    this.blockSizeInValues = BytesUtils.readUnsignedVarInt(in);
+    this.miniBlockNumInABlock = BytesUtils.readUnsignedVarInt(in);
+    double miniSize = (double) blockSizeInValues / miniBlockNumInABlock;
+    Preconditions.checkArgument(miniSize % 8 == 0,
+        "miniBlockSize must be multiple of 8, but it's " + miniSize);
+    this.miniBlockSizeInValues = (int) miniSize;
+    this.totalValueCount = BytesUtils.readUnsignedVarInt(in);
+    this.bitWidths = new int[miniBlockNumInABlock];
+
+    // read the first value
+    firstValue = BytesUtils.readZigZagVarLong(in);
+
+  }
+
+  @Override
+  public void skip() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public byte readByte() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public short readShort() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Binary readBinary(int len) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBooleans(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readBytes(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readShorts(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void readIntegers(int total, WritableColumnVector c, int rowId) {
+    readValues(total, c, rowId, (w, r, v) -> {
+      c.putInt(r, (int) v);
+    });
+  }
+
+  @Override
+  public void readIntegersWithRebase(int total, WritableColumnVector c, int 
rowId,
+      boolean failIfRebase) {
+    throw new UnsupportedOperationException("Only readIntegers is valid.");

Review comment:
       Oh dear. I implemented only the methods the original PR had 
implemented. On closer look, we also need support for byte, short, date, 
timestamp, year-month interval, and day-time interval data types, which are 
stored as int32 or int64. 
   Perf note: rebased dates and timestamps appear to be a backward 
compatibility fix and incur the penalty of checking whether the value needs 
to be rebased. 

##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedValuesReader.java
##########
@@ -63,4 +64,36 @@
    void skipDoubles(int total);
    void skipBinary(int total);
    void skipFixedLenByteArray(int total, int len);
+
+  /**
+   * An interface to write columnar output in various ways
+   */
+  @FunctionalInterface
+  interface IntegerOutputWriter {
+    void write(WritableColumnVector c, int rowId, long val);
+  }
+
+  @FunctionalInterface
+  interface ByteBufferOutputWriter {

Review comment:
       But not for long (there's a horrible pun in here somewhere). 
   I need this for the vectorized implementation of DeltaByteArrayReader 
(which I did not include to make the review easier).

##########
File path: 
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedDeltaBinaryPackedReader.java
##########
@@ -0,0 +1,319 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.datasources.parquet;
+
+import java.nio.ByteBuffer;
+import org.apache.parquet.Preconditions;
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.apache.parquet.bytes.BytesUtils;
+import org.apache.parquet.column.values.ValuesReader;
+import org.apache.parquet.column.values.bitpacking.BytePackerForLong;
+import org.apache.parquet.column.values.bitpacking.Packer;
+import org.apache.parquet.io.ParquetDecodingException;
+import org.apache.parquet.io.api.Binary;
+import org.apache.spark.sql.execution.vectorized.WritableColumnVector;
+
+import java.io.IOException;
+
+/**
+ * An implementation of the Parquet DELTA_BINARY_PACKED decoder that supports 
the vectorized
+ * interface.
+ */
+public class VectorizedDeltaBinaryPackedReader extends ValuesReader
+    implements VectorizedValuesReader {
+
+  // header data
+  private int blockSizeInValues;
+  private int miniBlockNumInABlock;
+  private int totalValueCount;
+  private long firstValue;
+
+  private int miniBlockSizeInValues;
+
+  // values read by the caller
+  private int valuesRead = 0;
+
+  //variables to keep state of the current block and miniblock
+  private long lastValueRead;
+  private long minDeltaInCurrentBlock;
+  private int currentMiniBlock = 0;
+  private int[] bitWidths; // bit widths for each miniblock in the current 
block
+  private int remainingInBlock = 0; // values in current block still to be read
+  private int remainingInMiniBlock = 0; // values in current mini block still 
to be read
+  private long[] unpackedValuesBuffer;
+
+  private ByteBufferInputStream in;
+
+  /**
+   * Parses the DELTA_BINARY_PACKED page header: block size in values, number of
+   * mini blocks per block, total value count, and the first value (zig-zag
+   * var-long). Leaves {@code in} positioned at the start of the first block.
+   * The {@code valueCount} argument is only sanity-checked; the header's
+   * totalValueCount is what gets stored for decoding.
+   */
+  @SuppressWarnings("unused")
+  @Override
+  public void initFromPage(/*unused*/int valueCount, ByteBufferInputStream in) 
throws IOException {
+    Preconditions.checkArgument(valueCount >= 1,
+        "Page must have at least one value, but it has " + valueCount);
+    this.in = in;
+
+    // Read the header
+    this.blockSizeInValues = BytesUtils.readUnsignedVarInt(in);
+    this.miniBlockNumInABlock = BytesUtils.readUnsignedVarInt(in);
+    // The spec requires each mini block to hold a multiple of 8 values so that
+    // bit-packing works on whole bytes.
+    double miniSize = (double) blockSizeInValues / miniBlockNumInABlock;
+    Preconditions.checkArgument(miniSize % 8 == 0,
+        "miniBlockSize must be multiple of 8, but it's " + miniSize);
+    this.miniBlockSizeInValues = (int) miniSize;
+    this.totalValueCount = BytesUtils.readUnsignedVarInt(in);
+    this.bitWidths = new int[miniBlockNumInABlock];
+
+    // read the first value
+    firstValue = BytesUtils.readZigZagVarLong(in);
+
+  }
+
+  // Single-value skip from ValuesReader is unsupported; the vectorized
+  // interface uses the batch skipIntegers/skipLongs below instead.
+  @Override
+  public void skip() {
+    throw new UnsupportedOperationException();
+  }
+
+  // Single-value reads are unsupported in this vectorized reader.
+  @Override
+  public byte readByte() {
+    throw new UnsupportedOperationException();
+  }
+
+  // Single-value reads are unsupported in this vectorized reader.
+  @Override
+  public short readShort() {
+    throw new UnsupportedOperationException();
+  }
+
+  // Single-value reads are unsupported in this vectorized reader.
+  @Override
+  public Binary readBinary(int len) {
+    throw new UnsupportedOperationException();
+  }
+
+  // DELTA_BINARY_PACKED is defined only for int/long pages; booleans never use it.
+  @Override
+  public void readBooleans(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  // NOTE(review): per the discussion above, byte values are stored as int32 in
+  // Parquet, so this should eventually delegate to the integer path — TODO confirm.
+  @Override
+  public void readBytes(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  // NOTE(review): per the discussion above, short values are stored as int32 in
+  // Parquet, so this should eventually delegate to the integer path — TODO confirm.
+  @Override
+  public void readShorts(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  // Decodes `total` delta-packed values and writes them as ints starting at rowId.
+  // NOTE(review): the lambda ignores its `w` parameter and captures `c` instead;
+  // presumably w == c at the call site, but `w.putInt(r, (int) v)` would be
+  // cleaner — TODO confirm against readValues.
+  @Override
+  public void readIntegers(int total, WritableColumnVector c, int rowId) {
+    readValues(total, c, rowId, (w, r, v) -> {
+      c.putInt(r, (int) v);
+    });
+  }
+
+  // NOTE(review): rebased (legacy-calendar) dates are not handled yet; per the
+  // discussion below this backward-compatibility path still needs an implementation.
+  @Override
+  public void readIntegersWithRebase(int total, WritableColumnVector c, int 
rowId,
+      boolean failIfRebase) {
+    throw new UnsupportedOperationException("Only readIntegers is valid.");
+  }
+
+  // Unsigned widening reads are not implemented for this encoding.
+  @Override
+  public void readUnsignedIntegers(int total, WritableColumnVector c, int 
rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  // Unsigned widening reads are not implemented for this encoding.
+  @Override
+  public void readUnsignedLongs(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  // Decodes `total` delta-packed values and writes them as longs starting at
+  // rowId, delegating output to WritableColumnVector::putLong.
+  @Override
+  public void readLongs(int total, WritableColumnVector c, int rowId) {
+    readValues(total, c, rowId, WritableColumnVector::putLong);
+  }
+
+  // NOTE(review): rebased (legacy-calendar) timestamps are not handled yet; per
+  // the discussion below this backward-compatibility path still needs an
+  // implementation.
+  @Override
+  public void readLongsWithRebase(int total, WritableColumnVector c, int rowId,
+      boolean failIfRebase) {
+    throw new UnsupportedOperationException();
+  }
+
+  // DELTA_BINARY_PACKED is defined only for int/long pages; floats never use it.
+  @Override
+  public void readFloats(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  // DELTA_BINARY_PACKED is defined only for int/long pages; doubles never use it.
+  @Override
+  public void readDoubles(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  // DELTA_BINARY_PACKED is defined only for int/long pages; binary never uses it.
+  @Override
+  public void readBinary(int total, WritableColumnVector c, int rowId) {
+    throw new UnsupportedOperationException();
+  }
+
+  // DELTA_BINARY_PACKED is defined only for int/long pages; booleans never use it.
+  @Override
+  public void skipBooleans(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  // NOTE(review): if byte/short reads get int32-backed support, the matching
+  // skips will need it too — currently unsupported.
+  @Override
+  public void skipBytes(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  // NOTE(review): if byte/short reads get int32-backed support, the matching
+  // skips will need it too — currently unsupported.
+  @Override
+  public void skipShorts(int total) {
+    throw new UnsupportedOperationException();
+  }
+
+  // Skipping still has to decode each delta to keep the stream position and
+  // lastValueRead consistent; only the output write is elided via the no-op
+  // writer lambda.
+  @Override
+  public void skipIntegers(int total) {
+    // Read the values but don't write them out (the writer output method is a 
no-op)
+    readValues(total, null, -1, (w, r, v) -> {
+    });
+  }
+
+  @Override
+  public void skipLongs(int total) {
+    // Read the values but don't write them out (the writer output method is a 
no-op)
+    readValues(total, null, -1, (w, r, v) -> {
+    });

Review comment:
       Done




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to