[
https://issues.apache.org/jira/browse/PARQUET-2159?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17681669#comment-17681669
]
ASF GitHub Bot commented on PARQUET-2159:
-----------------------------------------
jiangjiguang commented on code in PR #1011:
URL: https://github.com/apache/parquet-mr/pull/1011#discussion_r1089859296
##########
parquet-column/src/main/java/org/apache/parquet/column/values/bitpacking/ParquetReadRouter.java:
##########
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.parquet.column.values.bitpacking;
+
+import org.apache.parquet.bytes.ByteBufferInputStream;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * This is a utility class used by big data applications (such as Spark and Flink).
+ * On Intel CPUs whose flags include avx512vbmi and avx512_vbmi2, the vectorized path yields better performance.
+ */
+public class ParquetReadRouter {
+  private static final Logger LOG = LoggerFactory.getLogger(ParquetReadRouter.class);
+
+  private static volatile Boolean vector;
+
+  public static void read(int bitWidth, ByteBufferInputStream in, int currentCount, int[] currentBuffer) throws IOException {
+    if (supportVector()) {
+      readBatchVector(bitWidth, in, currentCount, currentBuffer);
+    } else {
+      readBatch(bitWidth, in, currentCount, currentBuffer);
+    }
+  }
+
+  public static void readBatchVector(int bitWidth, ByteBufferInputStream in, int currentCount, int[] currentBuffer) throws IOException {
+    BytePacker packer = Packer.LITTLE_ENDIAN.newBytePacker(bitWidth);
+    BytePacker packerVector = Packer.LITTLE_ENDIAN.newBytePackerVector(bitWidth);
+    int valueIndex = 0;
+    int byteIndex = 0;
+    int unpackCount = packerVector.getUnpackCount();
+    int inputByteCountPerVector = packerVector.getUnpackCount() / 8 * bitWidth;
+    int totalByteCount = currentCount * bitWidth / 8;
+
+    // AVX-512 registers are 512 bits wide, so one vector load covers up to 64 bytes
+    int totalByteCountVector = totalByteCount - 64;
+    ByteBuffer buffer = in.slice(totalByteCount);
+    if (buffer.hasArray()) {
+      for (; byteIndex < totalByteCountVector; byteIndex += inputByteCountPerVector, valueIndex += unpackCount) {
+        packerVector.unpackValuesVector(buffer.array(), buffer.arrayOffset() + buffer.position() + byteIndex, currentBuffer, valueIndex);
+      }
+      // If the remaining size is <= 64 bytes, the remaining bytes are unpacked by the scalar packer
+      for (; byteIndex < totalByteCount; byteIndex += bitWidth, valueIndex += 8) {
+        packer.unpack8Values(buffer.array(), buffer.arrayOffset() + buffer.position() + byteIndex, currentBuffer, valueIndex);
+      }
+    } else {
+      for (; byteIndex < totalByteCountVector; byteIndex += inputByteCountPerVector, valueIndex += unpackCount) {
+        packerVector.unpackValuesVector(buffer, buffer.position() + byteIndex, currentBuffer, valueIndex);
+      }
+      for (; byteIndex < totalByteCount; byteIndex += bitWidth, valueIndex += 8) {
+        packer.unpack8Values(buffer, buffer.position() + byteIndex, currentBuffer, valueIndex);
+      }
+    }
+  }
+
+  public static void readBatch(int bitWidth, ByteBufferInputStream in, int currentCount, int[] currentBuffer) throws EOFException {
+    BytePacker packer = Packer.LITTLE_ENDIAN.newBytePacker(bitWidth);
+    int valueIndex = 0;
+    while (valueIndex < currentCount) {
+      // values are bit packed 8 at a time, so reading bitWidth bytes will always work
+      ByteBuffer buffer = in.slice(bitWidth);
+      packer.unpack8Values(buffer, buffer.position(), currentBuffer, valueIndex);
+      valueIndex += 8;
+    }
+  }
+
+  public static Boolean supportVector() {
+    if (vector != null) {
+      return vector;
+    }
+    synchronized (ParquetReadRouter.class) {
+      if (vector == null) {
+        vector = avx512Flag();
+      }
+    }
+    return vector;
+  }
+
+ private static boolean avx512Flag() {
Review Comment:
@wgtmac yes, I have added some comments to guide users on how to use this class.
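
For readers trying the class out, here is a minimal caller sketch (not part of the PR; the bit width, value count, and zero-filled input are illustrative assumptions). It wraps a packed byte array in a ByteBufferInputStream and lets ParquetReadRouter pick the vectorized or scalar unpack path:

{code:java}
import org.apache.parquet.bytes.ByteBufferInputStream;
import org.apache.parquet.column.values.bitpacking.ParquetReadRouter;

import java.io.IOException;
import java.nio.ByteBuffer;

public class ParquetReadRouterSketch {
  public static void main(String[] args) throws IOException {
    int bitWidth = 7;                                          // each value is packed into 7 bits
    int valueCount = 1024;                                     // values are unpacked 8 at a time, so keep this a multiple of 8
    byte[] packedBytes = new byte[valueCount * bitWidth / 8];  // packed input (all zeros here, just for illustration)

    ByteBufferInputStream in = ByteBufferInputStream.wrap(ByteBuffer.wrap(packedBytes));
    int[] decoded = new int[valueCount];

    // Routes to readBatchVector when the CPU reports the required AVX-512 flags, otherwise to readBatch
    ParquetReadRouter.read(bitWidth, in, valueCount, decoded);
    System.out.println("first decoded value: " + decoded[0]);
  }
}
{code}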
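To make the vector/scalar split in readBatchVector concrete, a worked-arithmetic sketch follows; the figure of 64 values per vector unpack (getUnpackCount) is an assumption for illustration only, not taken from the PR:

{code:java}
public class VectorSplitMathSketch {
  public static void main(String[] args) {
    int bitWidth = 7;
    int currentCount = 1024;                                   // values to decode
    int unpackCount = 64;                                      // ASSUMED values produced per vector unpack
    int inputByteCountPerVector = unpackCount / 8 * bitWidth;  // 56 packed bytes consumed per vector unpack
    int totalByteCount = currentCount * bitWidth / 8;          // 896 packed bytes in total
    int totalByteCountVector = totalByteCount - 64;            // 832: stop early so a 64-byte vector load never overruns the input

    int byteIndex = 0;
    int valueIndex = 0;
    while (byteIndex < totalByteCountVector) {                 // vector loop: 15 iterations of 64 values
      byteIndex += inputByteCountPerVector;
      valueIndex += unpackCount;
    }
    while (byteIndex < totalByteCount) {                       // scalar tail: 8 iterations of 8 values
      byteIndex += bitWidth;
      valueIndex += 8;
    }
    System.out.println("decoded values: " + valueIndex);       // 1024, i.e. every value is covered exactly once
  }
}
{code}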
> Parquet bit-packing de/encode optimization
> ------------------------------------------
>
> Key: PARQUET-2159
> URL: https://issues.apache.org/jira/browse/PARQUET-2159
> Project: Parquet
> Issue Type: Improvement
> Components: parquet-mr
> Affects Versions: 1.13.0
> Reporter: Fang-Xie
> Assignee: Fang-Xie
> Priority: Major
> Fix For: 1.13.0
>
> Attachments: image-2022-06-15-22-56-08-396.png,
> image-2022-06-15-22-57-15-964.png, image-2022-06-15-22-58-01-442.png,
> image-2022-06-15-22-58-40-704.png
>
>
> Spark currently uses parquet-mr as its Parquet reader/writer library, but the
> built-in bit-packing en/decoding is not efficient enough.
> Our optimization of Parquet bit-packing en/decoding with jdk.incubator.vector
> in OpenJDK 18 brings a prominent performance improvement.
> Because the Vector API has been part of OpenJDK since JDK 16, this optimization
> requires JDK 16 or higher.
> *Below are our test results*
> The functional test is based on the open-source parquet-mr bit-pack decoding
> function *_public final void unpack8Values(final byte[] in, final int inPos,
> final int[] out, final int outPos)_*
> compared with our Vector API implementation *_public final void
> unpack8Values_vec(final byte[] in, final int inPos, final int[] out, final
> int outPos)_*.
> We tested 10 pairs of decode functions (open-source Parquet bit unpacking vs.
> our optimized vectorized SIMD implementation) with bit
> width=\{1,2,3,4,5,6,7,8,9,10}; the test results are below:
> !image-2022-06-15-22-56-08-396.png|width=437,height=223!
> We integrated our bit-packing decode implementation into parquet-mr and tested
> the Parquet batch-reading capability through Spark's VectorizedParquetRecordReader,
> which fetches Parquet column data in batches. We constructed Parquet files
> with different row and column counts; the column data type is Int32 and the
> maximum int value is 127, which allows bit-pack encoding with bit width = 7.
> The row count ranges from 10k to 100 million and the column count from 1 to 4.
> !image-2022-06-15-22-57-15-964.png|width=453,height=229!
> !image-2022-06-15-22-58-01-442.png|width=439,height=217!
> !image-2022-06-15-22-58-40-704.png|width=415,height=208!
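
As a concrete illustration of the scalar unpack8Values baseline mentioned above, here is a minimal round-trip sketch using the existing parquet-mr Packer/BytePacker API (the sample values are arbitrary): with bit width = 7, eight values occupy exactly 7 bytes, and any value up to 127 survives the round trip.

{code:java}
import java.util.Arrays;

import org.apache.parquet.column.values.bitpacking.BytePacker;
import org.apache.parquet.column.values.bitpacking.Packer;

public class BitPackingRoundTripSketch {
  public static void main(String[] args) {
    int bitWidth = 7;
    BytePacker packer = Packer.LITTLE_ENDIAN.newBytePacker(bitWidth);

    int[] values = {0, 1, 2, 63, 64, 100, 126, 127};  // every value fits in 7 bits
    byte[] packed = new byte[bitWidth];                // 8 values * 7 bits = 56 bits = 7 bytes

    packer.pack8Values(values, 0, packed, 0);          // encode 8 values into 7 bytes
    int[] decoded = new int[8];
    packer.unpack8Values(packed, 0, decoded, 0);       // decode them back

    System.out.println(Arrays.toString(decoded));      // prints the original values
  }
}
{code}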