sadikovi commented on a change in pull request #34659: URL: https://github.com/apache/spark/pull/34659#discussion_r753860971
########## File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/ParquetColumnVector.java ########## @@ -0,0 +1,321 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.datasources.parquet; + +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +import com.google.common.base.Preconditions; +import org.apache.spark.memory.MemoryMode; +import org.apache.spark.sql.execution.vectorized.OffHeapColumnVector; +import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector; +import org.apache.spark.sql.execution.vectorized.WritableColumnVector; +import org.apache.spark.sql.types.ArrayType; +import org.apache.spark.sql.types.DataType; +import org.apache.spark.sql.types.DataTypes; +import org.apache.spark.sql.types.MapType; +import org.apache.spark.sql.types.StructType; + +/** + * Contains necessary information representing a Parquet column, either of primitive or nested type. 
+ */ +final class ParquetColumnVector { + private final ParquetColumn column; + private final List<ParquetColumnVector> children; + private final WritableColumnVector vector; + + /** + * Repetition & Definition levels + * These are allocated only for leaf columns; for non-leaf columns, they simply maintain + * references to that of the former. + */ + private WritableColumnVector repetitionLevels; + private WritableColumnVector definitionLevels; + + /** Whether this column is primitive (i.e., leaf column) */ + private final boolean isPrimitive; + + /** Reader for this column - only set if 'isPrimitive' is true */ + private VectorizedColumnReader columnReader; + + ParquetColumnVector( + ParquetColumn column, + WritableColumnVector vector, + int capacity, + MemoryMode memoryMode, + Set<ParquetColumn> missingColumns) { + + DataType sparkType = column.sparkType(); + if (!sparkType.sameType(vector.dataType())) { + throw new IllegalArgumentException("Spark type: " + sparkType + + " doesn't match the type: " + vector.dataType() + " in column vector"); + } + + this.column = column; + this.vector = vector; + this.children = new ArrayList<>(); + this.isPrimitive = column.isPrimitive(); + + if (missingColumns.contains(column)) { + vector.setAllNull(); + return; + } + + if (isPrimitive) { + // TODO: avoid allocating these if not necessary, for instance, the node is of top-level + // and is not repeated, or the node is not top-level but its max repetition level is 0. 
+ repetitionLevels = allocateLevelsVector(capacity, memoryMode); + definitionLevels = allocateLevelsVector(capacity, memoryMode); + } else { + Preconditions.checkArgument(column.children().size() == vector.getNumChildren()); + for (int i = 0; i < column.children().size(); i++) { + ParquetColumnVector childCv = new ParquetColumnVector(column.children().apply(i), + vector.getChild(i), capacity, memoryMode, missingColumns); + children.add(childCv); + + // only use levels from non-missing child, this can happen if only some but not all + // fields of a struct are missing. + if (!childCv.vector.isAllNull()) { + this.repetitionLevels = childCv.repetitionLevels; + this.definitionLevels = childCv.definitionLevels; + } + } + + // this can happen if all the fields of a struct are missing, in which case we should mark + // the struct itself as a missing column + if (repetitionLevels == null) { + vector.setAllNull(); + } + } + } + + /** + * Returns all the children of this column. + */ + List<ParquetColumnVector> getChildren() { + return children; + } + + /** + * Returns all the leaf columns in depth-first order. + */ + List<ParquetColumnVector> getLeaves() { + List<ParquetColumnVector> result = new ArrayList<>(); + getLeavesHelper(this, result); + return result; + } + + private static void getLeavesHelper(ParquetColumnVector vector, List<ParquetColumnVector> coll) { + if (vector.isPrimitive) { + coll.add(vector); + } else { + for (ParquetColumnVector child : vector.children) { + getLeavesHelper(child, coll); + } + } + } + + /** + * Assembles this column and calculate collection offsets recursively. + * This is a no-op for primitive columns. 
+ */ + void assemble() { + // nothing to do if the column itself is missing + if (vector.isAllNull()) return; + + DataType type = column.sparkType(); + if (type instanceof ArrayType || type instanceof MapType) { + for (ParquetColumnVector child : children) { + child.assemble(); + } + calculateCollectionOffsets(); + } else if (type instanceof StructType) { + for (ParquetColumnVector child : children) { + child.assemble(); + } + calculateStructOffsets(); + } + } + + void reset() { + // nothing to do if the column itself is missing + if (vector.isAllNull()) return; + + vector.reset(); + repetitionLevels.reset(); + definitionLevels.reset(); + for (ParquetColumnVector child : children) { + child.reset(); + } + } + + ParquetColumn getColumn() { + return this.column; + } + + WritableColumnVector getValueVector() { + return this.vector; + } + + WritableColumnVector getRepetitionLevelVector() { + return this.repetitionLevels; + } + + WritableColumnVector getDefinitionLevelVector() { + return this.definitionLevels; + } + + VectorizedColumnReader getColumnReader() { + return this.columnReader; + } + + void setColumnReader(VectorizedColumnReader reader) { + if (!isPrimitive) { + throw new IllegalStateException("can't set reader for non-primitive column"); Review comment: nit: `Can't set...` ########## File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/ParquetColumnVector.java ########## @@ -0,0 +1,321 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.datasources.parquet; + +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +import com.google.common.base.Preconditions; +import org.apache.spark.memory.MemoryMode; +import org.apache.spark.sql.execution.vectorized.OffHeapColumnVector; +import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector; +import org.apache.spark.sql.execution.vectorized.WritableColumnVector; +import org.apache.spark.sql.types.ArrayType; +import org.apache.spark.sql.types.DataType; +import org.apache.spark.sql.types.DataTypes; +import org.apache.spark.sql.types.MapType; +import org.apache.spark.sql.types.StructType; + +/** + * Contains necessary information representing a Parquet column, either of primitive or nested type. + */ +final class ParquetColumnVector { + private final ParquetColumn column; + private final List<ParquetColumnVector> children; + private final WritableColumnVector vector; + + /** + * Repetition & Definition levels + * These are allocated only for leaf columns; for non-leaf columns, they simply maintain + * references to that of the former. 
+ */ + private WritableColumnVector repetitionLevels; + private WritableColumnVector definitionLevels; + + /** Whether this column is primitive (i.e., leaf column) */ + private final boolean isPrimitive; + + /** Reader for this column - only set if 'isPrimitive' is true */ + private VectorizedColumnReader columnReader; + + ParquetColumnVector( + ParquetColumn column, + WritableColumnVector vector, + int capacity, + MemoryMode memoryMode, + Set<ParquetColumn> missingColumns) { + + DataType sparkType = column.sparkType(); + if (!sparkType.sameType(vector.dataType())) { + throw new IllegalArgumentException("Spark type: " + sparkType + + " doesn't match the type: " + vector.dataType() + " in column vector"); + } + + this.column = column; + this.vector = vector; + this.children = new ArrayList<>(); + this.isPrimitive = column.isPrimitive(); + + if (missingColumns.contains(column)) { + vector.setAllNull(); + return; + } + + if (isPrimitive) { + // TODO: avoid allocating these if not necessary, for instance, the node is of top-level + // and is not repeated, or the node is not top-level but its max repetition level is 0. + repetitionLevels = allocateLevelsVector(capacity, memoryMode); + definitionLevels = allocateLevelsVector(capacity, memoryMode); + } else { + Preconditions.checkArgument(column.children().size() == vector.getNumChildren()); + for (int i = 0; i < column.children().size(); i++) { + ParquetColumnVector childCv = new ParquetColumnVector(column.children().apply(i), + vector.getChild(i), capacity, memoryMode, missingColumns); + children.add(childCv); + + // only use levels from non-missing child, this can happen if only some but not all + // fields of a struct are missing. 
+ if (!childCv.vector.isAllNull()) { + this.repetitionLevels = childCv.repetitionLevels; + this.definitionLevels = childCv.definitionLevels; + } + } + + // this can happen if all the fields of a struct are missing, in which case we should mark + // the struct itself as a missing column + if (repetitionLevels == null) { + vector.setAllNull(); + } + } + } + + /** + * Returns all the children of this column. + */ + List<ParquetColumnVector> getChildren() { + return children; + } + + /** + * Returns all the leaf columns in depth-first order. + */ + List<ParquetColumnVector> getLeaves() { + List<ParquetColumnVector> result = new ArrayList<>(); + getLeavesHelper(this, result); + return result; + } + + private static void getLeavesHelper(ParquetColumnVector vector, List<ParquetColumnVector> coll) { + if (vector.isPrimitive) { + coll.add(vector); + } else { + for (ParquetColumnVector child : vector.children) { + getLeavesHelper(child, coll); + } + } + } + + /** + * Assembles this column and calculate collection offsets recursively. + * This is a no-op for primitive columns. + */ + void assemble() { + // nothing to do if the column itself is missing + if (vector.isAllNull()) return; + + DataType type = column.sparkType(); + if (type instanceof ArrayType || type instanceof MapType) { + for (ParquetColumnVector child : children) { + child.assemble(); + } + calculateCollectionOffsets(); + } else if (type instanceof StructType) { + for (ParquetColumnVector child : children) { + child.assemble(); + } + calculateStructOffsets(); + } + } + + void reset() { Review comment: I understand it requires a lot of typing but it would be good if we could add a small javadoc here and below to highlight what the methods do. ########## File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/ParquetColumnVector.java ########## @@ -0,0 +1,321 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.datasources.parquet; + +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +import com.google.common.base.Preconditions; +import org.apache.spark.memory.MemoryMode; +import org.apache.spark.sql.execution.vectorized.OffHeapColumnVector; +import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector; +import org.apache.spark.sql.execution.vectorized.WritableColumnVector; +import org.apache.spark.sql.types.ArrayType; +import org.apache.spark.sql.types.DataType; +import org.apache.spark.sql.types.DataTypes; +import org.apache.spark.sql.types.MapType; +import org.apache.spark.sql.types.StructType; + +/** + * Contains necessary information representing a Parquet column, either of primitive or nested type. + */ +final class ParquetColumnVector { + private final ParquetColumn column; + private final List<ParquetColumnVector> children; + private final WritableColumnVector vector; + + /** + * Repetition & Definition levels + * These are allocated only for leaf columns; for non-leaf columns, they simply maintain + * references to that of the former. 
+ */ + private WritableColumnVector repetitionLevels; + private WritableColumnVector definitionLevels; + + /** Whether this column is primitive (i.e., leaf column) */ + private final boolean isPrimitive; + + /** Reader for this column - only set if 'isPrimitive' is true */ + private VectorizedColumnReader columnReader; + + ParquetColumnVector( + ParquetColumn column, + WritableColumnVector vector, + int capacity, + MemoryMode memoryMode, + Set<ParquetColumn> missingColumns) { + + DataType sparkType = column.sparkType(); + if (!sparkType.sameType(vector.dataType())) { + throw new IllegalArgumentException("Spark type: " + sparkType + + " doesn't match the type: " + vector.dataType() + " in column vector"); + } + + this.column = column; + this.vector = vector; + this.children = new ArrayList<>(); + this.isPrimitive = column.isPrimitive(); + + if (missingColumns.contains(column)) { + vector.setAllNull(); + return; + } + + if (isPrimitive) { + // TODO: avoid allocating these if not necessary, for instance, the node is of top-level + // and is not repeated, or the node is not top-level but its max repetition level is 0. + repetitionLevels = allocateLevelsVector(capacity, memoryMode); + definitionLevels = allocateLevelsVector(capacity, memoryMode); + } else { + Preconditions.checkArgument(column.children().size() == vector.getNumChildren()); + for (int i = 0; i < column.children().size(); i++) { + ParquetColumnVector childCv = new ParquetColumnVector(column.children().apply(i), + vector.getChild(i), capacity, memoryMode, missingColumns); + children.add(childCv); + + // only use levels from non-missing child, this can happen if only some but not all + // fields of a struct are missing. 
+ if (!childCv.vector.isAllNull()) { + this.repetitionLevels = childCv.repetitionLevels; + this.definitionLevels = childCv.definitionLevels; + } + } + + // this can happen if all the fields of a struct are missing, in which case we should mark + // the struct itself as a missing column + if (repetitionLevels == null) { + vector.setAllNull(); + } + } + } + + /** + * Returns all the children of this column. + */ + List<ParquetColumnVector> getChildren() { + return children; + } + + /** + * Returns all the leaf columns in depth-first order. + */ + List<ParquetColumnVector> getLeaves() { + List<ParquetColumnVector> result = new ArrayList<>(); + getLeavesHelper(this, result); + return result; + } + + private static void getLeavesHelper(ParquetColumnVector vector, List<ParquetColumnVector> coll) { + if (vector.isPrimitive) { + coll.add(vector); + } else { + for (ParquetColumnVector child : vector.children) { + getLeavesHelper(child, coll); + } + } + } + + /** + * Assembles this column and calculate collection offsets recursively. + * This is a no-op for primitive columns. 
+ */ + void assemble() { + // nothing to do if the column itself is missing + if (vector.isAllNull()) return; + + DataType type = column.sparkType(); + if (type instanceof ArrayType || type instanceof MapType) { + for (ParquetColumnVector child : children) { + child.assemble(); + } + calculateCollectionOffsets(); + } else if (type instanceof StructType) { + for (ParquetColumnVector child : children) { + child.assemble(); + } + calculateStructOffsets(); + } + } + + void reset() { + // nothing to do if the column itself is missing + if (vector.isAllNull()) return; + + vector.reset(); + repetitionLevels.reset(); + definitionLevels.reset(); + for (ParquetColumnVector child : children) { + child.reset(); + } + } + + ParquetColumn getColumn() { + return this.column; + } + + WritableColumnVector getValueVector() { + return this.vector; + } + + WritableColumnVector getRepetitionLevelVector() { + return this.repetitionLevels; + } + + WritableColumnVector getDefinitionLevelVector() { + return this.definitionLevels; + } + + VectorizedColumnReader getColumnReader() { + return this.columnReader; + } + + void setColumnReader(VectorizedColumnReader reader) { + if (!isPrimitive) { + throw new IllegalStateException("can't set reader for non-primitive column"); + } + this.columnReader = reader; + } + + private void calculateCollectionOffsets() { + int maxDefinitionLevel = column.definitionLevel(); + int maxElementRepetitionLevel = column.repetitionLevel(); + + // There are 4 cases when calculating definition levels: + // 1. definitionLevel == maxDefinitionLevel + // ==> value is defined and not null + // 2. definitionLevel == maxDefinitionLevel - 1 + // ==> value is null + // 3. definitionLevel < maxDefinitionLevel - 1 + // ==> value doesn't exist since one of its optional parent is null + // 4. 
definitionLevel > maxDefinitionLevel + // ==> value is a nested element within an array or map + // + // `i` is the index over all leaf elements of this array, while `offset` is the index over + // all top-level elements of this array. + int rowId = 0; + for (int i = 0, offset = 0; i < definitionLevels.getElementsAppended(); + i = getNextCollectionStart(maxElementRepetitionLevel, i)) { + vector.reserve(rowId + 1); + int definitionLevel = definitionLevels.getInt(i); + if (definitionLevel == maxDefinitionLevel - 1) { + // the collection is null + vector.putNull(rowId++); + } else if (definitionLevel == maxDefinitionLevel) { + // collection is defined but empty + vector.putNotNull(rowId); + vector.putArray(rowId, offset, 0); + rowId++; + } else if (definitionLevel > maxDefinitionLevel) { + // collection is defined and non-empty: find out how many top element there is till the Review comment: nit: find out how many elements are there until the start of the next array. Alternatively, find out the number of elements until the start of the next array. ########## File path: sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala ########## @@ -897,6 +897,14 @@ object SQLConf { .booleanConf .createWithDefault(true) + val PARQUET_VECTORIZED_READER_NESTED_COLUMN_ENABLED = + buildConf("spark.sql.parquet.enableNestedColumnVectorizedReader") + .doc("Enables vectorized Parquet decoding for nested columns (e.g., struct, list, map). " + + s"Note to enable this ${PARQUET_VECTORIZED_READER_ENABLED.key} also needs to be enabled.") Review comment: I am not sure what the guidelines are on a conf text but maybe this would sound a bit crisper: `Enables vectorized Parquet decoding for nested columns (e.g., struct, list, map). 
Requires ${PARQUET_VECTORIZED_READER_ENABLED.key} to be enabled.` ########## File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/ParquetColumnVector.java ########## @@ -0,0 +1,321 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.datasources.parquet; + +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +import com.google.common.base.Preconditions; +import org.apache.spark.memory.MemoryMode; +import org.apache.spark.sql.execution.vectorized.OffHeapColumnVector; +import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector; +import org.apache.spark.sql.execution.vectorized.WritableColumnVector; +import org.apache.spark.sql.types.ArrayType; +import org.apache.spark.sql.types.DataType; +import org.apache.spark.sql.types.DataTypes; +import org.apache.spark.sql.types.MapType; +import org.apache.spark.sql.types.StructType; + +/** + * Contains necessary information representing a Parquet column, either of primitive or nested type. 
+ */ +final class ParquetColumnVector { + private final ParquetColumn column; + private final List<ParquetColumnVector> children; + private final WritableColumnVector vector; + + /** + * Repetition & Definition levels + * These are allocated only for leaf columns; for non-leaf columns, they simply maintain + * references to that of the former. + */ + private WritableColumnVector repetitionLevels; + private WritableColumnVector definitionLevels; + + /** Whether this column is primitive (i.e., leaf column) */ + private final boolean isPrimitive; + + /** Reader for this column - only set if 'isPrimitive' is true */ + private VectorizedColumnReader columnReader; + + ParquetColumnVector( + ParquetColumn column, + WritableColumnVector vector, + int capacity, + MemoryMode memoryMode, + Set<ParquetColumn> missingColumns) { + + DataType sparkType = column.sparkType(); + if (!sparkType.sameType(vector.dataType())) { + throw new IllegalArgumentException("Spark type: " + sparkType + + " doesn't match the type: " + vector.dataType() + " in column vector"); + } + + this.column = column; + this.vector = vector; + this.children = new ArrayList<>(); + this.isPrimitive = column.isPrimitive(); + + if (missingColumns.contains(column)) { + vector.setAllNull(); + return; + } + + if (isPrimitive) { + // TODO: avoid allocating these if not necessary, for instance, the node is of top-level + // and is not repeated, or the node is not top-level but its max repetition level is 0. 
+ repetitionLevels = allocateLevelsVector(capacity, memoryMode); + definitionLevels = allocateLevelsVector(capacity, memoryMode); + } else { + Preconditions.checkArgument(column.children().size() == vector.getNumChildren()); + for (int i = 0; i < column.children().size(); i++) { + ParquetColumnVector childCv = new ParquetColumnVector(column.children().apply(i), + vector.getChild(i), capacity, memoryMode, missingColumns); + children.add(childCv); + + // only use levels from non-missing child, this can happen if only some but not all + // fields of a struct are missing. + if (!childCv.vector.isAllNull()) { + this.repetitionLevels = childCv.repetitionLevels; + this.definitionLevels = childCv.definitionLevels; + } + } + + // this can happen if all the fields of a struct are missing, in which case we should mark + // the struct itself as a missing column + if (repetitionLevels == null) { + vector.setAllNull(); + } + } + } + + /** + * Returns all the children of this column. + */ + List<ParquetColumnVector> getChildren() { + return children; + } + + /** + * Returns all the leaf columns in depth-first order. + */ + List<ParquetColumnVector> getLeaves() { + List<ParquetColumnVector> result = new ArrayList<>(); + getLeavesHelper(this, result); + return result; + } + + private static void getLeavesHelper(ParquetColumnVector vector, List<ParquetColumnVector> coll) { + if (vector.isPrimitive) { + coll.add(vector); + } else { + for (ParquetColumnVector child : vector.children) { + getLeavesHelper(child, coll); + } + } + } + + /** + * Assembles this column and calculate collection offsets recursively. + * This is a no-op for primitive columns. 
+ */ + void assemble() { + // nothing to do if the column itself is missing + if (vector.isAllNull()) return; + + DataType type = column.sparkType(); + if (type instanceof ArrayType || type instanceof MapType) { + for (ParquetColumnVector child : children) { + child.assemble(); + } + calculateCollectionOffsets(); + } else if (type instanceof StructType) { + for (ParquetColumnVector child : children) { + child.assemble(); + } + calculateStructOffsets(); + } + } + + void reset() { + // nothing to do if the column itself is missing + if (vector.isAllNull()) return; + + vector.reset(); + repetitionLevels.reset(); + definitionLevels.reset(); + for (ParquetColumnVector child : children) { + child.reset(); + } + } + + ParquetColumn getColumn() { + return this.column; + } + + WritableColumnVector getValueVector() { + return this.vector; + } + + WritableColumnVector getRepetitionLevelVector() { + return this.repetitionLevels; + } + + WritableColumnVector getDefinitionLevelVector() { + return this.definitionLevels; + } + + VectorizedColumnReader getColumnReader() { + return this.columnReader; + } + + void setColumnReader(VectorizedColumnReader reader) { + if (!isPrimitive) { + throw new IllegalStateException("can't set reader for non-primitive column"); + } + this.columnReader = reader; + } + + private void calculateCollectionOffsets() { + int maxDefinitionLevel = column.definitionLevel(); + int maxElementRepetitionLevel = column.repetitionLevel(); + + // There are 4 cases when calculating definition levels: + // 1. definitionLevel == maxDefinitionLevel + // ==> value is defined and not null + // 2. definitionLevel == maxDefinitionLevel - 1 + // ==> value is null + // 3. definitionLevel < maxDefinitionLevel - 1 + // ==> value doesn't exist since one of its optional parent is null + // 4. 
definitionLevel > maxDefinitionLevel + // ==> value is a nested element within an array or map + // + // `i` is the index over all leaf elements of this array, while `offset` is the index over + // all top-level elements of this array. + int rowId = 0; + for (int i = 0, offset = 0; i < definitionLevels.getElementsAppended(); + i = getNextCollectionStart(maxElementRepetitionLevel, i)) { + vector.reserve(rowId + 1); + int definitionLevel = definitionLevels.getInt(i); + if (definitionLevel == maxDefinitionLevel - 1) { + // the collection is null + vector.putNull(rowId++); + } else if (definitionLevel == maxDefinitionLevel) { + // collection is defined but empty + vector.putNotNull(rowId); + vector.putArray(rowId, offset, 0); + rowId++; + } else if (definitionLevel > maxDefinitionLevel) { + // collection is defined and non-empty: find out how many top element there is till the + // start of the next array. + vector.putNotNull(rowId); + int length = getCollectionSize(maxElementRepetitionLevel, i + 1); + vector.putArray(rowId, offset, length); + offset += length; + rowId++; + } + } + vector.addElementsAppended(rowId); + } + + private void calculateStructOffsets() { + int maxRepetitionLevel = column.repetitionLevel(); + int maxDefinitionLevel = column.definitionLevel(); + + vector.reserve(definitionLevels.getElementsAppended()); + + int rowId = 0; + int nonnullRowId = 0; + boolean hasRepetitionLevels = repetitionLevels.getElementsAppended() > 0; + for (int i = 0; i < definitionLevels.getElementsAppended(); i++) { + // if repetition level > maxRepetitionLevel, the value is a nested element (e.g., an array + // element in struct<array<int>>), and we should skip the definition level since it doesn't + // represent with the struct. 
+ if (!hasRepetitionLevels || repetitionLevels.getInt(i) <= maxRepetitionLevel) { + if (definitionLevels.getInt(i) == maxDefinitionLevel - 1) { + // the struct is null + vector.putNull(rowId); + rowId++; + } else if (definitionLevels.getInt(i) >= maxDefinitionLevel) { + vector.putNotNull(rowId); + vector.putStruct(rowId, nonnullRowId); + rowId++; + nonnullRowId++; + } + } + } + vector.addElementsAppended(rowId); + } + + private static WritableColumnVector allocateLevelsVector(int capacity, MemoryMode memoryMode) { + switch (memoryMode) { + case ON_HEAP: + return new OnHeapColumnVector(capacity, DataTypes.IntegerType); + case OFF_HEAP: + return new OffHeapColumnVector(capacity, DataTypes.IntegerType); + default: + throw new IllegalArgumentException("Unknown memory mode: " + memoryMode); + } + } + + private int getNextCollectionStart(int maxRepetitionLevel, int elementIndex) { + int idx = elementIndex + 1; + for (; idx < repetitionLevels.getElementsAppended(); idx++) { + if (repetitionLevels.getInt(idx) <= maxRepetitionLevel) { + break; + } + } + return idx; + } + + private int getCollectionSize(int maxRepetitionLevel, int idx) { Review comment: We might need javadoc for this and a couple of methods above to go along with the inline comments. ########## File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/ParquetReadState.java ########## @@ -42,24 +44,52 @@ /** The current row range */ private RowRange currentRange; + /** Maximum repetition level for the Parquet column */ + final int maxRepetitionLevel; + /** Maximum definition level for the Parquet column */ final int maxDefinitionLevel; + /** Whether this column is required */ + final boolean isRequired; + /** The current index over all rows within the column chunk. This is used to check if the * current row should be skipped by comparing against the row ranges. 
*/ long rowId; - /** The offset in the current batch to put the next value */ - int offset; + /** The offset in the current batch to put the next value in value vector */ + int valueOffset; + + /** The offset in the current batch to put the next value in repetition & definition vector */ + int levelOffset; /** The remaining number of values to read in the current page */ int valuesToReadInPage; - /** The remaining number of values to read in the current batch */ - int valuesToReadInBatch; + /** The remaining number of rows to read in the current batch */ + int rowsToReadInBatch; + + + /* The following fields are only used when reading repeated values */ + + /** When processing repeated values, whether we've found the beginning of the first list after the + * current batch. */ + boolean lastListCompleted; - ParquetReadState(int maxDefinitionLevel, PrimitiveIterator.OfLong rowIndexes) { - this.maxDefinitionLevel = maxDefinitionLevel; + /** When processing repeated types, the number of accumulated definition levels to process */ + int numBatchedDefLevels; + + /** When processing repeated types, whether we should skip the current batch of definition Review comment: Same here. ########## File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/ParquetColumnVector.java ########## @@ -0,0 +1,321 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.datasources.parquet; + +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +import com.google.common.base.Preconditions; +import org.apache.spark.memory.MemoryMode; +import org.apache.spark.sql.execution.vectorized.OffHeapColumnVector; +import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector; +import org.apache.spark.sql.execution.vectorized.WritableColumnVector; +import org.apache.spark.sql.types.ArrayType; +import org.apache.spark.sql.types.DataType; +import org.apache.spark.sql.types.DataTypes; +import org.apache.spark.sql.types.MapType; +import org.apache.spark.sql.types.StructType; + +/** + * Contains necessary information representing a Parquet column, either of primitive or nested type. + */ +final class ParquetColumnVector { + private final ParquetColumn column; + private final List<ParquetColumnVector> children; + private final WritableColumnVector vector; + + /** + * Repetition & Definition levels + * These are allocated only for leaf columns; for non-leaf columns, they simply maintain + * references to that of the former. 
+ */ + private WritableColumnVector repetitionLevels; + private WritableColumnVector definitionLevels; + + /** Whether this column is primitive (i.e., leaf column) */ + private final boolean isPrimitive; + + /** Reader for this column - only set if 'isPrimitive' is true */ + private VectorizedColumnReader columnReader; + + ParquetColumnVector( + ParquetColumn column, + WritableColumnVector vector, + int capacity, + MemoryMode memoryMode, + Set<ParquetColumn> missingColumns) { + + DataType sparkType = column.sparkType(); + if (!sparkType.sameType(vector.dataType())) { + throw new IllegalArgumentException("Spark type: " + sparkType + + " doesn't match the type: " + vector.dataType() + " in column vector"); + } + + this.column = column; + this.vector = vector; + this.children = new ArrayList<>(); + this.isPrimitive = column.isPrimitive(); + + if (missingColumns.contains(column)) { + vector.setAllNull(); + return; + } + + if (isPrimitive) { + // TODO: avoid allocating these if not necessary, for instance, the node is of top-level + // and is not repeated, or the node is not top-level but its max repetition level is 0. + repetitionLevels = allocateLevelsVector(capacity, memoryMode); + definitionLevels = allocateLevelsVector(capacity, memoryMode); + } else { + Preconditions.checkArgument(column.children().size() == vector.getNumChildren()); + for (int i = 0; i < column.children().size(); i++) { + ParquetColumnVector childCv = new ParquetColumnVector(column.children().apply(i), + vector.getChild(i), capacity, memoryMode, missingColumns); + children.add(childCv); + + // only use levels from non-missing child, this can happen if only some but not all + // fields of a struct are missing. 
+ if (!childCv.vector.isAllNull()) { + this.repetitionLevels = childCv.repetitionLevels; + this.definitionLevels = childCv.definitionLevels; + } + } + + // this can happen if all the fields of a struct are missing, in which case we should mark + // the struct itself as a missing column + if (repetitionLevels == null) { + vector.setAllNull(); + } + } + } + + /** + * Returns all the children of this column. + */ + List<ParquetColumnVector> getChildren() { + return children; + } + + /** + * Returns all the leaf columns in depth-first order. + */ + List<ParquetColumnVector> getLeaves() { + List<ParquetColumnVector> result = new ArrayList<>(); + getLeavesHelper(this, result); + return result; + } + + private static void getLeavesHelper(ParquetColumnVector vector, List<ParquetColumnVector> coll) { + if (vector.isPrimitive) { + coll.add(vector); + } else { + for (ParquetColumnVector child : vector.children) { + getLeavesHelper(child, coll); + } + } + } + + /** + * Assembles this column and calculate collection offsets recursively. + * This is a no-op for primitive columns. 
+ */ + void assemble() { + // nothing to do if the column itself is missing + if (vector.isAllNull()) return; + + DataType type = column.sparkType(); + if (type instanceof ArrayType || type instanceof MapType) { + for (ParquetColumnVector child : children) { + child.assemble(); + } + calculateCollectionOffsets(); + } else if (type instanceof StructType) { + for (ParquetColumnVector child : children) { + child.assemble(); + } + calculateStructOffsets(); + } + } + + void reset() { + // nothing to do if the column itself is missing + if (vector.isAllNull()) return; + + vector.reset(); + repetitionLevels.reset(); + definitionLevels.reset(); + for (ParquetColumnVector child : children) { + child.reset(); + } + } + + ParquetColumn getColumn() { + return this.column; + } + + WritableColumnVector getValueVector() { + return this.vector; + } + + WritableColumnVector getRepetitionLevelVector() { + return this.repetitionLevels; + } + + WritableColumnVector getDefinitionLevelVector() { + return this.definitionLevels; + } + + VectorizedColumnReader getColumnReader() { + return this.columnReader; + } + + void setColumnReader(VectorizedColumnReader reader) { + if (!isPrimitive) { + throw new IllegalStateException("can't set reader for non-primitive column"); + } + this.columnReader = reader; + } + + private void calculateCollectionOffsets() { + int maxDefinitionLevel = column.definitionLevel(); + int maxElementRepetitionLevel = column.repetitionLevel(); + + // There are 4 cases when calculating definition levels: + // 1. definitionLevel == maxDefinitionLevel + // ==> value is defined and not null + // 2. definitionLevel == maxDefinitionLevel - 1 + // ==> value is null + // 3. definitionLevel < maxDefinitionLevel - 1 + // ==> value doesn't exist since one of its optional parent is null + // 4. 
definitionLevel > maxDefinitionLevel + // ==> value is a nested element within an array or map + // + // `i` is the index over all leaf elements of this array, while `offset` is the index over + // all top-level elements of this array. + int rowId = 0; + for (int i = 0, offset = 0; i < definitionLevels.getElementsAppended(); + i = getNextCollectionStart(maxElementRepetitionLevel, i)) { + vector.reserve(rowId + 1); + int definitionLevel = definitionLevels.getInt(i); + if (definitionLevel == maxDefinitionLevel - 1) { + // the collection is null + vector.putNull(rowId++); + } else if (definitionLevel == maxDefinitionLevel) { + // collection is defined but empty + vector.putNotNull(rowId); + vector.putArray(rowId, offset, 0); + rowId++; + } else if (definitionLevel > maxDefinitionLevel) { + // collection is defined and non-empty: find out how many top element there is till the + // start of the next array. + vector.putNotNull(rowId); + int length = getCollectionSize(maxElementRepetitionLevel, i + 1); + vector.putArray(rowId, offset, length); + offset += length; + rowId++; + } + } + vector.addElementsAppended(rowId); + } + + private void calculateStructOffsets() { + int maxRepetitionLevel = column.repetitionLevel(); + int maxDefinitionLevel = column.definitionLevel(); + + vector.reserve(definitionLevels.getElementsAppended()); + + int rowId = 0; + int nonnullRowId = 0; + boolean hasRepetitionLevels = repetitionLevels.getElementsAppended() > 0; + for (int i = 0; i < definitionLevels.getElementsAppended(); i++) { + // if repetition level > maxRepetitionLevel, the value is a nested element (e.g., an array + // element in struct<array<int>>), and we should skip the definition level since it doesn't + // represent with the struct. 
+ if (!hasRepetitionLevels || repetitionLevels.getInt(i) <= maxRepetitionLevel) { + if (definitionLevels.getInt(i) == maxDefinitionLevel - 1) { + // the struct is null + vector.putNull(rowId); + rowId++; + } else if (definitionLevels.getInt(i) >= maxDefinitionLevel) { + vector.putNotNull(rowId); + vector.putStruct(rowId, nonnullRowId); + rowId++; + nonnullRowId++; + } + } + } + vector.addElementsAppended(rowId); + } + + private static WritableColumnVector allocateLevelsVector(int capacity, MemoryMode memoryMode) { + switch (memoryMode) { + case ON_HEAP: + return new OnHeapColumnVector(capacity, DataTypes.IntegerType); + case OFF_HEAP: + return new OffHeapColumnVector(capacity, DataTypes.IntegerType); + default: + throw new IllegalArgumentException("Unknown memory mode: " + memoryMode); + } + } + + private int getNextCollectionStart(int maxRepetitionLevel, int elementIndex) { + int idx = elementIndex + 1; + for (; idx < repetitionLevels.getElementsAppended(); idx++) { + if (repetitionLevels.getInt(idx) <= maxRepetitionLevel) { + break; + } + } + return idx; + } + + private int getCollectionSize(int maxRepetitionLevel, int idx) { + int size = 1; Review comment: I think `size = 1` requires a comment. Is it due to the fact that the collection is non-empty and we automatically add the first element? Depending on the idx, we could double count size or skip elements. I would propose to return whatever size was calculated based on index and add +1 or +K in the caller's code if required. ########## File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java ########## @@ -303,55 +313,88 @@ public boolean nextBatch() throws IOException { } private void initializeInternal() throws IOException, UnsupportedOperationException { - // Check that the requested schema is supported. 
- missingColumns = new boolean[requestedSchema.getFieldCount()]; - List<ColumnDescriptor> columns = requestedSchema.getColumns(); - List<String[]> paths = requestedSchema.getPaths(); - for (int i = 0; i < requestedSchema.getFieldCount(); ++i) { - Type t = requestedSchema.getFields().get(i); - if (!t.isPrimitive() || t.isRepetition(Type.Repetition.REPEATED)) { - throw new UnsupportedOperationException("Complex types not supported."); - } + missingColumns = new HashSet<>(); Review comment: I don't know if Spark runs java lint. Is it allowed to use `<>` with generic types? ########## File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java ########## @@ -303,55 +313,88 @@ public boolean nextBatch() throws IOException { } private void initializeInternal() throws IOException, UnsupportedOperationException { - // Check that the requested schema is supported. - missingColumns = new boolean[requestedSchema.getFieldCount()]; - List<ColumnDescriptor> columns = requestedSchema.getColumns(); - List<String[]> paths = requestedSchema.getPaths(); - for (int i = 0; i < requestedSchema.getFieldCount(); ++i) { - Type t = requestedSchema.getFields().get(i); - if (!t.isPrimitive() || t.isRepetition(Type.Repetition.REPEATED)) { - throw new UnsupportedOperationException("Complex types not supported."); - } + missingColumns = new HashSet<>(); + for (ParquetColumn column : JavaConverters.seqAsJavaList(parquetColumn.children())) { + checkColumn(column); + } + } - String[] colPath = paths.get(i); - if (fileSchema.containsPath(colPath)) { - ColumnDescriptor fd = fileSchema.getColumnDescription(colPath); - if (!fd.equals(columns.get(i))) { + /** + * Check whether a column from requested schema is missing from the file schema, or whether it + * conforms to the type of the file schema. 
+ */ + private void checkColumn(ParquetColumn column) throws IOException { + String[] path = JavaConverters.seqAsJavaList(column.path()).toArray(new String[0]); Review comment: Maybe the original path could be returned as an array... ########## File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedParquetRecordReader.java ########## @@ -39,12 +42,13 @@ import org.apache.spark.sql.vectorized.ColumnarBatch; import org.apache.spark.sql.types.StructField; import org.apache.spark.sql.types.StructType; +import scala.collection.JavaConverters; Review comment: Is this import required? ########## File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/ParquetReadState.java ########## @@ -42,24 +44,52 @@ /** The current row range */ private RowRange currentRange; + /** Maximum repetition level for the Parquet column */ + final int maxRepetitionLevel; + /** Maximum definition level for the Parquet column */ final int maxDefinitionLevel; + /** Whether this column is required */ + final boolean isRequired; + /** The current index over all rows within the column chunk. This is used to check if the * current row should be skipped by comparing against the row ranges. 
*/ long rowId; - /** The offset in the current batch to put the next value */ - int offset; + /** The offset in the current batch to put the next value in value vector */ + int valueOffset; + + /** The offset in the current batch to put the next value in repetition & definition vector */ + int levelOffset; /** The remaining number of values to read in the current page */ int valuesToReadInPage; - /** The remaining number of values to read in the current batch */ - int valuesToReadInBatch; + /** The remaining number of rows to read in the current batch */ + int rowsToReadInBatch; + + + /* The following fields are only used when reading repeated values */ + + /** When processing repeated values, whether we've found the beginning of the first list after the Review comment: Javadoc should be updated. ########## File path: sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/ParquetColumnVector.java ########## @@ -0,0 +1,321 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql.execution.datasources.parquet; + +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +import com.google.common.base.Preconditions; +import org.apache.spark.memory.MemoryMode; +import org.apache.spark.sql.execution.vectorized.OffHeapColumnVector; +import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector; +import org.apache.spark.sql.execution.vectorized.WritableColumnVector; +import org.apache.spark.sql.types.ArrayType; +import org.apache.spark.sql.types.DataType; +import org.apache.spark.sql.types.DataTypes; +import org.apache.spark.sql.types.MapType; +import org.apache.spark.sql.types.StructType; + +/** + * Contains necessary information representing a Parquet column, either of primitive or nested type. + */ +final class ParquetColumnVector { + private final ParquetColumn column; + private final List<ParquetColumnVector> children; + private final WritableColumnVector vector; + + /** + * Repetition & Definition levels + * These are allocated only for leaf columns; for non-leaf columns, they simply maintain + * references to that of the former. 
+ */ + private WritableColumnVector repetitionLevels; + private WritableColumnVector definitionLevels; + + /** Whether this column is primitive (i.e., leaf column) */ + private final boolean isPrimitive; + + /** Reader for this column - only set if 'isPrimitive' is true */ + private VectorizedColumnReader columnReader; + + ParquetColumnVector( + ParquetColumn column, + WritableColumnVector vector, + int capacity, + MemoryMode memoryMode, + Set<ParquetColumn> missingColumns) { + + DataType sparkType = column.sparkType(); + if (!sparkType.sameType(vector.dataType())) { + throw new IllegalArgumentException("Spark type: " + sparkType + + " doesn't match the type: " + vector.dataType() + " in column vector"); + } + + this.column = column; + this.vector = vector; + this.children = new ArrayList<>(); + this.isPrimitive = column.isPrimitive(); + + if (missingColumns.contains(column)) { + vector.setAllNull(); + return; + } + + if (isPrimitive) { + // TODO: avoid allocating these if not necessary, for instance, the node is of top-level + // and is not repeated, or the node is not top-level but its max repetition level is 0. + repetitionLevels = allocateLevelsVector(capacity, memoryMode); + definitionLevels = allocateLevelsVector(capacity, memoryMode); + } else { + Preconditions.checkArgument(column.children().size() == vector.getNumChildren()); + for (int i = 0; i < column.children().size(); i++) { + ParquetColumnVector childCv = new ParquetColumnVector(column.children().apply(i), + vector.getChild(i), capacity, memoryMode, missingColumns); + children.add(childCv); + + // only use levels from non-missing child, this can happen if only some but not all + // fields of a struct are missing. 
+ if (!childCv.vector.isAllNull()) { + this.repetitionLevels = childCv.repetitionLevels; + this.definitionLevels = childCv.definitionLevels; + } + } + + // this can happen if all the fields of a struct are missing, in which case we should mark + // the struct itself as a missing column + if (repetitionLevels == null) { + vector.setAllNull(); + } + } + } + + /** + * Returns all the children of this column. + */ + List<ParquetColumnVector> getChildren() { + return children; + } + + /** + * Returns all the leaf columns in depth-first order. + */ + List<ParquetColumnVector> getLeaves() { + List<ParquetColumnVector> result = new ArrayList<>(); + getLeavesHelper(this, result); + return result; + } + + private static void getLeavesHelper(ParquetColumnVector vector, List<ParquetColumnVector> coll) { + if (vector.isPrimitive) { + coll.add(vector); + } else { + for (ParquetColumnVector child : vector.children) { + getLeavesHelper(child, coll); + } + } + } + + /** + * Assembles this column and calculate collection offsets recursively. + * This is a no-op for primitive columns. 
+ */ + void assemble() { + // nothing to do if the column itself is missing + if (vector.isAllNull()) return; + + DataType type = column.sparkType(); + if (type instanceof ArrayType || type instanceof MapType) { + for (ParquetColumnVector child : children) { + child.assemble(); + } + calculateCollectionOffsets(); + } else if (type instanceof StructType) { + for (ParquetColumnVector child : children) { + child.assemble(); + } + calculateStructOffsets(); + } + } + + void reset() { + // nothing to do if the column itself is missing + if (vector.isAllNull()) return; + + vector.reset(); + repetitionLevels.reset(); + definitionLevels.reset(); + for (ParquetColumnVector child : children) { + child.reset(); + } + } + + ParquetColumn getColumn() { + return this.column; + } + + WritableColumnVector getValueVector() { + return this.vector; + } + + WritableColumnVector getRepetitionLevelVector() { + return this.repetitionLevels; + } + + WritableColumnVector getDefinitionLevelVector() { + return this.definitionLevels; + } + + VectorizedColumnReader getColumnReader() { + return this.columnReader; + } + + void setColumnReader(VectorizedColumnReader reader) { + if (!isPrimitive) { + throw new IllegalStateException("can't set reader for non-primitive column"); + } + this.columnReader = reader; + } + + private void calculateCollectionOffsets() { + int maxDefinitionLevel = column.definitionLevel(); + int maxElementRepetitionLevel = column.repetitionLevel(); + + // There are 4 cases when calculating definition levels: + // 1. definitionLevel == maxDefinitionLevel + // ==> value is defined and not null + // 2. definitionLevel == maxDefinitionLevel - 1 + // ==> value is null + // 3. definitionLevel < maxDefinitionLevel - 1 + // ==> value doesn't exist since one of its optional parent is null + // 4. 
definitionLevel > maxDefinitionLevel + // ==> value is a nested element within an array or map + // + // `i` is the index over all leaf elements of this array, while `offset` is the index over + // all top-level elements of this array. + int rowId = 0; + for (int i = 0, offset = 0; i < definitionLevels.getElementsAppended(); + i = getNextCollectionStart(maxElementRepetitionLevel, i)) { + vector.reserve(rowId + 1); + int definitionLevel = definitionLevels.getInt(i); + if (definitionLevel == maxDefinitionLevel - 1) { + // the collection is null + vector.putNull(rowId++); + } else if (definitionLevel == maxDefinitionLevel) { + // collection is defined but empty + vector.putNotNull(rowId); + vector.putArray(rowId, offset, 0); + rowId++; + } else if (definitionLevel > maxDefinitionLevel) { + // collection is defined and non-empty: find out how many top element there is till the + // start of the next array. + vector.putNotNull(rowId); + int length = getCollectionSize(maxElementRepetitionLevel, i + 1); + vector.putArray(rowId, offset, length); + offset += length; + rowId++; + } + } + vector.addElementsAppended(rowId); + } + + private void calculateStructOffsets() { + int maxRepetitionLevel = column.repetitionLevel(); + int maxDefinitionLevel = column.definitionLevel(); + + vector.reserve(definitionLevels.getElementsAppended()); + + int rowId = 0; + int nonnullRowId = 0; + boolean hasRepetitionLevels = repetitionLevels.getElementsAppended() > 0; + for (int i = 0; i < definitionLevels.getElementsAppended(); i++) { + // if repetition level > maxRepetitionLevel, the value is a nested element (e.g., an array + // element in struct<array<int>>), and we should skip the definition level since it doesn't + // represent with the struct. 
+ if (!hasRepetitionLevels || repetitionLevels.getInt(i) <= maxRepetitionLevel) { + if (definitionLevels.getInt(i) == maxDefinitionLevel - 1) { + // the struct is null + vector.putNull(rowId); + rowId++; + } else if (definitionLevels.getInt(i) >= maxDefinitionLevel) { + vector.putNotNull(rowId); + vector.putStruct(rowId, nonnullRowId); + rowId++; + nonnullRowId++; + } + } + } + vector.addElementsAppended(rowId); + } + + private static WritableColumnVector allocateLevelsVector(int capacity, MemoryMode memoryMode) { + switch (memoryMode) { + case ON_HEAP: + return new OnHeapColumnVector(capacity, DataTypes.IntegerType); + case OFF_HEAP: + return new OffHeapColumnVector(capacity, DataTypes.IntegerType); + default: + throw new IllegalArgumentException("Unknown memory mode: " + memoryMode); + } + } + + private int getNextCollectionStart(int maxRepetitionLevel, int elementIndex) { + int idx = elementIndex + 1; + for (; idx < repetitionLevels.getElementsAppended(); idx++) { + if (repetitionLevels.getInt(idx) <= maxRepetitionLevel) { + break; + } + } + return idx; + } + + private int getCollectionSize(int maxRepetitionLevel, int idx) { + int size = 1; + for (; idx < repetitionLevels.getElementsAppended(); idx++) { + if (repetitionLevels.getInt(idx) <= maxRepetitionLevel) { + break; + } else if (repetitionLevels.getInt(idx) <= maxRepetitionLevel + 1) { + // only count elements which belong to the current collection + // For instance, suppose we have the following Parquet schema: + // + // message schema { max rl max dl + // optional group col (LIST) { 0 1 + // repeated group list { 1 2 + // optional group element (LIST) { 1 3 + // repeated group list { 2 4 + // required int32 element; 2 4 + // } + // } + // } + // } + // } + // + // For a list such as: [[[0, 1], [2, 3]], [[4, 5], [6, 7]]], the repetition & definition + // levels would be: + // + // repetition levels: [0, 2, 1, 2, 0, 2, 1, 2] + // definition levels: [2, 2, 2, 2, 2, 2, 2, 2] + // + // when calculating 
collection size for the outer array, we should only count repetition + // levels whose value is <= 1 (which is the max repetition level for the inner array) Review comment: Could you elaborate on this comment a bit more? Is it about the example above or in general? -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected] --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
