[
https://issues.apache.org/jira/browse/DRILL-5356?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15959870#comment-15959870
]
ASF GitHub Bot commented on DRILL-5356:
---------------------------------------
Github user ppadma commented on a diff in the pull request:
https://github.com/apache/drill/pull/789#discussion_r110267977
--- Diff:
exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/columnreaders/ParquetColumnMetadata.java
---
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.exec.store.parquet.columnreaders;
+
+import java.util.Map;
+
+import org.apache.drill.common.exceptions.ExecutionSetupException;
+import org.apache.drill.common.expression.SchemaPath;
+import org.apache.drill.common.types.TypeProtos;
+import org.apache.drill.common.types.TypeProtos.DataMode;
+import org.apache.drill.common.types.TypeProtos.MajorType;
+import org.apache.drill.exec.exception.SchemaChangeException;
+import org.apache.drill.exec.expr.TypeHelper;
+import org.apache.drill.exec.physical.impl.OutputMutator;
+import org.apache.drill.exec.record.MaterializedField;
+import org.apache.drill.exec.server.options.OptionManager;
+import org.apache.drill.exec.vector.ValueVector;
+import org.apache.drill.exec.vector.complex.RepeatedValueVector;
+import org.apache.parquet.column.ColumnDescriptor;
+import org.apache.parquet.format.SchemaElement;
+import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
+import org.apache.parquet.schema.PrimitiveType;
+import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
+
+/**
+ * Represents a single column read from the Parquet file by the record reader.
+ */
+
+public class ParquetColumnMetadata {
+
+ ColumnDescriptor column;
+ private SchemaElement se;
+ MaterializedField field;
+ int length;
+ private MajorType type;
+ ColumnChunkMetaData columnChunkMetaData;
+ private ValueVector vector;
+
+ public ParquetColumnMetadata(ColumnDescriptor column) {
+ this.column = column;
+ }
+
+ public void resolveDrillType(Map<String, SchemaElement> schemaElements, OptionManager options) {
+ se = schemaElements.get(column.getPath()[0]);
+ type = ParquetToDrillTypeConverter.toMajorType(column.getType(), se.getType_length(),
+ getDataMode(column), se, options);
+ field = MaterializedField.create(toFieldName(column.getPath()), type);
+ length = getDataTypeLength();
+ }
+
+ private String toFieldName(String[] paths) {
+ return SchemaPath.getCompoundPath(paths).getAsUnescapedPath();
+ }
+
+ private TypeProtos.DataMode getDataMode(ColumnDescriptor column) {
+ if (isRepeated()) {
+ return DataMode.REPEATED;
+ } else if (column.getMaxDefinitionLevel() == 0) {
+ return TypeProtos.DataMode.REQUIRED;
+ } else {
+ return TypeProtos.DataMode.OPTIONAL;
+ }
+ }
+
+ /**
+ * @param type a fixed length type from the parquet library enum
+ * @return the length in bits of the type
+ */
+ public static int getTypeLengthInBits(PrimitiveTypeName type) {
+ switch (type) {
+ case INT64: return 64;
+ case INT32: return 32;
+ case BOOLEAN: return 1;
+ case FLOAT: return 32;
+ case DOUBLE: return 64;
+ case INT96: return 96;
+ // binary and fixed length byte array
+ default:
+ throw new IllegalStateException("Length cannot be determined for type " + type);
+ }
+ }
+
+ /**
+ * Returns data type length for a given {@link ColumnDescriptor} and its corresponding
+ * {@link SchemaElement}. Neither is enough information alone, as the max
+ * repetition level (indicating if it is an array type) is in the ColumnDescriptor and
+ * the length of a fixed width field is stored at the schema level.
+ *
+ * @return the length if fixed width, else -1
+ */
+ private int getDataTypeLength() {
+ if (! isFixedLength()) {
+ return -1;
--- End diff --
Use a static final constant instead of the magic number -1.
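A minimal standalone sketch of what the suggested change could look like. The names ColumnLengthExample, UNDEFINED_LENGTH, and computeFixedWidth are illustrative assumptions, not Drill's actual API; in the real patch the constant would live in ParquetColumnMetadata and be returned from getDataTypeLength().

// Hypothetical sketch: replace the magic number -1 with a named sentinel
// constant so callers can test for "not fixed width" explicitly rather
// than comparing against a bare literal.
public class ColumnLengthExample {

  /** Sentinel meaning "this column is not fixed width". */
  public static final int UNDEFINED_LENGTH = -1;

  /** Stand-in for ParquetColumnMetadata.isFixedLength(). */
  private boolean isFixedLength() {
    return false;
  }

  /** Mirrors getDataTypeLength(): return the sentinel instead of a bare -1. */
  public int getDataTypeLength() {
    return isFixedLength() ? computeFixedWidth() : UNDEFINED_LENGTH;
  }

  /** Placeholder for the real fixed-width length calculation. */
  private int computeFixedWidth() {
    return 32;
  }
}

Callers would then compare against the named constant (e.g. length == ColumnLengthExample.UNDEFINED_LENGTH) instead of the literal -1.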
> Refactor Parquet Record Reader
> ------------------------------
>
> Key: DRILL-5356
> URL: https://issues.apache.org/jira/browse/DRILL-5356
> Project: Apache Drill
> Issue Type: Improvement
> Affects Versions: 1.10.0, 1.11.0
> Reporter: Paul Rogers
> Assignee: Paul Rogers
> Priority: Minor
> Fix For: 1.11.0
>
>
> The Parquet record reader class is a key part of Drill that has evolved over
> time to become somewhat hard to follow.
> A number of us are working on Parquet-related tasks and find we have to spend
> an uncomfortable amount of time trying to understand the code. In particular,
> this writer needs to figure out how to convince the reader to provide
> higher-density record batches.
> Rather than continue to decipher the complex code multiple times, this ticket
> requests to refactor the code to make it functionally identical, but
> structurally cleaner. The result will be faster time to value when working
> with this code.
> This is a lower-priority change and will be coordinated with others working
> on this code base. This ticket is only for the record reader class itself; it
> does not include the various readers and writers that Parquet uses since
> another project is actively modifying those classes.
--
This message was sent by Atlassian JIRA
(v6.3.15#6346)