unical1988 commented on code in PR #669:
URL: https://github.com/apache/incubator-xtable/pull/669#discussion_r2022928570
##########
xtable-core/src/main/java/org/apache/xtable/parquet/ParquetSchemaExtractor.java:
##########
@@ -0,0 +1,564 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.xtable.parquet;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.List;
+import java.util.ArrayList;
+
+import org.apache.xtable.schema.SchemaUtils;
+import org.apache.xtable.exception.SchemaExtractorException;
+
+import java.util.Collections;
+import java.util.Optional;
+
+import lombok.AccessLevel;
+import lombok.NoArgsConstructor;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import org.apache.xtable.hudi.idtracking.models.IdMapping;
+import org.apache.avro.Schema;
+import org.apache.parquet.schema.GroupType;
+import org.apache.parquet.schema.LogicalTypeAnnotation;
+import org.apache.parquet.schema.MessageType;
+import org.apache.parquet.schema.PrimitiveType;
+import org.apache.parquet.schema.Type;
+import org.apache.xtable.collectors.CustomCollectors;
+import org.apache.xtable.exception.UnsupportedSchemaTypeException;
+import org.apache.xtable.model.schema.InternalField;
+import org.apache.parquet.schema.Type.Repetition;
+import org.apache.xtable.model.schema.InternalSchema;
+import org.apache.xtable.model.schema.InternalType;
+import org.apache.parquet.schema.Type.ID;
+import org.apache.parquet.schema.OriginalType;
+import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName;
+import org.apache.parquet.schema.Types;
+
+//import org.apache.parquet.avro.AvroSchemaConverter;
+
+
+/**
+ * Class that converts parquet Schema {@link Schema} to Canonical Schema {@link InternalSchema} and
+ * vice-versa. This conversion is fully reversible and there is a strict 1 to 1 mapping between
+ * parquet data types and canonical data types.
+ */
+@NoArgsConstructor(access = AccessLevel.PRIVATE)
+public class ParquetSchemaExtractor {
+  // parquet only supports string keys in maps
+  private static final InternalField MAP_KEY_FIELD =
+      InternalField.builder()
+          .name(InternalField.Constants.MAP_KEY_FIELD_NAME)
+          .schema(
+              InternalSchema.builder()
+                  .name("map_key")
+                  .dataType(InternalType.STRING)
+                  .isNullable(false)
+                  .build())
+          .defaultValue("")
+          .build();
+  private static final ParquetSchemaExtractor INSTANCE = new ParquetSchemaExtractor();
+  private static final String ELEMENT = "element";
+  private static final String KEY = "key";
+  private static final String VALUE = "value";
+
+  public static ParquetSchemaExtractor getInstance() {
+    return INSTANCE;
+  }
+
+  private static boolean groupTypeContainsNull(Type schema) {
+    if (!schema.isPrimitive()) {
+      for (Type field : schema.asGroupType().getFields()) {
+        if (field/*.getLogicalTypeAnnotation().toOriginalType()*/ == null) {
+          return true;
+        }
+      }
+    } else {
+      if (schema.equals(null)) {
+        return true;
+      }
+    }
+
+    return false;
+  }
+
+  /* private static LogicalTypeAnnotation finalizeSchema(LogicalTypeAnnotation targetSchema, InternalSchema inputSchema) {
+    if (inputSchema.isNullable()) {
+      return targetSchema.union(null); // LogicalTypeAnnotation.unknownType()
+    }
+    return targetSchema;
+  }*/
+
+  /**
+   * Converts the parquet {@link Schema} to {@link InternalSchema}.
+   *
+   * @param schema The schema being converted
+   * @param parentPath If this schema is nested within another, this will be a dot separated string
+   *     representing the path from the top most field to the current schema.
+   * @return a converted schema
+   */
+  public InternalSchema toInternalSchema(
+      Type schema, String parentPath) {
+    // TODO - Does not handle recursion in parquet schema
+    InternalType newDataType = null;
+    PrimitiveType typeName;
+    LogicalTypeAnnotation logicalType;
+    Map<InternalSchema.MetadataKey, Object> metadata = new HashMap<>();
+    if (schema.isPrimitive()/*schema.getFields().size()==1*/) {
+      //Type schemaField = schema.getType(0);
+      //typeName = schemaField.asPrimitiveType();
+      typeName = schema.asPrimitiveType();
+      switch (typeName.getPrimitiveTypeName()) {
+        // PrimitiveTypes
+        case INT64:
+          logicalType = schema.getLogicalTypeAnnotation();
+          if (logicalType instanceof LogicalTypeAnnotation.TimestampLogicalTypeAnnotation) {
+            LogicalTypeAnnotation.TimeUnit timeUnit =
+                ((LogicalTypeAnnotation.TimestampLogicalTypeAnnotation) logicalType).getUnit();
+            if (timeUnit == LogicalTypeAnnotation.TimeUnit.MICROS) {
+              newDataType = InternalType.TIMESTAMP;
+              metadata.put(
+                  InternalSchema.MetadataKey.TIMESTAMP_PRECISION,
+                  InternalSchema.MetadataValue.MICROS);
+            } else if (timeUnit == LogicalTypeAnnotation.TimeUnit.MILLIS) {
+              newDataType = InternalType.TIMESTAMP_NTZ;
+              metadata.put(
+                  InternalSchema.MetadataKey.TIMESTAMP_PRECISION,
+                  InternalSchema.MetadataValue.MILLIS);
+            } else if (timeUnit == LogicalTypeAnnotation.TimeUnit.NANOS) {
+              newDataType = InternalType.TIMESTAMP_NTZ;
+              metadata.put(
+                  InternalSchema.MetadataKey.TIMESTAMP_PRECISION,
+                  InternalSchema.MetadataValue.NANOS);
+            }
+          } else if (logicalType instanceof LogicalTypeAnnotation.IntLogicalTypeAnnotation) {
+            newDataType = InternalType.INT;
+          } else if (logicalType instanceof LogicalTypeAnnotation.TimeLogicalTypeAnnotation) {
+            LogicalTypeAnnotation.TimeUnit timeUnit = ((LogicalTypeAnnotation.TimeLogicalTypeAnnotation) logicalType).getUnit();
+            if (timeUnit == LogicalTypeAnnotation.TimeUnit.MICROS || timeUnit ==
+                LogicalTypeAnnotation.TimeUnit.NANOS) {
+              // check if INT is the InternalType needed here
+              newDataType = InternalType.INT;
+            }
+          } else {
+            newDataType = InternalType.LONG;
+          }
+          break;
+        case INT32:
+          logicalType = schema.getLogicalTypeAnnotation();
+          if (logicalType instanceof LogicalTypeAnnotation.DateLogicalTypeAnnotation) {
+            newDataType = InternalType.DATE;
+          } else if (logicalType instanceof LogicalTypeAnnotation.TimeLogicalTypeAnnotation) {
+            LogicalTypeAnnotation.TimeUnit timeUnit = ((LogicalTypeAnnotation.TimeLogicalTypeAnnotation) logicalType).getUnit();
+            if (timeUnit == LogicalTypeAnnotation.TimeUnit.MILLIS) {
+              // check if INT is the InternalType needed here
+              newDataType = InternalType.INT;
+            }
+          } else {
+            newDataType = InternalType.INT;
+          }
+          break;
+        /* case INT96:
+          newDataType = InternalType.INT;
+          break;*/
+        case FLOAT:
+          logicalType = schema.getLogicalTypeAnnotation();
+          /* if (logicalType instanceof LogicalTypeAnnotation.Float16LogicalTypeAnnotation) {
+            newDataType = InternalType.FLOAT;
+          } else*/
+          if (logicalType
+              instanceof LogicalTypeAnnotation.DecimalLogicalTypeAnnotation) {
+            metadata.put(
+                InternalSchema.MetadataKey.DECIMAL_PRECISION,
+                ((LogicalTypeAnnotation.DecimalLogicalTypeAnnotation) logicalType).getPrecision());
+            metadata.put(
+                InternalSchema.MetadataKey.DECIMAL_SCALE,
+                ((LogicalTypeAnnotation.DecimalLogicalTypeAnnotation) logicalType).getScale());
+            newDataType = InternalType.DECIMAL;
+          } else {
+            newDataType = InternalType.FLOAT;
+          }
+          break;
+        case FIXED_LEN_BYTE_ARRAY:
+          logicalType = schema.getLogicalTypeAnnotation();
+          if (logicalType instanceof LogicalTypeAnnotation.UUIDLogicalTypeAnnotation) {
+            newDataType = InternalType.UUID;
+          } else if (logicalType instanceof LogicalTypeAnnotation.IntervalLogicalTypeAnnotation) {
+            metadata.put(InternalSchema.MetadataKey.FIXED_BYTES_SIZE, 12);
+            newDataType = InternalType.FIXED;
+          }
+          break;
+        // TODO add other logicalTypes?
+        case BINARY:
+          // ? Variant,GEOMETRY, GEOGRAPHY,
+          //logicalType = schemaField.getLogicalTypeAnnotation();
+          logicalType = schema.getLogicalTypeAnnotation();
+          if (logicalType instanceof LogicalTypeAnnotation.EnumLogicalTypeAnnotation) {
+            metadata.put(
+                InternalSchema.MetadataKey.ENUM_VALUES, logicalType.toOriginalType().values());
+            newDataType = InternalType.ENUM;
+          } else if (logicalType instanceof LogicalTypeAnnotation.JsonLogicalTypeAnnotation) {
+            newDataType = InternalType.BYTES;
+          } else if (logicalType instanceof LogicalTypeAnnotation.BsonLogicalTypeAnnotation) {
+            newDataType = InternalType.BYTES;
+          } else if (logicalType instanceof LogicalTypeAnnotation.StringLogicalTypeAnnotation) {
+            newDataType = InternalType.STRING;
+          } else {
+            newDataType = InternalType.BYTES;
+          }
+          break;
+        case BOOLEAN:
+          newDataType = InternalType.BOOLEAN;
+          break;
+        /* case UNKNOWN:
+          newDataType = InternalType.NULL;
+          break;*/
+        default:
+          throw new UnsupportedSchemaTypeException(
+              String.format("Unsupported schema type %s", schema));
+      }
+    } else {
+      //GroupTypes
+      //typeName = schema.asGroupType();
+      logicalType = schema.getLogicalTypeAnnotation();
+      if (logicalType instanceof LogicalTypeAnnotation.ListLogicalTypeAnnotation) {
+        String schemaName = schema.asGroupType().getName();
+        Type.ID schemaId = schema.getId();
+        InternalSchema elementSchema =
+            toInternalSchema(
+                schema.asGroupType().getType(0),

Review Comment:
This is the example I was referring to:
```
// List<List<Integer>>
optional group array_of_arrays (LIST) {
  repeated group list {
    required group element (LIST) {
      repeated group list {
        required int32 element;
      }
    }
  }
}
```
The update should deal with that. I was asking whether a good approach is to use the path to the leaf field for a given column in the schema, specifically by calculating the max depth (using https://javadoc.io/static/org.apache.parquet/parquet-column/1.15.0/org/apache/parquet/schema/GroupType.html#getMaxDefinitionLevel-java.lang.String:A-int-) over the column so as to retrieve the type of the nested List.
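To make the question concrete, here is a minimal, hypothetical sketch (the class name `NestedListProbe` and the helper `unwrapLists` are illustrative, not part of the PR) showing two ways to probe the nested-list schema above with the existing parquet-column API: the max definition/repetition levels over the leaf path, and a recursive unwrap of the standard three-level LIST encoding.

```java
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.LogicalTypeAnnotation;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.MessageTypeParser;
import org.apache.parquet.schema.Type;

// Hypothetical sketch, not the PR's implementation.
public class NestedListProbe {

  public static void main(String[] args) {
    // The nested-list schema from the review comment above, wrapped in a message.
    MessageType message =
        MessageTypeParser.parseMessageType(
            "message doc {\n"
                + "  optional group array_of_arrays (LIST) {\n"
                + "    repeated group list {\n"
                + "      required group element (LIST) {\n"
                + "        repeated group list {\n"
                + "          required int32 element;\n"
                + "        }\n"
                + "      }\n"
                + "    }\n"
                + "  }\n"
                + "}");

    // Levels over the full path to the int32 leaf:
    // max definition level counts optional + repeated wrappers (3 here),
    // max repetition level counts only repeated wrappers, i.e. list nesting (2 here).
    String[] leafPath = {"array_of_arrays", "list", "element", "list", "element"};
    int maxDef = message.getMaxDefinitionLevel(leafPath);
    int maxRep = message.getMaxRepetitionLevel(leafPath);

    // Alternative: recursively unwrap the LIST encoding to reach the innermost
    // element type without tracking depth explicitly.
    Type leaf = unwrapLists(message.getType("array_of_arrays"));

    System.out.println("maxDef=" + maxDef + ", maxRep=" + maxRep + ", leaf=" + leaf);
  }

  // Descends through LIST-annotated groups (list group -> repeated "list" group
  // -> element field) until a non-LIST type is reached.
  private static Type unwrapLists(Type type) {
    if (!type.isPrimitive()
        && type.getLogicalTypeAnnotation()
            instanceof LogicalTypeAnnotation.ListLogicalTypeAnnotation) {
      GroupType listGroup = type.asGroupType();
      Type repeated = listGroup.getType(0); // repeated group "list"
      Type element = repeated.asGroupType().getType(0); // the element field
      return unwrapLists(element);
    }
    return type;
  }
}
```

Since the max definition level also counts optional wrappers, the max repetition level is the more direct measure of list nesting; either way, the LIST branch of `toInternalSchema` ultimately has to recurse through the repeated `list`/`element` wrapper rather than assuming a single level of nesting.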