prodeezy commented on a change in pull request #63: Use Iceberg writers for Parquet data written from Spark.
URL: https://github.com/apache/incubator-iceberg/pull/63#discussion_r262391461
 
 

 ##########
 File path: spark/src/main/java/com/netflix/iceberg/spark/data/SparkParquetWriters.java
 ##########
 @@ -0,0 +1,447 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package com.netflix.iceberg.spark.data;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.netflix.iceberg.Schema;
+import com.netflix.iceberg.parquet.ParquetTypeVisitor;
+import com.netflix.iceberg.parquet.ParquetValueReaders.ReusableEntry;
+import com.netflix.iceberg.parquet.ParquetValueWriter;
+import com.netflix.iceberg.parquet.ParquetValueWriters;
+import com.netflix.iceberg.parquet.ParquetValueWriters.PrimitiveWriter;
+import com.netflix.iceberg.parquet.ParquetValueWriters.RepeatedKeyValueWriter;
+import com.netflix.iceberg.parquet.ParquetValueWriters.RepeatedWriter;
+import com.netflix.iceberg.types.TypeUtil;
+import org.apache.parquet.column.ColumnDescriptor;
+import org.apache.parquet.io.api.Binary;
+import org.apache.parquet.schema.DecimalMetadata;
+import org.apache.parquet.schema.GroupType;
+import org.apache.parquet.schema.MessageType;
+import org.apache.parquet.schema.PrimitiveType;
+import org.apache.parquet.schema.Type;
+import org.apache.spark.sql.catalyst.InternalRow;
+import org.apache.spark.sql.catalyst.util.ArrayData;
+import org.apache.spark.sql.catalyst.util.MapData;
+import org.apache.spark.sql.types.DataType;
+import org.apache.spark.sql.types.Decimal;
+import org.apache.spark.unsafe.types.UTF8String;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+
+import static com.netflix.iceberg.parquet.ParquetValueWriters.option;
+import static com.netflix.iceberg.spark.SparkSchemaUtil.convert;
+
+public class SparkParquetWriters {
+  private SparkParquetWriters() {
+  }
+
+  @SuppressWarnings("unchecked")
+  public static <T> ParquetValueWriter<T> buildWriter(Schema schema, MessageType type) {
+    return (ParquetValueWriter<T>) ParquetTypeVisitor.visit(type, new WriteBuilder(schema, type));
+  }
+
+  private static class WriteBuilder extends ParquetTypeVisitor<ParquetValueWriter<?>> {
+    private final Schema schema;
+    private final MessageType type;
+
+    WriteBuilder(Schema schema, MessageType type) {
+      this.schema = schema;
+      this.type = type;
+    }
+
+    @Override
+    public ParquetValueWriter<?> message(MessageType message,
+                                         List<ParquetValueWriter<?>> fieldWriters) {
+      return struct(message.asGroupType(), fieldWriters);
+    }
+
+    @Override
+    public ParquetValueWriter<?> struct(GroupType struct,
+                                        List<ParquetValueWriter<?>> fieldWriters) {
+      List<Type> fields = struct.getFields();
+      List<ParquetValueWriter<?>> writers = Lists.newArrayListWithExpectedSize(fieldWriters.size());
+      List<DataType> sparkTypes = Lists.newArrayList();
+      for (int i = 0; i < fields.size(); i += 1) {
+        Type fieldType = struct.getType(i);
+        int fieldD = type.getMaxDefinitionLevel(path(fieldType.getName()));
+        writers.add(option(fieldType, fieldD, fieldWriters.get(i)));
+        sparkTypes.add(convert(schema.findType(fieldType.getId().intValue())));
+      }
+
+      return new InternalRowWriter(writers, sparkTypes);
+    }
+
+    @Override
+    public ParquetValueWriter<?> list(GroupType array, ParquetValueWriter<?> elementWriter) {
+      GroupType repeated = array.getFields().get(0).asGroupType();
+      String[] repeatedPath = currentPath();
+
+      int repeatedD = type.getMaxDefinitionLevel(repeatedPath);
+      int repeatedR = type.getMaxRepetitionLevel(repeatedPath);
+
+      org.apache.parquet.schema.Type elementType = repeated.getType(0);
+      int elementD = type.getMaxDefinitionLevel(path(elementType.getName()));
+
+      DataType elementSparkType = convert(schema.findType(elementType.getId().intValue()));
+
+      return new ArrayDataWriter<>(repeatedD, repeatedR,
+          option(elementType, elementD, elementWriter),
+          elementSparkType);
+    }
+
+    @Override
+    public ParquetValueWriter<?> map(GroupType map,
+                                     ParquetValueWriter<?> keyWriter,
+                                     ParquetValueWriter<?> valueWriter) {
+      GroupType repeatedKeyValue = map.getFields().get(0).asGroupType();
+      String[] repeatedPath = currentPath();
+
+      int repeatedD = type.getMaxDefinitionLevel(repeatedPath);
+      int repeatedR = type.getMaxRepetitionLevel(repeatedPath);
+
+      org.apache.parquet.schema.Type keyType = repeatedKeyValue.getType(0);
+      int keyD = type.getMaxDefinitionLevel(path(keyType.getName()));
+      DataType keySparkType = convert(schema.findType(keyType.getId().intValue()));
+      org.apache.parquet.schema.Type valueType = repeatedKeyValue.getType(1);
+      int valueD = type.getMaxDefinitionLevel(path(valueType.getName()));
+      DataType valueSparkType = convert(schema.findType(valueType.getId().intValue()));
+
+      return new MapDataWriter<>(repeatedD, repeatedR,
+          option(keyType, keyD, keyWriter), option(valueType, valueD, valueWriter),
+          keySparkType, valueSparkType);
+    }
+
+    @Override
+    public ParquetValueWriter<?> primitive(PrimitiveType primitive) {
+      ColumnDescriptor desc = type.getColumnDescription(currentPath());
+
+      if (primitive.getOriginalType() != null) {
+        switch (primitive.getOriginalType()) {
+          case ENUM:
+          case JSON:
+          case UTF8:
+            return utf8Strings(desc);
+          case DATE:
+          case INT_8:
+          case INT_16:
+          case INT_32:
+          case INT_64:
 
 Review comment:
   I realize that INT96 is deprecated per https://issues.apache.org/jira/browse/PARQUET-323, and I don't want to encourage people to use it in Iceberg. But if someone does want to rewrite data containing INT96 values into Iceberg (as INT64), would we run into this code? If so, shouldn't we handle it by truncating INT96 to INT64?
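
   For illustration, here is a minimal sketch of the kind of truncation I have in mind (not part of this patch; `int96ToMicros` is a hypothetical helper, and it assumes the common Impala/Spark INT96 layout: 8 little-endian bytes of nanos-of-day followed by 4 little-endian bytes of Julian day):

   ```java
   import java.nio.ByteBuffer;
   import java.nio.ByteOrder;

   public class Int96ToInt64 {
     // Julian day number of the Unix epoch, 1970-01-01.
     private static final long JULIAN_DAY_OF_EPOCH = 2440588L;
     private static final long MICROS_PER_DAY = 86_400L * 1_000_000L;

     // Converts a 12-byte INT96 timestamp into microseconds since the
     // Unix epoch, which fits in an INT64 timestamp column.
     static long int96ToMicros(byte[] int96) {
       ByteBuffer buf = ByteBuffer.wrap(int96).order(ByteOrder.LITTLE_ENDIAN);
       long nanosOfDay = buf.getLong(); // first 8 bytes
       long julianDay = buf.getInt();   // last 4 bytes
       return (julianDay - JULIAN_DAY_OF_EPOCH) * MICROS_PER_DAY + nanosOfDay / 1_000L;
     }
   }
   ```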

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
