rdblue commented on a change in pull request #843: InputFormat support for Iceberg
URL: https://github.com/apache/incubator-iceberg/pull/843#discussion_r403174571
 
 

 ##########
 File path: mr/src/main/java/org/apache/iceberg/mr/mapreduce/IcebergInputFormat.java
 ##########
 @@ -0,0 +1,571 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.mr.mapreduce;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import java.io.Closeable;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.function.Function;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.mapreduce.InputFormat;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.iceberg.CombinedScanTask;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.FileScanTask;
+import org.apache.iceberg.PartitionField;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Schema;
+import org.apache.iceberg.SchemaParser;
+import org.apache.iceberg.StructLike;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.TableProperties;
+import org.apache.iceberg.TableScan;
+import org.apache.iceberg.avro.Avro;
+import org.apache.iceberg.catalog.Catalog;
+import org.apache.iceberg.catalog.TableIdentifier;
+import org.apache.iceberg.common.DynConstructors;
+import org.apache.iceberg.data.GenericRecord;
+import org.apache.iceberg.data.Record;
+import org.apache.iceberg.data.avro.DataReader;
+import org.apache.iceberg.data.parquet.GenericParquetReaders;
+import org.apache.iceberg.exceptions.RuntimeIOException;
+import org.apache.iceberg.expressions.Expression;
+import org.apache.iceberg.expressions.Expressions;
+import org.apache.iceberg.hadoop.HadoopInputFile;
+import org.apache.iceberg.hadoop.HadoopTables;
+import org.apache.iceberg.hadoop.Util;
+import org.apache.iceberg.io.CloseableIterable;
+import org.apache.iceberg.io.InputFile;
+import org.apache.iceberg.mr.SerializationUtil;
+import org.apache.iceberg.orc.ORC;
+import org.apache.iceberg.parquet.Parquet;
+import org.apache.iceberg.types.TypeUtil;
+import org.apache.iceberg.types.Types;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+/**
+ * Generic MRv2 InputFormat API for Iceberg.
+ *
+ * @param <T> the in-memory data model, which can be Pig tuples or Hive rows; the default is Iceberg generic records
+ */
+public class IcebergInputFormat<T> extends InputFormat<Void, T> {
+  private static final Logger LOG = LoggerFactory.getLogger(IcebergInputFormat.class);
+
+  static final String AS_OF_TIMESTAMP = "iceberg.mr.as.of.time";
+  static final String CASE_SENSITIVE = "iceberg.mr.case.sensitive";
+  static final String FILTER_EXPRESSION = "iceberg.mr.filter.expression";
+  static final String IN_MEMORY_DATA_MODEL = "iceberg.mr.in.memory.data.model";
+  static final String READ_SCHEMA = "iceberg.mr.read.schema";
+  static final String REUSE_CONTAINERS = "iceberg.mr.reuse.containers";
+  static final String SNAPSHOT_ID = "iceberg.mr.snapshot.id";
+  static final String SPLIT_SIZE = "iceberg.mr.split.size";
+  static final String TABLE_PATH = "iceberg.mr.table.path";
+  static final String TABLE_SCHEMA = "iceberg.mr.table.schema";
+  static final String LOCALITY = "iceberg.mr.locality";
+  static final String CATALOG = "iceberg.mr.catalog";
+  static final String PLATFORM_APPLIES_FILTER_RESIDUALS = "iceberg.mr.platform.applies.filter.residuals";
+
+  private transient List<InputSplit> splits;
+
+  private enum InMemoryDataModel {
+    PIG,
+    HIVE,
+    DEFAULT // default data model is Iceberg generic records
+  }
+
+  /**
+   * Configures the {@code Job} to use the {@code IcebergInputFormat} and
+   * returns a helper to add further configuration.
+   *
+   * @param job the {@code Job} to configure
+   */
+  public static ConfigBuilder configure(Job job) {
+    job.setInputFormatClass(IcebergInputFormat.class);
+    return new ConfigBuilder(job.getConfiguration());
+  }
+
+  public static class ConfigBuilder {
+    private final Configuration conf;
+
+    public ConfigBuilder(Configuration conf) {
+      this.conf = conf;
+    }
+
+    public ConfigBuilder readFrom(String path) {
+      conf.set(TABLE_PATH, path);
+      Table table = findTable(conf);
+      conf.set(TABLE_SCHEMA, SchemaParser.toJson(table.schema()));
+      return this;
+    }
+
+    public ConfigBuilder filter(Expression expression) {
+      conf.set(FILTER_EXPRESSION, SerializationUtil.serializeToBase64(expression));
+      return this;
+    }
+
+    public ConfigBuilder project(Schema schema) {
+      conf.set(READ_SCHEMA, SchemaParser.toJson(schema));
+      return this;
+    }
+
+    public ConfigBuilder reuseContainers(boolean reuse) {
+      conf.setBoolean(REUSE_CONTAINERS, reuse);
+      return this;
+    }
+
+    public ConfigBuilder caseSensitive(boolean caseSensitive) {
+      conf.setBoolean(CASE_SENSITIVE, caseSensitive);
+      return this;
+    }
+
+    public ConfigBuilder snapshotId(long snapshotId) {
+      conf.setLong(SNAPSHOT_ID, snapshotId);
+      return this;
+    }
+
+    public ConfigBuilder asOfTime(long asOfTime) {
+      conf.setLong(AS_OF_TIMESTAMP, asOfTime);
+      return this;
+    }
+
+    public ConfigBuilder splitSize(long splitSize) {
+      conf.setLong(SPLIT_SIZE, splitSize);
+      return this;
+    }
+
+    /**
+     * If this API is called, the constructed input splits
+     * will include host location information.
+     */
+    public ConfigBuilder preferLocality() {
+      conf.setBoolean(LOCALITY, true);
+      return this;
+    }
+
+    public ConfigBuilder catalogFunc(Class<? extends Function<Configuration, Catalog>> catalogFuncClass) {
+      Preconditions.checkState(
+          conf.get(TABLE_PATH) == null,
+          "Please provide a custom catalog before specifying the table to read from");
+      conf.setClass(CATALOG, catalogFuncClass, Function.class);
+      return this;
+    }
+
+    public ConfigBuilder useHiveRows() {
+      conf.set(IN_MEMORY_DATA_MODEL, InMemoryDataModel.HIVE.name());
+      return this;
+    }
+
+    public ConfigBuilder usePigTuples() {
+      conf.set(IN_MEMORY_DATA_MODEL, InMemoryDataModel.PIG.name());
+      return this;
+    }
+
+    /**
+     * Compute platforms pass down filters to data sources. If a data source
+     * cannot apply a filter, or can only partially apply it, it returns the
+     * residual filter back to the platform. A platform that can correctly
+     * apply residual filters should call this API; otherwise this InputFormat
+     * throws an exception when the pushed-down filter is not completely
+     * satisfied. Note: this does not apply to a standalone MR application.
+     */
+    public ConfigBuilder platformAppliesFilterResiduals() {
+      conf.setBoolean(PLATFORM_APPLIES_FILTER_RESIDUALS, true);
+      return this;
+    }
+  }
+
+  @Override
+  public List<InputSplit> getSplits(JobContext context) {
+    if (splits != null) {
+      LOG.info("Returning cached splits: {}", splits.size());
+      return splits;
+    }
+
+    Configuration conf = context.getConfiguration();
+    Table table = findTable(conf);
+    TableScan scan = table.newScan()
+                          .caseSensitive(conf.getBoolean(CASE_SENSITIVE, true));
+    long snapshotId = conf.getLong(SNAPSHOT_ID, -1);
+    if (snapshotId != -1) {
+      scan = scan.useSnapshot(snapshotId);
+    }
+    long asOfTime = conf.getLong(AS_OF_TIMESTAMP, -1);
+    if (asOfTime != -1) {
+      scan = scan.asOfTime(asOfTime);
+    }
+    long splitSize = conf.getLong(SPLIT_SIZE, 0);
+    if (splitSize > 0) {
+      scan = scan.option(TableProperties.SPLIT_SIZE, String.valueOf(splitSize));
+    }
+    String schemaStr = conf.get(READ_SCHEMA);
+    if (schemaStr != null) {
+      scan = scan.project(SchemaParser.fromJson(schemaStr));
+    }
+
+    // TODO add a filter parser to get rid of Serialization
+    Expression filter = SerializationUtil.deserializeFromBase64(conf.get(FILTER_EXPRESSION));
+    if (filter != null) {
+      scan = scan.filter(filter);
+    }
+
+    splits = Lists.newArrayList();
+    try (CloseableIterable<CombinedScanTask> tasksIterable = scan.planTasks()) {
+      tasksIterable.forEach(task -> {
+        checkResiduals(conf, task);
+        splits.add(new IcebergSplit(conf, task));
+      });
+    } catch (IOException e) {
+      throw new RuntimeIOException(e, "Failed to close table scan: %s", scan);
+    }
+
+    return splits;
+  }
+
+  private static void checkResiduals(Configuration conf, CombinedScanTask task) {
+    boolean platformAppliesFilter = conf.getBoolean(PLATFORM_APPLIES_FILTER_RESIDUALS, false);
+    //TODO remove the check on dataModel once we start supporting
+    // residual evaluation for Iceberg Generics in InputFormat
+    InMemoryDataModel dataModel = conf.getEnum(IN_MEMORY_DATA_MODEL, InMemoryDataModel.DEFAULT);
+    if (dataModel == InMemoryDataModel.DEFAULT || !platformAppliesFilter) {
+      task.files().forEach(fileScanTask -> {
+        Expression residual = fileScanTask.residual();
+        if (residual != null && !residual.equals(Expressions.alwaysTrue())) {
+          throw new RuntimeException(
+              String.format(
+                  "Filter expression %s is not completely satisfied. Additional rows " +
+                      "that do not satisfy the filter may be returned", residual));
+        }
+      });
+    }
+  }
+
+  @Override
+  public RecordReader<Void, T> createRecordReader(InputSplit split, TaskAttemptContext context) {
+    return new IcebergRecordReader<>();
+  }
+
+  private static final class IcebergRecordReader<T> extends RecordReader<Void, T> {
+    private TaskAttemptContext context;
+    private Iterator<FileScanTask> tasks;
+    private Iterator<T> currentIterator;
+    private T currentRow;
+    private Schema expectedSchema;
+    private Schema tableSchema;
+    private InMemoryDataModel inMemoryDataModel;
+    private Closeable currentCloseable;
+    private boolean reuseContainers;
+    private boolean caseSensitive;
+
+    @Override
+    public void initialize(InputSplit split, TaskAttemptContext newContext) {
+      Configuration conf = newContext.getConfiguration();
+      // For now IcebergInputFormat does its own split planning and does not
+      // accept FileSplit instances
+      CombinedScanTask task = ((IcebergSplit) split).task;
+      this.context = newContext;
+      this.tasks = task.files().iterator();
+      this.tableSchema = SchemaParser.fromJson(conf.get(TABLE_SCHEMA));
+      String readSchemaStr = conf.get(READ_SCHEMA);
+      if (readSchemaStr != null) {
+        this.expectedSchema = SchemaParser.fromJson(readSchemaStr);
+      }
+      this.reuseContainers = conf.getBoolean(REUSE_CONTAINERS, false);
+      this.caseSensitive = conf.getBoolean(CASE_SENSITIVE, true);
+      this.inMemoryDataModel = conf.getEnum(IN_MEMORY_DATA_MODEL, InMemoryDataModel.DEFAULT);
+      this.currentIterator = open(tasks.next());
+    }
+
+    @Override
+    public boolean nextKeyValue() throws IOException {
+      while (true) {
+        if (currentIterator.hasNext()) {
+          currentRow = currentIterator.next();
+          return true;
+        } else if (tasks.hasNext()) {
+          currentCloseable.close();
+          currentIterator = open(tasks.next());
+        } else {
+          return false;
+        }
+      }
+    }
+
+    @Override
+    public Void getCurrentKey() {
+      return null;
+    }
+
+    @Override
+    public T getCurrentValue() {
+      return currentRow;
+    }
+
+    @Override
+    public float getProgress() {
+      return context.getProgress();
 
 Review comment:
   Just that we could give a more accurate progress based on records read from the file. I don't think that `context.getProgress` has enough information to give an accurate progress value.
   
   This isn't that easy, since we don't know how much of the input split has been processed and we are pushing filters into Parquet and ORC. But we do know when a file is opened and could count the number of rows returned, so we can estimate. And we could also add a row count to the readers so that we can get an accurate count of rows that have been either returned or filtered out.
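   
   As a rough illustration (not something in this PR), here is a sketch of that row-count-based estimate, assuming hypothetical reader-side counters such as `tasksTotal`, `tasksCompleted`, `rowsReturnedInCurrentFile`, and `currentFileRecordCount`; until the readers also report filtered-out rows, this would still under-report progress for heavily filtered files:
   
   ```java
   // All fields referenced below are illustrative; the reader would need to maintain them.
   @Override
   public float getProgress() {
     if (tasksTotal == 0) {
       return 0f;
     }
     // Completed file scan tasks count as fully done; the in-flight file is
     // approximated by rows returned so far over the file's record count.
     float currentFileFraction = currentFileRecordCount > 0
         ? Math.min(1.0f, (float) rowsReturnedInCurrentFile / currentFileRecordCount)
         : 0.0f;
     return (tasksCompleted + currentFileFraction) / tasksTotal;
   }
   ```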

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 