chamikaramj commented on code in PR #30797:
URL: https://github.com/apache/beam/pull/30797#discussion_r1544706525


##########
sdks/java/io/iceberg/src/main/java/org/apache/beam/io/iceberg/IcebergIO.java:
##########
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.io.iceberg;
+
+import java.util.Arrays;
+import org.apache.beam.sdk.transforms.PTransform;
+import org.apache.beam.sdk.transforms.SerializableBiFunction;
+import org.apache.beam.sdk.transforms.SerializableFunctions;
+import org.apache.beam.sdk.values.PCollection;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.catalog.Namespace;
+import org.apache.iceberg.catalog.TableIdentifier;
+import org.apache.iceberg.data.Record;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public class IcebergIO {
+
+  public static <ElementT, DestinationT> Write<ElementT, DestinationT> writeToDestinations(
+      IcebergCatalog catalog,
+      DynamicDestinations<ElementT, DestinationT> dynamicDestinations,

Review Comment:
   I'm wondering if we can strip out the UDF-based dynamic destinations and think about how to introduce dynamic destinations to this I/O in a portable way, based on https://s.apache.org/portable-dynamic-destinations
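   
   For illustration, a minimal sketch of what template-based dynamic destinations could look like per that doc. The template string, the field substitution, and `rows` as a `PCollection<Row>` are all assumptions for the sketch, not part of this PR:
   
   ```java
   import org.apache.beam.sdk.schemas.Schema;
   import org.apache.beam.sdk.transforms.MapElements;
   import org.apache.beam.sdk.values.KV;
   import org.apache.beam.sdk.values.PCollection;
   import org.apache.beam.sdk.values.Row;
   import org.apache.beam.sdk.values.TypeDescriptors;

   // Hypothetical: resolve the destination table per record from a serializable
   // template string such as "db.table_{color}", instead of a user-supplied UDF.
   String template = "db.table_{color}";

   PCollection<KV<String, Row>> keyed =
       rows.apply(
           "ResolveDestination",
           MapElements.into(
                   TypeDescriptors.kvs(TypeDescriptors.strings(), TypeDescriptors.rows()))
               .via(
                   row -> {
                     String dest = template;
                     // Substitute each {field} placeholder with the row's value.
                     for (Schema.Field field : row.getSchema().getFields()) {
                       dest =
                           dest.replace(
                               "{" + field.getName() + "}",
                               String.valueOf(row.getValue(field.getName())));
                     }
                     return KV.of(dest, row);
                   }));
   ```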



##########
sdks/java/io/iceberg/src/main/java/org/apache/beam/io/iceberg/WriteToDestinations.java:
##########
@@ -0,0 +1,242 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.io.iceberg;
+
+import java.util.Collections;
+import java.util.UUID;
+import org.apache.beam.io.iceberg.WriteBundlesToFiles.Result;
+import org.apache.beam.sdk.Pipeline;
+import org.apache.beam.sdk.coders.Coder;
+import org.apache.beam.sdk.coders.IterableCoder;
+import org.apache.beam.sdk.coders.KvCoder;
+import org.apache.beam.sdk.coders.SerializableCoder;
+import org.apache.beam.sdk.coders.ShardedKeyCoder;
+import org.apache.beam.sdk.coders.StringUtf8Coder;
+import org.apache.beam.sdk.transforms.Create;
+import org.apache.beam.sdk.transforms.DoFn;
+import org.apache.beam.sdk.transforms.Flatten;
+import org.apache.beam.sdk.transforms.GroupByKey;
+import org.apache.beam.sdk.transforms.MapElements;
+import org.apache.beam.sdk.transforms.PTransform;
+import org.apache.beam.sdk.transforms.ParDo;
+import org.apache.beam.sdk.transforms.SimpleFunction;
+import org.apache.beam.sdk.transforms.View;
+import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
+import org.apache.beam.sdk.values.KV;
+import org.apache.beam.sdk.values.PCollection;
+import org.apache.beam.sdk.values.PCollectionList;
+import org.apache.beam.sdk.values.PCollectionTuple;
+import org.apache.beam.sdk.values.PCollectionView;
+import org.apache.beam.sdk.values.ShardedKey;
+import org.apache.beam.sdk.values.TupleTag;
+import org.apache.beam.sdk.values.TupleTagList;
+import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.annotations.VisibleForTesting;
+import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.collect.ImmutableList;
+import org.apache.iceberg.AppendFiles;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.catalog.TableIdentifier;
+
+class WriteToDestinations<DestinationT, ElementT>
+    extends PTransform<
+        PCollection<KV<DestinationT, ElementT>>, IcebergWriteResult<DestinationT, ElementT>> {
+
+  @VisibleForTesting static final int DEFAULT_MAX_WRITERS_PER_BUNDLE = 20;

Review Comment:
   Any idea how we got to these defaults? (If so, we should document them.)



##########
sdks/java/io/iceberg/src/main/java/org/apache/beam/io/iceberg/IcebergIO.java:
##########
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.io.iceberg;
+
+import java.util.Arrays;
+import org.apache.beam.sdk.transforms.PTransform;
+import org.apache.beam.sdk.transforms.SerializableBiFunction;
+import org.apache.beam.sdk.transforms.SerializableFunctions;
+import org.apache.beam.sdk.values.PCollection;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.catalog.Namespace;
+import org.apache.iceberg.catalog.TableIdentifier;
+import org.apache.iceberg.data.Record;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public class IcebergIO {
+
+  public static <ElementT, DestinationT> Write<ElementT, DestinationT> writeToDestinations(
+      IcebergCatalog catalog,
+      DynamicDestinations<ElementT, DestinationT> dynamicDestinations,
+      SerializableBiFunction<Record, ElementT, Record> toRecord) {
+    return new Write<>(catalog, dynamicDestinations, toRecord);
+  }
+
+  public static TableFactory<String> forCatalog(final IcebergCatalog catalog) {

Review Comment:
   Is it possible to easily convert "IcebergCatalog" into a portable representation for SchemaTransforms?
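   
   One possibility, sketched below: describe the catalog portably as a name plus a `Map<String, String>` of Iceberg catalog properties (which maps directly onto a Beam `Row`), and rebuild it on workers with Iceberg's `CatalogUtil`. The property values are invented examples:
   
   ```java
   import java.util.Map;
   import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.collect.ImmutableMap;
   import org.apache.hadoop.conf.Configuration;
   import org.apache.iceberg.CatalogUtil;
   import org.apache.iceberg.catalog.Catalog;

   // Sketch only: a name + string-properties map is Schema/Row-encodable and is
   // enough to reconstruct the catalog anywhere via Iceberg's CatalogUtil.
   Map<String, String> props =
       ImmutableMap.of(
           "type", "hadoop",
           "warehouse", "gs://example-bucket/warehouse");
   Catalog catalog =
       CatalogUtil.buildIcebergCatalog("my_catalog", props, new Configuration());
   ```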



##########
sdks/java/io/iceberg/src/main/java/org/apache/beam/io/iceberg/IcebergIO.java:
##########
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.io.iceberg;
+
+import java.util.Arrays;
+import org.apache.beam.sdk.transforms.PTransform;
+import org.apache.beam.sdk.transforms.SerializableBiFunction;
+import org.apache.beam.sdk.transforms.SerializableFunctions;
+import org.apache.beam.sdk.values.PCollection;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.catalog.Namespace;
+import org.apache.iceberg.catalog.TableIdentifier;
+import org.apache.iceberg.data.Record;
+import org.checkerframework.checker.nullness.qual.NonNull;
+
+public class IcebergIO {
+
+  public static <ElementT, DestinationT> Write<ElementT, DestinationT> writeToDestinations(
+      IcebergCatalog catalog,
+      DynamicDestinations<ElementT, DestinationT> dynamicDestinations,
+      SerializableBiFunction<Record, ElementT, Record> toRecord) {
+    return new Write<>(catalog, dynamicDestinations, toRecord);
+  }
+
+  public static TableFactory<String> forCatalog(final IcebergCatalog catalog) {
+    return new TableFactory<String>() {
+      @Override
+      public Table getTable(String id) {
+        TableIdentifier tableId = TableIdentifier.parse(id);
+        // If the first element in the namespace is our catalog, remove that.
+        if (tableId.hasNamespace()) {
+          Namespace ns = tableId.namespace();
+          if (catalog.catalog().name().equals(ns.level(0))) {
+            String[] levels = ns.levels();
+            @SuppressWarnings("nullness") // we know that copyOfRange will not do any padding
+            @NonNull
+            String[] levelsMinusFirst = Arrays.copyOfRange(levels, 1, levels.length);
+            tableId = TableIdentifier.of(Namespace.of(levelsMinusFirst), tableId.name());
+          }
+        }
+        return catalog.catalog().loadTable(tableId);
+      }
+    };
+  }
+
+  public static class Write<ElementT, DestinationT>

Review Comment:
   I would just limit this to `PTransform<PCollection<Row>, IcebergWriteResult<Row>>` to make this portability-first and friendly for SchemaTransforms.
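   
   A sketch of the suggested shape (`IcebergWriteResult` specialized to `Row` is assumed here; the PR currently parameterizes it over `DestinationT`/`ElementT`):
   
   ```java
   public static class Write extends PTransform<PCollection<Row>, IcebergWriteResult<Row>> {
     @Override
     public IcebergWriteResult<Row> expand(PCollection<Row> input) {
       // Rows carry a Beam Schema, so no user UDF is needed to interpret the
       // elements; that keeps the transform portable and SchemaTransform-friendly.
       throw new UnsupportedOperationException("sketch only");
     }
   }
   ```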



##########
sdks/java/io/iceberg/src/main/java/org/apache/beam/io/iceberg/WriteToDestinations.java:
##########
@@ -0,0 +1,242 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.io.iceberg;
+
+import java.util.Collections;
+import java.util.UUID;
+import org.apache.beam.io.iceberg.WriteBundlesToFiles.Result;
+import org.apache.beam.sdk.Pipeline;
+import org.apache.beam.sdk.coders.Coder;
+import org.apache.beam.sdk.coders.IterableCoder;
+import org.apache.beam.sdk.coders.KvCoder;
+import org.apache.beam.sdk.coders.SerializableCoder;
+import org.apache.beam.sdk.coders.ShardedKeyCoder;
+import org.apache.beam.sdk.coders.StringUtf8Coder;
+import org.apache.beam.sdk.transforms.Create;
+import org.apache.beam.sdk.transforms.DoFn;
+import org.apache.beam.sdk.transforms.Flatten;
+import org.apache.beam.sdk.transforms.GroupByKey;
+import org.apache.beam.sdk.transforms.MapElements;
+import org.apache.beam.sdk.transforms.PTransform;
+import org.apache.beam.sdk.transforms.ParDo;
+import org.apache.beam.sdk.transforms.SimpleFunction;
+import org.apache.beam.sdk.transforms.View;
+import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
+import org.apache.beam.sdk.values.KV;
+import org.apache.beam.sdk.values.PCollection;
+import org.apache.beam.sdk.values.PCollectionList;
+import org.apache.beam.sdk.values.PCollectionTuple;
+import org.apache.beam.sdk.values.PCollectionView;
+import org.apache.beam.sdk.values.ShardedKey;
+import org.apache.beam.sdk.values.TupleTag;
+import org.apache.beam.sdk.values.TupleTagList;
+import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.annotations.VisibleForTesting;
+import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.collect.ImmutableList;
+import org.apache.iceberg.AppendFiles;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.catalog.TableIdentifier;
+
+class WriteToDestinations<DestinationT, ElementT>
+    extends PTransform<
+        PCollection<KV<DestinationT, ElementT>>, IcebergWriteResult<DestinationT, ElementT>> {
+
+  @VisibleForTesting static final int DEFAULT_MAX_WRITERS_PER_BUNDLE = 20;
+  @VisibleForTesting static final int DEFAULT_MAX_FILES_PER_PARTITION = 10_000;
+  @VisibleForTesting static final long DEFAULT_MAX_BYTES_PER_PARTITION = 10L * (1L << 40); // 10TB
+  static final long DEFAULT_MAX_BYTES_PER_FILE = (1L << 40); // 1TB
+  static final int DEFAULT_NUM_FILE_SHARDS = 0;
+  static final int FILE_TRIGGERING_RECORD_COUNT = 50_000;
+
+  private final Coder<DestinationT> destinationCoder;
+
+  private final RecordWriterFactory<ElementT, DestinationT> recordWriterFactory;
+  private final TableFactory<String> tableFactory;
+
+  WriteToDestinations(
+      Coder<DestinationT> destinationCoder,
+      RecordWriterFactory<ElementT, DestinationT> recordWriterFactory,
+      TableFactory<String> tableFactory) {
+    this.destinationCoder = destinationCoder;
+    this.recordWriterFactory = recordWriterFactory;
+    this.tableFactory = tableFactory;
+  }
+
+  private PCollectionView<String> createJobIdPrefixView(Pipeline p) {
+
+    final String jobName = p.getOptions().getJobName();
+
+    return p.apply("JobIdCreationRoot_", Create.of((Void) null))
+        .apply(
+            "CreateJobId",
+            ParDo.of(
+                new DoFn<Void, String>() {
+                  @ProcessElement
+                  public void process(ProcessContext c) {
+                    c.output(jobName + "-" + UUID.randomUUID().toString());
+                  }
+                }))
+        .apply("JobIdSideInput", View.asSingleton());
+  }
+
+  @Override
+  public IcebergWriteResult<DestinationT, ElementT> expand(
+      PCollection<KV<DestinationT, ElementT>> input) {
+
+    final PCollectionView<String> fileView = createJobIdPrefixView(input.getPipeline());
+    // We always do the equivalent of a dynamically sharded file creation
+    TupleTag<Result<DestinationT>> writtenFilesTag = new TupleTag<>("writtenFiles");
+    TupleTag<KV<ShardedKey<DestinationT>, ElementT>> successfulWritesTag =
+        new TupleTag<>("successfulWrites");
+    TupleTag<KV<ShardedKey<DestinationT>, ElementT>> failedWritesTag =
+        new TupleTag<>("failedWrites");
+    TupleTag<KV<TableIdentifier, Snapshot>> snapshotsTag = new TupleTag<>("snapshots");
+
+    final Coder<ElementT> elementCoder =
+        ((KvCoder<DestinationT, ElementT>) input.getCoder()).getValueCoder();
+
+    // Write everything to files
+    PCollectionTuple writeBundlesToFiles =
+        input.apply(
+            "Write Bundles To Files",
+            ParDo.of(
+                    new WriteBundlesToFiles<>(
+                        fileView,
+                        successfulWritesTag,
+                        failedWritesTag,
+                        DEFAULT_MAX_WRITERS_PER_BUNDLE,
+                        DEFAULT_MAX_BYTES_PER_FILE,
+                        recordWriterFactory))
+                .withSideInputs(fileView)
+                .withOutputTags(
+                    writtenFilesTag,
+                    TupleTagList.of(ImmutableList.of(successfulWritesTag, failedWritesTag))));
+
+    PCollection<KV<ShardedKey<DestinationT>, ElementT>> successfulWrites =
+        writeBundlesToFiles
+            .get(successfulWritesTag)
+            .setCoder(KvCoder.of(ShardedKeyCoder.of(destinationCoder), elementCoder));
+
+    PCollection<KV<ShardedKey<DestinationT>, ElementT>> failedWrites =

Review Comment:
   Can we use the new DLQ framework instead? (It seems like this is following the old DLQ implementation in BQ.)
   
   The new framework also considers portability aspects, for example, so it's more advantageous.
   
   https://docs.google.com/document/d/1NGeCk6tOqF-TiGEAV7ixd_vhIiWz9sHPlCa1P_77Ajs/edit?tab=t.0#heading=h.fppublcudjbt
   
   (This can be a separate PR, but we should remove the DLQ feature from this PR in that case.)
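   
   For reference, a rough sketch of how the new framework gets wired up (`Pipeline.registerBadRecordErrorHandler` per the linked doc; the `withErrorHandler(...)` method on the Iceberg write is purely an assumption):
   
   ```java
   import org.apache.beam.sdk.transforms.Count;
   import org.apache.beam.sdk.transforms.errorhandling.BadRecord;
   import org.apache.beam.sdk.transforms.errorhandling.ErrorHandler;

   // A trivial DLQ sink for the sketch: it just counts bad records.
   PTransform<PCollection<BadRecord>, PCollection<Long>> dlqSink =
       new PTransform<PCollection<BadRecord>, PCollection<Long>>() {
         @Override
         public PCollection<Long> expand(PCollection<BadRecord> input) {
           return input.apply("CountBadRecords", Count.globally());
         }
       };

   // The handler must be closed before the pipeline runs, hence try-with-resources.
   try (ErrorHandler.BadRecordErrorHandler<PCollection<Long>> handler =
       pipeline.registerBadRecordErrorHandler(dlqSink)) {
     // Hypothetical API: route failed writes to the handler instead of
     // returning them as a failedWrites output PCollection.
     input.apply(icebergWrite.withErrorHandler(handler));
   }
   ```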
   



##########
sdks/java/io/iceberg/src/main/java/org/apache/beam/io/iceberg/WriteToDestinations.java:
##########
@@ -0,0 +1,242 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.io.iceberg;
+
+import java.util.Collections;
+import java.util.UUID;
+import org.apache.beam.io.iceberg.WriteBundlesToFiles.Result;
+import org.apache.beam.sdk.Pipeline;
+import org.apache.beam.sdk.coders.Coder;
+import org.apache.beam.sdk.coders.IterableCoder;
+import org.apache.beam.sdk.coders.KvCoder;
+import org.apache.beam.sdk.coders.SerializableCoder;
+import org.apache.beam.sdk.coders.ShardedKeyCoder;
+import org.apache.beam.sdk.coders.StringUtf8Coder;
+import org.apache.beam.sdk.transforms.Create;
+import org.apache.beam.sdk.transforms.DoFn;
+import org.apache.beam.sdk.transforms.Flatten;
+import org.apache.beam.sdk.transforms.GroupByKey;
+import org.apache.beam.sdk.transforms.MapElements;
+import org.apache.beam.sdk.transforms.PTransform;
+import org.apache.beam.sdk.transforms.ParDo;
+import org.apache.beam.sdk.transforms.SimpleFunction;
+import org.apache.beam.sdk.transforms.View;
+import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
+import org.apache.beam.sdk.values.KV;
+import org.apache.beam.sdk.values.PCollection;
+import org.apache.beam.sdk.values.PCollectionList;
+import org.apache.beam.sdk.values.PCollectionTuple;
+import org.apache.beam.sdk.values.PCollectionView;
+import org.apache.beam.sdk.values.ShardedKey;
+import org.apache.beam.sdk.values.TupleTag;
+import org.apache.beam.sdk.values.TupleTagList;
+import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.annotations.VisibleForTesting;
+import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.collect.ImmutableList;
+import org.apache.iceberg.AppendFiles;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.catalog.TableIdentifier;
+
+class WriteToDestinations<DestinationT, ElementT>
+    extends PTransform<
+        PCollection<KV<DestinationT, ElementT>>, IcebergWriteResult<DestinationT, ElementT>> {
+
+  @VisibleForTesting static final int DEFAULT_MAX_WRITERS_PER_BUNDLE = 20;
+  @VisibleForTesting static final int DEFAULT_MAX_FILES_PER_PARTITION = 10_000;
+  @VisibleForTesting static final long DEFAULT_MAX_BYTES_PER_PARTITION = 10L * (1L << 40); // 10TB
+  static final long DEFAULT_MAX_BYTES_PER_FILE = (1L << 40); // 1TB
+  static final int DEFAULT_NUM_FILE_SHARDS = 0;
+  static final int FILE_TRIGGERING_RECORD_COUNT = 50_000;
+
+  private final Coder<DestinationT> destinationCoder;
+
+  private final RecordWriterFactory<ElementT, DestinationT> recordWriterFactory;
+  private final TableFactory<String> tableFactory;
+
+  WriteToDestinations(
+      Coder<DestinationT> destinationCoder,
+      RecordWriterFactory<ElementT, DestinationT> recordWriterFactory,
+      TableFactory<String> tableFactory) {
+    this.destinationCoder = destinationCoder;
+    this.recordWriterFactory = recordWriterFactory;
+    this.tableFactory = tableFactory;
+  }
+
+  private PCollectionView<String> createJobIdPrefixView(Pipeline p) {
+
+    final String jobName = p.getOptions().getJobName();
+
+    return p.apply("JobIdCreationRoot_", Create.of((Void) null))
+        .apply(
+            "CreateJobId",
+            ParDo.of(
+                new DoFn<Void, String>() {
+                  @ProcessElement
+                  public void process(ProcessContext c) {
+                    c.output(jobName + "-" + UUID.randomUUID().toString());
+                  }
+                }))
+        .apply("JobIdSideInput", View.asSingleton());
+  }
+
+  @Override
+  public IcebergWriteResult<DestinationT, ElementT> expand(
+      PCollection<KV<DestinationT, ElementT>> input) {
+
+    final PCollectionView<String> fileView = createJobIdPrefixView(input.getPipeline());
+    // We always do the equivalent of a dynamically sharded file creation
+    TupleTag<Result<DestinationT>> writtenFilesTag = new TupleTag<>("writtenFiles");
+    TupleTag<KV<ShardedKey<DestinationT>, ElementT>> successfulWritesTag =
+        new TupleTag<>("successfulWrites");
+    TupleTag<KV<ShardedKey<DestinationT>, ElementT>> failedWritesTag =
+        new TupleTag<>("failedWrites");
+    TupleTag<KV<TableIdentifier, Snapshot>> snapshotsTag = new TupleTag<>("snapshots");
+
+    final Coder<ElementT> elementCoder =
+        ((KvCoder<DestinationT, ElementT>) input.getCoder()).getValueCoder();
+
+    // Write everything to files
+    PCollectionTuple writeBundlesToFiles =
+        input.apply(
+            "Write Bundles To Files",
+            ParDo.of(
+                    new WriteBundlesToFiles<>(
+                        fileView,
+                        successfulWritesTag,
+                        failedWritesTag,
+                        DEFAULT_MAX_WRITERS_PER_BUNDLE,
+                        DEFAULT_MAX_BYTES_PER_FILE,
+                        recordWriterFactory))
+                .withSideInputs(fileView)
+                .withOutputTags(
+                    writtenFilesTag,
+                    TupleTagList.of(ImmutableList.of(successfulWritesTag, failedWritesTag))));
+
+    PCollection<KV<ShardedKey<DestinationT>, ElementT>> successfulWrites =
+        writeBundlesToFiles
+            .get(successfulWritesTag)
+            .setCoder(KvCoder.of(ShardedKeyCoder.of(destinationCoder), elementCoder));
+
+    PCollection<KV<ShardedKey<DestinationT>, ElementT>> failedWrites =
+        writeBundlesToFiles
+            .get(failedWritesTag)
+            .setCoder(KvCoder.of(ShardedKeyCoder.of(destinationCoder), elementCoder));
+
+    PCollection<Result<DestinationT>> writtenFilesGrouped =
+        failedWrites

Review Comment:
   I'm not sure what we are doing here. Are we trying to write failed records again and flatten them with the originally written records (in the subsequent step below)? Possibly we should be writing failed records to a DLQ?



##########
sdks/java/io/iceberg/src/main/java/org/apache/beam/io/iceberg/WriteToDestinations.java:
##########
@@ -0,0 +1,242 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.io.iceberg;
+
+import java.util.Collections;
+import java.util.UUID;
+import org.apache.beam.io.iceberg.WriteBundlesToFiles.Result;
+import org.apache.beam.sdk.Pipeline;
+import org.apache.beam.sdk.coders.Coder;
+import org.apache.beam.sdk.coders.IterableCoder;
+import org.apache.beam.sdk.coders.KvCoder;
+import org.apache.beam.sdk.coders.SerializableCoder;
+import org.apache.beam.sdk.coders.ShardedKeyCoder;
+import org.apache.beam.sdk.coders.StringUtf8Coder;
+import org.apache.beam.sdk.transforms.Create;
+import org.apache.beam.sdk.transforms.DoFn;
+import org.apache.beam.sdk.transforms.Flatten;
+import org.apache.beam.sdk.transforms.GroupByKey;
+import org.apache.beam.sdk.transforms.MapElements;
+import org.apache.beam.sdk.transforms.PTransform;
+import org.apache.beam.sdk.transforms.ParDo;
+import org.apache.beam.sdk.transforms.SimpleFunction;
+import org.apache.beam.sdk.transforms.View;
+import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
+import org.apache.beam.sdk.values.KV;
+import org.apache.beam.sdk.values.PCollection;
+import org.apache.beam.sdk.values.PCollectionList;
+import org.apache.beam.sdk.values.PCollectionTuple;
+import org.apache.beam.sdk.values.PCollectionView;
+import org.apache.beam.sdk.values.ShardedKey;
+import org.apache.beam.sdk.values.TupleTag;
+import org.apache.beam.sdk.values.TupleTagList;
+import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.annotations.VisibleForTesting;
+import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.collect.ImmutableList;
+import org.apache.iceberg.AppendFiles;
+import org.apache.iceberg.DataFile;
+import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.catalog.TableIdentifier;
+
+class WriteToDestinations<DestinationT, ElementT>
+    extends PTransform<
+        PCollection<KV<DestinationT, ElementT>>, IcebergWriteResult<DestinationT, ElementT>> {
+
+  @VisibleForTesting static final int DEFAULT_MAX_WRITERS_PER_BUNDLE = 20;
+  @VisibleForTesting static final int DEFAULT_MAX_FILES_PER_PARTITION = 10_000;
+  @VisibleForTesting static final long DEFAULT_MAX_BYTES_PER_PARTITION = 10L * (1L << 40); // 10TB
+  static final long DEFAULT_MAX_BYTES_PER_FILE = (1L << 40); // 1TB
+  static final int DEFAULT_NUM_FILE_SHARDS = 0;
+  static final int FILE_TRIGGERING_RECORD_COUNT = 50_000;
+
+  private final Coder<DestinationT> destinationCoder;
+
+  private final RecordWriterFactory<ElementT, DestinationT> recordWriterFactory;
+  private final TableFactory<String> tableFactory;
+
+  WriteToDestinations(
+      Coder<DestinationT> destinationCoder,
+      RecordWriterFactory<ElementT, DestinationT> recordWriterFactory,
+      TableFactory<String> tableFactory) {
+    this.destinationCoder = destinationCoder;
+    this.recordWriterFactory = recordWriterFactory;
+    this.tableFactory = tableFactory;
+  }
+
+  private PCollectionView<String> createJobIdPrefixView(Pipeline p) {
+
+    final String jobName = p.getOptions().getJobName();
+
+    return p.apply("JobIdCreationRoot_", Create.of((Void) null))
+        .apply(
+            "CreateJobId",
+            ParDo.of(
+                new DoFn<Void, String>() {
+                  @ProcessElement
+                  public void process(ProcessContext c) {
+                    c.output(jobName + "-" + UUID.randomUUID().toString());
+                  }
+                }))
+        .apply("JobIdSideInput", View.asSingleton());
+  }
+
+  @Override
+  public IcebergWriteResult<DestinationT, ElementT> expand(
+      PCollection<KV<DestinationT, ElementT>> input) {
+
+    final PCollectionView<String> fileView = createJobIdPrefixView(input.getPipeline());
+    // We always do the equivalent of a dynamically sharded file creation
+    TupleTag<Result<DestinationT>> writtenFilesTag = new TupleTag<>("writtenFiles");
+    TupleTag<KV<ShardedKey<DestinationT>, ElementT>> successfulWritesTag =
+        new TupleTag<>("successfulWrites");
+    TupleTag<KV<ShardedKey<DestinationT>, ElementT>> failedWritesTag =
+        new TupleTag<>("failedWrites");
+    TupleTag<KV<TableIdentifier, Snapshot>> snapshotsTag = new TupleTag<>("snapshots");
+
+    final Coder<ElementT> elementCoder =
+        ((KvCoder<DestinationT, ElementT>) input.getCoder()).getValueCoder();
+
+    // Write everything to files
+    PCollectionTuple writeBundlesToFiles =
+        input.apply(
+            "Write Bundles To Files",
+            ParDo.of(
+                    new WriteBundlesToFiles<>(
+                        fileView,
+                        successfulWritesTag,
+                        failedWritesTag,
+                        DEFAULT_MAX_WRITERS_PER_BUNDLE,
+                        DEFAULT_MAX_BYTES_PER_FILE,
+                        recordWriterFactory))
+                .withSideInputs(fileView)
+                .withOutputTags(
+                    writtenFilesTag,
+                    TupleTagList.of(ImmutableList.of(successfulWritesTag, failedWritesTag))));
+
+    PCollection<KV<ShardedKey<DestinationT>, ElementT>> successfulWrites =
+        writeBundlesToFiles
+            .get(successfulWritesTag)
+            .setCoder(KvCoder.of(ShardedKeyCoder.of(destinationCoder), elementCoder));
+
+    PCollection<KV<ShardedKey<DestinationT>, ElementT>> failedWrites =
+        writeBundlesToFiles
+            .get(failedWritesTag)
+            .setCoder(KvCoder.of(ShardedKeyCoder.of(destinationCoder), elementCoder));
+
+    PCollection<Result<DestinationT>> writtenFilesGrouped =
+        failedWrites
+            .apply("Group By Destination", GroupByKey.create())
+            .apply(
+                "Strip Shard ID",
+                MapElements.via(
+                    new SimpleFunction<
+                        KV<ShardedKey<DestinationT>, Iterable<ElementT>>,
+                        KV<DestinationT, Iterable<ElementT>>>() {
+                      @Override
+                      public KV<DestinationT, Iterable<ElementT>> apply(
+                          KV<ShardedKey<DestinationT>, Iterable<ElementT>> input11) {
+                        return KV.of(input11.getKey().getKey(), input11.getValue());
+                      }
+                    }))
+            .setCoder(KvCoder.of(destinationCoder, IterableCoder.of(elementCoder)))
+            .apply(
+                "Write Grouped Records",
+                ParDo.of(
+                    new WriteBundlesToFiles.WriteGroupedRecordsToFiles<>(
+                        fileView, DEFAULT_MAX_BYTES_PER_FILE, recordWriterFactory)))
+            .setCoder(WriteBundlesToFiles.ResultCoder.of(destinationCoder));
+
+    PCollection<Result<DestinationT>> catalogUpdates =
+        PCollectionList.of(
+                writeBundlesToFiles
+                    .get(writtenFilesTag)
+                    .setCoder(WriteBundlesToFiles.ResultCoder.of(destinationCoder)))
+            .and(writtenFilesGrouped)
+            .apply("Flatten Files", Flatten.pCollections())
+            .setCoder(WriteBundlesToFiles.ResultCoder.of(destinationCoder));
+
+    // Apply any sharded writes and flatten everything for catalog updates
+    PCollection<KV<String, Snapshot>> snapshots =
+        catalogUpdates
+            .apply(
+                "Extract Data File",
+                ParDo.of(
+                    new DoFn<Result<DestinationT>, KV<String, MetadataUpdate>>() {
+                      @ProcessElement
+                      public void processElement(
+                          ProcessContext c, @Element Result<DestinationT> element) {
+                        c.output(
+                            KV.of(
+                                element.tableId,
+                                new MetadataUpdate(
+                                    element.partitionSpec.partitionType(),
+                                    element.update.getDataFiles(),
+                                    Collections.emptyList())));
+                      }
+                    }))
+            .setCoder(KvCoder.of(StringUtf8Coder.of(), MetadataUpdate.coder()))
+            .apply(GroupByKey.create())
+            .apply("Write Metadata Updates", ParDo.of(new 
MetadataUpdates<>(tableFactory)))
+            .setCoder(KvCoder.of(StringUtf8Coder.of(), SerializableCoder.of(Snapshot.class)));
+
+    return new IcebergWriteResult<>(
+        input.getPipeline(),
+        successfulWrites,
+        catalogUpdates,
+        snapshots,
+        successfulWritesTag,
+        writtenFilesTag,
+        snapshotsTag);
+  }
+
+  public enum WriteFormat {
+    AVRO,
+    PARQUET,
+    ORC
+  }
+
+  public static class MetadataUpdates<IdentifierT>

Review Comment:
   Probably rename to `MetadataUpdateDoFn` for clarity.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
