aokolnychyi commented on a change in pull request #3763:
URL: https://github.com/apache/iceberg/pull/3763#discussion_r773298290



##########
File path: spark/v3.2/spark-extensions/src/main/scala/org/apache/spark/sql/execution/datasources/v2/WriteDeltaExec.scala
##########
@@ -0,0 +1,235 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.spark.sql.execution.datasources.v2
+
+import org.apache.spark.SparkEnv
+import org.apache.spark.SparkException
+import org.apache.spark.TaskContext
+import org.apache.spark.internal.Logging
+import org.apache.spark.rdd.RDD
+import org.apache.spark.sql.catalyst.InternalRow
+import org.apache.spark.sql.catalyst.expressions.AttributeSet
+import org.apache.spark.sql.catalyst.util.RowDeltaUtils._
+import org.apache.spark.sql.catalyst.util.WriteDeltaProjections
+import org.apache.spark.sql.connector.iceberg.write.DeltaWrite
+import org.apache.spark.sql.connector.iceberg.write.DeltaWriter
+import org.apache.spark.sql.connector.write.BatchWrite
+import org.apache.spark.sql.connector.write.DataWriter
+import org.apache.spark.sql.connector.write.DataWriterFactory
+import org.apache.spark.sql.connector.write.PhysicalWriteInfoImpl
+import org.apache.spark.sql.connector.write.WriterCommitMessage
+import org.apache.spark.sql.errors.QueryExecutionErrors
+import org.apache.spark.sql.execution.SparkPlan
+import org.apache.spark.sql.execution.metric.CustomMetrics
+import org.apache.spark.sql.execution.metric.SQLMetric
+import org.apache.spark.util.LongAccumulator
+import org.apache.spark.util.Utils
+import scala.util.control.NonFatal
+
+/**
+ * Physical plan node to write a delta of rows to an existing table.
+ */
+case class WriteDeltaExec(
+    query: SparkPlan,
+    refreshCache: () => Unit,
+    projections: WriteDeltaProjections,
+    write: DeltaWrite) extends ExtendedV2ExistingTableWriteExec {
+
+  override lazy val references: AttributeSet = query.outputSet
+  override lazy val stringArgs: Iterator[Any] = Iterator(query, write)
+
+  override lazy val writingTask: WritingSparkTask = DeltaWithMetadataWritingSparkTask(projections)
+
+  override protected def withNewChildInternal(newChild: SparkPlan): WriteDeltaExec = {
+    copy(query = newChild)
+  }
+}
+
+// a trait similar to V2ExistingTableWriteExec but supports custom write tasks
+trait ExtendedV2ExistingTableWriteExec extends V2ExistingTableWriteExec {
+  def writingTask: WritingSparkTask
+
+  protected override def writeWithV2(batchWrite: BatchWrite): Seq[InternalRow] = {
+    val rdd: RDD[InternalRow] = {
+      val tempRdd = query.execute()
+      // SPARK-23271 If we are attempting to write a zero partition rdd, create a dummy single
+      // partition rdd to make sure we at least set up one write task to write the metadata.
+      if (tempRdd.partitions.length == 0) {
+        sparkContext.parallelize(Array.empty[InternalRow], 1)
+      } else {
+        tempRdd
+      }
+    }
+    // introduce a local var to avoid serializing the whole class
+    val task = writingTask
+    val writerFactory = batchWrite.createBatchWriterFactory(
+      PhysicalWriteInfoImpl(rdd.getNumPartitions))
+    val useCommitCoordinator = batchWrite.useCommitCoordinator
+    val messages = new Array[WriterCommitMessage](rdd.partitions.length)
+    val totalNumRowsAccumulator = new LongAccumulator()
+
+    logInfo(s"Start processing data source write support: $batchWrite. " +
+      s"The input RDD has ${messages.length} partitions.")
+
+    // Avoid object not serializable issue.
+    val writeMetrics: Map[String, SQLMetric] = customMetrics
+
+    try {
+      sparkContext.runJob(
+        rdd,
+        (context: TaskContext, iter: Iterator[InternalRow]) =>
+          task.run(writerFactory, context, iter, useCommitCoordinator, writeMetrics),
+        rdd.partitions.indices,
+        (index, result: DataWritingSparkTaskResult) => {
+          val commitMessage = result.writerCommitMessage
+          messages(index) = commitMessage
+          totalNumRowsAccumulator.add(result.numRows)
+          batchWrite.onDataWriterCommit(commitMessage)
+        }
+      )
+
+      logInfo(s"Data source write support $batchWrite is committing.")
+      batchWrite.commit(messages)
+      logInfo(s"Data source write support $batchWrite committed.")
+      commitProgress = Some(StreamWriterCommitProgress(totalNumRowsAccumulator.value))
+    } catch {
+      case cause: Throwable =>
+        logError(s"Data source write support $batchWrite is aborting.")
+        try {
+          batchWrite.abort(messages)
+        } catch {
+          case t: Throwable =>
+            logError(s"Data source write support $batchWrite failed to abort.")
+            cause.addSuppressed(t)
+            throw QueryExecutionErrors.writingJobFailedError(cause)
+        }
+        logError(s"Data source write support $batchWrite aborted.")
+        cause match {
+          // Only wrap non fatal exceptions.
+          case NonFatal(e) => throw QueryExecutionErrors.writingJobAbortedError(e)
+          case _ => throw cause
+        }
+    }
+
+    Nil
+  }
+}
+
+trait WritingSparkTask extends Logging with Serializable {
+
+  protected def writeFunc(writer: DataWriter[InternalRow], row: InternalRow): Unit
+
+  def run(
+      writerFactory: DataWriterFactory,
+      context: TaskContext,
+      iter: Iterator[InternalRow],
+      useCommitCoordinator: Boolean,
+      customMetrics: Map[String, SQLMetric]): DataWritingSparkTaskResult = {
+    val stageId = context.stageId()
+    val stageAttempt = context.stageAttemptNumber()
+    val partId = context.partitionId()
+    val taskId = context.taskAttemptId()
+    val attemptId = context.attemptNumber()
+    val dataWriter = writerFactory.createWriter(partId, taskId)

Review comment:
       Added a cast here.
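
       The exact line is past the quoted context, but a minimal sketch of what such a cast could look like (assuming the factory produced for a `DeltaWrite` always returns a `DeltaWriter[InternalRow]`, the type already imported above):

       ```scala
       // sketch only: cast the generic DataWriter to the delta-aware API so the
       // task can route inserts, updates, and deletes through a single writer;
       // assumes DeltaWrite factories always return DeltaWriter[InternalRow]
       val dataWriter = writerFactory.createWriter(partId, taskId)
         .asInstanceOf[DeltaWriter[InternalRow]]
       ```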



