RussellSpitzer commented on a change in pull request #3461:
URL: https://github.com/apache/iceberg/pull/3461#discussion_r745061932



##########
File path: spark/v3.2/spark-extensions/src/main/scala/org/apache/spark/sql/connector/expressions/TruncateTransform.scala
##########
@@ -17,21 +17,22 @@
  * under the License.
  */
 
+package org.apache.spark.sql.connector.expressions
 
-package org.apache.spark.sql.connector.iceberg.distributions.impl;
+import org.apache.spark.sql.types.IntegerType
 
-import org.apache.spark.sql.connector.iceberg.distributions.OrderedDistribution;
-import org.apache.spark.sql.connector.iceberg.expressions.SortOrder;
-
-public class OrderedDistributionImpl implements OrderedDistribution {
-  private SortOrder[] orderingExprs;
-
-  public OrderedDistributionImpl(SortOrder[] orderingExprs) {
-    this.orderingExprs = orderingExprs;
-  }
-
-  @Override
-  public SortOrder[] ordering() {
-    return orderingExprs;
+private[sql] object TruncateTransform {
+  def unapply(expr: Expression): Option[(Int, FieldReference)] = expr match {
+    case transform: Transform =>
+      transform match {
+        case NamedTransform("truncate", Seq(Ref(seq: Seq[String]), Lit(value: Int, IntegerType))) =>

Review comment:
       Just curious, are we matching the two cases here because we aren't guaranteed the order of the elements?
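
       For context, here is a minimal, self-contained sketch of the idea behind such two-case matching. The types below are toy stand-ins for illustration, not Spark's private Ref/Lit/NamedTransform extractors:

           // Toy stand-ins for the transform argument types (hypothetical).
           sealed trait Arg
           case class Ref(parts: Seq[String]) extends Arg  // a column reference
           case class Lit(width: Int) extends Arg          // an integer literal
           case class NamedTransform(name: String, args: Seq[Arg])

           object TruncateTransform {
             // Accept both argument orders, since the transform may arrive as
             // truncate(col, width) or truncate(width, col).
             def unapply(t: NamedTransform): Option[(Int, Seq[String])] = t match {
               case NamedTransform("truncate", Seq(Ref(parts), Lit(width))) => Some((width, parts))
               case NamedTransform("truncate", Seq(Lit(width), Ref(parts))) => Some((width, parts))
               case _ => None
             }
           }

           // Both spellings extract the same (width, column) pair:
           assert(TruncateTransform.unapply(NamedTransform("truncate", Seq(Ref(Seq("c")), Lit(4)))) == Some((4, Seq("c"))))
           assert(TruncateTransform.unapply(NamedTransform("truncate", Seq(Lit(4), Ref(Seq("c"))))) == Some((4, Seq("c"))))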

##########
File path: spark/v3.2/spark-extensions/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ExtendedV2Writes.scala
##########
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.spark.sql.execution.datasources.v2
+
+import java.util.UUID
+import org.apache.spark.sql.catalyst.expressions.PredicateHelper
+import org.apache.spark.sql.catalyst.plans.logical.AppendData
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.catalyst.plans.logical.OverwriteByExpression
+import org.apache.spark.sql.catalyst.plans.logical.OverwritePartitionsDynamic
+import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.catalyst.utils.PlanUtils.isIcebergRelation
+import org.apache.spark.sql.connector.catalog.Table
+import org.apache.spark.sql.connector.write.LogicalWriteInfoImpl
+import org.apache.spark.sql.connector.write.SupportsDynamicOverwrite
+import org.apache.spark.sql.connector.write.SupportsOverwrite
+import org.apache.spark.sql.connector.write.SupportsTruncate
+import org.apache.spark.sql.connector.write.WriteBuilder
+import org.apache.spark.sql.errors.QueryCompilationErrors
+import org.apache.spark.sql.errors.QueryExecutionErrors
+import org.apache.spark.sql.execution.datasources.DataSourceStrategy
+import org.apache.spark.sql.sources.AlwaysTrue
+import org.apache.spark.sql.sources.Filter
+
+/**
+ * A rule that is inspired by V2Writes in Spark but supports Iceberg transforms.
+ */
+object ExtendedV2Writes extends Rule[LogicalPlan] with PredicateHelper {
+
+  import DataSourceV2Implicits._
+
+  override def apply(plan: LogicalPlan): LogicalPlan = plan transformDown {
+    case a @ AppendData(r: DataSourceV2Relation, query, options, _, None) if isIcebergRelation(r) =>
+      val writeBuilder = newWriteBuilder(r.table, query, options)
+      val write = writeBuilder.build()
+      val newQuery = ExtendedDistributionAndOrderingUtils.prepareQuery(write, query, conf)
+      a.copy(write = Some(write), query = newQuery)
+
+    case o @ OverwriteByExpression(r: DataSourceV2Relation, deleteExpr, query, options, _, None)
+        if isIcebergRelation(r) =>

Review comment:
       Nit: the formatting is a bit off in this section.
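
       For context, the newWriteBuilder helper called above is outside this hunk. A plausible sketch, modeled on Spark's own V2Writes rule (the exact signature in the PR is an assumption, and this relies on private[sql] APIs, so it would need to live under org.apache.spark.sql like the rule itself):

           import java.util.UUID
           import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
           import org.apache.spark.sql.connector.catalog.Table
           import org.apache.spark.sql.connector.write.{LogicalWriteInfoImpl, WriteBuilder}
           import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Implicits._

           // Sketch only; the real helper in the PR may differ.
           def newWriteBuilder(table: Table, query: LogicalPlan, writeOptions: Map[String, String]): WriteBuilder = {
             // each write gets a fresh query id; the builder sees the query schema and options
             val info = LogicalWriteInfoImpl(UUID.randomUUID().toString, query.schema, writeOptions.asOptions)
             table.asWritable.newWriteBuilder(info)
           }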

##########
File path: spark/v3.2/spark-extensions/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ExtendedDistributionAndOrderingUtils.scala
##########
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.spark.sql.execution.datasources.v2
+
+import org.apache.spark.sql.catalyst.expressions.Expression
+import org.apache.spark.sql.catalyst.expressions.ExtendedV2ExpressionUtils.toCatalyst
+import org.apache.spark.sql.catalyst.expressions.SortOrder
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.catalyst.plans.logical.RepartitionByExpression
+import org.apache.spark.sql.catalyst.plans.logical.Sort
+import org.apache.spark.sql.connector.distributions.ClusteredDistribution
+import org.apache.spark.sql.connector.distributions.OrderedDistribution
+import org.apache.spark.sql.connector.distributions.UnspecifiedDistribution
+import org.apache.spark.sql.connector.write.RequiresDistributionAndOrdering
+import org.apache.spark.sql.connector.write.Write
+import org.apache.spark.sql.errors.QueryCompilationErrors
+import org.apache.spark.sql.internal.SQLConf
+
+/**
+ * A rule that is inspired by DistributionAndOrderingUtils in Spark but supports Iceberg transforms.
+ *
+ * Note that similarly to the original rule in Spark, it does not let AQE pick the number of shuffle
+ * partitions. See SPARK-34230 for context.
+ */
+object ExtendedDistributionAndOrderingUtils {
+
+  def prepareQuery(write: Write, query: LogicalPlan, conf: SQLConf): LogicalPlan = write match {
+    case write: RequiresDistributionAndOrdering =>
+      val numPartitions = write.requiredNumPartitions()
+      val distribution = write.requiredDistribution match {
+        case d: OrderedDistribution => d.ordering.map(e => toCatalyst(e, query))
+        case d: ClusteredDistribution => d.clustering.map(e => toCatalyst(e, query))
+        case _: UnspecifiedDistribution => Array.empty[Expression]
+      }
+
+      val queryWithDistribution = if (distribution.nonEmpty) {
+        val finalNumPartitions = if (numPartitions > 0) {
+          numPartitions
+        } else {
+          conf.numShufflePartitions
+        }
+        // the conversion to catalyst expressions above produces SortOrder expressions
+        // for OrderedDistribution and generic expressions for ClusteredDistribution
+        // this allows RepartitionByExpression to pick either range or hash partitioning
+        RepartitionByExpression(distribution, query, finalNumPartitions)

Review comment:
       I think this should pass an Option through (None here) rather than filling in conf.numShufflePartitions when numPartitions is not set. I suggest this because I worry about how it will interact with AQE and adaptive splitting in the future.
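
       A minimal sketch of that suggestion (not the PR's code), assuming Spark 3.2's RepartitionByExpression constructor that takes Option[Int]:

           import org.apache.spark.sql.catalyst.expressions.Expression
           import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, RepartitionByExpression}

           def repartition(distribution: Seq[Expression], query: LogicalPlan, requiredNumPartitions: Int): LogicalPlan = {
             // None when the write did not ask for a specific count (<= 0 in the DSv2 API)
             val optNumPartitions = Some(requiredNumPartitions).filter(_ > 0)
             if (distribution.nonEmpty) {
               // leaving the count unset instead of pinning it to conf.numShufflePartitions
               // keeps AQE free to coalesce or split the shuffle partitions later
               RepartitionByExpression(distribution, query, optNumPartitions)
             } else {
               query
             }
           }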




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
