ulysses-you commented on a change in pull request #28032:
URL: https://github.com/apache/spark/pull/28032#discussion_r654214255



##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala
##########
@@ -221,6 +221,106 @@ object DataSourceAnalysis extends Rule[LogicalPlan] with CastSupport {
   }
 }
 
+/**
+ * Add a repartition before writing Spark SQL Data Sources. It supports three patterns:
+ * 1. Repartition by none when writing normal table/directory.
+ * 2. Repartition by dynamic partition column when writing dynamic partition table/directory.
+ * 3. Repartition by bucket column with bucket number and sort by sort column when writing
+ *    bucket table/directory.
+ *
+ * Note that this rule must be run after `DataSourceAnalysis`.
+ */
+object RepartitionWritingDataSource extends Rule[LogicalPlan] {
+  override def apply(plan: LogicalPlan): LogicalPlan = {
+    if (conf.repartitionWritingDataSource) {
+      insertRepartition(plan)
+    } else {
+      plan
+    }
+  }
+
+  private def insertRepartition(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
+    case c @ CreateDataSourceTableAsSelectCommand(table, _, query, _) if c.resolved =>
+      val dynamicPartExps = resolveColumnNames(table.partitionColumnNames, query.output)
+      applyRepartition(c, table.bucketSpec, dynamicPartExps)
+
+    case i: InsertIntoHadoopFsRelationCommand if i.resolved =>
+      val dynamicPartExps = i.partitionColumns
+        .filterNot(p => i.staticPartitions.exists(s => conf.resolver(p.name, s._1)))
+      applyRepartition(i, i.bucketSpec, dynamicPartExps)
+
+    case i: InsertIntoDataSourceDirCommand if i.resolved && canApplyRepartition(i.query) =>
+      i.copy(query = RepartitionByExpression(Nil, i.query, None))
+
+    // InsertIntoDataSourceCommand only accepts InsertableRelation, so it does not need a repartition.
+  }
+
+  def applyRepartition(
+      dataWritingCommand: DataWritingCommand,
+      bucketSpec: Option[BucketSpec],
+      partitionColumns: Seq[Expression]): LogicalPlan = {
+    val query = dataWritingCommand.query
+    (bucketSpec, partitionColumns) match {
+      case (None, Nil) =>
+        if (canApplyRepartition(query)) {
+          dataWritingCommand.withNewChildrenInternal(
+            IndexedSeq(RepartitionByExpression(Nil, query, None)))
+        } else {
+          dataWritingCommand
+        }
+
+      case (None, partExps @ _ +: _) =>
+        query match {
+          case RepartitionByExpression(partExpressions, _, _) if partExpressions == partExps =>
+            dataWritingCommand
+          case _ =>
+            dataWritingCommand.withNewChildrenInternal(
+              IndexedSeq(RepartitionByExpression(partExps, query, None)))
+        }
+
+      case (Some(bucket), _) if bucket.sortColumnNames.nonEmpty =>
+        val bucketExps = resolveColumnNames(bucket.bucketColumnNames, query.output)
+        val sortExps = resolveColumnNames(bucket.sortColumnNames, query.output)
+          .map(SortOrder(_, Ascending))
+        query match {
+          case Sort(order, false, RepartitionByExpression(partExpr, _, Some(bucket.numBuckets)))
+            if order == sortExps && partExpr == bucketExps =>
+            dataWritingCommand
+          case _ =>
+            dataWritingCommand.withNewChildrenInternal(
+              IndexedSeq(Sort(sortExps, false,
+                RepartitionByExpression(bucketExps, query, Some(bucket.numBuckets)))))

Review comment:
       Why don't we repartition by the partition columns for a bucketed table when it's a dynamic partition query?
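       For illustration, a hypothetical variant of the sorted-bucket branch that also
       distributes by the dynamic partition columns could look like the sketch below. It
       reuses the names already in scope in `applyRepartition` (binding `partitionColumns`
       as `partExps`), drops the existing already-repartitioned check for brevity, and
       whether prepending the partition columns still keeps the expected one-file-per-bucket
       layout would need to be verified:

           case (Some(bucket), partExps) if bucket.sortColumnNames.nonEmpty =>
             val bucketExps = resolveColumnNames(bucket.bucketColumnNames, query.output)
             val sortExps = resolveColumnNames(bucket.sortColumnNames, query.output)
               .map(SortOrder(_, Ascending))
             // Distribute by the dynamic partition columns first, then the bucket
             // columns, so rows of one partition/bucket pair land in the same task.
             dataWritingCommand.withNewChildrenInternal(
               IndexedSeq(Sort(sortExps, false,
                 RepartitionByExpression(partExps ++ bucketExps, query,
                   Some(bucket.numBuckets)))))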

##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala
##########
@@ -221,6 +221,106 @@ object DataSourceAnalysis extends Rule[LogicalPlan] with CastSupport {
   }
 }
 
+/**
+ * Add a repartition before writing Spark SQL Data Sources. It supports three patterns:
+ * 1. Repartition by none when writing normal table/directory.
+ * 2. Repartition by dynamic partition column when writing dynamic partition table/directory.
+ * 3. Repartition by bucket column with bucket number and sort by sort column when writing
+ *    bucket table/directory.
+ *
+ * Note that this rule must be run after `DataSourceAnalysis`.
+ */
+object RepartitionWritingDataSource extends Rule[LogicalPlan] {
+  override def apply(plan: LogicalPlan): LogicalPlan = {
+    if (conf.repartitionWritingDataSource) {
+      insertRepartition(plan)
+    } else {
+      plan
+    }
+  }
+
+  private def insertRepartition(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
+    case c @ CreateDataSourceTableAsSelectCommand(table, _, query, _) if c.resolved =>
+      val dynamicPartExps = resolveColumnNames(table.partitionColumnNames, query.output)
+      applyRepartition(c, table.bucketSpec, dynamicPartExps)
+
+    case i: InsertIntoHadoopFsRelationCommand if i.resolved =>
+      val dynamicPartExps = i.partitionColumns
+        .filterNot(p => i.staticPartitions.exists(s => conf.resolver(p.name, s._1)))
+      applyRepartition(i, i.bucketSpec, dynamicPartExps)
+
+    case i: InsertIntoDataSourceDirCommand if i.resolved && canApplyRepartition(i.query) =>
+      i.copy(query = RepartitionByExpression(Nil, i.query, None))
+
+    // InsertIntoDataSourceCommand only accepts InsertableRelation, so it does not need a repartition.
+  }
+
+  def applyRepartition(
+      dataWritingCommand: DataWritingCommand,
+      bucketSpec: Option[BucketSpec],
+      partitionColumns: Seq[Expression]): LogicalPlan = {
+    val query = dataWritingCommand.query
+    (bucketSpec, partitionColumns) match {
+      case (None, Nil) =>
+        if (canApplyRepartition(query)) {
+          dataWritingCommand.withNewChildrenInternal(
+            IndexedSeq(RepartitionByExpression(Nil, query, None)))
+        } else {
+          dataWritingCommand
+        }
+
+      case (None, partExps @ _ +: _) =>
+        query match {
+          case RepartitionByExpression(partExpressions, _, _) if partExpressions == partExps =>
+            dataWritingCommand
+          case _ =>
+            dataWritingCommand.withNewChildrenInternal(
+              IndexedSeq(RepartitionByExpression(partExps, query, None)))
+        }
+
+      case (Some(bucket), _) if bucket.sortColumnNames.nonEmpty =>
+        val bucketExps = resolveColumnNames(bucket.bucketColumnNames, query.output)
+        val sortExps = resolveColumnNames(bucket.sortColumnNames, query.output)
+          .map(SortOrder(_, Ascending))
+        query match {
+          case Sort(order, false, RepartitionByExpression(partExpr, _, Some(bucket.numBuckets)))
+            if order == sortExps && partExpr == bucketExps =>
+            dataWritingCommand
+          case _ =>
+            dataWritingCommand.withNewChildrenInternal(
+              IndexedSeq(Sort(sortExps, false,
+                RepartitionByExpression(bucketExps, query, Some(bucket.numBuckets)))))
+        }
+
+      case (Some(bucket), _) if bucket.sortColumnNames.isEmpty =>
+        val bucketExps = resolveColumnNames(bucket.bucketColumnNames, query.output)
+        query match {
+          case RepartitionByExpression(partExpr, _, Some(bucket.numBuckets))
+            if partExpr == bucketExps =>
+            dataWritingCommand
+          case _ =>
+            dataWritingCommand.withNewChildrenInternal(
+              IndexedSeq(RepartitionByExpression(bucketExps, query, Some(bucket.numBuckets))))
+        }
+    }
+  }
+
+  def resolveColumnNames(
+      columnNames: Seq[String], outputAttrs: Seq[Attribute]): Seq[NamedExpression] = {
+    columnNames.map { c =>
+      outputAttrs.resolve(c :: Nil, conf.resolver).
+        getOrElse(throw new AnalysisException(s"Cannot resolve column name $c among (" +
+          s"${outputAttrs.map(_.name).mkString(",")})."))
+    }
+  }
+
+  def canApplyRepartition(plan: LogicalPlan): Boolean = {
+    plan match {
+      case _: RepartitionByExpression | _: Sort => false

Review comment:
       Should this also match `Repartition`?
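       For reference, a sketch of the guard this comment seems to ask for: also bail out on
       a plain `Repartition` node (the one produced by `df.repartition(n)`/`coalesce(n)`),
       so that a user-specified distribution is never overridden. The `case _ => true`
       default branch is an assumption, since the quoted diff is truncated at this point:

           def canApplyRepartition(plan: LogicalPlan): Boolean = {
             plan match {
               // Respect any distribution or ordering the user already set explicitly.
               case _: Repartition | _: RepartitionByExpression | _: Sort => false
               case _ => true
             }
           }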

##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala
##########
@@ -221,6 +221,106 @@ object DataSourceAnalysis extends Rule[LogicalPlan] with CastSupport {
   }
 }
 
+/**
+ * Add a repartition before writing Spark SQL Data Sources. It supports three patterns:
+ * 1. Repartition by none when writing normal table/directory.
+ * 2. Repartition by dynamic partition column when writing dynamic partition table/directory.
+ * 3. Repartition by bucket column with bucket number and sort by sort column when writing
+ *    bucket table/directory.
+ *
+ * Note that this rule must be run after `DataSourceAnalysis`.
+ */
+object RepartitionWritingDataSource extends Rule[LogicalPlan] {
+  override def apply(plan: LogicalPlan): LogicalPlan = {
+    if (conf.repartitionWritingDataSource) {
+      insertRepartition(plan)
+    } else {
+      plan
+    }
+  }
+
+  private def insertRepartition(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
+    case c @ CreateDataSourceTableAsSelectCommand(table, _, query, _) if c.resolved =>
+      val dynamicPartExps = resolveColumnNames(table.partitionColumnNames, query.output)
+      applyRepartition(c, table.bucketSpec, dynamicPartExps)
+
+    case i: InsertIntoHadoopFsRelationCommand if i.resolved =>

Review comment:
       I think we can just use `canApplyRepartition(i.query)` here too, to simplify the code and to respect a user-specified repartition by other columns.
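       Sketched out, the suggestion would look roughly like this, reusing the existing
       guard so that a query the user already repartitioned or sorted is left untouched:

           case i: InsertIntoHadoopFsRelationCommand
               if i.resolved && canApplyRepartition(i.query) =>
             // Only the dynamic partition columns matter for the shuffle; static
             // partition values are constants and are filtered out here.
             val dynamicPartExps = i.partitionColumns
               .filterNot(p => i.staticPartitions.exists(s => conf.resolver(p.name, s._1)))
             applyRepartition(i, i.bucketSpec, dynamicPartExps)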



