wangyum commented on a change in pull request #28032:
URL: https://github.com/apache/spark/pull/28032#discussion_r650358297



##########
File path: 
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala
##########
@@ -221,6 +221,46 @@ object DataSourceAnalysis extends Rule[LogicalPlan] with 
CastSupport {
   }
 }
 
+/**
+ * Add a repartition by dynamic partition columns before inserting into a Datasource table.
+ *
+ * Note that, this rule must be run after `DataSourceAnalysis`.

Review comment:
       This rule applies to `CreateDataSourceTableAsSelectCommand`, 
`InsertIntoDataSourceDirCommand` and `InsertIntoHadoopFsRelationCommand`.
   
https://github.com/apache/spark/blob/95639a763cd4f73d295c11f21701321905399787/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala#L139-L156

##########
File path: 
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala
##########
@@ -221,6 +221,106 @@ object DataSourceAnalysis extends Rule[LogicalPlan] with 
CastSupport {
   }
 }
 
+/**
+ * Add a repartition before writing Spark SQL Data Sources. It supports three 
patterns:
+ * 1. Repartition by none when writing normal table/directory.
+ * 2. Repartition by dynamic partition column when writing dynamic partition 
table/directory.
+ * 3. Repartition by bucket column with bucket number and sort by sort column 
when writing
+ *    bucket table/directory.
+ *
+ * Note that, this rule must be run after `DataSourceAnalysis`.
+ */
+object RepartitionWritingDataSource extends Rule[LogicalPlan] {
+  override def apply(plan: LogicalPlan): LogicalPlan = {
+    if (conf.repartitionWritingDataSource) {
+      insertRepartition(plan)
+    } else {
+      plan
+    }
+  }
+
+  private def insertRepartition(plan: LogicalPlan): LogicalPlan = plan 
resolveOperators {
+    case c @ CreateDataSourceTableAsSelectCommand(table, _, query, _) if 
c.resolved =>
+      val dynamicPartExps = resolveColumnNames(table.partitionColumnNames, 
query.output)
+      applyRepartition(c, table.bucketSpec, dynamicPartExps)
+
+    case i: InsertIntoHadoopFsRelationCommand if i.resolved =>
+      val dynamicPartExps = i.partitionColumns
+        .filterNot(p => i.staticPartitions.exists(s => conf.resolver(p.name, 
s._1)))
+      applyRepartition(i, i.bucketSpec, dynamicPartExps)
+
+    case i: InsertIntoDataSourceDirCommand if i.resolved && 
canApplyRepartition(i.query) =>
+      i.copy(query = RepartitionByExpression(Nil, i.query, None))
+
+    // InsertIntoDataSourceCommand only accepts InsertableRelation, so it does not need 
repartitioning.

Review comment:
       Supports all commands supported by `DataSourceAnalysis`.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to