dongjoon-hyun commented on a change in pull request #31355:
URL: https://github.com/apache/spark/pull/31355#discussion_r565709706
##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DistributionAndOrderingUtils.scala
##########
@@ -32,21 +32,29 @@ object DistributionAndOrderingUtils {
     case write: RequiresDistributionAndOrdering =>
       val resolver = conf.resolver
-      val distribution = write.requiredDistribution match {
+      val (distribution, numPartitions) = write.requiredDistribution match {
         case d: OrderedDistribution =>
-          d.ordering.map(e => toCatalyst(e, query, resolver))
+          val dist = d.ordering.map(e => toCatalyst(e, query, resolver))
+          val numParts = d.requiredNumPartitions()
+          (dist, numParts)
         case d: ClusteredDistribution =>
-          d.clustering.map(e => toCatalyst(e, query, resolver))
+          val dist = d.clustering.map(e => toCatalyst(e, query, resolver))
+          val numParts = d.requiredNumPartitions()
+          (dist, numParts)
         case _: UnspecifiedDistribution =>
-          Array.empty[Expression]
+          (Array.empty[Expression], 0)
       }
       val queryWithDistribution = if (distribution.nonEmpty) {
-        val numShufflePartitions = conf.numShufflePartitions
+        val finalNumPartitions = if (numPartitions > 0) {
Review comment:
~Shall we keep the original variable name?~ Never mind. I was a little
confused by the `final` wording, but it looks reasonable.
```scala
- val finalNumPartitions = if (numPartitions > 0) {
+ val numShufflePartitions = if (numPartitions > 0) {
```
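For readers without the full file in front of them, the change under discussion boils down to a fallback rule: use the distribution's requested partition count when it is positive, otherwise keep the session's `conf.numShufflePartitions`. The sketch below is a standalone illustration of that rule, not the Spark source; `resolveNumShufflePartitions`, `requiredNumPartitions`, and `defaultShufflePartitions` are hypothetical stand-ins for the values the real code reads from the write's distribution and from `SQLConf`.
```scala
// Standalone sketch of the partition-count fallback discussed above.
// None of these names come from the Spark codebase; they only mirror the
// shape of the logic in the diff.
object NumPartitionsFallbackSketch {

  // A positive requirement means the data source asked for an exact partition
  // count; zero (or a negative value) means "no requirement", so the session
  // default (conf.numShufflePartitions in Spark) applies.
  def resolveNumShufflePartitions(
      requiredNumPartitions: Int,
      defaultShufflePartitions: Int): Int = {
    if (requiredNumPartitions > 0) requiredNumPartitions else defaultShufflePartitions
  }

  def main(args: Array[String]): Unit = {
    assert(resolveNumShufflePartitions(10, 200) == 10)   // source-requested count wins
    assert(resolveNumShufflePartitions(0, 200) == 200)   // fall back to the session default
    println("fallback logic behaves as expected")
  }
}
```
Whichever name the PR settles on for the local variable (`finalNumPartitions` or `numShufflePartitions`), the fallback behavior itself is the same.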