Github user jiangxb1987 commented on a diff in the pull request:
https://github.com/apache/spark/pull/20664#discussion_r170455929
--- Diff: core/src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala ---
@@ -266,17 +266,15 @@ private class DefaultPartitionCoalescer(val balanceSlack: Double = 0.10)
numCreated += 1
}
}
- tries = 0
// if we don't have enough partition groups, create duplicates
while (numCreated < targetLen) {
- val (nxt_replica, nxt_part) = partitionLocs.partsWithLocs(tries)
- tries += 1
+ val (nxt_replica, nxt_part) = partitionLocs.partsWithLocs(
--- End diff --
Perhaps add a comment here to explain the purpose of this change?
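
For context, here is a minimal, self-contained sketch of what the duplicated-group loop appears to do after this change. The new index expression is cut off in the diff above, so the random pick, the simplified (host, partition) pairs, and all names below are assumptions for illustration only, not the actual DefaultPartitionCoalescer code:

```scala
import scala.collection.mutable.ArrayBuffer
import scala.util.Random

// Hypothetical stand-ins for DefaultPartitionCoalescer internals, for illustration only.
object DuplicateGroupSketch {
  def main(args: Array[String]): Unit = {
    // (preferred host, input partition index) pairs, standing in for partitionLocs.partsWithLocs
    val partsWithLocs: Array[(String, Int)] = Array(("host1", 0), ("host2", 1), ("host3", 2))

    val targetLen = 8                              // desired number of partition groups
    val groups = ArrayBuffer.empty[(String, Int)]  // stand-in for groupArr
    var numCreated = groups.length
    val rnd = new Random(7)

    // If we don't have enough partition groups, create duplicates.
    // Old behaviour: index partsWithLocs with the monotonically increasing `tries`
    // counter, so the duplicates cluster on the first few entries (and their hosts).
    // Assumed new behaviour: pick the entry to copy at random, spreading the
    // duplicated groups' preferred locations across hosts.
    while (numCreated < targetLen) {
      val (nxtReplica, nxtPart) = partsWithLocs(rnd.nextInt(partsWithLocs.length))
      groups += ((nxtReplica, nxtPart))
      numCreated += 1
    }

    groups.foreach { case (host, part) => println(s"group on $host duplicating partition $part") }
  }
}
```

If that reading is right, the comment asked for above could simply state that the duplicated groups no longer all copy the preferred locations of the first few entries of partsWithLocs.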
---