waitinfuture commented on code in PR #2462:
URL: https://github.com/apache/celeborn/pull/2462#discussion_r1571697665
##########
client/src/main/scala/org/apache/celeborn/client/ChangePartitionManager.scala:
##########
@@ -79,14 +81,17 @@ class ChangePartitionManager(
    batchHandleChangePartitionExecutors.submit {
      new Runnable {
        override def run(): Unit = {
-          val distinctPartitions = requests.synchronized {
-            // For each partition only need handle one request
-            requests.asScala.filter { case (partitionId, _) =>
-              !inBatchPartitions.get(shuffleId).contains(partitionId)
-            }.map { case (partitionId, request) =>
-              inBatchPartitions.get(shuffleId).add(partitionId)
-              request.asScala.toArray.maxBy(_.epoch)
-            }.toArray
+          val distinctPartitions = {
+            requests.asScala.map { case (partitionId, request) =>
+              locks(partitionId % locks.length).synchronized {
+                if (!inBatchPartitions.contains(partitionId)) {
+                  inBatchPartitions.get(shuffleId).add(partitionId)
+                  Some(request.asScala.toArray.maxBy(_.epoch))
+                } else {
+                  None
+                }
+              }
+            }.filter(_.isDefined).map(_.get).toArray
Review Comment:
> it feels like it will have a lot more contention - as each entry in the requests map will need to acquire a lock
To reduce the frequency of acquiring locks, I think we can first compute the lock bucket for each partition id, then group the partition ids by lock bucket, and then acquire each bucket's lock once to process the whole group (visiting the buckets in random order). Though I'm not sure how beneficial this will be.