CodingCat commented on code in PR #2462:
URL: https://github.com/apache/celeborn/pull/2462#discussion_r1571780407
##########
client/src/main/scala/org/apache/celeborn/client/ChangePartitionManager.scala:
##########
@@ -151,7 +156,7 @@ class ChangePartitionManager(
oldPartition,
cause)
-    requests.synchronized {
+    locks(partitionId % locks.length).synchronized {
       if (requests.containsKey(partitionId)) {
         requests.get(partitionId).add(changePartition)
         logTrace(s"[handleRequestPartitionLocation] For $shuffleId, request for same partition" +
Review Comment:
I have updated the code; will run more tests in our env.
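
For context, a minimal sketch of the striped-lock pattern this hunk switches to; the bucket count, field setup, and helper name below are illustrative assumptions, not the PR's exact code:

```scala
object StripedLockSketch {
  // The bucket count would presumably come from the new parallelism conf.
  val lockBucketCount = 8
  val locks: Array[AnyRef] = Array.fill(lockBucketCount)(new AnyRef)

  // Requests for different partitions hash to different monitors, so they no
  // longer contend on one global `requests` lock; requests for the same
  // partition still serialize on the same stripe.
  def withPartitionLock[T](partitionId: Int)(body: => T): T =
    locks(partitionId % locks.length).synchronized(body)
}
```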
##########
client/src/main/scala/org/apache/celeborn/client/ChangePartitionManager.scala:
##########
@@ -79,14 +81,18 @@ class ChangePartitionManager(
batchHandleChangePartitionExecutors.submit {
new Runnable {
override def run(): Unit = {
-          val distinctPartitions = requests.synchronized {
-            // For each partition only need handle one request
-            requests.asScala.filter { case (partitionId, _) =>
-              !inBatchPartitions.get(shuffleId).contains(partitionId)
-            }.map { case (partitionId, request) =>
-              inBatchPartitions.get(shuffleId).add(partitionId)
-              request.asScala.toArray.maxBy(_.epoch)
-            }.toArray
+          val distinctPartitions = {
+            val requestSet = inBatchPartitions.get(shuffleId)
+            requests.asScala.map { case (partitionId, request) =>
+              locks(partitionId % locks.length).synchronized {
+                if (!inBatchPartitions.contains(partitionId)) {
Review Comment:
Yeah, I meant requestSet.
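
To make the point concrete, here is a sketch of the corrected check using the local requestSet rather than inBatchPartitions; the types and surrounding fields are simplified stand-ins, not the PR's exact code:

```scala
import java.util.concurrent.ConcurrentHashMap
import scala.collection.JavaConverters._

object RequestSetSketch {
  // Simplified stand-ins for ChangePartitionManager's fields.
  case class ChangePartitionRequest(partitionId: Int, epoch: Int)
  val locks: Array[AnyRef] = Array.fill(8)(new AnyRef)
  val requests = new ConcurrentHashMap[Int, java.util.Set[ChangePartitionRequest]]()
  val requestSet = ConcurrentHashMap.newKeySet[Int]() // the per-shuffle "in batch" set

  // The check-then-add is guarded by the partition's striped lock and tests the
  // local requestSet, which is what the comment above is pointing at.
  def distinctPartitions: Array[ChangePartitionRequest] =
    requests.asScala.flatMap { case (partitionId, reqs) =>
      locks(partitionId % locks.length).synchronized {
        if (!requestSet.contains(partitionId)) {
          requestSet.add(partitionId)
          Some(reqs.asScala.maxBy(_.epoch))
        } else None
      }
    }.toArray
}
```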
##########
common/src/main/scala/org/apache/celeborn/common/CelebornConf.scala:
##########
@@ -3899,6 +3901,14 @@ object CelebornConf extends Logging {
.booleanConf
.createWithDefault(true)
+ val CLIENT_BATCH_HANDLE_CHANGE_PARTITION_PARALLELISM: ConfigEntry[Int] =
+ buildConf("celeborn.client.shuffle.batchHandleChangePartition.parallelism")
Review Comment:
Updated.
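
For reference, a sketch of how such an entry is typically completed, following the builder chain visible just above (.booleanConf / .createWithDefault); the doc string and default value below are assumptions, not the PR's final values:

```scala
val CLIENT_BATCH_HANDLE_CHANGE_PARTITION_PARALLELISM: ConfigEntry[Int] =
  buildConf("celeborn.client.shuffle.batchHandleChangePartition.parallelism")
    .doc("Number of lock stripes used when batch-handling change partition requests.")
    .intConf
    .createWithDefault(8)
```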
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]