yl09099 commented on code in PR #1146:
URL:
https://github.com/apache/incubator-uniffle/pull/1146#discussion_r1381138950
##########
client-spark/spark3/src/main/java/org/apache/spark/shuffle/RssShuffleManager.java:
##########
@@ -1104,4 +1112,79 @@ private ShuffleHandleInfo getRemoteShuffleHandleInfo(int
shuffleId) {
public void addFailuresShuffleServerInfos(String shuffleServerId) {
failuresShuffleServerIds.add(shuffleServerId);
}
+
+ /**
+ * Reassign the ShuffleServer list for ShuffleId
+ *
+ * @param shuffleId
+ * @param numPartitions
+ */
+ @Override
+ public synchronized boolean reassignShuffleServers(
+ int stageId, int stageAttemptNumber, int shuffleId, int numPartitions) {
+ String stageIdAndAttempt = stageId + "_" + stageAttemptNumber;
+ Boolean needReassgin =
serverAssignedInfos.computeIfAbsent(stageIdAndAttempt, id -> false);
+ if (!needReassgin) {
+ String storageType =
sparkConf.get(RssSparkConfig.RSS_STORAGE_TYPE.key());
+ RemoteStorageInfo defaultRemoteStorage =
+ new
RemoteStorageInfo(sparkConf.get(RssSparkConfig.RSS_REMOTE_STORAGE_PATH.key(),
""));
+ RemoteStorageInfo remoteStorage =
+ ClientUtils.fetchRemoteStorage(
+ id.get(), defaultRemoteStorage, dynamicConfEnabled, storageType,
shuffleWriteClient);
+ Set<String> assignmentTags =
RssSparkShuffleUtils.getAssignmentTags(sparkConf);
+ int requiredShuffleServerNumber =
+ RssSparkShuffleUtils.getRequiredShuffleServerNumber(sparkConf);
+ long retryInterval =
sparkConf.get(RssSparkConfig.RSS_CLIENT_ASSIGNMENT_RETRY_INTERVAL);
+ int retryTimes =
sparkConf.get(RssSparkConfig.RSS_CLIENT_ASSIGNMENT_RETRY_TIMES);
+ int estimateTaskConcurrency =
RssSparkShuffleUtils.estimateTaskConcurrency(sparkConf);
+ /** Before reassigning ShuffleServer, clear the ShuffleServer list in
ShuffleWriteClient. */
+ shuffleWriteClient.unregisterShuffle(id.get(), shuffleId);
+ Map<Integer, List<ShuffleServerInfo>> partitionToServers;
+ try {
+ partitionToServers =
+ RetryUtils.retry(
+ () -> {
+ ShuffleAssignmentsInfo response =
+ shuffleWriteClient.getShuffleAssignments(
+ id.get(),
+ shuffleId,
+ numPartitions,
+ 1,
+ assignmentTags,
+ requiredShuffleServerNumber,
+ estimateTaskConcurrency,
+ failuresShuffleServerIds);
+ registerShuffleServers(
+ id.get(), shuffleId,
response.getServerToPartitionRanges(), remoteStorage);
+ return response.getPartitionToServers();
+ },
+ retryInterval,
+ retryTimes);
+
+ } catch (Throwable throwable) {
+ throw new RssException("registerShuffle failed!", throwable);
+ }
+ /**
+ * we need to clear the metadata of the completed task, otherwise some
of the stage's data
+ * will be lost
+ */
+ try {
+ unregisterAllMapOutput(shuffleId);
Review Comment:
> What happens if there are not enough ShuffleServers to assign at this time?
The ShuffleServer allocation happens before the metadata is cleaned; if there
are not enough ShuffleServers available, the allocation code a few lines above
this one throws an exception directly, so this point is never reached.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]