This is an automated email from the ASF dual-hosted git repository.
roryqi pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-uniffle.git
The following commit(s) were added to refs/heads/master by this push:
new 30332a4e9 [#1089] feat(spark): Add dynamic allocation patch for Spark 2.3 (#1242)
30332a4e9 is described below
commit 30332a4e926aa9cf44a137754385b4481fbee4de
Author: summaryzb <[email protected]>
AuthorDate: Mon Oct 16 20:44:01 2023 -0500
[#1089] feat(spark): Add dynamic allocation patch for Spark 2.3 (#1242)
### What changes were proposed in this pull request?
Add the dynamic allocation patch for Spark 2.3
### Why are the changes needed?
https://github.com/apache/incubator-uniffle/issues/1089
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
Manual local test
---
README.md | 2 +-
.../spark-2.3.4_dynamic_allocation_support.patch | 90 ++++++++++++++++++++++
2 files changed, 91 insertions(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 5ec89f35e..c18dca220 100644
--- a/README.md
+++ b/README.md
@@ -250,7 +250,7 @@ Deploy Steps:
### Support Spark dynamic allocation
To support spark dynamic allocation with Uniffle, spark code should be updated.
-There are 4 patches for spark (2.4.6/3.1.2/3.2.1/3.3.1/3.4.1) in patch/spark folder for reference.
+There are 7 patches for spark (2.3.4/2.4.6/3.0.1/3.1.2/3.2.1/3.3.1/3.4.1) in patch/spark folder for reference.
After apply the patch and rebuild spark, add following configuration in spark conf to enable dynamic allocation:
```
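
For reference, the README's configuration block is cut off by the diff context above. A minimal sketch of the usual pairing, written as Scala for spark-shell; it assumes a Spark 2.3.4 build with this patch applied, the Uniffle client jar on the classpath, and the `org.apache.spark.shuffle.RssShuffleManager` class name from the Uniffle docs:

```scala
import org.apache.spark.SparkConf

// Sketch only: dynamic allocation on, external shuffle service off.
// The patched ExecutorAllocationManager check accepts this combination
// because conf.isRssEnable is true for the Uniffle shuffle manager.
val conf = new SparkConf()
  .set("spark.dynamicAllocation.enabled", "true")
  .set("spark.shuffle.service.enabled", "false")
  .set("spark.shuffle.manager", "org.apache.spark.shuffle.RssShuffleManager")
```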
diff --git a/patch/spark/spark-2.3.4_dynamic_allocation_support.patch b/patch/spark/spark-2.3.4_dynamic_allocation_support.patch
new file mode 100644
index 000000000..61c21f1d2
--- /dev/null
+++ b/patch/spark/spark-2.3.4_dynamic_allocation_support.patch
@@ -0,0 +1,90 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+diff --git a/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala b/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala
+index 28f70528da1..a7f16bde5b2 100644
+--- a/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala
++++ b/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala
+@@ -205,7 +205,8 @@ private[spark] class ExecutorAllocationManager(
+     }
+     // Require external shuffle service for dynamic allocation
+     // Otherwise, we may lose shuffle files when killing executors
+-    if (!conf.getBoolean("spark.shuffle.service.enabled", false) && !testing) {
++    if (!conf.getBoolean("spark.shuffle.service.enabled", false) &&
++      !testing && !conf.isRssEnable) {
+       throw new SparkException("Dynamic allocation of executors requires the external " +
+         "shuffle service. You may enable this through spark.shuffle.service.enabled.")
+     }
+diff --git a/core/src/main/scala/org/apache/spark/SparkConf.scala b/core/src/main/scala/org/apache/spark/SparkConf.scala
+index 5670f7d8707..0fbecef9790 100644
+--- a/core/src/main/scala/org/apache/spark/SparkConf.scala
++++ b/core/src/main/scala/org/apache/spark/SparkConf.scala
+@@ -582,6 +582,10 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging with Seria
+     Utils.redact(this, getAll).sorted.map { case (k, v) => k + "=" + v }.mkString("\n")
+   }
+
++  /**
++   * Return true if remote shuffle service is enabled.
++   */
++  def isRssEnable: Boolean = get("spark.shuffle.manager", "sort").contains("RssShuffleManager")
+ }
+
+ private[spark] object SparkConf extends Logging {
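
Note that the new `isRssEnable` helper is a plain substring check against `spark.shuffle.manager`, so it treats any manager class name containing `RssShuffleManager` as a remote shuffle service. A quick illustration, assuming a spark-shell on a build patched as above:

```scala
import org.apache.spark.SparkConf

val rssConf = new SparkConf()
  .set("spark.shuffle.manager", "org.apache.spark.shuffle.RssShuffleManager")
rssConf.isRssEnable     // true: the value contains "RssShuffleManager"

val defaultConf = new SparkConf()
defaultConf.isRssEnable // false: "spark.shuffle.manager" defaults to "sort"
```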
+diff --git a/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala b/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
+index 6ec40a0da89..0d694a4c226 100644
+--- a/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
++++ b/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
+@@ -1508,7 +1508,8 @@ class DAGScheduler(
+       // if the cluster manager explicitly tells us that the entire worker was lost, then
+       // we know to unregister shuffle output. (Note that "worker" specifically refers to the process
+       // from a Standalone cluster, where the shuffle service lives in the Worker.)
+-      val fileLost = workerLost || !env.blockManager.externalShuffleServiceEnabled
++      val fileLost = (workerLost || !env.blockManager.externalShuffleServiceEnabled) &&
++        !sc.getConf.isRssEnable
+       removeExecutorAndUnregisterOutputs(
+         execId = execId,
+         fileLost = fileLost,
+diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
+index df8d9142f91..13be97b2fb7 100644
+--- a/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
++++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
+@@ -939,7 +939,7 @@ private[spark] class TaskSetManager(
+     // The reason is the next stage wouldn't be able to fetch the data from this dead executor
+     // so we would need to rerun these tasks on other executors.
+     if (tasks(0).isInstanceOf[ShuffleMapTask] && !env.blockManager.externalShuffleServiceEnabled
+-        && !isZombie) {
++        && !isZombie && !conf.isRssEnable) {
+       for ((tid, info) <- taskInfos if info.executorId == execId) {
+         val index = taskInfos(tid).index
+         if (successful(index) && !killedByOtherAttempt.contains(tid)) {
+diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/ShuffledRowRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/ShuffledRowRDD.scala
+index 862ee05392f..47138207686 100644
+--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/ShuffledRowRDD.scala
++++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/ShuffledRowRDD.scala
+@@ -147,6 +147,9 @@ class ShuffledRowRDD(
+   }
+
+   override def getPreferredLocations(partition: Partition): Seq[String] = {
++    if (conf.isRssEnable) {
++      return Nil
++    }
+     val tracker = SparkEnv.get.mapOutputTracker.asInstanceOf[MapOutputTrackerMaster]
+     val dep = dependencies.head.asInstanceOf[ShuffleDependency[_, _, _]]
+     tracker.getPreferredLocationsForShuffle(dep, partition.index)
+--
+2.39.3 (Apple Git-145)
+
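
Taken together, the four call sites share one idea: checks that assume shuffle files live with executors (or with the external shuffle service) are skipped when a remote shuffle service holds the data. A condensed restatement of the guards as standalone functions; this is an illustrative sketch, not Spark source, and `isRssEnable` exists only on a patched `SparkConf`:

```scala
import org.apache.spark.SparkConf

// ExecutorAllocationManager: the external shuffle service is mandatory for
// dynamic allocation only when neither testing mode nor Uniffle is active.
def needsExternalShuffleService(conf: SparkConf, testing: Boolean): Boolean =
  !conf.getBoolean("spark.shuffle.service.enabled", false) &&
    !testing && !conf.isRssEnable

// DAGScheduler/TaskSetManager: losing an executor or worker invalidates its
// map outputs only if shuffle files actually lived on that node.
def shuffleOutputLost(conf: SparkConf, workerLost: Boolean,
    externalShuffleServiceEnabled: Boolean): Boolean =
  (workerLost || !externalShuffleServiceEnabled) && !conf.isRssEnable
```

ShuffledRowRDD's `getPreferredLocations` follows the same logic: with Uniffle, shuffle blocks sit on remote shuffle servers rather than on executors, so no executor host is a better place to schedule the reader and `Nil` is the honest answer.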