This is an automated email from the ASF dual-hosted git repository.
gurwls223 pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/branch-3.2 by this push:
new e9dd296 [SPARK-36246][CORE][TEST] GHA WorkerDecommissionExtended flake
e9dd296 is described below
commit e9dd2969c2f599252fb84fac4c239d0055b0ff4e
Author: Holden Karau <[email protected]>
AuthorDate: Thu Jul 22 15:17:48 2021 +0900
[SPARK-36246][CORE][TEST] GHA WorkerDecommissionExtended flake
### What changes were proposed in this pull request?
GHA probably doesn't have the same resources as Jenkins, so move down from 5 to 3 executors and give them a bit more time to come up.
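For context, a `local-cluster[N,C,M]` master URL launches N workers with C cores and M MiB of memory each, so the suite now requests 3 workers with 384 MiB apiece instead of 5 with 512 MiB. A minimal standalone sketch of an equivalent conf, using the string keys behind the config constants in the diff below (the app name here is hypothetical):

```scala
import org.apache.spark.{SparkConf, SparkContext}

// 3 workers x 1 core x 384 MiB each -- the values this patch moves to.
val conf = new SparkConf()
  .setAppName("worker-decommission-repro")              // hypothetical name
  .setMaster("local-cluster[3,1,384]")                  // N workers, cores/worker, MiB/worker
  .set("spark.executor.memory", "384m")                 // fits within each worker's memory
  .set("spark.dynamicAllocation.enabled", "true")
  .set("spark.dynamicAllocation.shuffleTracking.enabled", "true")
  .set("spark.dynamicAllocation.initialExecutors", "3")
  .set("spark.decommission.enabled", "true")

val sc = new SparkContext(conf)
```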
### Why are the changes needed?
The test is timing out in GHA.
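Concretely, the change cuts the cluster's memory footprint from 5 × 512 MiB = 2560 MiB to 3 × 384 MiB = 1152 MiB, and gives the suite's readiness check more headroom: it now waits up to 80s (was 60s) for 3 executors (was 5) to register. A sketch of the relaxed call, with `awaitExecutors` as a hypothetical wrapper around Spark's test helper:

```scala
import org.apache.spark.{SparkContext, TestUtils}

// Block until all 3 executors have registered, allowing up to 80 seconds;
// GHA runners bring executors up more slowly than the Jenkins workers did.
def awaitExecutors(sc: SparkContext): Unit =
  TestUtils.waitUntilExecutorsUp(sc, 3, 80000)
```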
### Does this PR introduce _any_ user-facing change?
No, this is a test-only change.
### How was this patch tested?
Ran the suite through GHA and verified there was no OOM during WorkerDecommissionExtended.
Closes #33467 from holdenk/SPARK-36246-WorkerDecommissionExtendedSuite-flakes-in-GHA.
Lead-authored-by: Holden Karau <[email protected]>
Co-authored-by: Holden Karau <[email protected]>
Signed-off-by: Hyukjin Kwon <[email protected]>
(cherry picked from commit 89a83196ac37617a8d19209ec1d7fea6b52d0f25)
Signed-off-by: Hyukjin Kwon <[email protected]>
---
.../spark/scheduler/WorkerDecommissionExtendedSuite.scala | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/core/src/test/scala/org/apache/spark/scheduler/WorkerDecommissionExtendedSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/WorkerDecommissionExtendedSuite.scala
index 129eb8b..66d3cf2 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/WorkerDecommissionExtendedSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/WorkerDecommissionExtendedSuite.scala
@@ -31,17 +31,17 @@ import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackend
 class WorkerDecommissionExtendedSuite extends SparkFunSuite with LocalSparkContext {
   private val conf = new org.apache.spark.SparkConf()
     .setAppName(getClass.getName)
-    .set(SPARK_MASTER, "local-cluster[5,1,512]")
-    .set(EXECUTOR_MEMORY, "512m")
+    .set(SPARK_MASTER, "local-cluster[3,1,384]")
+    .set(EXECUTOR_MEMORY, "384m")
     .set(DYN_ALLOCATION_ENABLED, true)
     .set(DYN_ALLOCATION_SHUFFLE_TRACKING_ENABLED, true)
-    .set(DYN_ALLOCATION_INITIAL_EXECUTORS, 5)
+    .set(DYN_ALLOCATION_INITIAL_EXECUTORS, 3)
     .set(DECOMMISSION_ENABLED, true)

   test("Worker decommission and executor idle timeout") {
     sc = new SparkContext(conf.set(DYN_ALLOCATION_EXECUTOR_IDLE_TIMEOUT.key, "10s"))
     withSpark(sc) { sc =>
-      TestUtils.waitUntilExecutorsUp(sc, 5, 60000)
+      TestUtils.waitUntilExecutorsUp(sc, 3, 80000)
       val rdd1 = sc.parallelize(1 to 10, 2)
       val rdd2 = rdd1.map(x => (1, x))
       val rdd3 = rdd2.reduceByKey(_ + _)
@@ -53,10 +53,10 @@ class WorkerDecommissionExtendedSuite extends SparkFunSuite with LocalSparkContext
     }
   }

-  test("Decommission 4 executors from 5 executors in total") {
+  test("Decommission 2 executors from 3 executors in total") {
     sc = new SparkContext(conf)
     withSpark(sc) { sc =>
-      TestUtils.waitUntilExecutorsUp(sc, 5, 60000)
+      TestUtils.waitUntilExecutorsUp(sc, 3, 80000)
       val rdd1 = sc.parallelize(1 to 100000, 200)
       val rdd2 = rdd1.map(x => (x % 100, x))
       val rdd3 = rdd2.reduceByKey(_ + _)
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]