tgravescs commented on a change in pull request #29395:
URL: https://github.com/apache/spark/pull/29395#discussion_r468587871
##########
File path: core/src/test/scala/org/apache/spark/BarrierStageOnSubmittedSuite.scala
##########
@@ -259,4 +262,37 @@ class BarrierStageOnSubmittedSuite extends SparkFunSuite with LocalSparkContext
     testSubmitJob(sc, rdd,
       message = ERROR_MESSAGE_BARRIER_REQUIRE_MORE_SLOTS_THAN_CURRENT_TOTAL_NUMBER)
   }
+
+  test("SPARK-32518: CoarseGrainedSchedulerBackend.maxNumConcurrentTasks should " +
+    "consider all kinds of resources for the barrier stage") {
+    withTempDir { dir =>
+      val discoveryScript = createTempScriptWithExpectedOutput(
+        dir, "gpuDiscoveryScript", """{"name": "gpu","addresses":["0"]}""")
+
+      val conf = new SparkConf()
+        .setMaster("local-cluster[1, 2, 1024]")
+        .setAppName("test-cluster")
+        .set(WORKER_GPU_ID.amountConf, "1")
+        .set(WORKER_GPU_ID.discoveryScriptConf, discoveryScript)
+        .set(EXECUTOR_GPU_ID.amountConf, "1")
+        .set(TASK_GPU_ID.amountConf, "1")
+        // disable barrier stage retry to fail the application as soon as possible
+        .set(BARRIER_MAX_CONCURRENT_TASKS_CHECK_MAX_FAILURES, 1)
+        // disable the check to simulate the behavior of Standalone in order to
+        // reproduce the issue.
+        .set(Tests.SKIP_VALIDATE_CORES_TESTING, true)
+      sc = new SparkContext(conf)
+      // setup an executor which will have 2 CPUs and 1 GPU
+      TestUtils.waitUntilExecutorsUp(sc, 1, 60000)
+
+      val exception = intercept[BarrierJobSlotsNumberCheckFailed] {
+        sc.parallelize(Range(1, 10), 2)
+          .barrier()
+          .mapPartitions { iter => iter }
+          .collect()
+      }
+      assert(exception.getMessage.contains("[SPARK-24819]: Barrier execution " +
+        "mode does not allow run a barrier stage that requires more slots"))
Review comment:
ok, we can revisit if it becomes an issue later.
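
A minimal, self-contained sketch of the slot calculation the new test exercises (this is not the actual CoarseGrainedSchedulerBackend code; the object and helper below are invented for illustration). The idea is that the number of concurrent barrier slots is bounded by the scarcest resource: an executor with 2 CPUs but only 1 GPU, and tasks that each need 1 CPU and 1 GPU, yields a single slot, fewer than the 2 partitions the barrier stage requests, so BarrierJobSlotsNumberCheckFailed is the expected outcome.

    // Illustrative sketch only; names and signature are hypothetical.
    object BarrierSlotSketch {

      // Stand-in for the idea behind CoarseGrainedSchedulerBackend.maxNumConcurrentTasks:
      // take the minimum over the CPU limit and every task resource limit.
      def maxConcurrentTasks(
          executorCores: Int,
          cpusPerTask: Int,
          executorResources: Map[String, Int],  // e.g. Map("gpu" -> 1)
          taskResources: Map[String, Int]       // e.g. Map("gpu" -> 1)
      ): Int = {
        val cpuLimit = executorCores / cpusPerTask
        val resourceLimits = taskResources.map { case (name, amountPerTask) =>
          executorResources.getOrElse(name, 0) / amountPerTask
        }
        (Iterator(cpuLimit) ++ resourceLimits.iterator).min
      }

      def main(args: Array[String]): Unit = {
        // The executor in the test has 2 CPUs and 1 GPU; each task needs 1 CPU and 1 GPU.
        val slots = maxConcurrentTasks(
          executorCores = 2,
          cpusPerTask = 1,
          executorResources = Map("gpu" -> 1),
          taskResources = Map("gpu" -> 1))
        // Only 1 slot is available, but the barrier stage has 2 partitions,
        // so the slots check is expected to fail the job.
        assert(slots == 1)
      }
    }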