This is an automated email from the ASF dual-hosted git repository.
chengpan pushed a commit to branch branch-1.7
in repository https://gitbox.apache.org/repos/asf/kyuubi.git
The following commit(s) were added to refs/heads/branch-1.7 by this push:
new 35033ff21 Revert "[KYUUBI #4713][TEST] Fix false positive result in SchedulerPoolSuite"
35033ff21 is described below
commit 35033ff214f9690e63204b6e73a79ecee44c5a81
Author: Cheng Pan <[email protected]>
AuthorDate: Mon Apr 17 16:56:48 2023 +0800
Revert "[KYUUBI #4713][TEST] Fix false positive result in
SchedulerPoolSuite"
This reverts commit 84f19a76b0309da29787775c2e66ee15a8b93a61.
---
.../kyuubi/engine/spark/SchedulerPoolSuite.scala | 18 +++++++-----------
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/SchedulerPoolSuite.scala b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/SchedulerPoolSuite.scala
index d42b7f4d5..af8c90cf2 100644
--- a/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/SchedulerPoolSuite.scala
+++ b/externals/kyuubi-spark-sql-engine/src/test/scala/org/apache/kyuubi/engine/spark/SchedulerPoolSuite.scala
@@ -19,8 +19,6 @@ package org.apache.kyuubi.engine.spark
import java.util.concurrent.Executors
-import scala.concurrent.duration.SECONDS
-
import org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd, SparkListenerJobStart}
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.time.SpanSugar.convertIntToGrainOfTime
@@ -82,7 +80,6 @@ class SchedulerPoolSuite extends WithSparkSQLEngine with HiveJDBCTestHelper {
threads.execute(() => {
priority match {
case 0 =>
- // job name job2
withJdbcStatement() { statement =>
statement.execute("SET kyuubi.operation.scheduler.pool=p0")
statement.execute("SELECT java_method('java.lang.Thread',
'sleep', 1500l)" +
@@ -95,18 +92,17 @@ class SchedulerPoolSuite extends WithSparkSQLEngine with HiveJDBCTestHelper {
statement.execute("SELECT java_method('java.lang.Thread',
'sleep', 1500l)" +
" FROM range(1, 3, 1, 2)")
}
- // make sure this job name job1
- Thread.sleep(1000)
}
})
}
threads.shutdown()
- threads.awaitTermination(20, SECONDS)
- // because after job1 submitted, sleep 1s, so job1 should be started before job2
- assert(job1StartTime < job2StartTime)
- // job2 minShare is 2(total resource) so that job1 should be allocated tasks after
- // job2 finished.
- assert(job2FinishTime < job1FinishTime)
+ eventually(Timeout(20.seconds)) {
+ // We can not ensure that job1 is started before job2 so here using abs.
+ assert(Math.abs(job1StartTime - job2StartTime) < 1000)
+ // Job1 minShare is 2(total resource) so that job2 should be allocated tasks after
+ // job1 finished.
+ assert(job2FinishTime - job1FinishTime >= 1000)
+ }
} finally {
spark.sparkContext.removeSparkListener(listener)
}
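For reference, the change being reverted replaced the fixed awaitTermination(20, SECONDS) wait and one-shot asserts with ScalaTest's eventually wrapped in an explicit Timeout, which retries the assertions until they pass or the timeout elapses. Below is a minimal, self-contained sketch of that retry pattern; it is illustrative only, not the Kyuubi suite itself, and the object name and the simulated background work are invented for the example:

    import org.scalatest.concurrent.Eventually._
    import org.scalatest.concurrent.PatienceConfiguration.Timeout
    import org.scalatest.time.SpanSugar.convertIntToGrainOfTime

    object EventuallySketch extends App {
      @volatile var done = false
      // Simulate asynchronous work that finishes after roughly half a second.
      new Thread(() => { Thread.sleep(500); done = true }).start()

      // Retries the enclosed assertion until it passes or 20 seconds elapse,
      // instead of sleeping for a fixed interval and asserting once.
      eventually(Timeout(20.seconds)) {
        assert(done, "background work has not finished yet")
      }
      println("assertion eventually passed")
    }

Reverting the commit restores the original behavior shown in the hunk above: block on awaitTermination(20, SECONDS), then assert the job ordering a single time.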