Ngone51 commented on a change in pull request #28245:
URL: https://github.com/apache/spark/pull/28245#discussion_r411169934
##########
File path: core/src/main/scala/org/apache/spark/rpc/RpcEndpointRef.scala
##########
@@ -114,11 +114,9 @@ private[spark] class RpcAbortException(message: String) extends Exception(messag
* A wrapper for [[Future]] but add abort method.
* This is used in long run RPC and provide an approach to abort the RPC.
*/
-private[spark] class AbortableRpcFuture[T: ClassTag](
- future: Future[T],
- onAbort: String => Unit) {
+private[spark] class AbortableRpcFuture[T: ClassTag](f: Future[T], onAbort: Throwable => Unit) {
- def abort(reason: String): Unit = onAbort(reason)
+ def abort(t: Throwable): Unit = onAbort(t)
- def toFuture: Future[T] = future
+ def future: Future[T] = f
Review comment:
sounds good!
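
For context, this hunk changes the abort callback from `String => Unit` to `Throwable => Unit` and renames `toFuture` to `future`. Below is a minimal, self-contained sketch of how the reworked wrapper can be driven from a `Promise`; the class here is a local stand-in (the real one is `private[spark]` in `RpcEndpointRef.scala`), and the object name and exception message are illustrative, not Spark APIs.

```scala
import scala.concurrent.{Future, Promise}
import scala.reflect.ClassTag

// Local stand-in for the wrapper after this change: abort takes a Throwable
// (not a reason String) and the underlying Future is exposed as `future`
// rather than `toFuture`.
class AbortableRpcFuture[T: ClassTag](f: Future[T], onAbort: Throwable => Unit) {
  def abort(t: Throwable): Unit = onAbort(t)
  def future: Future[T] = f
}

object AbortableRpcFutureSketch {
  def main(args: Array[String]): Unit = {
    // The sender side keeps a Promise and fails it with the caller-supplied
    // cause, so waiters observe the real exception (with its stack trace)
    // rather than only a reason string.
    val promise = Promise[Array[String]]()
    val abortable =
      new AbortableRpcFuture[Array[String]](promise.future, t => promise.tryFailure(t))

    abortable.abort(new RuntimeException("task killed while waiting for barrier()"))
    println(abortable.future.value) // Some(Failure(java.lang.RuntimeException: ...))
  }
}
```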
##########
File path: core/src/main/scala/org/apache/spark/BarrierTaskContext.scala
##########
@@ -85,28 +84,28 @@ class BarrierTaskContext private[spark] (
// BarrierCoordinator on timeout, instead of RPCTimeoutException from the RPC framework.
timeout = new RpcTimeout(365.days, "barrierTimeout"))
- // messages which consist of all barrier tasks' messages
- var messages: Array[String] = null
// Wait the RPC future to be completed, but every 1 second it will jump out waiting
// and check whether current spark task is killed. If killed, then throw
// a `TaskKilledException`, otherwise continue wait RPC until it completes.
- try {
- while (!abortableRpcFuture.toFuture.isCompleted) {
+
+ // import scala Success locally to avoid conflict with org.apache.spark.Success
+ import scala.util.{Failure, Success, Try}
Review comment:
sure.
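
To make the waiting pattern described in the comments above concrete, here is a hedged, self-contained sketch (not the actual `BarrierTaskContext` code): it blocks on the RPC future for at most one second at a time, re-checks a kill flag between attempts, and aborts the pending RPC with a real `Throwable` once the task is killed. `waitForMessages`, `isTaskKilled`, and `TaskKilledSketchException` are illustrative stand-ins, not Spark APIs.

```scala
import java.util.concurrent.TimeoutException
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
// Success/Failure are imported at file scope here; in BarrierTaskContext they
// are imported locally to avoid clashing with org.apache.spark.Success.
import scala.util.{Failure, Success}

object BarrierWaitSketch {
  final class TaskKilledSketchException(msg: String) extends RuntimeException(msg)

  def waitForMessages(
      future: Future[Array[String]],
      abort: Throwable => Unit,
      isTaskKilled: () => Boolean): Array[String] = {
    while (!future.isCompleted) {
      try {
        // Block for at most 1 second, then jump out to re-check the kill flag.
        Await.ready(future, 1.second)
      } catch {
        case _: TimeoutException => // not done yet; fall through to the check below
      }
      if (isTaskKilled()) {
        val cause = new TaskKilledSketchException("task killed while waiting on barrier()")
        abort(cause) // fail the pending RPC with the real cause
        throw cause
      }
    }
    // Safe: isCompleted == true guarantees the value is set.
    future.value.get match {
      case Success(messages) => messages
      case Failure(e) => throw e
    }
  }
}
```

The one-second bound mirrors the intent of the quoted comment: the blocking call only ever waits briefly, so a killed task is noticed within roughly a second instead of after the 365-day `barrierTimeout`.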