rdblue commented on a change in pull request #28864:
URL: https://github.com/apache/spark/pull/28864#discussion_r446318223



##########
File path: core/src/main/scala/org/apache/spark/storage/BlockManagerMessages.scala
##########
@@ -24,37 +24,37 @@ import org.apache.spark.util.Utils
 
 private[spark] object BlockManagerMessages {
   //////////////////////////////////////////////////////////////////////////////////
-  // Messages from the master to slaves.
+  // Messages from the master to storage endpoints.
   //////////////////////////////////////////////////////////////////////////////////
-  sealed trait ToBlockManagerSlave
+  sealed trait ToBlockManagerStorage

Review comment:
       Nit: I would prefer having `Endpoint` added because I don't think `Storage` is the correct noun.
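
       For example (the exact name here is just my suggestion, not settled):

```scala
// Adding `Endpoint` makes clear this names the receiving RPC endpoint,
// not the storage itself.
sealed trait ToBlockManagerStorageEndpoint
```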

##########
File path: core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
##########
@@ -1912,7 +1912,7 @@ private[spark] class DAGScheduler(
    * modify the scheduler's internal state. Use executorLost() to post a loss event from outside.
    *
    * We will also assume that we've lost all shuffle blocks associated with the executor if the
-   * executor serves its own blocks (i.e., we're not using external shuffle), the entire slave
+   * executor serves its own blocks (i.e., we're not using external shuffle), the entire agent

Review comment:
       Should this also be updated to "executor process"?
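
       For example (wording is mine; only the changed fragment shown):

```scala
// Suggested wording for the changed scaladoc line ("agent" -> "executor process"):
//   ... (i.e., we're not using external shuffle), the entire executor process ...
```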

##########
File path: core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala
##########
@@ -656,7 +655,7 @@ private[spark] class BlockManagerInfo(
     var originalLevel: StorageLevel = StorageLevel.NONE
 
     if (blockExists) {
-      // The block exists on the slave already.
+      // The block exists on the replica already.

Review comment:
       Here's another spot where the replacement term has since changed, so you may want to update this comment as well.
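
       For example (assuming "storage endpoint" is the term that wins out):

```scala
      // The block exists on the storage endpoint already.
```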

##########
File path: resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosClusterScheduler.scala
##########
@@ -731,7 +731,7 @@ private[spark] class MesosClusterScheduler(
   override def reregistered(driver: SchedulerDriver, masterInfo: MasterInfo): Unit = {
     logInfo(s"Framework re-registered with master ${masterInfo.getId}")
   }
-  override def slaveLost(driver: SchedulerDriver, slaveId: SlaveID): Unit = {}
+  override def slaveLost(driver: SchedulerDriver, agentId: SlaveID): Unit = {}

Review comment:
       Can we rename the `SlaveID` type to `AgentID` when it is imported?
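
       Scala can rename a type at the import site, so the protobuf class itself doesn't need to change. A sketch (the alias name follows this suggestion):

```scala
// Bind Mesos's SlaveID under the new name at the import; call sites then read
// in the new terminology while the underlying protobuf type stays the same.
import org.apache.mesos.Protos.{SlaveID => AgentID}

// The override can then be written as:
//   override def slaveLost(driver: SchedulerDriver, agentId: AgentID): Unit = {}
```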

##########
File path: core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala
##########
@@ -466,7 +466,7 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with TimeLi
     assertDataStructuresEmpty()
   }
 
-  test("All shuffle files on the slave should be cleaned up when slave lost") {
+  test("All shuffle files on the replica should be cleaned up when it is 
lost") {

Review comment:
       I think this should be "storage endpoint".
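
       i.e., something like:

```scala
  test("All shuffle files on the storage endpoint should be cleaned up when it is lost") {
    // ... body unchanged ...
  }
```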

##########
File path: core/src/test/scala/org/apache/spark/MapOutputTrackerSuite.scala
##########
@@ -136,39 +136,39 @@ class MapOutputTrackerSuite extends SparkFunSuite {
     masterTracker.trackerEndpoint = rpcEnv.setupEndpoint(MapOutputTracker.ENDPOINT_NAME,
       new MapOutputTrackerMasterEndpoint(rpcEnv, masterTracker, conf))
 
-    val slaveRpcEnv = createRpcEnv("spark-slave", hostname, 0, new SecurityManager(conf))
-    val slaveTracker = new MapOutputTrackerWorker(conf)
-    slaveTracker.trackerEndpoint =
-      slaveRpcEnv.setupEndpointRef(rpcEnv.address, MapOutputTracker.ENDPOINT_NAME)
+    val mapWorkerRpcEnv = createRpcEnv("spark-replica", hostname, 0, new SecurityManager(conf))

Review comment:
       What about `spark-worker`?
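
       i.e.:

```scala
    val mapWorkerRpcEnv = createRpcEnv("spark-worker", hostname, 0, new SecurityManager(conf))
```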

##########
File path: core/src/main/scala/org/apache/spark/storage/BlockManagerStorageEndpoint.scala
##########
@@ -27,17 +27,17 @@ import org.apache.spark.util.{ThreadUtils, Utils}
 
 /**
  * An RpcEndpoint to take commands from the master to execute options. For example,
- * this is used to remove blocks from the slave's BlockManager.
+ * this is used to remove blocks from the replica's BlockManager.
  */
 private[storage]
-class BlockManagerSlaveEndpoint(
+class BlockManagerStorageEndpoint(
     override val rpcEnv: RpcEnv,
     blockManager: BlockManager,
     mapOutputTracker: MapOutputTracker)
   extends IsolatedRpcEndpoint with Logging {
 
   private val asyncThreadPool =
-    ThreadUtils.newDaemonCachedThreadPool("block-manager-slave-async-thread-pool", 100)
+    ThreadUtils.newDaemonCachedThreadPool("block-manager-replica-async-thread-pool", 100)

Review comment:
       Should the pool name include `endpoint` as well?
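
       e.g. (exact name is just my suggestion):

```scala
    ThreadUtils.newDaemonCachedThreadPool("block-manager-storage-endpoint-async-thread-pool", 100)
```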

##########
File path: core/src/main/scala/org/apache/spark/storage/BlockManagerStorageEndpoint.scala
##########
@@ -27,17 +27,17 @@ import org.apache.spark.util.{ThreadUtils, Utils}
 
 /**
  * An RpcEndpoint to take commands from the master to execute options. For example,
- * this is used to remove blocks from the slave's BlockManager.
+ * this is used to remove blocks from the replica's BlockManager.

Review comment:
       Did you mean replica here? It does seem to work, but could also be 
"storage endpoint".



