Ngone51 commented on a change in pull request #31876:
URL: https://github.com/apache/spark/pull/31876#discussion_r618118932



##########
File path: core/src/main/scala/org/apache/spark/scheduler/MapStatus.scala
##########
@@ -52,8 +55,41 @@ private[spark] sealed trait MapStatus {
    * partitionId of the task or taskContext.taskAttemptId is used.
    */
   def mapId: Long
+
 }
 
+private[spark] class MapStatusLocationFactory(conf: SparkConf) {
+  private val locationExtension = classOf[Location]
+  private val (locationConstructor, locationName) = {
+    conf.get(config.SHUFFLE_LOCATION_PLUGIN_CLASS).map { className =>
+      val clazz = Utils.classForName(className)
+      require(locationExtension.isAssignableFrom(clazz),
+        s"$className is not a subclass of ${locationExtension.getName}.")
+      (clazz.getConstructor(), className)
+    }.orNull
+  }
+
+  private lazy val locationCache: LoadingCache[Location, Location] = CacheBuilder.newBuilder()
+    .maximumSize(10000)
+    .build(
+      new CacheLoader[Location, Location]() {
+        override def load(loc: Location): Location = loc
+      }
+    )
+
+  def load(in: ObjectInput): Location = {
+    try {
+      Option(locationConstructor).map { ctr =>
+        val loc = ctr.newInstance().asInstanceOf[Location]
+        loc.readExternal(in)
+        locationCache.get(loc)

Review comment:
       The cache mimics the `blockManagerIdCache`.
   
   It's there to reduce the number of Java objects representing the same location on the driver. Ideally, the cache provides a single canonical instance for each distinct location.
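   For reference, the interning pattern in `BlockManagerId` looks roughly like this (a sketch, not the exact source):
   
   ```scala
   import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
   
   // Interning cache: the loader just returns the key, so the first instance
   // of each distinct id becomes the canonical one returned on later lookups.
   val blockManagerIdCache: LoadingCache[BlockManagerId, BlockManagerId] =
     CacheBuilder.newBuilder()
       .maximumSize(10000)
       .build(new CacheLoader[BlockManagerId, BlockManagerId]() {
         override def load(id: BlockManagerId): BlockManagerId = id
       })
   
   def getCachedBlockManagerId(id: BlockManagerId): BlockManagerId = {
     blockManagerIdCache.get(id)
   }
   ```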
   
   For example, a cluster with 10 executors could run a stage with 100 partitions, so, generally, every 10 partitions would share the same location. Without the cache we'd end up with 100 location objects; with the cache, only 10.
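   To make the dedup effect concrete, here is a minimal standalone sketch (`HostLocation` is a made-up stand-in for the `Location` plugin type):
   
   ```scala
   import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
   
   // Hypothetical Location stand-in; equality is by host/port.
   case class HostLocation(host: String, port: Int)
   
   val cache: LoadingCache[HostLocation, HostLocation] =
     CacheBuilder.newBuilder()
       .maximumSize(10000)
       .build(new CacheLoader[HostLocation, HostLocation]() {
         override def load(loc: HostLocation): HostLocation = loc
       })
   
   // Two equal-but-distinct instances, e.g. deserialized from two MapStatuses.
   val loc1 = HostLocation("host-1", 7337)
   val loc2 = HostLocation("host-1", 7337)
   
   // Both resolve to the same canonical object (reference equality),
   // so only one Java object per distinct location is retained.
   assert(cache.get(loc1) eq cache.get(loc2))
   ```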
   
   
   I can add a comment for it later.



