LantaoJin commented on a change in pull request #23951: [SPARK-13704][CORE][YARN] Re-implement RackResolver to reduce resolving time
URL: https://github.com/apache/spark/pull/23951#discussion_r267179410
 
 

 ##########
 File path: 
resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/SparkRackResolver.scala
 ##########
 @@ -17,24 +17,90 @@
 
 package org.apache.spark.deploy.yarn
 
+import scala.collection.JavaConverters._
+import scala.collection.mutable.ArrayBuffer
+
+import com.google.common.base.Strings
 import org.apache.hadoop.conf.Configuration
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic
+import org.apache.hadoop.net._
+import org.apache.hadoop.util.ReflectionUtils
 import org.apache.hadoop.yarn.util.RackResolver
 import org.apache.log4j.{Level, Logger}
 
+import org.apache.spark.internal.Logging
+
 /**
- * Wrapper around YARN's [[RackResolver]]. This allows Spark tests to easily override the
+ * Re-implement YARN's [[RackResolver]]. This allows Spark tests to easily override the
  * default behavior, since YARN's class self-initializes the first time it's called, and
  * future calls all use the initial configuration.
  */
-private[yarn] class SparkRackResolver {
+private[spark] class SparkRackResolver {
 
   // RackResolver logs an INFO message whenever it resolves a rack, which is way too often.
   if (Logger.getLogger(classOf[RackResolver]).getLevel == null) {
     Logger.getLogger(classOf[RackResolver]).setLevel(Level.WARN)
   }
 
   def resolve(conf: Configuration, hostName: String): String = {
-    RackResolver.resolve(conf, hostName).getNetworkLocation()
+    SparkRackResolver.coreResolve(conf, List(hostName)).head.getNetworkLocation
+  }
+
+  /**
+   * Added in SPARK-27038.
+   * This should be changed to `RackResolver.resolve(conf, hostNames)`
+   * in hadoop releases with YARN-9332.
+   */
+  def resolve(conf: Configuration, hostNames: List[String]): List[Node] = {
+    SparkRackResolver.coreResolve(conf, hostNames)
   }
+}
 
+/**
+ * Utility to resolve the rack for hosts in an efficient manner.
+ * It will cache the rack for individual hosts to avoid
+ * repeatedly performing the same expensive lookup.
+ *
+ * Its logic is based on [[org.apache.hadoop.yarn.util.RackResolver]], with enhancements.
+ * This will be unnecessary in hadoop releases with YARN-9332.
+ * With that, we could just directly use [[org.apache.hadoop.yarn.util.RackResolver]].
+ * In the meantime, this is a re-implementation for Spark's use.
+ */
+object SparkRackResolver extends Logging {
+  private var dnsToSwitchMapping: DNSToSwitchMapping = _
+  private var initCalled = false
+
+  def coreResolve(conf: Configuration, hostNames: List[String]): List[Node] = {
+    if (!initCalled) {
 
 Review comment:
   Every call to `get` from a different thread will create a new `SparkRackResolver`, since the `conf` instances are different. So how do we keep only one cache in memory?
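   
   One way to address this could be a process-wide accessor, so every thread shares a single resolver (and therefore a single rack cache) no matter which `Configuration` instance it passes in. A minimal sketch follows; the `SharedRackResolver` object and its `get` helper are illustrative names, not part of this PR, and the "first caller wins" behavior is an assumption:
   
   ```scala
   import org.apache.hadoop.conf.Configuration
   
   // Illustrative sketch only: share one SparkRackResolver (and its cache)
   // across all threads, regardless of which Configuration each caller passes.
   object SharedRackResolver {
     @volatile private var instance: SparkRackResolver = _
   
     def get(conf: Configuration): SparkRackResolver = {
       if (instance == null) {
         SharedRackResolver.synchronized {
           if (instance == null) {
             // First caller wins: later calls reuse the same resolver, so a
             // different conf instance no longer creates a second cache.
             instance = new SparkRackResolver()
           }
         }
       }
       instance
     }
   }
   
   // Usage (every thread ends up hitting the same cached topology):
   // val nodes = SharedRackResolver.get(conf).resolve(conf, hostNames)
   ```
   
   A `lazy val` in the companion object would be a simpler alternative if the resolver never needs the caller's `conf` during construction; the point is just that the cache lives in exactly one place.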
