mridulm commented on code in PR #40850:
URL: https://github.com/apache/spark/pull/40850#discussion_r1172865732


##########
core/src/main/scala/org/apache/spark/util/Utils.scala:
##########
@@ -3371,22 +3358,11 @@ private[spark] class CallerContext(
 
   /**
    * Set up the caller context [[context]] by invoking Hadoop CallerContext 
API of
-   * [[org.apache.hadoop.ipc.CallerContext]], which was added in hadoop 2.8.
+   * [[org.apache.hadoop.ipc.CallerContext]].
    */
-  def setCurrentContext(): Unit = {
-    if (CallerContext.callerContextSupported) {
-      try {
-        val callerContext = 
Utils.classForName("org.apache.hadoop.ipc.CallerContext")
-        val builder: Class[AnyRef] =
-          Utils.classForName("org.apache.hadoop.ipc.CallerContext$Builder")
-        val builderInst = 
builder.getConstructor(classOf[String]).newInstance(context)
-        val hdfsContext = builder.getMethod("build").invoke(builderInst)
-        callerContext.getMethod("setCurrent", callerContext).invoke(null, 
hdfsContext)
-      } catch {
-        case NonFatal(e) =>
-          logWarning("Fail to set Spark caller context", e)
-      }
-    }
+  def setCurrentContext(): Unit = if (CallerContext.callerContextEnabled) {
+    val hdfsContext = new 
org.apache.hadoop.ipc.CallerContext.Builder(context).build()
+    org.apache.hadoop.ipc.CallerContext.setCurrent(hdfsContext)

Review Comment:
   nit: Instead of using the fully qualified name (which made sense in 
reflection code earlier), we should have used a renamed import statement.
   Something like:
   ```
   import org.apache.hadoop.ipc.{CallerContext => HadoopCallerContext}
   import org.apache.hadoop.ipc.CallerContext.{Builder => HadoopCallerContextBuilder}
   ```



##########
core/src/main/scala/org/apache/spark/util/Utils.scala:
##########
@@ -3371,22 +3358,11 @@ private[spark] class CallerContext(
 
   /**
    * Set up the caller context [[context]] by invoking Hadoop CallerContext 
API of
-   * [[org.apache.hadoop.ipc.CallerContext]], which was added in hadoop 2.8.
+   * [[org.apache.hadoop.ipc.CallerContext]].
    */
-  def setCurrentContext(): Unit = {
-    if (CallerContext.callerContextSupported) {
-      try {
-        val callerContext = 
Utils.classForName("org.apache.hadoop.ipc.CallerContext")
-        val builder: Class[AnyRef] =
-          Utils.classForName("org.apache.hadoop.ipc.CallerContext$Builder")
-        val builderInst = 
builder.getConstructor(classOf[String]).newInstance(context)
-        val hdfsContext = builder.getMethod("build").invoke(builderInst)
-        callerContext.getMethod("setCurrent", callerContext).invoke(null, 
hdfsContext)
-      } catch {
-        case NonFatal(e) =>
-          logWarning("Fail to set Spark caller context", e)
-      }
-    }
+  def setCurrentContext(): Unit = if (CallerContext.callerContextEnabled) {
+    val hdfsContext = new 
org.apache.hadoop.ipc.CallerContext.Builder(context).build()
+    org.apache.hadoop.ipc.CallerContext.setCurrent(hdfsContext)

Review Comment:
   nit: Instead of using the fully qualified name (which made sense in 
reflection code earlier), we should have used a renamed import statement.
   Something like:
   ```
   import org.apache.hadoop.ipc.{CallerContext => HadoopCallerContext}
   import org.apache.hadoop.ipc.CallerContext.{Builder => HadoopCallerContextBuilder}
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to