AngersZhuuuu commented on a change in pull request #30973:
URL: https://github.com/apache/spark/pull/30973#discussion_r550945043



##########
File path: sql/core/src/main/scala/org/apache/spark/sql/execution/BaseScriptTransformationExec.scala
##########
@@ -69,9 +75,19 @@ trait BaseScriptTransformationExec extends UnaryExecNode {
     }
   }
 
-  protected def initProc: (OutputStream, Process, InputStream, CircularBuffer) = {
-    val cmd = List("/bin/bash", "-c", script)
+  protected def initProc(hadoopConf: Configuration): ProcParameters = {
+    val wrapper = splitArgs(hadoopConf.get(SQLConf.SCRIPT_TRANSFORMATION_COMMAND_WRAPPER.key))
+    val cmdArgs = splitArgs(script)
+    val prog = cmdArgs(0)
+    if(!new File(prog).isAbsolute) {
+      val progFile = new File(SparkFiles.get(prog))
+      if (progFile.exists()) {
+        cmdArgs(0) = progFile.getAbsolutePath
+      }
+    }
+    val cmd = wrapper.toList ++ cmdArgs.toList
     val builder = new ProcessBuilder(cmd.asJava)
+      .directory(new File(SparkFiles.getRootDirectory()))

Review comment:
   > @AngersZhuuuu, is this required? On the executor side, the root directory is already the current working directory IIRC.
   
   But in the unit tests, the driver node's working directory is not `SparkFiles.getRootDirectory`. If we remove this, the test fails with:
   ```
    /usr/bin/python: can't open file 'test-resource8890660714423343799.py': [Errno 2] No such file or directory
   ```
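   For illustration, here is a minimal sketch of the difference (not the PR code; `test-resource.py` stands in for the temp script the test ships via `sc.addFile`):
   ```scala
   // Illustration only: the script is referenced by a relative name, so the
   // child process resolves it against its own working directory, not against
   // the SparkFiles root where addFile() materialized it.
   import java.io.File
   import scala.collection.JavaConverters._
   import org.apache.spark.SparkFiles

   val cmd = List("/usr/bin/python", "test-resource.py")

   // Inherits the JVM's working directory. On the driver in unit tests that is
   // the project directory, so python cannot find the relative script name.
   val withoutDir = new ProcessBuilder(cmd.asJava)

   // Relative names now resolve under SparkFiles.getRootDirectory(), which is
   // where addFile() places the script.
   val withDir = new ProcessBuilder(cmd.asJava)
     .directory(new File(SparkFiles.getRootDirectory()))
   ```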



