This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 3d5af030707 [SPARK-41407][SQL][FOLLOW-UP] Use string jobTrackerID for FileFormatWriter.executeTask
3d5af030707 is described below

commit 3d5af030707e1dd8bd14a1bee244350329303943
Author: Hyukjin Kwon <gurwls...@apache.org>
AuthorDate: Fri Dec 23 20:22:33 2022 +0900

    [SPARK-41407][SQL][FOLLOW-UP] Use string jobTrackerID for FileFormatWriter.executeTask
    
    ### What changes were proposed in this pull request?
    
    This PR is a follow-up of https://github.com/apache/spark/pull/38939 that fixes a logical conflict introduced when two PRs were merged around the same time: https://github.com/apache/spark/pull/38980 changed `FileFormatWriter.executeTask` to take a string job tracker ID, while the newly added `WriteFiles.scala` from https://github.com/apache/spark/pull/38939 still passed the raw `Long` timestamp.
    
    ### Why are the changes needed?
    
    To fix the broken master build.
    
    ### Does this PR introduce _any_ user-facing change?
    
    No, dev-only.
    
    ### How was this patch tested?
    
    Manually tested:
    
    ```
     ./build/sbt -Phive clean package
    ```
    
    Closes #39194 from HyukjinKwon/SPARK-41407.
    
    Authored-by: Hyukjin Kwon <gurwls...@apache.org>
    Signed-off-by: Hyukjin Kwon <gurwls...@apache.org>
---
 .../org/apache/spark/sql/execution/datasources/WriteFiles.scala     | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/WriteFiles.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/WriteFiles.scala
index 39b7b252f6e..5bc8f9db32b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/WriteFiles.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/WriteFiles.scala
@@ -20,7 +20,7 @@ package org.apache.spark.sql.execution.datasources
 import java.util.Date
 
 import org.apache.spark.{SparkException, TaskContext}
-import org.apache.spark.internal.io.FileCommitProtocol
+import org.apache.spark.internal.io.{FileCommitProtocol, SparkHadoopWriterUtils}
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.expressions.Attribute
@@ -72,7 +72,7 @@ case class WriteFilesExec(child: SparkPlan) extends UnaryExecNode {
     val concurrentOutputWriterSpec = writeFilesSpec.concurrentOutputWriterSpecFunc(child)
     val description = writeFilesSpec.description
     val committer = writeFilesSpec.committer
-    val jobIdInstant = new Date().getTime
+    val jobTrackerID = SparkHadoopWriterUtils.createJobTrackerID(new Date())
     rddWithNonEmptyPartitions.mapPartitionsInternal { iterator =>
       val sparkStageId = TaskContext.get().stageId()
       val sparkPartitionId = TaskContext.get().partitionId()
@@ -80,7 +80,7 @@ case class WriteFilesExec(child: SparkPlan) extends UnaryExecNode {
 
       val ret = FileFormatWriter.executeTask(
         description,
-        jobIdInstant,
+        jobTrackerID,
         sparkStageId,
         sparkPartitionId,
         sparkAttemptNumber,
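
For context, here is a minimal, self-contained Scala sketch of the shape of this fix. The helper name SparkHadoopWriterUtils.createJobTrackerID is taken from the diff above, but the body below is an illustrative stand-in (assumed to format the job start time into a timestamp string), not Spark's actual implementation:

```scala
import java.text.SimpleDateFormat
import java.util.{Date, Locale}

object JobTrackerIdSketch {
  // Illustrative stand-in for SparkHadoopWriterUtils.createJobTrackerID:
  // assumed here to render the job start time as a formatted timestamp string.
  def createJobTrackerID(time: Date): String =
    new SimpleDateFormat("yyyyMMddHHmmss", Locale.US).format(time)

  def main(args: Array[String]): Unit = {
    // Before the fix: WriteFilesExec passed a raw epoch-millis Long.
    val jobIdInstant: Long = new Date().getTime

    // After the fix: a formatted string ID, matching the new
    // FileFormatWriter.executeTask signature.
    val jobTrackerID: String = createJobTrackerID(new Date())

    println(s"old Long argument:   $jobIdInstant")
    println(s"new String argument: $jobTrackerID")
  }
}
```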


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
