cloud-fan commented on a change in pull request #29000:
URL: https://github.com/apache/spark/pull/29000#discussion_r526899169



##########
File path: 
sql/core/src/test/scala/org/apache/spark/sql/sources/PartitionedWriteSuite.scala
##########
@@ -164,4 +165,48 @@ class PartitionedWriteSuite extends QueryTest with 
SharedSparkSession {
       assert(e.getMessage.contains("Found duplicate column(s) b, b: `b`;"))
     }
   }
+
+  test("SPARK-27194 SPARK-29302: Fix commit collision in dynamic partition 
overwrite mode") {
+    withSQLConf(SQLConf.PARTITION_OVERWRITE_MODE.key ->
+      SQLConf.PartitionOverwriteMode.DYNAMIC.toString,
+      SQLConf.FILE_COMMIT_PROTOCOL_CLASS.key ->
+        classOf[PartitionFileExistCommitProtocol].getName) {
+      withTempDir { d =>
+        withTable("t") {
+          sql(
+            s"""
+               | create table t(c1 int, p1 int) using parquet partitioned by 
(p1)
+               | location '${d.getAbsolutePath}'
+            """.stripMargin)
+
+          val df = Seq((1, 2)).toDF("c1", "p1")
+          df.write
+            .partitionBy("p1")
+            .mode("overwrite")
+            .saveAsTable("t")
+          checkAnswer(sql("select * from t"), df)
+        }
+      }
+    }
+  }
+}
+
+/**
+ * A file commit protocol with a pre-created partition file. When trying to
+ * overwrite the partition dir in dynamic partition mode, a
+ * FileAlreadyExistsException would be raised without SPARK-27194.
+ */
+private class PartitionFileExistCommitProtocol(
+    jobId: String,
+    path: String,
+    dynamicPartitionOverwrite: Boolean)
+  extends SQLHadoopMapReduceCommitProtocol(jobId, path, 
dynamicPartitionOverwrite) {
+  override def setupJob(jobContext: JobContext): Unit = {
+    super.setupJob(jobContext)
+    val stagingDir = new File(new Path(path).toUri.getPath, 
s".spark-staging-$jobId")
+    stagingDir.mkdirs()
+    val stagingPartDir = new File(stagingDir, "p1=2")
+    stagingPartDir.mkdirs()
+    val conflictTaskFile = new File(stagingPartDir, 
s"part-00000-$jobId.c000.snappy.parquet")

Review comment:
       So `FileOutputCommitter` will clean up the output path
`.spark-staging-$jobId` at the beginning, which avoids the
FileAlreadyExistsException?




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to