cloud-fan commented on a change in pull request #29000:
URL: https://github.com/apache/spark/pull/29000#discussion_r524276998
##########
File path: sql/core/src/test/scala/org/apache/spark/sql/sources/PartitionedWriteSuite.scala
##########
@@ -164,4 +166,48 @@ class PartitionedWriteSuite extends QueryTest with SharedSparkSession {
assert(e.getMessage.contains("Found duplicate column(s) b, b: `b`;"))
}
}
+
+  test("SPARK-27194 SPARK-29302: Fix commit collision in dynamic partition overwrite mode") {
+    withSQLConf(SQLConf.PARTITION_OVERWRITE_MODE.key ->
+      SQLConf.PartitionOverwriteMode.DYNAMIC.toString,
+      SQLConf.FILE_COMMIT_PROTOCOL_CLASS.key ->
+        classOf[PartitionFileExistCommitProtocol].getName) {
+      withTempDir { d =>
+        withTable("t") {
+          sql(
+            s"""
+               | create table t(c1 int, p1 int) using parquet partitioned by (p1)
+               | location '${d.getAbsolutePath}'
+            """.stripMargin)
+
+          val df = Seq((1, 2)).toDF("c1", "p1")
+          df.write
+            .partitionBy("p1")
+            .mode("overwrite")
+            .saveAsTable("t")
+          checkAnswer(sql("select * from t"), df)
+        }
+      }
+    }
+  }
+}
+
+/**
+ * A file commit protocol with a pre-created partition file. When trying to overwrite the
+ * partition directory in dynamic partition overwrite mode, a FileAlreadyExistsException
+ * would be raised without the fix for SPARK-27194.
+ */
+private class PartitionFileExistCommitProtocol(
+ jobId: String,
+ path: String,
+ dynamicPartitionOverwrite: Boolean)
+  extends HadoopMapReduceCommitProtocol(jobId, path, dynamicPartitionOverwrite) {
Review comment:
Shall we extend `SQLHadoopMapReduceCommitProtocol`? That's the one used in SQL INSERT.
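
For reference, a minimal sketch of what the suggested change could look like: the same test protocol, but extending `SQLHadoopMapReduceCommitProtocol` (the commit protocol used for SQL INSERT) instead of `HadoopMapReduceCommitProtocol` directly. The `setupJob` override that pre-creates a conflicting file under the dynamic-partition staging directory, and the `p1=2` file name, are assumptions for illustration only; the class body is not shown in this diff.

```scala
import java.io.File

import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce.JobContext

import org.apache.spark.sql.execution.datasources.SQLHadoopMapReduceCommitProtocol

// Sketch only: pre-creates a file in the dynamic-partition staging directory so that,
// without the SPARK-27194 fix, the final commit would hit a FileAlreadyExistsException.
private class PartitionFileExistCommitProtocol(
    jobId: String,
    path: String,
    dynamicPartitionOverwrite: Boolean)
  extends SQLHadoopMapReduceCommitProtocol(jobId, path, dynamicPartitionOverwrite) {

  override def setupJob(jobContext: JobContext): Unit = {
    super.setupJob(jobContext)
    // HadoopMapReduceCommitProtocol stages dynamic-partition output under
    // "<path>/.spark-staging-<jobId>"; planting a file there simulates a leftover from
    // an earlier attempt (the collision scenario this test is about). The "p1=2" name
    // is a hypothetical choice matching the partition value written by the test.
    val stagingDir = new File(new Path(path).toUri.getPath, s".spark-staging-$jobId")
    stagingDir.mkdirs()
    new File(stagingDir, "p1=2").createNewFile()
  }
}
```

Setting `SQLConf.FILE_COMMIT_PROTOCOL_CLASS` to this class, as the test above does, should then exercise the SQL INSERT commit path with the pre-existing file in place.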