maropu commented on a change in pull request #25699: 
[SPARK-28050][SQL] DataFrameWriter support insertInto a specific table partition
URL: https://github.com/apache/spark/pull/25699#discussion_r321593387
 
 

 ##########
 File path: sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertSuite.scala
 ##########
 @@ -824,4 +824,28 @@ class InsertSuite extends QueryTest with 
TestHiveSingleton with BeforeAndAfter
       }
     }
   }
+
+  test("SPARK-28050: DataFrameWriter support insertInto a specific table 
partition") {
+    withTable("mc_test_pt_table") {
+      import spark.implicits._
+      spark.sql(
+        s"CREATE TABLE mc_test_pt_table (name STRING, num BIGINT) PARTITIONED 
BY (pt1 STRING, pt2 STRING)")
+      val result =
+        """+----+----+
+          || pt1| pt2|
+          |+----+----+
+          ||0101|0202|
+          |+----+----+
+          |only showing top 1 row
+          |""".stripMargin
+      val partionDf = spark.sparkContext
+        .parallelize(0 to 9, 2)
+        .map(f => {
+          (s"name-$f", f)
+        })
+        .toDF("name", "num")
+      partionDf.write.insertInto("mc_test_pt_table", "pt1='0101',pt2='0202'")
 
 Review comment:
   oh, I see. Why not use variable-length params? e.g., `insertInto(tableName: 
String, partitionInfo: String*)`

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to