cashmand commented on code in PR #44742:
URL: https://github.com/apache/spark/pull/44742#discussion_r1458212982
##########
sql/core/src/test/scala/org/apache/spark/sql/VariantSuite.scala:
##########
@@ -138,4 +138,35 @@ class VariantSuite extends QueryTest with SharedSparkSession {
}
}
}
+
+  test("write partitioned file") {
+    def verifyResult(df: DataFrame): Unit = {
+      val result = df.selectExpr("v").collect()
+        .map(_.get(0).asInstanceOf[VariantVal].toString)
+        .sorted
+        .toSeq
+      val expected = (1 until 10).map(id => "1" * id)
+      assert(result == expected)
+    }
+
+    // At this point, JSON parsing logic is not really implemented. We just construct some number
+    // inputs that are also valid JSON. This exercises passing VariantVal throughout the system.
+    val query = spark.sql("select id, parse_json(repeat('1', id)) as v from range(1, 10)")
+    verifyResult(query)
+
+    // Partitioning by another column should work.
+    withTempDir { dir =>
+      val tempDir = new File(dir, "files").getCanonicalPath
+      query.write.partitionBy("id").parquet(tempDir)
+      verifyResult(spark.read.parquet(tempDir))
+    }
+
+    // Partitioning by a Variant column is not allowed.
+    withTempDir { dir =>
+      val tempDir = new File(dir, "files").getCanonicalPath
+      intercept[AnalysisException] {
+        query.write.partitionBy("v").parquet(tempDir)
Review Comment:
Thanks for the review; I added the extra cases to the test.
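
(Editor's note: the diff above is truncated at the commented line, as these review emails usually are. Below is a minimal sketch of how the negative case plausibly closes, assuming the standard ScalaTest intercept pattern and the surrounding VariantSuite context shown in the diff; the message assertion is illustrative, not the PR's actual code.)

    import java.io.File
    import org.apache.spark.sql.AnalysisException

    // Sketch only: assumes withTempDir and query from the diff above.
    withTempDir { dir =>
      val tempDir = new File(dir, "files").getCanonicalPath
      val e = intercept[AnalysisException] {
        // Variant is not an allowed partition column type, so this should
        // fail during analysis rather than at write time.
        query.write.partitionBy("v").parquet(tempDir)
      }
      // Assumption: the error text mentions partitioning; checking the message
      // guards against the write failing for an unrelated reason.
      assert(e.getMessage.toLowerCase.contains("partition"))
    }

Asserting on the exception type alone, as the diff does, is often sufficient; the extra message check is a common hardening pattern in Spark SQL tests.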
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]