dongjoon-hyun commented on a change in pull request #24260: [SPARK-27329][SQL]
Pruning nested field in map of map key and value from object serializers
URL: https://github.com/apache/spark/pull/24260#discussion_r270695803
##########
File path:
sql/core/src/test/scala/org/apache/spark/sql/DatasetOptimizationSuite.scala
##########
@@ -142,4 +142,25 @@ class DatasetOptimizationSuite extends QueryTest with
SharedSQLContext {
checkAnswer(df1, Seq(Row("1"), Row("2"), Row("3")))
}
}
+
+ test("Pruned nested serializers: map of map value") {
+ withSQLConf(SQLConf.SERIALIZER_NESTED_SCHEMA_PRUNING_ENABLED.key ->
"true") {
+ val mapData = Seq((Map(("k", Map(("k2", ("a_1", 11))))), 1),
+ (Map(("k", Map(("k2", ("b_1", 22))))), 2), (Map(("k", Map(("k2",
("c_1", 33))))), 3))
+ val mapDs = mapData.toDS().map(t => (t._1, t._2 + 1))
+ val df1 = mapDs.select("_1.k.k2._1")
+ testSerializer(df1, Seq(Seq("_1")))
+ }
+ }
+
+ test("Pruned nested serializers: map of map key") {
+ withSQLConf(SQLConf.SERIALIZER_NESTED_SCHEMA_PRUNING_ENABLED.key ->
"true") {
+ val mapData = Seq((Map((Map((("1", 1), "val1")), "a_1")), 1),
+ (Map((Map((("2", 2), "val2")), "b_1")), 2), (Map((Map((("3", 3),
"val3")), "c_1")), 3))
+ val mapDs = mapData.toDS().map(t => (t._1, t._2 + 1))
+ val df1 = mapDs.select(expr("map_keys(map_keys(_1)[0])._1[0]"))
Review comment:
Ditto: since only one DataFrame is used here, rename `df1` to `df`.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]