jingz-db commented on code in PR #48686:
URL: https://github.com/apache/spark/pull/48686#discussion_r1821321457
##########
sql/core/src/test/scala/org/apache/spark/sql/streaming/TransformWithStateInitialStateSuite.scala:
##########
@@ -497,4 +573,126 @@ class TransformWithStateInitialStateSuite extends
StateStoreMetricsTest
)
}
}
+
+ testInitialStateWithStateDataSource(true) { (valDf, listDf, mapDf,
inputData) =>
+ val valueDf = valDf.selectExpr("key.value AS groupingKey", "value.value AS
value")
+ val flattenListDf = listDf
+ .selectExpr("key.value AS groupingKey", "list_element.value AS
listValue")
+ val flattenMapDf = mapDf
+ .selectExpr(
+ "key.value AS groupingKey",
+ "user_map_key.value AS userMapKey",
+ "user_map_value.value AS userMapValue")
+ val df_joined =
+ valueDf.unionByName(flattenListDf, true)
+ .unionByName(flattenMapDf, true)
+ val kvDataSet = inputData.toDS().groupByKey(x => x.key)
+ val initDf = df_joined.as[UnionInitialStateRow].groupByKey(x =>
x.groupingKey)
+ kvDataSet.transformWithState(
+ new InitialStatefulProcessorWithStateDataSource(),
+ TimeMode.None(), OutputMode.Append(), initDf).toDF()
+ }
+
+ testInitialStateWithStateDataSource(false) { (valDf, listDf, mapDf,
inputData) =>
+ val valueDf = valDf.selectExpr("key.value AS groupingKey", "value.value AS
value")
+ val unflattenListDf = listDf
+ .selectExpr("key.value AS groupingKey",
+ "list_value.value as listValue")
+ val unflattenMapDf = mapDf
+ .selectExpr(
+ "key.value AS groupingKey",
+ "map_from_entries(transform(map_entries(map_value), x -> " +
+ "struct(x.key.value, x.value.value))) as mapValue")
+ val df_joined =
+ valueDf.unionByName(unflattenListDf, true)
+ .unionByName(unflattenMapDf, true)
+ val kvDataSet = inputData.toDS().groupByKey(x => x.key)
+ val initDf = df_joined.as[UnionUnflattenInitialStateRow].groupByKey(x =>
x.groupingKey)
+ kvDataSet.transformWithState(
+ new InitialStatefulProcessorWithUnflattenStateDataSource(),
+ TimeMode.None(), OutputMode.Append(), initDf).toDF()
+ }
+
+ private def testInitialStateWithStateDataSource(
+ flattenOption: Boolean)
+ (startQuery: (DataFrame, DataFrame, DataFrame,
+ MemoryStream[InitInputRow]) => DataFrame): Unit = {
+ Seq(
+ (TransformWithStateSuiteUtils.NUM_SHUFFLE_PARTITIONS.toString, "1"),
+ (TransformWithStateSuiteUtils.NUM_SHUFFLE_PARTITIONS.toString,
+ TransformWithStateSuiteUtils.NUM_SHUFFLE_PARTITIONS.toString)
Review Comment:
This was meant to pass different partition numbers for the 1st and 2nd streams,
but the config was never actually set; add config-setting lines that cover both
differing and identical numbers of partitions.
##########
sql/core/src/test/scala/org/apache/spark/sql/streaming/TransformWithStateInitialStateSuite.scala:
##########
@@ -83,6 +98,59 @@ abstract class StatefulProcessorWithInitialStateTestClass[V]
}
}
+/**
+ * Stateful processor that will take a union dataframe output from state data
source reader,
+ * with flattened state data source rows.
+ */
+class InitialStatefulProcessorWithStateDataSource
+ extends StatefulProcessorWithInitialStateTestClass[UnionInitialStateRow] {
+ override def handleInitialState(
+ key: String, initialState: UnionInitialStateRow, timerValues:
TimerValues): Unit = {
+ val stateVar = {
+ if (initialState.value.isDefined) "value"
Review Comment:
Done!
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]