arunmahadevan commented on a change in pull request #23576: [SPARK-26655] [SS]
Support multiple aggregates in append mode
URL: https://github.com/apache/spark/pull/23576#discussion_r250818472
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/WatermarkTracker.scala
##########
@@ -80,34 +80,84 @@ case object MaxWatermark extends MultipleWatermarkPolicy {
/** Tracks the watermark value of a streaming query based on a given `policy`
*/
case class WatermarkTracker(policy: MultipleWatermarkPolicy) extends Logging {
private val operatorToWatermarkMap = mutable.HashMap[Int, Long]()
+ private val statefulOperatorToWatermark = mutable.HashMap[Long, Long]()
+ private val statefulOperatorToEventTimeMap = mutable.HashMap[Long,
mutable.HashMap[Int, Long]]()
+
private var globalWatermarkMs: Long = 0
+ private def updateWaterMarkMap(eventTimeExecs: Seq[EventTimeWatermarkExec],
+ map: mutable.HashMap[Int, Long]): Unit = {
+ eventTimeExecs.zipWithIndex.foreach {
+ case (e, index) if e.eventTimeStats.value.count > 0 =>
+ logDebug(s"Observed event time stats $index:
${e.eventTimeStats.value}")
+ val newWatermarkMs = e.eventTimeStats.value.max - e.delayMs
+ val prevWatermarkMs = map.get(index)
+ if (prevWatermarkMs.isEmpty || newWatermarkMs > prevWatermarkMs.get) {
+ map.put(index, newWatermarkMs)
+ }
+
+ // Populate 0 if we haven't seen any data yet for this watermark node.
+ case (_, index) =>
+ if (!map.isDefinedAt(index)) {
+ map.put(index, 0)
+ }
+ }
+ }
+
def setWatermark(newWatermarkMs: Long): Unit = synchronized {
globalWatermarkMs = newWatermarkMs
}
+ def setOperatorWatermarks(operatorWatermarks: Map[Long, Long]): Unit =
synchronized {
+ statefulOperatorToWatermark ++= operatorWatermarks
+ }
+
def updateWatermark(executedPlan: SparkPlan): Unit = synchronized {
val watermarkOperators = executedPlan.collect {
case e: EventTimeWatermarkExec => e
}
if (watermarkOperators.isEmpty) return
- watermarkOperators.zipWithIndex.foreach {
- case (e, index) if e.eventTimeStats.value.count > 0 =>
- logDebug(s"Observed event time stats $index:
${e.eventTimeStats.value}")
- val newWatermarkMs = e.eventTimeStats.value.max - e.delayMs
- val prevWatermarkMs = operatorToWatermarkMap.get(index)
- if (prevWatermarkMs.isEmpty || newWatermarkMs > prevWatermarkMs.get) {
- operatorToWatermarkMap.put(index, newWatermarkMs)
- }
+ updateWaterMarkMap(watermarkOperators, operatorToWatermarkMap)
- // Populate 0 if we haven't seen any data yet for this watermark node.
- case (_, index) =>
- if (!operatorToWatermarkMap.isDefinedAt(index)) {
- operatorToWatermarkMap.put(index, 0)
- }
+ // compute the per stateful operator watermark
+ val statefulOperators = executedPlan.collect {
+ case s: StatefulOperator => s
}
+ statefulOperators.foreach(statefulOperator => {
+ // find the first event time child node(s)
+ val eventTimeExecs = statefulOperator match {
+ case op: UnaryExecNode =>
+ op.collectFirst {
+ case e: EventTimeWatermarkExec => e
+ }.map(Seq(_)).getOrElse(Seq())
+ case op: BinaryExecNode =>
+ val left = op.left.collectFirst {
+ case e: EventTimeWatermarkExec => e
+ }.map(Seq(_)).getOrElse(Seq())
+ val right = op.right.collectFirst {
+ case e: EventTimeWatermarkExec => e
+ }.map(Seq(_)).getOrElse(Seq())
+ left ++ right
+ }
+
+ // compute watermark for the stateful operator node
+ statefulOperator.stateInfo.foreach(state => {
+ if (eventTimeExecs.nonEmpty) {
+ updateWaterMarkMap(eventTimeExecs,
+ statefulOperatorToEventTimeMap.getOrElseUpdate(state.operatorId,
+ new mutable.HashMap[Int, Long]()))
+ val newWatermarkMs =
statefulOperatorToEventTimeMap(state.operatorId).values.toSeq.min
Review comment:
I think with a global watermark there needs to be a way to make progress
across all stateful operators, and it looks like `min` did not work in all
cases. But I am not sure it would make sense to choose `max` for the
individual operator-level watermark. If the event times in one of the inputs
are lagging, the best approach would be to not advance the watermark beyond
them. Watermarks should ideally advance only once all input data has been
observed, and choosing `max` would cause more events to be discarded as late
data.
IMO we can just choose `min` here, but I would like to hear opinions from
other reviewers as well.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]