Github user xccui commented on a diff in the pull request:

    https://github.com/apache/flink/pull/4625#discussion_r140255052
  
    --- Diff: flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/join/TimeBoundedStreamInnerJoin.scala ---
    @@ -0,0 +1,442 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + *
    + *     http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.flink.table.runtime.join
    +
    +import java.util.{ArrayList, List => JList}
    +
    +import org.apache.flink.api.common.functions.FlatJoinFunction
    +import org.apache.flink.api.common.state._
    +import org.apache.flink.api.common.typeinfo.{BasicTypeInfo, TypeInformation}
    +import org.apache.flink.api.java.typeutils.ListTypeInfo
    +import org.apache.flink.configuration.Configuration
    +import org.apache.flink.streaming.api.functions.co.CoProcessFunction
    +import org.apache.flink.table.codegen.Compiler
    +import org.apache.flink.table.runtime.CRowWrappingCollector
    +import org.apache.flink.table.runtime.join.JoinTimeIndicator.JoinTimeIndicator
    +import org.apache.flink.table.runtime.types.CRow
    +import org.apache.flink.table.util.Logging
    +import org.apache.flink.types.Row
    +import org.apache.flink.util.Collector
    +
    +/**
    +  * A CoProcessFunction to execute time-bounded stream inner-join.
    +  * Two kinds of time criteria:
    +  * "L.time between R.time + X and R.time + Y" or "R.time between L.time - 
Y and L.time - X".
    +  *
    +  * @param leftLowerBound  the lower bound for the left stream (X in the criteria)
    +  * @param leftUpperBound  the upper bound for the left stream (Y in the criteria)
    +  * @param allowedLateness the lateness allowed for the two streams
    +  * @param leftType        the input type of the left stream
    +  * @param rightType       the input type of the right stream
    +  * @param genJoinFuncName the function name for the other non-equi conditions
    +  * @param genJoinFuncCode the function code for the other non-equi conditions
    +  * @param timeIndicator   indicates whether the join is on proctime or rowtime
    +  *
    +  */
    +abstract class TimeBoundedStreamInnerJoin(
    +    private val leftLowerBound: Long,
    +    private val leftUpperBound: Long,
    +    private val allowedLateness: Long,
    +    private val leftType: TypeInformation[Row],
    +    private val rightType: TypeInformation[Row],
    +    private val genJoinFuncName: String,
    +    private val genJoinFuncCode: String,
    +    private val leftTimeIdx: Int,
    +    private val rightTimeIdx: Int,
    +    private val timeIndicator: JoinTimeIndicator)
    +    extends CoProcessFunction[CRow, CRow, CRow]
    +    with Compiler[FlatJoinFunction[Row, Row, Row]]
    +    with Logging {
    +
    +  private var cRowWrapper: CRowWrappingCollector = _
    +
    +  // the join function for other conditions
    +  private var joinFunction: FlatJoinFunction[Row, Row, Row] = _
    +
    +  // cache to store rows from the left stream
    +  private var leftCache: MapState[Long, JList[Row]] = _
    +  // cache to store rows from the right stream
    +  private var rightCache: MapState[Long, JList[Row]] = _
    +
    +  // state to record the timer on the left stream. 0 means no timer set
    +  private var leftTimerState: ValueState[Long] = _
    +  // state to record the timer on the right stream. 0 means no timer set
    +  private var rightTimerState: ValueState[Long] = _
    +
    +  private val leftRelativeSize: Long = -leftLowerBound
    +  private val rightRelativeSize: Long = leftUpperBound
    +
    +  protected var leftOperatorTime: Long = 0L
    +  protected var rightOperatorTime: Long = 0L
    +
    +  // For delayed cleanup
    +  private val cleanupDelay = (leftRelativeSize + rightRelativeSize) / 2
    +
    +  if (allowedLateness < 0) {
    +    throw new IllegalArgumentException("The allowed lateness must be non-negative.")
    +  }
    +
    +  /**
    +    * Get the maximum interval between receiving a row and emitting it (as part of a joined result).
    +    * Only reasonable for row time join.
    +    *
    +    * @return the maximum delay for the outputs
    +    */
    +  def getMaxOutputDelay: Long = Math.max(leftRelativeSize, rightRelativeSize) + allowedLateness
    +
    +  override def open(config: Configuration) {
    +    LOG.debug(s"Compiling JoinFunction: $genJoinFuncName \n\n " +
    +      s"Code:\n$genJoinFuncCode")
    +    val clazz = compile(
    +      getRuntimeContext.getUserCodeClassLoader,
    +      genJoinFuncName,
    +      genJoinFuncCode)
    +    LOG.debug("Instantiating JoinFunction.")
    +    joinFunction = clazz.newInstance()
    +
    +    cRowWrapper = new CRowWrappingCollector()
    +    cRowWrapper.setChange(true)
    +
    +    // Initialize the data caches.
    +    val leftListTypeInfo: TypeInformation[JList[Row]] = new ListTypeInfo[Row](leftType)
    +    val leftStateDescriptor: MapStateDescriptor[Long, JList[Row]] =
    +      new MapStateDescriptor[Long, JList[Row]](
    +        timeIndicator + "InnerJoinLeftCache",
    +        BasicTypeInfo.LONG_TYPE_INFO.asInstanceOf[TypeInformation[Long]],
    +        leftListTypeInfo)
    +    leftCache = getRuntimeContext.getMapState(leftStateDescriptor)
    +
    +    val rightListTypeInfo: TypeInformation[JList[Row]] = new ListTypeInfo[Row](rightType)
    +    val rightStateDescriptor: MapStateDescriptor[Long, JList[Row]] =
    +      new MapStateDescriptor[Long, JList[Row]](
    +        timeIndicator + "InnerJoinRightCache",
    +        BasicTypeInfo.LONG_TYPE_INFO.asInstanceOf[TypeInformation[Long]],
    +        rightListTypeInfo)
    +    rightCache = getRuntimeContext.getMapState(rightStateDescriptor)
    +
    +    // Initialize the timer states.
    +    val leftTimerStateDesc: ValueStateDescriptor[Long] =
    +      new ValueStateDescriptor[Long](timeIndicator + "InnerJoinLeftTimerState", classOf[Long])
    +    leftTimerState = getRuntimeContext.getState(leftTimerStateDesc)
    +
    +    val rightTimerStateDesc: ValueStateDescriptor[Long] =
    +      new ValueStateDescriptor[Long](timeIndicator + "InnerJoinRightTimerState", classOf[Long])
    +    rightTimerState = getRuntimeContext.getState(rightTimerStateDesc)
    +  }
    +
    +  /**
    +    * Process rows from the left stream.
    +    */
    +  override def processElement1(
    +      cRowValue: CRow,
    +      ctx: CoProcessFunction[CRow, CRow, CRow]#Context,
    +      out: Collector[CRow]): Unit = {
    +    updateOperatorTime(ctx)
    +    val rowTime: Long = getTimeForLeftStream(ctx, cRowValue)
    +    val oppositeLowerBound: Long = rowTime - rightRelativeSize
    +    val oppositeUpperBound: Long = rowTime + leftRelativeSize
    +    processElement(
    +      cRowValue,
    +      rowTime,
    +      ctx,
    +      out,
    +      leftOperatorTime,
    +      oppositeLowerBound,
    +      oppositeUpperBound,
    +      rightOperatorTime,
    +      rightTimerState,
    +      leftCache,
    +      rightCache,
    +      leftRow = true
    +    )
    +  }
    +
    +  /**
    +    * Process rows from the right stream.
    +    */
    +  override def processElement2(
    +      cRowValue: CRow,
    +      ctx: CoProcessFunction[CRow, CRow, CRow]#Context,
    +      out: Collector[CRow]): Unit = {
    +    updateOperatorTime(ctx)
    +    val rowTime: Long = getTimeForRightStream(ctx, cRowValue)
    +    val oppositeLowerBound: Long = rowTime - leftRelativeSize
    +    val oppositeUpperBound: Long = rowTime + rightRelativeSize
    +    processElement(
    +      cRowValue,
    +      rowTime,
    +      ctx,
    +      out,
    +      rightOperatorTime,
    +      oppositeLowerBound,
    +      oppositeUpperBound,
    +      leftOperatorTime,
    +      leftTimerState,
    +      rightCache,
    +      leftCache,
    +      leftRow = false
    +    )
    +  }
    +
    +  /**
    +    * Put a row from the input stream into the cache and iterate over the opposite cache to
    +    * output join results meeting the conditions. If there is no timer set for the OPPOSITE
    +    * STREAM, register one.
    +    */
    +  private def processElement(
    +      cRowValue: CRow,
    +      timeForRow: Long,
    +      ctx: CoProcessFunction[CRow, CRow, CRow]#Context,
    +      out: Collector[CRow],
    +      myWatermark: Long,
    +      oppositeLowerBound: Long,
    +      oppositeUpperBound: Long,
    +      oppositeWatermark: Long,
    +      oppositeTimeState: ValueState[Long],
    +      rowListCache: MapState[Long, JList[Row]],
    +      oppositeCache: MapState[Long, JList[Row]],
    +      leftRow: Boolean): Unit = {
    +    cRowWrapper.out = out
    +    val row = cRowValue.row
    +    if (!checkRowOutOfDate(timeForRow, myWatermark)) {
    +      // Put the row into the cache for later use.
    +      var rowList = rowListCache.get(timeForRow)
    +      if (null == rowList) {
    +        rowList = new ArrayList[Row](1)
    +      }
    +      rowList.add(row)
    +      rowListCache.put(timeForRow, rowList)
    --- End diff ---
    
    Hi @fhueske, that sounds like a good idea to me! We could start with a constant granularity and make it dynamic in the future.
    BTW, I'm not sure if you still remember the following comment from FLINK-6233 😄
    > To cope with that, I plan to split the "cache window" into contiguous static panes and expire a whole pane at a time. That way, we may store some extra records, whose time interval is at most the static span of a pane, but we can remove the expired data efficiently.
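    To make that concrete, here is a minimal sketch of the pane idea. It is purely illustrative and not part of this PR: the `paneSize` parameter and the plain in-memory map are assumptions for the sketch, while the real operator would of course keep the panes in `MapState`.
    
    ```scala
    import scala.collection.mutable
    
    // Rows are keyed by the start of a fixed-size pane instead of their exact
    // timestamp, so a whole pane can be dropped once it falls behind the
    // expiration point.
    class PaneCache[T](paneSize: Long) {
      require(paneSize > 0, "paneSize must be positive")
    
      // pane start -> rows whose timestamps fall into [paneStart, paneStart + paneSize)
      private val panes = mutable.Map.empty[Long, mutable.ArrayBuffer[T]]
    
      // Map a row timestamp to the start of its pane.
      private def paneStart(timestamp: Long): Long =
        timestamp - Math.floorMod(timestamp, paneSize)
    
      // Cache a row under its pane rather than its exact timestamp.
      def put(timestamp: Long, row: T): Unit =
        panes.getOrElseUpdate(paneStart(timestamp), mutable.ArrayBuffer.empty[T]) += row
    
      // Drop every pane that ends at or before the expiration time. Rows in a pane
      // that is only partially expired are kept a bit longer -- the "extra records"
      // trade-off mentioned above.
      def expire(expirationTime: Long): Unit = {
        val expired = panes.keys.filter(start => start + paneSize <= expirationTime).toList
        expired.foreach(panes.remove)
      }
    }
    ```
    
    With a pane size of, say, 1000 ms, all rows timestamped 63000 to 63999 share one map entry and are removed together with a single `remove` call once the expiration time passes 64000, instead of one state access per distinct timestamp.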

