lincoln-lil commented on code in PR #20324:
URL: https://github.com/apache/flink/pull/20324#discussion_r939629582
##########
flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/common/CommonExecLookupJoin.java:
##########
@@ -329,8 +340,59 @@ private Transformation<RowData>
createSyncLookupJoinWithState(
isLeftOuterJoin,
isObjectReuseEnabled);
- // TODO then wrapper it into a keyed lookup function with state
FLINK-28568
- throw new UnsupportedOperationException("to be supported");
+ KeyedLookupJoinWrapper keyedLookupJoinWrapper =
+ new KeyedLookupJoinWrapper(
+ (LookupJoinRunner) processFunction,
+ StateConfigUtil.createTtlConfig(
+
config.get(ExecutionConfigOptions.IDLE_STATE_RETENTION).toMillis()),
+ InternalSerializers.create(tableSourceRowType),
+ lookupKeyContainsPrimaryKey);
+
+ KeyedProcessOperator<RowData, RowData, RowData> operator =
+ new KeyedProcessOperator<>(keyedLookupJoinWrapper);
+
+ List<Integer> refKeys =
+ allLookupKeys.entrySet().stream()
+ .filter(
+ key ->
+ !(key.getValue()
+ instanceof
LookupJoinUtil.ConstantLookupKey))
+ .map(key -> ((LookupJoinUtil.FieldRefLookupKey)
key.getValue()).index)
+ .collect(Collectors.toList());
+ RowDataKeySelector keySelector;
+
+ int parallelism = inputTransformation.getParallelism();
+ if (refKeys.isEmpty()) {
+ // all lookup keys are constants, then use an empty key selector
+ keySelector = EmptyRowDataKeySelector.INSTANCE;
+ // single parallelism for empty key shuffle
+ parallelism = 1;
+ } else {
+ // make it a deterministic asc order
+ Collections.sort(refKeys);
+ keySelector =
+ KeySelectorUtil.getRowDataSelector(
+ classLoader,
+
refKeys.stream().mapToInt(Integer::intValue).toArray(),
+ InternalTypeInfo.of(inputRowType));
+ }
+ final KeyGroupStreamPartitioner<RowData, RowData> partitioner =
+ new KeyGroupStreamPartitioner<>(
+ keySelector,
KeyGroupRangeAssignment.DEFAULT_LOWER_BOUND_MAX_PARALLELISM);
+ Transformation<RowData> partitionedTransform =
+ new PartitionTransformation<>(inputTransformation,
partitioner);
+ partitionedTransform.setParallelism(parallelism);
+
+ OneInputTransformation<RowData, RowData> transform =
+ ExecNodeUtil.createOneInputTransformation(
+ partitionedTransform,
+ createTransformationMeta(LOOKUP_JOIN_TRANSFORMATION,
config),
Review Comment:
ok
##########
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/runtime/stream/sql/AsyncLookupJoinITCase.scala:
##########
@@ -268,6 +268,35 @@ class AsyncLookupJoinITCase(
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
+ @Test
+ def testAggAndAsyncLeftJoinWithTryResolveMode(): Unit = {
Review Comment:
This case can cover the change: the legacy source can provide both sync and
async functions, so it can fall back to a sync lookup function with state.
##########
flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/plan/nodes/exec/common/CommonExecLookupJoin.java:
##########
@@ -329,8 +340,59 @@ private Transformation<RowData>
createSyncLookupJoinWithState(
isLeftOuterJoin,
isObjectReuseEnabled);
- // TODO then wrapper it into a keyed lookup function with state
FLINK-28568
- throw new UnsupportedOperationException("to be supported");
+ KeyedLookupJoinWrapper keyedLookupJoinWrapper =
+ new KeyedLookupJoinWrapper(
+ (LookupJoinRunner) processFunction,
+ StateConfigUtil.createTtlConfig(
+
config.get(ExecutionConfigOptions.IDLE_STATE_RETENTION).toMillis()),
+ InternalSerializers.create(tableSourceRowType),
+ lookupKeyContainsPrimaryKey);
+
+ KeyedProcessOperator<RowData, RowData, RowData> operator =
+ new KeyedProcessOperator<>(keyedLookupJoinWrapper);
+
+ List<Integer> refKeys =
+ allLookupKeys.entrySet().stream()
+ .filter(
Review Comment:
yes
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]