[
https://issues.apache.org/jira/browse/FLINK-22157?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17319853#comment-17319853
]
godfrey he edited comment on FLINK-22157 at 6/29/21, 3:06 AM:
--------------------------------------------------------------
Fixed in 1.13.0: 92fbe7f1fe5f0eade036b4184cdbab8f9b791647
Fixed in 1.12.5: d1b8c5fd54e1d104387ef6acb94b9a9698378ed3
was (Author: godfreyhe):
Fixed in 1.13.0: 92fbe7f1fe5f0eade036b4184cdbab8f9b791647
Fixed in 1.12.4: d1b8c5fd54e1d104387ef6acb94b9a9698378ed3
> Join & Select a part of composite primary key will cause
> ArrayIndexOutOfBoundsException
> ---------------------------------------------------------------------------------------
>
> Key: FLINK-22157
> URL: https://issues.apache.org/jira/browse/FLINK-22157
> Project: Flink
> Issue Type: Bug
> Components: Table SQL / Planner
> Affects Versions: 1.13.0
> Reporter: Caizhi Weng
> Assignee: Caizhi Weng
> Priority: Major
> Labels: pull-request-available
> Fix For: 1.13.0
>
>
> Add the following test case to
> {{org.apache.flink.table.planner.plan.stream.sql.join.JoinTest}} to reproduce
> this bug.
> {code:scala}
> @Test
> def myTest(): Unit = {
> util.tableEnv.executeSql(
> """
> |CREATE TABLE MyTable (
> | pk1 INT,
> | pk2 BIGINT,
> | PRIMARY KEY (pk1, pk2) NOT ENFORCED
> |) WITH (
> | 'connector'='values'
> |)
> |""".stripMargin)
> util.verifyExecPlan("SELECT A.a1 FROM A LEFT JOIN MyTable ON A.a1 =
> MyTable.pk1")
> }
> {code}
> The exception stack is
> {code}
> java.lang.RuntimeException: Error while applying rule StreamPhysicalJoinRule,
> args [rel#141:FlinkLogicalJoin.LOGICAL.any.None:
> 0.[NONE].[NONE](left=RelSubset#139,right=RelSubset#140,condition==($0,
> $1),joinType=left), rel#138:FlinkLogicalCalc.LOGICAL.any.None:
> 0.[NONE].[NONE](input=RelSubset#137,select=a1),
> rel#121:FlinkLogicalTableSourceScan.LOGICAL.any.None:
> 0.[NONE].[NONE](table=[default_catalog, default_database, MyTable,
> project=[pk1]],fields=pk1)]
> at
> org.apache.calcite.plan.volcano.VolcanoRuleCall.onMatch(VolcanoRuleCall.java:256)
> at
> org.apache.calcite.plan.volcano.IterativeRuleDriver.drive(IterativeRuleDriver.java:58)
> at
> org.apache.calcite.plan.volcano.VolcanoPlanner.findBestExp(VolcanoPlanner.java:510)
> at
> org.apache.calcite.tools.Programs$RuleSetProgram.run(Programs.java:312)
> at
> org.apache.flink.table.planner.plan.optimize.program.FlinkVolcanoProgram.optimize(FlinkVolcanoProgram.scala:64)
> at
> org.apache.flink.table.planner.plan.optimize.program.FlinkChainedProgram$$anonfun$optimize$1.apply(FlinkChainedProgram.scala:62)
> at
> org.apache.flink.table.planner.plan.optimize.program.FlinkChainedProgram$$anonfun$optimize$1.apply(FlinkChainedProgram.scala:58)
> at
> scala.collection.TraversableOnce$$anonfun$foldLeft$1.apply(TraversableOnce.scala:157)
> at
> scala.collection.TraversableOnce$$anonfun$foldLeft$1.apply(TraversableOnce.scala:157)
> at scala.collection.Iterator$class.foreach(Iterator.scala:891)
> at scala.collection.AbstractIterator.foreach(Iterator.scala:1334)
> at scala.collection.IterableLike$class.foreach(IterableLike.scala:72)
> at scala.collection.AbstractIterable.foreach(Iterable.scala:54)
> at
> scala.collection.TraversableOnce$class.foldLeft(TraversableOnce.scala:157)
> at scala.collection.AbstractTraversable.foldLeft(Traversable.scala:104)
> at
> org.apache.flink.table.planner.plan.optimize.program.FlinkChainedProgram.optimize(FlinkChainedProgram.scala:57)
> at
> org.apache.flink.table.planner.plan.optimize.StreamCommonSubGraphBasedOptimizer.optimizeTree(StreamCommonSubGraphBasedOptimizer.scala:163)
> at
> org.apache.flink.table.planner.plan.optimize.StreamCommonSubGraphBasedOptimizer.doOptimize(StreamCommonSubGraphBasedOptimizer.scala:79)
> at
> org.apache.flink.table.planner.plan.optimize.CommonSubGraphBasedOptimizer.optimize(CommonSubGraphBasedOptimizer.scala:77)
> at
> org.apache.flink.table.planner.delegation.PlannerBase.optimize(PlannerBase.scala:281)
> at
> org.apache.flink.table.planner.utils.TableTestUtilBase.assertPlanEquals(TableTestBase.scala:889)
> at
> org.apache.flink.table.planner.utils.TableTestUtilBase.doVerifyPlan(TableTestBase.scala:780)
> at
> org.apache.flink.table.planner.utils.TableTestUtilBase.verifyExecPlan(TableTestBase.scala:583)
> at
> org.apache.flink.table.planner.plan.stream.sql.join.JoinTest.myTest(JoinTest.scala:300)
> at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
> at
> sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
> at
> sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
> at java.lang.reflect.Method.invoke(Method.java:498)
> at
> org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:50)
> at
> org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
> at
> org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:47)
> at
> org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
> at
> org.junit.rules.ExpectedException$ExpectedExceptionStatement.evaluate(ExpectedException.java:239)
> at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
> at org.junit.rules.ExternalResource$1.evaluate(ExternalResource.java:48)
> at org.junit.rules.RunRules.evaluate(RunRules.java:20)
> at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:325)
> at
> org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:78)
> at
> org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57)
> at org.junit.runners.ParentRunner$3.run(ParentRunner.java:290)
> at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:71)
> at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:288)
> at org.junit.runners.ParentRunner.access$000(ParentRunner.java:58)
> at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:268)
> at org.junit.runners.ParentRunner.run(ParentRunner.java:363)
> at org.junit.runner.JUnitCore.run(JUnitCore.java:137)
> at
> com.intellij.junit4.JUnit4IdeaTestRunner.startRunnerWithArgs(JUnit4IdeaTestRunner.java:68)
> at
> com.intellij.rt.junit.IdeaTestRunner$Repeater.startRunnerWithArgs(IdeaTestRunner.java:33)
> at
> com.intellij.rt.junit.JUnitStarter.prepareStreamsAndStart(JUnitStarter.java:230)
> at com.intellij.rt.junit.JUnitStarter.main(JUnitStarter.java:58)
> Caused by: java.lang.RuntimeException: Error occurred while applying rule
> StreamPhysicalJoinRule
> at
> org.apache.calcite.plan.volcano.VolcanoRuleCall.transformTo(VolcanoRuleCall.java:161)
> at
> org.apache.calcite.plan.RelOptRuleCall.transformTo(RelOptRuleCall.java:268)
> at
> org.apache.calcite.plan.RelOptRuleCall.transformTo(RelOptRuleCall.java:283)
> at
> org.apache.flink.table.planner.plan.rules.physical.stream.StreamPhysicalJoinRuleBase.onMatch(StreamPhysicalJoinRuleBase.scala:90)
> at
> org.apache.calcite.plan.volcano.VolcanoRuleCall.onMatch(VolcanoRuleCall.java:229)
> ... 49 more
> Caused by: java.lang.ArrayIndexOutOfBoundsException: -1
> at org.apache.calcite.util.ImmutableBitSet.of(ImmutableBitSet.java:113)
> at
> org.apache.flink.table.planner.plan.metadata.FlinkRelMdUniqueKeys.getTableUniqueKeys(FlinkRelMdUniqueKeys.scala:76)
> at
> org.apache.flink.table.planner.plan.metadata.FlinkRelMdUniqueKeys.getUniqueKeys(FlinkRelMdUniqueKeys.scala:59)
> at GeneratedMetadataHandler_UniqueKeys.getUniqueKeys_$(Unknown Source)
> at GeneratedMetadataHandler_UniqueKeys.getUniqueKeys(Unknown Source)
> at
> org.apache.calcite.rel.metadata.RelMetadataQuery.getUniqueKeys(RelMetadataQuery.java:464)
> at
> org.apache.flink.table.planner.plan.metadata.FlinkRelMdUniqueKeys.getUniqueKeys(FlinkRelMdUniqueKeys.scala:591)
> at GeneratedMetadataHandler_UniqueKeys.getUniqueKeys_$(Unknown Source)
> at GeneratedMetadataHandler_UniqueKeys.getUniqueKeys(Unknown Source)
> at
> org.apache.calcite.rel.metadata.RelMetadataQuery.getUniqueKeys(RelMetadataQuery.java:464)
> at
> org.apache.calcite.rel.metadata.RelMetadataQuery.getUniqueKeys(RelMetadataQuery.java:445)
> at
> org.apache.flink.table.planner.plan.nodes.physical.stream.StreamPhysicalJoin.getUniqueKeys(StreamPhysicalJoin.scala:111)
> at
> org.apache.flink.table.planner.plan.nodes.physical.stream.StreamPhysicalJoin.explainTerms(StreamPhysicalJoin.scala:107)
> at
> org.apache.calcite.rel.AbstractRelNode.getDigestItems(AbstractRelNode.java:409)
> at
> org.apache.calcite.rel.AbstractRelNode.deepHashCode(AbstractRelNode.java:391)
> at
> org.apache.calcite.rel.AbstractRelNode$InnerRelDigest.hashCode(AbstractRelNode.java:443)
> at java.util.HashMap.hash(HashMap.java:339)
> at java.util.HashMap.get(HashMap.java:557)
> at
> org.apache.calcite.plan.volcano.VolcanoPlanner.registerImpl(VolcanoPlanner.java:1150)
> at
> org.apache.calcite.plan.volcano.VolcanoPlanner.register(VolcanoPlanner.java:589)
> at
> org.apache.calcite.plan.volcano.VolcanoPlanner.ensureRegistered(VolcanoPlanner.java:604)
> at
> org.apache.calcite.plan.volcano.VolcanoRuleCall.transformTo(VolcanoRuleCall.java:148)
> ... 53 more
> {code}
> This is because {{FlinkRelMdUniqueKeys#getTableUniqueKeys}} does not handle
> the case where projection push-down causes only a part of the composite
> primary key to be selected.
--
This message was sent by Atlassian Jira
(v8.3.4#803005)