KurtYoung commented on a change in pull request #9909: [FLINK-14381][table]
Partition field names should be got from CatalogTable instead of source/sink
URL: https://github.com/apache/flink/pull/9909#discussion_r338907291
##########
File path:
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/batch/BatchExecSinkRule.scala
##########
@@ -44,32 +42,36 @@ class BatchExecSinkRule extends ConverterRule(
val sinkNode = rel.asInstanceOf[FlinkLogicalSink]
val newTrait = rel.getTraitSet.replace(FlinkConventions.BATCH_PHYSICAL)
var requiredTraitSet =
sinkNode.getInput.getTraitSet.replace(FlinkConventions.BATCH_PHYSICAL)
- sinkNode.sink match {
- case partitionSink: PartitionableTableSink
- if partitionSink.getPartitionFieldNames != null &&
- partitionSink.getPartitionFieldNames.nonEmpty =>
- val partitionFields = partitionSink.getPartitionFieldNames
- val partitionIndices = partitionFields
- .map(partitionSink.getTableSchema.getFieldNames.indexOf(_))
- // validate
- partitionIndices.foreach { idx =>
- if (idx < 0) {
- throw new TableException(s"Partitionable sink ${sinkNode.sinkName}
field " +
- s"${partitionFields.get(idx)} must be in the schema.")
- }
- }
+ if (sinkNode.catalogTable != null && sinkNode.catalogTable.isPartitioned) {
+ sinkNode.sink match {
+ case partitionSink: PartitionableTableSink =>
+ val partKeys = sinkNode.catalogTable.getPartitionKeys
+ if (!partKeys.isEmpty) {
+ val partitionIndices =
+
partKeys.map(partitionSink.getTableSchema.getFieldNames.indexOf(_))
+ // validate
Review comment:
Do we really need to validate again here? We could move all of the validation logic into
`TableSinkUtils:validate` instead.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services