KurtYoung commented on a change in pull request #10224:
[FLINK-14716][table-planner-blink] Cooperate computed column with push down
rules
URL: https://github.com/apache/flink/pull/10224#discussion_r347076130
##########
File path:
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/schema/TableSourceTable.scala
##########
@@ -70,60 +90,73 @@ class TableSourceTable[T](
" via DefinedRowtimeAttributes interface.")
}
- // TODO implements this
- // TableSourceUtil.validateTableSource(tableSource)
-
- override def getRowType(typeFactory: RelDataTypeFactory): RelDataType = {
- val factory = typeFactory.asInstanceOf[FlinkTypeFactory]
- val (fieldNames, fieldTypes) = TableSourceUtil.getFieldNameType(
- catalogTable.getSchema,
- tableSource,
- selectedFields,
- streaming = isStreamingMode)
- // patch rowtime field according to WatermarkSpec
- val patchedTypes = if (isStreamingMode && watermarkSpec.isDefined) {
- // TODO: [FLINK-14473] we only support top-level rowtime attribute right
now
- val rowtime = watermarkSpec.get.getRowtimeAttribute
- if (rowtime.contains(".")) {
- throw new TableException(
- s"Nested field '$rowtime' as rowtime attribute is not supported
right now.")
- }
- val idx = fieldNames.indexOf(rowtime)
- val originalType = fieldTypes(idx).asInstanceOf[TimestampType]
- val rowtimeType = new TimestampType(
- originalType.isNullable,
- TimestampKind.ROWTIME,
- originalType.getPrecision)
- fieldTypes.patch(idx, Seq(rowtimeType), 1)
+ override def toRel(context: RelOptTable.ToRelContext): RelNode = {
+ val cluster = context.getCluster
+ if (columnExprs.isEmpty) {
+ LogicalTableScan.create(cluster, this)
} else {
- fieldTypes
+ if (!context.isInstanceOf[FlinkToRelContext]) {
+ // If the transform comes from a RelOptRule,
+ // returns the scan directly.
+ return LogicalTableScan.create(cluster, this)
+ }
+ // Get row type of physical fields.
+ val physicalFields = getRowType
+ .getFieldList
+ .filter(f => !columnExprs.contains(f.getName))
+ .toList
+ val scanRowType =
relOptSchema.getTypeFactory.createStructType(physicalFields)
+ // Copy this table with physical scan row type.
+ val newRelTable = new TableSourceTable(relOptSchema,
+ names,
+ scanRowType,
+ tableSource,
+ isStreamingMode,
+ statistic,
+ selectedFields,
+ catalogTable)
+ val scan = LogicalTableScan.create(cluster, newRelTable)
+ val toRelContext = context.asInstanceOf[FlinkToRelContext]
+ val relBuilder = toRelContext.createRelBuilder()
+ val fieldNames = rowType.getFieldNames.asScala
+ val fieldExprs = fieldNames
+ .map { name =>
+ if (columnExprs.contains(name)) {
+ columnExprs(name)
+ } else {
+ name
+ }
+ }.toArray
+ val rexNodes = toRelContext
+ .createSqlExprToRexConverter(scanRowType)
+ .convertToRexNodes(fieldExprs)
+ relBuilder.push(scan)
+ .projectNamed(rexNodes.toList, fieldNames, true)
+ .build()
}
- factory.buildRelNodeRowType(fieldNames, patchedTypes)
}
+ override def getQualifiedName: JList[String] =
explainSourceAsString(tableSource)
+
/**
* Creates a copy of this table, changing statistic.
*
* @param statistic A new FlinkStatistic.
* @return Copy of this table, substituting statistic.
*/
override def copy(statistic: FlinkStatistic): TableSourceTable[T] = {
- new TableSourceTable(tableSource, isStreamingMode, statistic, catalogTable)
+ new TableSourceTable(relOptSchema, names, rowType, tableSource,
isStreamingMode,
+ statistic, catalogTable)
}
- /**
- * Returns statistics of current table.
- */
- override def getStatistic: FlinkStatistic = statistic
-
/**
* Replaces table source with the given one, and create a new table source
table.
*
* @param tableSource tableSource to replace.
* @return new TableSourceTable
*/
def replaceTableSource(tableSource: TableSource[T]): TableSourceTable[T] = {
 Review comment:
   Should this method be renamed to `copy`, for consistency with the existing `copy(statistic: FlinkStatistic)` overload? Both create a new `TableSourceTable` with one field replaced.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services