godfreyhe commented on a change in pull request #13577:
URL: https://github.com/apache/flink/pull/13577#discussion_r507722613
##########
File path: flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/functions/utils/TableSqlFunction.scala
##########
@@ -82,9 +83,16 @@ class TableSqlFunction(
override def toString: String = displayName
- override def getRowType(
+ override def getRowTypeInference: SqlReturnTypeInference = new SqlReturnTypeInference {
+   override def inferReturnType(opBinding: SqlOperatorBinding): RelDataType = {
+     val arguments = convertArguments(opBinding, functionImpl, getNameAsId)
+     getRowType(opBinding.getTypeFactory, arguments)
+   }
+ }
Review comment:
remove this ?
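For context, the added getRowTypeInference follows Calcite's standard SqlReturnTypeInference pattern: the planner hands the implementation an SqlOperatorBinding for the concrete call site, and the implementation derives a RelDataType from it. Below is a minimal, self-contained sketch of that pattern; the always-VARCHAR result and the name varcharInference are illustrative assumptions, not part of this PR.

import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.sql.SqlOperatorBinding
import org.apache.calcite.sql.`type`.{SqlReturnTypeInference, SqlTypeName}

object ReturnTypeInferenceSketch {
  // Illustrative only: an inference that always reports VARCHAR, showing the
  // shape of the anonymous implementation used in the hunk above.
  val varcharInference: SqlReturnTypeInference = new SqlReturnTypeInference {
    override def inferReturnType(opBinding: SqlOperatorBinding): RelDataType =
      // The binding carries the type factory used to construct the result type.
      opBinding.getTypeFactory.createSqlType(SqlTypeName.VARCHAR)
  }
}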
##########
File path: flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/functions/utils/AggSqlFunction.scala
##########
@@ -19,24 +19,25 @@
package org.apache.flink.table.planner.functions.utils
import org.apache.flink.table.api.ValidationException
-import org.apache.flink.table.functions.{AggregateFunction, FunctionIdentifier, TableAggregateFunction, ImperativeAggregateFunction}
+import org.apache.flink.table.functions.{AggregateFunction, FunctionIdentifier, ImperativeAggregateFunction, TableAggregateFunction}
+import org.apache.flink.table.planner.JList
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.functions.bridging.BridgingSqlAggFunction
-import org.apache.flink.table.planner.functions.utils.AggSqlFunction.{createOperandTypeChecker, createOperandTypeInference, createReturnTypeInference}
+import org.apache.flink.table.planner.functions.utils.AggSqlFunction.{createOperandMetadata, createOperandTypeInference, createReturnTypeInference}
import org.apache.flink.table.planner.functions.utils.UserDefinedFunctionUtils._
import org.apache.flink.table.runtime.types.LogicalTypeDataTypeConverter.fromDataTypeToLogicalType
import org.apache.flink.table.types.DataType
import org.apache.flink.table.types.logical.LogicalType
-import org.apache.calcite.rel.`type`.RelDataType
+import org.apache.calcite.rel.`type`.{RelDataType, RelDataTypeFactory}
import org.apache.calcite.sql._
import org.apache.calcite.sql.`type`.SqlOperandTypeChecker.Consistency
import org.apache.calcite.sql.`type`._
import org.apache.calcite.sql.parser.SqlParserPos
import org.apache.calcite.sql.validate.SqlUserDefinedAggFunction
import org.apache.calcite.util.Optionality
-import java.util
+import java.util.Collections
Review comment:
unused import
##########
File path: flink-table/flink-table-runtime-blink/src/main/java/org/apache/flink/table/runtime/util/collections/ByteHashSet.java
##########
@@ -26,7 +26,7 @@
protected boolean[] used;
- public ByteHashSet() {
+ public ByteHashSet(final int expected) {
Review comment:
unnecessary change
##########
File path: flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/metadata/FlinkRelMdPopulationSizeTest.scala
##########
@@ -66,19 +66,25 @@ class FlinkRelMdPopulationSizeTest extends FlinkRelMdHandlerTestBase {
assertEquals(1.0, mq.getPopulationSize(logicalProject, ImmutableBitSet.of()))
assertEquals(50.0, mq.getPopulationSize(logicalProject, ImmutableBitSet.of(0)))
assertEquals(48.0, mq.getPopulationSize(logicalProject, ImmutableBitSet.of(1)))
- assertEquals(16.22, mq.getPopulationSize(logicalProject, ImmutableBitSet.of(2)), 1e-2)
- assertEquals(6.98, mq.getPopulationSize(logicalProject, ImmutableBitSet.of(3)), 1e-2)
- assertEquals(20.09, mq.getPopulationSize(logicalProject, ImmutableBitSet.of(4)), 1e-2)
- assertEquals(20.09, mq.getPopulationSize(logicalProject, ImmutableBitSet.of(5)), 1e-2)
+ assertEquals(16.43531528030365,
Review comment:
this also needs to be updated
##########
File path: flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/metadata/FlinkRelMdUniqueKeys.scala
##########
@@ -58,6 +61,18 @@ class FlinkRelMdUniqueKeys private extends MetadataHandler[BuiltInMetadata.Uniqu
getTableUniqueKeys(null, rel.getTable)
}
+ def getUniqueKeys(
+     rel: TableFunctionScan,
+     mq: RelMetadataQuery,
+     ignoreNulls: Boolean): JSet[ImmutableBitSet] = {
+   if (rel.getInputs.size() == 1
+     && rel.getCall.asInstanceOf[RexCall].getOperator.isInstanceOf[SqlWindowTableFunction]) {
+     mq.getUniqueKeys(rel.getInput(0), ignoreNulls)
+   } else {
+     null
+   }
+ }
Review comment:
is this change necessary ?
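For readers following the metadata change: handlers in FlinkRelMdUniqueKeys are consulted through Calcite's RelMetadataQuery, and a null result means the unique keys are unknown. Below is a minimal sketch of the calling side, assuming rel is any RelNode of a planned query; the object and method names are illustrative assumptions, not part of the PR.

import java.util.{Set => JSet}

import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.metadata.RelMetadataQuery
import org.apache.calcite.util.ImmutableBitSet

object UniqueKeysSketch {
  // Ask Calcite's metadata framework for the unique keys of a relational node.
  // A null result means "unknown", matching the contract of the handler above.
  def uniqueKeysOf(rel: RelNode, ignoreNulls: Boolean = false): JSet[ImmutableBitSet] = {
    val mq: RelMetadataQuery = rel.getCluster.getMetadataQuery
    mq.getUniqueKeys(rel, ignoreNulls)
  }
}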
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]