KurtYoung commented on a change in pull request #10224: [FLINK-14716][table-planner-blink] Cooperate computed column with push down rules
URL: https://github.com/apache/flink/pull/10224#discussion_r347076833
 
 

 ##########
 File path: flink-table/flink-table-planner-blink/src/main/java/org/apache/flink/table/planner/plan/FlinkCalciteCatalogReader.java
 ##########
 @@ -62,17 +83,197 @@ public FlinkCalciteCatalogReader(
                if (originRelOptTable == null) {
                        return null;
                } else {
-                       // Wrap FlinkTable as FlinkRelOptTable to use in query optimization.
-                       FlinkTable table = originRelOptTable.unwrap(FlinkTable.class);
+                       // Wrap as FlinkPreparingTableBase to use in query optimization.
+                       CatalogSchemaTable table = originRelOptTable.unwrap(CatalogSchemaTable.class);
                        if (table != null) {
-                               return FlinkRelOptTable.create(
-                                       originRelOptTable.getRelOptSchema(),
-                                       originRelOptTable.getRowType(),
+                               return toPreparingTable(originRelOptTable.getRelOptSchema(),
                                        originRelOptTable.getQualifiedName(),
+                                       originRelOptTable.getRowType(),
                                        table);
                        } else {
                                return originRelOptTable;
                        }
                }
        }
+
+       /**
+        * Translates this {@link CatalogSchemaTable} into a Flink source table.
+        */
+       public static FlinkPreparingTableBase toPreparingTable(
+                       RelOptSchema relOptSchema,
+                       List<String> names,
+                       RelDataType rowType,
+                       CatalogSchemaTable table) {
+               if (table.isTemporary()) {
+                       return convertTemporaryTable(relOptSchema,
+                               names,
+                               rowType,
+                               table.getObjectIdentifier(),
+                               table.getCatalogTable(),
+                               table.getStatistic(),
+                               table.isStreamingMode());
+               } else {
+                       return convertPermanentTable(relOptSchema,
+                               names,
+                               rowType,
+                               table.getObjectIdentifier(),
+                               table.getCatalogTable(),
+                               table.getStatistic(),
+                               table.getCatalog().getTableFactory().orElse(null),
+                               table.isStreamingMode());
+               }
+       }
+
+       private static FlinkPreparingTableBase convertPermanentTable(
+                       RelOptSchema relOptSchema,
+                       List<String> names,
+                       RelDataType rowType,
+                       ObjectIdentifier objectIdentifier,
+                       CatalogBaseTable table,
+                       FlinkStatistic statistic,
+                       @Nullable TableFactory tableFactory,
+                       boolean isStreamingMode) {
+               if (table instanceof QueryOperationCatalogView) {
 
 Review comment:
   Is this possible? I was assuming the query operation can't be serialized and thus can't be a permanent table.
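
   For illustration only, here is a minimal hypothetical sketch of the guard this question implies, assuming a QueryOperationCatalogView can never be persisted into a catalog; the ValidationException and its message are assumptions for this sketch, not code from the PR:

       // Hypothetical sketch, not part of this PR. A QueryOperationCatalogView
       // wraps a QueryOperation that cannot be serialized into a catalog, so the
       // permanent-table branch could fail fast instead of trying to convert it.
       // Assumes: import org.apache.flink.table.api.ValidationException;
       if (table instanceof QueryOperationCatalogView) {
               throw new ValidationException(
                       "QueryOperationCatalogView can only be registered as a temporary table.");
       }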
