leonardBang commented on a change in pull request #13679:
URL: https://github.com/apache/flink/pull/13679#discussion_r507632325



##########
File path: 
flink-table/flink-table-planner-blink/src/test/java/org/apache/flink/table/planner/factories/TestValuesTableFactory.java
##########
@@ -315,15 +320,28 @@ public DynamicTableSource 
createDynamicTableSource(Context context) {
        public DynamicTableSink createDynamicTableSink(Context context) {
                FactoryUtil.TableFactoryHelper helper = 
FactoryUtil.createTableFactoryHelper(this, context);
                helper.validate();
+               String sinkClass = helper.getOptions().get(TABLE_SINK_CLASS);
+
                boolean isInsertOnly = 
helper.getOptions().get(SINK_INSERT_ONLY);
                String runtimeSink = helper.getOptions().get(RUNTIME_SINK);
                int expectedNum = 
helper.getOptions().get(SINK_EXPECTED_MESSAGES_NUM);
                TableSchema schema = context.getCatalogTable().getSchema();
-               return new TestValuesTableSink(
-                       schema,
-                       context.getObjectIdentifier().getObjectName(),
-                       isInsertOnly,
-                       runtimeSink, expectedNum);
+               if (sinkClass.equals("DEFAULT")) {
+                       return new TestValuesTableSink(
+                               schema,
+                               context.getObjectIdentifier().getObjectName(),
+                               isInsertOnly,
+                               runtimeSink, expectedNum);
+               } else {
+                       try {
+                               return InstantiationUtil.instantiate(
+                                       sinkClass,
+                                       DynamicTableSink.class,
+                                       
Thread.currentThread().getContextClassLoader());
+                       } catch (FlinkException e) {

Review comment:
       Could we also use `TableException` here, for consistency with the source path?

##########
File path: 
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/runtime/stream/table/TableSinkITCase.scala
##########
@@ -620,4 +622,93 @@ class TableSinkITCase extends StreamingTestBase {
     val expected = List("book,1,12", "book,4,11", "fruit,3,44")
     assertEquals(expected.sorted, result.sorted)
   }
+
+  @Test
+  def testSinkContext(): Unit = {
+    val data = List(
+      rowOf("1970-01-01 00:00:00.001", localDateTime(1L), 1, 1d),
+      rowOf("1970-01-01 00:00:00.002", localDateTime(2L), 1, 2d),
+      rowOf("1970-01-01 00:00:00.003", localDateTime(3L), 1, 2d),
+      rowOf("1970-01-01 00:00:00.004", localDateTime(4L), 1, 5d),
+      rowOf("1970-01-01 00:00:00.007", localDateTime(7L), 1, 3d),
+      rowOf("1970-01-01 00:00:00.008", localDateTime(8L), 1, 3d),
+      rowOf("1970-01-01 00:00:00.016", localDateTime(16L), 1, 4d))
+
+    val dataId: String = TestValuesTableFactory.registerData(data)
+
+    val sourceDDL =
+      s"""
+         |CREATE TABLE src (
+         |  log_ts STRING,
+         |  ts TIMESTAMP(3),
+         |  a INT,
+         |  b DOUBLE,
+         |  WATERMARK FOR ts AS ts - INTERVAL '0.001' SECOND
+         |) WITH (
+         |  'connector' = 'values',
+         |  'data-id' = '$dataId'
+         |)
+      """.stripMargin
+
+    val sinkDDL =
+      s"""
+         |CREATE TABLE sink (
+         |  log_ts STRING,
+         |  ts TIMESTAMP(3),
+         |  a INT,
+         |  b DOUBLE
+         |) WITH (
+         |  'connector' = 'values',
+         |  'table-sink-class' = '${classOf[TestSinkContextTableSink].getName}'
+         |)
+      """.stripMargin
+
+    tEnv.executeSql(sourceDDL)
+    tEnv.executeSql(sinkDDL)
+
+    
//---------------------------------------------------------------------------------------
+    // Verify writing out a source directly with the rowtime attribute
+    
//---------------------------------------------------------------------------------------
+
+    tEnv.executeSql("INSERT INTO sink SELECT * FROM src").await()
+
+    val expected = List(1, 2, 3, 4, 7, 8, 16)
+    assertEquals(expected.sorted, TestSinkContextTableSink.ROWTIMES.sorted)
+
+    val sinkDDL2 =
+      s"""
+         |CREATE TABLE sink2 (
+         |  window_rowtime TIMESTAMP(3),
+         |  b DOUBLE
+         |) WITH (
+         |  'connector' = 'values',
+         |  'table-sink-class' = '${classOf[TestSinkContextTableSink].getName}'
+         |)
+      """.stripMargin
+    tEnv.executeSql(sinkDDL2)
+
+    
//---------------------------------------------------------------------------------------
+    // Verify writing out with additional operator to generate a new rowtime 
attribute
+    
//---------------------------------------------------------------------------------------
+
+    tEnv.executeSql(
+      """
+        |INSERT INTO sink2
+        |SELECT
+        |  TUMBLE_ROWTIME(ts, INTERVAL '0.005' SECOND),
+        |  SUM(b)
+        |FROM src
+        |GROUP BY TUMBLE(ts, INTERVAL '0.005' SECOND)
+        |""".stripMargin
+    ).await()
+
+    val expected2 = List(4, 9, 19)
+    assertEquals(expected2.sorted, TestSinkContextTableSink.ROWTIMES.sorted)
+  }
+
+  // 
------------------------------------------------------------------------------------------
+
+  private def localDateTime(ts: Long): LocalDateTime = {
+    new Timestamp(ts - TimeZone.getDefault.getOffset(ts)).toLocalDateTime

Review comment:
        Could we use `LocalDateTime.ofEpochSecond(epochSecond, nanoOfSecond,
ZoneOffset.UTC)` instead? It would be more readable.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to