TanYuxin-tyx commented on code in PR #22579:
URL: https://github.com/apache/flink/pull/22579#discussion_r1194769979


##########
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/runtime/batch/sql/agg/GroupWindowITCase.scala:
##########
@@ -743,23 +744,27 @@ class GroupWindowITCase extends BatchTestBase {
     )
   }
 
-  @Test(expected = classOf[RuntimeException])
+  @Test
   def testSessionWindowWithProperties(): Unit = {
-    registerCollection(
-      "T",
-      data3WithTimestamp,
-      new RowTypeInfo(INT_TYPE_INFO, LONG_TYPE_INFO, STRING_TYPE_INFO, LOCAL_DATE_TIME),
-      "a, b, c, ts")
-
-    val sqlQuery =
-      "SELECT COUNT(a), " +
-        "SESSION_START(ts, INTERVAL '4' SECOND), " +
-        "SESSION_END(ts, INTERVAL '4' SECOND), " +
-        "SESSION_ROWTIME(ts, INTERVAL '4' SECOND) " +
-        "FROM T " +
-        "GROUP BY SESSION(ts, INTERVAL '4' SECOND)"
 
-    checkResult(sqlQuery, Seq())
+    assertThatThrownBy(

Review Comment:
   ditto. Though this method only has one `checkResult`, we should still wrap only the right location.
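   For illustration, a minimal sketch of wrapping only that location (assuming `RuntimeException` stays the expected exception and `registerCollection`/`sqlQuery` are kept outside the assertion); a non-authoritative sketch, not the final patch:
   
    assertThatThrownBy(() => checkResult(sqlQuery, Seq()))
      .isInstanceOf(classOf[RuntimeException])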



##########
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/runtime/batch/sql/agg/AggregateITCaseBase.scala:
##########
@@ -718,36 +722,39 @@ abstract class AggregateITCaseBase(testName: String) extends BatchTestBase {
     )
   }
 
-  @Test(expected = classOf[TableException])
+  @Test
   def testMultipleColumnDistinctCount(): Unit = {
-    val testData = Seq(
-      ("a", "b", "c"),
-      ("a", "b", "c"),
-      ("a", "b", "d"),
-      ("x", "y", "z"),
-      ("x", "q", null: String))
-
-    checkQuery(
-      testData,
-      "select count(distinct f0, f1) from TableName",
-      Seq(Tuple1(3L))
-    )
+    assertThatThrownBy(
+      () => {
+        val testData = Seq(
+          ("a", "b", "c"),
+          ("a", "b", "c"),
+          ("a", "b", "d"),
+          ("x", "y", "z"),
+          ("x", "q", null: String))
+
+        checkQuery(

Review Comment:
   If you wrap the whole method body with `assertThatThrownBy`, an exception will be thrown from the first `checkQuery`, so the subsequent `checkQuery` calls will never run, which is not what we expect. (You can verify this with the debugger.)
   
   To fix this, you should wrap each `checkQuery` with `assertThatThrownBy`.
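   For illustration, a hedged sketch of that per-call wrapping, reusing the data and query shown above and assuming `TableException` remains the expected type; each remaining `checkQuery` in the method would get its own assertion in the same way:
   
    val testData = Seq(
      ("a", "b", "c"),
      ("a", "b", "c"),
      ("a", "b", "d"),
      ("x", "y", "z"),
      ("x", "q", null: String))
    
    assertThatThrownBy(
      () =>
        checkQuery(
          testData,
          "select count(distinct f0, f1) from TableName",
          Seq(Tuple1(3L))))
      .isInstanceOf(classOf[TableException])
    // ...one assertThatThrownBy per remaining checkQuery, so each of them still runs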



##########
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/runtime/batch/sql/agg/HashAggITCase.scala:
##########
@@ -80,8 +80,20 @@ class HashAggITCase(adaptiveLocalHashAggEnable: Boolean)
     )
   }
 
-  @Test
-  def testAdaptiveLocalHashAggWithLowAggregationDegree(): Unit = {
+  @ParameterizedTest
+  @MethodSource(Array("parameters"))
+  def testAdaptiveLocalHashAggWithLowAggregationDegree(
+      adaptiveLocalHashAggEnable: Boolean): Unit = {
+    if (adaptiveLocalHashAggEnable) {
+      tEnv.getConfig
+        .set(OptimizerConfigOptions.TABLE_OPTIMIZER_AGG_PHASE_STRATEGY, "TWO_PHASE")
+      tEnv.getConfig.set(
+        HashAggCodeGenerator.TABLE_EXEC_LOCAL_HASH_AGG_ADAPTIVE_ENABLED,
+        Boolean.box(true))
+      tEnv.getConfig.set(
+        HashAggCodeGenerator.TABLE_EXEC_LOCAL_HASH_AGG_ADAPTIVE_SAMPLING_THRESHOLD,
+        Long.box(5L))
+    }

Review Comment:
   Because there are too many duplicated lines setting up these config options, we can extract them into a new method like the one below, and all the test methods can call it.
   
    private def setTableEnv(adaptiveLocalHashAggEnable: Boolean) = {
      if (adaptiveLocalHashAggEnable) {
        tEnv.getConfig
          .set(OptimizerConfigOptions.TABLE_OPTIMIZER_AGG_PHASE_STRATEGY, "TWO_PHASE")
        tEnv.getConfig.set(
          HashAggCodeGenerator.TABLE_EXEC_LOCAL_HASH_AGG_ADAPTIVE_ENABLED,
          Boolean.box(true))
        tEnv.getConfig.set(
          HashAggCodeGenerator.TABLE_EXEC_LOCAL_HASH_AGG_ADAPTIVE_SAMPLING_THRESHOLD,
          Long.box(5L))
      }
    }
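   As a rough sketch of the call site (assuming the parameterized signature from the diff above and the `setTableEnv` helper suggested here), each test method would then start with a single call:
   
    @ParameterizedTest
    @MethodSource(Array("parameters"))
    def testAdaptiveLocalHashAggWithLowAggregationDegree(
        adaptiveLocalHashAggEnable: Boolean): Unit = {
      // replaces the duplicated tEnv.getConfig.set(...) block in each test
      setTableEnv(adaptiveLocalHashAggEnable)
      // ...rest of the existing test body stays unchanged
    }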



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
