fsk119 commented on code in PR #21979:
URL: https://github.com/apache/flink/pull/21979#discussion_r1142869047


##########
flink-table/flink-sql-gateway/src/test/java/org/apache/flink/table/gateway/AbstractSqlGatewayStatementTestBase.java:
##########
@@ -16,19 +16,23 @@
  * limitations under the License.
  */
 
-package org.apache.flink.table.client.cli.utils;
+package org.apache.flink.table.gateway;
 
-/**
- * A structure describes a SQL statement for testing.
- *
- * @see SqlScriptReader
- */
-public class TestSqlStatement {
-    public final String comment;
-    public final String sql;
+import org.apache.flink.table.gateway.service.utils.SqlGatewayServiceExtension;
+
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Order;
+import org.junit.jupiter.api.extension.RegisterExtension;
+
+/** Statement test base for sql gateway. */
+public abstract class AbstractSqlGatewayStatementTestBase extends 
AbstractStatementTestBase {
+    @RegisterExtension
+    @Order(2)
+    public static final SqlGatewayServiceExtension 
SQL_GATEWAY_SERVICE_EXTENSION =
+            new 
SqlGatewayServiceExtension(MINI_CLUSTER::getClientConfiguration);
 
-    public TestSqlStatement(String comment, String sql) {
-        this.comment = comment;
-        this.sql = sql;
+    @BeforeAll
+    public static void setUp() {
+        service = SQL_GATEWAY_SERVICE_EXTENSION.getService();
     }

Review Comment:
   I think this base class was introduced because CliClient uses a special 
SessionManager. Could we instead add a method that lets the sub-class provide 
the definition of the SessionManager? That way we don't need to introduce a new 
class here — it's a little confusing for developers to know which class to extend.



##########
flink-table/flink-sql-client/src/test/java/org/apache/flink/table/client/cli/CliClientITCase.java:
##########
@@ -153,89 +130,98 @@ static void setup(@InjectClusterClientConfiguration 
Configuration configuration)
                         classNameCodes);
         URL udfDependency = udfJar.toURI().toURL();
         String path = udfDependency.getPath();
+
         // we need to pad the displayed "jars" tableau to have the same width 
of path string
         // 4 for the "jars" characters, see `set.q` test file
         int paddingLen = path.length() - 4;
-        historyPath = Files.createTempFile(tempFolder, "history", "");
-
-        replaceVars = new HashMap<>();
         replaceVars.put("$VAR_UDF_JAR_PATH", path);
         replaceVars.put("$VAR_UDF_JAR_PATH_DASH", repeat('-', paddingLen));
         replaceVars.put("$VAR_UDF_JAR_PATH_SPACE", repeat(' ', paddingLen));
         replaceVars.put("$VAR_PIPELINE_JARS_URL", udfDependency.toString());
-        replaceVars.put("$VAR_REST_PORT", configuration.get(PORT).toString());
-        replaceVars.put("$VAR_JOBMANAGER_RPC_ADDRESS", 
configuration.get(ADDRESS));
-    }
-
-    @BeforeEach
-    void before() throws IOException {
-        // initialize new folders for every test, so the vars can be reused by 
every SQL script
-        replaceVars.put(
-                "$VAR_STREAMING_PATH",
-                Files.createTempDirectory(tempFolder, 
UUID.randomUUID().toString()).toString());
         replaceVars.put(
-                "$VAR_STREAMING_PATH2",
-                Files.createTempDirectory(tempFolder, 
UUID.randomUUID().toString()).toString());
+                "$VAR_REST_PORT", 
MINI_CLUSTER.getClientConfiguration().get(PORT).toString());
         replaceVars.put(
-                "$VAR_BATCH_PATH",
-                Files.createTempDirectory(tempFolder, 
UUID.randomUUID().toString()).toString());
-        replaceVars.put(
-                "$VAR_BATCH_PATH2",
-                Files.createTempDirectory(tempFolder, 
UUID.randomUUID().toString()).toString());
-    }
-
-    @ParameterizedTest
-    @MethodSource("sqlPaths")
-    void testSqlStatements(
-            String sqlPath, @InjectClusterClientConfiguration Configuration 
configuration)
-            throws IOException {
-        String in = getInputFromPath(sqlPath);
-        List<TestSqlStatement> testSqlStatements = parseSqlScript(in);
-        List<String> sqlStatements =
-                testSqlStatements.stream().map(s -> 
s.sql).collect(Collectors.toList());
-        List<Result> actualResults = runSqlStatements(sqlStatements, 
configuration);
-        String out = transformOutput(testSqlStatements, actualResults);
-        assertThat(out).isEqualTo(in);
-    }
+                "$VAR_JOBMANAGER_RPC_ADDRESS", 
MINI_CLUSTER.getClientConfiguration().get(ADDRESS));
 
-    /**
-     * Returns printed results for each ran SQL statements.
-     *
-     * @param statements the SQL statements to run
-     * @return the printed results on SQL Client
-     */
-    private List<Result> runSqlStatements(List<String> statements, 
Configuration configuration)
-            throws IOException {
-        final String sqlContent = String.join("", statements);
         DefaultContext defaultContext =
                 new DefaultContext(
-                        new Configuration(configuration)
+                        new 
Configuration(MINI_CLUSTER.getClientConfiguration())
                                 // Make sure we use the new cast behaviour
                                 .set(
                                         
ExecutionConfigOptions.TABLE_EXEC_LEGACY_CAST_BEHAVIOUR,
                                         
ExecutionConfigOptions.LegacyCastBehaviour.DISABLED),
                         Collections.emptyList());
+        executor =
+                Executor.create(
+                        defaultContext,
+                        InetSocketAddress.createUnresolved(
+                                
SQL_GATEWAY_REST_ENDPOINT_EXTENSION.getTargetAddress(),
+                                
SQL_GATEWAY_REST_ENDPOINT_EXTENSION.getTargetPort()),
+                        "test-session");
+    }
+
+    @AfterEach
+    public void afterEach() {
+        executor.close();
+    }
 
-        InputStream inputStream = new 
ByteArrayInputStream(sqlContent.getBytes());
+    @Override
+    protected String runSingleStatement(String statement) throws Exception {
+        String inputStatement = statement + ";\n";
+        InputStream inputStream = new 
ByteArrayInputStream(inputStatement.getBytes());
         ByteArrayOutputStream outputStream = new ByteArrayOutputStream(256);
 
-        try (final Executor executor =
-                        Executor.create(
-                                defaultContext,
-                                InetSocketAddress.createUnresolved(
-                                        
SQL_GATEWAY_REST_ENDPOINT_EXTENSION.getTargetAddress(),
-                                        
SQL_GATEWAY_REST_ENDPOINT_EXTENSION.getTargetPort()),
-                                "test-session");
-                Terminal terminal = new DumbTerminal(inputStream, 
outputStream);
+        try (Terminal terminal = new DumbTerminal(inputStream, outputStream);

Review Comment:
   This is a little different from the actual usage: in the SqlClient we bind 
the Executor to a terminal. Do you think it would be better to override 
runStatements in the base class instead?



##########
flink-table/flink-sql-client/src/test/java/org/apache/flink/table/client/cli/CliClientITCase.java:
##########
@@ -80,10 +68,9 @@
 import static 
org.apache.flink.table.utils.UserDefinedFunctions.GENERATED_LOWER_UDF_CODE;
 import static 
org.apache.flink.table.utils.UserDefinedFunctions.GENERATED_UPPER_UDF_CLASS;
 import static 
org.apache.flink.table.utils.UserDefinedFunctions.GENERATED_UPPER_UDF_CODE;
-import static org.assertj.core.api.Assertions.assertThat;
 
-/** Test that runs every {@code xx.q} file in "resources/sql/" path as a test. 
*/
-class CliClientITCase {
+/** Test that runs every {@code xx.q} file in "resources/sql_single/" path as 
a test. */
+public class CliClientITCase extends AbstractStatementTestBase {

Review Comment:
   nit: Remove `public`.
   
   
   



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to