kl0u commented on a change in pull request #10313: [FLINK-14840] Use Executor
interface in SQL cli
URL: https://github.com/apache/flink/pull/10313#discussion_r351799371
##########
File path:
flink-table/flink-sql-client/src/main/java/org/apache/flink/table/client/gateway/local/ProgramDeployer.java
##########
@@ -18,163 +18,83 @@
package org.apache.flink.table.client.gateway.local;
-import org.apache.flink.api.common.JobExecutionResult;
-import org.apache.flink.client.ClientUtils;
-import org.apache.flink.client.deployment.ClusterDescriptor;
-import org.apache.flink.client.program.ClusterClient;
-import org.apache.flink.runtime.jobgraph.JobGraph;
-import org.apache.flink.table.client.gateway.SqlExecutionException;
-import org.apache.flink.table.client.gateway.local.result.Result;
+import org.apache.flink.api.dag.Pipeline;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.configuration.DeploymentOptions;
+import org.apache.flink.core.execution.DefaultExecutorServiceLoader;
+import org.apache.flink.core.execution.Executor;
+import org.apache.flink.core.execution.ExecutorFactory;
+import org.apache.flink.core.execution.ExecutorServiceLoader;
+import org.apache.flink.core.execution.JobClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingDeque;
+import java.util.concurrent.CompletableFuture;
/**
* The helper class to deploy a table program on the cluster.
*/
-public class ProgramDeployer<C> implements Runnable {
+public class ProgramDeployer<C> {
private static final Logger LOG =
LoggerFactory.getLogger(ProgramDeployer.class);
private final ExecutionContext<C> context;
- private final JobGraph jobGraph;
+ private final Pipeline pipeline;
private final String jobName;
- private final Result<C> result;
private final boolean awaitJobResult;
- private final BlockingQueue<JobExecutionResult> executionResultBucket;
/**
* Deploys a table program on the cluster.
*
* @param context context with deployment information
* @param jobName job name of the Flink job to be submitted
- * @param jobGraph Flink job graph
- * @param result result that receives information about the
target cluster
+ * @param pipeline Flink {@link Pipeline} to execute
* @param awaitJobResult block for a job execution result from the
cluster
*/
public ProgramDeployer(
ExecutionContext<C> context,
Review comment:
Also here, for code readability, I think it is easier to pass the
`configuration` directly instead of creating it in the `deploy`, and when we
fetch the result, have a `checkState` on the `ATTACHED` flag so that the reader
of the code knows what to expect. The way it is now, it seems like in
`deploy` we are setting the `ATTACHED` flag but it is never used.
WDYT?
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services