Github user tillrohrmann commented on a diff in the pull request:
https://github.com/apache/flink/pull/6203#discussion_r199793741
--- Diff: flink-clients/src/main/java/org/apache/flink/client/program/rest/RestClusterClient.java ---
@@ -315,36 +315,61 @@ public JobSubmissionResult submitJob(JobGraph jobGraph, ClassLoader classLoader)
 		// we have to enable queued scheduling because slot will be allocated lazily
 		jobGraph.setAllowQueuedScheduling(true);
-		log.info("Requesting blob server port.");
-		CompletableFuture<BlobServerPortResponseBody> portFuture = sendRequest(BlobServerPortHeaders.getInstance());
+		CompletableFuture<java.nio.file.Path> jobGraphFileFuture = CompletableFuture.supplyAsync(() -> {
+			try {
+				final java.nio.file.Path jobGraphFile = Files.createTempFile("flink-jobgraph", ".bin");
+				try (ObjectOutputStream objectOut = new ObjectOutputStream(Files.newOutputStream(jobGraphFile))) {
+					objectOut.writeObject(jobGraph);
+				}
+				return jobGraphFile;
+			} catch (IOException e) {
+				throw new CompletionException(new FlinkException("Failed to serialize JobGraph.", e));
+			}
+		}, executorService);
-		CompletableFuture<JobGraph> jobUploadFuture = portFuture.thenCombine(
-			getDispatcherAddress(),
-			(BlobServerPortResponseBody response, String dispatcherAddress) -> {
-				final int blobServerPort = response.port;
-				final InetSocketAddress address = new InetSocketAddress(dispatcherAddress, blobServerPort);
+		CompletableFuture<Tuple2<JobSubmitRequestBody, Collection<FileUpload>>> requestFuture = jobGraphFileFuture.thenApply(jobGraphFile -> {
+			List<String> jarFileNames = new ArrayList<>(8);
+			List<JobSubmitRequestBody.DistributedCacheFile> artifactFileNames = new ArrayList<>(8);
+			Collection<FileUpload> filesToUpload = new ArrayList<>(8);
-				try {
-					ClientUtils.uploadJobGraphFiles(jobGraph, () -> new BlobClient(address, flinkConfig));
-				} catch (Exception e) {
-					throw new CompletionException(e);
-				}
+			filesToUpload.add(new FileUpload(jobGraphFile, RestConstants.CONTENT_TYPE_BINARY));
-				return jobGraph;
-			});
+			for (Path jar : jobGraph.getUserJars()) {
+				jarFileNames.add(jar.getName());
+				filesToUpload.add(new FileUpload(Paths.get(jar.toUri()), RestConstants.CONTENT_TYPE_JAR));
+			}
-		CompletableFuture<JobSubmitResponseBody> submissionFuture = jobUploadFuture.thenCompose(
-			(JobGraph jobGraphToSubmit) -> {
-				log.info("Submitting job graph.");
+			for (Map.Entry<String, DistributedCache.DistributedCacheEntry> artifacts : jobGraph.getUserArtifacts().entrySet()) {
+				artifactFileNames.add(new JobSubmitRequestBody.DistributedCacheFile(artifacts.getKey(), new Path(artifacts.getValue().filePath).getName()));
+				filesToUpload.add(new FileUpload(Paths.get(artifacts.getValue().filePath), RestConstants.CONTENT_TYPE_BINARY));
+			}
-				try {
-					return sendRequest(
-						JobSubmitHeaders.getInstance(),
-						new JobSubmitRequestBody(jobGraph));
-				} catch (IOException ioe) {
-					throw new CompletionException(new FlinkException("Could not create JobSubmitRequestBody.", ioe));
-				}
-			});
+			final JobSubmitRequestBody requestBody = new JobSubmitRequestBody(
+				jobGraphFile.getFileName().toString(),
+				jarFileNames,
+				artifactFileNames);
+
+			return Tuple2.of(requestBody, Collections.unmodifiableCollection(filesToUpload));
+		});
+
+		final CompletableFuture<JobSubmitResponseBody> submissionFuture = requestFuture.thenCompose(
+			requestAndFileUploads -> sendRetriableRequest(
+				JobSubmitHeaders.getInstance(),
+				EmptyMessageParameters.getInstance(),
+				requestAndFileUploads.f0,
+				requestAndFileUploads.f1,
+				isConnectionProblemOrServiceUnavailable())
+		);
+
+		submissionFuture
+			.thenCombine(jobGraphFileFuture, (ignored, jobGraphFile) -> jobGraphFile)
+			.thenAccept(jobGraphFile -> {
--- End diff --
I think the `thenAccept` could be merged with `thenCombine` because we
already have access to the `jobGraphFile`.
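For illustration, roughly what I have in mind (just a sketch; the body of the `thenAccept` is cut off in the diff above, so I'm assuming it only cleans up the temporary `jobGraphFile`):

```java
submissionFuture
	.thenCombine(jobGraphFileFuture, (ignored, jobGraphFile) -> {
		// assumed cleanup logic; the original thenAccept body is not shown in this diff
		try {
			Files.delete(jobGraphFile);
		} catch (IOException e) {
			log.warn("Could not delete temporary file {}.", jobGraphFile, e);
		}
		return null;
	});
```

This would save the extra `thenAccept` stage, since the combining function already receives the `jobGraphFile` and can do the cleanup directly.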
---