qinf commented on code in PR #23219:
URL: https://github.com/apache/flink/pull/23219#discussion_r1307080286
##########
flink-yarn/src/test/java/org/apache/flink/yarn/YarnClusterDescriptorTest.java:
##########
@@ -543,22 +545,59 @@ void testExplicitFileShipping() throws Exception {
                     Files.createTempDirectory(temporaryFolder, UUID.randomUUID().toString())
                             .toFile();
-            assertThat(descriptor.getShipFiles()).doesNotContain(libFile, libFolder);
+            assertThat(descriptor.getShipFiles())
+                    .doesNotContain(
+                            new Path(libFile.getAbsolutePath()),
+                            new Path(libFolder.getAbsolutePath()));
-            List<File> shipFiles = new ArrayList<>();
-            shipFiles.add(libFile);
-            shipFiles.add(libFolder);
+            List<Path> shipFiles = new ArrayList<>();
+            shipFiles.add(new Path(libFile.getAbsolutePath()));
+            shipFiles.add(new Path(libFolder.getAbsolutePath()));
             descriptor.addShipFiles(shipFiles);
-            assertThat(descriptor.getShipFiles()).contains(libFile, libFolder);
+            assertThat(descriptor.getShipFiles())
+                    .contains(
+                            new Path(libFile.getAbsolutePath()),
+                            new Path(libFolder.getAbsolutePath()));
             // only execute part of the deployment to test for shipped files
-            Set<File> effectiveShipFiles = new HashSet<>();
+            Set<Path> effectiveShipFiles = new HashSet<>();
             descriptor.addLibFoldersToShipFiles(effectiveShipFiles);
             assertThat(effectiveShipFiles).isEmpty();
-            assertThat(descriptor.getShipFiles()).hasSize(2).contains(libFile, libFolder);
+            assertThat(descriptor.getShipFiles())
+                    .hasSize(2)
+                    .contains(
+                            new Path(libFile.getAbsolutePath()),
+                            new Path(libFolder.getAbsolutePath()));
+
+            String hdfsDir = "hdfs:///flink/hdfs_dir";
+            String hdfsFile = "hdfs:///flink/hdfs_file";
+            final org.apache.hadoop.conf.Configuration hdConf =
+                    new org.apache.hadoop.conf.Configuration();
+            hdConf.set(
+                    MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
+                    temporaryFolder.toAbsolutePath().toString());
+            try (final MiniDFSCluster hdfsCluster = new MiniDFSCluster.Builder(hdConf).build()) {
+                final org.apache.hadoop.fs.Path hdfsRootPath =
+                        new org.apache.hadoop.fs.Path(hdfsCluster.getURI());
+                hdfsCluster.getFileSystem().mkdirs(new org.apache.hadoop.fs.Path(hdfsDir));
+                hdfsCluster.getFileSystem().createNewFile(new org.apache.hadoop.fs.Path(hdfsFile));
+
+                Configuration flinkConfiguration = new Configuration();
+                flinkConfiguration.set(
+                        YarnConfigOptions.SHIP_FILES,
+                        Arrays.asList(
+                                libFile.getAbsolutePath(),
+                                libFolder.getAbsolutePath(),
+                                hdfsDir,
+                                hdfsFile));
+                final YarnConfiguration yarnConfig = new YarnConfiguration();
+                yarnConfig.set(
+                        CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, hdfsRootPath.toString());
+                createYarnClusterDescriptor(flinkConfiguration, yarnConfig);
+            }
Review Comment:
@1996fanrui Thanks for your review. I have updated the PR according to your
suggestions.
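
For anyone skimming the thread, the new test block boils down to the following
configuration pattern (a minimal sketch based on the diff above, not part of the
PR; the local path is a hypothetical placeholder and the hdfs:/// URIs are just
the test fixtures):

    // Sketch only: yarn.ship-files taking a mix of local paths and remote URIs,
    // mirroring what the MiniDFSCluster block in the test exercises.
    Configuration flinkConfiguration = new Configuration();
    flinkConfiguration.set(
            YarnConfigOptions.SHIP_FILES,
            Arrays.asList(
                    "/path/to/local/lib",        // hypothetical local directory
                    "hdfs:///flink/hdfs_dir",    // remote directory from the test
                    "hdfs:///flink/hdfs_file")); // remote file from the test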