Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 c960ae0b9 -> 5db733fb0
MAPREDUCE-7059. Downward Compatibility issue: MR job fails because of unknown setErasureCodingPolicy method from 3.x client to HDFS 2.x cluster. Contributed by Jiandan Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5db733fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5db733fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5db733fb

Branch: refs/heads/branch-3.0
Commit: 5db733fb09dfa075b7aa8646ceff37038be011c4
Parents: c960ae0
Author: Weiwei Yang <[email protected]>
Authored: Thu Mar 1 10:18:53 2018 +0800
Committer: Weiwei Yang <[email protected]>
Committed: Fri Mar 2 10:43:03 2018 +0800

----------------------------------------------------------------------
 .../hadoop/mapreduce/JobResourceUploader.java   | 29 +++++++++++++++-----
 1 file changed, 22 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5db733fb/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
index 03b29bd..1dbecb6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
@@ -37,6 +37,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.RpcNoSuchMethodException;
 import org.apache.hadoop.mapreduce.filecache.ClientDistributedCacheManager;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -171,7 +173,7 @@ class JobResourceUploader {
 
     if (!conf.getBoolean(MRJobConfig.MR_AM_STAGING_DIR_ERASURECODING_ENABLED,
         MRJobConfig.DEFAULT_MR_AM_STAGING_ERASURECODING_ENABLED)) {
-      disableErasureCodingForPath(jtFs, submitJobDir);
+      disableErasureCodingForPath(submitJobDir);
     }
 
     // Get the resources that have been added via command line arguments in the
@@ -874,13 +876,26 @@ class JobResourceUploader {
     return finalPath;
   }
 
-  private void disableErasureCodingForPath(FileSystem fs, Path path)
+  private void disableErasureCodingForPath(Path path)
       throws IOException {
-    if (jtFs instanceof DistributedFileSystem) {
-      LOG.info("Disabling Erasure Coding for path: " + path);
-      DistributedFileSystem dfs = (DistributedFileSystem) jtFs;
-      dfs.setErasureCodingPolicy(path,
-          SystemErasureCodingPolicies.getReplicationPolicy().getName());
+    try {
+      if (jtFs instanceof DistributedFileSystem) {
+        LOG.info("Disabling Erasure Coding for path: " + path);
+        DistributedFileSystem dfs = (DistributedFileSystem) jtFs;
+        dfs.setErasureCodingPolicy(path,
+            SystemErasureCodingPolicies.getReplicationPolicy().getName());
+      }
+    } catch (RemoteException e) {
+      if (!RpcNoSuchMethodException.class.getName().equals(e.getClassName())) {
+        throw e;
+      } else {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(
+              "Ignore disabling erasure coding for path {} because method "
+                  + "disableErasureCodingForPath doesn't exist, probably "
+                  + "talking to a lower version HDFS.", path.toString(), e);
+        }
+      }
     }
   }
 }
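For readers who want to reuse the compatibility pattern above outside JobResourceUploader, here is a minimal, hypothetical sketch (the class and method names are illustrative, not part of the patch). It calls the 3.x-only DistributedFileSystem#setErasureCodingPolicy and treats a server-side RpcNoSuchMethodException, which reaches the client wrapped in a RemoteException, as "the NameNode is older than 3.x", mirroring the getClassName() check in the hunk above.

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RpcNoSuchMethodException;

/**
 * Hypothetical helper (not part of the patch) showing the downgrade-safe
 * call pattern used by MAPREDUCE-7059.
 */
public final class ErasureCodingCompat {

  private ErasureCodingCompat() {
  }

  /**
   * Best-effort attempt to pin {@code dir} to the replication policy.
   *
   * @return true if the policy was set, false if the cluster is not HDFS or
   *         the NameNode does not implement setErasureCodingPolicy
   *         (e.g. an HDFS 2.x cluster).
   */
  public static boolean trySetReplicationPolicy(FileSystem fs, Path dir)
      throws IOException {
    if (!(fs instanceof DistributedFileSystem)) {
      // Not HDFS; erasure coding policies do not apply.
      return false;
    }
    try {
      ((DistributedFileSystem) fs).setErasureCodingPolicy(dir,
          SystemErasureCodingPolicies.getReplicationPolicy().getName());
      return true;
    } catch (RemoteException e) {
      // The server-side exception travels only as a class name inside
      // RemoteException, so compare names rather than catching
      // RpcNoSuchMethodException directly; swallow only that case.
      if (RpcNoSuchMethodException.class.getName().equals(e.getClassName())) {
        return false;
      }
      throw e;
    }
  }
}

In JobResourceUploader itself the "method missing" case is only logged at debug level rather than surfaced to the caller, since job submission should proceed either way when the staging directory cannot be switched to the replication policy.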
