Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b279f42d7 -> 34042ccbb
HDFS-10729. Improve log message for edit loading failures caused by FS limit checks. Contributed by Wei-Chiu Chuang.

(cherry picked from commit 01721dd88ee532d20eda841254437da4dfd69db5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34042ccb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34042ccb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34042ccb

Branch: refs/heads/branch-2
Commit: 34042ccbb9ed4a0590849a11e67351db3b33f062
Parents: b279f42
Author: Kihwal Lee <[email protected]>
Authored: Wed Aug 31 14:09:23 2016 -0500
Committer: Kihwal Lee <[email protected]>
Committed: Wed Aug 31 14:09:23 2016 -0500

----------------------------------------------------------------------
 .../hadoop/hdfs/server/namenode/FSDirWriteFileOp.java | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/34042ccb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 866f351..83ac641 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -485,10 +486,13 @@ class FSDirWriteFileOp {
         return newNode;
       }
     } catch (IOException e) {
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug(
-            "DIR* FSDirectory.unprotectedAddFile: exception when add "
-                + existing.getPath() + " to the file system", e);
+      NameNode.stateChangeLog.warn(
+          "DIR* FSDirectory.unprotectedAddFile: exception when add " + existing
+              .getPath() + " to the file system", e);
+      if (e instanceof FSLimitException.MaxDirectoryItemsExceededException) {
+        NameNode.stateChangeLog.warn("Please increase "
+            + "dfs.namenode.fs-limits.max-directory-items and make it "
+            + "consistent across all NameNodes.");
       }
     }
     return null;
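
For operators who hit FSLimitException.MaxDirectoryItemsExceededException
while edits are loading, the limit named by the new warning is raised in
hdfs-site.xml. A minimal sketch, assuming the branch-2 default of 1048576;
the value 2097152 below is only an example, and per the new log message the
same value must be configured on every NameNode before it replays the edits:

  <!-- hdfs-site.xml: per-directory child limit enforced via
       MaxDirectoryItemsExceededException. Example value only; keep it
       identical across all NameNodes, then restart them. -->
  <property>
    <name>dfs.namenode.fs-limits.max-directory-items</name>
    <value>2097152</value>
  </property>

Note that the NameNode rejects settings above its hard cap (6400000 on this
branch), so the limit cannot be raised arbitrarily.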
