This is an automated email from the ASF dual-hosted git repository.

jbrennan pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/branch-3.2 by this push:
     new eec10a0  HDFS-15717. Improve fsck logging. (#2529) Contributed by Kihwal Lee and Ahmed Hussein
eec10a0 is described below

commit eec10a087e5ebfcd4ff06de12d3de381a941df50
Author: Jim Brennan <jbren...@apache.org>
AuthorDate: Fri Dec 11 18:10:24 2020 +0000

    HDFS-15717. Improve fsck logging. (#2529) Contributed by Kihwal Lee and Ahmed Hussein
---
 .../hadoop/hdfs/server/namenode/FSNamesystem.java | 14 +++++++---
 .../hadoop/hdfs/server/namenode/FsckServlet.java  | 32 ++++++++++++----------
 .../hadoop/hdfs/server/namenode/NamenodeFsck.java | 19 ++++++++-----
 .../hadoop/hdfs/server/namenode/TestFsck.java     |  1 +
 4 files changed, 41 insertions(+), 25 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index e531686..eda6bfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -5938,13 +5938,19 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   private static UserGroupInformation getRemoteUser() throws IOException {
     return NameNode.getRemoteUser();
   }
-  
+
   /**
-   * Log fsck event in the audit log
+   * Log fsck event in the audit log.
+   *
+   * @param succeeded Whether authorization succeeded.
+   * @param src Path of affected source file.
+   * @param remoteAddress Remote address of the request.
+   * @throws IOException if {@link #getRemoteUser()} fails.
    */
-  void logFsckEvent(String src, InetAddress remoteAddress) throws IOException {
+  void logFsckEvent(boolean succeeded, String src, InetAddress remoteAddress)
+      throws IOException {
     if (isAuditEnabled()) {
-      logAuditEvent(true, getRemoteUser(),
+      logAuditEvent(succeeded, getRemoteUser(),
                     remoteAddress, "fsck", src, null, null);
     }
   }

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
index 5fae9cd..e5c02e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
@@ -55,21 +55,25 @@ public class FsckServlet extends DfsServlet {
     final UserGroupInformation ugi = getUGI(request, conf);
     try {
-      ugi.doAs(new PrivilegedExceptionAction<Object>() {
-        @Override
-        public Object run() throws Exception {
-          NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
-
-          final FSNamesystem namesystem = nn.getNamesystem();
-          final BlockManager bm = namesystem.getBlockManager();
-          final int totalDatanodes =
-              namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE);
-          new NamenodeFsck(conf, nn,
-              bm.getDatanodeManager().getNetworkTopology(), pmap, out,
-              totalDatanodes, remoteAddress).fsck();
-
-          return null;
+      ugi.doAs((PrivilegedExceptionAction<Object>) () -> {
+        NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
+
+        final FSNamesystem namesystem = nn.getNamesystem();
+        final BlockManager bm = namesystem.getBlockManager();
+        final int totalDatanodes =
+            namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE);
+        NamenodeFsck fsck = new NamenodeFsck(conf, nn,
+            bm.getDatanodeManager().getNetworkTopology(), pmap, out,
+            totalDatanodes, remoteAddress);
+        String auditSource = fsck.getAuditSource();
+        boolean success = false;
+        try {
+          fsck.fsck();
+          success = true;
+        } finally {
+          namesystem.logFsckEvent(success, auditSource, remoteAddress);
         }
+        return null;
       });
     } catch (InterruptedException e) {
       response.sendError(400, e.getMessage());
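The FsckServlet hunk above is the heart of the change: the audit call used to
live inside NamenodeFsck#fsck() and ran only after the check was already under
way, so a denied or failed request could leave no trace in the audit log.
Hoisting the call into a finally block with an explicit success flag audits
every request, pass or fail. Below is a minimal, self-contained sketch of that
idiom; AuditedFsck, runFsck, and logEvent are hypothetical stand-ins for
FsckServlet, NamenodeFsck#fsck, and FSNamesystem#logFsckEvent, not HDFS APIs.

    import java.net.InetAddress;

    /**
     * Sketch of the try/finally audit idiom from the servlet hunk above.
     * All names here are illustrative stand-ins, not HDFS classes.
     */
    public class AuditedFsck {

      /** Stand-in for FSNamesystem#logFsckEvent(succeeded, src, remoteAddress). */
      static void logEvent(boolean succeeded, String src, InetAddress remote) {
        System.out.printf("audit: allowed=%b cmd=fsck src=%s ip=%s%n",
            succeeded, src, remote.getHostAddress());
      }

      /** Stand-in for the doAs action in FsckServlet#doGet. */
      static void runFsck(String auditSource, InetAddress remote) {
        boolean success = false;                  // pessimistic default
        try {
          // The real code calls fsck.fsck() here; if it throws, success
          // stays false and the finally block records a failed attempt.
          success = true;                         // reached only on a clean run
        } finally {
          logEvent(success, auditSource, remote); // logged on every exit path
        }
      }

      public static void main(String[] args) {
        runFsck("/", InetAddress.getLoopbackAddress());
      }
    }

Because success is flipped only after fsck() returns normally, any exception
(including the AccessControlException raised by the superuser check added in
the NamenodeFsck hunk below) is audited as a failure before it propagates.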
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 2aeca73..dfed055 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -152,6 +152,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
   private boolean showMaintenanceState = false;
   private long staleInterval;
   private Tracer tracer;
+  private String auditSource;
 
   /**
    * True if we encountered an internal error during FSCK, such as not being
@@ -175,7 +176,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
 
   String path = "/";
 
-  private String blockIds = null;
+  private String[] blockIds = null;
 
   // We return back N files that are corrupt; the list of files returned is
   // ordered by block id; to allow continuation support, pass in the last block
@@ -251,9 +252,15 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
       } else if (key.equals("includeSnapshots")) {
         this.snapshottableDirs = new ArrayList<String>();
       } else if (key.equals("blockId")) {
-        this.blockIds = pmap.get("blockId")[0];
+        this.blockIds = pmap.get("blockId")[0].split(" ");
       }
     }
+    this.auditSource = (blockIds != null)
+        ? "blocksIds=" + Arrays.asList(blockIds) : path;
+  }
+
+  public String getAuditSource() {
+    return auditSource;
   }
 
   /**
@@ -355,18 +362,18 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
   /**
    * Check files on DFS, starting from the indicated path.
    */
-  public void fsck() {
+  public void fsck() throws AccessControlException {
     final long startTime = Time.monotonicNow();
     try {
       if(blockIds != null) {
-        String[] blocks = blockIds.split(" ");
+        namenode.getNamesystem().checkSuperuserPrivilege();
         StringBuilder sb = new StringBuilder();
         sb.append("FSCK started by " +
             UserGroupInformation.getCurrentUser() + " from " +
             remoteAddress + " at " + new Date());
         out.println(sb);
         sb.append(" for blockIds: \n");
-        for (String blk: blocks) {
+        for (String blk: blockIds) {
           if(blk == null || !blk.contains(Block.BLOCK_FILE_PREFIX)) {
             out.println("Incorrect blockId format: " + blk);
             continue;
@@ -376,7 +383,6 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
           sb.append(blk + "\n");
         }
         LOG.info("{}", sb.toString());
-        namenode.getNamesystem().logFsckEvent("/", remoteAddress);
         out.flush();
         return;
       }
@@ -385,7 +391,6 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
           + " from " + remoteAddress + " for path " + path + " at " + new Date();
       LOG.info(msg);
       out.println(msg);
-      namenode.getNamesystem().logFsckEvent(path, remoteAddress);
 
       if (snapshottableDirs != null) {
         SnapshottableDirectoryStatus[] snapshotDirs =
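Two details in the NamenodeFsck hunks above are easy to miss. First, the raw
blockId parameter is now split into a String[] once, in the constructor, and
the audit source string is derived there as well, so the servlet can log a
meaningful source even when fsck() throws before doing any work. Second,
checkSuperuserPrivilege() moved to the top of the blockId branch, so an
unauthorized block-level fsck fails fast and is still audited through the
servlet's finally block. A small sketch of the auditSource derivation follows;
AuditSourceSketch and its method are hypothetical (the real code reads
pmap.get("blockId")[0] from the servlet parameter map), and the "blocksIds="
prefix is spelled exactly as committed.

    import java.util.Arrays;

    /**
     * Sketch of the auditSource derivation added to the NamenodeFsck
     * constructor above; simplified and self-contained for illustration.
     */
    public class AuditSourceSketch {

      static String auditSource(String blockIdParam, String path) {
        // One split, stored for reuse; the real field is NamenodeFsck#blockIds.
        String[] blockIds =
            (blockIdParam == null) ? null : blockIdParam.split(" ");
        // Mirrors the committed ternary, including its "blocksIds=" spelling.
        return (blockIds != null)
            ? "blocksIds=" + Arrays.asList(blockIds)
            : path;
      }

      public static void main(String[] args) {
        // Path-based fsck: the audit source is just the path.
        System.out.println(auditSource(null, "/user/data"));
        // Block-based fsck: prints blocksIds=[blk_1073741825, blk_1073741826]
        System.out.println(auditSource("blk_1073741825 blk_1073741826", "/"));
      }
    }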
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index b6ef22a..7d2a45a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -252,6 +252,7 @@ public class TestFsck {
       file.delete();
     }
     Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
+    logger.removeAllAppenders();
     logger.setLevel(Level.INFO);
     PatternLayout layout = new PatternLayout("%m%n");
     RollingFileAppender appender =

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org