Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 4fc070ac5 -> a1421de70
HDFS-8312. Trash does not descent into child directories to check for
permissions. Contributed By Weiwei Yang via Eric Yang.
(cherry picked from commit 820496fbbc00ede0484bec5511c6b12913e97356)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/587d47cf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/587d47cf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/587d47cf

Branch: refs/heads/branch-2.8
Commit: 587d47cfec3dcb232363fdc00cadef6138783c22
Parents: 4fc070a
Author: Brahma Reddy Battula <[email protected]>
Authored: Tue May 16 23:35:26 2017 +0530
Committer: Brahma Reddy Battula <[email protected]>
Committed: Sun Jul 23 16:09:10 2017 +0800

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/fs/Options.java |  3 +-
 .../apache/hadoop/fs/TrashPolicyDefault.java    | 10 ++-
 .../ClientNamenodeProtocolTranslatorPB.java     |  7 +-
 .../src/main/proto/ClientNamenodeProtocol.proto |  1 +
 ...tNamenodeProtocolServerSideTranslatorPB.java | 14 +++-
 .../hdfs/server/namenode/FSDirRenameOp.java     | 28 +++++--
 .../apache/hadoop/hdfs/TestDFSPermission.java   | 82 +++++++++++++++++++-
 7 files changed, 132 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/587d47cf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
index da75d1c..dc50286 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
@@ -213,7 +213,8 @@ public final class Options {
    */
   public static enum Rename {
     NONE((byte) 0), // No options
-    OVERWRITE((byte) 1); // Overwrite the rename destination
+    OVERWRITE((byte) 1), // Overwrite the rename destination
+    TO_TRASH ((byte) 2); // Rename to trash
 
     private final byte code;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/587d47cf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 28025ff..4f4c937 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -112,6 +112,7 @@ public class TrashPolicyDefault extends TrashPolicy {
     return deletionInterval != 0;
   }
 
+  @SuppressWarnings("deprecation")
   @Override
   public boolean moveToTrash(Path path) throws IOException {
     if (!isEnabled())
@@ -162,10 +163,11 @@ public class TrashPolicyDefault extends TrashPolicy {
           trashPath = new Path(orig + Time.now());
         }
 
-        if (fs.rename(path, trashPath)) { // move to current trash
-          LOG.info("Moved: '" + path + "' to trash at: " + trashPath);
-          return true;
-        }
+        // move to current trash
+        fs.rename(path, trashPath,
+            Rename.TO_TRASH);
+        LOG.info("Moved: '" + path + "' to trash at: " + trashPath);
+        return true;
       } catch (IOException e) {
         cause = e;
       }
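The only public entry point this touches on the client side is the Trash API: moveToTrash() still computes the trash destination itself, but the rename it issues is now tagged with Rename.TO_TRASH. A minimal caller-side sketch, assuming trash is enabled and using a hypothetical path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;

public class MoveToTrashSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.trash.interval", "10");   // trash must be enabled for moveToTrash() to act
    FileSystem fs = FileSystem.get(conf);

    // After this patch the rename issued here carries Rename.TO_TRASH, so the
    // NameNode applies a delete-style permission check to the source subtree.
    Trash trash = new Trash(fs, conf);
    boolean moved = trash.moveToTrash(new Path("/user/alice/data"));   // hypothetical path
    System.out.println("moved to trash: " + moved);
  }
}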
http://git-wip-us.apache.org/repos/asf/hadoop/blob/587d47cf/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index e59f2a2..011c804 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -514,16 +514,21 @@ public class ClientNamenodeProtocolTranslatorPB implements
   public void rename2(String src, String dst, Rename... options)
       throws IOException {
     boolean overwrite = false;
+    boolean toTrash = false;
     if (options != null) {
       for (Rename option : options) {
         if (option == Rename.OVERWRITE) {
           overwrite = true;
+        } else if (option == Rename.TO_TRASH) {
+          toTrash = true;
         }
       }
     }
     Rename2RequestProto req = Rename2RequestProto.newBuilder().
         setSrc(src).
-        setDst(dst).setOverwriteDest(overwrite).
+        setDst(dst).
+        setOverwriteDest(overwrite).
+        setMoveToTrash(toTrash).
         build();
     try {
       if (Client.isAsynchronousMode()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/587d47cf/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index c0c02f2..f19c1c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -244,6 +244,7 @@ message Rename2RequestProto {
   required string src = 1;
   required string dst = 2;
   required bool overwriteDest = 3;
+  optional bool moveToTrash = 4;
 }
 
 message Rename2ResponseProto { // void response
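Because moveToTrash is an optional field with a new tag, the Rename2 message stays wire-compatible: older clients simply never set it and the server sees hasMoveToTrash() == false. A sketch of the request the updated client-side translator builds (paths are hypothetical; the generated class is assumed to be ClientNamenodeProtocolProtos.Rename2RequestProto):

import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;

public class Rename2RequestSketch {
  public static void main(String[] args) {
    // Mirrors what ClientNamenodeProtocolTranslatorPB#rename2 sends when the
    // caller passes Rename.TO_TRASH.
    Rename2RequestProto req = Rename2RequestProto.newBuilder()
        .setSrc("/user/alice/data")
        .setDst("/user/alice/.Trash/Current/user/alice/data")
        .setOverwriteDest(false)
        .setMoveToTrash(true)   // new optional field, tag 4
        .build();

    // A client built before this change never calls setMoveToTrash(), so the
    // server-side translator falls back to the pre-patch OVERWRITE/NONE handling.
    System.out.println(req.hasMoveToTrash() + " / " + req.getMoveToTrash());
  }
}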
http://git-wip-us.apache.org/repos/asf/hadoop/blob/587d47cf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index de5a4dd..9f7c511 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.protocolPB;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.List;
 
@@ -597,10 +598,21 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
 
   @Override
   public Rename2ResponseProto rename2(RpcController controller,
       Rename2RequestProto req) throws ServiceException {
+    // resolve rename options
+    ArrayList<Rename> optionList = new ArrayList<Rename>();
+    if(req.getOverwriteDest()) {
+      optionList.add(Rename.OVERWRITE);
+    } else if(req.hasMoveToTrash()) {
+      optionList.add(Rename.TO_TRASH);
+    }
+
+    if(optionList.isEmpty()) {
+      optionList.add(Rename.NONE);
+    }
     try {
       server.rename2(req.getSrc(), req.getDst(),
-          req.getOverwriteDest() ? Rename.OVERWRITE : Rename.NONE);
+          optionList.toArray(new Rename[optionList.size()]));
     } catch (IOException e) {
       throw new ServiceException(e);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/587d47cf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index fa77d5a..e6f1dd1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -257,11 +257,29 @@ class FSDirRenameOp {
     final INodesInPath srcIIP = fsd.resolvePath(pc, src, DirOp.WRITE_LINK);
     final INodesInPath dstIIP = fsd.resolvePath(pc, dst, DirOp.CREATE_LINK);
     if (fsd.isPermissionEnabled()) {
-      // Rename does not operate on link targets
-      // Do not resolveLink when checking permissions of src and dst
-      // Check write access to parent of src
-      fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null, null,
-          false);
+      boolean renameToTrash = false;
+      if (null != options &&
+          Arrays.asList(options).
+          contains(Options.Rename.TO_TRASH)) {
+        renameToTrash = true;
+      }
+
+      if(renameToTrash) {
+        // if destination is the trash directory,
+        // besides the permission check on "rename"
+        // we need to enforce the check for "delete"
+        // otherwise, it would expose a
+        // security hole that stuff moved to trash
+        // will be deleted by superuser
+        fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null,
+            FsAction.ALL, true);
+      } else {
+        // Rename does not operate on link targets
+        // Do not resolveLink when checking permissions of src and dst
+        // Check write access to parent of src
+        fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null,
+            null, false);
+      }
       // Check write access to ancestor of dst
       fsd.checkPermission(pc, dstIIP, false, FsAction.WRITE, null, null,
           null, false);
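With Rename.TO_TRASH present, the source-side check mirrors what a delete requires: WRITE on the parent of src, and the trailing FsAction.ALL argument requests the same recursive sub-access check that delete uses on every directory under src. In the test layout below, user2 has WRITE on /BSS but only r-x on /BSS/user1, so the recursive check rejects the move. A small illustration, not NameNode code, of what that sub-access check asks of a directory's mode bits when the caller is not the owner:

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

// Illustration only: for a caller that does not own a directory, full rwx
// must come from the group or other class for the recursive check to pass.
public class SubtreeAccessSketch {
  static boolean nonOwnerCanHaveAll(FsPermission perm) {
    return perm.getGroupAction().implies(FsAction.ALL)
        || perm.getOtherAction().implies(FsAction.ALL);
  }

  public static void main(String[] args) {
    System.out.println(nonOwnerCanHaveAll(new FsPermission((short) 0777)));  // true  -> /BSS
    System.out.println(nonOwnerCanHaveAll(new FsPermission((short) 0755)));  // false -> /BSS/user1, move denied
  }
}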
http://git-wip-us.apache.org/repos/asf/hadoop/blob/587d47cf/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
index 0fe304d..2302cd9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
@@ -39,9 +39,9 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
@@ -289,6 +289,86 @@ public class TestDFSPermission {
         FsPermission.createImmutable((short)0777));
   }
 
+  @Test(timeout=30000)
+  public void testTrashPermission() throws Exception {
+    // /BSS            user1:group2 777
+    // /BSS/user1      user1:group2 755
+    // /BSS/user1/test user1:group1 600
+    Path rootDir = new Path("/BSS");
+    Path user1Dir = new Path("/BSS/user1");
+    Path user1File = new Path("/BSS/user1/test");
+
+    try {
+      conf.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "10");
+      fs = FileSystem.get(conf);
+
+      fs.mkdirs(rootDir);
+      fs.setPermission(rootDir, new FsPermission((short) 0777));
+
+      fs = DFSTestUtil.login(fs, conf, USER1);
+      fs.mkdirs(user1Dir);
+      fs.setPermission(user1Dir, new FsPermission((short) 0755));
+      fs.setOwner(user1Dir, USER1.getShortUserName(), GROUP2_NAME);
+
+      create(OpType.CREATE, user1File);
+      fs.setOwner(user1File, USER1.getShortUserName(), GROUP1_NAME);
+      fs.setPermission(user1File, new FsPermission((short) 0600));
+
+      try {
+        // login as user2, attempt to delete /BSS/user1
+        // this should fail because user2 has no permission to
+        // its sub directory.
+        fs = DFSTestUtil.login(fs, conf, USER2);
+        fs.delete(user1Dir, true);
+        fail("User2 should not be allowed to delete user1's dir.");
+      } catch (AccessControlException e) {
+        e.printStackTrace();
+        assertTrue("Permission denied messages must carry the username",
+            e.getMessage().contains(USER2_NAME));
+      }
+
+      // ensure the /BSS/user1 still exists
+      assertTrue(fs.exists(user1Dir));
+
+      try {
+        fs = DFSTestUtil.login(fs, conf, SUPERUSER);
+        Trash trash = new Trash(fs, conf);
+        Path trashRoot = trash.getCurrentTrashDir(user1Dir);
+        while(true) {
+          trashRoot = trashRoot.getParent();
+          if(trashRoot.getParent().isRoot()) {
+            break;
+          }
+        }
+        fs.mkdirs(trashRoot);
+        fs.setPermission(trashRoot, new FsPermission((short) 0777));
+
+        // login as user2, attempt to move /BSS/user1 to trash
+        // this should also fail otherwise the directory will be
+        // removed by trash emptier (emptier is running by superuser)
+        fs = DFSTestUtil.login(fs, conf, USER2);
+        Trash userTrash = new Trash(fs, conf);
+        assertTrue(userTrash.isEnabled());
+        userTrash.moveToTrash(user1Dir);
+        fail("User2 should not be allowed to move" +
+            "user1's dir to trash");
+      } catch (IOException e) {
+        // expect the exception is caused by permission denied
+        assertTrue(e.getCause() instanceof AccessControlException);
+        e.printStackTrace();
+        assertTrue("Permission denied messages must carry the username",
+            e.getCause().getMessage().contains(USER2_NAME));
+      }
+
+      // ensure /BSS/user1 still exists
+      assertEquals(fs.exists(user1Dir), true);
+    } finally {
+      fs = DFSTestUtil.login(fs, conf, SUPERUSER);
+      fs.delete(rootDir, true);
+      conf.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "0");
+    }
+  }
+
   /* check if the ownership of a file/directory is set correctly */
   @Test
   public void testOwnership() throws Exception {
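The delete-equivalent check matters because, once a path lands in trash, the emptier thread (running as the superuser) will eventually remove it regardless of its permissions. A superuser can trigger the same cleanup by hand, which is roughly what the emptier does on its schedule (a sketch, assuming superuser credentials and trash enabled):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Trash;

public class TrashCleanupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.trash.interval", "10");
    FileSystem fs = FileSystem.get(conf);

    Trash trash = new Trash(fs, conf);
    trash.checkpoint();   // roll .Trash/Current into a timestamped checkpoint
    trash.expunge();      // remove checkpoints older than fs.trash.interval
  }
}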
