This is an automated email from the ASF dual-hosted git repository.
iwasakims pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/branch-2.10 by this push:
new 3fc1c44 HDFS-15052. WebHDFS getTrashRoot leads to OOM due to FileSystem object creation. (#1758)
3fc1c44 is described below
commit 3fc1c44974a3a6d17d7c340c86df9daf0b238a39
Author: Masatake Iwasaki <[email protected]>
AuthorDate: Fri Feb 21 11:56:07 2020 +0900
HDFS-15052. WebHDFS getTrashRoot leads to OOM due to FileSystem object creation. (#1758)
(cherry picked from commit 2338d25dc7150d75fbda84cc95422380b5622224)
Conflicts:
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
(cherry picked from commit 610805ec7245769aebb36e52725522c42cb3dd88)
Conflicts:
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
---
.../java/org/apache/hadoop/hdfs/DFSUtilClient.java | 30 +++++++++++-
.../apache/hadoop/hdfs/DistributedFileSystem.java | 11 +++--
.../web/resources/NamenodeWebHdfsMethods.java | 44 ++++++++++++++----
.../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 54 ++++++++++++++++++++++
4 files changed, 124 insertions(+), 15 deletions(-)
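For context, the reported OOM comes from the server-side GETTRASHROOT handler building a FileSystem per request. A rough sketch of the removed pattern (see the NamenodeWebHdfsMethods hunk below); the failure mode described assumes the FileSystem cache is keyed on scheme, authority, and UGI, with each WebHDFS request carrying a distinct UGI:

  // Every GETTRASHROOT request could add a fresh DistributedFileSystem
  // to the FileSystem cache; entries accumulate until the NameNode
  // exhausts its heap.
  FileSystem fs = FileSystem.get(conf != null ? conf : new Configuration());
  String trash = fs.getTrashRoot(new org.apache.hadoop.fs.Path(fullPath))
      .toUri().getPath();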
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 4be4c82..b6e05ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -772,7 +773,7 @@ public class DFSUtilClient {
* @param ugi {@link UserGroupInformation} of current user.
* @return the home directory of current user.
*/
- public static Path getHomeDirectory(Configuration conf,
+ public static String getHomeDirectory(Configuration conf,
UserGroupInformation ugi) {
String userHomePrefix = HdfsClientConfigKeys
.DFS_USER_HOME_DIR_PREFIX_DEFAULT;
@@ -781,6 +782,31 @@ public class DFSUtilClient {
HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY,
HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT);
}
- return new Path(userHomePrefix + "/" + ugi.getShortUserName());
+ return userHomePrefix + Path.SEPARATOR + ugi.getShortUserName();
+ }
+
+ /**
+ * Returns trash root in non-encryption zone.
+ * @param conf configuration.
+ * @param ugi user of trash owner.
+ * @return unqualified path of trash root.
+ */
+ public static String getTrashRoot(Configuration conf,
+ UserGroupInformation ugi) {
+ return getHomeDirectory(conf, ugi)
+ + Path.SEPARATOR + FileSystem.TRASH_PREFIX;
+ }
+
+ /**
+ * Returns trash root in encryption zone.
+ * @param ez encryption zone.
+ * @param ugi user of trash owner.
+ * @return unqualified path of trash root.
+ */
+ public static String getEZTrashRoot(EncryptionZone ez,
+ UserGroupInformation ugi) {
+ String ezpath = ez.getPath();
+ return (ezpath.equals("/") ? ezpath : ezpath + Path.SEPARATOR)
+ + FileSystem.TRASH_PREFIX + Path.SEPARATOR + ugi.getShortUserName();
}
}
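A hedged usage sketch of the helpers added above. The user name "alice" and the zone "/zone1" are illustrative, and the home prefix is taken from dfs.user.home.dir.prefix (default "/user"):

  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  DFSUtilClient.getHomeDirectory(conf, ugi);  // "/user/alice"
  DFSUtilClient.getTrashRoot(conf, ugi);      // "/user/alice/.Trash"
  // Given an EncryptionZone ez whose getPath() returns "/zone1":
  DFSUtilClient.getEZTrashRoot(ez, ugi);      // "/zone1/.Trash/alice"
  // A zone rooted at "/" yields "/.Trash/alice"; the ternary avoids
  // the double slash "//.Trash/alice".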
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index d40cfff..362768d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -197,7 +197,8 @@ public class DistributedFileSystem extends FileSystem
@Override
public Path getHomeDirectory() {
- return makeQualified(DFSUtilClient.getHomeDirectory(getConf(), dfs.ugi));
+ return makeQualified(
+ new Path(DFSUtilClient.getHomeDirectory(getConf(), dfs.ugi)));
}
/**
@@ -2635,8 +2636,7 @@ public class DistributedFileSystem extends FileSystem
EncryptionZone ez = dfs.getEZForPath(parentSrc);
if ((ez != null)) {
return this.makeQualified(
- new Path(new Path(ez.getPath(), FileSystem.TRASH_PREFIX),
- dfs.ugi.getShortUserName()));
+ new Path(DFSUtilClient.getEZTrashRoot(ez, dfs.ugi)));
}
} catch (IOException e) {
DFSClient.LOG.warn("Exception in checking the encryption zone for the " +
@@ -2663,7 +2663,8 @@ public class DistributedFileSystem extends FileSystem
// Get EZ Trash roots
final RemoteIterator<EncryptionZone> it = dfs.listEncryptionZones();
while (it.hasNext()) {
- Path ezTrashRoot = new Path(it.next().getPath(),
+ EncryptionZone ez = it.next();
+ Path ezTrashRoot = new Path(ez.getPath(),
FileSystem.TRASH_PREFIX);
if (!exists(ezTrashRoot)) {
continue;
@@ -2675,7 +2676,7 @@ public class DistributedFileSystem extends FileSystem
}
}
} else {
- Path userTrash = new Path(ezTrashRoot, dfs.ugi.getShortUserName());
+ Path userTrash = new Path(DFSUtilClient.getEZTrashRoot(ez, dfs.ugi));
try {
ret.add(getFileStatus(userTrash));
} catch (FileNotFoundException ignored) {
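Both call sites now delegate to DFSUtilClient.getEZTrashRoot, which should produce the same path as the nested Path construction it replaces (illustrative, with ez.getPath() returning "/zone1" and a short user name of "alice"):

  new Path(new Path(ez.getPath(), FileSystem.TRASH_PREFIX),
      dfs.ugi.getShortUserName());                     // /zone1/.Trash/alice
  new Path(DFSUtilClient.getEZTrashRoot(ez, dfs.ugi)); // /zone1/.Trash/alice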
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 973fe71..af71f9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -62,7 +62,6 @@ import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.XAttr;
@@ -76,6 +75,7 @@ import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -1097,7 +1097,7 @@ public class NamenodeWebHdfsMethods {
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case GETHOMEDIRECTORY: {
- String userHome = DFSUtilClient.getHomeDirectory(conf, ugi).toString();
+ String userHome = DFSUtilClient.getHomeDirectory(conf, ugi);
final String js = JsonUtil.toJsonString("Path", userHome);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
@@ -1136,7 +1136,7 @@ public class NamenodeWebHdfsMethods {
return Response.ok().build();
}
case GETTRASHROOT: {
- final String trashPath = getTrashRoot(fullpath, conf);
+ final String trashPath = getTrashRoot(conf, fullpath);
final String jsonStr = JsonUtil.toJsonString("Path", trashPath);
return Response.ok(jsonStr).type(MediaType.APPLICATION_JSON).build();
}
@@ -1178,11 +1178,39 @@ public class NamenodeWebHdfsMethods {
}
}
- private static String getTrashRoot(String fullPath,
- Configuration conf) throws IOException {
- FileSystem fs = FileSystem.get(conf != null ? conf : new Configuration());
- return fs.getTrashRoot(
- new org.apache.hadoop.fs.Path(fullPath)).toUri().getPath();
+ private String getTrashRoot(Configuration conf, String fullPath)
+ throws IOException {
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ String parentSrc = getParent(fullPath);
+ EncryptionZone ez = getRpcClientProtocol().getEZForPath(
+ parentSrc != null ? parentSrc : fullPath);
+ String trashRoot;
+ if (ez != null) {
+ trashRoot = DFSUtilClient.getEZTrashRoot(ez, ugi);
+ } else {
+ trashRoot = DFSUtilClient.getTrashRoot(conf, ugi);
+ }
+ return trashRoot;
+ }
+
+ /**
+ * Returns the parent of a path in the same way as Path#getParent.
+ * @return the parent of a path or null if at root
+ */
+ public String getParent(String path) {
+ int lastSlash = path.lastIndexOf('/');
+ int start = 0;
+ if ((path.length() == start) || // empty path
+ (lastSlash == start && path.length() == start + 1)) { // at root
+ return null;
+ }
+ String parent;
+ if (lastSlash == -1) {
+ parent = org.apache.hadoop.fs.Path.CUR_DIR;
+ } else {
+ parent = path.substring(0, lastSlash == start ? start + 1 : lastSlash);
+ }
+ return parent;
}
private static DirectoryListing getDirectoryListing(final ClientProtocol cp,
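Assuming the new getParent helper matches org.apache.hadoop.fs.Path#getParent as its Javadoc states, a few illustrative inputs and outputs:

  getParent("/a/b");  // "/a"
  getParent("/a");    // "/"   (a slash at the start is kept)
  getParent("/");     // null  (at root)
  getParent("");      // null  (empty path)
  getParent("a");     // "."   (Path.CUR_DIR; no slash present)

With this, GETTRASHROOT resolves the encryption zone through the existing ClientProtocol RPC (getEZForPath) instead of instantiating a FileSystem per request.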
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 30ac622..4a3f924 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -34,6 +34,7 @@ import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.EOFException;
+import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
@@ -47,6 +48,7 @@ import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
+import java.util.EnumSet;
import java.util.Map;
import java.util.Random;
@@ -58,11 +60,13 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.QuotaUsage;
@@ -80,6 +84,8 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestDFSClientRetries;
import org.apache.hadoop.hdfs.TestFileCreation;
+import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -1398,6 +1404,54 @@ public class TestWebHDFS {
}
@Test
+ public void testGetEZTrashRoot() throws Exception {
+ final Configuration conf = WebHdfsTestUtil.createConf();
+ FileSystemTestHelper fsHelper = new FileSystemTestHelper();
+ File testRootDir = new File(fsHelper.getTestRootDir()).getAbsoluteFile();
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+ "jceks://file" + new Path(testRootDir.toString(), "test.jks").toUri());
+ final MiniDFSCluster cluster =
+ new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+ cluster.waitActive();
+ DistributedFileSystem dfs = cluster.getFileSystem();
+ final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(
+ conf, WebHdfsConstants.WEBHDFS_SCHEME);
+ HdfsAdmin dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
+ dfs.getClient().setKeyProvider(
+ cluster.getNameNode().getNamesystem().getProvider());
+ final String testkey = "test_key";
+ DFSTestUtil.createKey(testkey, cluster, conf);
+
+ final Path zone1 = new Path("/zone1");
+ dfs.mkdirs(zone1, new FsPermission((short)0700));
+ dfsAdmin.createEncryptionZone(zone1, testkey,
+ EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH));
+
+ final Path insideEZ = new Path(zone1, "insideEZ");
+ dfs.mkdirs(insideEZ, new FsPermission((short)0700));
+ assertEquals(
+ dfs.getTrashRoot(insideEZ).toUri().getPath(),
+ webhdfs.getTrashRoot(insideEZ).toUri().getPath());
+
+ final Path outsideEZ = new Path("/outsideEZ");
+ dfs.mkdirs(outsideEZ, new FsPermission((short)0755));
+ assertEquals(
+ dfs.getTrashRoot(outsideEZ).toUri().getPath(),
+ webhdfs.getTrashRoot(outsideEZ).toUri().getPath());
+
+ final Path root = new Path("/");
+ assertEquals(
+ dfs.getTrashRoot(root).toUri().getPath(),
+ webhdfs.getTrashRoot(root).toUri().getPath());
+ assertEquals(
+ webhdfs.getTrashRoot(root).toUri().getPath(),
+ webhdfs.getTrashRoot(zone1).toUri().getPath());
+ assertEquals(
+ webhdfs.getTrashRoot(outsideEZ).toUri().getPath(),
+ webhdfs.getTrashRoot(zone1).toUri().getPath());
+ }
+
+ @Test
public void testStoragePolicy() throws Exception {
MiniDFSCluster cluster = null;
final Configuration conf = WebHdfsTestUtil.createConf();
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]