steveloughran commented on code in PR #4967:
URL: https://github.com/apache/hadoop/pull/4967#discussion_r991490790
##########
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java:
##########
@@ -4877,12 +4877,21 @@ public CompletableFuture<FSDataInputStream> build()
throws IOException {
}
/**
- * Return root path
- * @param path
- * @return
+ * Return path of the enclosing root for a given path
+ * The enclosing root path is a common ancestor that should be used for temp and staging dirs
+ * as well as within encryption zones and other restricted directories.
+ *
+ * Call makeQualified on the param path to ensure the param path to ensure its part of the correct filesystem
+ *
+ * @param path file path to find the enclosing root path for
+ * @return a path to the enclosing root
* @throws IOException
*/
+ @InterfaceAudience.Public
+ @InterfaceStability.Unstable
+ // Should this throw RuntimeException (instead of IO), so we can throw NotInMountpointException from viewfs/rbf?
Review Comment:
really complicates life elsewhere though
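Keeping IOException as in the diff, a possible cleanup of the new javadoc (illustrative only; the duplicated "to ensure the param path" should go either way, and the @throws wording is just a suggestion):

    /**
     * Return the path of the enclosing root for a given path.
     * The enclosing root path is a common ancestor that should be used for
     * temp and staging dirs, as well as within encryption zones and other
     * restricted directories.
     *
     * Call makeQualified on the param path to ensure it is part of the
     * correct filesystem.
     *
     * @param path file path to find the enclosing root path for
     * @return a path to the enclosing root
     * @throws IOException if the path cannot be resolved
     */
    @InterfaceAudience.Public
    @InterfaceStability.Unstable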
##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java:
##########
@@ -2082,8 +2083,8 @@ public GetEnclosingRootResponseProto getEnclosingRoot(
RpcController controller, GetEnclosingRootRequestProto req)
throws ServiceException {
try {
- String enclosingRootPath = server.getEnclosingRoot(req.getFilename());
- return GetEnclosingRootResponseProto.newBuilder().setEnclosingRootPath(enclosingRootPath)
+ Path enclosingRootPath = server.getEnclosingRoot(req.getFilename());
+ return GetEnclosingRootResponseProto.newBuilder().setEnclosingRootPath(enclosingRootPath.toString())
Review Comment:
toURI().toString()
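i.e. roughly, given that Path's URI accessor is toUri() (sketch of just this line; the rest of the builder chain is unchanged):

    // use the URI form rather than Path.toString()
    GetEnclosingRootResponseProto.newBuilder()
        .setEnclosingRootPath(enclosingRootPath.toUri().toString())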
##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java:
##########
@@ -521,16 +527,46 @@ public void testEnclosingRootsBase() throws Exception {
final EnumSet<CreateEncryptionZoneFlag> provisionTrash =
EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH);
hdfsAdmin.createEncryptionZone(zone1, "test_key", provisionTrash);
- RemoteIterator<EncryptionZone> zones = hdfsAdmin.listEncryptionZones();
- assertEquals(fsView.getEnclosingRoot(zone), new Path("/data"));
- assertEquals(fsView.getEnclosingRoot(zone1), zone1);
+ assertEquals(fsView.getEnclosingRoot(zone), getViewFsPath("/data", fsView));
+ assertEquals(fsView.getEnclosingRoot(zone1), getViewFsPath(zone1, fsView));
Path nn02Ez = new Path("/mountOnNn2/EZ");
fsTarget2.mkdirs(nn02Ez);
- assertEquals(fsView.getEnclosingRoot((nn02Ez)), new Path("/mountOnNn2"));
+ assertEquals(fsView.getEnclosingRoot((nn02Ez)), getViewFsPath("/mountOnNn2", fsView));
HdfsAdmin hdfsAdmin2 = new HdfsAdmin(cluster.getURI(1), CONF);
DFSTestUtil.createKey("test_key", cluster, 1, CONF);
hdfsAdmin2.createEncryptionZone(nn02Ez, "test_key", provisionTrash);
- assertEquals(fsView.getEnclosingRoot((nn02Ez)), nn02Ez);
+ assertEquals(fsView.getEnclosingRoot((nn02Ez)), getViewFsPath(nn02Ez, fsView));
+ assertEquals(fsView.getEnclosingRoot(new Path(nn02Ez, "dir/dir2/file")), getViewFsPath(nn02Ez, fsView));
+
+ // With viewfs:// scheme
+ assertEquals(fsView.getEnclosingRoot(fsView.getWorkingDirectory()), getViewFsPath("/user", fsView));
+ }
+
+ @Test
+ public void testEnclosingRootFailure() throws IOException {
+ try {
Review Comment:
use LambdaTestUtils.intercept here and below
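Rough shape only; the exception class and probe path below are placeholders for whatever the current try/catch actually asserts:

    import static org.apache.hadoop.test.LambdaTestUtils.intercept;

    // placeholder exception type and path, for illustration only
    intercept(IOException.class, () ->
        fsView.getEnclosingRoot(new Path("/does/not/exist")));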
##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEnclosingRoot.java:
##########
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
Review Comment:
review/fix import grouping
##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEnclosingRoot.java:
##########
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.util.EnumSet;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileContextTestWrapper;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FileSystemTestWrapper;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector;
+import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
+
+import static org.junit.Assert.*;
+
+
+public class TestEnclosingRoot {
+ static final Logger LOG = LoggerFactory.getLogger(TestEncryptionZones.class);
+
+ protected Configuration conf;
+ private FileSystemTestHelper fsHelper;
+
+ protected MiniDFSCluster cluster;
+ protected HdfsAdmin dfsAdmin;
+ protected DistributedFileSystem fs;
+ private File testRootDir;
+ protected final String TEST_KEY = "test_key";
+ private static final String NS_METRICS = "FSNamesystem";
+ private static final String AUTHORIZATION_EXCEPTION_MESSAGE =
+ "User [root] is not authorized to perform [READ] on key " +
+ "with ACL name [key2]!!";
+
+ protected FileSystemTestWrapper fsWrapper;
+ protected FileContextTestWrapper fcWrapper;
+
+ protected static final EnumSet<CreateEncryptionZoneFlag> NO_TRASH =
Review Comment:
why not private?
##########
hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md:
##########
@@ -599,6 +599,25 @@ on the filesystem.
if len(FS, P) > 0: getFileStatus(P).getBlockSize() > 0
result == getFileStatus(P).getBlockSize()
+1. The outcome of this operation MUST be identical to the value of
+ `getFileStatus(P).getBlockSize()`.
+2. By inference, it MUST be > 0 for any file of length > 0.
+### `Path getEnclosingRoot(Path p)`
+
+This method is used to find a root directory for a path given. This is useful for creating
+staging and temp directories in the same root directory. There are constraints around how
+renames are allowed to atomically occur (ex. across hdfs volumes or across encryption zones).
+
+#### Preconditions
+
+if no root path is found: raise IOException
+No root path would be found only if there is no mount point for the given path
+
+
+#### Postconditions
+
+The path return will not be null
+
1. The outcome of this operation MUST be identical to the value of
Review Comment:
these lines are copied from the previous section; cut
##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java:
##########
@@ -141,9 +141,9 @@ public class TestDistributedFileSystem {
}
private boolean dualPortTesting = false;
-
+
Review Comment:
best to revert these, just to simplify merge/cherrypick
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java:
##########
@@ -95,6 +95,7 @@ public static void globalSetUp() throws Exception {
conf.setInt(RBFConfigKeys.DFS_ROUTER_ADMIN_MAX_COMPONENT_LENGTH_KEY, 20);
cluster.addRouterOverrides(conf);
cluster.startCluster();
+ cluster.startCluster();
Review Comment:
why the duplication?
##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEnclosingRoot.java:
##########
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.util.EnumSet;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileContextTestWrapper;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FileSystemTestWrapper;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector;
+import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
+
+import static org.junit.Assert.*;
+
+
+public class TestEnclosingRoot {
+ static final Logger LOG = LoggerFactory.getLogger(TestEncryptionZones.class);
+
+ protected Configuration conf;
+ private FileSystemTestHelper fsHelper;
+
+ protected MiniDFSCluster cluster;
+ protected HdfsAdmin dfsAdmin;
+ protected DistributedFileSystem fs;
+ private File testRootDir;
+ protected final String TEST_KEY = "test_key";
+ private static final String NS_METRICS = "FSNamesystem";
Review Comment:
separate from fields, ideally at top of class
##########
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java:
##########
@@ -25,6 +25,7 @@
import java.util.Map;
import java.util.stream.Collectors;
+import org.apache.hadoop.fs.Path;
Review Comment:
move to just above import org.apache.hadoop.fs.QuotaUsage;
##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEnclosingRoot.java:
##########
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.util.EnumSet;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileContextTestWrapper;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FileSystemTestWrapper;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector;
+import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
+
+import static org.junit.Assert.*;
+
+
+public class TestEnclosingRoot {
+ static final Logger LOG = LoggerFactory.getLogger(TestEncryptionZones.class);
Review Comment:
make private, fix name
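i.e. something like:

    private static final Logger LOG =
        LoggerFactory.getLogger(TestEnclosingRoot.class);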
##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEnclosingRoot.java:
##########
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.util.EnumSet;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileContextTestWrapper;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FileSystemTestWrapper;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector;
+import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
+
+import static org.junit.Assert.*;
+
+
+public class TestEnclosingRoot {
+ static final Logger LOG = LoggerFactory.getLogger(TestEncryptionZones.class);
+
+ protected Configuration conf;
+ private FileSystemTestHelper fsHelper;
+
+ protected MiniDFSCluster cluster;
+ protected HdfsAdmin dfsAdmin;
+ protected DistributedFileSystem fs;
+ private File testRootDir;
+ protected final String TEST_KEY = "test_key";
+ private static final String NS_METRICS = "FSNamesystem";
+ private static final String AUTHORIZATION_EXCEPTION_MESSAGE =
+ "User [root] is not authorized to perform [READ] on key " +
+ "with ACL name [key2]!!";
+
+ protected FileSystemTestWrapper fsWrapper;
+ protected FileContextTestWrapper fcWrapper;
+
+ protected static final EnumSet<CreateEncryptionZoneFlag> NO_TRASH =
+ EnumSet.of(CreateEncryptionZoneFlag.NO_TRASH);
+
+ protected String getKeyProviderURI() {
+ return JavaKeyStoreProvider.SCHEME_NAME + "://file" +
+ new Path(testRootDir.toString(), "test.jks").toUri();
+ }
+
+ @Before
+ public void setup() throws Exception {
+ conf = new HdfsConfiguration();
+ fsHelper = new FileSystemTestHelper();
+ // Set up java key store
+ String testRoot = fsHelper.getTestRootDir();
+ testRootDir = new File(testRoot).getAbsoluteFile();
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+ getKeyProviderURI());
+
+ conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+ // Lower the batch size for testing
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
+ 2);
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+ cluster.waitActive();
+ GenericTestUtils.setLogLevel(
+ LoggerFactory.getLogger(EncryptionZoneManager.class), Level.TRACE);
+ fs = cluster.getFileSystem();
+ fsWrapper = new FileSystemTestWrapper(fs);
+ fcWrapper = new FileContextTestWrapper(
+ FileContext.getFileContext(cluster.getURI(), conf));
+ dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
+ setProvider();
+ // Create a test key
+ DFSTestUtil.createKey(TEST_KEY, cluster, conf);
+ }
+
+ protected void setProvider() {
+ // Need to set the client's KeyProvider to the NN's for JKS,
+ // else the updates do not get flushed properly
+ fs.getClient().setKeyProvider(cluster.getNameNode().getNamesystem()
+ .getProvider());
+ }
+
+ @After
+ public void teardown() {
+ if (cluster != null) {
+ cluster.shutdown();
+ cluster = null;
+ }
+ EncryptionFaultInjector.instance = new EncryptionFaultInjector();
+ }
+
+ @Test
+ public void testBasicOperations() throws Exception {
+ final Path rootDir = new Path("/");
+ final Path zone1 = new Path(rootDir, "zone1");
+ final Path zone1FileDNE = new Path(zone1, "newDNE.txt");
+
+ assertEquals(fs.getEnclosingRoot(rootDir), rootDir);
Review Comment:
order of the arguments to these assertEquals calls is wrong: expected should come first.
I would prefer you to use AssertJ and its asserts, including a description of what is being checked.
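A sketch of that style, with the expected value on the isEqualTo() side (the description text is only a suggestion):

    import static org.assertj.core.api.Assertions.assertThat;

    assertThat(fs.getEnclosingRoot(rootDir))
        .describedAs("enclosing root of %s", rootDir)
        .isEqualTo(rootDir);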
##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEnclosingRoot.java:
##########
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.util.EnumSet;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileContextTestWrapper;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FileSystemTestWrapper;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector;
+import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
+
+import static org.junit.Assert.*;
+
+
+public class TestEnclosingRoot {
Review Comment:
extend AbstractHadoopTestBase for timeouts, thread names, etc
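i.e. (sketch; the base class is org.apache.hadoop.test.AbstractHadoopTestBase):

    import org.apache.hadoop.test.AbstractHadoopTestBase;

    public class TestEnclosingRoot extends AbstractHadoopTestBase {
      // test timeout and thread-name handling come from the base class
    }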
##########
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java:
##########
@@ -3913,15 +3913,14 @@ public Path doCall(final Path p) throws IOException {
@Override
public Path next(final FileSystem fs, final Path p)
throws IOException {
- if (fs instanceof DistributedFileSystem) {
- DistributedFileSystem myDfs = (DistributedFileSystem) fs;
- return myDfs.getEnclosingRoot(p);
- } else {
+ if (!(fs instanceof DistributedFileSystem)) {
throw new UnsupportedOperationException(
- "Cannot call getEZForPath"
+ "Cannot call getEnclosingRoot"
Review Comment:
does this still hold?
##########
hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md:
##########
@@ -599,6 +599,25 @@ on the filesystem.
if len(FS, P) > 0: getFileStatus(P).getBlockSize() > 0
result == getFileStatus(P).getBlockSize()
+1. The outcome of this operation MUST be identical to the value of
+ `getFileStatus(P).getBlockSize()`.
+2. By inference, it MUST be > 0 for any file of length > 0.
+### `Path getEnclosingRoot(Path p)`
+
+This method is used to find a root directory for a path given. This is useful for creating
+staging and temp directories in the same root directory. There are constraints around how
+renames are allowed to atomically occur (ex. across hdfs volumes or across encryption zones).
+
+#### Preconditions
+
+if no root path is found: raise IOException
+No root path would be found only if there is no mount point for the given path
+
+
+#### Postconditions
+
+The path return will not be null
Review Comment:
better: "a path representing the 'root' directory for a mounted fs"
##########
hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md:
##########
@@ -599,6 +599,25 @@ on the filesystem.
if len(FS, P) > 0: getFileStatus(P).getBlockSize() > 0
result == getFileStatus(P).getBlockSize()
+1. The outcome of this operation MUST be identical to the value of
+ `getFileStatus(P).getBlockSize()`.
+2. By inference, it MUST be > 0 for any file of length > 0.
+### `Path getEnclosingRoot(Path p)`
Review Comment:
add a newline above
##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEnclosingRoot.java:
##########
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.util.EnumSet;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileContextTestWrapper;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FileSystemTestWrapper;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector;
+import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
+
+import static org.junit.Assert.*;
+
+
+public class TestEnclosingRoot {
+ static final Logger LOG = LoggerFactory.getLogger(TestEncryptionZones.class);
+
+ protected Configuration conf;
+ private FileSystemTestHelper fsHelper;
+
+ protected MiniDFSCluster cluster;
+ protected HdfsAdmin dfsAdmin;
+ protected DistributedFileSystem fs;
+ private File testRootDir;
+ protected final String TEST_KEY = "test_key";
+ private static final String NS_METRICS = "FSNamesystem";
+ private static final String AUTHORIZATION_EXCEPTION_MESSAGE =
+ "User [root] is not authorized to perform [READ] on key " +
+ "with ACL name [key2]!!";
+
+ protected FileSystemTestWrapper fsWrapper;
+ protected FileContextTestWrapper fcWrapper;
+
+ protected static final EnumSet<CreateEncryptionZoneFlag> NO_TRASH =
+ EnumSet.of(CreateEncryptionZoneFlag.NO_TRASH);
+
+ protected String getKeyProviderURI() {
+ return JavaKeyStoreProvider.SCHEME_NAME + "://file" +
+ new Path(testRootDir.toString(), "test.jks").toUri();
+ }
+
+ @Before
+ public void setup() throws Exception {
+ conf = new HdfsConfiguration();
+ fsHelper = new FileSystemTestHelper();
+ // Set up java key store
+ String testRoot = fsHelper.getTestRootDir();
+ testRootDir = new File(testRoot).getAbsoluteFile();
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+ getKeyProviderURI());
+
+ conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+ // Lower the batch size for testing
+ conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
+ 2);
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+ cluster.waitActive();
+ GenericTestUtils.setLogLevel(
+ LoggerFactory.getLogger(EncryptionZoneManager.class), Level.TRACE);
+ fs = cluster.getFileSystem();
+ fsWrapper = new FileSystemTestWrapper(fs);
+ fcWrapper = new FileContextTestWrapper(
+ FileContext.getFileContext(cluster.getURI(), conf));
+ dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
+ setProvider();
+ // Create a test key
+ DFSTestUtil.createKey(TEST_KEY, cluster, conf);
+ }
+
+ protected void setProvider() {
+ // Need to set the client's KeyProvider to the NN's for JKS,
+ // else the updates do not get flushed properly
+ fs.getClient().setKeyProvider(cluster.getNameNode().getNamesystem()
+ .getProvider());
+ }
+
+ @After
+ public void teardown() {
+ if (cluster != null) {
+ cluster.shutdown();
+ cluster = null;
+ }
+ EncryptionFaultInjector.instance = new EncryptionFaultInjector();
Review Comment:
put in a finally clause in case cluster.shutdown fails?
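i.e. something like:

    @After
    public void teardown() {
      try {
        if (cluster != null) {
          cluster.shutdown();
          cluster = null;
        }
      } finally {
        // reset the injector even if shutdown throws
        EncryptionFaultInjector.instance = new EncryptionFaultInjector();
      }
    }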
##########
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEnclosingRoot.java:
##########
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.util.EnumSet;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileContextTestWrapper;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FileSystemTestWrapper;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector;
+import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
+
+import static org.junit.Assert.*;
+
+
+public class TestEnclosingRoot {
+ static final Logger LOG = LoggerFactory.getLogger(TestEncryptionZones.class);
+
+ protected Configuration conf;
+ private FileSystemTestHelper fsHelper;
+
+ protected MiniDFSCluster cluster;
+ protected HdfsAdmin dfsAdmin;
+ protected DistributedFileSystem fs;
+ private File testRootDir;
+ protected final String TEST_KEY = "test_key";
+ private static final String NS_METRICS = "FSNamesystem";
+ private static final String AUTHORIZATION_EXCEPTION_MESSAGE =
+ "User [root] is not authorized to perform [READ] on key " +
+ "with ACL name [key2]!!";
+
+ protected FileSystemTestWrapper fsWrapper;
+ protected FileContextTestWrapper fcWrapper;
+
+ protected static final EnumSet<CreateEncryptionZoneFlag> NO_TRASH =
+ EnumSet.of(CreateEncryptionZoneFlag.NO_TRASH);
+
+ protected String getKeyProviderURI() {
+ return JavaKeyStoreProvider.SCHEME_NAME + "://file" +
+ new Path(testRootDir.toString(), "test.jks").toUri();
+ }
+
+ @Before
+ public void setup() throws Exception {
Review Comment:
Once the test extends AbstractHadoopTestBase, this test method and the one below should call their superclass. But if you plan > 1 test method, it would be good to save the cluster to a static field on the first setup() call where it is invoked and non-null, with an afterClass teardown to close it at the end.
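Rough sketch of that idea; field and method names are illustrative and it assumes the test stays on JUnit 4:

    import org.junit.AfterClass;

    private static MiniDFSCluster cluster;

    @Before
    public void setup() throws Exception {
      // ... existing conf/key-provider setup ...
      if (cluster == null) {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
      }
      // per-test wiring (fs, wrappers, dfsAdmin, test key) stays here
    }

    @AfterClass
    public static void teardownCluster() {
      if (cluster != null) {
        cluster.shutdown();
        cluster = null;
      }
    }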