mukul1987 commented on a change in pull request #2166:
URL: https://github.com/apache/hadoop/pull/2166#discussion_r461497163
##########
File path: hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
##########
@@ -71,29 +71,9 @@
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
-import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
-import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
-import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
-import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.*;
Review comment:
Expand wildcard imports.
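For reference, restoring the explicit imports removed above (plus whatever new protocol classes this change actually needs, e.g. `SnapshotStatus`, if that is what prompted the wildcard):

```java
import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
```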
##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
##########
@@ -501,7 +509,31 @@ public void write(DataOutput out) throws IOException {
return statusList.toArray(
new SnapshottableDirectoryStatus[statusList.size()]);
}
-
+
+ /**
+ * List all the snapshots under a snapshottable directory.
+ */
+ public SnapshotStatus[] getSnapshotListing(INodesInPath iip)
+ throws IOException {
+ INodeDirectory srcRoot = getSnapshottableRoot(iip);
+ ReadOnlyList<Snapshot> snapshotList = srcRoot.
+ getDirectorySnapshottableFeature().getSnapshotList();
+ SnapshotStatus[] statuses = new SnapshotStatus[snapshotList.size()];
+ for (int count = 0; count < snapshotList.size(); count++) {
+ Snapshot s = snapshotList.get(count);
+ Snapshot.Root dir = s.getRoot();
+ statuses[count] = new SnapshotStatus(dir.getModificationTime(),
+ dir.getAccessTime(), dir.getFsPermission(),
+ EnumSet.noneOf(HdfsFileStatus.Flags.class),
+ dir.getUserName(), dir.getGroupName(),
+ dir.getLocalNameBytes(), dir.getId(),
+ dir.getChildrenNum(Snapshot.CURRENT_STATE_ID),
Review comment:
Should this be the ID of the snapshot `s` here, rather than `Snapshot.CURRENT_STATE_ID`?
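A sketch of the suggested change, assuming `Snapshot#getId()` exposes the per-snapshot ID:

```java
// Use the snapshot's own ID so the children count reflects that
// snapshot's point-in-time view rather than the current state.
dir.getChildrenNum(s.getId()),
```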
##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestListSnapshot.java
##########
@@ -0,0 +1,132 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.SnapshotException;
+import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
Review comment:
Expand the wildcard static import here as well.
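Based on the assertions used in this test, the expanded form would be:

```java
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
```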
##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
##########
@@ -501,7 +509,31 @@ public void write(DataOutput out) throws IOException {
return statusList.toArray(
new SnapshottableDirectoryStatus[statusList.size()]);
}
-
+
+ /**
+ * List all the snapshots under a snapshottable directory.
+ */
+ public SnapshotStatus[] getSnapshotListing(INodesInPath iip)
+ throws IOException {
+ INodeDirectory srcRoot = getSnapshottableRoot(iip);
+ ReadOnlyList<Snapshot> snapshotList = srcRoot.
+ getDirectorySnapshottableFeature().getSnapshotList();
+ SnapshotStatus[] statuses = new SnapshotStatus[snapshotList.size()];
+ for (int count = 0; count < snapshotList.size(); count++) {
+ Snapshot s = snapshotList.get(count);
+ Snapshot.Root dir = s.getRoot();
+ statuses[count] = new SnapshotStatus(dir.getModificationTime(),
+ dir.getAccessTime(), dir.getFsPermission(),
+ EnumSet.noneOf(HdfsFileStatus.Flags.class),
+ dir.getUserName(), dir.getGroupName(),
+ dir.getLocalNameBytes(), dir.getId(),
+ dir.getChildrenNum(Snapshot.CURRENT_STATE_ID),
Review comment:
Can we add a comment here explaining why the ID should not be the snapshot ID, and noting that the children count can therefore be a wrong value?
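For example, a placeholder sketch for the requested comment (the actual reason needs to come from the author):

```java
// TODO: explain why Snapshot.CURRENT_STATE_ID is used here instead
// of the snapshot's own ID, and note that the children count may
// therefore not match the snapshot's point-in-time view.
dir.getChildrenNum(Snapshot.CURRENT_STATE_ID),
```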
##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestListSnapshot.java
##########
@@ -0,0 +1,132 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.SnapshotException;
+import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+/**
+ * Tests listSnapshot.
+ */
+public class TestListSnapshot {
+
+ static final short REPLICATION = 3;
+
+ private final Path dir1 = new Path("/TestSnapshot1");
+
+ Configuration conf;
+ MiniDFSCluster cluster;
+ FSNamesystem fsn;
+ DistributedFileSystem hdfs;
+
+ @Before
+ public void setUp() throws Exception {
+ conf = new Configuration();
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
+ .build();
+ cluster.waitActive();
+ fsn = cluster.getNamesystem();
+ hdfs = cluster.getFileSystem();
+ hdfs.mkdirs(dir1);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ if (cluster != null) {
+ cluster.shutdown();
+ cluster = null;
+ }
+ }
+
+ /**
+ * Test listing all the snapshottable directories.
+ */
+ @Test(timeout = 60000)
+ public void testListSnapshot() throws Exception {
+ fsn.getSnapshotManager().setAllowNestedSnapshots(true);
Review comment:
Should this flag be set to false instead?
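That is, if nested snapshots are not actually needed by this test:

```java
fsn.getSnapshotManager().setAllowNestedSnapshots(false);
```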
##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestListSnapshot.java
##########
@@ -0,0 +1,132 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.SnapshotException;
+import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+/**
+ * Tests listSnapshot.
+ */
+public class TestListSnapshot {
+
+ static final short REPLICATION = 3;
+
+ private final Path dir1 = new Path("/TestSnapshot1");
+
+ Configuration conf;
+ MiniDFSCluster cluster;
+ FSNamesystem fsn;
+ DistributedFileSystem hdfs;
+
+ @Before
+ public void setUp() throws Exception {
+ conf = new Configuration();
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
+ .build();
+ cluster.waitActive();
+ fsn = cluster.getNamesystem();
+ hdfs = cluster.getFileSystem();
+ hdfs.mkdirs(dir1);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ if (cluster != null) {
+ cluster.shutdown();
+ cluster = null;
+ }
+ }
+
+ /**
+ * Test listing all the snapshottable directories.
+ */
+ @Test(timeout = 60000)
+ public void testListSnapshot() throws Exception {
+ fsn.getSnapshotManager().setAllowNestedSnapshots(true);
+
+ // Initially there are no snapshottable directories in the system
+ SnapshotStatus[] snapshotStatuses = null;
+ SnapshottableDirectoryStatus[] dirs = hdfs.getSnapshottableDirListing();
+ assertNull(dirs);
+ LambdaTestUtils.intercept(SnapshotException.class,
+ "Directory is not a " + "snapshottable directory",
+ () -> hdfs.getSnapshotListing(dir1));
+ // Make root as snapshottable
+ final Path root = new Path("/");
+ hdfs.allowSnapshot(root);
+ dirs = hdfs.getSnapshottableDirListing();
+ assertEquals(1, dirs.length);
+ assertEquals("", dirs[0].getDirStatus().getLocalName());
+ assertEquals(root, dirs[0].getFullPath());
+ snapshotStatuses = hdfs.getSnapshotListing(root);
+ assertTrue(snapshotStatuses.length == 0);
+ // Make root non-snapshottable
+ hdfs.disallowSnapshot(root);
+ dirs = hdfs.getSnapshottableDirListing();
+ assertNull(dirs);
+ snapshotStatuses = hdfs.getSnapshotListing(root);
+ assertTrue(snapshotStatuses.length == 0);
+
+ // Make dir1 as snapshottable
+ hdfs.allowSnapshot(dir1);
+ hdfs.createSnapshot(dir1, "s0");
+ snapshotStatuses = hdfs.getSnapshotListing(dir1);
+ assertEquals(1, snapshotStatuses.length);
+ assertEquals("s0", snapshotStatuses[0].getDirStatus().
+ getLocalName());
+ assertEquals(SnapshotTestHelper.getSnapshotRoot(dir1, "s0"),
+ snapshotStatuses[0].getFullPath());
+ // snapshot id is zero
+ assertEquals(0, snapshotStatuses[0].getSnapshotID());
+ // Create a snapshot for dir2
Review comment:
Did you mean to create two snapshots for dir1? The comment above says dir2, but there is no dir2 in this test.
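That is, something like the following if the intent is a second snapshot on the same directory (snapshot name assumed):

```java
// Create a second snapshot for dir1; there is no dir2 in this test
hdfs.createSnapshot(dir1, "s1");
```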
##########
File path: hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
##########
@@ -563,13 +563,34 @@ message SnapshottableDirectoryStatusProto {
required bytes parent_fullpath = 4;
}
+/**
+ * Status of a snapshot directory: besides the normal information for
+ * a directory status, also include snapshot ID, and
+ * the full path of the parent directory.
+ */
+message SnapshotStatusProto {
+ required HdfsFileStatusProto dirStatus = 1;
+
+ // Fields specific for snapshot directory
+ required uint32 snapshotID = 2;
+ required bytes parent_fullpath = 3;
Review comment:
We should also display whether a particular snapshot has been deleted or not.
##########
File path: hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
##########
@@ -563,13 +563,34 @@ message SnapshottableDirectoryStatusProto {
required bytes parent_fullpath = 4;
}
+/**
+ * Status of a snapshot directory: besides the normal information for
+ * a directory status, also include snapshot ID, and
+ * the full path of the parent directory.
+ */
+message SnapshotStatusProto {
+ required HdfsFileStatusProto dirStatus = 1;
+
+ // Fields specific for snapshot directory
+ required uint32 snapshotID = 2;
+ required bytes parent_fullpath = 3;
Review comment:
We should also display whether a particular snapshot has been deleted or garbage collected.
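A sketch of what that could look like (field name and tag number are placeholders):

```proto
message SnapshotStatusProto {
  required HdfsFileStatusProto dirStatus = 1;

  // Fields specific for snapshot directory
  required uint32 snapshotID = 2;
  required bytes parent_fullpath = 3;
  // Placeholder: whether this snapshot has been deleted or
  // garbage collected.
  optional bool isDeleted = 4;
}
```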