Author: szetszwo
Date: Mon Jan 7 01:43:21 2013
New Revision: 1429643
URL: http://svn.apache.org/viewvc?rev=1429643&view=rev
Log:
HDFS-4230. Support listing of all the snapshottable directories. Contributed
by Jing Zhao
Added:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshottableDirListing.java
Modified:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
Modified:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt?rev=1429643&r1=1429642&r2=1429643&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
(original)
+++
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
Mon Jan 7 01:43:21 2013
@@ -92,3 +92,6 @@ Branch-2802 Snapshot (Unreleased)
HDFS-4357. Fix a bug that if an inode is replaced, further INode operations
should apply to the new inode. (Jing Zhao via szetszwo)
+
+ HDFS-4230. Support listing of all the snapshottable directories. (Jing Zhao
+ via szetszwo)
Modified:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java?rev=1429643&r1=1429642&r2=1429643&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
(original)
+++
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
Mon Jan 7 01:43:21 2013
@@ -118,6 +118,7 @@ import org.apache.hadoop.hdfs.protocol.H
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
@@ -1929,6 +1930,18 @@ public class DFSClient implements java.i
checkOpen();
namenode.renameSnapshot(snapshotDir, snapshotOldName, snapshotNewName);
}
+
+ /**
+ * Get all the current snapshottable directories.
+ * @return All the current snapshottable directories
+ * @throws IOException
+ * @see ClientProtocol#getSnapshottableDirListing()
+ */
+ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
+ throws IOException {
+ checkOpen();
+ return namenode.getSnapshottableDirListing();
+ }
/**
* Allow snapshot on a directory.
Modified:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1429643&r1=1429642&r2=1429643&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
(original)
+++
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
Mon Jan 7 01:43:21 2013
@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.protocol.H
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -942,4 +943,13 @@ public class DistributedFileSystem exten
String snapshotNewName) throws IOException {
dfs.renameSnapshot(path, snapshotOldName, snapshotNewName);
}
+
+ /**
+ * @return All the snapshottable directories
+ * @throws IOException
+ */
+ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
+ throws IOException {
+ return dfs.getSnapshottableDirListing();
+ }
}
Modified:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1429643&r1=1429642&r2=1429643&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
(original)
+++
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
Mon Jan 7 01:43:21 2013
@@ -525,6 +525,16 @@ public interface ClientProtocol {
boolean needLocation)
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException;
+
+ /**
+ * Get listing of all the snapshottable directories
+ *
+ * @return Information about all the current snapshottable directories
+ * @throws IOException If an I/O error occurred
+ */
+ @Idempotent
+ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
+ throws IOException;
///////////////////////////////////////
// System issues and management
Added:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java?rev=1429643&view=auto
==============================================================================
---
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
(added)
+++
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
Mon Jan 7 01:43:21 2013
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSUtil;
+
+/**
+ * Metadata about a snapshottable directory
+ */
+public class SnapshottableDirectoryStatus {
+ /** Basic information of the snapshottable directory */
+ private HdfsFileStatus dirStatus;
+
+ /** Number of snapshots that have been taken. */
+ private int snapshotNumber;
+
+ /** Number of snapshots allowed. */
+ private int snapshotQuota;
+
+ /** Full path of the parent. */
+ private byte[] parentFullPath;
+
+ public SnapshottableDirectoryStatus(long modification_time, long access_time,
+ FsPermission permission, String owner, String group, byte[] localName,
+ int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
+ this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
+ access_time, permission, owner, group, null, localName);
+ this.snapshotNumber = snapshotNumber;
+ this.snapshotQuota = snapshotQuota;
+ this.parentFullPath = parentFullPath;
+ }
+
+ /**
+ * @return Number of snapshots that have been taken for the directory
+ */
+ public int getSnapshotNumber() {
+ return snapshotNumber;
+ }
+
+ /**
+ * @return Number of snapshots allowed for the directory
+ */
+ public int getSnapshotQuota() {
+ return snapshotQuota;
+ }
+
+ /**
+ * @return Full path of the parent
+ */
+ public byte[] getParentFullPath() {
+ return parentFullPath;
+ }
+
+ /**
+ * @return The basic information of the directory
+ */
+ public HdfsFileStatus getDirStatus() {
+ return dirStatus;
+ }
+
+ /**
+ * @return Full path of the snapshottable directory
+ */
+ public Path getFullPath() {
+ String parentFullPathStr = (parentFullPath == null ||
parentFullPath.length == 0) ? null
+ : DFSUtil.bytes2String(parentFullPath);
+ return parentFullPathStr == null ? new Path(dirStatus.getLocalName())
+ : new Path(parentFullPathStr, dirStatus.getLocalName());
+ }
+}
Modified:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java?rev=1429643&r1=1429642&r2=1429643&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
(original)
+++
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
Mon Jan 7 01:43:21 2013
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.protocol.D
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
@@ -87,6 +88,8 @@ import org.apache.hadoop.hdfs.protocol.p
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto;
+import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
+import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
@@ -906,4 +909,24 @@ public class ClientNamenodeProtocolServe
throw new ServiceException(e);
}
}
+
+ static final GetSnapshottableDirListingResponseProto
NULL_GET_SNAPSHOTTABLE_DIR_LISTING_RESPONSE
+ = GetSnapshottableDirListingResponseProto.newBuilder().build();
+ @Override
+ public GetSnapshottableDirListingResponseProto getSnapshottableDirListing(
+ RpcController controller, GetSnapshottableDirListingRequestProto request)
+ throws ServiceException {
+ try {
+ SnapshottableDirectoryStatus[] result = server
+ .getSnapshottableDirListing();
+ if (result != null) {
+ return GetSnapshottableDirListingResponseProto.newBuilder().
+ setSnapshottableDirList(PBHelper.convert(result)).build();
+ } else {
+ return NULL_GET_SNAPSHOTTABLE_DIR_LISTING_RESPONSE;
+ }
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
}
Modified:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java?rev=1429643&r1=1429642&r2=1429643&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
(original)
+++
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
Mon Jan 7 01:43:21 2013
@@ -42,11 +42,11 @@ import org.apache.hadoop.hdfs.protocol.D
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto;
@@ -63,10 +63,10 @@ import org.apache.hadoop.hdfs.protocol.p
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto;
-import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto;
+import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
@@ -79,6 +79,8 @@ import org.apache.hadoop.hdfs.protocol.p
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto;
+import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
+import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
@@ -111,6 +113,7 @@ import org.apache.hadoop.io.EnumSetWrita
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
+import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.security.AccessControlException;
@@ -877,4 +880,22 @@ public class ClientNamenodeProtocolTrans
throw ProtobufHelper.getRemoteException(e);
}
}
+
+ @Override
+ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
+ throws IOException {
+ GetSnapshottableDirListingRequestProto req =
+ GetSnapshottableDirListingRequestProto.newBuilder().build();
+ try {
+ GetSnapshottableDirListingResponseProto result = rpcProxy
+ .getSnapshottableDirListing(null, req);
+
+ if (result.hasSnapshottableDirList()) {
+ return PBHelper.convert(result.getSnapshottableDirList());
+ }
+ return null;
+ } catch (ServiceException e) {
+ throw ProtobufHelper.getRemoteException(e);
+ }
+ }
}
Modified:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java?rev=1429643&r1=1429642&r2=1429643&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
(original)
+++
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
Mon Jan 7 01:43:21 2013
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.protocol.H
import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
@@ -93,6 +94,8 @@ import org.apache.hadoop.hdfs.protocol.p
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto;
import
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto;
+import
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto;
+import
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
import
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
@@ -1021,7 +1024,6 @@ public class PBHelper {
return new EnumSetWritable<CreateFlag>(result);
}
-
public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
if (fs == null)
return null;
@@ -1036,6 +1038,21 @@ public class PBHelper {
fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null);
}
+ public static SnapshottableDirectoryStatus convert(
+ SnapshottableDirectoryStatusProto sdirStatusProto) {
+ if (sdirStatusProto == null) {
+ return null;
+ }
+ return new SnapshottableDirectoryStatus(sdirStatusProto.getDirStatus()
+ .getModificationTime(), sdirStatusProto.getDirStatus().getAccessTime(),
+ PBHelper.convert(sdirStatusProto.getDirStatus().getPermission()),
+ sdirStatusProto.getDirStatus().getOwner(), sdirStatusProto
+ .getDirStatus().getGroup(), sdirStatusProto.getDirStatus()
+ .getPath().toByteArray(), sdirStatusProto.getSnapshotNumber(),
+ sdirStatusProto.getSnapshotQuota(), sdirStatusProto.getParentFullpath()
+ .toByteArray());
+ }
+
public static HdfsFileStatusProto convert(HdfsFileStatus fs) {
if (fs == null)
return null;
@@ -1070,6 +1087,25 @@ public class PBHelper {
return builder.build();
}
+ public static SnapshottableDirectoryStatusProto convert(
+ SnapshottableDirectoryStatus status) {
+ if (status == null) {
+ return null;
+ }
+ int snapshotNumber = status.getSnapshotNumber();
+ int snapshotQuota = status.getSnapshotQuota();
+ byte[] parentFullPath = status.getParentFullPath();
+ ByteString parentFullPathBytes = ByteString
+ .copyFrom(parentFullPath == null ? new byte[0] : parentFullPath);
+ HdfsFileStatusProto fs = convert(status.getDirStatus());
+ SnapshottableDirectoryStatusProto.Builder builder =
+ SnapshottableDirectoryStatusProto
+ .newBuilder().setSnapshotNumber(snapshotNumber)
+ .setSnapshotQuota(snapshotQuota).setParentFullpath(parentFullPathBytes)
+ .setDirStatus(fs);
+ return builder.build();
+ }
+
public static HdfsFileStatusProto[] convert(HdfsFileStatus[] fs) {
if (fs == null) return null;
final int len = fs.length;
@@ -1311,5 +1347,38 @@ public class PBHelper {
return JournalInfoProto.newBuilder().setClusterID(j.getClusterId())
.setLayoutVersion(j.getLayoutVersion())
.setNamespaceID(j.getNamespaceId()).build();
+ }
+
+ public static SnapshottableDirectoryStatus[] convert(
+ SnapshottableDirectoryListingProto sdlp) {
+ if (sdlp == null)
+ return null;
+ List<SnapshottableDirectoryStatusProto> list = sdlp
+ .getSnapshottableDirListingList();
+ if (list.isEmpty()) {
+ return new SnapshottableDirectoryStatus[0];
+ } else {
+ SnapshottableDirectoryStatus[] result =
+ new SnapshottableDirectoryStatus[list.size()];
+ for (int i = 0; i < list.size(); i++) {
+ result[i] = (SnapshottableDirectoryStatus) PBHelper
+ .convert(list.get(i));
+ }
+ return result;
+ }
+ }
+
+ public static SnapshottableDirectoryListingProto convert(
+ SnapshottableDirectoryStatus[] status) {
+ if (status == null)
+ return null;
+ SnapshottableDirectoryStatusProto[] protos =
+ new SnapshottableDirectoryStatusProto[status.length];
+ for (int i = 0; i < status.length; i++) {
+ protos[i] = PBHelper.convert(status[i]);
+ }
+ List<SnapshottableDirectoryStatusProto> protoList = Arrays.asList(protos);
+ return SnapshottableDirectoryListingProto.newBuilder()
+ .addAllSnapshottableDirListing(protoList).build();
}
}
Modified:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1429643&r1=1429642&r2=1429643&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
(original)
+++
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
Mon Jan 7 01:43:21 2013
@@ -23,6 +23,7 @@ import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -677,6 +678,8 @@ public class FSDirectory implements Clos
+ error);
throw new IOException(error);
}
+ List<INodeDirectorySnapshottable> snapshottableDirs =
+ new ArrayList<INodeDirectorySnapshottable>();
if (dstInode != null) { // Destination exists
// It's OK to rename a file to a symlink and vice versa
if (dstInode.isDirectory() != srcInode.isDirectory()) {
@@ -702,7 +705,7 @@ public class FSDirectory implements Clos
throw new IOException(error);
}
}
- INode snapshotNode = hasSnapshot(dstInode);
+ INode snapshotNode = hasSnapshot(dstInode, snapshottableDirs);
if (snapshotNode != null) {
error = "The directory " + dstInode.getFullPathName()
+ " cannot be deleted for renaming since "
@@ -770,6 +773,12 @@ public class FSDirectory implements Clos
filesDeleted = rmdst.collectSubtreeBlocksAndClear(collectedBlocks);
getFSNamesystem().removePathAndBlocks(src, collectedBlocks);
}
+
+ if (snapshottableDirs.size() > 0) {
+ // There are snapshottable directories (without snapshots) to be
+ // deleted. Need to update the SnapshotManager.
+ namesystem.removeSnapshottableDirs(snapshottableDirs);
+ }
return filesDeleted >0;
}
} finally {
@@ -1034,13 +1043,20 @@ public class FSDirectory implements Clos
// snapshottable dir with snapshots, or its descendants have
// snapshottable dir with snapshots
INode targetNode = inodes[inodes.length-1];
- INode snapshotNode = hasSnapshot(targetNode);
+ List<INodeDirectorySnapshottable> snapshottableDirs =
+ new ArrayList<INodeDirectorySnapshottable>();
+ INode snapshotNode = hasSnapshot(targetNode, snapshottableDirs);
if (snapshotNode != null) {
throw new IOException("The directory " + targetNode.getFullPathName()
+ " cannot be deleted since " + snapshotNode.getFullPathName()
+ " is snapshottable and already has snapshots");
}
filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks, now);
+ if (snapshottableDirs.size() > 0) {
+ // There are some snapshottable directories without snapshots to be
+ // deleted. Need to update the SnapshotManager.
+ namesystem.removeSnapshottableDirs(snapshottableDirs);
+ }
}
} finally {
writeUnlock();
@@ -1160,18 +1176,28 @@ public class FSDirectory implements Clos
* Check if the given INode (or one of its descendants) is snapshottable and
* already has snapshots.
*
- * @param target The given INode
+ * @param target
+ * The given INode
+ * @param snapshottableDirs
+ * The list of directories that are snapshottable but do not have
+ * snapshots yet
* @return The INode which is snapshottable and already has snapshots.
*/
- private static INode hasSnapshot(INode target) {
+ private static INode hasSnapshot(INode target,
+ List<INodeDirectorySnapshottable> snapshottableDirs) {
if (target instanceof INodeDirectory) {
INodeDirectory targetDir = (INodeDirectory) target;
- if (targetDir.isSnapshottable()
- && ((INodeDirectorySnapshottable) targetDir).getNumSnapshots() > 0) {
- return target;
- }
+ if (targetDir.isSnapshottable()) {
+ INodeDirectorySnapshottable ssTargetDir =
+ (INodeDirectorySnapshottable) targetDir;
+ if (ssTargetDir.getNumSnapshots() > 0) {
+ return target;
+ } else {
+ snapshottableDirs.add(ssTargetDir);
+ }
+ }
for (INode child : targetDir.getChildrenList(null)) {
- INode snapshotDir = hasSnapshot(child);
+ INode snapshotDir = hasSnapshot(child, snapshottableDirs);
if (snapshotDir != null) {
return snapshotDir;
}
Modified:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1429643&r1=1429642&r2=1429643&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
(original)
+++
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
Mon Jan 7 01:43:21 2013
@@ -137,6 +137,7 @@ import org.apache.hadoop.hdfs.protocol.D
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -175,6 +176,7 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer;
import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import
org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithLink;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
@@ -5750,6 +5752,39 @@ public class FSNamesystem implements Nam
newSnapshotRoot.toString(), null);
}
}
+
+ /**
+ * Get the list of all the current snapshottable directories
+ * @return The list of all the current snapshottable directories
+ * @throws IOException
+ */
+ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
+ throws IOException {
+ readLock();
+ try {
+ checkOperation(OperationCategory.READ);
+
+ SnapshottableDirectoryStatus[] status = snapshotManager
+ .getSnapshottableDirListing();
+ if (auditLog.isInfoEnabled() && isExternalInvocation()) {
+ logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
+ "listSnapshottableDirectory", null, null, null);
+ }
+ return status;
+ } finally {
+ readUnlock();
+ }
+ }
+
+ /**
+ * Remove a list of INodeDirectorySnapshottable from the SnapshotManager
+ * @param toRemove the list of INodeDirectorySnapshottable to be removed
+ */
+ void removeSnapshottableDirs(List<INodeDirectorySnapshottable> toRemove) {
+ if (snapshotManager != null) {
+ snapshotManager.removeSnapshottableDirs(toRemove);
+ }
+ }
/**
* Default AuditLogger implementation; used when no access logger is
Modified:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1429643&r1=1429642&r2=1429643&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
(original)
+++
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
Mon Jan 7 01:43:21 2013
@@ -62,6 +62,7 @@ import org.apache.hadoop.hdfs.protocol.H
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
@@ -1115,4 +1116,13 @@ class NameNodeRpcServer implements Namen
metrics.incrRenameSnapshotOps();
namesystem.renameSnapshot(snapshotRoot, snapshotOldName, snapshotNewName);
}
+
+ @Override // Client Protocol
+ public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
+ throws IOException {
+ SnapshottableDirectoryStatus[] status = namesystem
+ .getSnapshottableDirListing();
+ metrics.incrListSnapshottableDirOps();
+ return status;
+ }
}
Modified:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java?rev=1429643&r1=1429642&r2=1429643&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
(original)
+++
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
Mon Jan 7 01:43:21 2013
@@ -65,6 +65,8 @@ public class NameNodeMetrics {
MutableCounterLong createSnapshotOps;
@Metric("Number of renameSnapshot operations")
MutableCounterLong renameSnapshotOps;
+ @Metric("Number of listSnapshottableDirectory operations")
+ MutableCounterLong listSnapshottableDirOps;
@Metric("Journal transactions") MutableRate transactions;
@Metric("Journal syncs") MutableRate syncs;
@@ -183,6 +185,10 @@ public class NameNodeMetrics {
renameSnapshotOps.incr();
}
+ public void incrListSnapshottableDirOps() {
+ listSnapshottableDirOps.incr();
+ }
+
public void addTransaction(long latency) {
transactions.add(latency);
}
Modified:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java?rev=1429643&r1=1429642&r2=1429643&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
(original)
+++
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
Mon Jan 7 01:43:21 2013
@@ -22,6 +22,8 @@ import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
@@ -156,4 +158,35 @@ public class SnapshotManager implements
return numSnapshots.get();
}
+ /**
+ * @return All the current snapshottable directories
+ */
+ public SnapshottableDirectoryStatus[] getSnapshottableDirListing() {
+ if (snapshottables.isEmpty()) {
+ return null;
+ }
+
+ SnapshottableDirectoryStatus[] status =
+ new SnapshottableDirectoryStatus[snapshottables.size()];
+ for (int i = 0; i < snapshottables.size(); i++) {
+ INodeDirectorySnapshottable dir = snapshottables.get(i);
+ status[i] = new SnapshottableDirectoryStatus(dir.getModificationTime(),
+ dir.getAccessTime(), dir.getFsPermission(), dir.getUserName(),
+ dir.getGroupName(), dir.getLocalNameBytes(), dir.getNumSnapshots(),
+ dir.getSnapshotQuota(), dir.getParent() == null ? new byte[0]
+ : DFSUtil.string2Bytes(dir.getParent().getFullPathName()));
+ }
+ return status;
+ }
+
+ /**
+ * Remove snapshottable directories from {@link #snapshottables}
+ * @param toRemoveList A list of INodeDirectorySnapshottable to be removed
+ */
+ public void removeSnapshottableDirs(
+ List<INodeDirectorySnapshottable> toRemoveList) {
+ if (toRemoveList != null) {
+ this.snapshottables.removeAll(toRemoveList);
+ }
+ }
}
\ No newline at end of file
Modified:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1429643&r1=1429642&r2=1429643&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
(original)
+++
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
Mon Jan 7 01:43:21 2013
@@ -208,6 +208,12 @@ message GetListingResponseProto {
optional DirectoryListingProto dirList = 1;
}
+message GetSnapshottableDirListingRequestProto { // no input parameters
+}
+message GetSnapshottableDirListingResponseProto {
+ optional SnapshottableDirectoryListingProto snapshottableDirList = 1;
+}
+
message RenewLeaseRequestProto {
required string clientName = 1;
}
@@ -557,4 +563,6 @@ service ClientNamenodeProtocol {
returns(AllowSnapshotResponseProto);
rpc disallowSnapshot(DisallowSnapshotRequestProto)
returns(DisallowSnapshotResponseProto);
+ rpc getSnapshottableDirListing(GetSnapshottableDirListingRequestProto)
+ returns(GetSnapshottableDirListingResponseProto);
}
Modified:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1429643&r1=1429642&r2=1429643&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
(original)
+++
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
Mon Jan 7 01:43:21 2013
@@ -152,7 +152,7 @@ message LocatedBlocksProto {
/**
- * Status of a file, directory or symlink
+ * Status of a file, directory or symlink
* Optionally includes a file's block locations if requested by client on the
rpc call.
*/
message HdfsFileStatusProto {
@@ -214,6 +214,27 @@ message DirectoryListingProto {
}
/**
+ * Status of a snapshottable directory: besides the normal information for
+ * a directory status, also include snapshot quota, number of snapshots, and
+ * the full path of the parent directory.
+ */
+message SnapshottableDirectoryStatusProto {
+ required HdfsFileStatusProto dirStatus = 1;
+
+ // Fields specific for snapshottable directory
+ required uint32 snapshot_quota = 2;
+ required uint32 snapshot_number = 3;
+ required bytes parent_fullpath = 4;
+}
+
+/**
+ * Snapshottable directory listing
+ */
+message SnapshottableDirectoryListingProto {
+ repeated SnapshottableDirectoryStatusProto snapshottableDirListing = 1;
+}
+
+/**
* Common node information shared by all the nodes in the cluster
*/
message StorageInfoProto {
Added:
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshottableDirListing.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshottableDirListing.java?rev=1429643&view=auto
==============================================================================
---
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshottableDirListing.java
(added)
+++
hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshottableDirListing.java
Mon Jan 7 01:43:21 2013
@@ -0,0 +1,149 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestSnapshottableDirListing {
+
+ static final long seed = 0;
+ static final short REPLICATION = 3;
+ static final long BLOCKSIZE = 1024;
+
+ private final Path dir1 = new Path("/TestSnapshot1");
+ private final Path dir2 = new Path("/TestSnapshot2");
+
+ Configuration conf;
+ MiniDFSCluster cluster;
+ FSNamesystem fsn;
+ DistributedFileSystem hdfs;
+
+ @Before
+ public void setUp() throws Exception {
+ conf = new Configuration();
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
+ .build();
+ cluster.waitActive();
+ fsn = cluster.getNamesystem();
+ hdfs = cluster.getFileSystem();
+ hdfs.mkdirs(dir1);
+ hdfs.mkdirs(dir2);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+
+ /**
+ * Test listing all the snapshottable directories
+ */
+ @Test
+ public void testListSnapshottableDir() throws Exception {
+ // Initially there are no snapshottable directories in the system
+ SnapshottableDirectoryStatus[] dirs = hdfs.getSnapshottableDirListing();
+ assertNull(dirs);
+
+ // Make dir1 as snapshottable
+ hdfs.allowSnapshot(dir1.toString());
+ dirs = hdfs.getSnapshottableDirListing();
+ assertEquals(1, dirs.length);
+ assertEquals(dir1.getName(), dirs[0].getDirStatus().getLocalName());
+ assertEquals(dir1, dirs[0].getFullPath());
+ // There is no snapshot for dir1 yet
+ assertEquals(0, dirs[0].getSnapshotNumber());
+
+ // Make dir2 as snapshottable
+ hdfs.allowSnapshot(dir2.toString());
+ dirs = hdfs.getSnapshottableDirListing();
+ assertEquals(2, dirs.length);
+ assertEquals(dir1.getName(), dirs[0].getDirStatus().getLocalName());
+ assertEquals(dir1, dirs[0].getFullPath());
+ assertEquals(dir2.getName(), dirs[1].getDirStatus().getLocalName());
+ assertEquals(dir2, dirs[1].getFullPath());
+ // There is no snapshot for dir2 yet
+ assertEquals(0, dirs[1].getSnapshotNumber());
+
+ // Create dir3
+ final Path dir3 = new Path("/TestSnapshot3");
+ hdfs.mkdirs(dir3);
+ // Rename dir3 to dir2
+ hdfs.rename(dir3, dir2, Rename.OVERWRITE);
+ // Now we only have one snapshottable dir: dir1
+ dirs = hdfs.getSnapshottableDirListing();
+ assertEquals(1, dirs.length);
+ assertEquals(dir1, dirs[0].getFullPath());
+
+ // Make dir2 snapshottable again
+ hdfs.allowSnapshot(dir2.toString());
+ // Create two snapshots for dir2
+ hdfs.createSnapshot("s1", dir2.toString());
+ hdfs.createSnapshot("s2", dir2.toString());
+ dirs = hdfs.getSnapshottableDirListing();
+ // There are now 2 snapshots for dir2
+ assertEquals(dir2, dirs[1].getFullPath());
+ assertEquals(2, dirs[1].getSnapshotNumber());
+
+ // Create sub-dirs under dir1
+ Path sub1 = new Path(dir1, "sub1");
+ Path file1 = new Path(sub1, "file1");
+ Path sub2 = new Path(dir1, "sub2");
+ Path file2 = new Path(sub2, "file2");
+ DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
+ DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
+ // Make sub1 and sub2 snapshottable
+ hdfs.allowSnapshot(sub1.toString());
+ hdfs.allowSnapshot(sub2.toString());
+ dirs = hdfs.getSnapshottableDirListing();
+ assertEquals(4, dirs.length);
+ assertEquals(dir1, dirs[0].getFullPath());
+ assertEquals(dir2, dirs[1].getFullPath());
+ assertEquals(sub1, dirs[2].getFullPath());
+ assertEquals(sub2, dirs[3].getFullPath());
+
+ // reset sub1
+ hdfs.disallowSnapshot(sub1.toString());
+ dirs = hdfs.getSnapshottableDirListing();
+ assertEquals(3, dirs.length);
+ assertEquals(dir1, dirs[0].getFullPath());
+ assertEquals(dir2, dirs[1].getFullPath());
+ assertEquals(sub2, dirs[2].getFullPath());
+
+ // Delete dir1 recursively: both dir1 and sub2 disappear from the snapshottable listing
+ hdfs.delete(dir1, true);
+ dirs = hdfs.getSnapshottableDirListing();
+ assertEquals(1, dirs.length);
+ assertEquals(dir2.getName(), dirs[0].getDirStatus().getLocalName());
+ assertEquals(dir2, dirs[0].getFullPath());
+ }
+}