Author: szetszwo
Date: Thu Feb 20 23:47:58 2014
New Revision: 1570392

URL: http://svn.apache.org/r1570392
Log:
Merge r1569890 through r1570391 from trunk.

Added:
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
      - copied unchanged from r1570391, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
Modified:
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHttpPolicy.java
    hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java

Propchange: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1570084-1570391

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1570392&r1=1570391&r2=1570392&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Feb 20 23:47:58 2014
@@ -253,12 +253,6 @@ Trunk (Unreleased)
     HDFS-5719. FSImage#doRollback() should close prevState before return
     (Ted Yu via brandonli)
 
-    HDFS-5768. Consolidate the serialization code in DelegationTokenSecretManager
-    (Haohui Mai via brandonli)
-
-    HDFS-5775. Consolidate the code for serialization in CacheManager
-    (Haohui Mai via brandonli)
-
     HDFS-5794. Fix the inconsistency of layout version number of 
     ADD_DATANODE_AND_STORAGE_UUIDS between trunk and branch-2. (jing9)
 
@@ -424,6 +418,12 @@ Release 2.4.0 - UNRELEASED
     HDFS-5868. Make hsync implementation pluggable on the DataNode.
     (Buddy Taylor via Arpit Agarwal)
 
+    HDFS-5768. Consolidate the serialization code in DelegationTokenSecretManager
+    (Haohui Mai via brandonli)
+
+    HDFS-5775. Consolidate the code for serialization in CacheManager
+    (Haohui Mai via brandonli)
+
   OPTIMIZATIONS
 
     HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery
@@ -537,6 +537,12 @@ Release 2.4.0 - UNRELEASED
 
     HDFS-5979. Typo and logger fix for fsimage PB code. (wang)
 
+    HDFS-5962. Mtime and atime are not persisted for symbolic links. (Akira
+    Ajisaka via kihwal)
+
+    HDFS-5944. LeaseManager:findLeaseWithPrefixPath can't handle path like /a/b/
+    and cause SecondaryNameNode failed do checkpoint (Yunjiong Zhao via brandonli)
+
   BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS
 
     HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9)

Propchange: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1570084-1570391

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1570392&r1=1570391&r2=1570392&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Thu Feb 20 23:47:58 2014
@@ -377,8 +377,6 @@ public class DFSConfigKeys extends Commo
   public static final int     DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT = 10;
   public static final String  DFS_SUPPORT_APPEND_KEY = "dfs.support.append";
   public static final boolean DFS_SUPPORT_APPEND_DEFAULT = true;
-  public static final String  DFS_HTTPS_ENABLE_KEY = "dfs.https.enable";
-  public static final boolean DFS_HTTPS_ENABLE_DEFAULT = false;
   public static final String  DFS_HTTP_POLICY_KEY = "dfs.http.policy";
   public static final String  DFS_HTTP_POLICY_DEFAULT = HttpConfig.Policy.HTTP_ONLY.name();
   public static final String  DFS_DEFAULT_CHUNK_VIEW_SIZE_KEY = "dfs.default.chunk.view.size";

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1570392&r1=1570391&r2=1570392&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Thu Feb 20 23:47:58 2014
@@ -1553,44 +1553,11 @@ public class DFSUtil {
   }
 
   /**
-   * Get http policy. Http Policy is chosen as follows:
-   * <ol>
-   * <li>If hadoop.ssl.enabled is set, http endpoints are not started. Only
-   * https endpoints are started on configured https ports</li>
-   * <li>This configuration is overridden by dfs.https.enable configuration, if
-   * it is set to true. In that case, both http and https endpoints are stared.</li>
-   * <li>All the above configurations are overridden by dfs.http.policy
-   * configuration. With this configuration you can set http-only, https-only
-   * and http-and-https endpoints.</li>
-   * </ol>
-   * See hdfs-default.xml documentation for more details on each of the above
-   * configuration settings.
+   * Get http policy.
    */
   public static HttpConfig.Policy getHttpPolicy(Configuration conf) {
-    String policyStr = conf.get(DFSConfigKeys.DFS_HTTP_POLICY_KEY);
-    if (policyStr == null) {
-      boolean https = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY,
-          DFSConfigKeys.DFS_HTTPS_ENABLE_DEFAULT);
-
-      boolean hadoopSsl = conf.getBoolean(
-          CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY,
-          CommonConfigurationKeys.HADOOP_SSL_ENABLED_DEFAULT);
-
-      if (hadoopSsl) {
-        LOG.warn(CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY
-            + " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY
-            + ".");
-      }
-      if (https) {
-        LOG.warn(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY
-            + " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY
-            + ".");
-      }
-
-      return (hadoopSsl || https) ? HttpConfig.Policy.HTTP_AND_HTTPS
-          : HttpConfig.Policy.HTTP_ONLY;
-    }
-
+    String policyStr = conf.get(DFSConfigKeys.DFS_HTTP_POLICY_KEY,
+        DFSConfigKeys.DFS_HTTP_POLICY_DEFAULT);
     HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr);
     if (policy == null) {
       throw new HadoopIllegalArgumentException("Unregonized value '"
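
The hunk above removes the deprecated dfs.https.enable and hadoop.ssl.enabled
fallbacks, so the effective policy now comes from dfs.http.policy alone and
defaults to HTTP_ONLY. A minimal sketch (not part of this commit) of how a
caller observes the simplified behaviour; the class name HttpPolicyExample is
made up for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.http.HttpConfig;

    public class HttpPolicyExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        // Nothing set: getHttpPolicy falls back to DFS_HTTP_POLICY_DEFAULT.
        System.out.println(DFSUtil.getHttpPolicy(conf));   // HTTP_ONLY

        // dfs.http.policy is now the only knob; the removed keys are ignored.
        conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY,
            HttpConfig.Policy.HTTPS_ONLY.name());
        System.out.println(DFSUtil.getHttpPolicy(conf));   // HTTPS_ONLY
      }
    }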

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java?rev=1570392&r1=1570391&r2=1570392&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java Thu Feb 20 23:47:58 2014
@@ -279,7 +279,8 @@ public final class FSImageFormatPBINode 
           parent.getLoaderContext().getStringTable());
 
       INodeSymlink sym = new INodeSymlink(n.getId(), n.getName().toByteArray(),
-          permissions, 0, 0, s.getTarget().toStringUtf8());
+          permissions, s.getModificationTime(), s.getAccessTime(),
+          s.getTarget().toStringUtf8());
 
       return sym;
     }
@@ -482,7 +483,9 @@ public final class FSImageFormatPBINode 
       INodeSection.INodeSymlink.Builder b = INodeSection.INodeSymlink
           .newBuilder()
           .setPermission(buildPermissionStatus(n, state.getStringMap()))
-          .setTarget(ByteString.copyFrom(n.getSymlink()));
+          .setTarget(ByteString.copyFrom(n.getSymlink()))
+          .setModificationTime(n.getModificationTime())
+          .setAccessTime(n.getAccessTime());
 
       INodeSection.INode r = buildINodeCommon(n)
           .setType(INodeSection.INode.Type.SYMLINK).setSymlink(b).build();

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java?rev=1570392&r1=1570391&r2=1570392&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java Thu Feb 20 23:47:58 2014
@@ -339,7 +339,12 @@ public class LeaseManager {
     }
 
     final Map<String, Lease> entries = new HashMap<String, Lease>();
-    final int srclen = prefix.length();
+    int srclen = prefix.length();
+    
+    // prefix may ended with '/'
+    if (prefix.charAt(srclen - 1) == Path.SEPARATOR_CHAR) {
+      srclen -= 1;
+    }
 
     for(Map.Entry<String, Lease> entry : path2lease.tailMap(prefix).entrySet()) {
       final String p = entry.getKey();
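
The change above makes findLeaseWithPrefixPath tolerate a trailing '/' in the
query path (HDFS-5944) by trimming it from srclen before the per-entry check.
A standalone illustration of the prefix test, using made-up names (it is not
the LeaseManager code itself, which also tracks real Lease objects):

    import java.util.SortedMap;
    import java.util.TreeMap;

    public class PrefixMatchSketch {
      // Mirrors the corrected check: an entry matches when it equals the
      // prefix (ignoring a trailing '/') or continues with a path separator.
      static boolean matches(String prefix, String path) {
        int srclen = prefix.length();
        if (prefix.charAt(srclen - 1) == '/') {
          srclen -= 1;                       // normalization added by the fix
        }
        return path.startsWith(prefix)
            && (path.length() == srclen || path.charAt(srclen) == '/');
      }

      public static void main(String[] args) {
        SortedMap<String, String> path2lease = new TreeMap<String, String>();
        path2lease.put("/a/b/file-under-construction", "lease");

        for (String p : path2lease.tailMap("/a/b/").keySet()) {
          // With srclen left at 5 (old behaviour) charAt(5) is 'f' and the
          // entry is skipped; with the trim it is correctly matched.
          System.out.println(p + " -> " + matches("/a/b/", p));   // true
        }
      }
    }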

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java?rev=1570392&r1=1570391&r2=1570392&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java Thu Feb 20 23:47:58 2014
@@ -170,8 +170,9 @@ final class LsrPBImage {
       PermissionStatus p = FSImageFormatPBINode.Loader.loadPermission(
           d.getPermission(), stringTable);
       out.print(String.format("-%s  - %8s %10s %10s %10d %s%s -> %s\n", p
-          .getPermission().toString(), p.getUserName(), p.getGroupName(), 0, 0,
-          parent, inode.getName().toStringUtf8(), d.getTarget().toStringUtf8()));
+          .getPermission().toString(), p.getUserName(), p.getGroupName(), d
+          .getModificationTime(), 0, parent, inode.getName().toStringUtf8(),
+          d.getTarget().toStringUtf8()));
     }
       break;
     default:

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java?rev=1570392&r1=1570391&r2=1570392&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java Thu Feb 20 23:47:58 2014
@@ -289,8 +289,9 @@ public final class PBImageXmlWriter {
   }
 
   private void dumpINodeSymlink(INodeSymlink s) {
-    o("permission", dumpPermission(s.getPermission())).o("target",
-        s.getTarget().toStringUtf8());
+    o("permission", dumpPermission(s.getPermission()))
+        .o("target", s.getTarget().toStringUtf8())
+        .o("mtime", s.getModificationTime()).o("atime", s.getAccessTime());
   }
 
   private void dumpNameSection(InputStream in) throws IOException {

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto?rev=1570392&r1=1570391&r2=1570392&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto Thu Feb 20 23:47:58 2014
@@ -131,6 +131,8 @@ message INodeSection {
   message INodeSymlink {
     optional fixed64 permission = 1;
     optional bytes target = 2;
+    optional uint64 modificationTime = 3;
+    optional uint64 accessTime = 4;
   }
 
   message INode {
@@ -301,4 +303,4 @@ message CacheManagerSection {
   required uint32 numDirectives   = 3;
   // repeated CachePoolInfoProto pools
   // repeated CacheDirectiveInfoProto directives
-}
\ No newline at end of file
+}
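
Because modificationTime and accessTime are added as optional fields, an
fsimage written before this change still parses; the loader simply sees the
proto2 default of 0, matching the old behaviour of dropping symlink times. A
small sketch of reading the fields defensively, assuming the generated
INodeSection classes referenced by FSImageFormatPBINode above are on the
classpath (the sketch's own class and method names are made up):

    import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;

    public class SymlinkTimesSketch {
      // Distinguishes "recorded as 0" from "written by a pre-HDFS-5962 image",
      // where the optional fields are simply absent.
      static String describe(INodeSection.INodeSymlink s) {
        if (!s.hasModificationTime() && !s.hasAccessTime()) {
          return "legacy image: no symlink times recorded";
        }
        return "mtime=" + s.getModificationTime()
            + " atime=" + s.getAccessTime();
      }
    }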

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHttpPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHttpPolicy.java?rev=1570392&r1=1570391&r2=1570392&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHttpPolicy.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHttpPolicy.java Thu Feb 20 23:47:58 2014
@@ -17,12 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.http.HttpConfig.Policy.HTTP_AND_HTTPS;
-import static org.apache.hadoop.http.HttpConfig.Policy.HTTP_ONLY;
-
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
-import org.junit.Assert;
 import org.junit.Test;
 
 public final class TestHttpPolicy {
@@ -33,22 +29,4 @@ public final class TestHttpPolicy {
     conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, "invalid");
     DFSUtil.getHttpPolicy(conf);
   }
-
-  @Test
-  public void testDeprecatedConfiguration() {
-    Configuration conf = new Configuration(false);
-    Assert.assertSame(HTTP_ONLY, DFSUtil.getHttpPolicy(conf));
-
-    conf.setBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, true);
-    Assert.assertSame(HTTP_AND_HTTPS, DFSUtil.getHttpPolicy(conf));
-
-    conf = new Configuration(false);
-    conf.setBoolean(DFSConfigKeys.HADOOP_SSL_ENABLED_KEY, true);
-    Assert.assertSame(HTTP_AND_HTTPS, DFSUtil.getHttpPolicy(conf));
-
-    conf = new Configuration(false);
-    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HTTP_ONLY.name());
-    conf.setBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, true);
-    Assert.assertSame(HTTP_ONLY, DFSUtil.getHttpPolicy(conf));
-  }
 }

Modified: hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java?rev=1570392&r1=1570391&r2=1570392&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java (original)
+++ hadoop/common/branches/HDFS-5535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java Thu Feb 20 23:47:58 2014
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.DFSOutputS
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
@@ -135,4 +136,51 @@ public class TestFSImage {
       }
     }
   }
+
+  /**
+   * Ensure mtime and atime can be loaded from fsimage.
+   */
+  @Test(timeout=60000)
+  public void testLoadMtimeAtime() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+      DistributedFileSystem hdfs = cluster.getFileSystem();
+      String userDir = hdfs.getHomeDirectory().toUri().getPath().toString();
+      Path file = new Path(userDir, "file");
+      Path dir = new Path(userDir, "/dir");
+      Path link = new Path(userDir, "/link");
+      hdfs.createNewFile(file);
+      hdfs.mkdirs(dir);
+      hdfs.createSymlink(file, link, false);
+
+      long mtimeFile = hdfs.getFileStatus(file).getModificationTime();
+      long atimeFile = hdfs.getFileStatus(file).getAccessTime();
+      long mtimeDir = hdfs.getFileStatus(dir).getModificationTime();
+      long mtimeLink = hdfs.getFileLinkStatus(link).getModificationTime();
+      long atimeLink = hdfs.getFileLinkStatus(link).getAccessTime();
+
+      // save namespace and restart cluster
+      hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+      hdfs.saveNamespace();
+      hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+      cluster.shutdown();
+      cluster = new MiniDFSCluster.Builder(conf).format(false)
+          .numDataNodes(1).build();
+      cluster.waitActive();
+      hdfs = cluster.getFileSystem();
+      
+      assertEquals(mtimeFile, hdfs.getFileStatus(file).getModificationTime());
+      assertEquals(atimeFile, hdfs.getFileStatus(file).getAccessTime());
+      assertEquals(mtimeDir, hdfs.getFileStatus(dir).getModificationTime());
+      assertEquals(mtimeLink, hdfs.getFileLinkStatus(link).getModificationTime());
+      assertEquals(atimeLink, hdfs.getFileLinkStatus(link).getAccessTime());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }

