Author: szetszwo
Date: Fri Jun 15 23:00:36 2012
New Revision: 1350825
URL: http://svn.apache.org/viewvc?rev=1350825&view=rev
Log:
HDFS-3518. Add a utility method HdfsUtils.isHealthy(uri) for checking if the
given HDFS is healthy.
Added:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL:
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1350825&r1=1350824&r2=1350825&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Jun 15
23:00:36 2012
@@ -13,8 +13,6 @@ Trunk (unreleased changes)
HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
- HDFS-744. Support hsync in HDFS. (Lars Hofhansl via szetszwo)
-
IMPROVEMENTS
HDFS-1620. Rename HdfsConstants -> HdfsServerConstants, FSConstants ->
@@ -162,9 +160,14 @@ Branch-2 ( Unreleased changes )
NEW FEATURES
+ HDFS-744. Support hsync in HDFS. (Lars Hofhansl via szetszwo)
+
HDFS-3042. Automatic failover support for NameNode HA (todd)
(see dedicated section below for breakdown of subtasks)
+ HDFS-3518. Add a utility method HdfsUtils.isHealthy(uri) for checking if
+ the given HDFS is healthy. (szetszwo)
+
IMPROVEMENTS
HDFS-3390. DFSAdmin should print full stack traces of errors when DEBUG
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL:
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1350825&r1=1350824&r2=1350825&view=diff
==============================================================================
---
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
(original)
+++
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
Fri Jun 15 23:00:36 2012
@@ -96,7 +96,7 @@ public class DistributedFileSystem exten
*/
@Override
public String getScheme() {
- return "hdfs";
+ return HdfsConstants.HDFS_URI_SCHEME;
}
@Deprecated
Added:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
URL:
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java?rev=1350825&view=auto
==============================================================================
---
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
(added)
+++
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
Fri Jun 15 23:00:36 2012
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client;
+
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.io.IOUtils;
+
+/**
+ * Public utility methods for HDFS clients (added in HDFS-3518).
+ */
[email protected]
[email protected]
+public class HdfsUtils {
+ private static final Log LOG = LogFactory.getLog(HdfsUtils.class);
+
+ /**
+ * Is the HDFS healthy?
+ * HDFS is considered as healthy if it is up and not in safemode.
+ *
+ * @param uri the HDFS URI; only the scheme/authority are used, the path is ignored.
+ * @return true if HDFS is healthy; false, otherwise.
+ */
+ public static boolean isHealthy(URI uri) {
+ // reject non-hdfs URIs up front (scheme compared case-insensitively)
+ final String scheme = uri.getScheme();
+ if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(scheme)) {
+ throw new IllegalArgumentException("The scheme is not "
+ + HdfsConstants.HDFS_URI_SCHEME + ", uri=" + uri);
+ }
+
+ final Configuration conf = new Configuration();
+ // disable the FileSystem cache so this probe gets its own instance
+ // disable client retries so an unreachable namenode fails fast here
+ conf.setBoolean(String.format("fs.%s.impl.disable.cache", scheme), true);
+ conf.setBoolean(DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, false);
+ conf.setInt(
+ CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
+
+ DistributedFileSystem fs = null;
+ try {
+ fs = (DistributedFileSystem)FileSystem.get(uri, conf);
+ final boolean safemode = fs.setSafeMode(SafeModeAction.SAFEMODE_GET);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Is namenode in safemode? " + safemode + "; uri=" + uri);
+ }
+
+ fs.close();
+ fs = null;
+ return !safemode;
+ } catch(IOException e) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Got an exception for uri=" + uri, e);
+ }
+ return false;
+ } finally {
+ IOUtils.cleanup(LOG, fs);
+ }
+ }
+}
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL:
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1350825&r1=1350824&r2=1350825&view=diff
==============================================================================
---
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
(original)
+++
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
Fri Jun 15 23:00:36 2012
@@ -31,6 +31,7 @@ import java.io.InputStream;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.SocketTimeoutException;
+import java.net.URI;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
@@ -51,6 +52,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.HdfsUtils;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -823,9 +825,11 @@ public class TestDFSClientRetries extend
.build();
try {
cluster.waitActive();
+ final DistributedFileSystem dfs = cluster.getFileSystem();
+ final URI uri = dfs.getUri();
+ assertTrue(HdfsUtils.isHealthy(uri));
//create a file
- final DistributedFileSystem dfs = cluster.getFileSystem();
final long length = 1L << 20;
final Path file1 = new Path(dir, "foo");
DFSTestUtil.createFile(dfs, file1, length, numDatanodes, 20120406L);
@@ -835,7 +839,9 @@ public class TestDFSClientRetries extend
assertEquals(length, s1.getLen());
//shutdown namenode
+ assertTrue(HdfsUtils.isHealthy(uri));
cluster.shutdownNameNode(0);
+ assertFalse(HdfsUtils.isHealthy(uri));
//namenode is down, create another file in a thread
final Path file3 = new Path(dir, "file");
@@ -860,8 +866,10 @@ public class TestDFSClientRetries extend
try {
//sleep, restart, and then wait active
TimeUnit.SECONDS.sleep(30);
+ assertFalse(HdfsUtils.isHealthy(uri));
cluster.restartNameNode(0, false);
cluster.waitActive();
+ assertTrue(HdfsUtils.isHealthy(uri));
} catch (Exception e) {
exceptions.add(e);
}
@@ -877,7 +885,9 @@ public class TestDFSClientRetries extend
assertEquals(dfs.getFileChecksum(file1), dfs.getFileChecksum(file3));
//enter safe mode
+ assertTrue(HdfsUtils.isHealthy(uri));
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+ assertFalse(HdfsUtils.isHealthy(uri));
//leave safe mode in a new thread
new Thread(new Runnable() {
@@ -886,7 +896,9 @@ public class TestDFSClientRetries extend
try {
//sleep and then leave safe mode
TimeUnit.SECONDS.sleep(30);
+ assertFalse(HdfsUtils.isHealthy(uri));
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+ assertTrue(HdfsUtils.isHealthy(uri));
} catch (Exception e) {
exceptions.add(e);
}
@@ -898,6 +910,8 @@ public class TestDFSClientRetries extend
DFSTestUtil.createFile(dfs, file2, length, numDatanodes, 20120406L);
assertEquals(dfs.getFileChecksum(file1), dfs.getFileChecksum(file2));
+ assertTrue(HdfsUtils.isHealthy(uri));
+
//make sure it won't retry on exceptions like FileNotFoundException
final Path nonExisting = new Path(dir, "nonExisting");
LOG.info("setPermission: " + nonExisting);