Author: todd
Date: Thu May 3 01:56:33 2012
New Revision: 1333288
URL: http://svn.apache.org/viewvc?rev=1333288&view=rev
Log:
HADOOP-8279. Allow manual failover to be invoked when auto-failover is enabled.
Contributed by Todd Lipcon.
Modified:
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-policy.xml
Modified:
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1333288&r1=1333287&r2=1333288&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
(original)
+++
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
Thu May 3 01:56:33 2012
@@ -352,4 +352,6 @@ public class DFSConfigKeys extends Commo
public static final String DFS_HA_FENCE_METHODS_KEY =
"dfs.ha.fencing.methods";
public static final String DFS_HA_AUTO_FAILOVER_ENABLED_KEY =
"dfs.ha.automatic-failover.enabled";
public static final boolean DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT = false;
+ public static final String DFS_HA_ZKFC_PORT_KEY = "dfs.ha.zkfc.port";
+ public static final int DFS_HA_ZKFC_PORT_DEFAULT = 8019;
}
Modified:
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java?rev=1333288&r1=1333287&r2=1333288&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
(original)
+++
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
Thu May 3 01:56:33 2012
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.ha.ZKFCProtocol;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -47,6 +48,8 @@ public class HDFSPolicyProvider extends
new Service("security.namenode.protocol.acl", NamenodeProtocol.class),
new Service(CommonConfigurationKeys.SECURITY_HA_SERVICE_PROTOCOL_ACL,
HAServiceProtocol.class),
+ new Service(CommonConfigurationKeys.SECURITY_ZKFC_PROTOCOL_ACL,
+ ZKFCProtocol.class),
new Service(
CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_POLICY,
RefreshAuthorizationPolicyProtocol.class),
Modified:
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1333288&r1=1333287&r2=1333288&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
(original)
+++
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
Thu May 3 01:56:33 2012
@@ -181,7 +181,8 @@ public class NameNode {
DFS_NAMENODE_BACKUP_ADDRESS_KEY,
DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY,
- DFS_HA_FENCE_METHODS_KEY
+ DFS_HA_FENCE_METHODS_KEY,
+ DFS_HA_ZKFC_PORT_KEY
};
/**
Modified:
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java?rev=1333288&r1=1333287&r2=1333288&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
(original)
+++
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
Thu May 3 01:56:33 2012
@@ -30,11 +30,18 @@ import org.apache.hadoop.classification.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceTarget;
import org.apache.hadoop.ha.ZKFailoverController;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
+import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.ha.proto.HAZKInfoProtos.ActiveNodeInfo;
+import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;
@@ -48,6 +55,7 @@ public class DFSZKFailoverController ext
LogFactory.getLog(DFSZKFailoverController.class);
private NNHAServiceTarget localTarget;
private Configuration localNNConf;
+ private AccessControlList adminAcl;
@Override
protected HAServiceTarget dataToTarget(byte[] data) {
@@ -68,15 +76,19 @@ public class DFSZKFailoverController ext
ret + ": Stored protobuf was " + proto + ", address from our own " +
"configuration for this NameNode was " + ret.getAddress());
}
+
+ ret.setZkfcPort(proto.getZkfcPort());
return ret;
}
@Override
protected byte[] targetToData(HAServiceTarget target) {
InetSocketAddress addr = target.getAddress();
+
return ActiveNodeInfo.newBuilder()
.setHostname(addr.getHostName())
.setPort(addr.getPort())
+ .setZkfcPort(target.getZKFCAddress().getPort())
.setNameserviceId(localTarget.getNameServiceId())
.setNamenodeId(localTarget.getNameNodeId())
.build()
@@ -84,6 +96,24 @@ public class DFSZKFailoverController ext
}
@Override
+ protected InetSocketAddress getRpcAddressToBindTo() {
+ int zkfcPort = getZkfcPort(localNNConf);
+ return new InetSocketAddress(localTarget.getAddress().getAddress(),
+ zkfcPort);
+ }
+
+
+ @Override
+ protected PolicyProvider getPolicyProvider() {
+ return new HDFSPolicyProvider();
+ }
+
+ static int getZkfcPort(Configuration conf) {
+ return conf.getInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY,
+ DFSConfigKeys.DFS_HA_ZKFC_PORT_DEFAULT);
+ }
+
+ @Override
public void setConf(Configuration conf) {
localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
String nsId = DFSUtil.getNamenodeNameServiceId(conf);
@@ -98,10 +128,21 @@ public class DFSZKFailoverController ext
localTarget = new NNHAServiceTarget(localNNConf, nsId, nnId);
+ // Setup ACLs
+ adminAcl = new AccessControlList(
+ conf.get(DFSConfigKeys.DFS_ADMIN, " "));
+
super.setConf(localNNConf);
LOG.info("Failover controller configured for NameNode " +
nsId + "." + nnId);
}
+
+
+ @Override
+ protected void initRPC() throws IOException {
+ super.initRPC();
+ localTarget.setZkfcPort(rpcServer.getAddress().getPort());
+ }
@Override
public HAServiceTarget getLocalTarget() {
@@ -127,4 +168,19 @@ public class DFSZKFailoverController ext
System.exit(ToolRunner.run(
new DFSZKFailoverController(), args));
}
+
+ @Override
+ protected void checkRpcAdminAccess() throws IOException, AccessControlException {
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ UserGroupInformation zkfcUgi = UserGroupInformation.getLoginUser();
+ if (adminAcl.isUserAllowed(ugi) ||
+ ugi.getShortUserName().equals(zkfcUgi.getShortUserName())) {
+ LOG.info("Allowed RPC access from " + ugi + " at " + Server.getRemoteAddress());
+ return;
+ }
+ String msg = "Disallowed RPC access from " + ugi + " at " +
+ Server.getRemoteAddress() + ". Not listed in " + DFSConfigKeys.DFS_ADMIN;
+ LOG.warn(msg);
+ throw new AccessControlException(msg);
+ }
}
Modified:
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java?rev=1333288&r1=1333287&r2=1333288&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
(original)
+++
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
Thu May 3 01:56:33 2012
@@ -45,12 +45,13 @@ public class NNHAServiceTarget extends H
private static final String NAMENODE_ID_KEY = "namenodeid";
private final InetSocketAddress addr;
+ private InetSocketAddress zkfcAddr;
private NodeFencer fencer;
private BadFencingConfigurationException fenceConfigError;
private final String nnId;
private final String nsId;
private final boolean autoFailoverEnabled;
-
+
public NNHAServiceTarget(Configuration conf,
String nsId, String nnId) {
Preconditions.checkNotNull(nnId);
@@ -77,17 +78,26 @@ public class NNHAServiceTarget extends H
}
this.addr = NetUtils.createSocketAddr(serviceAddr,
NameNode.DEFAULT_PORT);
+
+ this.autoFailoverEnabled = targetConf.getBoolean(
+ DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY,
+ DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT);
+ if (autoFailoverEnabled) {
+ int port = DFSZKFailoverController.getZkfcPort(targetConf);
+ if (port != 0) {
+ setZkfcPort(port);
+ }
+ }
+
try {
this.fencer = NodeFencer.create(targetConf,
DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY);
} catch (BadFencingConfigurationException e) {
this.fenceConfigError = e;
}
+
this.nnId = nnId;
this.nsId = nsId;
- this.autoFailoverEnabled = targetConf.getBoolean(
- DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY,
- DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT);
}
/**
@@ -99,6 +109,21 @@ public class NNHAServiceTarget extends H
}
@Override
+ public InetSocketAddress getZKFCAddress() {
+ Preconditions.checkState(autoFailoverEnabled,
+ "ZKFC address not relevant when auto failover is off");
+ assert zkfcAddr != null;
+
+ return zkfcAddr;
+ }
+
+ void setZkfcPort(int port) {
+ assert autoFailoverEnabled;
+
+ this.zkfcAddr = new InetSocketAddress(addr.getAddress(), port);
+ }
+
+ @Override
public void checkFencingConfigured() throws BadFencingConfigurationException {
if (fenceConfigError != null) {
throw fenceConfigError;
Modified:
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto?rev=1333288&r1=1333287&r2=1333288&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto
(original)
+++
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto
Thu May 3 01:56:33 2012
@@ -24,4 +24,5 @@ message ActiveNodeInfo {
required string hostname = 3;
required int32 port = 4;
+ required int32 zkfcPort = 5;
}
Modified:
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java?rev=1333288&r1=1333287&r2=1333288&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
(original)
+++
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
Thu May 3 01:56:33 2012
@@ -22,10 +22,10 @@ import static org.junit.Assert.*;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ha.ClientBaseWithFixes;
-import org.apache.hadoop.ha.HAServiceStatus;
import org.apache.hadoop.ha.ZKFailoverController;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.TestNodeFencer.AlwaysSucceedFencer;
@@ -62,6 +62,15 @@ public class TestDFSZKFailoverController
AlwaysSucceedFencer.class.getName());
conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
+ // Turn off IPC client caching, so that the suite can handle
+ // the restart of the daemons between test cases.
+ conf.setInt(
+ CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
+ 0);
+
+ conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn1", 10003);
+ conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2", 10004);
+
MiniDFSNNTopology topology = new MiniDFSNNTopology()
.addNameservice(new MiniDFSNNTopology.NSConf("ns1")
.addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10001))
@@ -101,18 +110,6 @@ public class TestDFSZKFailoverController
}
/**
- * Test that, when automatic failover is enabled, the manual
- * failover script refuses to run.
- */
- @Test(timeout=10000)
- public void testManualFailoverIsDisabled() throws Exception {
- DFSHAAdmin admin = new DFSHAAdmin();
- admin.setConf(conf);
- int rc = admin.run(new String[]{"-failover", "nn1", "nn2"});
- assertEquals(-1, rc);
- }
-
- /**
* Test that automatic failover is triggered by shutting the
* active NN down.
*/
@@ -148,6 +145,29 @@ public class TestDFSZKFailoverController
thr2.zkfc.getLocalTarget().getAddress());
}
+ @Test(timeout=30000)
+ public void testManualFailover() throws Exception {
+ thr2.zkfc.getLocalTarget().getZKFCProxy(conf, 15000).gracefulFailover();
+ waitForHAState(0, HAServiceState.STANDBY);
+ waitForHAState(1, HAServiceState.ACTIVE);
+
+ thr1.zkfc.getLocalTarget().getZKFCProxy(conf, 15000).gracefulFailover();
+ waitForHAState(0, HAServiceState.ACTIVE);
+ waitForHAState(1, HAServiceState.STANDBY);
+ }
+
+ @Test(timeout=30000)
+ public void testManualFailoverWithDFSHAAdmin() throws Exception {
+ DFSHAAdmin tool = new DFSHAAdmin();
+ tool.setConf(conf);
+ tool.run(new String[]{"-failover", "nn1", "nn2"});
+ waitForHAState(0, HAServiceState.STANDBY);
+ waitForHAState(1, HAServiceState.ACTIVE);
+ tool.run(new String[]{"-failover", "nn2", "nn1"});
+ waitForHAState(0, HAServiceState.ACTIVE);
+ waitForHAState(1, HAServiceState.STANDBY);
+ }
+
private void waitForHAState(int nnidx, final HAServiceState state)
throws TimeoutException, InterruptedException {
final NameNode nn = cluster.getNameNode(nnidx);
Modified:
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java?rev=1333288&r1=1333287&r2=1333288&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
(original)
+++
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
Thu May 3 01:56:33 2012
@@ -38,7 +38,7 @@ import org.apache.hadoop.ha.HAServicePro
import org.apache.hadoop.ha.HAServiceStatus;
import org.apache.hadoop.ha.HAServiceTarget;
import org.apache.hadoop.ha.HealthCheckFailedException;
-import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.ha.ZKFCProtocol;
import org.apache.hadoop.test.MockitoUtil;
import org.junit.Before;
@@ -56,6 +56,7 @@ public class TestDFSHAAdmin {
private ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
private String errOutput;
private HAServiceProtocol mockProtocol;
+ private ZKFCProtocol mockZkfcProtocol;
private static final String NSID = "ns1";
@@ -88,6 +89,7 @@ public class TestDFSHAAdmin {
@Before
public void setup() throws IOException {
mockProtocol = MockitoUtil.mockProtocol(HAServiceProtocol.class);
+ mockZkfcProtocol = MockitoUtil.mockProtocol(ZKFCProtocol.class);
tool = new DFSHAAdmin() {
@Override
@@ -97,7 +99,9 @@ public class TestDFSHAAdmin {
// OVerride the target to return our mock protocol
try {
Mockito.doReturn(mockProtocol).when(spy).getProxy(
- Mockito.<Configuration>any(), Mockito.anyInt());
+ Mockito.<Configuration>any(), Mockito.anyInt());
+ Mockito.doReturn(mockZkfcProtocol).when(spy).getZKFCProxy(
+ Mockito.<Configuration>any(), Mockito.anyInt());
} catch (IOException e) {
throw new AssertionError(e); // mock setup doesn't really throw
}
@@ -172,8 +176,6 @@ public class TestDFSHAAdmin {
assertTrue(errOutput.contains("Refusing to manually manage"));
assertEquals(-1, runTool("-transitionToStandby", "nn1"));
assertTrue(errOutput.contains("Refusing to manually manage"));
- assertEquals(-1, runTool("-failover", "nn1", "nn2"));
- assertTrue(errOutput.contains("Refusing to manually manage"));
Mockito.verify(mockProtocol, Mockito.never())
.transitionToActive(anyReqInfo());
@@ -186,12 +188,10 @@ public class TestDFSHAAdmin {
assertEquals(0, runTool("-transitionToActive", "-forcemanual", "nn1"));
setupConfirmationOnSystemIn();
assertEquals(0, runTool("-transitionToStandby", "-forcemanual", "nn1"));
- setupConfirmationOnSystemIn();
- assertEquals(0, runTool("-failover", "-forcemanual", "nn1", "nn2"));
- Mockito.verify(mockProtocol, Mockito.times(2)).transitionToActive(
+ Mockito.verify(mockProtocol, Mockito.times(1)).transitionToActive(
reqInfoCaptor.capture());
- Mockito.verify(mockProtocol, Mockito.times(2)).transitionToStandby(
+ Mockito.verify(mockProtocol, Mockito.times(1)).transitionToStandby(
reqInfoCaptor.capture());
// All of the RPCs should have had the "force" source
@@ -300,6 +300,19 @@ public class TestDFSHAAdmin {
tool.setConf(conf);
assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
}
+
+ @Test
+ public void testFailoverWithAutoHa() throws Exception {
+ Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
+ // Turn on auto-HA in the config
+ HdfsConfiguration conf = getHAConf();
+ conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
+ conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "shell(true)");
+ tool.setConf(conf);
+
+ assertEquals(0, runTool("-failover", "nn1", "nn2"));
+ Mockito.verify(mockZkfcProtocol).gracefulFailover();
+ }
@Test
public void testForceFenceOptionListedBeforeArgs() throws Exception {
Modified:
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-policy.xml
URL:
http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-policy.xml?rev=1333288&r1=1333287&r2=1333288&view=diff
==============================================================================
---
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-policy.xml
(original)
+++
hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-policy.xml
Thu May 3 01:56:33 2012
@@ -116,5 +116,11 @@
<description>ACL for HAService protocol used by HAAdmin to manage the
active and stand-by states of namenode.</description>
</property>
+ <property>
+ <name>security.zkfc.protocol.acl</name>
+ <value>*</value>
+ <description>ACL for access to the ZK Failover Controller
+ </description>
+ </property>
</configuration>