This is an automated email from the ASF dual-hosted git repository.

wuzhiguo pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/bigtop-manager.git


The following commit(s) were added to refs/heads/main by this push:
     new c0d667db feat: add HA deployment function for HDFS (#268)
c0d667db is described below

commit c0d667db01a39b062668cfd86ac6b83e185825dd
Author: lvkaihua <[email protected]>
AuthorDate: Fri Dec 19 11:08:39 2025 +0800

    feat: add HA deployment function for HDFS (#268)
---
 .../server/command/job/service/ServiceAddJob.java  |  4 +-
 .../bigtop/3.3.0/services/hadoop/metainfo.xml      | 14 +++-
 .../stack/bigtop/v3_3_0/hadoop/HadoopParams.java   | 71 +++++++++++++++++--
 .../stack/bigtop/v3_3_0/hadoop/HadoopSetup.java    | 72 ++++++++++++++++++-
 ...{NameNodeScript.java => JournalNodeScript.java} | 33 ++-------
 .../stack/bigtop/v3_3_0/hadoop/NameNodeScript.java | 80 ++++++++++++++++++++--
 .../{NameNodeScript.java => ZkfcScript.java}       | 39 ++++-------
 .../v3_3_0/hadoop/JournalNodeScriptTest.java       | 66 ++++++++++++++++++
 .../stack/bigtop/v3_3_0/hadoop/ZkfcScriptTest.java | 66 ++++++++++++++++++
 9 files changed, 381 insertions(+), 64 deletions(-)
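
In short: zkfc and journalnode become first-class Hadoop components, HadoopParams emits HA core-site/hdfs-site properties whenever two NameNodes are assigned, and the NameNode/JournalNode/ZKFC scripts sequence the HA bootstrap. Collecting the MessageFormat strings from the diff below, the hdfs invocations boil down to the following, roughly in the order the scripts enforce (<conf> stands for the managed config dir):

    hdfs --daemon start journalnode                               (each JournalNode)
    hdfs --config <conf> namenode -format -nonInteractive -force  (first NameNode, after all JournalNode ports answer)
    hdfs --daemon start namenode                                  (first NameNode)
    hdfs namenode -bootstrapStandby -nonInteractive               (second NameNode, once the first reports active over JMX)
    hdfs --daemon start namenode                                  (second NameNode)
    hdfs --config <conf> zkfc -formatZK                           (ZKFC init)
    hdfs --daemon start zkfc                                      (each ZKFC host)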

diff --git a/bigtop-manager-server/src/main/java/org/apache/bigtop/manager/server/command/job/service/ServiceAddJob.java b/bigtop-manager-server/src/main/java/org/apache/bigtop/manager/server/command/job/service/ServiceAddJob.java
index a5589d8e..4d3d286f 100644
--- a/bigtop-manager-server/src/main/java/org/apache/bigtop/manager/server/command/job/service/ServiceAddJob.java
+++ b/bigtop-manager-server/src/main/java/org/apache/bigtop/manager/server/command/job/service/ServiceAddJob.java
@@ -93,7 +93,9 @@ public class ServiceAddJob extends AbstractServiceJob {
                         String componentName = componentHost.getComponentName();
                         List<String> hostnames = componentHost.getHostnames();
                         if (CollectionUtils.isEmpty(hostnames)) {
-                            throw new RuntimeException("No hostnames found for component " + componentName);
+                            //                            throw new RuntimeException("No hostnames found for component "
+                            // + componentName);
+                            continue;
                         }
                         componentHostsMap.put(componentName, hostnames);
                     }
diff --git a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hadoop/metainfo.xml b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hadoop/metainfo.xml
index e85dabc7..e2091435 100644
--- a/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hadoop/metainfo.xml
+++ b/bigtop-manager-server/src/main/resources/stacks/bigtop/3.3.0/services/hadoop/metainfo.xml
@@ -52,11 +52,23 @@
                 <category>server</category>
                 <cardinality>1+</cardinality>
             </component>
+            <component>
+                <name>zkfc</name>
+                <display-name>zkfc</display-name>
+                <category>server</category>
+                <cardinality>0-2</cardinality>
+            </component>
+            <component>
+                <name>journalnode</name>
+                <display-name>journalnode</display-name>
+                <category>server</category>
+                <cardinality>0-3</cardinality>
+            </component>
             <component>
                 <name>secondarynamenode</name>
                 <display-name>SNameNode</display-name>
                 <category>server</category>
-                <cardinality>1</cardinality>
+                <cardinality>0-1</cardinality>
             </component>
 
             <!-- Yarn Components -->
diff --git a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/HadoopParams.java b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/HadoopParams.java
index 9c1798a5..6e262d4e 100644
--- a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/HadoopParams.java
+++ b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/HadoopParams.java
@@ -33,11 +33,14 @@ import lombok.Getter;
 import lombok.NoArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 
+import java.io.File;
 import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 @Getter
 @Slf4j
@@ -64,6 +67,9 @@ public class HadoopParams extends BigtopParams {
     private String dfsNameNodeDir;
     private String dfsNameNodeCheckPointDir;
     private String dfsDomainSocketPathPrefix;
+    private String dfsJourNalNodeDir;
+    private String dfsHttpPort;
+    private String journalHttpPort;
 
     private String nodeManagerLogDir = "/hadoop/yarn/log";
     private String nodeManagerLocalDir = "/hadoop/yarn/local";
@@ -103,9 +109,27 @@ public class HadoopParams extends BigtopParams {
     public Map<String, Object> coreSite() {
         Map<String, Object> coreSite = LocalSettings.configurations(getServiceName(), "core-site");
         List<String> namenodeList = LocalSettings.componentHosts("namenode");
-        if (!namenodeList.isEmpty()) {
+        List<String> zookeeperServerHosts = LocalSettings.componentHosts("zookeeper_server");
+        Map<String, Object> ZKPort = LocalSettings.configurations("zookeeper", "zoo.cfg");
+        String clientPort = (String) ZKPort.get("clientPort");
+        StringBuilder zkString = new StringBuilder();
+        for (int i = 0; i < zookeeperServerHosts.size(); i++) {
+            String host = zookeeperServerHosts.get(i);
+            if (host == null || host.trim().isEmpty()) {
+                continue;
+            }
+            zkString.append(host.trim()).append(":").append(clientPort);
+            if (i != zookeeperServerHosts.size() - 1) {
+                zkString.append(",");
+            }
+        }
+        if (!namenodeList.isEmpty() && namenodeList.size() == 1) {
             coreSite.put(
                     "fs.defaultFS", ((String) 
coreSite.get("fs.defaultFS")).replace("localhost", namenodeList.get(0)));
+        } else if (!namenodeList.isEmpty() && namenodeList.size() == 2) {
+            coreSite.put(
+                    "fs.defaultFS", ((String) 
coreSite.get("fs.defaultFS")).replace("localhost:8020", "nameservice1"));
+            coreSite.put("ha.zookeeper.quorum", zkString);
         }
         return coreSite;
     }
@@ -119,7 +143,8 @@ public class HadoopParams extends BigtopParams {
     public Map<String, Object> hdfsSite() {
         Map<String, Object> hdfsSite = LocalSettings.configurations(getServiceName(), "hdfs-site");
         List<String> namenodeList = LocalSettings.componentHosts("namenode");
-        if (!namenodeList.isEmpty()) {
+        List<String> journalNodeList = LocalSettings.componentHosts("journalnode");
+        if (!namenodeList.isEmpty() && namenodeList.size() == 1) {
             hdfsSite.put(
                     "dfs.namenode.rpc-address",
                     ((String) hdfsSite.get("dfs.namenode.rpc-address")).replace("0.0.0.0", namenodeList.get(0)));
@@ -129,6 +154,26 @@ public class HadoopParams extends BigtopParams {
             hdfsSite.put(
                     "dfs.namenode.https-address",
                     ((String) hdfsSite.get("dfs.namenode.https-address")).replace("0.0.0.0", namenodeList.get(0)));
+        } else if (!namenodeList.isEmpty() && namenodeList.size() == 2) {
+            hdfsSite.remove("dfs.namenode.http-address");
+            hdfsSite.put("dfs.ha.automatic-failover.enabled", "true");
+            hdfsSite.put("dfs.nameservices", "nameservice1");
+            hdfsSite.put("dfs.ha.namenodes.nameservice1", "nn1,nn2");
+            hdfsSite.put("dfs.namenode.rpc-address.nameservice1.nn1", 
namenodeList.get(0) + ":8020");
+            hdfsSite.put("dfs.namenode.rpc-address.nameservice1.nn2", 
namenodeList.get(1) + ":8020");
+            hdfsSite.put("dfs.namenode.http-address.nameservice1.nn1", 
namenodeList.get(0) + ":9870");
+            hdfsSite.put("dfs.namenode.http-address.nameservice1.nn2", 
namenodeList.get(1) + ":9870");
+            hdfsSite.put(
+                    "dfs.namenode.shared.edits.dir",
+                    "qjournal://" + journalNodeList.get(0) + ":8485;" + 
journalNodeList.get(1) + ":8485;"
+                            + journalNodeList.get(2) + ":8485" + 
"/nameservice1");
+            hdfsSite.put("dfs.journalnode.edits.dir", "/hadoop/dfs/journal");
+            hdfsSite.put(
+                    "dfs.client.failover.proxy.provider.nameservice1",
+                    
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
+            hdfsSite.put("dfs.journalnode.edits.dir", "/hadoop/dfs/journal");
+            hdfsSite.put("dfs.ha.fencing.methods", "shell(/bin/true)");
+            hdfsSite.put("dfs.replication", "3");
         }
 
         // Configure native library dependent settings
@@ -139,12 +184,30 @@ public class HadoopParams extends BigtopParams {
         nameNodeFormattedDirs = Arrays.stream(dfsNameNodeDir.split(","))
                 .map(x -> x + "/namenode-formatted/")
                 .toList();
-
+        String dfsHttpAddress = (String) hdfsSite.get("dfs.namenode.http-address.nameservice1.nn1");
+        if (dfsHttpAddress != null && dfsHttpAddress.contains(":")) {
+            String[] parts = dfsHttpAddress.split(":");
+            if (parts.length >= 2) {
+                dfsHttpPort = parts[1].trim();
+            }
+        }
+        String journalHttpAddress = (String) hdfsSite.get("dfs.namenode.shared.edits.dir");
+        Pattern pattern = Pattern.compile(":(\\d{1,5})");
+        Matcher matcher = pattern.matcher(journalHttpAddress);
+        if (matcher.find()) {
+            journalHttpPort = matcher.group(1);
+            log.info("find jounalnode port: " + journalHttpPort);
+        } else {
+            log.warn("not found journalnode port!");
+        }
         String dfsDomainSocketPath = (String) hdfsSite.get("dfs.domain.socket.path");
         if (StringUtils.isNotBlank(dfsDomainSocketPath)) {
-            dfsDomainSocketPathPrefix = dfsDomainSocketPath.replace("dn._PORT", "");
+            File file = new File(dfsDomainSocketPath);
+            dfsDomainSocketPathPrefix = file.getParent();
+            //            dfsDomainSocketPathPrefix = dfsDomainSocketPath.replace("dn._PORT", "");
         }
         dfsNameNodeCheckPointDir = (String) hdfsSite.get("dfs.namenode.checkpoint.dir");
+        dfsJourNalNodeDir = (String) hdfsSite.get("dfs.journalnode.edits.dir");
         return hdfsSite;
     }
 
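A note on the HA branch above: it indexes journalNodeList.get(0) through get(2) directly, so despite the 0-3 cardinality an HA deployment effectively needs exactly two NameNodes and three JournalNodes. For a hypothetical cluster with NameNodes nn-1/nn-2, JournalNodes jn-1/jn-2/jn-3, ZooKeeper on zk-1..zk-3:2181, and assuming the stock fs.defaultFS of hdfs://localhost:8020, the resulting settings come out roughly as:

    fs.defaultFS                                = hdfs://nameservice1
    ha.zookeeper.quorum                         = zk-1:2181,zk-2:2181,zk-3:2181
    dfs.nameservices                            = nameservice1
    dfs.ha.namenodes.nameservice1               = nn1,nn2
    dfs.namenode.rpc-address.nameservice1.nn1   = nn-1:8020
    dfs.namenode.rpc-address.nameservice1.nn2   = nn-2:8020
    dfs.namenode.http-address.nameservice1.nn1  = nn-1:9870
    dfs.namenode.http-address.nameservice1.nn2  = nn-2:9870
    dfs.namenode.shared.edits.dir               = qjournal://jn-1:8485;jn-2:8485;jn-3:8485/nameservice1
    dfs.ha.automatic-failover.enabled           = true
    dfs.ha.fencing.methods                      = shell(/bin/true)

dfsHttpPort then parses to 9870 from the nn1 http-address, and journalHttpPort to 8485, since the regex grabs the first :port in the qjournal URI.
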
diff --git a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/HadoopSetup.java b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/HadoopSetup.java
index 88375ace..b979a012 100644
--- a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/HadoopSetup.java
+++ b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/HadoopSetup.java
@@ -23,6 +23,7 @@ import org.apache.bigtop.manager.common.shell.ShellResult;
 import org.apache.bigtop.manager.stack.core.enums.ConfigType;
 import org.apache.bigtop.manager.stack.core.exception.StackException;
 import org.apache.bigtop.manager.stack.core.spi.param.Params;
+import org.apache.bigtop.manager.stack.core.utils.LocalSettings;
 import org.apache.bigtop.manager.stack.core.utils.linux.LinuxFileUtils;
 import org.apache.bigtop.manager.stack.core.utils.linux.LinuxOSUtils;
 
@@ -33,8 +34,12 @@ import lombok.NoArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 
 import java.io.File;
+import java.net.InetSocketAddress;
+import java.net.Socket;
 import java.text.MessageFormat;
+import java.util.List;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
 @Slf4j
 @NoArgsConstructor(access = AccessLevel.PRIVATE)
@@ -75,6 +80,14 @@ public class HadoopSetup {
                             Constants.PERMISSION_755,
                             true);
                 }
+                case "journalnode": {
+                    LinuxFileUtils.createDirectories(
+                            hadoopParams.getDfsJourNalNodeDir(),
+                            hadoopUser,
+                            hadoopGroup,
+                            Constants.PERMISSION_755,
+                            true);
+                }
                 case "datanode": {
                     LinuxFileUtils.createDirectories(
                             hadoopParams.getDfsDomainSocketPathPrefix(),
@@ -229,9 +242,13 @@ public class HadoopSetup {
     public static void formatNameNode(HadoopParams hadoopParams) {
         if (!isNameNodeFormatted(hadoopParams)) {
             String formatCmd = MessageFormat.format(
-                    "{0}/hdfs --config {1} namenode -format -nonInteractive",
+                    "{0}/hdfs --config {1} namenode -format -nonInteractive 
-force",
                     hadoopParams.binDir(), hadoopParams.confDir());
             try {
+                boolean allJnReachable = checkAllJournalNodesPortReachable(hadoopParams);
+                if (!allJnReachable) {
+                    throw new StackException("Cannot format NameNode: Some JournalNodes are unreachable.");
+                }
+                }
                 LinuxOSUtils.sudoExecCmd(formatCmd, hadoopParams.user());
             } catch (Exception e) {
                 throw new StackException(e);
@@ -248,6 +265,59 @@ public class HadoopSetup {
         }
     }
 
+    private static boolean checkAllJournalNodesPortReachable(HadoopParams hadoopParams) throws InterruptedException {
+        List<String> journalNodeList = LocalSettings.componentHosts("journalnode");
+        String port = hadoopParams.getJournalHttpPort();
+        if (journalNodeList == null || journalNodeList.isEmpty()) {
+            throw new IllegalArgumentException("JournalNode host list cannot be empty!");
+        }
+        }
+        int retryCount = 0;
+        int maxRetry = 100;
+        long retryIntervalMs = 2000;
+        int connectTimeoutMs = 1000;
+        while (retryCount < maxRetry) {
+            boolean allReachable = true;
+            for (String host : journalNodeList) {
+                boolean isReachable = false;
+                Socket socket = null;
+                try {
+                    socket = new Socket();
+                    socket.connect(new InetSocketAddress(host, Integer.parseInt(port)), connectTimeoutMs);
+                    isReachable = true;
+                    log.info("JournalNode [{}:{}] is reachable.", host, port);
+                } catch (Exception e) {
+                    allReachable = false;
+                    log.warn(
+                            "JournalNode [{}:{}] is NOT reachable (retry 
{}/{}). Error: {}",
+                            host,
+                            port,
+                            retryCount + 1,
+                            maxRetry,
+                            e.getMessage());
+                } finally {
+                    if (socket != null && !socket.isClosed()) {
+                        try {
+                            socket.close();
+                        } catch (Exception e) {
+                            log.debug("Failed to close socket for [{}:{}].", host, port, e);
+                        }
+                    }
+                }
+            }
+            if (allReachable) {
+                log.info("All {} JournalNodes are reachable. Proceeding to 
format NameNode.", journalNodeList.size());
+                return true;
+            }
+            retryCount++;
+            if (retryCount < maxRetry) {
+                log.info("Waiting {}ms before next retry ({} remaining).", 
retryIntervalMs, maxRetry - retryCount);
+                TimeUnit.MILLISECONDS.sleep(retryIntervalMs);
+            }
+        }
+        log.error("Failed to reach all JournalNodes after {} retries. JournalNode list: {}", maxRetry, journalNodeList);
+        return false;
+    }
+
     public static boolean isNameNodeFormatted(HadoopParams hadoopParams) {
 
         boolean isFormatted = false;
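
The probe above manages the Socket by hand in a finally block; since Socket is AutoCloseable, the same check can also be written with try-with-resources. A minimal standalone sketch (host and port are hypothetical), equivalent in behavior to one iteration of the loop:

    import java.net.InetSocketAddress;
    import java.net.Socket;

    public class PortProbe {
        // Attempt a TCP connect; reachable iff connect() returns within timeoutMs.
        static boolean isReachable(String host, int port, int timeoutMs) {
            try (Socket socket = new Socket()) {
                socket.connect(new InetSocketAddress(host, port), timeoutMs);
                return true;
            } catch (Exception e) {
                return false;
            }
        }

        public static void main(String[] args) {
            System.out.println(isReachable("jn-1.example.com", 8485, 1000));
        }
    }

With maxRetry = 100 and retryIntervalMs = 2000, a format attempt waits up to about 200 seconds (plus per-host connect timeouts) before giving up.
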
diff --git a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/NameNodeScript.java b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/JournalNodeScript.java
similarity index 72%
copy from bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/NameNodeScript.java
copy to bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/JournalNodeScript.java
index 69c0bf33..dffebb2f 100644
--- a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/NameNodeScript.java
+++ b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/JournalNodeScript.java
@@ -33,8 +33,7 @@ import java.util.Properties;
 
 @Slf4j
 @AutoService(Script.class)
-public class NameNodeScript extends AbstractServerScript {
-
+public class JournalNodeScript extends AbstractServerScript {
     @Override
     public ShellResult add(Params params) {
         Properties properties = new Properties();
@@ -55,9 +54,7 @@ public class NameNodeScript extends AbstractServerScript {
         configure(params);
         HadoopParams hadoopParams = (HadoopParams) params;
 
-        HadoopSetup.formatNameNode(hadoopParams);
-
-        String cmd = MessageFormat.format("{0}/hdfs --daemon start namenode", 
hadoopParams.binDir());
+        String cmd = MessageFormat.format("{0}/hdfs --daemon start 
journalnode", hadoopParams.binDir());
         try {
             return LinuxOSUtils.sudoExecCmd(cmd, hadoopParams.user());
         } catch (Exception e) {
@@ -68,7 +65,7 @@ public class NameNodeScript extends AbstractServerScript {
     @Override
     public ShellResult stop(Params params) {
         HadoopParams hadoopParams = (HadoopParams) params;
-        String cmd = MessageFormat.format("{0}/hdfs --daemon stop namenode", 
hadoopParams.binDir());
+        String cmd = MessageFormat.format("{0}/hdfs --daemon stop 
journalnode", hadoopParams.binDir());
         try {
             return LinuxOSUtils.sudoExecCmd(cmd, hadoopParams.user());
         } catch (Exception e) {
@@ -79,31 +76,11 @@ public class NameNodeScript extends AbstractServerScript {
     @Override
     public ShellResult status(Params params) {
         HadoopParams hadoopParams = (HadoopParams) params;
-        return LinuxOSUtils.checkProcess(hadoopParams.getNameNodePidFile());
-    }
-
-    public ShellResult rebalanceHdfs(Params params) {
-        HadoopParams hadoopParams = (HadoopParams) params;
-        String cmd = MessageFormat.format("{0}/hdfs balancer", 
hadoopParams.binDir());
-        try {
-            return LinuxOSUtils.sudoExecCmd(cmd, hadoopParams.user());
-        } catch (Exception e) {
-            throw new StackException(e);
-        }
-    }
-
-    public ShellResult printTopology(Params params) {
-        HadoopParams hadoopParams = (HadoopParams) params;
-        String cmd = MessageFormat.format("{0}/hdfs dfsadmin -printTopology", 
hadoopParams.binDir());
-        try {
-            return LinuxOSUtils.sudoExecCmd(cmd, hadoopParams.user());
-        } catch (Exception e) {
-            throw new StackException(e);
-        }
+        return LinuxOSUtils.checkProcess(hadoopParams.getJournalNodePidFile());
     }
 
     @Override
     public String getComponentName() {
-        return "namenode";
+        return "journalnode";
     }
 }
diff --git a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/NameNodeScript.java b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/NameNodeScript.java
index 69c0bf33..b57dcb2b 100644
--- a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/NameNodeScript.java
+++ b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/NameNodeScript.java
@@ -23,13 +23,20 @@ import org.apache.bigtop.manager.stack.core.exception.StackException;
 import org.apache.bigtop.manager.stack.core.spi.param.Params;
 import org.apache.bigtop.manager.stack.core.spi.script.AbstractServerScript;
 import org.apache.bigtop.manager.stack.core.spi.script.Script;
+import org.apache.bigtop.manager.stack.core.utils.LocalSettings;
 import org.apache.bigtop.manager.stack.core.utils.linux.LinuxOSUtils;
 
 import com.google.auto.service.AutoService;
 import lombok.extern.slf4j.Slf4j;
 
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.net.HttpURLConnection;
+import java.net.URL;
 import java.text.MessageFormat;
+import java.util.List;
 import java.util.Properties;
+import java.util.stream.Collectors;
 
 @Slf4j
 @AutoService(Script.class)
@@ -54,17 +61,80 @@ public class NameNodeScript extends AbstractServerScript {
     public ShellResult start(Params params) {
         configure(params);
         HadoopParams hadoopParams = (HadoopParams) params;
-
-        HadoopSetup.formatNameNode(hadoopParams);
-
-        String cmd = MessageFormat.format("{0}/hdfs --daemon start namenode", 
hadoopParams.binDir());
+        String hostname = hadoopParams.hostname();
+        List<String> namenodeList = LocalSettings.componentHosts("namenode");
         try {
-            return LinuxOSUtils.sudoExecCmd(cmd, hadoopParams.user());
+            if (namenodeList != null && !namenodeList.isEmpty() && hostname.equals(namenodeList.get(0))) {
+                HadoopSetup.formatNameNode(hadoopParams);
+                String startCmd = MessageFormat.format("{0}/hdfs --daemon start namenode", hadoopParams.binDir());
+                ShellResult result = LinuxOSUtils.sudoExecCmd(startCmd, hadoopParams.user());
+                if (result.getExitCode() != 0) {
+                    throw new StackException("Failed to start primary NameNode: " + result.getErrMsg());
+                }
+                return result;
+            } else if (namenodeList != null && namenodeList.size() >= 2 && hostname.equals(namenodeList.get(1))) {
+                boolean isPrimaryReady = waitForNameNodeReady(namenodeList.get(0), hadoopParams);
+                if (!isPrimaryReady) {
+                    throw new StackException("Primary NameNode is not ready, cannot bootstrap standby");
+                }
+                String bootstrapCmd = MessageFormat.format(
+                        "{0}/hdfs namenode -bootstrapStandby -nonInteractive", hadoopParams.binDir());
+                ShellResult bootstrapResult = LinuxOSUtils.sudoExecCmd(bootstrapCmd, hadoopParams.user());
+                if (bootstrapResult.getExitCode() != 0) {
+                    throw new StackException("Failed to bootstrap standby NameNode: " + bootstrapResult.getErrMsg());
+                }
+
+                String startCmd = MessageFormat.format("{0}/hdfs --daemon start namenode", hadoopParams.binDir());
+                ShellResult startResult = LinuxOSUtils.sudoExecCmd(startCmd, hadoopParams.user());
+                if (startResult.getExitCode() != 0) {
+                    throw new StackException("Failed to start standby NameNode: " + startResult.getErrMsg());
+                }
+                return startResult;
+            } else {
+                throw new StackException("Current host is not in NameNode HA list: " + hostname);
+            }
         } catch (Exception e) {
             throw new StackException(e);
         }
     }
 
+    private boolean waitForNameNodeReady(String namenodeHost, HadoopParams hadoopParams) {
+        String httpPort = hadoopParams.getDfsHttpPort();
+        long timeout = 5 * 60 * 1000;
+        long interval = 3000;
+        long deadline = System.currentTimeMillis() + timeout;
+
+        while (System.currentTimeMillis() < deadline) {
+            try {
+                URL url = new URL("http://" + namenodeHost + ":" + httpPort
+                        + "/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus");
+                HttpURLConnection connection = (HttpURLConnection) url.openConnection();
+                connection.setConnectTimeout(2000);
+                connection.setReadTimeout(2000);
+                connection.setRequestMethod("GET");
+
+                if (connection.getResponseCode() == HttpURLConnection.HTTP_OK) {
+                    try (BufferedReader reader =
+                            new BufferedReader(new InputStreamReader(connection.getInputStream()))) {
+                        String response = reader.lines().collect(Collectors.joining());
+                        if (response.contains("active")) {
+                            return true;
+                        }
+                    }
+                }
+            } catch (Exception e) {
+                log.warn("Waiting for NameNode to be ready: " + 
e.getMessage());
+            }
+            try {
+                Thread.sleep(interval);
+            } catch (InterruptedException ie) {
+                Thread.currentThread().interrupt();
+                return false;
+            }
+        }
+        return false;
+    }
+
     @Override
     public ShellResult stop(Params params) {
         HadoopParams hadoopParams = (HadoopParams) params;
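
For reference, waitForNameNodeReady above polls the NameNodeStatus JMX bean until the body contains the substring "active". An abridged, illustrative response from /jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus (field names per the stock Hadoop bean; host is hypothetical) looks roughly like:

    {"beans":[{
        "name"        : "Hadoop:service=NameNode,name=NameNodeStatus",
        "NNRole"      : "NameNode",
        "State"       : "active",
        "HostAndPort" : "nn-1.example.com:8020"
    }]}
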
diff --git a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/NameNodeScript.java b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/ZkfcScript.java
similarity index 76%
copy from bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/NameNodeScript.java
copy to bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/ZkfcScript.java
index 69c0bf33..c19cb270 100644
--- a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/NameNodeScript.java
+++ b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/main/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/ZkfcScript.java
@@ -33,8 +33,7 @@ import java.util.Properties;
 
 @Slf4j
 @AutoService(Script.class)
-public class NameNodeScript extends AbstractServerScript {
-
+public class ZkfcScript extends AbstractServerScript {
     @Override
     public ShellResult add(Params params) {
         Properties properties = new Properties();
@@ -51,24 +50,25 @@ public class NameNodeScript extends AbstractServerScript {
     }
 
     @Override
-    public ShellResult start(Params params) {
+    public ShellResult init(Params params) {
         configure(params);
         HadoopParams hadoopParams = (HadoopParams) params;
 
-        HadoopSetup.formatNameNode(hadoopParams);
-
-        String cmd = MessageFormat.format("{0}/hdfs --daemon start namenode", 
hadoopParams.binDir());
+        String formatCmd = MessageFormat.format(
+                "{0}/hdfs --config {1} zkfc -formatZK", hadoopParams.binDir(), 
hadoopParams.confDir());
         try {
-            return LinuxOSUtils.sudoExecCmd(cmd, hadoopParams.user());
+            return LinuxOSUtils.sudoExecCmd(formatCmd, hadoopParams.user());
         } catch (Exception e) {
             throw new StackException(e);
         }
     }
 
     @Override
-    public ShellResult stop(Params params) {
+    public ShellResult start(Params params) {
+        configure(params);
         HadoopParams hadoopParams = (HadoopParams) params;
-        String cmd = MessageFormat.format("{0}/hdfs --daemon stop namenode", 
hadoopParams.binDir());
+
+        String cmd = MessageFormat.format("{0}/hdfs --daemon start zkfc", 
hadoopParams.binDir());
         try {
             return LinuxOSUtils.sudoExecCmd(cmd, hadoopParams.user());
         } catch (Exception e) {
@@ -77,14 +77,9 @@ public class NameNodeScript extends AbstractServerScript {
     }
 
     @Override
-    public ShellResult status(Params params) {
-        HadoopParams hadoopParams = (HadoopParams) params;
-        return LinuxOSUtils.checkProcess(hadoopParams.getNameNodePidFile());
-    }
-
-    public ShellResult rebalanceHdfs(Params params) {
+    public ShellResult stop(Params params) {
         HadoopParams hadoopParams = (HadoopParams) params;
-        String cmd = MessageFormat.format("{0}/hdfs balancer", 
hadoopParams.binDir());
+        String cmd = MessageFormat.format("{0}/hdfs --daemon stop zkfc", 
hadoopParams.binDir());
         try {
             return LinuxOSUtils.sudoExecCmd(cmd, hadoopParams.user());
         } catch (Exception e) {
@@ -92,18 +87,14 @@ public class NameNodeScript extends AbstractServerScript {
         }
     }
 
-    public ShellResult printTopology(Params params) {
+    @Override
+    public ShellResult status(Params params) {
         HadoopParams hadoopParams = (HadoopParams) params;
-        String cmd = MessageFormat.format("{0}/hdfs dfsadmin -printTopology", 
hadoopParams.binDir());
-        try {
-            return LinuxOSUtils.sudoExecCmd(cmd, hadoopParams.user());
-        } catch (Exception e) {
-            throw new StackException(e);
-        }
+        return LinuxOSUtils.checkProcess(hadoopParams.getZkfcPidFile());
     }
 
     @Override
     public String getComponentName() {
-        return "namenode";
+        return "zkfc";
     }
 }
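
Here init runs "hdfs zkfc -formatZK" once, which creates the HA parent znode in ZooKeeper (by default under /hadoop-ha), while start/stop manage the zkfc daemon. A hedged sketch to confirm the format step took effect, using the ZooKeeper Java client (connect string is hypothetical; the znode path assumes the nameservice1 id configured above and the default ha.zookeeper.parent-znode):

    import org.apache.zookeeper.ZooKeeper;

    public class ZkfcFormatCheck {
        public static void main(String[] args) throws Exception {
            // Session against the same quorum written into ha.zookeeper.quorum.
            ZooKeeper zk = new ZooKeeper("zk-1.example.com:2181", 5000, event -> {});
            try {
                boolean formatted = zk.exists("/hadoop-ha/nameservice1", false) != null;
                System.out.println(formatted ? "formatZK done" : "formatZK not run yet");
            } finally {
                zk.close();
            }
        }
    }
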
diff --git a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/test/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/JournalNodeScriptTest.java b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/test/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/JournalNodeScriptTest.java
new file mode 100644
index 00000000..1c06877d
--- /dev/null
+++ b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/test/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/JournalNodeScriptTest.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.bigtop.manager.stack.bigtop.v3_3_0.hadoop;
+
+import org.apache.bigtop.manager.stack.core.spi.param.Params;
+
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+public class JournalNodeScriptTest {
+
+    private final JournalNodeScript JournalNodeScript = new JournalNodeScript();
+
+    @Test
+    public void testGetComponentName() {
+        assertEquals("journalnode", JournalNodeScript.getComponentName());
+    }
+
+    @Test
+    public void testAddParamsNull() {
+        Params params = null;
+        assertThrows(NullPointerException.class, () -> JournalNodeScript.add(params));
+    }
+
+    @Test
+    public void testConfigureParamsNull() {
+        Params params = null;
+        assertThrows(NullPointerException.class, () -> JournalNodeScript.configure(params));
+    }
+
+    @Test
+    public void testStartParamsNull() {
+        Params params = null;
+        assertThrows(NullPointerException.class, () -> JournalNodeScript.start(params));
+    }
+
+    @Test
+    public void testStopParamsNull() {
+        Params params = null;
+        assertThrows(NullPointerException.class, () -> JournalNodeScript.stop(params));
+    }
+
+    @Test
+    public void testStatusParamsNull() {
+        Params params = null;
+        assertThrows(NullPointerException.class, () -> JournalNodeScript.status(params));
+    }
+}
diff --git a/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/test/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/ZkfcScriptTest.java b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/test/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/ZkfcScriptTest.java
new file mode 100644
index 00000000..19a29b39
--- /dev/null
+++ b/bigtop-manager-stack/bigtop-manager-stack-bigtop/src/test/java/org/apache/bigtop/manager/stack/bigtop/v3_3_0/hadoop/ZkfcScriptTest.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.bigtop.manager.stack.bigtop.v3_3_0.hadoop;
+
+import org.apache.bigtop.manager.stack.core.spi.param.Params;
+
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+public class ZkfcScriptTest {
+
+    private final ZkfcScript ZkfcScript = new ZkfcScript();
+
+    @Test
+    public void testGetComponentName() {
+        assertEquals("zkfc", ZkfcScript.getComponentName());
+    }
+
+    @Test
+    public void testAddParamsNull() {
+        Params params = null;
+        assertThrows(NullPointerException.class, () -> ZkfcScript.add(params));
+    }
+
+    @Test
+    public void testConfigureParamsNull() {
+        Params params = null;
+        assertThrows(NullPointerException.class, () -> ZkfcScript.configure(params));
+    }
+
+    @Test
+    public void testStartParamsNull() {
+        Params params = null;
+        assertThrows(NullPointerException.class, () -> ZkfcScript.start(params));
+    }
+
+    @Test
+    public void testStopParamsNull() {
+        Params params = null;
+        assertThrows(NullPointerException.class, () -> ZkfcScript.stop(params));
+    }
+
+    @Test
+    public void testStatusParamsNull() {
+        Params params = null;
+        assertThrows(NullPointerException.class, () -> ZkfcScript.status(params));
+    }
+}

