This is an automated email from the ASF dual-hosted git repository.

hapylestat pushed a commit to branch branch-2.7
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-2.7 by this push:
     new 7a801d3  [AMBARI-25208] : Enable/Disable HBase Cross Cluster 
Replication (with common-service changes for branch-2.7) (#3013) (virajjasani  
via dgrinenko)
7a801d3 is described below

commit 7a801d3cdd5a7ac47d9984d39a1328772764a4a9
Author: Viraj Jasani <[email protected]>
AuthorDate: Fri Jun 19 21:51:56 2020 +0530

    [AMBARI-25208] : Enable/Disable HBase Cross Cluster Replication (with 
common-service changes for branch-2.7) (#3013) (virajjasani  via dgrinenko)
---
 .../common-services/HBASE/0.96.0.2.0/metainfo.xml  |  16 ++
 .../0.96.0.2.0/package/files/hbase_replication.rb  | 257 +++++++++++++++++++++
 .../0.96.0.2.0/package/scripts/hbase_master.py     |  42 ++++
 .../0.96.0.2.0/package/scripts/params_linux.py     |   6 +
 .../0.96.0.2.0/package/scripts/params_windows.py   |   1 +
 .../BackgroundCustomCommandExecutionTest.java      |  95 +++++++-
 .../stacks/HDP/2.0.5/services/HBASE/metainfo.xml   |  16 ++
 ambari-web/app/controllers/main/service/item.js    | 140 +++++++++++
 ambari-web/app/messages.js                         |  15 ++
 ambari-web/app/models/host_component.js            |  16 ++
 ambari-web/app/styles/alerts.less                  |  20 ++
 ambari-web/app/styles/application.less             |   3 +
 .../modal_popups/update_replication_popup.hbs      |  61 +++++
 ambari-web/app/utils/ajax/ajax.js                  |  51 ++++
 ambari-web/app/views/main/service/item.js          |  14 +-
 15 files changed, 748 insertions(+), 5 deletions(-)

diff --git 
a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
 
b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
index 49c26b9..18a3e11 100644
--- 
a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
+++ 
b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/metainfo.xml
@@ -70,6 +70,22 @@
                 <timeout>600</timeout>
               </commandScript>
             </customCommand>
+            <customCommand>
+              <name>UPDATE_REPLICATION</name>
+              <commandScript>
+                <script>scripts/hbase_master.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+            <customCommand>
+              <name>STOP_REPLICATION</name>
+              <commandScript>
+                <script>scripts/hbase_master.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
           </customCommands>
         </component>
 
diff --git 
a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/files/hbase_replication.rb
 
b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/files/hbase_replication.rb
new file mode 100644
index 0000000..39db86e
--- /dev/null
+++ 
b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/files/hbase_replication.rb
@@ -0,0 +1,257 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Add or remove replication peers
+
+require 'optparse'
+include Java
+
+java_import org.apache.hadoop.hbase.HBaseConfiguration
+java_import org.apache.hadoop.hbase.HConstants
+java_import org.apache.hadoop.hbase.client.replication.ReplicationAdmin
+java_import org.apache.hadoop.hbase.replication.ReplicationPeerConfig
+java_import org.apache.commons.logging.Log
+java_import org.apache.commons.logging.LogFactory
+java_import org.apache.hadoop.hbase.util.VersionInfo
+
+# Name of this script
+NAME = "hbase_replication"
+
+# Do command-line parsing
+options = {}
+optparse = OptionParser.new do |opts|
+  opts.banner = "Usage: ./hbase org.jruby.Main #{NAME}.rb [options] 
add|remove|update|list <peerId|delimited_peerIds> 
<cluster_key|delimited_cluster_keys>"
+  opts.separator 'Add or remove a single Slave cluster for replication.  List 
Slave Clusters.  Or update Slave clusters to the given hash-delimited new slave 
clusters.'
+  opts.on('-h', '--help', 'Display usage information') do
+    puts opts
+    exit
+  end
+  options[:debug] = false
+  opts.on('-d', '--debug', 'Display extra debug logging') do
+    options[:debug] = true
+  end
+end
+optparse.parse!
+
+def getConfiguration()
+  hbase_twenty = VersionInfo.getVersion().match('0\.20\..*')
+  # Get configuration to use.
+  if hbase_twenty
+    c = HBaseConfiguration.new()
+  else
+    c = HBaseConfiguration.create()
+    end
+    # Set hadoop filesystem configuration using the hbase.rootdir.
+    # Otherwise, we'll always use localhost though the hbase.rootdir
+    # might be pointing at hdfs location. Do old and new key for fs.
+    c.set("fs.default.name", c.get(HConstants::HBASE_DIR))
+    c.set("fs.defaultFS", c.get(HConstants::HBASE_DIR))
+    return c
+end
+
+def removePeer(options, peerId)
+    unless peerId !~ /\D/
+      raise 'peerId should be Integer ID for peer cluster'
+    end
+    replAdm = ReplicationAdmin.new(getConfiguration())
+    replAdm.removePeer(peerId)
+end
+
+#wrapper over addPeer method of ReplicationAdmin
+#args - tableCfs the table and column-family list which will be replicated for 
this peer.
+#A map from tableName to column family names. An empty collection can be passed
+#to indicate replicating all column families. Pass null(nil in ruby) for 
replicating all table and column
+#families
+# clusterKey:  zkquorum:port:parentzkey
+# 
c7007.ambari.apache.org,c7008.ambari.apache.org,c7009.ambari.apache.org:2181:/hbase
+def addPeer(options, peerId, clusterKey, tableCfs=nil, endpointClass=nil, 
isTemporary="false")
+    unless peerId !~ /\D/
+      raise 'peerId should be Integer ID for peer cluster'
+    end
+    replAdm = ReplicationAdmin.new(getConfiguration())
+    replPeerConfig = ReplicationPeerConfig.new()
+    replPeerConfig.setClusterKey(clusterKey)
+    printf "Adding Peer Id %s with ClusterKey %s\n", peerId, clusterKey
+    #replPeerConfig.getConfiguration().put("IS_TEMPORARY", cluster.isTemporary)
+    if endpointClass
+        replPeerConfig.setReplicationEndpointImpl(endpointClass)
+        peerId.gsub!("-", "*") # TenantReplicationEndpoint expects hyphens 
to be escaped to asterisks
+    end
+    replPeerConfig.getConfiguration().put("IS_TEMPORARY", isTemporary)
+
+    replAdm.addPeer(peerId, replPeerConfig, tableCfs)
+end
+
+# Gets the list of configured replication peers for a cluster.
+#
+#This method will return list of peers using ReplicationAdmin interface
+#  The resulting array will be the rows of existing peers.
+#   [["id1","hosts:port","state"],
+#    ["id2","hosts:port","state"]]
+#
+def getReplicationPeers()
+  replAdm = ReplicationAdmin.new(getConfiguration())
+
+  repPeers = Array.new()
+  repPeers = replAdm.listPeerConfigs
+  existingPeerClusters = Array.new
+  repPeers.entrySet().each do |e|
+    state = replAdm.getPeerState(e.key)
+    existingPeerClusters.push([ e.key, e.value.getClusterKey, state ])
+  end
+  
+  replAdm.close()
+
+  return existingPeerClusters
+end
+  
+
+# list peers
+def listPeers(options)
+  '''
+  peersList = getReplicationPeers
+  puts "\n\nOutput\n PEER_ID  CLUSTER_KEY  STATE :\n"
+  peersList.each {|peer| puts "#{peer[0]}  #{peer[1]}  #{peer[2]}"
+  '''
+  servers = getReplicationPeers
+  puts "Replication Peers are: " + servers.size().to_s  
+  servers.each {|server| puts server}
+end
+
+arguments = ARGV[1..ARGV.size()]
+
+# Create a logger and disable the DEBUG-level annoying client logging
+def configureLogging(options)
+  apacheLogger = LogFactory.getLog(NAME)
+  # Configure log4j to not spew so much
+  unless (options[:debug]) 
+    logger = org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase")
+    logger.setLevel(org.apache.log4j.Level::WARN)
+  end
+  return apacheLogger
+end
+
+
+def updateReplicationPeers(options, delimitedPeerIds, delimitedClusterKeys, 
delimiterChar='#')
+    # Using delimited Given PeerIds and ClusterKeys, get All Slave Clusters 
Desired and fill in inputSlaveClusters
+    peerIds = delimitedPeerIds.split(delimiterChar)
+    clusterKeys = delimitedClusterKeys.split(delimiterChar)
+    peer_ids_count = peerIds.size()
+
+    if peer_ids_count != clusterKeys.size()
+      raise "PeerIds " + peer_ids_count.to_s + " and ClusterKeys " + 
clusterKeys.size().to_s + " must be equal in number" 
+    end
+
+    inputSlaveClusters = Array.new
+    i = 0
+
+    while i < peer_ids_count  do
+      inputSlaveClusters.push([ peerIds[i], clusterKeys[i] ])
+      i +=1
+    end
+
+    toBeAddedClusters = inputSlaveClusters.dup
+    existingPeerClusters = getReplicationPeers
+    toBeRemovedClusters = existingPeerClusters.dup
+
+    #Validate existing instance file and figure out new clusters to be added
+    inputSlaveClusters.each do |inputSlaveCluster|
+        # Handle case for empty slave list
+        printf "Required Peer Id %s with ClusterKey %s\n ", 
inputSlaveCluster[0], inputSlaveCluster[1]
+        existingPeerClusters.each do |existingPeerCluster|
+            # Handle case for empty slave list
+            printf "Comparing with existing replication peer %s and Cluster 
Key %s\n", existingPeerCluster[0], existingPeerCluster[1]
+            if existingPeerCluster[0].eql? inputSlaveCluster[0]  and 
!existingPeerCluster[1].eql? inputSlaveCluster[1]
+                  raise "Conflict in instance file with existing peers, same 
peer id already exists for different cluster " + existingPeerCluster[1]
+            elsif !existingPeerCluster[0].eql? inputSlaveCluster[0]  and 
existingPeerCluster[1].eql? inputSlaveCluster[1]
+                  raise "Conflict in instance file with existing peers, same 
cluster already exists with different peer id " + existingPeerCluster[0]
+            end
+            if existingPeerCluster[0].eql? inputSlaveCluster[0]
+                  puts "Cluster already exists in peers list. So ignoring..."
+                  toBeAddedClusters.delete(inputSlaveCluster)
+                  break
+            end
+          end
+    end
+
+    #Compare existing peers with new peers from instance file and fetch peers 
to be removed
+    existingPeerClusters.each do |toBeRemovedCluster|
+        printf "Existing replication peer %s and Cluster Key %s\n", 
toBeRemovedCluster[0], toBeRemovedCluster[1]
+        inputSlaveClusters.each do |inputSlaveCluster|
+            # Handle case for empty slave list
+            printf "Comparing with configured replication peer %s and 
ClusterKey %s\n", inputSlaveCluster[0], inputSlaveCluster[1]
+            if toBeRemovedCluster[0].eql? inputSlaveCluster[0]
+                  puts "Same also exists in configured list so deleting from 
remove list"
+                  toBeRemovedClusters.delete(toBeRemovedCluster)
+                  break
+            end
+        end
+      end
+
+    if toBeAddedClusters.size == 0
+        puts "Nothing needs to be added..."
+    else
+        toBeAddedClusters.each do |toBeAddedCluster|
+            printf "To Be Added Cluster peerId: %s, clusterKey: %s\n", 
toBeAddedCluster[0], toBeAddedCluster[1]
+            addPeer(options, toBeAddedCluster[0], toBeAddedCluster[1])
+        end
+    end
+
+    if toBeRemovedClusters.size == 0
+        puts "Nothing needs to be removed..."
+    else
+        toBeRemovedClusters.each do |toBeRemovedCluster|
+            printf "To Be Removed Cluster peerId: %s, clusterKey: %s\n", 
toBeRemovedCluster[0], toBeRemovedCluster[1]
+            removePeer(options, toBeRemovedCluster[0])
+        end
+    end
+end
+
+
+# Create a logger and save it to ruby global
+$LOG = configureLogging(options)
+case ARGV[0]
+  when 'add'
+    if ARGV.length < 3
+      puts optparse
+      exit 1
+    end
+    peerId = ARGV[1]
+    clusterKey = ARGV[2]
+    addPeer(options, peerId, clusterKey)
+  when 'remove'
+    if ARGV.length < 2
+      puts optparse
+      exit 1
+    end
+    peerId = ARGV[1]
+    removePeer(options, peerId)
+  when 'update'
+    if ARGV.length < 2
+      puts optparse
+      exit 1
+    end
+    delimitedPeerIds = ARGV[1]
+    delimitedClusterKeys = ARGV[2]
+    updateReplicationPeers(options, delimitedPeerIds, delimitedClusterKeys)    
+  when 'list'
+    listPeers(options)
+  else
+    puts optparse
+    exit 3
+end
diff --git 
a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
 
b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
index a5f1e35..d8ff0ef 100644
--- 
a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
+++ 
b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
@@ -19,6 +19,9 @@ limitations under the License.
 """
 
 import sys
+
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.source import StaticFile
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.check_process_status import 
check_process_status
@@ -68,6 +71,24 @@ class HbaseMasterWindows(HbaseMaster):
     env.set_params(status_params)
     check_windows_service_status(status_params.hbase_master_win_service_name)
 
+  def stop_replication(self, env):
+    import params
+    env.set_params(params)
+    File(params.hbase_replication, content=StaticFile("hbase_replication.rb"), 
owner=params.hbase_user, mode="f")
+    remove_replication_values = "remove 
{0}".format(params.hbase_replication_peers)
+    replication_cmd = format(
+      "cmd /c {hbase_executable} org.jruby.Main {hbase_replication} " + 
remove_replication_values)
+    Execute(replication_cmd, user=params.hbase_user, logoutput=True)
+
+  def update_replication(self, env):
+    import params
+    env.set_params(params)
+    File(params.hbase_replication, content=StaticFile("hbase_replication.rb"), 
owner=params.hbase_user, mode="f")
+    update_replication_values = "update {0} 
{1}".format(params.hbase_replication_peers,
+                                                        
params.hbase_replication_cluster_keys)
+    update_replication_cmd = format(
+      "cmd /c {hbase_executable} org.jruby.Main {hbase_replication} " + 
update_replication_values)
+    Execute(update_replication_cmd, user=params.hbase_user, logoutput=True)
 
 
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
@@ -95,6 +116,27 @@ class HbaseMasterDefault(HbaseMaster):
 
     check_process_status(status_params.hbase_master_pid_file)
 
+  def stop_replication(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    File(params.hbase_replication, content=StaticFile("hbase_replication.rb"), 
owner=params.hbase_user, mode=0755)
+    remove_replication_values = "remove 
{0}".format(params.hbase_replication_peers)
+    replication_cmd = format(
+      "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} 
{master_security_config} org.jruby.Main {hbase_replication} "
+      + remove_replication_values)
+    Execute(replication_cmd, user=params.hbase_user, logoutput=True)
+
+  def update_replication(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    File(params.hbase_replication, content=StaticFile("hbase_replication.rb"), 
owner=params.hbase_user, mode=0755)
+    update_replication_values = "update {0} 
{1}".format(params.hbase_replication_peers,
+                                                        
params.hbase_replication_cluster_keys)
+    update_replication_cmd = format(
+      "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} 
{master_security_config} org.jruby.Main {hbase_replication} "
+      + update_replication_values)
+    Execute(update_replication_cmd, user=params.hbase_user, logoutput=True)
+
   def get_log_folder(self):
     import params
     return params.log_dir
diff --git 
a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
 
b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index d2216e4..c702104 100644
--- 
a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ 
b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -76,6 +76,7 @@ hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
 region_mover = "/usr/lib/hbase/bin/region_mover.rb"
 region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
+hbase_replication = "/usr/lib/hbase/bin/hbase_replication.rb"
 hbase_cmd = "/usr/lib/hbase/bin/hbase"
 hbase_max_direct_memory_size = None
 
@@ -91,6 +92,7 @@ if stack_version_formatted and 
check_stack_feature(StackFeature.ROLLING_UPGRADE,
   
daemon_script=format("{stack_root}/current/{component_directory}/bin/hbase-daemon.sh")
   region_mover = 
format("{stack_root}/current/{component_directory}/bin/region_mover.rb")
   region_drainer = 
format("{stack_root}/current/{component_directory}/bin/draining_servers.rb")
+  hbase_replication = 
format("{stack_root}/current/{component_directory}/bin/hbase_replication.rb")
   hbase_cmd = format("{stack_root}/current/{component_directory}/bin/hbase")
 
 
@@ -313,6 +315,10 @@ ambari_server_hostname = 
config['ambariLevelParams']['ambari_server_host']
 enable_ranger_hbase = 
default("/configurations/ranger-hbase-plugin-properties/ranger-hbase-plugin-enabled",
 "No")
 enable_ranger_hbase = True if enable_ranger_hbase.lower() == 'yes' else False
 
+# HBase Cross Cluster Replication
+hbase_replication_peers=default("/commandParams/replication_peers", "")
+hbase_replication_cluster_keys=default("/commandParams/replication_cluster_keys",
 "")
+
 # ranger hbase properties
 if enable_ranger_hbase:
   # get ranger policy url
diff --git 
a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_windows.py
 
b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_windows.py
index ddc9e93..c9ce3d2 100644
--- 
a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_windows.py
+++ 
b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_windows.py
@@ -34,6 +34,7 @@ hbase_user = hadoop_user
 #decomm params
 region_drainer = os.path.join(hbase_bin_dir,"draining_servers.rb")
 region_mover = os.path.join(hbase_bin_dir,"region_mover.rb")
+hbase_replication = os.path.join(hbase_bin_dir, "hbase_replication.rb")
 hbase_excluded_hosts = config['commandParams']['excluded_hosts']
 hbase_drain_only = config['commandParams']['mark_draining_only']
 
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java
index 89d0630..c2a4b8e 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/controller/BackgroundCustomCommandExecutionTest.java
@@ -77,6 +77,13 @@ public class BackgroundCustomCommandExecutionTest {
   private Clusters clusters;
 
   private static final String REQUEST_CONTEXT_PROPERTY = "context";
+  private static final String UPDATE_REPLICATION_PARAMS = "{\n" +
+          "              \"replication_cluster_keys\": 
c7007.ambari.apache.org,c7008.ambari.apache.org,c7009.ambari.apache.org:2181:/hbase,\n"
 +
+          "              \"replication_peers\": 1\n" +
+          "            }";
+  private static final String STOP_REPLICATION_PARAMS = "{\n" +
+          "              \"replication_peers\": 1\n" +
+          "            }";
 
   @Captor ArgumentCaptor<Request> requestCapture;
   @Mock ActionManager am;
@@ -180,15 +187,18 @@ public class BackgroundCustomCommandExecutionTest {
     createCluster("c1");
     addHost("c6401","c1");
     addHost("c6402","c1");
+    addHost("c7007", "c1");
     clusters.updateHostMappings(clusters.getHost("c6401"));
     clusters.updateHostMappings(clusters.getHost("c6402"));
+    clusters.updateHostMappings(clusters.getHost("c7007"));
 
     clusters.getCluster("c1");
     createService("c1", "HDFS", null);
-
-    createServiceComponent("c1","HDFS","NAMENODE", State.INIT);
-
-    createServiceComponentHost("c1","HDFS","NAMENODE","c6401", null);
+    createService("c1", "HBASE", null);
+    createServiceComponent("c1", "HDFS", "NAMENODE", State.INIT);
+    createServiceComponent("c1", "HBASE", "HBASE_MASTER", State.INIT);
+    createServiceComponentHost("c1", "HDFS", "NAMENODE", "c6401", null);
+    createServiceComponentHost("c1", "HBASE", "HBASE_MASTER", "c7007", null);
   }
   private void addHost(String hostname, String clusterName) throws 
AmbariException {
     clusters.addHost(hostname);
@@ -206,6 +216,83 @@ public class BackgroundCustomCommandExecutionTest {
     host.setHostAttributes(hostAttributes);
   }
 
+
+  @SuppressWarnings("serial")
+  @Test
+  public void testUpdateHBaseReplicationCustomCommand()
+          throws AuthorizationException, AmbariException, 
IllegalAccessException,
+          NoSuchFieldException {
+    createClusterFixture();
+    Map<String, String> requestProperties = new HashMap<String, String>() {
+      {
+        put(REQUEST_CONTEXT_PROPERTY, "Enable Cross Cluster HBase 
Replication");
+        put("command", "UPDATE_REPLICATION");
+        put("parameters", UPDATE_REPLICATION_PARAMS);
+      }
+    };
+    ExecuteActionRequest actionRequest = new ExecuteActionRequest("c1",
+            "UPDATE_REPLICATION", new HashMap<>(), false);
+    actionRequest.getResourceFilters().add(new RequestResourceFilter("HBASE", 
"HBASE_MASTER",
+            Collections.singletonList("c7007")));
+
+    controller.createAction(actionRequest, requestProperties);
+
+    Mockito.verify(am, Mockito.times(1))
+            .sendActions(requestCapture.capture(), 
any(ExecuteActionRequest.class));
+
+    Request request = requestCapture.getValue();
+    Assert.assertNotNull(request);
+    Assert.assertNotNull(request.getStages());
+    Assert.assertEquals(1, request.getStages().size());
+    Stage stage = request.getStages().iterator().next();
+
+    Assert.assertEquals(1, stage.getHosts().size());
+
+    List<ExecutionCommandWrapper> commands = 
stage.getExecutionCommands("c7007");
+    Assert.assertEquals(1, commands.size());
+    ExecutionCommand command = commands.get(0).getExecutionCommand();
+    Assert.assertEquals(AgentCommandType.EXECUTION_COMMAND, 
command.getCommandType());
+    Assert.assertEquals("UPDATE_REPLICATION", 
command.getCommandParams().get("custom_command"));
+  }
+
+  @SuppressWarnings("serial")
+  @Test
+  public void testStopHBaseReplicationCustomCommand()
+          throws AuthorizationException, AmbariException, 
IllegalAccessException,
+          NoSuchFieldException {
+    createClusterFixture();
+    Map<String, String> requestProperties = new HashMap<String, String>() {
+      {
+        put(REQUEST_CONTEXT_PROPERTY, "Disable Cross Cluster HBase 
Replication");
+        put("command", "STOP_REPLICATION");
+        put("parameters", STOP_REPLICATION_PARAMS);
+      }
+    };
+    ExecuteActionRequest actionRequest = new ExecuteActionRequest("c1",
+            "STOP_REPLICATION", new HashMap<>(), false);
+    actionRequest.getResourceFilters().add(new RequestResourceFilter("HBASE", 
"HBASE_MASTER",
+            Collections.singletonList("c7007")));
+
+    controller.createAction(actionRequest, requestProperties);
+
+    Mockito.verify(am, Mockito.times(1))
+            .sendActions(requestCapture.capture(), 
any(ExecuteActionRequest.class));
+
+    Request request = requestCapture.getValue();
+    Assert.assertNotNull(request);
+    Assert.assertNotNull(request.getStages());
+    Assert.assertEquals(1, request.getStages().size());
+    Stage stage = request.getStages().iterator().next();
+
+    Assert.assertEquals(1, stage.getHosts().size());
+
+    List<ExecutionCommandWrapper> commands = 
stage.getExecutionCommands("c7007");
+    Assert.assertEquals(1, commands.size());
+    ExecutionCommand command = commands.get(0).getExecutionCommand();
+    Assert.assertEquals(AgentCommandType.EXECUTION_COMMAND, 
command.getCommandType());
+    Assert.assertEquals("STOP_REPLICATION", 
command.getCommandParams().get("custom_command"));
+  }
+
   private void createCluster(String clusterName) throws AmbariException, 
AuthorizationException {
     ClusterRequest r = new ClusterRequest(null, clusterName, 
State.INSTALLED.name(),
         SecurityType.NONE, STACK_ID.getStackId(), null);
diff --git 
a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HBASE/metainfo.xml 
b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HBASE/metainfo.xml
index 322a190..57e6125 100644
--- 
a/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HBASE/metainfo.xml
+++ 
b/ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HBASE/metainfo.xml
@@ -59,6 +59,22 @@
                 <timeout>600</timeout>
               </commandScript>
             </customCommand>
+            <customCommand>
+              <name>UPDATE_REPLICATION</name>
+              <commandScript>
+                <script>scripts/hbase_master.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+            <customCommand>
+              <name>STOP_REPLICATION</name>
+              <commandScript>
+                <script>scripts/hbase_master.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
           </customCommands>
         </component>
 
diff --git a/ambari-web/app/controllers/main/service/item.js 
b/ambari-web/app/controllers/main/service/item.js
index a1a80c5..e51e996 100644
--- a/ambari-web/app/controllers/main/service/item.js
+++ b/ambari-web/app/controllers/main/service/item.js
@@ -942,6 +942,146 @@ App.MainServiceItemController = 
Em.Controller.extend(App.SupportClientConfigsDow
     });
   },
 
+  /**
+   * On click handler for Update HBase Replication command from items menu
+   */
+  updateHBaseReplication: function () {
+    const controller = this;
+    App.ModalPopup.show({
+      classNames: ['sixty-percent-width-modal', 'service-params-popup'],
+      header: 
Em.I18n.t('services.service.actions.run.updateHBaseReplication.context'),
+      primary: Em.I18n.t('common.enable'),
+      secondary: Em.I18n.t('common.cancel'),
+      peerId: '',
+      parentzkey: '/hbase',
+      zkport: '2181',
+      zkquorum: '',
+      errorMessage: 
Em.I18n.t('services.service.actions.run.updateHBaseReplication.promptError'),
+      isInvalid: function () {
+        const zkquorum = this.get('zkquorum');
+        const zkport = this.get('zkport');
+        const parentzkey = this.get('parentzkey');
+        const peerId = this.get('peerId');
+        if (zkquorum && zkport && parentzkey && peerId) {
+          if (isNaN(zkport) || isNaN(peerId)) {
+            return true;
+          }
+          const zkquorumArray = zkquorum.split(',');
+          return zkquorumArray.length < 2;
+        } else {
+          return true;
+        }
+      }.property('zkquorum', 'zkport', 'parentzkey', 'peerId'),
+      disablePrimary: Em.computed.alias('isInvalid'),
+      onPrimary: function () {
+        if (this.get('isInvalid')) {
+          return;
+        }
+        App.ajax.send({
+          name: 'service.item.updateHBaseReplication',
+          sender: controller,
+          data: {
+            hosts: 
App.Service.find('HBASE').get('hostComponents').findProperty('componentName', 
'HBASE_MASTER').get('hostName'),
+            replication_peers: this.get('peerId'),
+            replication_cluster_keys: this.get('zkquorum') + ':' + 
this.get('zkport') + ":" + this.get('parentzkey')
+          },
+          success: 'updateHBaseReplicationSuccessCallback',
+          error: 'updateHBaseReplicationErrorCallback',
+          showLoadingPopup: true
+        });
+        this.hide();
+      },
+      bodyClass: Ember.View.extend({
+        templateName: 
require('templates/common/modal_popups/update_replication_popup'),
+        zkquorumText: 
Em.I18n.t('services.service.actions.run.updateHBaseReplication.zkquorumText.prompt'),
+        zkportText: 
Em.I18n.t('services.service.actions.run.updateHBaseReplication.zkportText.prompt'),
+        parentzkeyText: 
Em.I18n.t('services.service.actions.run.updateHBaseReplication.parentzkeyText.prompt'),
+        peerIdText: 
Em.I18n.t('services.service.actions.run.updateHBaseReplication.peerIdText.prompt')
+      })
+    });
+  },
+
+  updateHBaseReplicationSuccessCallback: function (data) {
+    if (data.Requests.id) {
+      App.router.get('backgroundOperationsController').showPopup();
+    }
+  },
+
+  updateHBaseReplicationErrorCallback: function (data) {
+    var error = 
Em.I18n.t('services.service.actions.run.updateHBaseReplication.error');
+    if (data && data.responseText) {
+      try {
+        const json = $.parseJSON(data.responseText);
+        error += json.message;
+      } catch (err) {
+        console.log(err);
+      }
+    }
+    
App.showAlertPopup(Em.I18n.t('services.service.actions.run.updateHBaseReplication.error'),
 error);
+  },
+
+
+  /**
+   * On click handler for Stop HBase Replication command from items menu
+   */
+  stopHBaseReplication: function () {
+    const controller = this;
+    App.ModalPopup.show({
+      classNames: ['forty-percent-width-modal'],
+      header: 
Em.I18n.t('services.service.actions.run.stopHBaseReplication.context'),
+      primary: Em.I18n.t('common.disable'),
+      secondary: Em.I18n.t('common.cancel'),
+      inputValue: '',
+      errorMessage: 
Em.I18n.t('services.service.actions.run.stopHBaseReplication.promptError'),
+      isInvalid: function () {
+        const inputValue = this.get('inputValue');
+        return !inputValue || isNaN(inputValue);
+      }.property('inputValue'),
+      disablePrimary: Em.computed.alias('isInvalid'),
+      onPrimary: function () {
+        if (this.get('isInvalid')) {
+          return;
+        }
+        App.ajax.send({
+          name: 'service.item.stopHBaseReplication',
+          sender: controller,
+          data: {
+            hosts: 
App.Service.find('HBASE').get('hostComponents').findProperty('componentName', 
'HBASE_MASTER').get('hostName'),
+            replication_peers: this.get('inputValue')
+          },
+          success: 'stopHBaseReplicationSuccessCallback',
+          error: 'stopHBaseReplicationErrorCallback',
+          showLoadingPopup: true
+        });
+        this.hide();
+      },
+      bodyClass: Ember.View.extend({
+        templateName: require('templates/common/modal_popups/prompt_popup'),
+        text: 
Em.I18n.t('services.service.actions.run.stopHBaseReplication.prompt'),
+      })
+    });
+  },
+
+  stopHBaseReplicationSuccessCallback: function (data) {
+    if (data.Requests.id) {
+      App.router.get('backgroundOperationsController').showPopup();
+    }
+  },
+
+  stopHBaseReplicationErrorCallback: function (data) {
+    var error = 
Em.I18n.t('services.service.actions.run.stopHBaseReplication.error');
+    if (data && data.responseText) {
+      try {
+        const json = $.parseJSON(data.responseText);
+        error += json.message;
+      } catch (err) {
+        console.log(err);
+      }
+    }
+    
App.showAlertPopup(Em.I18n.t('services.service.actions.run.stopHBaseReplication.error'),
 error);
+  },
+
+
   restartAllHostComponents: function (serviceName) {
     const serviceDisplayName = this.get('content.displayName'),
       bodyMessage = Em.Object.create({
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 9b8ab6e..652b7c5 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -2097,6 +2097,21 @@ Em.I18n.translations = {
   'services.service.actions.run.stopLdapKnox.title':'Stop Demo LDAP Knox Gateway',
   'services.service.actions.run.stopLdapKnox.context':'Stop Demo LDAP',
   'services.service.actions.run.startStopLdapKnox.error': 'Error during remote command: ',
+  'services.service.actions.run.updateHBaseReplication.label': 'Start/Update HBase Replication',
+  'services.service.actions.run.updateHBaseReplication.context': 'Enable Cross Cluster HBase Replication',
+  'services.service.actions.run.updateHBaseReplication.title': 'HBase Replication: Start/Update',
+  'services.service.actions.run.updateHBaseReplication.promptError': 'ZKQuorum should be comma(,) separated ZK nodes. Port and Peer ID should be number',
+  'services.service.actions.run.updateHBaseReplication.zkquorumText.prompt': 'Peer Cluster ZK Quorum: ',
+  'services.service.actions.run.updateHBaseReplication.zkportText.prompt': 'Peer Cluster ZK Port: ',
+  'services.service.actions.run.updateHBaseReplication.parentzkeyText.prompt': 'Parent ZKey for HBase: ',
+  'services.service.actions.run.updateHBaseReplication.peerIdText.prompt': 'Peer Cluster ID: ',
+  'services.service.actions.run.updateHBaseReplication.error':'Error while setting up HBase Cross Cluster Replication ',
+  'services.service.actions.run.stopHBaseReplication.label': 'Stop HBase Replication',
+  'services.service.actions.run.stopHBaseReplication.context': 'Disable Cross Cluster HBase Replication',
+  'services.service.actions.run.stopHBaseReplication.title': 'HBase Replication: Stop',
+  'services.service.actions.run.stopHBaseReplication.promptError': 'Peer ID should be number',
+  'services.service.actions.run.stopHBaseReplication.error': 'Error while disabling HBase Cross Cluster Replication ',
+  'services.service.actions.run.stopHBaseReplication.prompt': 'Peer Cluster ID to remove from Replication Peer list: ',
 
   // Hive Server Interactive custom command to restart LLAP
   'services.service.actions.run.restartLLAP':'Restart LLAP',
diff --git a/ambari-web/app/models/host_component.js b/ambari-web/app/models/host_component.js
index e63be0d..52a7252 100644
--- a/ambari-web/app/models/host_component.js
+++ b/ambari-web/app/models/host_component.js
@@ -555,6 +555,22 @@ App.HostComponentActionMap = {
         label: Em.I18n.t('admin.nameNodeFederation.button.enable'),
         cssClass: 'icon icon-sitemap',
         disabled: !App.get('isHaEnabled')
+      },
+      UPDATE_REPLICATION: {
+        action: 'updateHBaseReplication',
+        customCommand: 'UPDATE_REPLICATION',
+        context: Em.I18n.t('services.service.actions.run.updateHBaseReplication.context'),
+        label: Em.I18n.t('services.service.actions.run.updateHBaseReplication.label'),
+        cssClass: 'glyphicon glyphicon-refresh',
+        disabled: false
+      },
+      STOP_REPLICATION: {
+        action: 'stopHBaseReplication',
+        customCommand: 'STOP_REPLICATION',
+        context: Em.I18n.t('services.service.actions.run.stopHBaseReplication.context'),
+        label: Em.I18n.t('services.service.actions.run.stopHBaseReplication.label'),
+        cssClass: 'glyphicon glyphicon-refresh',
+        disabled: false
       }
     };
   },
diff --git a/ambari-web/app/styles/alerts.less b/ambari-web/app/styles/alerts.less
index b092adc..a2f93b0 100644
--- a/ambari-web/app/styles/alerts.less
+++ b/ambari-web/app/styles/alerts.less
@@ -528,6 +528,26 @@
   width: 95%;
 }
 
+.service-params-popup {
+  .modal {
+    .modal-dialog {
+      width: 55%;
+      .modal-content {
+        height: 60%;
+        .modal-body {
+          padding-left: 0;
+          padding-right: 0;
+          height: 75%;
+          font-size: 14px;
+          .definition-latest-text, .timeago {
+            font-size: 12px;
+          }
+        }
+      }
+    }
+  }
+}
+
 .alerts-popup-wrap {
   #alert-info {
     overflow: auto;
diff --git a/ambari-web/app/styles/application.less b/ambari-web/app/styles/application.less
index efec846..72c1507 100644
--- a/ambari-web/app/styles/application.less
+++ b/ambari-web/app/styles/application.less
@@ -2081,6 +2081,9 @@ input[type="radio"].align-checkbox, input[type="checkbox"].align-checkbox {
   .prompt-input {
     width: 80px;
   }
+  .large-prompt-input {
+    width: 70%;
+  }
 }
 
 #views {
diff --git a/ambari-web/app/templates/common/modal_popups/update_replication_popup.hbs b/ambari-web/app/templates/common/modal_popups/update_replication_popup.hbs
new file mode 100644
index 0000000..ff1b19d
--- /dev/null
+++ b/ambari-web/app/templates/common/modal_popups/update_replication_popup.hbs
@@ -0,0 +1,61 @@
+{{!
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+}}
+<div class="prompt-popup">
+    <form>
+        <div {{bindAttr class=":form-group view.parentView.isInvalid:has-error"}}>
+            <h4>{{view.title}}</h4>
+            <p>{{view.description}}</p>
+            <br/>
+            <label class="control-label">{{view.zkquorumText}}</label>
+            <div>
+                &nbsp;&nbsp;
+                {{view Em.TextField class="large-prompt-input form-control" valueBinding="view.parentView.zkquorum"}}
+            </div>
+            <br/>
+            <label class="control-label">{{view.zkportText}}</label>
+            <div>
+                &nbsp;&nbsp;
+                {{view Em.TextField class="prompt-input form-control" valueBinding="view.parentView.zkport"}}
+            </div>
+            <br/>
+            <label class="control-label">{{view.parentzkeyText}}</label>
+            <div>
+                &nbsp;&nbsp;
+                {{view Em.TextField class="prompt-input form-control" valueBinding="view.parentView.parentzkey"}}
+            </div>
+            <br/>
+            <label class="control-label">{{view.peerIdText}}</label>
+            <div>
+                &nbsp;&nbsp;
+                {{view Em.TextField class="prompt-input form-control" valueBinding="view.parentView.peerId"}}
+            </div>
+            <div>
+                {{#if view.parentView.isChanged}}
+                    <a href="#" data-toggle="tooltip" class="btn-sm"
+                        {{action "doRestoreDefaultValue" this target="view.parentView"}}
+                        {{translateAttr data-original-title="common.reset.default"}}>
+                        <i class="icon-undo"></i>
+                    </a>
+                {{/if}}
+            </div>
+            {{#if view.parentView.isInvalid}}
+                <span class="help-block validation-block">{{view.parentView.errorMessage}}</span>
+            {{/if}}
+        </div>
+    </form>
+</div>
diff --git a/ambari-web/app/utils/ajax/ajax.js b/ambari-web/app/utils/ajax/ajax.js
index 1a899e6..9ebbfee 100644
--- a/ambari-web/app/utils/ajax/ajax.js
+++ b/ambari-web/app/utils/ajax/ajax.js
@@ -701,6 +701,57 @@ var urls = {
     }
   },
 
+  'service.item.updateHBaseReplication': {
+    'real': '/clusters/{clusterName}/requests',
+    'mock': '',
+    'format': function (data) {
+      return {
+        type: 'POST',
+        data: JSON.stringify({
+          RequestInfo: {
+            'context': Em.I18n.t('services.service.actions.run.updateHBaseReplication.context'),
+            'command': 'UPDATE_REPLICATION',
+            "parameters": {
+              "replication_cluster_keys": data.replication_cluster_keys,
+              "replication_peers": data.replication_peers
+            }
+          },
+          "Requests/resource_filters": [{
+            'service_name': 'HBASE',
+            'component_name': 'HBASE_MASTER',
+            'hosts': data.hosts
+          }]
+        })
+      }
+    }
+  },
+
+  'service.item.stopHBaseReplication': {
+    'real': '/clusters/{clusterName}/requests',
+    'mock': '',
+    'format': function (data) {
+      return {
+        type: 'POST',
+        data: JSON.stringify({
+          RequestInfo: {
+            'context': Em.I18n.t('services.service.actions.run.stopHBaseReplication.context'),
+            'command': 'STOP_REPLICATION',
+            "parameters": {
+              "replication_peers": data.replication_peers
+            }
+          },
+          "Requests/resource_filters": [{
+            'service_name': 'HBASE',
+            'component_name': 'HBASE_MASTER',
+            'hosts': data.hosts
+          }]
+        })
+      }
+    }
+  },
+
+
+
   'service.item.executeCustomCommand': {
     'real': '/clusters/{clusterName}/requests',
     'mock': '',
diff --git a/ambari-web/app/views/main/service/item.js b/ambari-web/app/views/main/service/item.js
index c01634c..60d7fa4 100644
--- a/ambari-web/app/views/main/service/item.js
+++ b/ambari-web/app/views/main/service/item.js
@@ -35,7 +35,7 @@ App.MainServiceItemView = Em.View.extend(App.HiveInteractiveCheck, {
   mastersExcludedCommands: {
     'NAMENODE': ['DECOMMISSION', 'REBALANCEHDFS'],
     'RESOURCEMANAGER': ['DECOMMISSION', 'REFRESHQUEUES'],
-    'HBASE_MASTER': ['DECOMMISSION'],
+    'HBASE_MASTER': ['DECOMMISSION', 'UPDATE_REPLICATION', 'STOP_REPLICATION'],
     'KNOX_GATEWAY': ['STARTDEMOLDAP','STOPDEMOLDAP'],
     'HAWQMASTER': ['IMMEDIATE_STOP_HAWQ_SERVICE', 'RUN_HAWQ_CHECK', 'HAWQ_CLEAR_CACHE', 'REMOVE_HAWQ_STANDBY', 'RESYNC_HAWQ_STANDBY'],
     'HAWQSEGMENT': ['IMMEDIATE_STOP_HAWQ_SEGMENT'],
@@ -269,6 +269,18 @@ App.MainServiceItemView = Em.View.extend(App.HiveInteractiveCheck, {
           }
         }
 
+        const hMasterComponent = App.StackServiceComponent.find().findProperty('componentName', 'HBASE_MASTER');
+        if (serviceName === 'HBASE' && hMasterComponent) {
+          const hMasterCustomCommands = hMasterComponent.get('customCommands');
+          if (hMasterCustomCommands && hMasterCustomCommands.contains('UPDATE_REPLICATION')) {
+            options.push(actionMap.UPDATE_REPLICATION);
+          }
+          if (hMasterCustomCommands && hMasterCustomCommands.contains('STOP_REPLICATION')) {
+            options.push(actionMap.STOP_REPLICATION);
+          }
+        }
+
+
         /**
          * Display all custom commands of Master and StandBy on Service page.
          **/

Reply via email to