http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/draining_servers.rb ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/draining_servers.rb b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/draining_servers.rb new file mode 100644 index 0000000..5bcb5b6 --- /dev/null +++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/draining_servers.rb @@ -0,0 +1,164 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Add or remove servers from draining mode via zookeeper + +require 'optparse' +include Java + +import org.apache.hadoop.hbase.HBaseConfiguration +import org.apache.hadoop.hbase.client.HBaseAdmin +import org.apache.hadoop.hbase.zookeeper.ZKUtil +import org.apache.commons.logging.Log +import org.apache.commons.logging.LogFactory + +# Name of this script +NAME = "draining_servers" + +# Do command-line parsing +options = {} +optparse = OptionParser.new do |opts| + opts.banner = "Usage: ./hbase org.jruby.Main #{NAME}.rb [options] add|remove|list <hostname>|<host:port>|<servername> ..." + opts.separator 'Add remove or list servers in draining mode. Can accept either hostname to drain all region servers' + + 'in that host, a host:port pair or a host,port,startCode triplet. More than one server can be given separated by space' + opts.on('-h', '--help', 'Display usage information') do + puts opts + exit + end + options[:debug] = false + opts.on('-d', '--debug', 'Display extra debug logging') do + options[:debug] = true + end +end +optparse.parse! + +# Return array of servernames where servername is hostname+port+startcode +# comma-delimited +def getServers(admin) + serverInfos = admin.getClusterStatus().getServerInfo() + servers = [] + for server in serverInfos + servers << server.getServerName() + end + return servers +end + +def getServerNames(hostOrServers, config) + ret = [] + + for hostOrServer in hostOrServers + # check whether it is already serverName. 
No need to connect to cluster + parts = hostOrServer.split(',') + if parts.size() == 3 + ret << hostOrServer + else + admin = HBaseAdmin.new(config) if not admin + servers = getServers(admin) + + hostOrServer = hostOrServer.gsub(/:/, ",") + for server in servers + ret << server if server.start_with?(hostOrServer) + end + end + end + + admin.close() if admin + return ret +end + +def addServers(options, hostOrServers) + config = HBaseConfiguration.create() + servers = getServerNames(hostOrServers, config) + + zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil) + parentZnode = zkw.drainingZNode + + begin + for server in servers + node = ZKUtil.joinZNode(parentZnode, server) + ZKUtil.createAndFailSilent(zkw, node) + end + ensure + zkw.close() + end +end + +def removeServers(options, hostOrServers) + config = HBaseConfiguration.create() + servers = getServerNames(hostOrServers, config) + + zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil) + parentZnode = zkw.drainingZNode + + begin + for server in servers + node = ZKUtil.joinZNode(parentZnode, server) + ZKUtil.deleteNodeFailSilent(zkw, node) + end + ensure + zkw.close() + end +end + +# list servers in draining mode +def listServers(options) + config = HBaseConfiguration.create() + + zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil) + parentZnode = zkw.drainingZNode + + servers = ZKUtil.listChildrenNoWatch(zkw, parentZnode) + servers.each {|server| puts server} +end + +hostOrServers = ARGV[1..ARGV.size()] + +# Create a logger and disable the DEBUG-level annoying client logging +def configureLogging(options) + apacheLogger = LogFactory.getLog(NAME) + # Configure log4j to not spew so much + unless (options[:debug]) + logger = org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase") + logger.setLevel(org.apache.log4j.Level::WARN) + logger = org.apache.log4j.Logger.getLogger("org.apache.zookeeper") + logger.setLevel(org.apache.log4j.Level::WARN) + end + return apacheLogger +end + +# Create a logger and save it to ruby global +$LOG = configureLogging(options) +case ARGV[0] + when 'add' + if ARGV.length < 2 + puts optparse + exit 1 + end + addServers(options, hostOrServers) + when 'remove' + if ARGV.length < 2 + puts optparse + exit 1 + end + removeServers(options, hostOrServers) + when 'list' + listServers(options) + else + puts optparse + exit 3 +end
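
For reference, the draining_servers.rb script above is driven through the HBase JRuby runner, as its own usage banner describes. A minimal illustrative session (hypothetical hostnames; the path to the hbase binary depends on the local installation) might look like:

    # Put servers into draining mode: a bare hostname drains every region server
    # on that host, while a host,port,startcode triplet targets a single server.
    hbase org.jruby.Main draining_servers.rb add rs1.example.com rs2.example.com,16020,1490000000000

    # List the servers currently registered under the draining parent znode.
    hbase org.jruby.Main draining_servers.rb list

    # Take the same servers back out of draining mode.
    hbase org.jruby.Main draining_servers.rb remove rs1.example.com rs2.example.com,16020,1490000000000

Ambari's hbase_decommission.py, added further down in this commit, builds equivalent command lines around this script when decommissioning region servers.
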
http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/hbase-smoke-cleanup.sh ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/hbase-smoke-cleanup.sh b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/hbase-smoke-cleanup.sh new file mode 100644 index 0000000..cde19e4 --- /dev/null +++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/hbase-smoke-cleanup.sh @@ -0,0 +1,23 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# +disable 'ambarismoketest' +drop 'ambarismoketest' +exit \ No newline at end of file http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/hbaseSmokeVerify.sh ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/hbaseSmokeVerify.sh new file mode 100644 index 0000000..8b085e8 --- /dev/null +++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/files/hbaseSmokeVerify.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# +conf_dir=$1 +data=$2 +hbase_cmd=$3 +echo "scan 'ambarismoketest'" | $hbase_cmd --config $conf_dir shell > /tmp/hbase_chk_verify +cat /tmp/hbase_chk_verify +echo "Looking for $data" +tr -d '\n|\t| ' < /tmp/hbase_chk_verify | grep -q $data +if [ "$?" 
-ne 0 ] +then + exit 1 +fi + +grep -q '1 row(s)' /tmp/hbase_chk_verify http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/__init__.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/__init__.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/__init__.py new file mode 100644 index 0000000..5561e10 --- /dev/null +++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/__init__.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +""" http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/functions.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/functions.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/functions.py new file mode 100644 index 0000000..f98b9b9 --- /dev/null +++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/functions.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +""" + +import os +import re +import math +import datetime + +from resource_management.core.shell import checked_call + +def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max): + """ + @param heapsize_str: str (e.g '1000m') + @param xmn_percent: float (e.g 0.2) + @param xmn_max: integer (e.g 512) + """ + heapsize = int(re.search('\d+',heapsize_str).group(0)) + heapsize_unit = re.search('\D+',heapsize_str).group(0) + xmn_val = int(math.floor(heapsize*xmn_percent)) + xmn_val -= xmn_val % 8 + + result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val + return str(result_xmn_val) + heapsize_unit + +def ensure_unit_for_memory(memory_size): + memory_size_values = re.findall('\d+', str(memory_size)) + memory_size_unit = re.findall('\D+', str(memory_size)) + + if len(memory_size_values) > 0: + unit = 'm' + if len(memory_size_unit) > 0: + unit = memory_size_unit[0] + if unit not in ['b', 'k', 'm', 'g', 't', 'p']: + raise Exception("Memory size unit error. %s - wrong unit" % unit) + return "%s%s" % (memory_size_values[0], unit) + else: + raise Exception('Memory size can not be calculated') http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase.py new file mode 100644 index 0000000..fced4fc --- /dev/null +++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase.py @@ -0,0 +1,230 @@ +#!/usr/bin/env python +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +""" +import os +import sys +from resource_management.libraries.script.script import Script +from resource_management.libraries.resources.xml_config import XmlConfig +from resource_management.libraries.resources.template_config import TemplateConfig +from resource_management.libraries.functions.format import format +from resource_management.core.source import Template, InlineTemplate +from resource_management.core.resources import Package +from resource_management.core.resources.service import ServiceConfig +from resource_management.core.resources.system import Directory, Execute, File +from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl +from ambari_commons import OSConst +from resource_management.libraries.functions.constants import StackFeature +from resource_management.libraries.functions.stack_features import check_stack_feature + +@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY) +def hbase(name=None): + import params + XmlConfig("hbase-site.xml", + conf_dir = params.hbase_conf_dir, + configurations = params.config['configurations']['hbase-site'], + configuration_attributes=params.config['configuration_attributes']['hbase-site'] + ) + + if params.service_map.has_key(name): + # Manually overriding service logon user & password set by the installation package + service_name = params.service_map[name] + ServiceConfig(service_name, + action="change_user", + username = params.hbase_user, + password = Script.get_password(params.hbase_user)) + +# name is 'master' or 'regionserver' or 'queryserver' or 'client' +@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT) +def hbase(name=None): + import params + + Directory( params.etc_prefix_dir, + mode=0755 + ) + + Directory( params.hbase_conf_dir, + owner = params.hbase_user, + group = params.user_group, + create_parents = True + ) + + Directory(params.java_io_tmpdir, + create_parents = True, + mode=0777 + ) + + # If a file location is specified in ioengine parameter, + # ensure that directory exists. Otherwise create the + # directory with permissions assigned to hbase:hadoop. 
+ ioengine_input = params.ioengine_param + if ioengine_input != None: + if ioengine_input.startswith("file:/"): + ioengine_fullpath = ioengine_input[5:] + ioengine_dir = os.path.dirname(ioengine_fullpath) + Directory(ioengine_dir, + owner = params.hbase_user, + group = params.user_group, + create_parents = True, + mode = 0755 + ) + + parent_dir = os.path.dirname(params.tmp_dir) + # In case if we have several placeholders in path + while ("${" in parent_dir): + parent_dir = os.path.dirname(parent_dir) + if parent_dir != os.path.abspath(os.sep) : + Directory (parent_dir, + create_parents = True, + cd_access="a", + ) + Execute(("chmod", "1777", parent_dir), sudo=True) + + XmlConfig( "hbase-site.xml", + conf_dir = params.hbase_conf_dir, + configurations = params.config['configurations']['hbase-site'], + configuration_attributes=params.config['configuration_attributes']['hbase-site'], + owner = params.hbase_user, + group = params.user_group + ) + + if check_stack_feature(StackFeature.PHOENIX_CORE_HDFS_SITE_REQUIRED, params.version_for_stack_feature_checks): + XmlConfig( "core-site.xml", + conf_dir = params.hbase_conf_dir, + configurations = params.config['configurations']['core-site'], + configuration_attributes=params.config['configuration_attributes']['core-site'], + owner = params.hbase_user, + group = params.user_group + ) + if 'hdfs-site' in params.config['configurations']: + XmlConfig( "hdfs-site.xml", + conf_dir = params.hbase_conf_dir, + configurations = params.config['configurations']['hdfs-site'], + configuration_attributes=params.config['configuration_attributes']['hdfs-site'], + owner = params.hbase_user, + group = params.user_group + ) + else: + File(format("{params.hbase_conf_dir}/hdfs-site.xml"), + action="delete" + ) + File(format("{params.hbase_conf_dir}/core-site.xml"), + action="delete" + ) + + if 'hbase-policy' in params.config['configurations']: + XmlConfig( "hbase-policy.xml", + conf_dir = params.hbase_conf_dir, + configurations = params.config['configurations']['hbase-policy'], + configuration_attributes=params.config['configuration_attributes']['hbase-policy'], + owner = params.hbase_user, + group = params.user_group + ) + # Manually overriding ownership of file installed by hadoop package + else: + File( format("{params.hbase_conf_dir}/hbase-policy.xml"), + owner = params.hbase_user, + group = params.user_group + ) + + File(format("{hbase_conf_dir}/hbase-env.sh"), + owner = params.hbase_user, + content=InlineTemplate(params.hbase_env_sh_template), + group = params.user_group, + ) + + # On some OS this folder could be not exists, so we will create it before pushing there files + Directory(params.limits_conf_dir, + create_parents = True, + owner='root', + group='root' + ) + + File(os.path.join(params.limits_conf_dir, 'hbase.conf'), + owner='root', + group='root', + mode=0644, + content=Template("hbase.conf.j2") + ) + + hbase_TemplateConfig( 'regionservers') + + if params.security_enabled: + hbase_TemplateConfig( format("hbase_{name}_jaas.conf")) + + if name != "client": + Directory( params.pid_dir, + owner = params.hbase_user, + create_parents = True, + cd_access = "a", + mode = 0755, + ) + + Directory (params.log_dir, + owner = params.hbase_user, + create_parents = True, + cd_access = "a", + mode = 0755, + ) + + if (params.log4j_props != None): + File(format("{params.hbase_conf_dir}/log4j.properties"), + mode=0644, + group=params.user_group, + owner=params.hbase_user, + content=InlineTemplate(params.log4j_props) + ) + elif 
(os.path.exists(format("{params.hbase_conf_dir}/log4j.properties"))): + File(format("{params.hbase_conf_dir}/log4j.properties"), + mode=0644, + group=params.user_group, + owner=params.hbase_user + ) + if name == "master": + params.HdfsResource(params.hbase_hdfs_root_dir, + type="directory", + action="create_on_execute", + owner=params.hbase_user + ) + params.HdfsResource(params.hbase_staging_dir, + type="directory", + action="create_on_execute", + owner=params.hbase_user, + mode=0711 + ) + if params.create_hbase_home_directory: + params.HdfsResource(params.hbase_home_directory, + type="directory", + action="create_on_execute", + owner=params.hbase_user, + mode=0755 + ) + params.HdfsResource(None, action="execute") + + if params.phoenix_enabled: + Package(params.phoenix_package, + retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability, + retry_count=params.agent_stack_retry_count) + +def hbase_TemplateConfig(name, tag=None): + import params + + TemplateConfig( format("{hbase_conf_dir}/{name}"), + owner = params.hbase_user, + template_tag = tag + ) http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_client.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_client.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_client.py new file mode 100644 index 0000000..c8128ab --- /dev/null +++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_client.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +""" + +import sys +from resource_management.libraries.script.script import Script +from resource_management.libraries.functions import conf_select, stack_select +from resource_management.libraries.functions.constants import StackFeature +from resource_management.libraries.functions.stack_features import check_stack_feature +from hbase import hbase +from ambari_commons import OSCheck, OSConst +from ambari_commons.os_family_impl import OsFamilyImpl +from resource_management.core.exceptions import ClientComponentHasNoStatus + +class HbaseClient(Script): + def install(self, env): + import params + env.set_params(params) + self.install_packages(env) + self.configure(env) + + def configure(self, env): + import params + env.set_params(params) + hbase(name='client') + + def status(self, env): + raise ClientComponentHasNoStatus() + + +@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY) +class HbaseClientWindows(HbaseClient): + pass + + +@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT) +class HbaseClientDefault(HbaseClient): + def get_component_name(self): + return "hbase-client" + + def pre_upgrade_restart(self, env, upgrade_type=None): + import params + env.set_params(params) + + if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version): + conf_select.select(params.stack_name, "hbase", params.version) + stack_select.select("hbase-client", params.version) + + # phoenix may not always be deployed + try: + stack_select.select("phoenix-client", params.version) + except Exception as e: + print "Ignoring error due to missing phoenix-client" + print str(e) + + + # set all of the hadoop clients since hbase client is upgraded as part + # of the final "CLIENTS" group and we need to ensure that hadoop-client + # is also set + conf_select.select(params.stack_name, "hadoop", params.version) + stack_select.select("hadoop-client", params.version) + + +if __name__ == "__main__": + HbaseClient().execute() http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_decommission.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_decommission.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_decommission.py new file mode 100644 index 0000000..7358674 --- /dev/null +++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_decommission.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +""" +from resource_management.core.resources.system import Execute, File +from resource_management.core.source import StaticFile +from resource_management.libraries.functions.format import format +from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl +from ambari_commons import OSConst + +@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY) +def hbase_decommission(env): + import params + + env.set_params(params) + File(params.region_drainer, content=StaticFile("draining_servers.rb"), owner=params.hbase_user, mode="f") + + hosts = params.hbase_excluded_hosts.split(",") + for host in hosts: + if host: + if params.hbase_drain_only == True: + regiondrainer_cmd = format("cmd /c {hbase_executable} org.jruby.Main {region_drainer} remove {host}") + Execute(regiondrainer_cmd, user=params.hbase_user, logoutput=True) + else: + regiondrainer_cmd = format("cmd /c {hbase_executable} org.jruby.Main {region_drainer} add {host}") + regionmover_cmd = format("cmd /c {hbase_executable} org.jruby.Main {region_mover} unload {host}") + Execute(regiondrainer_cmd, user=params.hbase_user, logoutput=True) + Execute(regionmover_cmd, user=params.hbase_user, logoutput=True) + + +@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT) +def hbase_decommission(env): + import params + + env.set_params(params) + kinit_cmd = params.kinit_cmd_master + + File(params.region_drainer, + content=StaticFile("draining_servers.rb"), + mode=0755 + ) + + if params.hbase_excluded_hosts and params.hbase_excluded_hosts.split(","): + hosts = params.hbase_excluded_hosts.split(",") + elif params.hbase_included_hosts and params.hbase_included_hosts.split(","): + hosts = params.hbase_included_hosts.split(",") + + if params.hbase_drain_only: + for host in hosts: + if host: + regiondrainer_cmd = format( + "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} {master_security_config} org.jruby.Main {region_drainer} remove {host}") + Execute(regiondrainer_cmd, + user=params.hbase_user, + logoutput=True + ) + pass + pass + + else: + for host in hosts: + if host: + regiondrainer_cmd = format( + "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} {master_security_config} org.jruby.Main {region_drainer} add {host}") + regionmover_cmd = format( + "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} {master_security_config} org.jruby.Main {region_mover} unload {host}") + + Execute(regiondrainer_cmd, + user=params.hbase_user, + logoutput=True + ) + + Execute(regionmover_cmd, + user=params.hbase_user, + logoutput=True + ) + pass + pass + pass http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_master.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_master.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_master.py new file mode 100644 index 0000000..d2c8089 --- /dev/null +++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_master.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +""" + +import sys +from resource_management.libraries.script.script import Script +from resource_management.libraries.functions.format import format +from resource_management.libraries.functions.check_process_status import check_process_status +from resource_management.libraries.functions.security_commons import build_expectations, \ + cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \ + FILE_TYPE_XML +from hbase import hbase +from hbase_service import hbase_service +from hbase_decommission import hbase_decommission +import upgrade +from setup_ranger_hbase import setup_ranger_hbase +from ambari_commons import OSCheck, OSConst +from ambari_commons.os_family_impl import OsFamilyImpl + + +class HbaseMaster(Script): + def configure(self, env): + import params + env.set_params(params) + hbase(name='master') + + def install(self, env): + import params + env.set_params(params) + self.install_packages(env) + + def decommission(self, env): + import params + env.set_params(params) + hbase_decommission(env) + + +@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY) +class HbaseMasterWindows(HbaseMaster): + def start(self, env): + import status_params + self.configure(env) + Service(status_params.hbase_master_win_service_name, action="start") + + def stop(self, env): + import status_params + env.set_params(status_params) + Service(status_params.hbase_master_win_service_name, action="stop") + + def status(self, env): + import status_params + env.set_params(status_params) + check_windows_service_status(status_params.hbase_master_win_service_name) + + + +@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT) +class HbaseMasterDefault(HbaseMaster): + def get_component_name(self): + return "hbase-master" + + def pre_upgrade_restart(self, env, upgrade_type=None): + import params + env.set_params(params) + upgrade.prestart(env, "hbase-master") + + def start(self, env, upgrade_type=None): + import params + env.set_params(params) + self.configure(env) # for security + setup_ranger_hbase(upgrade_type=upgrade_type, service_name="hbase-master") + hbase_service('master', action = 'start') + + def stop(self, env, upgrade_type=None): + import params + env.set_params(params) + hbase_service('master', action = 'stop') + + def status(self, env): + import status_params + env.set_params(status_params) + + check_process_status(status_params.hbase_master_pid_file) + + def security_status(self, env): + import status_params + + env.set_params(status_params) + if status_params.security_enabled: + props_value_check = {"hbase.security.authentication" : "kerberos", + "hbase.security.authorization": "true"} + props_empty_check = ['hbase.master.keytab.file', + 'hbase.master.kerberos.principal'] + props_read_check = ['hbase.master.keytab.file'] + hbase_site_expectations = build_expectations('hbase-site', props_value_check, props_empty_check, + props_read_check) + + hbase_expectations = {} + hbase_expectations.update(hbase_site_expectations) + + security_params = get_params_from_filesystem(status_params.hbase_conf_dir, + {'hbase-site.xml': FILE_TYPE_XML}) + result_issues = 
validate_security_config_properties(security_params, hbase_expectations) + if not result_issues: # If all validations passed successfully + try: + # Double check the dict before calling execute + if ( 'hbase-site' not in security_params + or 'hbase.master.keytab.file' not in security_params['hbase-site'] + or 'hbase.master.kerberos.principal' not in security_params['hbase-site']): + self.put_structured_out({"securityState": "UNSECURED"}) + self.put_structured_out( + {"securityIssuesFound": "Keytab file or principal are not set property."}) + return + + cached_kinit_executor(status_params.kinit_path_local, + status_params.hbase_user, + security_params['hbase-site']['hbase.master.keytab.file'], + security_params['hbase-site']['hbase.master.kerberos.principal'], + status_params.hostname, + status_params.tmp_dir) + self.put_structured_out({"securityState": "SECURED_KERBEROS"}) + except Exception as e: + self.put_structured_out({"securityState": "ERROR"}) + self.put_structured_out({"securityStateErrorInfo": str(e)}) + else: + issues = [] + for cf in result_issues: + issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf])) + self.put_structured_out({"securityIssuesFound": ". ".join(issues)}) + self.put_structured_out({"securityState": "UNSECURED"}) + else: + self.put_structured_out({"securityState": "UNSECURED"}) + + def get_log_folder(self): + import params + return params.log_dir + + def get_user(self): + import params + return params.hbase_user + + def get_pid_files(self): + import status_params + return [status_params.hbase_master_pid_file] + +if __name__ == "__main__": + HbaseMaster().execute() http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_regionserver.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_regionserver.py new file mode 100644 index 0000000..226e7fd5 --- /dev/null +++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_regionserver.py @@ -0,0 +1,174 @@ +#!/usr/bin/env python +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +""" + +import sys + +from resource_management.core import shell +from resource_management.libraries.script.script import Script +from resource_management.libraries.functions.format import format +from resource_management.libraries.functions.check_process_status import check_process_status +from resource_management.libraries.functions.security_commons import build_expectations, \ + cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \ + FILE_TYPE_XML + +from ambari_commons import OSCheck, OSConst +from ambari_commons.os_family_impl import OsFamilyImpl + +from hbase import hbase +from hbase_service import hbase_service +import upgrade +from setup_ranger_hbase import setup_ranger_hbase + + +class HbaseRegionServer(Script): + def install(self, env): + import params + env.set_params(params) + self.install_packages(env) + + def configure(self, env): + import params + env.set_params(params) + hbase(name='regionserver') + + def decommission(self, env): + print "Decommission not yet implemented!" + + + +@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY) +class HbaseRegionServerWindows(HbaseRegionServer): + def start(self, env): + import status_params + self.configure(env) + Service(status_params.hbase_regionserver_win_service_name, action="start") + + def stop(self, env): + import status_params + env.set_params(status_params) + Service(status_params.hbase_regionserver_win_service_name, action="stop") + + def status(self, env): + import status_params + env.set_params(status_params) + check_windows_service_status(status_params.hbase_regionserver_win_service_name) + + + +@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT) +class HbaseRegionServerDefault(HbaseRegionServer): + def get_component_name(self): + return "hbase-regionserver" + + def pre_upgrade_restart(self, env, upgrade_type=None): + import params + env.set_params(params) + upgrade.prestart(env, "hbase-regionserver") + + def post_upgrade_restart(self, env, upgrade_type=None): + import params + env.set_params(params) + upgrade.post_regionserver(env) + + def start(self, env, upgrade_type=None): + import params + env.set_params(params) + self.configure(env) # for security + setup_ranger_hbase(upgrade_type=upgrade_type, service_name="hbase-regionserver") + + hbase_service('regionserver', action='start') + + def stop(self, env, upgrade_type=None): + import params + env.set_params(params) + + hbase_service( 'regionserver', + action = 'stop' + ) + + def status(self, env): + import status_params + env.set_params(status_params) + + check_process_status(status_params.regionserver_pid_file) + + def security_status(self, env): + import status_params + + env.set_params(status_params) + if status_params.security_enabled: + props_value_check = {"hbase.security.authentication" : "kerberos", + "hbase.security.authorization": "true"} + props_empty_check = ['hbase.regionserver.keytab.file', + 'hbase.regionserver.kerberos.principal'] + props_read_check = ['hbase.regionserver.keytab.file'] + hbase_site_expectations = build_expectations('hbase-site', props_value_check, props_empty_check, + props_read_check) + + hbase_expectations = {} + hbase_expectations.update(hbase_site_expectations) + + security_params = get_params_from_filesystem(status_params.hbase_conf_dir, + {'hbase-site.xml': FILE_TYPE_XML}) + result_issues = validate_security_config_properties(security_params, hbase_expectations) + if not result_issues: # If all validations passed successfully + try: + # Double check the dict before calling execute + if ( 'hbase-site' not in 
security_params + or 'hbase.regionserver.keytab.file' not in security_params['hbase-site'] + or 'hbase.regionserver.kerberos.principal' not in security_params['hbase-site']): + self.put_structured_out({"securityState": "UNSECURED"}) + self.put_structured_out( + {"securityIssuesFound": "Keytab file or principal are not set property."}) + return + + cached_kinit_executor(status_params.kinit_path_local, + status_params.hbase_user, + security_params['hbase-site']['hbase.regionserver.keytab.file'], + security_params['hbase-site']['hbase.regionserver.kerberos.principal'], + status_params.hostname, + status_params.tmp_dir) + self.put_structured_out({"securityState": "SECURED_KERBEROS"}) + except Exception as e: + self.put_structured_out({"securityState": "ERROR"}) + self.put_structured_out({"securityStateErrorInfo": str(e)}) + else: + issues = [] + for cf in result_issues: + issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf])) + self.put_structured_out({"securityIssuesFound": ". ".join(issues)}) + self.put_structured_out({"securityState": "UNSECURED"}) + else: + self.put_structured_out({"securityState": "UNSECURED"}) + + def get_log_folder(self): + import params + return params.log_dir + + def get_user(self): + import params + return params.hbase_user + + def get_pid_files(self): + import status_params + return [status_params.regionserver_pid_file] + +if __name__ == "__main__": + HbaseRegionServer().execute() http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_service.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_service.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_service.py new file mode 100644 index 0000000..1dbd560 --- /dev/null +++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_service.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +""" + +from resource_management.libraries.functions.format import format +from resource_management.libraries.functions.show_logs import show_logs +from resource_management.core.shell import as_sudo +from resource_management.core.resources.system import Execute, File + +def hbase_service( + name, + action = 'start'): # 'start' or 'stop' or 'status' + + import params + + role = name + cmd = format("{daemon_script} --config {hbase_conf_dir}") + pid_file = format("{pid_dir}/hbase-{hbase_user}-{role}.pid") + pid_expression = as_sudo(["cat", pid_file]) + no_op_test = as_sudo(["test", "-f", pid_file]) + format(" && ps -p `{pid_expression}` >/dev/null 2>&1") + + if action == 'start': + daemon_cmd = format("{cmd} start {role}") + + try: + Execute ( daemon_cmd, + not_if = no_op_test, + user = params.hbase_user + ) + except: + show_logs(params.log_dir, params.hbase_user) + raise + elif action == 'stop': + daemon_cmd = format("{cmd} stop {role}") + + try: + Execute ( daemon_cmd, + user = params.hbase_user, + only_if = no_op_test, + # BUGFIX: hbase regionserver sometimes hangs when nn is in safemode + timeout = params.hbase_regionserver_shutdown_timeout, + on_timeout = format("! ( {no_op_test} ) || {sudo} -H -E kill -9 `{pid_expression}`"), + ) + except: + show_logs(params.log_dir, params.hbase_user) + raise + + File(pid_file, + action = "delete", + ) http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_upgrade.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_upgrade.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_upgrade.py new file mode 100644 index 0000000..e5bb781 --- /dev/null +++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/hbase_upgrade.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +""" + +import sys +from resource_management.libraries.script import Script +from resource_management.libraries.functions.format import format +from resource_management.core.resources.system import Execute + +class HbaseMasterUpgrade(Script): + + def take_snapshot(self, env): + import params + + snap_cmd = "echo 'snapshot_all' | {0} shell".format(params.hbase_cmd) + + exec_cmd = "{0} {1}".format(params.kinit_cmd, snap_cmd) + + Execute(exec_cmd, user=params.hbase_user) + + def restore_snapshot(self, env): + import params + print "TODO AMBARI-12698" + +if __name__ == "__main__": + HbaseMasterUpgrade().execute() http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params.py new file mode 100644 index 0000000..e0607f3 --- /dev/null +++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +""" +from ambari_commons import OSCheck +from resource_management.libraries.functions.default import default + +if OSCheck.is_windows_family(): + from params_windows import * +else: + from params_linux import * + +retryAble = default("/commandParams/command_retry_enabled", False) \ No newline at end of file http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params_linux.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params_linux.py new file mode 100644 index 0000000..b7e2b89 --- /dev/null +++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params_linux.py @@ -0,0 +1,426 @@ +#!/usr/bin/env python +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +""" +import status_params +import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set. + +from functions import calc_xmn_from_xms, ensure_unit_for_memory + +from ambari_commons.constants import AMBARI_SUDO_BINARY +from ambari_commons.os_check import OSCheck +from ambari_commons.str_utils import string_set_intersection + +from resource_management.libraries.resources.hdfs_resource import HdfsResource +from resource_management.libraries.functions import conf_select +from resource_management.libraries.functions import stack_select +from resource_management.libraries.functions import format +from resource_management.libraries.functions import StackFeature +from resource_management.libraries.functions.stack_features import check_stack_feature +from resource_management.libraries.functions.stack_features import get_stack_feature_version +from resource_management.libraries.functions.default import default +from resource_management.libraries.functions import get_kinit_path +from resource_management.libraries.functions import is_empty +from resource_management.libraries.functions import get_unique_id_and_date +from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources +from resource_management.libraries.script.script import Script +from resource_management.libraries.functions.expect import expect +from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames +from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config + +# server configurations +config = Script.get_config() +exec_tmp_dir = Script.get_tmp_dir() +sudo = AMBARI_SUDO_BINARY + +stack_name = status_params.stack_name +agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability'] +agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int) +version = default("/commandParams/version", None) +component_directory = status_params.component_directory +etc_prefix_dir = "/etc/hbase" + +stack_version_unformatted = status_params.stack_version_unformatted +stack_version_formatted = status_params.stack_version_formatted +stack_root = status_params.stack_root + +# get the correct version to use for checking stack features +version_for_stack_feature_checks = get_stack_feature_version(config) + +stack_supports_ranger_kerberos = check_stack_feature(StackFeature.RANGER_KERBEROS_SUPPORT, version_for_stack_feature_checks) +stack_supports_ranger_audit_db = check_stack_feature(StackFeature.RANGER_AUDIT_DB_SUPPORT, version_for_stack_feature_checks) + +# hadoop default parameters +hadoop_bin_dir = stack_select.get_hadoop_dir("bin") +hadoop_conf_dir = conf_select.get_hadoop_conf_dir() +daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh" +region_mover = "/usr/lib/hbase/bin/region_mover.rb" +region_drainer = "/usr/lib/hbase/bin/draining_servers.rb" +hbase_cmd = "/usr/lib/hbase/bin/hbase" +hbase_max_direct_memory_size = None + +# hadoop parameters for stacks supporting rolling_upgrade +if 
stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted): + daemon_script = format('{stack_root}/current/hbase-client/bin/hbase-daemon.sh') + region_mover = format('{stack_root}/current/hbase-client/bin/region_mover.rb') + region_drainer = format('{stack_root}/current/hbase-client/bin/draining_servers.rb') + hbase_cmd = format('{stack_root}/current/hbase-client/bin/hbase') + + hbase_max_direct_memory_size = default('configurations/hbase-env/hbase_max_direct_memory_size', None) + + daemon_script=format("{stack_root}/current/{component_directory}/bin/hbase-daemon.sh") + region_mover = format("{stack_root}/current/{component_directory}/bin/region_mover.rb") + region_drainer = format("{stack_root}/current/{component_directory}/bin/draining_servers.rb") + hbase_cmd = format("{stack_root}/current/{component_directory}/bin/hbase") + + +hbase_conf_dir = status_params.hbase_conf_dir +limits_conf_dir = status_params.limits_conf_dir + +hbase_user_nofile_limit = default("/configurations/hbase-env/hbase_user_nofile_limit", "32000") +hbase_user_nproc_limit = default("/configurations/hbase-env/hbase_user_nproc_limit", "16000") + +# no symlink for phoenix-server at this point +phx_daemon_script = format('{stack_root}/current/phoenix-server/bin/queryserver.py') + +hbase_excluded_hosts = config['commandParams']['excluded_hosts'] +hbase_drain_only = default("/commandParams/mark_draining_only",False) +hbase_included_hosts = config['commandParams']['included_hosts'] + +hbase_user = status_params.hbase_user +hbase_principal_name = config['configurations']['hbase-env']['hbase_principal_name'] +smokeuser = config['configurations']['cluster-env']['smokeuser'] +_authentication = config['configurations']['core-site']['hadoop.security.authentication'] +security_enabled = config['configurations']['cluster-env']['security_enabled'] + +# this is "hadoop-metrics.properties" for 1.x stacks +metric_prop_file_name = "hadoop-metrics2-hbase.properties" + +# not supporting 32 bit jdk. 
+java64_home = config['hostLevelParams']['java_home'] +java_version = expect("/hostLevelParams/java_version", int) + +log_dir = config['configurations']['hbase-env']['hbase_log_dir'] +java_io_tmpdir = default("/configurations/hbase-env/hbase_java_io_tmpdir", "/tmp") +master_heapsize = ensure_unit_for_memory(config['configurations']['hbase-env']['hbase_master_heapsize']) + +regionserver_heapsize = ensure_unit_for_memory(config['configurations']['hbase-env']['hbase_regionserver_heapsize']) +regionserver_xmn_max = config['configurations']['hbase-env']['hbase_regionserver_xmn_max'] +regionserver_xmn_percent = expect("/configurations/hbase-env/hbase_regionserver_xmn_ratio", float) +regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max) + +hbase_regionserver_shutdown_timeout = expect('/configurations/hbase-env/hbase_regionserver_shutdown_timeout', int, 30) + +phoenix_hosts = default('/clusterHostInfo/phoenix_query_server_hosts', []) +phoenix_enabled = default('/configurations/hbase-env/phoenix_sql_enabled', False) +has_phoenix = len(phoenix_hosts) > 0 + +underscored_version = stack_version_unformatted.replace('.', '_') +dashed_version = stack_version_unformatted.replace('.', '-') +if OSCheck.is_redhat_family() or OSCheck.is_suse_family(): + phoenix_package = format("phoenix_{underscored_version}_*") +elif OSCheck.is_ubuntu_family(): + phoenix_package = format("phoenix-{dashed_version}-.*") + +pid_dir = status_params.pid_dir +tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir'] +local_dir = config['configurations']['hbase-site']['hbase.local.dir'] +ioengine_param = default('/configurations/hbase-site/hbase.bucketcache.ioengine', None) + +client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf") +master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf") +regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf") +queryserver_jaas_config_file = format("{hbase_conf_dir}/hbase_queryserver_jaas.conf") + +ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", [])) +has_metric_collector = not len(ams_collector_hosts) == 0 +if has_metric_collector: + if 'cluster-env' in config['configurations'] and \ + 'metrics_collector_vip_port' in config['configurations']['cluster-env']: + metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port'] + else: + metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188") + if metric_collector_web_address.find(':') != -1: + metric_collector_port = metric_collector_web_address.split(':')[1] + else: + metric_collector_port = '6188' + if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY": + metric_collector_protocol = 'https' + else: + metric_collector_protocol = 'http' + metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "") + metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "") + metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "") + + pass +metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60) +metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10) + +# if hbase is selected the hbase_rs_hosts, should not be empty, but still 
default just in case +if 'slave_hosts' in config['clusterHostInfo']: + rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/slave_hosts') #if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves +else: + rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/all_hosts') + +smoke_test_user = config['configurations']['cluster-env']['smokeuser'] +smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name'] +smokeuser_permissions = "RWXCA" +service_check_data = get_unique_id_and_date() +user_group = config['configurations']['cluster-env']["user_group"] + +if security_enabled: + _hostname_lowercase = config['hostname'].lower() + master_jaas_princ = config['configurations']['hbase-site']['hbase.master.kerberos.principal'].replace('_HOST',_hostname_lowercase) + master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file'] + regionserver_jaas_princ = config['configurations']['hbase-site']['hbase.regionserver.kerberos.principal'].replace('_HOST',_hostname_lowercase) + _queryserver_jaas_princ = config['configurations']['hbase-site']['phoenix.queryserver.kerberos.principal'] + if not is_empty(_queryserver_jaas_princ): + queryserver_jaas_princ =_queryserver_jaas_princ.replace('_HOST',_hostname_lowercase) + +regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file'] +queryserver_keytab_path = config['configurations']['hbase-site']['phoenix.queryserver.keytab.file'] +smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab'] +hbase_user_keytab = config['configurations']['hbase-env']['hbase_user_keytab'] +kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None)) +if security_enabled: + kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_principal_name};") + kinit_cmd_master = format("{kinit_path_local} -kt {master_keytab_path} {master_jaas_princ};") + master_security_config = format("-Djava.security.auth.login.config={hbase_conf_dir}/hbase_master_jaas.conf") +else: + kinit_cmd = "" + kinit_cmd_master = "" + master_security_config = "" + +#log4j.properties +# HBase log4j settings +hbase_log_maxfilesize = default('configurations/hbase-log4j/hbase_log_maxfilesize',256) +hbase_log_maxbackupindex = default('configurations/hbase-log4j/hbase_log_maxbackupindex',20) +hbase_security_log_maxfilesize = default('configurations/hbase-log4j/hbase_security_log_maxfilesize',256) +hbase_security_log_maxbackupindex = default('configurations/hbase-log4j/hbase_security_log_maxbackupindex',20) + +if (('hbase-log4j' in config['configurations']) and ('content' in config['configurations']['hbase-log4j'])): + log4j_props = config['configurations']['hbase-log4j']['content'] +else: + log4j_props = None + +hbase_env_sh_template = config['configurations']['hbase-env']['content'] + +hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir'] +hbase_staging_dir = "/apps/hbase/staging" +#for create_hdfs_directory +hostname = config["hostname"] +hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] +hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] +hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] + +hdfs_site = config['configurations']['hdfs-site'] +default_fs = config['configurations']['core-site']['fs.defaultFS'] + +dfs_type = default("/commandParams/dfs_type", "") + +import functools +#create partial 
functions with common arguments for every HdfsResource call +#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code +HdfsResource = functools.partial( + HdfsResource, + user=hdfs_user, + hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore", + security_enabled = security_enabled, + keytab = hdfs_user_keytab, + kinit_path_local = kinit_path_local, + hadoop_bin_dir = hadoop_bin_dir, + hadoop_conf_dir = hadoop_conf_dir, + principal_name = hdfs_principal_name, + hdfs_site = hdfs_site, + default_fs = default_fs, + immutable_paths = get_not_managed_resources(), + dfs_type = dfs_type +) + +zookeeper_znode_parent = config['configurations']['hbase-site']['zookeeper.znode.parent'] +hbase_zookeeper_quorum = config['configurations']['hbase-site']['hbase.zookeeper.quorum'] +hbase_zookeeper_property_clientPort = config['configurations']['hbase-site']['hbase.zookeeper.property.clientPort'] +hbase_security_authentication = config['configurations']['hbase-site']['hbase.security.authentication'] +hadoop_security_authentication = config['configurations']['core-site']['hadoop.security.authentication'] + +# ranger hbase plugin section start + +# to get db connector jar +jdk_location = config['hostLevelParams']['jdk_location'] + +# ranger host +ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", []) +has_ranger_admin = not len(ranger_admin_hosts) == 0 + +# ranger support xml_configuration flag, instead of depending on ranger xml_configurations_supported/ranger-env introduced, using stack feature +xml_configurations_supported = check_stack_feature(StackFeature.RANGER_XML_CONFIGURATION, version_for_stack_feature_checks) + +# ambari-server hostname +ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0] + +# ranger hbase plugin enabled property +enable_ranger_hbase = default("/configurations/ranger-hbase-plugin-properties/ranger-hbase-plugin-enabled", "No") +enable_ranger_hbase = True if enable_ranger_hbase.lower() == 'yes' else False + +# ranger hbase properties +if enable_ranger_hbase: + # get ranger policy url + policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url'] + if xml_configurations_supported: + policymgr_mgr_url = config['configurations']['ranger-hbase-security']['ranger.plugin.hbase.policy.rest.url'] + + if not is_empty(policymgr_mgr_url) and policymgr_mgr_url.endswith('/'): + policymgr_mgr_url = policymgr_mgr_url.rstrip('/') + + # ranger audit db user + xa_audit_db_user = default('/configurations/admin-properties/audit_db_user', 'rangerlogger') + + # ranger hbase service/repository name + repo_name = str(config['clusterName']) + '_hbase' + repo_name_value = config['configurations']['ranger-hbase-security']['ranger.plugin.hbase.service.name'] + if not is_empty(repo_name_value) and repo_name_value != "{{repo_name}}": + repo_name = repo_name_value + + common_name_for_certificate = config['configurations']['ranger-hbase-plugin-properties']['common.name.for.certificate'] + repo_config_username = config['configurations']['ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_USERNAME'] + ranger_plugin_properties = config['configurations']['ranger-hbase-plugin-properties'] + policy_user = config['configurations']['ranger-hbase-plugin-properties']['policy_user'] + repo_config_password = config['configurations']['ranger-hbase-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'] + + # ranger-env config + ranger_env = config['configurations']['ranger-env'] + + # create ranger-env 
config having external ranger credential properties + if not has_ranger_admin and enable_ranger_hbase: + external_admin_username = default('/configurations/ranger-hbase-plugin-properties/external_admin_username', 'admin') + external_admin_password = default('/configurations/ranger-hbase-plugin-properties/external_admin_password', 'admin') + external_ranger_admin_username = default('/configurations/ranger-hbase-plugin-properties/external_ranger_admin_username', 'amb_ranger_admin') + external_ranger_admin_password = default('/configurations/ranger-hbase-plugin-properties/external_ranger_admin_password', 'amb_ranger_admin') + ranger_env = {} + ranger_env['admin_username'] = external_admin_username + ranger_env['admin_password'] = external_admin_password + ranger_env['ranger_admin_username'] = external_ranger_admin_username + ranger_env['ranger_admin_password'] = external_ranger_admin_password + + xa_audit_db_password = '' + if not is_empty(config['configurations']['admin-properties']['audit_db_password']) and stack_supports_ranger_audit_db and has_ranger_admin: + xa_audit_db_password = config['configurations']['admin-properties']['audit_db_password'] + + downloaded_custom_connector = None + previous_jdbc_jar_name = None + driver_curl_source = None + driver_curl_target = None + previous_jdbc_jar = None + + if has_ranger_admin and stack_supports_ranger_audit_db: + xa_audit_db_flavor = config['configurations']['admin-properties']['DB_FLAVOR'] + jdbc_jar_name, previous_jdbc_jar_name, audit_jdbc_url, jdbc_driver = get_audit_configs(config) + + downloaded_custom_connector = format("{exec_tmp_dir}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None + driver_curl_source = format("{jdk_location}/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None + driver_curl_target = format("{stack_root}/current/{component_directory}/lib/{jdbc_jar_name}") if stack_supports_ranger_audit_db else None + previous_jdbc_jar = format("{stack_root}/current/{component_directory}/lib/{previous_jdbc_jar_name}") if stack_supports_ranger_audit_db else None + sql_connector_jar = '' + + if security_enabled: + master_principal = config['configurations']['hbase-site']['hbase.master.kerberos.principal'] + + hbase_ranger_plugin_config = { + 'username': repo_config_username, + 'password': repo_config_password, + 'hadoop.security.authentication': hadoop_security_authentication, + 'hbase.security.authentication': hbase_security_authentication, + 'hbase.zookeeper.property.clientPort': hbase_zookeeper_property_clientPort, + 'hbase.zookeeper.quorum': hbase_zookeeper_quorum, + 'zookeeper.znode.parent': zookeeper_znode_parent, + 'commonNameForCertificate': common_name_for_certificate, + 'hbase.master.kerberos.principal': master_principal if security_enabled else '' + } + + hbase_ranger_plugin_repo = { + 'isActive': 'true', + 'config': json.dumps(hbase_ranger_plugin_config), + 'description': 'hbase repo', + 'name': repo_name, + 'repositoryType': 'hbase', + 'assetType': '2' + } + + custom_ranger_service_config = generate_ranger_service_config(ranger_plugin_properties) + if len(custom_ranger_service_config) > 0: + hbase_ranger_plugin_config.update(custom_ranger_service_config) + + if stack_supports_ranger_kerberos and security_enabled: + hbase_ranger_plugin_config['policy.download.auth.users'] = hbase_user + hbase_ranger_plugin_config['tag.download.auth.users'] = hbase_user + hbase_ranger_plugin_config['policy.grantrevoke.auth.users'] = hbase_user + + if stack_supports_ranger_kerberos: + 
hbase_ranger_plugin_config['ambari.service.check.user'] = policy_user + + hbase_ranger_plugin_repo = { + 'isEnabled': 'true', + 'configs': hbase_ranger_plugin_config, + 'description': 'hbase repo', + 'name': repo_name, + 'type': 'hbase' + } + + ranger_hbase_principal = None + ranger_hbase_keytab = None + if stack_supports_ranger_kerberos and security_enabled and 'hbase-master' in component_directory.lower(): + ranger_hbase_principal = master_jaas_princ + ranger_hbase_keytab = master_keytab_path + elif stack_supports_ranger_kerberos and security_enabled and 'hbase-regionserver' in component_directory.lower(): + ranger_hbase_principal = regionserver_jaas_princ + ranger_hbase_keytab = regionserver_keytab_path + + xa_audit_db_is_enabled = False + if xml_configurations_supported and stack_supports_ranger_audit_db: + xa_audit_db_is_enabled = config['configurations']['ranger-hbase-audit']['xasecure.audit.destination.db'] + + xa_audit_hdfs_is_enabled = config['configurations']['ranger-hbase-audit']['xasecure.audit.destination.hdfs'] if xml_configurations_supported else False + ssl_keystore_password = config['configurations']['ranger-hbase-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password'] if xml_configurations_supported else None + ssl_truststore_password = config['configurations']['ranger-hbase-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password'] if xml_configurations_supported else None + credential_file = format('/etc/ranger/{repo_name}/cred.jceks') + + # for SQLA explicitly disable audit to DB for Ranger + if has_ranger_admin and stack_supports_ranger_audit_db and xa_audit_db_flavor.lower() == 'sqla': + xa_audit_db_is_enabled = False + +# need this to capture cluster name from where ranger hbase plugin is enabled +cluster_name = config['clusterName'] + +# ranger hbase plugin section end + +create_hbase_home_directory = check_stack_feature(StackFeature.HBASE_HOME_DIRECTORY, stack_version_formatted) +hbase_home_directory = format("/user/{hbase_user}") + +atlas_hosts = default('/clusterHostInfo/atlas_server_hosts', []) +has_atlas = len(atlas_hosts) > 0 + +metadata_user = default('/configurations/atlas-env/metadata_user', None) +atlas_graph_storage_hostname = default('/configurations/application-properties/atlas.graph.storage.hostname', None) +atlas_graph_storage_hbase_table = default('/configurations/application-properties/atlas.graph.storage.hbase.table', None) +atlas_audit_hbase_tablename = default('/configurations/application-properties/atlas.audit.hbase.tablename', None) + +if has_atlas: + zk_hosts_matches = string_set_intersection(atlas_graph_storage_hostname, hbase_zookeeper_quorum) + atlas_with_managed_hbase = len(zk_hosts_matches) > 0 +else: + atlas_with_managed_hbase = False http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params_windows.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params_windows.py new file mode 100644 index 0000000..ddc9e93 --- /dev/null +++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/params_windows.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. 
See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +""" + +import os +import status_params +from resource_management.libraries.script.script import Script + +# server configurations +config = Script.get_config() +hbase_conf_dir = os.environ["HBASE_CONF_DIR"] +hbase_bin_dir = os.path.join(os.environ["HBASE_HOME"],'bin') +hbase_executable = os.path.join(hbase_bin_dir,"hbase.cmd") +stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],"..")) +hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"] +hbase_user = hadoop_user + +#decomm params +region_drainer = os.path.join(hbase_bin_dir,"draining_servers.rb") +region_mover = os.path.join(hbase_bin_dir,"region_mover.rb") +hbase_excluded_hosts = config['commandParams']['excluded_hosts'] +hbase_drain_only = config['commandParams']['mark_draining_only'] + +service_map = { + 'master' : status_params.hbase_master_win_service_name, + 'regionserver' : status_params.hbase_regionserver_win_service_name +} http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/phoenix_queryserver.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/phoenix_queryserver.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/phoenix_queryserver.py new file mode 100644 index 0000000..77820cc --- /dev/null +++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/phoenix_queryserver.py @@ -0,0 +1,92 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +""" + +from resource_management.libraries.functions import conf_select +from resource_management.libraries.functions import stack_select +from resource_management.libraries.functions import StackFeature +from resource_management.libraries.functions.stack_features import check_stack_feature +from resource_management.libraries.script import Script +from phoenix_service import phoenix_service +from hbase import hbase + +# Note: Phoenix Query Server is only applicable to stack version supporting Phoenix. 
+class PhoenixQueryServer(Script): + + def install(self, env): + import params + env.set_params(params) + self.install_packages(env) + + + def get_component_name(self): + return "phoenix-server" + + + def configure(self, env): + import params + env.set_params(params) + hbase(name='queryserver') + + + def start(self, env, upgrade_type=None): + import params + env.set_params(params) + self.configure(env) + phoenix_service('start') + + + def stop(self, env, upgrade_type=None): + import params + env.set_params(params) + phoenix_service('stop') + + + def pre_upgrade_restart(self, env, upgrade_type=None): + import params + env.set_params(params) + + if params.stack_version_formatted and check_stack_feature(StackFeature.PHOENIX, params.stack_version_formatted): + # phoenix uses hbase configs + conf_select.select(params.stack_name, "hbase", params.version) + stack_select.select("phoenix-server", params.version) + + + def status(self, env): + import status_params + env.set_params(status_params) + phoenix_service('status') + + + def security_status(self, env): + self.put_structured_out({"securityState": "UNSECURED"}) + + def get_log_folder(self): + import params + return params.log_dir + + def get_user(self): + import params + return params.hbase_user + + def get_pid_files(self): + import status_params + return [status_params.phoenix_pid_file] + +if __name__ == "__main__": + PhoenixQueryServer().execute() \ No newline at end of file http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/phoenix_service.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/phoenix_service.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/phoenix_service.py new file mode 100644 index 0000000..42d9cd1 --- /dev/null +++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/phoenix_service.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +""" + +import errno +from resource_management.core.logger import Logger +from resource_management.core.resources.system import Execute +from resource_management.core.resources.system import File +from resource_management.libraries.functions import check_process_status, format + +# Note: Phoenix Query Server is only applicable to phoenix version stacks and above. 
+def phoenix_service(action = 'start'): # 'start', 'stop', 'status' + # Note: params should already be imported before calling phoenix_service() + import status_params + pid_file = status_params.phoenix_pid_file + no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1") + + if action == "status": + check_process_status(pid_file) + else: + env = {'JAVA_HOME': format("{java64_home}"), 'HBASE_CONF_DIR': format("{hbase_conf_dir}")} + daemon_cmd = format("{phx_daemon_script} {action}") + if action == 'start': + Execute(daemon_cmd, + user=format("{hbase_user}"), + environment=env) + + elif action == 'stop': + Execute(daemon_cmd, + user=format("{hbase_user}"), + environment=env + ) + try: + File(pid_file, action = "delete") + except OSError as exc: + # OSError: [Errno 2] No such file or directory + if exc.errno == errno.ENOENT: + Logger.info("Did not remove '{0}' as it did not exist".format(pid_file)) + else: + raise http://git-wip-us.apache.org/repos/asf/ambari/blob/41006ebe/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/service_check.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/service_check.py new file mode 100644 index 0000000..5184ea7 --- /dev/null +++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/package/scripts/service_check.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +""" + +from resource_management.libraries.script.script import Script +from resource_management.libraries.functions.format import format +from resource_management.core.resources.system import Execute, File +from resource_management.core.source import StaticFile +from resource_management.core.source import Template +import functions +from ambari_commons import OSCheck, OSConst +from ambari_commons.os_family_impl import OsFamilyImpl + + +class HbaseServiceCheck(Script): + pass + + +@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY) +class HbaseServiceCheckWindows(HbaseServiceCheck): + def service_check(self, env): + import params + env.set_params(params) + smoke_cmd = os.path.join(params.stack_root, "Run-SmokeTests.cmd") + service = "HBASE" + Execute(format("cmd /C {smoke_cmd} {service}"), user=params.hbase_user, logoutput=True) + + +@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT) +class HbaseServiceCheckDefault(HbaseServiceCheck): + def service_check(self, env): + import params + env.set_params(params) + + output_file = "/apps/hbase/data/ambarismoketest" + smokeuser_kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal} &&") if params.security_enabled else "" + hbase_servicecheck_file = format("{exec_tmp_dir}/hbase-smoke.sh") + hbase_servicecheck_cleanup_file = format("{exec_tmp_dir}/hbase-smoke-cleanup.sh") + + File( format("{exec_tmp_dir}/hbaseSmokeVerify.sh"), + content = StaticFile("hbaseSmokeVerify.sh"), + mode = 0755 + ) + + File(hbase_servicecheck_cleanup_file, + content = StaticFile("hbase-smoke-cleanup.sh"), + mode = 0755 + ) + + File( hbase_servicecheck_file, + mode = 0755, + content = Template('hbase-smoke.sh.j2') + ) + + if params.security_enabled: + hbase_grant_premissions_file = format("{exec_tmp_dir}/hbase_grant_permissions.sh") + grantprivelegecmd = format("{kinit_cmd} {hbase_cmd} shell {hbase_grant_premissions_file}") + + File( hbase_grant_premissions_file, + owner = params.hbase_user, + group = params.user_group, + mode = 0644, + content = Template('hbase_grant_permissions.j2') + ) + + Execute( grantprivelegecmd, + user = params.hbase_user, + logoutput = True + ) + + servicecheckcmd = format("{smokeuser_kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} shell {hbase_servicecheck_file}") + smokeverifycmd = format("{exec_tmp_dir}/hbaseSmokeVerify.sh {hbase_conf_dir} {service_check_data} {hbase_cmd}") + cleanupCmd = format("{smokeuser_kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} shell {hbase_servicecheck_cleanup_file}") + Execute(format("{servicecheckcmd} && {smokeverifycmd} && {cleanupCmd}"), + tries = 6, + try_sleep = 5, + user = params.smoke_test_user, + logoutput = True + ) + +if __name__ == "__main__": + HbaseServiceCheck().execute() +