http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py deleted file mode 100644 index 48dc4b0..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py +++ /dev/null @@ -1,256 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -""" - -import os -from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil -from resource_management.core.resources.jcepolicyinfo import JcePolicyInfo - -from resource_management import * - -def setup_hadoop(): - """ - Setup hadoop files and directories - """ - import params - - Execute(("setenforce","0"), - only_if="test -f /selinux/enforce", - not_if="(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)", - sudo=True, - ) - - #directories - if params.has_namenode or params.dfs_type == 'HCFS': - Directory(params.hdfs_log_dir_prefix, - create_parents = True, - owner='root', - group=params.user_group, - mode=0775, - cd_access='a', - ) - if params.has_namenode: - Directory(params.hadoop_pid_dir_prefix, - create_parents = True, - owner='root', - group='root', - cd_access='a', - ) - Directory(params.hadoop_tmp_dir, - create_parents = True, - owner=params.hdfs_user, - cd_access='a', - ) - #files - if params.security_enabled: - tc_owner = "root" - else: - tc_owner = params.hdfs_user - - # if WebHDFS is not enabled we need this jar to create hadoop folders and copy tarballs to HDFS. 
- if params.sysprep_skip_copy_fast_jar_hdfs: - print "Skipping copying of fast-hdfs-resource.jar as host is sys prepped" - elif params.dfs_type == 'HCFS' or not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs): - # for source-code of jar goto contrib/fast-hdfs-resource - File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"), - mode=0644, - content=StaticFile("fast-hdfs-resource.jar") - ) - - if os.path.exists(params.hadoop_conf_dir): - File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'), - owner=tc_owner, - content=Template('commons-logging.properties.j2') - ) - - health_check_template_name = "health_check" - File(os.path.join(params.hadoop_conf_dir, health_check_template_name), - owner=tc_owner, - content=Template(health_check_template_name + ".j2") - ) - - log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties") - if (params.log4j_props != None): - File(log4j_filename, - mode=0644, - group=params.user_group, - owner=params.hdfs_user, - content=InlineTemplate(params.log4j_props) - ) - elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))): - File(log4j_filename, - mode=0644, - group=params.user_group, - owner=params.hdfs_user, - ) - - if params.hadoop_metrics2_properties_content: - File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"), - owner=params.hdfs_user, - group=params.user_group, - content=InlineTemplate(params.hadoop_metrics2_properties_content) - ) - else: - File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"), - owner=params.hdfs_user, - group=params.user_group, - content=Template("hadoop-metrics2.properties.j2") - ) - - if params.dfs_type == 'HCFS' and params.has_core_site and 'ECS_CLIENT' in params.component_list: - create_dirs() - - create_microsoft_r_dir() - - -def setup_configs(): - """ - Creates configs for services HDFS mapred - """ - import params - - if params.has_namenode or params.dfs_type == 'HCFS': - if os.path.exists(params.hadoop_conf_dir): - File(params.task_log4j_properties_location, - content=StaticFile("task-log4j.properties"), - mode=0755 - ) - - if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')): - File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'), - owner=params.hdfs_user, - group=params.user_group - ) - if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')): - File(os.path.join(params.hadoop_conf_dir, 'masters'), - owner=params.hdfs_user, - group=params.user_group - ) - -def create_javahome_symlink(): - if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"): - Directory("/usr/jdk64/", - create_parents = True, - ) - Link("/usr/jdk/jdk1.6.0_31", - to="/usr/jdk64/jdk1.6.0_31", - ) - -def create_dirs(): - import params - params.HdfsResource(params.hdfs_tmp_dir, - type="directory", - action="create_on_execute", - owner=params.hdfs_user, - mode=0777 - ) - params.HdfsResource(params.smoke_hdfs_user_dir, - type="directory", - action="create_on_execute", - owner=params.smoke_user, - mode=params.smoke_hdfs_user_mode - ) - params.HdfsResource(None, - action="execute" - ) - -def create_microsoft_r_dir(): - import params - if 'MICROSOFT_R_NODE_CLIENT' in params.component_list and params.default_fs: - directory = '/user/RevoShare' - try: - params.HdfsResource(directory, - type="directory", - action="create_on_execute", - owner=params.hdfs_user, - mode=0777) - params.HdfsResource(None, action="execute") - except Exception as exception: - Logger.warning("Could 
not check the existence of {0} on DFS while starting {1}, exception: {2}".format(directory, params.current_service, str(exception))) - -def setup_unlimited_key_jce_policy(): - """ - Sets up the unlimited key JCE policy if needed. (sets up ambari JCE as well if ambari and the stack use different JDK) - """ - import params - __setup_unlimited_key_jce_policy(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name, custom_jce_name = params.jce_policy_zip) - if params.ambari_jce_name and params.ambari_jce_name != params.jce_policy_zip: - __setup_unlimited_key_jce_policy(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name, custom_jce_name = params.ambari_jce_name) - -def __setup_unlimited_key_jce_policy(custom_java_home, custom_jdk_name, custom_jce_name): - """ - Sets up the unlimited key JCE policy if needed. - - The following criteria must be met: - - * The cluster has not been previously prepared (sys preped) - cluster-env/sysprep_skip_setup_jce = False - * Ambari is managing the host's JVM - /hostLevelParams/jdk_name is set - * Either security is enabled OR a service requires it - /hostLevelParams/unlimited_key_jce_required = True - * The unlimited key JCE policy has not already been installed - - If the conditions are met, the following steps are taken to install the unlimited key JCE policy JARs - - 1. The unlimited key JCE policy ZIP file is downloaded from the Ambari server and stored in the - Ambari agent's temporary directory - 2. The existing JCE policy JAR files are deleted - 3. The downloaded ZIP file is unzipped into the proper JCE policy directory - - :return: None - """ - import params - - if params.sysprep_skip_setup_jce: - Logger.info("Skipping unlimited key JCE policy check and setup since the host is sys prepped") - - elif not custom_jdk_name: - Logger.debug("Skipping unlimited key JCE policy check and setup since the Java VM is not managed by Ambari") - - elif not params.unlimited_key_jce_required: - Logger.debug("Skipping unlimited key JCE policy check and setup since it is not required") - - else: - jcePolicyInfo = JcePolicyInfo(custom_java_home) - - if jcePolicyInfo.is_unlimited_key_jce_policy(): - Logger.info("The unlimited key JCE policy is required, and appears to have been installed.") - - elif custom_jce_name is None: - raise Fail("The unlimited key JCE policy needs to be installed; however the JCE policy zip is not specified.") - - else: - Logger.info("The unlimited key JCE policy is required, and needs to be installed.") - - jce_zip_target = format("{artifact_dir}/{custom_jce_name}") - jce_zip_source = format("{ambari_server_resources_url}/{custom_jce_name}") - java_security_dir = format("{custom_java_home}/jre/lib/security") - - Logger.debug("Downloading the unlimited key JCE policy files from {0} to {1}.".format(jce_zip_source, jce_zip_target)) - Directory(params.artifact_dir, create_parents=True) - File(jce_zip_target, content=DownloadSource(jce_zip_source)) - - Logger.debug("Removing existing JCE policy JAR files: {0}.".format(java_security_dir)) - File(format("{java_security_dir}/US_export_policy.jar"), action="delete") - File(format("{java_security_dir}/local_policy.jar"), action="delete") - - Logger.debug("Unzipping the unlimited key JCE policy files from {0} into {1}.".format(jce_zip_target, java_security_dir)) - extract_cmd = ("unzip", "-o", "-j", "-q", jce_zip_target, "-d", java_security_dir) - Execute(extract_cmd, - only_if=format("test -e {java_security_dir} && test -f {jce_zip_target}"), - path=['/bin/', 
'/usr/bin'], - sudo=True - )
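The __setup_unlimited_key_jce_policy function above reduces to three filesystem steps once its guard conditions pass: download the policy ZIP from the Ambari server into the agent's temporary directory, delete the export-restricted policy JARs, and unpack the unlimited-key JARs into the JDK's security directory. A minimal standalone sketch of that flow in plain Python 2 (matching the codebase), written outside Ambari's resource framework; the helper name install_unlimited_jce, the jce_url argument, and the default tmp path are illustrative assumptions, not values from this commit:

import os
import urllib2
import zipfile

def install_unlimited_jce(jce_url, java_home, tmp_dir="/var/lib/ambari-agent/tmp"):
    # Step 1: download the JCE policy ZIP from the Ambari server into the
    # agent's temporary directory (mirrors File + DownloadSource above).
    jce_zip = os.path.join(tmp_dir, os.path.basename(jce_url))
    with open(jce_zip, "wb") as out:
        out.write(urllib2.urlopen(jce_url).read())

    # Step 2: delete the existing export-restricted policy JARs.
    security_dir = os.path.join(java_home, "jre", "lib", "security")
    for jar in ("US_export_policy.jar", "local_policy.jar"):
        jar_path = os.path.join(security_dir, jar)
        if os.path.exists(jar_path):
            os.remove(jar_path)

    # Step 3: extract the unlimited-key JARs into the JCE policy directory,
    # flattening archive paths the way the hook's "unzip -j" flag does.
    archive = zipfile.ZipFile(jce_zip)
    for entry in archive.namelist():
        if entry.endswith(".jar"):
            with open(os.path.join(security_dir, os.path.basename(entry)), "wb") as out:
                out.write(archive.read(entry))

The hook itself shells out to "unzip -o -j -q" through the Execute resource with sudo=True because the JDK security directory is normally root-owned; the sketch assumes it already runs with enough privilege to write there.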
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/commons-logging.properties.j2 ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/commons-logging.properties.j2 deleted file mode 100644 index 2197ba5..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/commons-logging.properties.j2 +++ /dev/null @@ -1,43 +0,0 @@ -{# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#} - -#/* -# * Licensed to the Apache Software Foundation (ASF) under one -# * or more contributor license agreements. See the NOTICE file -# * distributed with this work for additional information -# * regarding copyright ownership. The ASF licenses this file -# * to you under the Apache License, Version 2.0 (the -# * "License"); you may not use this file except in compliance -# * with the License. You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ - -#Logging Implementation - -#Log4J -org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger - -#JDK Logger -#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/exclude_hosts_list.j2 ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/exclude_hosts_list.j2 deleted file mode 100644 index 1adba80..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/exclude_hosts_list.j2 +++ /dev/null @@ -1,21 +0,0 @@ -{# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#} - -{% for host in hdfs_exclude_file %} -{{host}} -{% endfor %} http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2 ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2 deleted file mode 100644 index 2cd9aa8..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2 +++ /dev/null @@ -1,107 +0,0 @@ -{# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#} - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# syntax: [prefix].[source|sink|jmx].[instance].[options] -# See package.html for org.apache.hadoop.metrics2 for details - -{% if has_ganglia_server %} -*.period=60 - -*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31 -*.sink.ganglia.period=10 - -# default for supportsparse is false -*.sink.ganglia.supportsparse=true - -.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both -.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40 - -# Hook up to the server -namenode.sink.ganglia.servers={{ganglia_server_host}}:8661 -datanode.sink.ganglia.servers={{ganglia_server_host}}:8659 -jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662 -tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658 -maptask.sink.ganglia.servers={{ganglia_server_host}}:8660 -reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660 -resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664 -nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657 -historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666 -journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654 -nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649 -supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650 - -resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue - -{% endif %} - -{% if has_metric_collector %} - -*.period={{metrics_collection_period}} -*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar -*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink -*.sink.timeline.period={{metrics_collection_period}} -*.sink.timeline.sendInterval={{metrics_report_interval}}000 -*.sink.timeline.slave.host.name={{hostname}} -*.sink.timeline.zookeeper.quorum={{zookeeper_quorum}} -*.sink.timeline.protocol={{metric_collector_protocol}} -*.sink.timeline.port={{metric_collector_port}} -*.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}} -*.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}} - -# HTTPS properties -*.sink.timeline.truststore.path = {{metric_truststore_path}} -*.sink.timeline.truststore.type = {{metric_truststore_type}} -*.sink.timeline.truststore.password = {{metric_truststore_password}} - -datanode.sink.timeline.collector.hosts={{ams_collector_hosts}} -namenode.sink.timeline.collector.hosts={{ams_collector_hosts}} -resourcemanager.sink.timeline.collector.hosts={{ams_collector_hosts}} -nodemanager.sink.timeline.collector.hosts={{ams_collector_hosts}} -jobhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}} -journalnode.sink.timeline.collector.hosts={{ams_collector_hosts}} -applicationhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}} - -resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue - -{% if is_nn_client_port_configured %} -# Namenode rpc ports customization -namenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}} -{% endif %} -{% if is_nn_dn_port_configured %} -namenode.sink.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}} -{% endif %} -{% if is_nn_healthcheck_port_configured %} -namenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}} -{% endif %} - -{% endif %} http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/health_check.j2 ---------------------------------------------------------------------- diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/health_check.j2 deleted file mode 100644 index 0a03d17..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/health_check.j2 +++ /dev/null @@ -1,81 +0,0 @@ -{# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#} - -#!/bin/bash -# -#/* -# * Licensed to the Apache Software Foundation (ASF) under one -# * or more contributor license agreements. See the NOTICE file -# * distributed with this work for additional information -# * regarding copyright ownership. The ASF licenses this file -# * to you under the Apache License, Version 2.0 (the -# * "License"); you may not use this file except in compliance -# * with the License. You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ - -err=0; - -function check_disks { - - for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do - fsdev="" - fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`; - if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then - msg_="$msg_ $m(u)" - else - msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`" - fi - done - - if [ -z "$msg_" ] ; then - echo "disks ok" ; exit 0 - else - echo "$msg_" ; exit 2 - fi - -} - -# Run all checks -for check in disks ; do - msg=`check_${check}` ; - if [ $? -eq 0 ] ; then - ok_msg="$ok_msg$msg," - else - err_msg="$err_msg$msg," - fi -done - -if [ ! -z "$err_msg" ] ; then - echo -n "ERROR $err_msg " -fi -if [ ! -z "$ok_msg" ] ; then - echo -n "OK: $ok_msg" -fi - -echo - -# Success! -exit 0 http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/include_hosts_list.j2 ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/include_hosts_list.j2 deleted file mode 100644 index 4a9e713..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/include_hosts_list.j2 +++ /dev/null @@ -1,21 +0,0 @@ -{# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#} - -{% for host in slave_hosts %} -{{host}} -{% endfor %} http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/topology_mappings.data.j2 ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/topology_mappings.data.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/topology_mappings.data.j2 deleted file mode 100644 index 15034d6..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/topology_mappings.data.j2 +++ /dev/null @@ -1,24 +0,0 @@ -{# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 - # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-#} -[network_topology] -{% for host in all_hosts %} -{% if host in slave_hosts %} -{{host}}={{all_racks[loop.index-1]}} -{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}} -{% endif %} -{% endfor %} http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py index 6363c59..df7f0cd 100644 --- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py +++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py @@ -59,7 +59,7 @@ class ECSClient(Script): File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"), mode=0644, - content=StaticFile("/var/lib/ambari-agent/cache/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar") + content=StaticFile("/var/lib/ambari-agent/cache/stack-hooks/before-START/files/fast-hdfs-resource.jar") ) def setup_hadoop_env(self, env): http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py deleted file mode 100644 index 8bae9e6..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py +++ /dev/null @@ -1,37 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
- -""" - -from resource_management.libraries.script.hook import Hook -from shared_initialization import link_configs -from shared_initialization import setup_config -from shared_initialization import setup_stack_symlinks - -class AfterInstallHook(Hook): - - def hook(self, env): - import params - - env.set_params(params) - setup_stack_symlinks(self.stroutfile) - setup_config() - - link_configs(self.stroutfile) - -if __name__ == "__main__": - AfterInstallHook().execute() http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py deleted file mode 100644 index 34dfe70..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py +++ /dev/null @@ -1,109 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
- -""" - -import os - -from ambari_commons.constants import AMBARI_SUDO_BINARY -from resource_management.libraries.script import Script -from resource_management.libraries.script.script import get_config_lock_file -from resource_management.libraries.functions import default -from resource_management.libraries.functions import format -from resource_management.libraries.functions import conf_select -from resource_management.libraries.functions import stack_select -from resource_management.libraries.functions import format_jvm_option -from resource_management.libraries.functions.version import format_stack_version -from string import lower - -config = Script.get_config() -tmp_dir = Script.get_tmp_dir() - -dfs_type = default("/commandParams/dfs_type", "") -stack_root = Script.get_stack_root() - -is_parallel_execution_enabled = int(default("/agentConfigParams/agent/parallel_execution", 0)) == 1 -host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False) - -sudo = AMBARI_SUDO_BINARY - -stack_version_unformatted = config['hostLevelParams']['stack_version'] -stack_version_formatted = format_stack_version(stack_version_unformatted) - -# service name -service_name = config['serviceName'] - -# logsearch configuration -logsearch_logfeeder_conf = "/etc/ambari-logsearch-logfeeder/conf" - -agent_cache_dir = config['hostLevelParams']['agentCacheDir'] -service_package_folder = config['commandParams']['service_package_folder'] -logsearch_service_name = service_name.lower().replace("_", "-") -logsearch_config_file_name = 'input.config-' + logsearch_service_name + ".json" -logsearch_config_file_path = agent_cache_dir + "/" + service_package_folder + "/templates/" + logsearch_config_file_name + ".j2" -logsearch_config_file_exists = os.path.isfile(logsearch_config_file_path) - -# default hadoop params -mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*") -hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec") -hadoop_conf_empty_dir = None - -versioned_stack_root = format('{stack_root}/current') - -#security params -security_enabled = config['configurations']['cluster-env']['security_enabled'] - -#java params -java_home = config['hostLevelParams']['java_home'] - -#hadoop params -hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix'] -hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix'] -hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger'] - -jsvc_path = "/usr/lib/bigtop-utils" - -hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize'] -namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize'] -namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize'] -namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize'] -namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m") -namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m") - -jtnode_opt_newsize = "200m" -jtnode_opt_maxnewsize = "200m" -jtnode_heapsize = "1024m" -ttnode_heapsize = "1024m" - -dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize'] -mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce") -mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce") - -#users and groups -hdfs_user = 
config['configurations']['hadoop-env']['hdfs_user'] -user_group = config['configurations']['cluster-env']['user_group'] - -namenode_host = default("/clusterHostInfo/namenode_host", []) -has_namenode = not len(namenode_host) == 0 - -if has_namenode or dfs_type == 'HCFS': - hadoop_conf_dir = conf_select.get_hadoop_conf_dir() - -link_configs_lock_file = get_config_lock_file() -stack_select_lock_file = os.path.join(tmp_dir, "stack_select_lock_file") - -upgrade_suspended = default("/roleParams/upgrade_suspended", False) http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py deleted file mode 100644 index 0ffd5a5..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py +++ /dev/null @@ -1,140 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -""" -import os - -import ambari_simplejson as json -from ambari_jinja2 import Environment as JinjaEnvironment -from resource_management.core.logger import Logger -from resource_management.core.resources.system import Directory, File -from resource_management.core.source import InlineTemplate, Template -from resource_management.libraries.functions import conf_select -from resource_management.libraries.functions import stack_select -from resource_management.libraries.functions.default import default -from resource_management.libraries.functions.format import format -from resource_management.libraries.functions.version import compare_versions -from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock -from resource_management.libraries.resources.xml_config import XmlConfig -from resource_management.libraries.script import Script - - -def setup_stack_symlinks(struct_out_file): - """ - Invokes <stack-selector-tool> set all against a calculated fully-qualified, "normalized" version based on a - stack version, such as "2.3". This should always be called after a component has been - installed to ensure that all HDP pointers are correct. The stack upgrade logic does not - interact with this since it's done via a custom command and will not trigger this hook. - :return: - """ - import params - if params.upgrade_suspended: - Logger.warning("Skipping running stack-selector-tool because there is a suspended upgrade") - return - - if params.host_sys_prepped: - Logger.warning("Skipping running stack-selector-tool because this is a sys_prepped host. This may cause symlink pointers not to be created for HDP components installed later on top of an already sys_prepped host.") return - - # get the packages which the stack-select tool should be used on - stack_select_packages = stack_select.get_packages(stack_select.PACKAGE_SCOPE_INSTALL) - if stack_select_packages is None: - return - - json_version = load_version(struct_out_file) - - if not json_version: - Logger.info("There is no advertised version for this component stored in {0}".format(struct_out_file)) - return - - # On parallel command execution this should be executed by a single process at a time. - with FcntlBasedProcessLock(params.stack_select_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True): - for package in stack_select_packages: - stack_select.select(package, json_version) - - -def setup_config(): - import params - stackversion = params.stack_version_unformatted - Logger.info("FS Type: {0}".format(params.dfs_type)) - - is_hadoop_conf_dir_present = False - if hasattr(params, "hadoop_conf_dir") and params.hadoop_conf_dir is not None and os.path.exists(params.hadoop_conf_dir): - is_hadoop_conf_dir_present = True - else: - Logger.warning("Parameter hadoop_conf_dir is missing or directory does not exist. This is expected if this host does not have any Hadoop components.") - - if is_hadoop_conf_dir_present and (params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS'): - # create core-site only if the hadoop config directory exists - XmlConfig("core-site.xml", - conf_dir=params.hadoop_conf_dir, - configurations=params.config['configurations']['core-site'], - configuration_attributes=params.config['configuration_attributes']['core-site'], - owner=params.hdfs_user, - group=params.user_group, - only_if=format("ls {hadoop_conf_dir}")) - - Directory(params.logsearch_logfeeder_conf, - mode=0755, - cd_access='a', - create_parents=True - ) - - if params.logsearch_config_file_exists: - File(format("{logsearch_logfeeder_conf}/" + params.logsearch_config_file_name), - content=Template(params.logsearch_config_file_path,extra_imports=[default]) - ) - else: - Logger.warning('No logsearch configuration exists at ' + params.logsearch_config_file_path) - - -def load_version(struct_out_file): - """ - Load version from file. Made a separate method for testing - """ - json_version = None - try: - if os.path.exists(struct_out_file): - with open(struct_out_file, 'r') as fp: - json_info = json.load(fp) - json_version = json_info['version'] - except: - pass - - return json_version - - -def link_configs(struct_out_file): - """ - Links configs, only on a fresh install of HDP-2.3 and higher - """ - import params - - if not Script.is_stack_greater_or_equal("2.3"): - Logger.info("Can only link configs for HDP-2.3 and higher.") - return - - json_version = load_version(struct_out_file) - - if not json_version: - Logger.info("Could not load 'version' from {0}".format(struct_out_file)) - return - - # On parallel command execution this should be executed by a single process at a time. - with FcntlBasedProcessLock(params.link_configs_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True): - for k, v in conf_select.get_package_dirs().iteritems(): - conf_select.convert_conf_directories_to_symlinks(k, json_version, v) \ No newline at end of file http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/files/changeToSecureUid.sh ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/files/changeToSecureUid.sh deleted file mode 100644 index 08542c4..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/files/changeToSecureUid.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash -# -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -username=$1 -directories=$2 - -function find_available_uid() { - for ((i=1001; i<=2000; i++)) - do - grep -q $i /etc/passwd - if [ "$?" -ne 0 ] - then - newUid=$i - break - fi - done -} - -find_available_uid - -if [ $newUid -eq 0 ] -then - echo "Failed to find Uid between 1000 and 2000" - exit 1 -fi - -set -e - -dir_array=($(echo $directories | sed 's/,/\n/g')) -old_uid=$(id -u $username) -sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E" -echo "Changing uid of $username from $old_uid to $newUid" -echo "Changing directory permissions for ${dir_array[@]}" -$sudo_prefix usermod -u $newUid $username && for dir in ${dir_array[@]} ; do ls $dir 2> /dev/null && echo "Changing permission for $dir" && $sudo_prefix chown -Rh $newUid $dir ; done -exit 0 http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/hook.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/hook.py deleted file mode 100644 index c34be0b..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/hook.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. 
You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -""" - -from resource_management import * -from shared_initialization import * - -class BeforeAnyHook(Hook): - - def hook(self, env): - import params - env.set_params(params) - - setup_users() - if params.has_namenode or params.dfs_type == 'HCFS': - setup_hadoop_env() - setup_java() - -if __name__ == "__main__": - BeforeAnyHook().execute() - http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py deleted file mode 100644 index cee0519..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py +++ /dev/null @@ -1,254 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -""" - -import collections -import re -import os -import ast - -import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set. 
- -from resource_management.libraries.script import Script -from resource_management.libraries.functions import default -from resource_management.libraries.functions import format -from resource_management.libraries.functions import conf_select -from resource_management.libraries.functions import stack_select -from resource_management.libraries.functions import format_jvm_option -from resource_management.libraries.functions.is_empty import is_empty -from resource_management.libraries.functions.version import format_stack_version -from resource_management.libraries.functions.version import compare_versions -from resource_management.libraries.functions.expect import expect -from resource_management.libraries.functions import StackFeature -from resource_management.libraries.functions.stack_features import check_stack_feature -from resource_management.libraries.functions.stack_features import get_stack_feature_version -from ambari_commons.os_check import OSCheck -from ambari_commons.constants import AMBARI_SUDO_BINARY - - -config = Script.get_config() -tmp_dir = Script.get_tmp_dir() - -dfs_type = default("/commandParams/dfs_type", "") -stack_root = Script.get_stack_root() - -artifact_dir = format("{tmp_dir}/AMBARI-artifacts/") -jdk_name = default("/hostLevelParams/jdk_name", None) -java_home = config['hostLevelParams']['java_home'] -java_version = expect("/hostLevelParams/java_version", int) -jdk_location = config['hostLevelParams']['jdk_location'] - -sudo = AMBARI_SUDO_BINARY - -ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0] - -stack_version_unformatted = config['hostLevelParams']['stack_version'] -stack_version_formatted = format_stack_version(stack_version_unformatted) - -upgrade_type = Script.get_upgrade_type(default("/commandParams/upgrade_type", "")) -version = default("/commandParams/version", None) -# Handle upgrade and downgrade -if (upgrade_type is not None) and version: - stack_version_formatted = format_stack_version(version) - -ambari_java_home = default("/commandParams/ambari_java_home", None) -ambari_jdk_name = default("/commandParams/ambari_jdk_name", None) - -security_enabled = config['configurations']['cluster-env']['security_enabled'] -hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] - -# Some datanode settings -dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None) -dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None) -dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None) -dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None) -secure_dn_ports_are_in_use = False - -def get_port(address): - """ - Extracts port from the address like 0.0.0.0:1019 - """ - if address is None: - return None - m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address) - if m is not None: - return int(m.group(2)) - else: - return None - -def is_secure_port(port): - """ - Returns True if port is root-owned at *nix systems - """ - if port is not None: - return port < 1024 - else: - return False - -# hadoop default params -mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*") - -# upgrades would cause these directories to have a version instead of "current" -# which would cause a lot of problems when writing out hadoop-env.sh; instead -# force the use of "current" in the hook -hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000") -hadoop_home = 
stack_select.get_hadoop_dir("home") -hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec") - -hadoop_conf_empty_dir = None -hadoop_secure_dn_user = hdfs_user -hadoop_dir = "/etc/hadoop" -versioned_stack_root = format('{stack_root}/current') -hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir") -datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'] -is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']) - -if not security_enabled: - hadoop_secure_dn_user = '""' -else: - dfs_dn_port = get_port(dfs_dn_addr) - dfs_dn_http_port = get_port(dfs_dn_http_addr) - dfs_dn_https_port = get_port(dfs_dn_https_addr) - # We try to avoid inability to start datanode as a plain user due to usage of root-owned ports - if dfs_http_policy == "HTTPS_ONLY": - secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port) - elif dfs_http_policy == "HTTP_AND_HTTPS": - secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port) - else: # params.dfs_http_policy == "HTTP_ONLY" or not defined: - secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) - if secure_dn_ports_are_in_use: - hadoop_secure_dn_user = hdfs_user - else: - hadoop_secure_dn_user = '""' - -#hadoop params -hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix'] -hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix'] -hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger'] - -jsvc_path = "/usr/lib/bigtop-utils" - -hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize'] -namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize'] -namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize'] -namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize'] -namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m") -namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m") - -jtnode_opt_newsize = "200m" -jtnode_opt_maxnewsize = "200m" -jtnode_heapsize = "1024m" -ttnode_heapsize = "1024m" - -dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize'] -nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize'] -mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce") -mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce") -hadoop_env_sh_template = config['configurations']['hadoop-env']['content'] - -#users and groups -hbase_user = config['configurations']['hbase-env']['hbase_user'] -smoke_user = config['configurations']['cluster-env']['smokeuser'] -gmetad_user = config['configurations']['ganglia-env']["gmetad_user"] -gmond_user = config['configurations']['ganglia-env']["gmond_user"] -tez_user = config['configurations']['tez-env']["tez_user"] -oozie_user = config['configurations']['oozie-env']["oozie_user"] -falcon_user = config['configurations']['falcon-env']["falcon_user"] -ranger_user = config['configurations']['ranger-env']["ranger_user"] -zeppelin_user = config['configurations']['zeppelin-env']["zeppelin_user"] -zeppelin_group = 
config['configurations']['zeppelin-env']["zeppelin_group"] - -user_group = config['configurations']['cluster-env']['user_group'] - -ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", []) -namenode_host = default("/clusterHostInfo/namenode_host", []) -hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", []) -oozie_servers = default("/clusterHostInfo/oozie_server", []) -falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", []) -ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", []) -zeppelin_master_hosts = default("/clusterHostInfo/zeppelin_master_hosts", []) - -# get the correct version to use for checking stack features -version_for_stack_feature_checks = get_stack_feature_version(config) - -has_namenode = not len(namenode_host) == 0 -has_ganglia_server = not len(ganglia_server_hosts) == 0 -has_tez = 'tez-site' in config['configurations'] -has_hbase_masters = not len(hbase_master_hosts) == 0 -has_oozie_server = not len(oozie_servers) == 0 -has_falcon_server_hosts = not len(falcon_server_hosts) == 0 -has_ranger_admin = not len(ranger_admin_hosts) == 0 -has_zeppelin_master = not len(zeppelin_master_hosts) == 0 -stack_supports_zk_security = check_stack_feature(StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks) - -# HDFS High Availability properties -dfs_ha_enabled = False -dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None) -if dfs_ha_nameservices is None: - dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None) -dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None) -if dfs_ha_namenode_ids: - dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",") - dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list) - if dfs_ha_namenode_ids_array_len > 1: - dfs_ha_enabled = True - - -if has_namenode or dfs_type == 'HCFS': - hadoop_conf_dir = conf_select.get_hadoop_conf_dir() - hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure") - -hbase_tmp_dir = "/tmp/hbase-hbase" - -proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users") -ranger_group = config['configurations']['ranger-env']['ranger_group'] -dfs_cluster_administrators_group = config['configurations']['hdfs-site']["dfs.cluster.administrators"] - -sysprep_skip_create_users_and_groups = default("/configurations/cluster-env/sysprep_skip_create_users_and_groups", False) -ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False) -fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"] - -smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}") -if has_hbase_masters: - hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}") -#repo params -repo_info = config['hostLevelParams']['repo_info'] -service_repo_info = default("/hostLevelParams/service_repo_info",None) - -user_to_groups_dict = {} - -#Append new user-group mapping to the dict -try: - user_group_map = ast.literal_eval(config['hostLevelParams']['user_groups']) - for key in user_group_map.iterkeys(): - user_to_groups_dict[key] = user_group_map[key] -except ValueError: - print('User Group mapping (user_group) is missing in the hostLevelParams') - -user_to_gid_dict = collections.defaultdict(lambda:user_group) - -user_list = 
json.loads(config['hostLevelParams']['user_list']) -group_list = json.loads(config['hostLevelParams']['group_list']) -host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False) - -tez_am_view_acls = config['configurations']['tez-site']["tez.am.view-acls"] -override_uid = str(default("/configurations/cluster-env/override_uid", "true")).lower() - -# if NN HA on secure clutser, access Zookeper securely -if stack_supports_zk_security and dfs_ha_enabled and security_enabled: - hadoop_zkfc_opts=format("-Dzookeeper.sasl.client=true -Dzookeeper.sasl.client.username=zookeeper -Djava.security.auth.login.config={hadoop_conf_secure_dir}/hdfs_jaas.conf -Dzookeeper.sasl.clientconfig=Client") http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py deleted file mode 100644 index dbd1727..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py +++ /dev/null @@ -1,239 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
- -""" - -import os -import re -import getpass -import tempfile -from copy import copy -from resource_management.libraries.functions.version import compare_versions -from resource_management import * - -def setup_users(): - """ - Creates users before cluster installation - """ - import params - - should_create_users_and_groups = False - if params.host_sys_prepped: - should_create_users_and_groups = not params.sysprep_skip_create_users_and_groups - else: - should_create_users_and_groups = not params.ignore_groupsusers_create - - if should_create_users_and_groups: - for group in params.group_list: - Group(group, - ) - - for user in params.user_list: - User(user, - gid = params.user_to_gid_dict[user], - groups = params.user_to_groups_dict[user], - fetch_nonlocal_groups = params.fetch_nonlocal_groups - ) - - if params.override_uid == "true": - set_uid(params.smoke_user, params.smoke_user_dirs) - else: - Logger.info('Skipping setting uid for smoke user as host is sys prepped') - else: - Logger.info('Skipping creation of User and Group as host is sys prepped or ignore_groupsusers_create flag is on') - pass - - - if params.has_hbase_masters: - Directory (params.hbase_tmp_dir, - owner = params.hbase_user, - mode=0775, - create_parents = True, - cd_access="a", - ) - if params.override_uid == "true": - set_uid(params.hbase_user, params.hbase_user_dirs) - else: - Logger.info('Skipping setting uid for hbase user as host is sys prepped') - - if should_create_users_and_groups: - if params.has_namenode: - create_dfs_cluster_admins() - if params.has_tez and params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.3') >= 0: - create_tez_am_view_acls() - else: - Logger.info('Skipping setting dfs cluster admin and tez view acls as host is sys prepped') - -def create_dfs_cluster_admins(): - """ - dfs.cluster.administrators support format <comma-delimited list of usernames><space><comma-delimited list of group names> - """ - import params - - groups_list = create_users_and_groups(params.dfs_cluster_administrators_group) - - User(params.hdfs_user, - groups = params.user_to_groups_dict[params.hdfs_user] + groups_list, - fetch_nonlocal_groups = params.fetch_nonlocal_groups - ) - -def create_tez_am_view_acls(): - - """ - tez.am.view-acls support format <comma-delimited list of usernames><space><comma-delimited list of group names> - """ - import params - - if not params.tez_am_view_acls.startswith("*"): - create_users_and_groups(params.tez_am_view_acls) - -def create_users_and_groups(user_and_groups): - - import params - - parts = re.split('\s', user_and_groups) - if len(parts) == 1: - parts.append("") - - users_list = parts[0].strip(",").split(",") if parts[0] else [] - groups_list = parts[1].strip(",").split(",") if parts[1] else [] - - if users_list: - User(users_list, - fetch_nonlocal_groups = params.fetch_nonlocal_groups - ) - - if groups_list: - Group(copy(groups_list), - ) - return groups_list - -def set_uid(user, user_dirs): - """ - user_dirs - comma separated directories - """ - import params - - File(format("{tmp_dir}/changeUid.sh"), - content=StaticFile("changeToSecureUid.sh"), - mode=0555) - ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower() - Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs}"), - not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})")) - -def setup_hadoop_env(): - import params - stackversion = params.stack_version_unformatted - Logger.info("FS Type: {0}".format(params.dfs_type)) - 
-def setup_hadoop_env():
-  import params
-  stackversion = params.stack_version_unformatted
-  Logger.info("FS Type: {0}".format(params.dfs_type))
-
-  if params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS':
-    if params.security_enabled:
-      tc_owner = "root"
-    else:
-      tc_owner = params.hdfs_user
-
-    # create /etc/hadoop
-    Directory(params.hadoop_dir, mode=0755)
-
-    # HDP < 2.2 used a conf -> conf.empty symlink for /etc/hadoop/
-    if Script.is_stack_less_than("2.2"):
-      Directory(params.hadoop_conf_empty_dir, create_parents = True, owner="root",
-                group=params.user_group )
-
-      Link(params.hadoop_conf_dir, to=params.hadoop_conf_empty_dir,
-           not_if=format("ls {hadoop_conf_dir}"))
-
-    # write out hadoop-env.sh, but only if the directory exists
-    if os.path.exists(params.hadoop_conf_dir):
-      File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner,
-           group=params.user_group,
-           content=InlineTemplate(params.hadoop_env_sh_template))
-
-    # Create tmp dir for java.io.tmpdir
-    # Handle a situation when /tmp is set to noexec
-    Directory(params.hadoop_java_io_tmpdir,
-              owner=params.hdfs_user,
-              group=params.user_group,
-              mode=01777
-    )
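
The Link resource above is guarded by not_if="ls {hadoop_conf_dir}", so re-running the hook never clobbers an existing conf dir. A rough Python 3 equivalent of that idempotent guard (the paths in the example are made up):

  import os

  def ensure_conf_symlink(conf_dir, conf_empty_dir):
    # Only act when the conf path does not already resolve, mirroring
    # the not_if guard on the Link resource.
    if not os.path.exists(conf_dir):
      os.makedirs(conf_empty_dir, exist_ok=True)
      os.symlink(conf_empty_dir, conf_dir)

  ensure_conf_symlink('/tmp/demo/conf', '/tmp/demo/conf.empty')
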
-def setup_java():
-  """
-  Install jdk using specific params.
-  Install ambari jdk as well if the stack and ambari jdk are different.
-  """
-  import params
-  __setup_java(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name)
-  if params.ambari_java_home and params.ambari_java_home != params.java_home:
-    __setup_java(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name)
-
-def __setup_java(custom_java_home, custom_jdk_name):
-  """
-  Installs jdk using specific params that come from ambari-server
-  """
-  import params
-  java_exec = format("{custom_java_home}/bin/java")
-
-  if not os.path.isfile(java_exec):
-    if not params.jdk_name: # if custom jdk is used.
-      raise Fail(format("Unable to access {java_exec}. Confirm you have copied jdk to this host."))
-
-    jdk_curl_target = format("{tmp_dir}/{custom_jdk_name}")
-    java_dir = os.path.dirname(params.java_home)
-
-    Directory(params.artifact_dir,
-              create_parents = True,
-    )
-
-    File(jdk_curl_target,
-         content = DownloadSource(format("{jdk_location}/{custom_jdk_name}")),
-         not_if = format("test -f {jdk_curl_target}")
-    )
-
-    File(jdk_curl_target,
-         mode = 0755,
-    )
-
-    tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=params.tmp_dir)
-
-    try:
-      if params.jdk_name.endswith(".bin"):
-        chmod_cmd = ("chmod", "+x", jdk_curl_target)
-        install_cmd = format("cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
-      elif params.jdk_name.endswith(".gz"):
-        chmod_cmd = ("chmod","a+x", java_dir)
-        install_cmd = format("cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
-
-      Directory(java_dir
-      )
-
-      Execute(chmod_cmd,
-              sudo = True,
-      )
-
-      Execute(install_cmd,
-      )
-
-    finally:
-      Directory(tmp_java_dir, action="delete")
-
-    File(format("{custom_java_home}/bin/java"),
-         mode=0755,
-         cd_access="a",
-    )
-    Execute(('chmod', '-R', '755', params.java_home),
-            sudo = True,
-    )
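
Note that __setup_java() binds chmod_cmd and install_cmd only for ".bin" and ".gz" archives; any other extension would surface later as an UnboundLocalError at the Execute call. A standalone sketch of the same dispatch with an explicit failure path (the command strings are illustrative, not the Ambari API):

  def jdk_install_command(jdk_archive, tmp_dir, java_dir, sudo="sudo"):
    # Self-extracting .bin installers are executed; .gz tarballs are
    # unpacked; anything else is rejected up front instead of failing later.
    if jdk_archive.endswith(".bin"):
      return "cd %s && echo A | %s -noregister && %s cp -rp %s/* %s" % (
          tmp_dir, jdk_archive, sudo, tmp_dir, java_dir)
    elif jdk_archive.endswith(".gz"):
      return "cd %s && tar -xf %s && %s cp -rp %s/* %s" % (
          tmp_dir, jdk_archive, sudo, tmp_dir, java_dir)
    raise ValueError("unsupported JDK archive type: %s" % jdk_archive)

  print(jdk_install_command("/tmp/jdk-8u112-linux-x64.tar.gz", "/tmp/jdk_tmp", "/usr/jdk64"))
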
- -""" - -import sys -from resource_management import * -from shared_initialization import * -from repo_initialization import * - -class BeforeInstallHook(Hook): - - def hook(self, env): - import params - - self.run_custom_hook('before-ANY') - env.set_params(params) - - install_repos() - install_packages() - -if __name__ == "__main__": - BeforeInstallHook().execute() http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py deleted file mode 100644 index 50c5a40..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py +++ /dev/null @@ -1,115 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -""" - -from ambari_commons.constants import AMBARI_SUDO_BINARY -from resource_management.libraries.functions.version import format_stack_version, compare_versions -from resource_management.core.system import System -from resource_management.libraries.script.script import Script -from resource_management.libraries.functions import default, format -from resource_management.libraries.functions.expect import expect - -config = Script.get_config() -tmp_dir = Script.get_tmp_dir() -sudo = AMBARI_SUDO_BINARY - -stack_version_unformatted = config['hostLevelParams']['stack_version'] -agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability'] -agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int) -stack_version_formatted = format_stack_version(stack_version_unformatted) - -#users and groups -hbase_user = config['configurations']['hbase-env']['hbase_user'] -smoke_user = config['configurations']['cluster-env']['smokeuser'] -gmetad_user = config['configurations']['ganglia-env']["gmetad_user"] -gmond_user = config['configurations']['ganglia-env']["gmond_user"] -tez_user = config['configurations']['tez-env']["tez_user"] - -user_group = config['configurations']['cluster-env']['user_group'] -proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users") - -hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix'] - -# repo templates -repo_rhel_suse = config['configurations']['cluster-env']['repo_suse_rhel_template'] -repo_ubuntu = config['configurations']['cluster-env']['repo_ubuntu_template'] - -#hosts -hostname = config["hostname"] -ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0] -rm_host = default("/clusterHostInfo/rm_host", []) -slave_hosts = 
default("/clusterHostInfo/slave_hosts", []) -oozie_servers = default("/clusterHostInfo/oozie_server", []) -hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", []) -hive_server_host = default("/clusterHostInfo/hive_server_host", []) -hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", []) -hs_host = default("/clusterHostInfo/hs_host", []) -jtnode_host = default("/clusterHostInfo/jtnode_host", []) -namenode_host = default("/clusterHostInfo/namenode_host", []) -zk_hosts = default("/clusterHostInfo/zookeeper_hosts", []) -ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", []) -storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", []) -falcon_host = default('/clusterHostInfo/falcon_server_hosts', []) - -has_sqoop_client = 'sqoop-env' in config['configurations'] -has_namenode = not len(namenode_host) == 0 -has_hs = not len(hs_host) == 0 -has_resourcemanager = not len(rm_host) == 0 -has_slaves = not len(slave_hosts) == 0 -has_oozie_server = not len(oozie_servers) == 0 -has_hcat_server_host = not len(hcat_server_hosts) == 0 -has_hive_server_host = not len(hive_server_host) == 0 -has_hbase_masters = not len(hbase_master_hosts) == 0 -has_zk_host = not len(zk_hosts) == 0 -has_ganglia_server = not len(ganglia_server_hosts) == 0 -has_storm_server = not len(storm_server_hosts) == 0 -has_falcon_server = not len(falcon_host) == 0 -has_tez = 'tez-site' in config['configurations'] - -is_namenode_master = hostname in namenode_host -is_jtnode_master = hostname in jtnode_host -is_rmnode_master = hostname in rm_host -is_hsnode_master = hostname in hs_host -is_hbase_master = hostname in hbase_master_hosts -is_slave = hostname in slave_hosts -if has_ganglia_server: - ganglia_server_host = ganglia_server_hosts[0] - -hbase_tmp_dir = "/tmp/hbase-hbase" - -#security params -security_enabled = config['configurations']['cluster-env']['security_enabled'] - -#java params -java_home = config['hostLevelParams']['java_home'] -artifact_dir = format("{tmp_dir}/AMBARI-artifacts/") -jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user -jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user -jce_location = config['hostLevelParams']['jdk_location'] -jdk_location = config['hostLevelParams']['jdk_location'] -ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False) -host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False) - -smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}") -if has_hbase_masters: - hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}") -#repo params -repo_info = config['hostLevelParams']['repo_info'] -service_repo_info = default("/hostLevelParams/service_repo_info",None) - -repo_file = default("/repositoryFile", None) http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py deleted file mode 100644 index 357bc62..0000000 --- 
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py
deleted file mode 100644
index 357bc62..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py
+++ /dev/null
@@ -1,76 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from ambari_commons.os_check import OSCheck
-from resource_management.libraries.resources.repository import Repository
-from resource_management.libraries.functions.repository_util import create_repo_files, CommandRepository
-from resource_management.core.logger import Logger
-import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
-
-# components_list = repoName + postfix
-_UBUNTU_REPO_COMPONENTS_POSTFIX = ["main"]
-
-def _alter_repo(action, repo_string, repo_template):
-  """
-  @param action: "delete" or "create"
-  @param repo_string: e.g. "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]"
-  """
-  repo_dicts = json.loads(repo_string)
-
-  if not isinstance(repo_dicts, list):
-    repo_dicts = [repo_dicts]
-
-  if 0 == len(repo_dicts):
-    Logger.info("Repository list is empty. Ambari may not be managing the repositories.")
-  else:
-    Logger.info("Initializing {0} repositories".format(str(len(repo_dicts))))
-
-  for repo in repo_dicts:
-    if not 'baseUrl' in repo:
-      repo['baseUrl'] = None
-    if not 'mirrorsList' in repo:
-      repo['mirrorsList'] = None
-
-    ubuntu_components = [ repo['repoName'] ] + _UBUNTU_REPO_COMPONENTS_POSTFIX
-
-    Repository(repo['repoId'],
-               action = action,
-               base_url = repo['baseUrl'],
-               mirror_list = repo['mirrorsList'],
-               repo_file_name = repo['repoName'],
-               repo_template = repo_template,
-               components = ubuntu_components, # ubuntu specific
-    )
-
-def install_repos():
-  import params
-  if params.host_sys_prepped:
-    return
-
-  template = params.repo_rhel_suse if OSCheck.is_suse_family() or OSCheck.is_redhat_family() else params.repo_ubuntu
-
-  # use this newer way of specifying repositories, if available
-  if params.repo_file is not None:
-    create_repo_files(template, CommandRepository(params.repo_file))
-    return
-
-  _alter_repo("create", params.repo_info, template)
-
-  if params.service_repo_info:
-    _alter_repo("create", params.service_repo_info, template)
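
_alter_repo() has to accept repo_info as either a single JSON object or a JSON array. The normalization step on its own, using the stdlib json module in place of ambari_simplejson:

  import json

  def parse_repo_string(repo_string):
    # Normalize to a list and default the optional URL fields, as
    # _alter_repo does before creating Repository resources.
    repos = json.loads(repo_string)
    if not isinstance(repos, list):
      repos = [repos]
    for repo in repos:
      repo.setdefault('baseUrl', None)
      repo.setdefault('mirrorsList', None)
    return repos

  print(parse_repo_string('{"repoId": "HDP-2.0._", "repoName": "HDP"}'))
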
- -""" - -import os - -from resource_management.libraries.functions import stack_tools -from resource_management.libraries.functions.version import compare_versions -from resource_management.core.resources.packaging import Package - -def install_packages(): - import params - if params.host_sys_prepped: - return - - packages = ['unzip', 'curl'] - if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0: - stack_selector_package = stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME) - packages.append(stack_selector_package) - Package(packages, - retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability, - retry_count=params.agent_stack_retry_count) http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py deleted file mode 100644 index 14b9d99..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py +++ /dev/null @@ -1,29 +0,0 @@ -""" -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -""" - -from resource_management import * - -class BeforeRestartHook(Hook): - - def hook(self, env): - self.run_custom_hook('before-START') - -if __name__ == "__main__": - BeforeRestartHook().execute() - http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh deleted file mode 100644 index 68aa96d..0000000 --- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env bash -# -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py
deleted file mode 100644
index 14b9d99..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py
+++ /dev/null
@@ -1,29 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements. See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership. The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License. You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-class BeforeRestartHook(Hook):
-
-  def hook(self, env):
-    self.run_custom_hook('before-START')
-
-if __name__ == "__main__":
-  BeforeRestartHook().execute()
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh
deleted file mode 100644
index 68aa96d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export bin_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-# the old marker was a single file; if present, replace it with the new marker directory
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  /var/lib/ambari-agent/ambari-sudo.sh rm -f ${mark_file}
-  /var/lib/ambari-agent/ambari-sudo.sh mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    /var/lib/ambari-agent/ambari-sudo.sh su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:$bin_dir ; yes Y | hdfs --config ${conf_dir} ${command}"
-    (( EXIT_CODE = $EXIT_CODE | $? ))
-  else
-    echo "ERROR: NameNode directory(s) is non-empty. Will not format the NameNode. List of non-empty NameNode dirs: ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. NameNode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/fast-hdfs-resource.jar
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/fast-hdfs-resource.jar b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/fast-hdfs-resource.jar
deleted file mode 100644
index 6c993bf..0000000
Binary files a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/fast-hdfs-resource.jar and /dev/null differ
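
checkForFormat.sh only allows "hdfs namenode -format" to run when every configured NameNode directory is empty. The emptiness test, re-sketched in Python purely for illustration:

  import os

  def dirs_blocking_format(name_dirs):
    # name_dirs is the comma-separated list the script receives; any
    # directory that already has entries makes formatting unsafe.
    return [d for d in name_dirs.split(',')
            if os.path.isdir(d) and os.listdir(d)]

  print(dirs_blocking_format('/hadoop/hdfs/namenode,/data/hdfs/namenode'))
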
