AMBARI-13251. RU - HDFS_Client restart and hdp-select causes dfs_data_dir_mount.hist to be lost, move file to static location (alejandro)
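Summary of the change: the DataNode mount-point history file (dfs_data_dir_mount.hist) previously lived under /etc/hadoop/conf, a symlinked, version-specific location that hdp-select repoints during a Rolling Upgrade, so the file was lost after an HDFS_Client restart. The file now lives at the static path /var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist, and the agent migrates any existing file on startup. Below is a minimal, illustrative sketch of that one-time migration; it is simplified from Controller.move_data_dir_mount_file in the diff (the actual patch gates the move on the agent version via compare_versions and shells out to "mkdir -p" and "mv" via subprocess):

import logging
import os
import shutil

logger = logging.getLogger(__name__)

# Old location: resolved through the /etc/hadoop/conf symlink, which changes during RU.
SOURCE_FILE = "/etc/hadoop/conf/dfs_data_dir_mount.hist"
# New static location under the ambari-agent data directory.
DESTINATION_FILE = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"

def move_data_dir_mount_file(source=SOURCE_FILE, destination=DESTINATION_FILE):
    """One-time, best-effort migration of the mount history file to its static path."""
    try:
        # Only migrate if the old file exists and the new one has not been written yet.
        if os.path.exists(source) and not os.path.exists(destination):
            dest_dir = os.path.dirname(destination)
            if not os.path.isdir(dest_dir):
                os.makedirs(dest_dir)  # the patch itself runs "mkdir -p" via subprocess
            logger.info("Moving Data Dir Mount History file: %s -> %s", source, destination)
            shutil.move(source, destination)  # the patch itself runs "mv" via subprocess
    except Exception as e:
        # Failure is non-fatal; the DataNode scripts rewrite the file on the next configure/start.
        logger.info("Exception in move_data_dir_mount_file(). Error: %s", str(e))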
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6a5a6a39
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6a5a6a39
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6a5a6a39

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 6a5a6a3963035a9967dc82cd5a1795c23dca2bc3
Parents: 4d44ec7
Author: Alejandro Fernandez <[email protected]>
Authored: Mon Sep 28 11:13:19 2015 -0700
Committer: Alejandro Fernandez <[email protected]>
Committed: Mon Sep 28 11:16:21 2015 -0700

----------------------------------------------------------------------
 .../src/main/python/ambari_agent/Controller.py  | 28 ++++++++++++++++
 .../resource_management/TestDatanodeHelper.py   |  4 +--
 .../server/upgrade/UpgradeCatalog212.java       | 18 +++++++++++
 .../HDFS/2.1.0.2.0/configuration/hadoop-env.xml |  7 ----
 .../alerts/alert_datanode_unmounted_data_dir.py | 15 +++------
 .../2.1.0.2.0/package/scripts/hdfs_datanode.py  | 12 +++++--
 .../2.1.0.2.0/package/scripts/params_linux.py   |  2 +-
 .../services/HDFS/configuration/hadoop-env.xml  |  5 ---
 .../0.8/services/HDFS/package/scripts/params.py |  2 +-
 .../services/HDFS/configuration/hadoop-env.xml  |  7 ----
 .../test_alert_datanode_unmounted_data_dir.py   | 34 ++++----------------
 .../python/stacks/2.0.6/HDFS/test_datanode.py   | 16 +++++++--
 .../stacks/2.0.6/configs/altfs_plus_hdfs.json   |  3 +-
 .../stacks/2.0.6/configs/client-upgrade.json    |  1 -
 .../python/stacks/2.0.6/configs/default.json    |  3 +-
 .../2.0.6/configs/default_hive_nn_ha.json       |  3 +-
 .../2.0.6/configs/default_hive_nn_ha_2.json     |  3 +-
 .../2.0.6/configs/default_hive_non_hdfs.json    |  3 +-
 .../2.0.6/configs/default_no_install.json       |  3 +-
 .../2.0.6/configs/default_oozie_mysql.json      |  3 +-
 .../default_update_exclude_file_only.json       |  3 +-
 .../python/stacks/2.0.6/configs/flume_22.json   |  3 +-
 .../python/stacks/2.0.6/configs/hbase-2.2.json  |  1 -
 .../stacks/2.0.6/configs/hbase-check-2.2.json   |  1 -
 .../stacks/2.0.6/configs/hbase-rs-2.2.json      |  1 -
 .../stacks/2.0.6/configs/hbase_no_phx.json      |  3 +-
 .../stacks/2.0.6/configs/hbase_with_phx.json    |  3 +-
 .../python/stacks/2.0.6/configs/nn_ru_lzo.json  |  1 -
 .../2.0.6/configs/oozie_existing_sqla.json      |  3 +-
 .../2.0.6/configs/ranger-namenode-start.json    |  1 -
 .../python/stacks/2.0.6/configs/secured.json    |  3 +-
 .../stacks/2.1/configs/client-upgrade.json      |  1 -
 .../test/python/stacks/2.2/configs/default.json |  1 -
 .../2.2/configs/default_custom_path_config.json |  1 -
 .../stacks/2.2/configs/falcon-upgrade.json      |  1 -
 .../python/stacks/2.2/configs/hive-upgrade.json |  1 -
 .../journalnode-upgrade-hdfs-secure.json        |  1 -
 .../stacks/2.2/configs/journalnode-upgrade.json |  1 -
 .../python/stacks/2.2/configs/knox_upgrade.json |  1 -
 .../stacks/2.2/configs/oozie-downgrade.json     |  1 -
 .../stacks/2.2/configs/oozie-upgrade.json       |  1 -
 .../2.2/configs/pig-service-check-secure.json   |  1 -
 .../2.2/configs/ranger-admin-default.json       |  1 -
 .../2.2/configs/ranger-admin-upgrade.json       |  1 -
 .../2.2/configs/ranger-usersync-upgrade.json    |  1 -
 .../2.2/configs/spark-job-history-server.json   |  1 -
 .../stacks/2.3/configs/hbase_default.json       |  1 -
 .../python/stacks/2.3/configs/hbase_secure.json |  1 -
 .../stacks/2.3/configs/spark_default.json       |  1 -
 .../ru_execute_tasks_namenode_prepare.json      |  1 -
 .../data/configurations/config_versions.json    |  3 --
 .../data/configurations/service_versions.json   |  3 --
 .../data/stacks/HDP-2.2/configurations.json     | 14 --------
 ambari-web/app/data/BIGTOP/site_properties.js   | 12 -------
 ambari-web/app/data/HDP2/site_properties.js     |  8 -----
 55
files changed, 97 insertions(+), 157 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-agent/src/main/python/ambari_agent/Controller.py ---------------------------------------------------------------------- diff --git a/ambari-agent/src/main/python/ambari_agent/Controller.py b/ambari-agent/src/main/python/ambari_agent/Controller.py index 8746172..74a8eac 100644 --- a/ambari-agent/src/main/python/ambari_agent/Controller.py +++ b/ambari-agent/src/main/python/ambari_agent/Controller.py @@ -28,6 +28,7 @@ import threading import urllib2 import pprint from random import randint +import subprocess import hostname import security @@ -45,6 +46,8 @@ from ambari_agent.ClusterConfiguration import ClusterConfiguration from ambari_agent.RecoveryManager import RecoveryManager from ambari_agent.HeartbeatHandlers import HeartbeatStopHandlers, bind_signal_handlers from ambari_agent.ExitHelper import ExitHelper +from resource_management.libraries.functions.version import compare_versions + logger = logging.getLogger(__name__) AGENT_AUTO_RESTART_EXIT_CODE = 77 @@ -96,6 +99,8 @@ class Controller(threading.Thread): self.cluster_configuration = ClusterConfiguration(cluster_config_cache_dir) + self.move_data_dir_mount_file() + self.alert_scheduler_handler = AlertSchedulerHandler(alerts_cache_dir, stacks_cache_dir, common_services_cache_dir, host_scripts_cache_dir, self.cluster_configuration, config) @@ -435,6 +440,29 @@ class Controller(threading.Thread): logger.debug("LiveStatus.CLIENT_COMPONENTS" + str(LiveStatus.CLIENT_COMPONENTS)) logger.debug("LiveStatus.COMPONENTS" + str(LiveStatus.COMPONENTS)) + def move_data_dir_mount_file(self): + """ + In Ambari 2.1.2, we moved the dfs_data_dir_mount.hist to a static location + because /etc/hadoop/conf points to a symlink'ed location that would change during + Rolling Upgrade. + """ + try: + if compare_versions(self.version, "2.1.2") >= 0: + source_file = "/etc/hadoop/conf/dfs_data_dir_mount.hist" + destination_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist" + if os.path.exists(source_file) and not os.path.exists(destination_file): + command = "mkdir -p %s" % os.path.dirname(destination_file) + logger.info("Moving Data Dir Mount History file. Executing command: %s" % command) + return_code = subprocess.call(command, shell=True) + logger.info("Return code: %d" % return_code) + + command = "mv %s %s" % (source_file, destination_file) + logger.info("Moving Data Dir Mount History file. Executing command: %s" % command) + return_code = subprocess.call(command, shell=True) + logger.info("Return code: %d" % return_code) + except Exception, e: + logger.info("Exception in move_data_dir_mount_file(). 
Error: {0}".format(str(e))) + def main(argv=None): # Allow Ctrl-C http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py ---------------------------------------------------------------------- diff --git a/ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py b/ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py index 70539ac..2d9996c 100644 --- a/ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py +++ b/ambari-agent/src/test/python/resource_management/TestDatanodeHelper.py @@ -59,7 +59,7 @@ class TestDatanodeHelper(TestCase): grid2 = "/grid/2/data" params = StubParams() - params.data_dir_mount_file = "/etc/hadoop/conf/dfs_data_dir_mount.hist" + params.data_dir_mount_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist" params.dfs_data_dir = "{0},{1},{2}".format(grid0, grid1, grid2) @@ -70,7 +70,7 @@ class TestDatanodeHelper(TestCase): Test that the data dirs are normalized by removing leading and trailing whitespace, and case sensitive. """ params = StubParams() - params.data_dir_mount_file = "/etc/hadoop/conf/dfs_data_dir_mount.hist" + params.data_dir_mount_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist" params.dfs_data_dir = "/grid/0/data , /grid/1/data ,/GRID/2/Data/" # Function under test http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java index 37a87ab..342e280 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java @@ -199,6 +199,7 @@ public class UpgradeCatalog212 extends AbstractUpgradeCatalog { updateHbaseAndClusterConfigurations(); updateKafkaConfigurations(); updateStormConfigs(); + removeDataDirMountConfig(); } protected void updateStormConfigs() throws AmbariException { @@ -381,4 +382,21 @@ public class UpgradeCatalog212 extends AbstractUpgradeCatalog { dbAccessor.addColumn(HOST_ROLE_COMMAND_TABLE, new DBColumnInfo(HOST_ROLE_COMMAND_SKIP_COLUMN, Integer.class, 1, 0, false)); } + + protected void removeDataDirMountConfig() throws AmbariException { + Set<String> properties = new HashSet<>(); + properties.add("dfs.datanode.data.dir.mount.file"); + + AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class); + Clusters clusters = ambariManagementController.getClusters(); + + if (clusters != null) { + Map<String, Cluster> clusterMap = clusters.getClusters(); + if (clusterMap != null && !clusterMap.isEmpty()) { + for (final Cluster cluster : clusterMap.values()) { + removeConfigurationPropertiesFromCluster(cluster, "hadoop-env", properties); + } + } + } + } } http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml index 8cae26c..5d1d976 
100644 --- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml +++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml @@ -174,13 +174,6 @@ <description>User to run HDFS as</description> </property> <property> - <name>dfs.datanode.data.dir.mount.file</name> - <value>/etc/hadoop/conf/dfs_data_dir_mount.hist</value> - <display-name>File that stores mount point for each data dir</display-name> - <description>File path that contains the last known mount point for each data dir. This file is used to avoid creating a DFS data dir on the root drive (and filling it up) if a path was previously mounted on a drive.</description> - </property> - - <property> <name>hdfs_user_nofile_limit</name> <value>128000</value> <description>Max open files limit setting for HDFS user.</description> http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_datanode_unmounted_data_dir.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_datanode_unmounted_data_dir.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_datanode_unmounted_data_dir.py index 2912406..f95daac 100644 --- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_datanode_unmounted_data_dir.py +++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_datanode_unmounted_data_dir.py @@ -30,7 +30,7 @@ RESULT_STATE_CRITICAL = 'CRITICAL' RESULT_STATE_UNKNOWN = 'UNKNOWN' DFS_DATA_DIR = '{{hdfs-site/dfs.datanode.data.dir}}' -DATA_DIR_MOUNT_FILE = '{{hadoop-env/dfs.datanode.data.dir.mount.file}}' +DATA_DIR_MOUNT_FILE = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist" logger = logging.getLogger() @@ -62,23 +62,16 @@ def execute(configurations={}, parameters={}, host_name=None): if DFS_DATA_DIR not in configurations: return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(DFS_DATA_DIR)]) - if DATA_DIR_MOUNT_FILE not in configurations: - return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script'.format(DATA_DIR_MOUNT_FILE)]) - dfs_data_dir = configurations[DFS_DATA_DIR] - data_dir_mount_file = configurations[DATA_DIR_MOUNT_FILE] if dfs_data_dir is None: return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script and the value is null'.format(DFS_DATA_DIR)]) - if data_dir_mount_file is None: - return (RESULT_STATE_UNKNOWN, ['{0} is a required parameter for the script and the value is null'.format(DATA_DIR_MOUNT_FILE)]) - data_dir_mount_file_exists = True # This follows symlinks and will return False for a broken link (even in the middle of the linked list) - if not os.path.exists(data_dir_mount_file): + if not os.path.exists(DATA_DIR_MOUNT_FILE): data_dir_mount_file_exists = False - warnings.append("File not found, {0} .".format(data_dir_mount_file)) + warnings.append("File not found, {0} .".format(DATA_DIR_MOUNT_FILE)) valid_data_dirs = set() # data dirs that have been normalized data_dirs_not_exist = set() # data dirs that do not exist @@ -129,7 +122,7 @@ def execute(configurations={}, parameters={}, host_name=None): class Params: def __init__(self, mount_file): self.data_dir_mount_file = mount_file - params = Params(data_dir_mount_file) + params = Params(DATA_DIR_MOUNT_FILE) # This dictionary 
contains the expected values of <data_dir, mount_point> # Hence, we only need to analyze the data dirs that are currently on the root partition http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py index 34ec8cd..e225927 100644 --- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py +++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_datanode.py @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. """ - +import os from resource_management import * from resource_management.libraries.functions.dfs_datanode_helper import handle_dfs_data_dir from utils import service @@ -48,11 +48,19 @@ def datanode(action=None): owner=params.hdfs_user, group=params.user_group) + if not os.path.isdir(os.path.dirname(params.data_dir_mount_file)): + Directory(os.path.dirname(params.data_dir_mount_file), + recursive=True, + mode=0755, + owner=params.hdfs_user, + group=params.user_group) + + data_dir_to_mount_file_content = handle_dfs_data_dir(create_dirs, params) File(params.data_dir_mount_file, owner=params.hdfs_user, group=params.user_group, mode=0644, - content=handle_dfs_data_dir(create_dirs, params) + content=data_dir_to_mount_file_content ) elif action == "start" or action == "stop": http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py index 4302976..563c234 100644 --- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py +++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py @@ -236,7 +236,7 @@ fs_checkpoint_dirs = default("/configurations/hdfs-site/dfs.namenode.checkpoint. 
dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir'] dfs_data_dir = ",".join([re.sub(r'^\[.+\]', '', dfs_dir.strip()) for dfs_dir in dfs_data_dir.split(",")]) -data_dir_mount_file = config['configurations']['hadoop-env']['dfs.datanode.data.dir.mount.file'] +data_dir_mount_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist" # HDFS High Availability properties dfs_ha_enabled = False http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hadoop-env.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hadoop-env.xml index c6dd202..2737bf4 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hadoop-env.xml +++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hadoop-env.xml @@ -83,11 +83,6 @@ <property-type>USER</property-type> <description>User to run HDFS as</description> </property> - <property> - <name>dfs.datanode.data.dir.mount.file</name> - <value>/etc/hadoop/conf/dfs_data_dir_mount.hist</value> - <description>File path that contains the last known mount point for each data dir. This file is used to avoid creating a DFS data dir on the root drive (and filling it up) if a path was previously mounted on a drive.</description> - </property> <!-- hadoop-env.sh --> <property> http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py index fb0a4db..e302685 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py @@ -140,7 +140,7 @@ namenode_formatted_mark_dir = format("/var/lib/hdfs/namenode/formatted/") fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir'] dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir'] -data_dir_mount_file = config['configurations']['hadoop-env']['dfs.datanode.data.dir.mount.file'] +data_dir_mount_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist" dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None) dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None) http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml index 243b4ff..8433005 100644 --- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml +++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/services/HDFS/configuration/hadoop-env.xml @@ -46,13 +46,6 @@ </value-attributes> 
</property> <property> - <name>dfs.datanode.data.dir.mount.file</name> - <value>file:///c:/hadoop/conf/dfs_data_dir_mount.hist</value> - <display-name>File that stores mount point for each data dir</display-name> - <description>File path that contains the last known mount point for each data dir. This file is used to avoid creating a DFS data dir on the root drive (and filling it up) if a path was previously mounted on a drive.</description> - </property> - - <property> <name>proxyuser_group</name> <deleted>true</deleted> </property> http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_alert_datanode_unmounted_data_dir.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_alert_datanode_unmounted_data_dir.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_alert_datanode_unmounted_data_dir.py index 4406231..16400b1 100644 --- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_alert_datanode_unmounted_data_dir.py +++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_alert_datanode_unmounted_data_dir.py @@ -30,7 +30,7 @@ from stacks.utils.RMFTestCase import * import resource_management.libraries.functions.file_system COMMON_SERVICES_ALERTS_DIR = "HDFS/2.1.0.2.0/package/alerts" -DATA_DIR_MOUNT_HIST_FILE_PATH = "/etc/hadoop/conf/dfs_data_dir_mount.hist" +DATA_DIR_MOUNT_HIST_FILE_PATH = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist" file_path = os.path.dirname(os.path.abspath(__file__)) file_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(file_path))))) @@ -69,23 +69,6 @@ class TestAlertDataNodeUnmountedDataDir(RMFTestCase): "{{hdfs-site/dfs.datanode.data.dir}}": "" } [status, messages] = alert.execute(configurations=configs) - self.assertEqual(status, RESULT_STATE_UNKNOWN) - self.assertTrue(messages is not None and len(messages) == 1) - self.assertTrue('is a required parameter for the script' in messages[0]) - - configs = { - "{{hadoop-env/dfs.datanode.data.dir.mount.file}}": DATA_DIR_MOUNT_HIST_FILE_PATH - } - [status, messages] = alert.execute(configurations=configs) - self.assertEqual(status, RESULT_STATE_UNKNOWN) - self.assertTrue(messages is not None and len(messages) == 1) - self.assertTrue('is a required parameter for the script' in messages[0]) - - configs = { - "{{hdfs-site/dfs.datanode.data.dir}}": "", - "{{hadoop-env/dfs.datanode.data.dir.mount.file}}": DATA_DIR_MOUNT_HIST_FILE_PATH - } - [status, messages] = alert.execute(configurations=configs) self.assertNotEqual(status, RESULT_STATE_UNKNOWN) @patch("resource_management.libraries.functions.file_system.get_mount_point_for_dir") @@ -97,8 +80,7 @@ class TestAlertDataNodeUnmountedDataDir(RMFTestCase): does not exist. """ configs = { - "{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data", - "{{hadoop-env/dfs.datanode.data.dir.mount.file}}": DATA_DIR_MOUNT_HIST_FILE_PATH + "{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data" } # Mock calls @@ -121,8 +103,7 @@ class TestAlertDataNodeUnmountedDataDir(RMFTestCase): and this coincides with the expected values. 
""" configs = { - "{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data", - "{{hadoop-env/dfs.datanode.data.dir.mount.file}}": DATA_DIR_MOUNT_HIST_FILE_PATH + "{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data" } # Mock calls @@ -147,8 +128,7 @@ class TestAlertDataNodeUnmountedDataDir(RMFTestCase): Test that the status is OK when the mount points match the expected values. """ configs = { - "{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data", - "{{hadoop-env/dfs.datanode.data.dir.mount.file}}": DATA_DIR_MOUNT_HIST_FILE_PATH + "{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data" } # Mock calls @@ -174,8 +154,7 @@ class TestAlertDataNodeUnmountedDataDir(RMFTestCase): and at least one data dir is on a mount and at least one data dir is on the root partition. """ configs = { - "{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data,/grid/3/data", - "{{hadoop-env/dfs.datanode.data.dir.mount.file}}": DATA_DIR_MOUNT_HIST_FILE_PATH + "{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data,/grid/3/data" } # Mock calls @@ -199,8 +178,7 @@ class TestAlertDataNodeUnmountedDataDir(RMFTestCase): became unmounted. """ configs = { - "{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data,/grid/3/data", - "{{hadoop-env/dfs.datanode.data.dir.mount.file}}": DATA_DIR_MOUNT_HIST_FILE_PATH + "{{hdfs-site/dfs.datanode.data.dir}}": "/grid/0/data,/grid/1/data,/grid/2/data,/grid/3/data" } # Mock calls http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py index 1b4bec6..0ec1104 100644 --- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py +++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py @@ -349,6 +349,12 @@ class TestDatanode(RMFTestCase): mode = 0751, recursive = True, ) + self.assertResourceCalled('Directory', '/var/lib/ambari-agent/data/datanode', + owner = 'hdfs', + group = 'hadoop', + mode = 0755, + recursive = True + ) self.assertResourceCalled('Directory', '/hadoop/hdfs/data', owner = 'hdfs', ignore_failures = True, @@ -358,7 +364,7 @@ class TestDatanode(RMFTestCase): cd_access='a' ) content = resource_management.libraries.functions.dfs_datanode_helper.DATA_DIR_TO_MOUNT_HEADER - self.assertResourceCalled('File', '/etc/hadoop/conf/dfs_data_dir_mount.hist', + self.assertResourceCalled('File', '/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist', owner = 'hdfs', group = 'hadoop', mode = 0644, @@ -421,6 +427,12 @@ class TestDatanode(RMFTestCase): mode = 0751, recursive = True, ) + self.assertResourceCalled('Directory', '/var/lib/ambari-agent/data/datanode', + owner = 'hdfs', + group = 'hadoop', + mode = 0755, + recursive = True + ) self.assertResourceCalled('Directory', '/hadoop/hdfs/data', owner = 'hdfs', ignore_failures = True, @@ -430,7 +442,7 @@ class TestDatanode(RMFTestCase): cd_access='a' ) content = resource_management.libraries.functions.dfs_datanode_helper.DATA_DIR_TO_MOUNT_HEADER - self.assertResourceCalled('File', '/etc/hadoop/conf/dfs_data_dir_mount.hist', + self.assertResourceCalled('File', '/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist', owner = 'hdfs', group = 'hadoop', mode = 0644, 
http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json b/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json index f6c9bb4..3a5a1c6 100644 --- a/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json +++ b/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json @@ -451,8 +451,7 @@ "dtnode_heapsize": "1024m", "proxyuser_group": "users", "hadoop_heapsize": "1024", - "hadoop_pid_dir_prefix": "/var/run/hadoop", - "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist" + "hadoop_pid_dir_prefix": "/var/run/hadoop" }, "hive-env": { "hcat_pid_dir": "/var/run/webhcat", http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json b/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json index c13e5c9..8e9f81f 100644 --- a/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json +++ b/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json @@ -378,7 +378,6 @@ "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. 
Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HAD OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. 
/tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/ java/*mysql* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n if [ -d \"/etc/tez/conf/\" ]; then\n # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_ OPTS\"", "hdfs_user": "hdfs", "namenode_opt_newsize": "200m", - "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", "hadoop_root_logger": "INFO,RFA", "hadoop_heapsize": "1024", "namenode_opt_maxpermsize": "256m", http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/default.json ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json index a0da7f0..57365b6 100644 --- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json +++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json @@ -507,8 +507,7 @@ "dtnode_heapsize": "1024m", "proxyuser_group": "users", "hadoop_heapsize": "1024", - "hadoop_pid_dir_prefix": "/var/run/hadoop", - "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist" + "hadoop_pid_dir_prefix": "/var/run/hadoop" }, "hive-env": { "hcat_pid_dir": "/var/run/webhcat", http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json index b644411..591f561 100644 --- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json +++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json @@ -255,8 +255,7 @@ "dtnode_heapsize": "1024m", "proxyuser_group": "users", "hadoop_heapsize": "1024", - "hadoop_pid_dir_prefix": "/var/run/hadoop", - "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist" + 
"hadoop_pid_dir_prefix": "/var/run/hadoop" }, "hive-env": { "hcat_pid_dir": "/var/run/webhcat", http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json index 770e085..c66d479 100644 --- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json +++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json @@ -257,8 +257,7 @@ "dtnode_heapsize": "1024m", "proxyuser_group": "users", "hadoop_heapsize": "1024", - "hadoop_pid_dir_prefix": "/var/run/hadoop", - "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist" + "hadoop_pid_dir_prefix": "/var/run/hadoop" }, "hive-env": { "hcat_pid_dir": "/var/run/webhcat", http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json index 988f38e..093cb1e 100644 --- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json +++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json @@ -507,8 +507,7 @@ "dtnode_heapsize": "1024m", "proxyuser_group": "users", "hadoop_heapsize": "1024", - "hadoop_pid_dir_prefix": "/var/run/hadoop", - "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist" + "hadoop_pid_dir_prefix": "/var/run/hadoop" }, "hive-env": { "hcat_pid_dir": "/var/run/webhcat", http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json index 4cf647b..403c48f 100644 --- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json +++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json @@ -480,8 +480,7 @@ "dtnode_heapsize": "1024m", "proxyuser_group": "users", "hadoop_heapsize": "1024", - "hadoop_pid_dir_prefix": "/var/run/hadoop", - "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist" + "hadoop_pid_dir_prefix": "/var/run/hadoop" }, "hive-env": { "hcat_pid_dir": "/var/run/webhcat", http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json index 286a728..fc8b118 100644 --- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json +++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json @@ -449,8 +449,7 @@ "dtnode_heapsize": "1024m", "proxyuser_group": "users", "hadoop_heapsize": "1024", - "hadoop_pid_dir_prefix": "/var/run/hadoop", - "dfs.datanode.data.dir.mount.file": 
"/etc/hadoop/conf/dfs_data_dir_mount.hist" + "hadoop_pid_dir_prefix": "/var/run/hadoop" }, "hive-env": { "hcat_pid_dir": "/var/run/webhcat", http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json index b2fd6e8..6a09aff 100644 --- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json +++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json @@ -441,8 +441,7 @@ "dtnode_heapsize": "1024m", "proxyuser_group": "users", "hadoop_heapsize": "1024", - "hadoop_pid_dir_prefix": "/var/run/hadoop", - "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist" + "hadoop_pid_dir_prefix": "/var/run/hadoop" }, "hive-env": { "hcat_pid_dir": "/var/run/webhcat", http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json index 4f30caa..32b40f1 100644 --- a/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json +++ b/ambari-server/src/test/python/stacks/2.0.6/configs/flume_22.json @@ -441,8 +441,7 @@ "dtnode_heapsize": "1024m", "proxyuser_group": "users", "hadoop_heapsize": "1024", - "hadoop_pid_dir_prefix": "/var/run/hadoop", - "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist" + "hadoop_pid_dir_prefix": "/var/run/hadoop" }, "hive-env": { "hcat_pid_dir": "/var/run/webhcat", http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json index e7a516f..f7f8bba 100644 --- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json +++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json @@ -360,7 +360,6 @@ "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. 
Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HAD OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. 
/tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/ java/*mysql* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n if [ -d \"/etc/tez/conf/\" ]; then\n # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_ OPTS\"", "hdfs_user": "hdfs", "namenode_opt_newsize": "200m", - "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", "hadoop_root_logger": "INFO,RFA", "hadoop_heapsize": "1024", "namenode_opt_maxpermsize": "256m", http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json index 83120eb..3cba607 100644 --- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json +++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json @@ -355,7 +355,6 @@ "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. 
Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HAD OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. 
/tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/ java/*mysql* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n if [ -d \"/etc/tez/conf/\" ]; then\n # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_ OPTS\"", "hdfs_user": "hdfs", "namenode_opt_newsize": "200m", - "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", "hadoop_root_logger": "INFO,RFA", "hadoop_heapsize": "1024", "namenode_opt_maxpermsize": "256m", http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json ---------------------------------------------------------------------- diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json index 349a1cf..5060f2c 100644 --- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json +++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json @@ -360,7 +360,6 @@ "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. 
Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appende d to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStam ps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HAD OOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. 
/tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n if [ -d \"/etc/tez/conf/\" ]; then\n # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"",
"hdfs_user": "hdfs",
"namenode_opt_newsize": "200m",
- "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist",
"hadoop_root_logger": "INFO,RFA",
"hadoop_heapsize": "1024",
"namenode_opt_maxpermsize": "256m",

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
index b71c4c8..6fd2933 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
@@ -207,8 +207,7 @@
"dtnode_heapsize": "1024m",
"proxyuser_group": "users",
"hadoop_heapsize": "1024",
- "hadoop_pid_dir_prefix": "/var/run/hadoop",
- "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist"
+ "hadoop_pid_dir_prefix": "/var/run/hadoop"
},
"hive-env": {
"hcat_pid_dir": "/var/run/webhcat",

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
index a7187f9..0a1cbaa 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
@@ -207,8 +207,7 @@
"dtnode_heapsize": "1024m",
"proxyuser_group": "users",
"hadoop_heapsize": "1024",
- "hadoop_pid_dir_prefix": "/var/run/hadoop",
- "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist"
+ "hadoop_pid_dir_prefix": "/var/run/hadoop"
},
"hive-env": {
"hcat_pid_dir": "/var/run/webhcat",

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
index a026259..9c8c0e2 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
@@ -152,7 +152,6 @@
"content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport
HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.
See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n if [ -d \"/etc/tez/conf/\" ]; then\n # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"",
"hdfs_user": "hdfs",
"namenode_opt_newsize": "256m",
- "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist",
"hadoop_root_logger": "INFO,RFA",
"hadoop_heapsize": "1024",
"namenode_opt_maxpermsize": "256m",

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json b/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
index 01856df..6424403 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
@@ -449,8 +449,7 @@
"dtnode_heapsize": "1024m",
"proxyuser_group": "users",
"hadoop_heapsize": "1024",
- "hadoop_pid_dir_prefix": "/var/run/hadoop",
- "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist"
+ "hadoop_pid_dir_prefix": "/var/run/hadoop"
},
"hive-env": {
"hcat_pid_dir": "/var/run/webhcat",

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/ranger-namenode-start.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ranger-namenode-start.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ranger-namenode-start.json
index 84c798b..2a40a46 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ranger-namenode-start.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ranger-namenode-start.json
@@ -234,7 +234,6 @@
"content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.
Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.
$HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n if [ -d \"/etc/tez/conf/\" ]; then\n # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"",
"hdfs_user": "hdfs",
"namenode_opt_newsize": "256m",
- "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist",
"hadoop_root_logger": "INFO,RFA",
"hadoop_heapsize": "1024",
"namenode_opt_maxpermsize": "256m",

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
index c9e20e2..a2c41e4 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
@@ -568,8 +568,7 @@
"proxyuser_group": "users",
"hadoop_heapsize": "1024",
"hadoop_pid_dir_prefix": "/var/run/hadoop",
- "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab",
- "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist"
+ "hdfs_user_keytab": "/etc/security/keytabs/hdfs.headless.keytab"
},
"hive-env": {
"hcat_pid_dir": "/var/run/webhcat",

http://git-wip-us.apache.org/repos/asf/ambari/blob/6a5a6a39/ambari-server/src/test/python/stacks/2.1/configs/client-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/client-upgrade.json b/ambari-server/src/test/python/stacks/2.1/configs/client-upgrade.json
index ca0b1a6..11e18ff 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/client-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/client-upgrade.json
@@ -349,7 +349,6 @@
"content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME. All others are\n# optional. When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use. Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options. Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping
privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options. Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored. $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from. Unset by default.\n# export HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n if [ -d \"/etc/tez/conf/\" ]; then\n # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"",
"hdfs_user": "hdfs",
"namenode_opt_newsize": "200m",
- "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist",
"hadoop_root_logger": "INFO,RFA",
"hadoop_heapsize": "1024",
"namenode_opt_maxpermsize": "256m",
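The hunks above all drop the same hard-coded entry, "dfs.datanode.data.dir.mount.file": "/etc/hadoop/conf/dfs_data_dir_mount.hist", from the hadoop-env sections of the agent test configurations, so the DataNode mount-history path is no longer taken from a file under /etc/hadoop/conf. As a rough illustration only (the helper name, the static path, and the config layout below are assumptions, not part of this diff), an agent-side lookup can prefer a location that survives a conf-directory switch:

    # Illustrative sketch, not the actual params_linux.py change: resolve the
    # DataNode mount-history file from a static, agent-owned location instead
    # of the removed hadoop-env property. Path and helper name are assumptions.
    STATIC_MOUNT_HIST = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"

    def get_data_dir_mount_file(command_json):
        """Return the mount-history path, ignoring legacy /etc/hadoop/conf values."""
        legacy = command_json.get("configurations", {}) \
                             .get("hadoop-env", {}) \
                             .get("dfs.datanode.data.dir.mount.file")
        # An old config may still carry the legacy path; since that directory is
        # replaced when the conf dir is switched, prefer the static location.
        if legacy and not legacy.startswith("/etc/hadoop"):
            return legacy
        return STATIC_MOUNT_HIST

With this kind of fallback, get_data_dir_mount_file({}) returns the static path, which is why the test configurations above no longer need to carry the property at all.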
