ambari git commit: AMBARI-16417: More information for Standby sync alert

2016-05-11 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 9c5122aaf -> b9ed455d5


AMBARI-16417: More information for Standby sync alert


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b9ed455d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b9ed455d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b9ed455d

Branch: refs/heads/trunk
Commit: b9ed455d5df1c5cec968234e0e6c7019413b68c8
Parents: 9c5122a
Author: Jun Aoki 
Authored: Wed May 11 14:20:28 2016 -0700
Committer: Jun Aoki 
Committed: Wed May 11 14:20:28 2016 -0700

--
 .../2.0.0/package/alerts/alert_sync_status.py   | 24 
 .../stacks/2.3/HAWQ/test_alert_sync_status.py   |  8 +++
 2 files changed, 18 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/b9ed455d/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/alerts/alert_sync_status.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/alerts/alert_sync_status.py
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/alerts/alert_sync_status.py
index c94be9e..e916f07 100644
--- 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/alerts/alert_sync_status.py
+++ 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/alerts/alert_sync_status.py
@@ -60,11 +60,12 @@ def execute(configurations={}, parameters={}, host_name=None):
     return (RESULT_STATE_SKIPPED, ['HAWQSTANDBY is not installed.'])
 
   try:
-    sync_status = get_sync_status(configurations[HAWQMASTER_PORT])
-    if sync_status in ('Synchronized', 'Synchronizing'):
+    summary_state, error_message = get_sync_status(configurations[HAWQMASTER_PORT])
+
+    if summary_state in ('Synchronized', 'Synchronizing'):
       return (RESULT_STATE_OK, ['HAWQSTANDBY is in sync with HAWQMASTER.'])
-    elif sync_status == 'Not Synchronized':
-      return (RESULT_STATE_WARNING, ['HAWQSTANDBY is not in sync with HAWQMASTER.'])
+    elif summary_state == 'Not Synchronized':
+      return (RESULT_STATE_WARNING, ['HAWQSTANDBY is not in sync with HAWQMASTER. ERROR: ' + error_message])
   except Exception, e:
     logger.exception('[Alert] Retrieving HAWQSTANDBY sync status from HAWQMASTER fails on host, {0}:'.format(host_name))
     logger.exception(str(e))
@@ -78,14 +79,17 @@ def get_sync_status(port):
   """
   Gets the sync status of HAWQSTANDBY from HAWQMASTER by running a SQL command.
   summary_state can be of the following values: ('Synchronized', 'Synchronizing', 'Not Synchronized', 'None', 'Not Configured', 'Unknown')
   """
-  query = "SELECT summary_state FROM gp_master_mirroring"
-  cmd = "source {0} && psql -p {1} -t -d template1 -c \"{2};\"".format(HAWQ_GREENPLUM_PATH_FILE, port, query)
+
+  query = "SELECT summary_state, error_message FROM gp_master_mirroring"
+  cmd = "source {0} && psql -p {1} -t --no-align -d template1 -c \"{2};\"".format(HAWQ_GREENPLUM_PATH_FILE, port, query)
 
-  returncode, output = call(cmd,
-                            user=HAWQ_USER,
-                            timeout=60)
+  returncode, output = call(cmd, user=HAWQ_USER, timeout=60)
 
   if returncode:
     raise
 
-  return output.strip()
+  split_output = output.split("|")
+  summary_state = split_output[0].strip()
+  error_message = split_output[1].strip()
+
+  return (summary_state, error_message)
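The new query returns one pipe-delimited row (psql's --no-align suppresses column padding), which the patch splits into the state and the error message. A minimal standalone sketch of that parsing step; the length guard here is a defensive variation on the patch's direct indexing, not part of the actual module:

    def parse_gp_master_mirroring_row(output):
        # psql -t --no-align prints e.g. "Not Synchronized|connection refused\n"
        split_output = output.split("|")
        summary_state = split_output[0].strip()
        error_message = split_output[1].strip() if len(split_output) > 1 else ''
        return (summary_state, error_message)

    print(parse_gp_master_mirroring_row("Synchronized|\n"))           # ('Synchronized', '')
    print(parse_gp_master_mirroring_row("Not Synchronized|timeout"))  # ('Not Synchronized', 'timeout')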

http://git-wip-us.apache.org/repos/asf/ambari/blob/b9ed455d/ambari-server/src/test/python/stacks/2.3/HAWQ/test_alert_sync_status.py
--
diff --git 
a/ambari-server/src/test/python/stacks/2.3/HAWQ/test_alert_sync_status.py 
b/ambari-server/src/test/python/stacks/2.3/HAWQ/test_alert_sync_status.py
index 7d030dc..fd4f474 100644
--- a/ambari-server/src/test/python/stacks/2.3/HAWQ/test_alert_sync_status.py
+++ b/ambari-server/src/test/python/stacks/2.3/HAWQ/test_alert_sync_status.py
@@ -91,7 +91,7 @@ class TestAlertSyncStatus(RMFTestCase):
     }
 
     # Mock calls
-    get_sync_status_mock.return_value = 'Synchronized'
+    get_sync_status_mock.return_value = ('Synchronized', "")
 
     [status, messages] = alert_sync_status.execute(configurations=configs)
     self.assertEqual(status, RESULT_STATE_OK)
@@ -110,7 +110,7 @@ class TestAlertSyncStatus(RMFTestCase):
     }
 
     # Mock calls
-    get_sync_status_mock.return_value = 'Synchronizing'
+    get_sync_status_mock.return_value = ('Synchronizing', "")
 
     [status, messages] = alert_sync_status.execute(configurations=configs)
     self.assertEqual(status, RESULT_STATE_OK)
@@ -129,12 +129,12 @@ class TestAlertSyncStatus(RMFTestCase):
     }
 
     # Mock calls
-    get_sync_status_mock.return_value = 'N
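The updated tests swap the mocked scalar for a (state, error) tuple. A self-contained sketch of the same mocking pattern, using hypothetical names rather than Ambari's RMFTestCase harness (and Python 3's unittest.mock, where the original is Python 2):

    import unittest
    from unittest import mock

    def evaluate_alert(get_sync_status):
        # mirrors the tuple-unpacking the patched execute() now performs
        summary_state, error_message = get_sync_status()
        if summary_state in ('Synchronized', 'Synchronizing'):
            return ('OK', ['HAWQSTANDBY is in sync with HAWQMASTER.'])
        return ('WARNING', ['HAWQSTANDBY is not in sync with HAWQMASTER. ERROR: ' + error_message])

    class TestSyncAlert(unittest.TestCase):
        def test_not_synchronized_includes_error(self):
            status, messages = evaluate_alert(mock.Mock(return_value=('Not Synchronized', 'standby down')))
            self.assertEqual(status, 'WARNING')
            self.assertIn('standby down', messages[0])

    if __name__ == '__main__':
        unittest.main()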

ambari git commit: AMBARI-15852: Changing HAWQ Ports through Ambari prevents HAWQ service from restarting

2016-04-15 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/branch-2.2.2 900b4fc63 -> 209ade07b


AMBARI-15852: Changing HAWQ Ports through Ambari prevents HAWQ service from 
restarting


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/209ade07
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/209ade07
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/209ade07

Branch: refs/heads/branch-2.2.2
Commit: 209ade07b42b606188d8257955f47bc79eb8f4c1
Parents: 900b4fc
Author: Jun Aoki 
Authored: Fri Apr 15 11:29:57 2016 -0700
Committer: Jun Aoki 
Committed: Fri Apr 15 11:29:57 2016 -0700

--
 .../HAWQ/2.0.0/package/scripts/common.py| 12 +---
 .../2.0.0/package/scripts/hawq_constants.py | 15 ++
 .../HAWQ/2.0.0/package/scripts/hawqmaster.py|  4 +--
 .../HAWQ/2.0.0/package/scripts/hawqsegment.py   |  2 +-
 .../HAWQ/2.0.0/package/scripts/hawqstandby.py   |  2 +-
 .../HAWQ/2.0.0/package/scripts/hawqstatus.py| 25 +++--
 .../HAWQ/2.0.0/package/scripts/utils.py |  8 +++---
 .../python/stacks/2.3/HAWQ/test_hawqmaster.py   |  4 ++-
 .../python/stacks/2.3/HAWQ/test_hawqsegment.py  |  5 ++--
 .../python/stacks/2.3/HAWQ/test_hawqstandby.py  |  4 ++-
 .../test/python/stacks/2.3/HAWQ/test_utils.py   | 29 
 11 files changed, 73 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/209ade07/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/common.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/common.py
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/common.py
index 413cf1a..b929430 100644
--- 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/common.py
+++ 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/common.py
@@ -31,6 +31,7 @@ import xml.etree.ElementTree as ET
 import utils
 import hawq_constants
 import custom_params
+import hawqstatus
 
 def setup_user():
   """
@@ -234,7 +235,7 @@ def __update_sysctl_file_suse():
 raise Fail("Failed to update sysctl.conf file ")
 
 
-def get_local_hawq_site_property(property_name):
+def get_local_hawq_site_property_value(property_name):
   """
   Fetches the value of the property specified, from the local hawq-site.xml.
   """
@@ -293,17 +294,20 @@ def start_component(component_name, port, data_dir):
   if os.path.exists(os.path.join(data_dir, hawq_constants.postmaster_opts_filename)):
     return utils.exec_hawq_operation(hawq_constants.START,
                                      "{0} -a -v".format(component_name),
-                                     not_if=utils.chk_hawq_process_status_cmd(port))
+                                     not_if=utils.generate_hawq_process_status_cmd(component_name, port))
 
   utils.exec_hawq_operation(hawq_constants.INIT, "{0} -a -v".format(component_name))
 
-def stop_component(component_name, port, mode):
+def stop_component(component_name, mode):
   """
   Stops the component
+  Unlike start_component, the port is obtained from the local hawq-site.xml, as Ambari potentially has a new value set through the UI.
   """
+  port_property_name = hawq_constants.COMPONENT_ATTRIBUTES_MAP[component_name]['port_property']
+  port_number = get_local_hawq_site_property_value(port_property_name)
   utils.exec_hawq_operation(hawq_constants.STOP,
                             "{0} -M {1} -a -v".format(component_name, mode),
-                            only_if=utils.chk_hawq_process_status_cmd(port, component_name))
+                            only_if=utils.generate_hawq_process_status_cmd(component_name, port_number))
 
 def __check_dfs_truncate_enforced():
   """

http://git-wip-us.apache.org/repos/asf/ambari/blob/209ade07/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawq_constants.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawq_constants.py
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawq_constants.py
index 3f6c371..4ce0c94 100644
--- 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawq_constants.py
+++ 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawq_constants.py
@@ -66,3 +66,18 @@ pxf_hdfs_test_dir = "/user/{0}/hawq_pxf_hdfs_service_check".format(hawq_user)
 # Timeouts
 default_exec_timeout = 600
 hawq_operation_exec_timeout = 900
+
+COMPONENT_ATTRIBUTES_MAP = {
+  MASTER: {
+    'port_property': 'hawq_master_address_port',
+    'process_name': 'postgres'
+  },
+  STANDBY: {
+
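The diff is cut off above, so the STANDBY and SEGMENT entries are not shown. This hypothetical sketch only illustrates the lookup pattern stop_component relies on; the standby's port property name is assumed, not taken from the patch:

    COMPONENT_ATTRIBUTES_MAP = {
        'master':  {'port_property': 'hawq_master_address_port',  'process_name': 'postgres'},
        'standby': {'port_property': 'hawq_standby_address_port', 'process_name': 'postgres'},  # assumed entry
    }

    def port_property_for(component_name):
        # a KeyError here would signal an unsupported component name
        return COMPONENT_ATTRIBUTES_MAP[component_name]['port_property']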

git commit: AMBARI-7638 Correct evident spelling and grammatical errors in global strings (nandat via jaoki)

2014-10-09 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk c5495dd87 -> edbd1f7d4


AMBARI-7638 Correct evident spelling and grammatical errors in global strings 
(nandat via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/edbd1f7d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/edbd1f7d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/edbd1f7d

Branch: refs/heads/trunk
Commit: edbd1f7d4eb3509ceb29d54b40445a8bcaf8f1c3
Parents: c5495dd
Author: Jun Aoki 
Authored: Thu Oct 9 13:39:22 2014 -0700
Committer: Jun Aoki 
Committed: Thu Oct 9 13:39:22 2014 -0700

--
 ambari-web/app/controllers/main/host/details.js |  2 +-
 .../app/controllers/wizard/step0_controller.js  |  2 +-
 ambari-web/app/messages.js  | 16 ++--
 ambari-web/app/utils/host_progress_popup.js |  2 +-
 .../test/utils/host_progress_popup_test.js  | 27 
 5 files changed, 38 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/edbd1f7d/ambari-web/app/controllers/main/host/details.js
--
diff --git a/ambari-web/app/controllers/main/host/details.js 
b/ambari-web/app/controllers/main/host/details.js
index 35a38ea..75a555b 100644
--- a/ambari-web/app/controllers/main/host/details.js
+++ b/ambari-web/app/controllers/main/host/details.js
@@ -944,7 +944,7 @@ App.MainHostDetailsController = Em.Controller.extend({
 return App.ModalPopup.show({
   header: Em.I18n.t('common.warning'),
   message: function () {
-        return Em.I18n.t('hostPopup.reccomendation.beforeDecommission').format(App.format.components["HBASE_REGIONSERVER"]);
+        return Em.I18n.t('hostPopup.recommendation.beforeDecommission').format(App.format.components["HBASE_REGIONSERVER"]);
   }.property(),
   bodyClass: Ember.View.extend({
 template: Em.Handlebars.compile('{{message}}')

http://git-wip-us.apache.org/repos/asf/ambari/blob/edbd1f7d/ambari-web/app/controllers/wizard/step0_controller.js
--
diff --git a/ambari-web/app/controllers/wizard/step0_controller.js 
b/ambari-web/app/controllers/wizard/step0_controller.js
index dd7935d..1cff37d 100644
--- a/ambari-web/app/controllers/wizard/step0_controller.js
+++ b/ambari-web/app/controllers/wizard/step0_controller.js
@@ -38,7 +38,7 @@ App.WizardStep0Controller = Em.Controller.extend({
       this.set('clusterNameError', Em.I18n.t('installer.step0.clusterName.error.required'));
       return true;
     } else if (/\s/.test(clusterName)) {
-      this.set('clusterNameError', Em.I18n.t('installer.step0.clusterName.error.whitespaces'));
+      this.set('clusterNameError', Em.I18n.t('installer.step0.clusterName.error.whitespace'));
       return true;
     } else if (/[^\w\s]/gi.test(clusterName)) {
       this.set('clusterNameError', Em.I18n.t('installer.step0.clusterName.error.specialChar'));

http://git-wip-us.apache.org/repos/asf/ambari/blob/edbd1f7d/ambari-web/app/messages.js
--
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 95b5d8c..4008fbc 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -257,7 +257,7 @@ Em.I18n.translations = {
   'hostPopup.status.category.success':'Success ({0})',
   'hostPopup.status.category.aborted':'Aborted ({0})',
   'hostPopup.status.category.timedout':'Timedout ({0})',
-  'hostPopup.header.postFix':' Background Operations Running',
+  'hostPopup.header.postFix':' Background Operation{0} Running',
   'hostPopup.serviceInfo.showMore':'Show more...',
   'hostPopup.bgop.abortRequest.title': 'Abort operation',
   'hostPopup.bgop.abortRequest.confirmation.body': 'Are you sure you want to abort \'{0}\' operation?',
@@ -268,7 +268,7 @@ Em.I18n.translations = {
   'hostPopup.bgop.sourceRequestSchedule.aborted': 'Future operations of this batch request have been aborted',
   'hostPopup.bgop.abort.rollingRestart': 'Abort Rolling Restart',
   'hostPopup.warning.alertsTimeOut': 'Maintenance Mode has been turned {0}. It may take a few minutes for the alerts to be {1}.',
-  'hostPopup.reccomendation.beforeDecommission': '{0} Maintenance Mode is pre required for decommissioning.',
+  'hostPopup.recommendation.beforeDecommission&
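The postFix template gains a {0} placeholder so the header can read "Operation" or "Operations" depending on the count (the companion change in host_progress_popup.js, listed in the diffstat, presumably supplies the suffix). A sketch of the pluralization pattern in Python terms; the helper name is illustrative:

    def background_ops_header(count):
        post_fix = ' Background Operation{0} Running'
        return str(count) + post_fix.format('' if count == 1 else 's')

    assert background_ops_header(1) == '1 Background Operation Running'
    assert background_ops_header(3) == '3 Background Operations Running'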

git commit: AMBARI-7639 HDFS config will not save (nandat via jaoki)

2014-10-09 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 06f54386c -> 4d5d882db


AMBARI-7639 HDFS config will not save (nandat via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4d5d882d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4d5d882d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4d5d882d

Branch: refs/heads/trunk
Commit: 4d5d882db57041898be7286dfd948e6e8a946202
Parents: 06f5438
Author: Jun Aoki 
Authored: Thu Oct 9 17:20:13 2014 -0700
Committer: Jun Aoki 
Committed: Thu Oct 9 17:20:13 2014 -0700

--
 .../controllers/main/service/info/configs.js|  22 +-
 .../main/service/info/config_test.js| 210 +++
 2 files changed, 221 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/4d5d882d/ambari-web/app/controllers/main/service/info/configs.js
--
diff --git a/ambari-web/app/controllers/main/service/info/configs.js 
b/ambari-web/app/controllers/main/service/info/configs.js
index 2210920..386ec96 100644
--- a/ambari-web/app/controllers/main/service/info/configs.js
+++ b/ambari-web/app/controllers/main/service/info/configs.js
@@ -1313,23 +1313,23 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
     if (serviceName === 'HDFS') {
       var hdfsConfigs = this.get('stepConfigs').findProperty('serviceName', 'HDFS').get('configs');
       if (App.get('isHadoop2Stack')) {
-        if (hdfsConfigs.findProperty('name', 'dfs.namenode.name.dir').get('isNotDefaultValue') ||
-          hdfsConfigs.findProperty('name', 'dfs.namenode.checkpoint.dir').get('isNotDefaultValue') ||
-          hdfsConfigs.findProperty('name', 'dfs.datanode.data.dir').get('isNotDefaultValue')) {
-          dirChanged = true;
+        if ((hdfsConfigs.findProperty('name', 'dfs.namenode.name.dir') && hdfsConfigs.findProperty('name', 'dfs.namenode.name.dir').get('isNotDefaultValue')) ||
+            (hdfsConfigs.findProperty('name', 'dfs.namenode.checkpoint.dir') && hdfsConfigs.findProperty('name', 'dfs.namenode.checkpoint.dir').get('isNotDefaultValue')) ||
+            (hdfsConfigs.findProperty('name', 'dfs.datanode.data.dir') && hdfsConfigs.findProperty('name', 'dfs.datanode.data.dir').get('isNotDefaultValue'))) {
+          dirChanged = true;
         }
       } else {
-        if (hdfsConfigs.findProperty('name', 'dfs.name.dir').get('isNotDefaultValue') ||
-          hdfsConfigs.findProperty('name', 'fs.checkpoint.dir').get('isNotDefaultValue') ||
-          hdfsConfigs.findProperty('name', 'dfs.data.dir').get('isNotDefaultValue')) {
-          dirChanged = true;
+        if ((hdfsConfigs.findProperty('name', 'dfs.name.dir') && hdfsConfigs.findProperty('name', 'dfs.name.dir').get('isNotDefaultValue')) ||
+            (hdfsConfigs.findProperty('name', 'fs.checkpoint.dir') && hdfsConfigs.findProperty('name', 'fs.checkpoint.dir').get('isNotDefaultValue')) ||
+            (hdfsConfigs.findProperty('name', 'dfs.data.dir') && hdfsConfigs.findProperty('name', 'dfs.data.dir').get('isNotDefaultValue'))) {
+          dirChanged = true;
         }
       }
     } else if (serviceName === 'MAPREDUCE') {
       var mapredConfigs = this.get('stepConfigs').findProperty('serviceName', 'MAPREDUCE').get('configs');
-      if (mapredConfigs.findProperty('name', 'mapred.local.dir').get('isNotDefaultValue') ||
-        mapredConfigs.findProperty('name', 'mapred.system.dir').get('isNotDefaultValue')) {
-        dirChanged = true;
+      if ((mapredConfigs.findProperty('name', 'mapred.local.dir') && mapredConfigs.findProperty('name', 'mapred.local.dir').get('isNotDefaultValue')) ||
+          (mapredConfigs.findProperty('name', 'mapred.system.dir') && mapredConfigs.findProperty('name', 'mapred.system.dir').get('isNotDefaultValue'))) {
+        dirChanged = true;
       }
     }
     return dirChanged;
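The bug: the old code called .get('isNotDefaultValue') on the result of findProperty, which is undefined when the property is absent, so saving failed. The fix checks existence before dereferencing. The same pattern rendered in Python, as a sketch:

    def is_not_default(configs, name):
        # returns False instead of raising when the property is missing
        prop = next((c for c in configs if c['name'] == name), None)
        return bool(prop and prop.get('isNotDefaultValue'))

    configs = [{'name': 'dfs.namenode.name.dir', 'isNotDefaultValue': True}]
    dir_changed = any(is_not_default(configs, n) for n in
                      ('dfs.namenode.name.dir', 'dfs.namenode.checkpoint.dir', 'dfs.datanode.data.dir'))
    assert dir_changed  # no error even though two of the properties are absent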

http://git-

git commit: AMBARI-7622 TestActionScheduler fails occasionally on builds.a.o stating expected: but was: (jaoki)

2014-10-13 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk a0d37dcc3 -> 535bcb476


AMBARI-7622 TestActionScheduler fails occasionally on builds.a.o stating 
expected: but was: (jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/535bcb47
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/535bcb47
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/535bcb47

Branch: refs/heads/trunk
Commit: 535bcb476b6ef41f11d738ece7b5cf63a4d7b232
Parents: a0d37dc
Author: Jun Aoki 
Authored: Mon Oct 13 15:40:00 2014 -0700
Committer: Jun Aoki 
Committed: Mon Oct 13 15:40:35 2014 -0700

--
 .../org/apache/ambari/server/actionmanager/TestActionScheduler.java | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/535bcb47/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
--
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
index a20f252..b9a38a4 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
@@ -324,6 +324,7 @@ public class TestActionScheduler {
   }
 
   @Test
+  @Ignore // This is temporarily disabled, as discussed here: https://reviews.apache.org/r/26510/
   public void testOpFailedEventRaisedForAbortedHostRole() throws Exception {
 ActionQueue aq = new ActionQueue();
 Properties properties = new Properties();



git commit: AMBARI-7622 TestActionScheduler fails occasionally on builds.a.o stating expected: but was: (jaoki)

2014-10-13 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/branch-1.7.0 1d974624b -> 27d0867eb


AMBARI-7622 TestActionScheduler fails occasionally on builds.a.o stating 
expected: but was: (jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/27d0867e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/27d0867e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/27d0867e

Branch: refs/heads/branch-1.7.0
Commit: 27d0867ebf56b891a02185885fabdc9a47954713
Parents: 1d97462
Author: Jun Aoki 
Authored: Mon Oct 13 15:41:56 2014 -0700
Committer: Jun Aoki 
Committed: Mon Oct 13 15:41:56 2014 -0700

--
 .../org/apache/ambari/server/actionmanager/TestActionScheduler.java | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/27d0867e/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
--
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
index a20f252..b9a38a4 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
@@ -324,6 +324,7 @@ public class TestActionScheduler {
   }
 
   @Test
+  @Ignore // This is temporarily disabled, as discussed here: https://reviews.apache.org/r/26510/
   public void testOpFailedEventRaisedForAbortedHostRole() throws Exception {
 ActionQueue aq = new ActionQueue();
 Properties properties = new Properties();



git commit: AMBARI-7640 Validation required for adding a Group Name (nandat via jaoki)

2014-10-13 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 6cf98ff4a -> a5716364c


AMBARI-7640 Validation required for adding a Group Name (nandat via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a5716364
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a5716364
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a5716364

Branch: refs/heads/trunk
Commit: a5716364cba4cbbd58c55d915b3ac0e86433dca3
Parents: 6cf98ff
Author: Jun Aoki 
Authored: Mon Oct 13 17:13:02 2014 -0700
Committer: Jun Aoki 
Committed: Mon Oct 13 17:13:02 2014 -0700

--
 ambari-web/app/assets/test/tests.js   |  1 +
 .../service/manage_config_groups_controller.js| 15 ---
 ambari-web/app/messages.js|  1 +
 ambari-web/app/utils/validator.js | 10 ++
 ambari-web/test/utils/validator_test.js   | 18 ++
 5 files changed, 42 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/a5716364/ambari-web/app/assets/test/tests.js
--
diff --git a/ambari-web/app/assets/test/tests.js 
b/ambari-web/app/assets/test/tests.js
index 95e9073..74f8a4c 100644
--- a/ambari-web/app/assets/test/tests.js
+++ b/ambari-web/app/assets/test/tests.js
@@ -71,6 +71,7 @@ var files = ['test/init_model_test',
   'test/controllers/main/host/configs_service_test',
   'test/controllers/main/host/details_test',
   'test/controllers/main/service/add_controller_test',
+  'test/controllers/main/service/manage_config_groups_controller_test',
   'test/controllers/main/service/reassign_controller_test',
   'test/controllers/main/service/reassign/step2_controller_test',
   'test/controllers/main/service/reassign/step4_controller_test',

http://git-wip-us.apache.org/repos/asf/ambari/blob/a5716364/ambari-web/app/controllers/main/service/manage_config_groups_controller.js
--
diff --git 
a/ambari-web/app/controllers/main/service/manage_config_groups_controller.js 
b/ambari-web/app/controllers/main/service/manage_config_groups_controller.js
index 56c77f4..2110a87 100644
--- a/ambari-web/app/controllers/main/service/manage_config_groups_controller.js
+++ b/ambari-web/app/controllers/main/service/manage_config_groups_controller.js
@@ -18,6 +18,7 @@
 
 
 var App = require('app');
+var validator = require('utils/validator');
 var hostsManagement = require('utils/hosts');
 var numberUtils = require('utils/number_utils');
 
@@ -427,19 +428,23 @@ App.ManageConfigGroupsController = Em.Controller.extend({
       validate: function () {
         var warningMessage = '';
         var originalGroup = self.get('selectedConfigGroup');
+        var groupName = this.get('configGroupName').trim();
         if (originalGroup.get('description') !== this.get('configGroupDesc') && !this.get('isDescriptionDirty')) {
           this.set('isDescriptionDirty', true);
         }
-        if (originalGroup.get('name').trim() === this.get('configGroupName').trim()) {
+        if (originalGroup.get('name').trim() === groupName) {
           if (this.get('isDescriptionDirty')) {
             warningMessage = '';
           } else {
             warningMessage = Em.I18n.t("config.group.selection.dialog.err.name.exists");
           }
         } else {
-          if (self.get('configGroups').mapProperty('name').contains(this.get('configGroupName').trim())) {
+          if (self.get('configGroups').mapProperty('name').contains(groupName)) {
             warningMessage = Em.I18n.t("config.group.selection.dialog.err.name.exists");
           }
+          else if (groupName && !validator.isValidConfigGroupName(groupName)) {
+            warningMessage = Em.I18n.t("form.validator.configGroupName");
+          }
         }
         this.set('warningMessage', warningMessage);
       }.observes('configGroupName', 'configGroupDesc'),
@@ -479,9 +484,13 @@ App.ManageConfigGroupsController = Em.Controller.extend({
       },
       validate: function () {
         var warningMessage = '';
+        var groupName = this.get('configGroupName').trim();
+        if (self.get('configGroups').mapProperty('na
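The new validator.isValidConfigGroupName is added in utils/validator.js (per the diffstat), but its body is not shown here. A hypothetical Python sketch of a validator in that spirit; the exact character rules are assumptions, not taken from the patch:

    import re

    def is_valid_config_group_name(name):
        # assume: letters, digits, underscores and spaces only, non-empty
        return bool(re.match(r'^[\w\s]+$', name))

    assert is_valid_config_group_name('Group 1')
    assert not is_valid_config_group_name('group/1')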

[2/3] AMBARI-7354 Allow different stack have different predefined site properties (rmeneses via jaoki)

2014-10-15 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/ecb5aff2/ambari-web/app/data/BIGTOP/site_properties.js
--
diff --git a/ambari-web/app/data/BIGTOP/site_properties.js 
b/ambari-web/app/data/BIGTOP/site_properties.js
new file mode 100644
index 000..1270e2a
--- /dev/null
+++ b/ambari-web/app/data/BIGTOP/site_properties.js
@@ -0,0 +1,3724 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var App = require('app');
+
+module.exports =
+{
+  "configProperties": [
+  //* BIGTOP stack **
+  /**HDFS***/
+{
+  "id": "site property",
+  "name": "dfs.namenode.checkpoint.dir",
+  "displayName": "SecondaryNameNode Checkpoint directories",
+  "defaultDirectory": "/hadoop/hdfs/namesecondary",
+  "displayType": "directories",
+  "isOverridable": false,
+  "serviceName": "HDFS",
+  "category": "SECONDARY_NAMENODE",
+  "index": 1
+},
+{
+  "id": "site property",
+  "name": "dfs.namenode.checkpoint.period",
+  "displayName": "HDFS Maximum Checkpoint Delay",
+  "displayType": "int",
+  "unit": "seconds",
+  "category": "General",
+  "serviceName": "HDFS",
+  "index": 3
+},
+{
+  "id": "site property",
+  "name": "dfs.namenode.name.dir",
+  "displayName": "NameNode directories",
+  "defaultDirectory": "/hadoop/hdfs/namenode",
+  "displayType": "directories",
+  "isOverridable": false,
+  "serviceName": "HDFS",
+  "category": "NAMENODE",
+  "index": 1
+},
+{
+  "id": "site property",
+  "name": "dfs.webhdfs.enabled",
+  "displayName": "WebHDFS enabled",
+  "displayType": "checkbox",
+  "isOverridable": false,
+  "category": "General",
+  "serviceName": "HDFS",
+  "index": 0
+},
+{
+  "id": "site property",
+  "name": "dfs.datanode.failed.volumes.tolerated",
+  "displayName": "DataNode volumes failure toleration",
+  "displayType": "int",
+  "category": "DATANODE",
+  "serviceName": "HDFS",
+  "index": 3
+},
+{
+  "id": "site property",
+  "name": "dfs.datanode.data.dir.mount.file",
+  "displayName": "File that stores mount point for each data dir",
+  "description": "File path that contains the last known mount point for 
each data dir. This file is used to avoid creating a DFS data dir on the root 
drive (and filling it up) if a path was previously mounted on a drive.",
+  "defaultValue": "/etc/hadoop/conf/dfs_data_dir_mount.hist",
+  "displayType": "directory",
+  "isVisible": true,
+  "category": "DATANODE",
+  "serviceName": "HDFS",
+  "filename": "hadoop-env.xml",
+  "index": 4
+},
+{
+  "id": "site property",
+  "name": "dfs.datanode.data.dir",
+  "displayName": "DataNode directories",
+  "defaultDirectory": "/hadoop/hdfs/data",
+  "displayType": "directories",
+  "category": "DATANODE",
+  "serviceName": "HDFS",
+  "index": 1
+},
+{
+  "id": "site property",
+  "name": "dfs.datanode.data.dir.perm",
+  "displayName": "DataNode directories permission",
+  "displayType": "int",
+  "category": "DATANODE",
+  "serviceName": "HDFS"
+},
+{
+  "id": "site property",
+  "name": "dfs.replication",
+  "displayName": "Block replication",
+  "displayType": "int",
+  "category": "General",
+  "serviceName": "HDFS"
+},
+{
+  "id": "site property",
+  "name": "dfs.datanode.du.reserved",
+  "displayName": "Reserved space for HDFS",
+  "displayType": "int",
+  "unit": "bytes",
+  "category": "General",
+  "serviceName": "HDFS",
+  "index": 2
+},
+{
+  "id": "site property",
+  "name": "dfs.client.read.shortcircuit",
+  "displayName": "HDFS Short-circuit read",
+  "displayType": "checkbox",
+  "category": "Advanced hdfs-site",
+  "serviceName": "HDFS"
+},
+{
+  "id": "site property",
+  "name": "apache_artifacts_d

[1/3] AMBARI-7354 Allow different stack have different predefined site properties (rmeneses via jaoki)

2014-10-15 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 4386b4fc0 -> ecb5aff2f


http://git-wip-us.apache.org/repos/asf/ambari/blob/ecb5aff2/ambari-web/app/utils/config.js
--
diff --git a/ambari-web/app/utils/config.js b/ambari-web/app/utils/config.js
index b31c5ad..cd5b094 100644
--- a/ambari-web/app/utils/config.js
+++ b/ambari-web/app/utils/config.js
@@ -92,13 +92,18 @@ App.config = Em.Object.create({
   }.property('App.isHadoop2Stack'),
 
   preDefinedSiteProperties: function () {
+    var sitePropertiesForCurrentStack = this.preDefinedConfigFile('site_properties');
+    if (sitePropertiesForCurrentStack) {
+      return sitePropertiesForCurrentStack.configProperties;
+    }
+
     if (App.get('isHadoop22Stack')) {
       return require('data/HDP2.2/site_properties').configProperties;
     } else if (App.get('isHadoop2Stack')) {
       return require('data/HDP2/site_properties').configProperties;
     }
     return require('data/site_properties').configProperties;
-  }.property('App.isHadoop2Stack', 'App.isHadoop22Stack'),
+  }.property('App.isHadoop2Stack', 'App.isHadoop22Stack', 'App.currentStackName'),
 
   preDefinedCustomConfigs: function () {
     if (App.get('isHadoop2Stack')) {
@@ -107,6 +112,13 @@ App.config = Em.Object.create({
     return require('data/custom_configs');
   }.property('App.isHadoop2Stack'),
 
+  preDefinedConfigFile: function(file) {
+    try {
+      return require('data/{0}/{1}'.format(App.get('currentStackName'), file));
+    } catch(err) {
+      // the file doesn't exist, which might be expected.
+    }
+  },
   //categories which contain custom configs
   categoriesWithCustom: ['CapacityScheduler'],
 
@@ -118,32 +130,34 @@ App.config = Em.Object.create({
   createContentProperties: function (configs) {
     var services = App.StackService.find();
     var contentProperties = [];
-    services.forEach(function (service) {
-      if (service.get('configTypes')) {
-        Object.keys(service.get('configTypes')).forEach(function (type) {
-          var contentProperty = configs.filterProperty('filename', type + '.xml').someProperty('name', 'content');
-          if (contentProperty && (type.endsWith('-log4j') || type.endsWith('-env'))) {
-            var property = {
-              "id": "site property",
-              "name": "content",
-              "displayName": type.endsWith('-env') ? type + ' template' : "content",
-              "value": "",
-              "defaultValue": "",
-              "description": type + " properties",
-              "displayType": "content",
-              "isOverridable": true,
-              "isRequired": false,
-              "isVisible": true,
-              "showLabel": type.endsWith('-env'),
-              "serviceName": service.get('serviceName'),
-              "filename": type + '.xml',
-              "category": "Advanced " + type
-            };
-            contentProperties.pushObject(property);
-          }
-        }, this);
-      }
-    }, this);
+    if (configs) {
+      services.forEach(function (service) {
+        if (service.get('configTypes')) {
+          Object.keys(service.get('configTypes')).forEach(function (type) {
+            var contentProperty = configs.filterProperty('filename', type + '.xml').someProperty('name', 'content');
+            if (contentProperty && (type.endsWith('-log4j') || type.endsWith('-env'))) {
+              var property = {
+                "id": "site property",
+                "name": "content",
+                "displayName": type.endsWith('-env') ? type + ' template' : "content",
+                "value": "",
+                "defaultValue": "",
+                "description": type + " properties",
+                "displayType": "content",
+                "isOverridable": true,
+                "isRequired": false,
+                "isVisible": true,
+                "showLabel": type.endsWith('-env'),
+                "serviceName": service.get('serviceName'),
+                "filename": type + '.xml',
+                "category": "Advanced " + type
+              };
+              contentProperties.pushObject(property);
+            }
+          }, this);
+        }
+      }, this);
+    }
     return contentProperties;
   },

http://git-wip-us.apache.org/repos/asf/ambari/blob/ecb5aff2/ambari-web/test/utils/config_test.js
--
diff --git a/ambari-web/test/utils/config_test.js 
b/ambari-web/test/utils/config_test.js
index 528be46..b04a404 100644
--- a/ambari-web/test/utils/config_test.js
+++ b/ambari-web/test/utils/config_test.js
@@ -421,4 +421,52 @@ describe('App.config', function () {
   expect(ServiceConfig.get('configCategories.length')).to.eql(1);
 });
   });
+
+  describe('#preDefinedConfigFile', function() {
+before(function() {
+  setups.setupStackVersion(this, 'BIGTOP-0.8');
+});
+
+it('bigtop site pr

[3/3] git commit: AMBARI-7354 Allow different stack have different predefined site properties (rmeneses via jaoki)

2014-10-15 Thread jaoki
AMBARI-7354 Allow different stack have different predefined site properties 
(rmeneses via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ecb5aff2
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ecb5aff2
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ecb5aff2

Branch: refs/heads/trunk
Commit: ecb5aff2f855ee4a26eb847a9805bb48682ba0f2
Parents: 4386b4f
Author: Jun Aoki 
Authored: Wed Oct 15 15:57:08 2014 -0700
Committer: Jun Aoki 
Committed: Wed Oct 15 15:57:08 2014 -0700

--
 ambari-web/app/data/BIGTOP/site_properties.js | 3724 
 ambari-web/app/utils/config.js|   68 +-
 ambari-web/test/utils/config_test.js  |   48 +
 3 files changed, 3813 insertions(+), 27 deletions(-)
--




[3/3] git commit: AMBARI-7354 Allow different stack have different predefined site properties (rmeneses via jaoki)

2014-10-15 Thread jaoki
AMBARI-7354 Allow different stack have different predefined site properties 
(rmeneses via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/99c64cdb
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/99c64cdb
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/99c64cdb

Branch: refs/heads/branch-1.7.0
Commit: 99c64cdbf2ef070071bd82693a6b9cfb48c15bcb
Parents: 69d0f1a
Author: Jun Aoki 
Authored: Wed Oct 15 15:58:55 2014 -0700
Committer: Jun Aoki 
Committed: Wed Oct 15 15:58:55 2014 -0700

--
 ambari-web/app/data/BIGTOP/site_properties.js | 3724 
 ambari-web/app/utils/config.js|   68 +-
 ambari-web/test/utils/config_test.js  |   48 +
 3 files changed, 3813 insertions(+), 27 deletions(-)
--




[2/3] AMBARI-7354 Allow different stack have different predefined site properties (rmeneses via jaoki)

2014-10-15 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/99c64cdb/ambari-web/app/data/BIGTOP/site_properties.js
--
diff --git a/ambari-web/app/data/BIGTOP/site_properties.js 
b/ambari-web/app/data/BIGTOP/site_properties.js
new file mode 100644
index 000..1270e2a
--- /dev/null
+++ b/ambari-web/app/data/BIGTOP/site_properties.js
@@ -0,0 +1,3724 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var App = require('app');
+
+module.exports =
+{
+  "configProperties": [
+  //* BIGTOP stack **
+  /**HDFS***/
+{
+  "id": "site property",
+  "name": "dfs.namenode.checkpoint.dir",
+  "displayName": "SecondaryNameNode Checkpoint directories",
+  "defaultDirectory": "/hadoop/hdfs/namesecondary",
+  "displayType": "directories",
+  "isOverridable": false,
+  "serviceName": "HDFS",
+  "category": "SECONDARY_NAMENODE",
+  "index": 1
+},
+{
+  "id": "site property",
+  "name": "dfs.namenode.checkpoint.period",
+  "displayName": "HDFS Maximum Checkpoint Delay",
+  "displayType": "int",
+  "unit": "seconds",
+  "category": "General",
+  "serviceName": "HDFS",
+  "index": 3
+},
+{
+  "id": "site property",
+  "name": "dfs.namenode.name.dir",
+  "displayName": "NameNode directories",
+  "defaultDirectory": "/hadoop/hdfs/namenode",
+  "displayType": "directories",
+  "isOverridable": false,
+  "serviceName": "HDFS",
+  "category": "NAMENODE",
+  "index": 1
+},
+{
+  "id": "site property",
+  "name": "dfs.webhdfs.enabled",
+  "displayName": "WebHDFS enabled",
+  "displayType": "checkbox",
+  "isOverridable": false,
+  "category": "General",
+  "serviceName": "HDFS",
+  "index": 0
+},
+{
+  "id": "site property",
+  "name": "dfs.datanode.failed.volumes.tolerated",
+  "displayName": "DataNode volumes failure toleration",
+  "displayType": "int",
+  "category": "DATANODE",
+  "serviceName": "HDFS",
+  "index": 3
+},
+{
+  "id": "site property",
+  "name": "dfs.datanode.data.dir.mount.file",
+  "displayName": "File that stores mount point for each data dir",
+  "description": "File path that contains the last known mount point for 
each data dir. This file is used to avoid creating a DFS data dir on the root 
drive (and filling it up) if a path was previously mounted on a drive.",
+  "defaultValue": "/etc/hadoop/conf/dfs_data_dir_mount.hist",
+  "displayType": "directory",
+  "isVisible": true,
+  "category": "DATANODE",
+  "serviceName": "HDFS",
+  "filename": "hadoop-env.xml",
+  "index": 4
+},
+{
+  "id": "site property",
+  "name": "dfs.datanode.data.dir",
+  "displayName": "DataNode directories",
+  "defaultDirectory": "/hadoop/hdfs/data",
+  "displayType": "directories",
+  "category": "DATANODE",
+  "serviceName": "HDFS",
+  "index": 1
+},
+{
+  "id": "site property",
+  "name": "dfs.datanode.data.dir.perm",
+  "displayName": "DataNode directories permission",
+  "displayType": "int",
+  "category": "DATANODE",
+  "serviceName": "HDFS"
+},
+{
+  "id": "site property",
+  "name": "dfs.replication",
+  "displayName": "Block replication",
+  "displayType": "int",
+  "category": "General",
+  "serviceName": "HDFS"
+},
+{
+  "id": "site property",
+  "name": "dfs.datanode.du.reserved",
+  "displayName": "Reserved space for HDFS",
+  "displayType": "int",
+  "unit": "bytes",
+  "category": "General",
+  "serviceName": "HDFS",
+  "index": 2
+},
+{
+  "id": "site property",
+  "name": "dfs.client.read.shortcircuit",
+  "displayName": "HDFS Short-circuit read",
+  "displayType": "checkbox",
+  "category": "Advanced hdfs-site",
+  "serviceName": "HDFS"
+},
+{
+  "id": "site property",
+  "name": "apache_artifacts_d

[1/3] AMBARI-7354 Allow different stack have different predefined site properties (rmeneses via jaoki)

2014-10-15 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/branch-1.7.0 69d0f1a3d -> 99c64cdbf


http://git-wip-us.apache.org/repos/asf/ambari/blob/99c64cdb/ambari-web/app/utils/config.js
--
diff --git a/ambari-web/app/utils/config.js b/ambari-web/app/utils/config.js
index b31c5ad..cd5b094 100644
--- a/ambari-web/app/utils/config.js
+++ b/ambari-web/app/utils/config.js
@@ -92,13 +92,18 @@ App.config = Em.Object.create({
   }.property('App.isHadoop2Stack'),
 
   preDefinedSiteProperties: function () {
+    var sitePropertiesForCurrentStack = this.preDefinedConfigFile('site_properties');
+    if (sitePropertiesForCurrentStack) {
+      return sitePropertiesForCurrentStack.configProperties;
+    }
+
     if (App.get('isHadoop22Stack')) {
       return require('data/HDP2.2/site_properties').configProperties;
     } else if (App.get('isHadoop2Stack')) {
       return require('data/HDP2/site_properties').configProperties;
     }
     return require('data/site_properties').configProperties;
-  }.property('App.isHadoop2Stack', 'App.isHadoop22Stack'),
+  }.property('App.isHadoop2Stack', 'App.isHadoop22Stack', 'App.currentStackName'),
 
   preDefinedCustomConfigs: function () {
     if (App.get('isHadoop2Stack')) {
@@ -107,6 +112,13 @@ App.config = Em.Object.create({
     return require('data/custom_configs');
   }.property('App.isHadoop2Stack'),
 
+  preDefinedConfigFile: function(file) {
+    try {
+      return require('data/{0}/{1}'.format(App.get('currentStackName'), file));
+    } catch(err) {
+      // the file doesn't exist, which might be expected.
+    }
+  },
   //categories which contain custom configs
   categoriesWithCustom: ['CapacityScheduler'],
 
@@ -118,32 +130,34 @@ App.config = Em.Object.create({
   createContentProperties: function (configs) {
     var services = App.StackService.find();
     var contentProperties = [];
-    services.forEach(function (service) {
-      if (service.get('configTypes')) {
-        Object.keys(service.get('configTypes')).forEach(function (type) {
-          var contentProperty = configs.filterProperty('filename', type + '.xml').someProperty('name', 'content');
-          if (contentProperty && (type.endsWith('-log4j') || type.endsWith('-env'))) {
-            var property = {
-              "id": "site property",
-              "name": "content",
-              "displayName": type.endsWith('-env') ? type + ' template' : "content",
-              "value": "",
-              "defaultValue": "",
-              "description": type + " properties",
-              "displayType": "content",
-              "isOverridable": true,
-              "isRequired": false,
-              "isVisible": true,
-              "showLabel": type.endsWith('-env'),
-              "serviceName": service.get('serviceName'),
-              "filename": type + '.xml',
-              "category": "Advanced " + type
-            };
-            contentProperties.pushObject(property);
-          }
-        }, this);
-      }
-    }, this);
+    if (configs) {
+      services.forEach(function (service) {
+        if (service.get('configTypes')) {
+          Object.keys(service.get('configTypes')).forEach(function (type) {
+            var contentProperty = configs.filterProperty('filename', type + '.xml').someProperty('name', 'content');
+            if (contentProperty && (type.endsWith('-log4j') || type.endsWith('-env'))) {
+              var property = {
+                "id": "site property",
+                "name": "content",
+                "displayName": type.endsWith('-env') ? type + ' template' : "content",
+                "value": "",
+                "defaultValue": "",
+                "description": type + " properties",
+                "displayType": "content",
+                "isOverridable": true,
+                "isRequired": false,
+                "isVisible": true,
+                "showLabel": type.endsWith('-env'),
+                "serviceName": service.get('serviceName'),
+                "filename": type + '.xml',
+                "category": "Advanced " + type
+              };
+              contentProperties.pushObject(property);
+            }
+          }, this);
+        }
+      }, this);
+    }
     return contentProperties;
   },

http://git-wip-us.apache.org/repos/asf/ambari/blob/99c64cdb/ambari-web/test/utils/config_test.js
--
diff --git a/ambari-web/test/utils/config_test.js 
b/ambari-web/test/utils/config_test.js
index 528be46..b04a404 100644
--- a/ambari-web/test/utils/config_test.js
+++ b/ambari-web/test/utils/config_test.js
@@ -421,4 +421,52 @@ describe('App.config', function () {
   expect(ServiceConfig.get('configCategories.length')).to.eql(1);
 });
   });
+
+  describe('#preDefinedConfigFile', function() {
+before(function() {
+  setups.setupStackVersion(this, 'BIGTOP-0.8');
+});
+
+it('bigtop 

git commit: AMBARI-7352 Support create new PostgreSQL database for hive metastore when deploy hive.(adenisso via jaoki)

2014-10-16 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk f873b147a -> 1be4506a4


AMBARI-7352 Support create new PostgreSQL database for hive metastore when 
deploy hive.(adenisso via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1be4506a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1be4506a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1be4506a

Branch: refs/heads/trunk
Commit: 1be4506a460e2c7a228970a512ba8639c4cf4fd2
Parents: f873b14
Author: Jun Aoki 
Authored: Thu Oct 16 13:32:26 2014 -0700
Committer: Jun Aoki 
Committed: Thu Oct 16 13:32:26 2014 -0700

--
 .../controllers/main/service/info/configs.js|  2 +-
 .../app/controllers/wizard/step7_controller.js  |  6 +-
 .../app/controllers/wizard/step8_controller.js  | 12 
 ambari-web/app/messages.js  |  3 +-
 .../app/models/stack_service_component.js   |  2 +-
 ambari-web/app/views/wizard/controls_view.js|  4 +-
 .../test/controllers/wizard/step8_test.js   | 61 +---
 .../test/models/stack_service_component_test.js | 14 -
 8 files changed, 75 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/1be4506a/ambari-web/app/controllers/main/service/info/configs.js
--
diff --git a/ambari-web/app/controllers/main/service/info/configs.js 
b/ambari-web/app/controllers/main/service/info/configs.js
index c676098..79c11b0 100644
--- a/ambari-web/app/controllers/main/service/info/configs.js
+++ b/ambari-web/app/controllers/main/service/info/configs.js
@@ -1668,7 +1668,7 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
   setHiveHostName: function (configs) {
     if (configs.someProperty('name', 'hive_database')) {
       var hiveDb = configs.findProperty('name', 'hive_database');
-      if (hiveDb.value === 'New MySQL Database') {
+      if (hiveDb.value === 'New MySQL Database' || hiveDb.value === 'New PostgreSQL Database') {
        var ambariHost = configs.findProperty('name', 'hive_ambari_host');
        if (ambariHost) {
          ambariHost.name = 'hive_hostname';

http://git-wip-us.apache.org/repos/asf/ambari/blob/1be4506a/ambari-web/app/controllers/wizard/step7_controller.js
--
diff --git a/ambari-web/app/controllers/wizard/step7_controller.js 
b/ambari-web/app/controllers/wizard/step7_controller.js
index c6ffc3f..44d0a4e 100644
--- a/ambari-web/app/controllers/wizard/step7_controller.js
+++ b/ambari-web/app/controllers/wizard/step7_controller.js
@@ -1288,11 +1288,11 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, {
     var configMap = [
       {
         serviceName: 'OOZIE',
-        ignored: Em.I18n.t('installer.step7.oozie.database.new')
+        ignored: [Em.I18n.t('installer.step7.oozie.database.new')]
       },
       {
         serviceName: 'HIVE',
-        ignored: Em.I18n.t('installer.step7.hive.database.new')
+        ignored: [Em.I18n.t('installer.step7.hive.database.new.mysql'), Em.I18n.t('installer.step7.hive.database.new.postgres')]
       }
     ];
     configMap.forEach(function (config) {
@@ -1301,7 +1301,7 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, {
       if (service && service.get('isSelected') && !service.get('isInstalled')) {
         var serviceConfigs = this.get('stepConfigs').findProperty('serviceName', config.serviceName).configs;
         var serviceDatabase = serviceConfigs.findProperty('name', config.serviceName.toLowerCase() + '_database').get('value');
-        if (serviceDatabase !== config.ignored) {
+        if (!config.ignored.contains(serviceDatabase)) {
           var filledProperties = App.db.get('tmp', config.serviceName + '_connection');
           if (!filledProperties || App.isEmptyObject(filledProperties)) {
             isConnectionNotTested = true;
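The fix in miniature: 'ignored' changes from a single string to a list, and the equality test becomes a membership test, so a new PostgreSQL metastore is treated the same way as a new MySQL one. A Python sketch of the corrected check:

    NEW_HIVE_DATABASES = ['New MySQL Database', 'New PostgreSQL Database']

    def needs_connection_test(service_database):
        # old, broken for PostgreSQL: service_database != 'New MySQL Database'
        return service_database not in NEW_HIVE_DATABASES

    assert not needs_connection_test('New PostgreSQL Database')
    assert needs_connection_test('Existing MySQL Database')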

http://git-wip-us.apache.org/repos/asf/ambari/blob/1be4506a/ambari-web/app/controllers/wizard/step8_controller.js
--
diff --git a/ambari-web/app/controllers/wizard/step8_controller.js 
b/ambari-web/app/controllers/wizard/step8_controller.js
index 688f7ce..29e3cd6 100644
--- a/ambari-web/app/controllers/wizard/step8_controller.js
+++ b/ambari-web/app/controllers/wizard/step8_controller.js
@@ -246,6 +246,14 @@ App.WizardStep8Controller = 
Em.Cont

git commit: AMBARI-7352 Support create new PostgreSQL database for hive metastore when deploy hive.(adenisso via jaoki)

2014-10-16 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/branch-1.7.0 b7910ab64 -> 2414e599e


AMBARI-7352 Support create new PostgreSQL database for hive metastore when 
deploy hive.(adenisso via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2414e599
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2414e599
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2414e599

Branch: refs/heads/branch-1.7.0
Commit: 2414e599e32a9280f44fc1207d6f60db7497e905
Parents: b7910ab
Author: Jun Aoki 
Authored: Thu Oct 16 13:33:24 2014 -0700
Committer: Jun Aoki 
Committed: Thu Oct 16 13:33:24 2014 -0700

--
 .../controllers/main/service/info/configs.js|  2 +-
 .../app/controllers/wizard/step7_controller.js  |  6 +-
 .../app/controllers/wizard/step8_controller.js  | 12 
 ambari-web/app/messages.js  |  3 +-
 .../app/models/stack_service_component.js   |  2 +-
 ambari-web/app/views/wizard/controls_view.js|  4 +-
 .../test/controllers/wizard/step8_test.js   | 61 +---
 .../test/models/stack_service_component_test.js | 14 -
 8 files changed, 75 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/2414e599/ambari-web/app/controllers/main/service/info/configs.js
--
diff --git a/ambari-web/app/controllers/main/service/info/configs.js 
b/ambari-web/app/controllers/main/service/info/configs.js
index 8ef2bbf..1be52e9 100644
--- a/ambari-web/app/controllers/main/service/info/configs.js
+++ b/ambari-web/app/controllers/main/service/info/configs.js
@@ -1668,7 +1668,7 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
   setHiveHostName: function (configs) {
     if (configs.someProperty('name', 'hive_database')) {
       var hiveDb = configs.findProperty('name', 'hive_database');
-      if (hiveDb.value === 'New MySQL Database') {
+      if (hiveDb.value === 'New MySQL Database' || hiveDb.value === 'New PostgreSQL Database') {
        var ambariHost = configs.findProperty('name', 'hive_ambari_host');
        if (ambariHost) {
          ambariHost.name = 'hive_hostname';

http://git-wip-us.apache.org/repos/asf/ambari/blob/2414e599/ambari-web/app/controllers/wizard/step7_controller.js
--
diff --git a/ambari-web/app/controllers/wizard/step7_controller.js 
b/ambari-web/app/controllers/wizard/step7_controller.js
index c6ffc3f..44d0a4e 100644
--- a/ambari-web/app/controllers/wizard/step7_controller.js
+++ b/ambari-web/app/controllers/wizard/step7_controller.js
@@ -1288,11 +1288,11 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, {
     var configMap = [
       {
         serviceName: 'OOZIE',
-        ignored: Em.I18n.t('installer.step7.oozie.database.new')
+        ignored: [Em.I18n.t('installer.step7.oozie.database.new')]
       },
       {
         serviceName: 'HIVE',
-        ignored: Em.I18n.t('installer.step7.hive.database.new')
+        ignored: [Em.I18n.t('installer.step7.hive.database.new.mysql'), Em.I18n.t('installer.step7.hive.database.new.postgres')]
       }
     ];
     configMap.forEach(function (config) {
@@ -1301,7 +1301,7 @@ App.WizardStep7Controller = Em.Controller.extend(App.ServerValidatorMixin, {
       if (service && service.get('isSelected') && !service.get('isInstalled')) {
         var serviceConfigs = this.get('stepConfigs').findProperty('serviceName', config.serviceName).configs;
         var serviceDatabase = serviceConfigs.findProperty('name', config.serviceName.toLowerCase() + '_database').get('value');
-        if (serviceDatabase !== config.ignored) {
+        if (!config.ignored.contains(serviceDatabase)) {
           var filledProperties = App.db.get('tmp', config.serviceName + '_connection');
           if (!filledProperties || App.isEmptyObject(filledProperties)) {
             isConnectionNotTested = true;

http://git-wip-us.apache.org/repos/asf/ambari/blob/2414e599/ambari-web/app/controllers/wizard/step8_controller.js
--
diff --git a/ambari-web/app/controllers/wizard/step8_controller.js 
b/ambari-web/app/controllers/wizard/step8_controller.js
index 688f7ce..29e3cd6 100644
--- a/ambari-web/app/controllers/wizard/step8_controller.js
+++ b/ambari-web/app/controllers/wizard/step8_controller.js
@@ -246,6 +246,14 @

git commit: AMBARI-7601 Service pluggability: refactor UI code to externalize metrics graph definition to a single file (salvi via jaoki)

2014-10-16 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 1be4506a4 -> 1cf2b9852


AMBARI-7601 Service pluggability: refactor UI code to externalize metrics graph 
definition to a single file (salvi via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1cf2b985
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1cf2b985
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1cf2b985

Branch: refs/heads/trunk
Commit: 1cf2b98520914f32ac33685951fc24cfadb55be6
Parents: 1be4506
Author: Jun Aoki 
Authored: Thu Oct 16 14:55:36 2014 -0700
Committer: Jun Aoki 
Committed: Thu Oct 16 14:55:36 2014 -0700

--
 ambari-web/app/assets/test/tests.js |  2 +
 ambari-web/app/data/service_graph_config.js | 93 +++
 .../app/views/main/service/info/summary.js  | 96 ++--
 .../views/main/service/info/summary_test.js | 24 -
 4 files changed, 146 insertions(+), 69 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/1cf2b985/ambari-web/app/assets/test/tests.js
--
diff --git a/ambari-web/app/assets/test/tests.js 
b/ambari-web/app/assets/test/tests.js
index 1dff1b2..65082ab 100644
--- a/ambari-web/app/assets/test/tests.js
+++ b/ambari-web/app/assets/test/tests.js
@@ -167,6 +167,8 @@ var files = ['test/init_model_test',
   'test/views/main/charts/heatmap/heatmap_host_test',
   'test/views/main/service/item_test',
   'test/views/main/service/info/config_test',
+  'test/views/main/service/info/summary_test',
+  'test/views/main/mirroring/edit_dataset_view_test',
   'test/views/common/configs/services_config_test',
   'test/views/wizard/step3/hostLogPopupBody_view_test',
   'test/views/wizard/step3/hostWarningPopupBody_view_test',

http://git-wip-us.apache.org/repos/asf/ambari/blob/1cf2b985/ambari-web/app/data/service_graph_config.js
--
diff --git a/ambari-web/app/data/service_graph_config.js 
b/ambari-web/app/data/service_graph_config.js
new file mode 100644
index 000..51a9f9e
--- /dev/null
+++ b/ambari-web/app/data/service_graph_config.js
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var App = require('app');
+
+/**
+This determines the graphs to display on the service page under each service.
+
+This is based on the name of the object associated with it.
+
+The name of the object is of the format: 'App.ChartServiceMetrics<name>' where <name>
+is one of the items below.
+**/
+App.service_graph_config = {
+   'hdfs': [
+   'HDFS_SpaceUtilization',
+   'HDFS_FileOperations',
+   'HDFS_BlockStatus',
+   'HDFS_IO',
+   'HDFS_RPC',
+   'HDFS_GC',
+   'HDFS_JVMHeap',
+   'HDFS_JVMThreads'
+   ],
+
+   'yarn': [
+   'YARN_AllocatedMemory',
+   'YARN_QMR',
+   'YARN_AllocatedContainer',
+   'YARN_NMS',
+   'YARN_ApplicationCurrentStates',
+   'YARN_ApplicationFinishedStates',
+   'YARN_RPC',
+   'YARN_GC',
+   'YARN_JVMThreads',
+   'YARN_JVMHeap'
+   ],
+
+   'mapreduce': [
+   'MapReduce_JobsStatus',
+   'MapReduce_TasksRunningWaiting',
+   'MapReduce_MapSlots',
+   'MapReduce_ReduceSlots',
+   'MapReduce_GC',
+   'MapReduce_RPC',
+   'MapReduce_JVMHeap',
+   'MapReduce_
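The refactor replaces hard-coded chart views with a data-driven map: each service lists chart names, and the summary view resolves the chart object App['ChartServiceMetrics' + name] for each entry. A Python analogue of that dynamic lookup, with illustrative names and a truncated chart list:

    SERVICE_GRAPH_CONFIG = {
        'hdfs': ['HDFS_SpaceUtilization', 'HDFS_FileOperations', 'HDFS_RPC'],
        'yarn': ['YARN_AllocatedMemory', 'YARN_QMR'],
    }

    class ChartRegistry:
        HDFS_SpaceUtilization = 'space-utilization chart'
        HDFS_FileOperations = 'file-operations chart'
        HDFS_RPC = 'rpc chart'

    def charts_for(service_name):
        # getattr mirrors the JS lookup App['ChartServiceMetrics' + name]
        return [getattr(ChartRegistry, name)
                for name in SERVICE_GRAPH_CONFIG.get(service_name, [])]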

git commit: AMBARI-7601 Service pluggability: refactor UI code to externalize metrics graph definition to a single file (salvi via jaoki)

2014-10-16 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/branch-1.7.0 2414e599e -> c0b54499b


AMBARI-7601 Service pluggability: refactor UI code to externalize metrics graph 
definition to a single file (salvi via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c0b54499
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c0b54499
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c0b54499

Branch: refs/heads/branch-1.7.0
Commit: c0b54499b202f1829bcc441f7344e6bf69eb7db7
Parents: 2414e59
Author: Jun Aoki 
Authored: Thu Oct 16 14:56:19 2014 -0700
Committer: Jun Aoki 
Committed: Thu Oct 16 14:56:19 2014 -0700

--
 ambari-web/app/assets/test/tests.js |  2 +
 ambari-web/app/data/service_graph_config.js | 93 +++
 .../app/views/main/service/info/summary.js  | 96 ++--
 .../views/main/service/info/summary_test.js | 24 -
 4 files changed, 146 insertions(+), 69 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/c0b54499/ambari-web/app/assets/test/tests.js
--
diff --git a/ambari-web/app/assets/test/tests.js 
b/ambari-web/app/assets/test/tests.js
index b54452a..8682af3 100644
--- a/ambari-web/app/assets/test/tests.js
+++ b/ambari-web/app/assets/test/tests.js
@@ -163,6 +163,8 @@ var files = ['test/init_model_test',
   'test/views/main/charts/heatmap/heatmap_host_test',
   'test/views/main/service/item_test',
   'test/views/main/service/info/config_test',
+  'test/views/main/service/info/summary_test',
+  'test/views/main/mirroring/edit_dataset_view_test',
   'test/views/common/configs/services_config_test',
   'test/views/wizard/step3/hostLogPopupBody_view_test',
   'test/views/wizard/step3/hostWarningPopupBody_view_test',

http://git-wip-us.apache.org/repos/asf/ambari/blob/c0b54499/ambari-web/app/data/service_graph_config.js
--
diff --git a/ambari-web/app/data/service_graph_config.js 
b/ambari-web/app/data/service_graph_config.js
new file mode 100644
index 000..51a9f9e
--- /dev/null
+++ b/ambari-web/app/data/service_graph_config.js
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var App = require('app');
+
+/**
+This determines the graphs to display on the service page under each service.
+
+This is based on the name of the object associated with it.
+
+The name of the object is of the format: 'App.ChartServiceMetrics<name>', where
+<name> is one of the items below.
+**/
+App.service_graph_config = {
+   'hdfs': [
+   'HDFS_SpaceUtilization',
+   'HDFS_FileOperations',
+   'HDFS_BlockStatus',
+   'HDFS_IO',
+   'HDFS_RPC',
+   'HDFS_GC',
+   'HDFS_JVMHeap',
+   'HDFS_JVMThreads'
+   ],
+
+   'yarn': [
+   'YARN_AllocatedMemory',
+   'YARN_QMR',
+   'YARN_AllocatedContainer',
+   'YARN_NMS',
+   'YARN_ApplicationCurrentStates',
+   'YARN_ApplicationFinishedStates',
+   'YARN_RPC',
+   'YARN_GC',
+   'YARN_JVMThreads',
+   'YARN_JVMHeap'
+   ],
+
+   'mapreduce': [
+   'MapReduce_JobsStatus',
+   'MapReduce_TasksRunningWaiting',
+   'MapReduce_MapSlots',
+   'MapReduce_ReduceSlots',
+   'MapReduce_GC',
+   'MapReduce_RPC',
+   'MapReduce_JVMHeap',
+   'MapReduce_

git commit: AMBARI-7810 TestAlerts fails when executed with python2.6 (jaoki)

2014-10-20 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 65611067b -> c52c180a3


AMBARI-7810 TestAlerts fails when executed with python2.6 (jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c52c180a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c52c180a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c52c180a

Branch: refs/heads/trunk
Commit: c52c180a3fe0376292e2d58b6f267530b5074d7f
Parents: 6561106
Author: jaoki 
Authored: Mon Oct 20 22:18:46 2014 -0700
Committer: jaoki 
Committed: Mon Oct 20 22:18:46 2014 -0700

--
 ambari-agent/src/test/python/ambari_agent/TestAlerts.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/c52c180a/ambari-agent/src/test/python/ambari_agent/TestAlerts.py
--
diff --git a/ambari-agent/src/test/python/ambari_agent/TestAlerts.py 
b/ambari-agent/src/test/python/ambari_agent/TestAlerts.py
index 1bacf24..78f3e10 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestAlerts.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestAlerts.py
@@ -256,7 +256,7 @@ class TestAlerts(TestCase):
 
 res = pa.collect()
 
-self.assertIsNotNone(collector.alerts()[0])
+self.assertTrue(collector.alerts()[0] is not None)
 self.assertEquals('CRITICAL', collector.alerts()[0]['state'])
 
 collector.remove_by_uuid('c1f73191-4481-4435-8dae-fd380e4c0be1')
@@ -354,4 +354,4 @@ class TestAlerts(TestCase):
 
 # execute the alert immediately and verify that the collector has the 
result
 ash.execute_alert(execution_commands)
-self.assertEquals(1, len(ash._collector.alerts()))
\ No newline at end of file
+self.assertEquals(1, len(ash._collector.alerts()))
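
The fix above swaps assertIsNotNone, which unittest only gained in python2.7, for an identity check that python2.6's unittest can run. A minimal self-contained sketch of the pattern; the test class and alert data are illustrative stand-ins, not the real TestAlerts fixture:

```
import unittest

class CompatExampleTest(unittest.TestCase):
    def test_alert_present(self):
        alerts = [{'state': 'CRITICAL'}]        # stand-in for collector.alerts()
        # python2.7+ only: self.assertIsNotNone(alerts[0])
        self.assertTrue(alerts[0] is not None)  # portable back to python2.6
        self.assertEquals('CRITICAL', alerts[0]['state'])

if __name__ == '__main__':
    unittest.main()
```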



git commit: AMBARI-7728 Dialog overlays do not receive keyboard focus (apenniston via jaoki)

2014-10-24 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 33ed5921d -> ad9b02b15


AMBARI-7728 Dialog overlays do not receive keyboard focus (apenniston via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ad9b02b1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ad9b02b1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ad9b02b1

Branch: refs/heads/trunk
Commit: ad9b02b155d3b1e7f9a47b20a5dc4328fe016081
Parents: 33ed592
Author: Jun Aoki 
Authored: Fri Oct 24 18:04:46 2014 -0700
Committer: Jun Aoki 
Committed: Fri Oct 24 18:04:46 2014 -0700

--
 ambari-web/app/assets/test/tests.js |  1 +
 ambari-web/app/views/common/modal_popup.js  | 21 ++---
 .../test/views/common/modal_popup_test.js   | 46 
 3 files changed, 62 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/ad9b02b1/ambari-web/app/assets/test/tests.js
--
diff --git a/ambari-web/app/assets/test/tests.js 
b/ambari-web/app/assets/test/tests.js
index ed515cb..3061733 100644
--- a/ambari-web/app/assets/test/tests.js
+++ b/ambari-web/app/assets/test/tests.js
@@ -140,6 +140,7 @@ var files = ['test/init_model_test',
   'test/views/common/table_view_test',
   'test/views/common/quick_link_view_test',
   'test/views/common/rolling_restart_view_test',
+  'test/views/common/modal_popup_test',
   'test/views/common/configs/config_history_flow_test',
   'test/views/main/dashboard_test',
   'test/views/main/menu_test',

http://git-wip-us.apache.org/repos/asf/ambari/blob/ad9b02b1/ambari-web/app/views/common/modal_popup.js
--
diff --git a/ambari-web/app/views/common/modal_popup.js 
b/ambari-web/app/views/common/modal_popup.js
index 223a77a..78c9387 100644
--- a/ambari-web/app/views/common/modal_popup.js
+++ b/ambari-web/app/views/common/modal_popup.js
@@ -63,21 +63,30 @@ App.ModalPopup = Ember.View.extend({
 
   didInsertElement: function () {
 if (this.autoHeight) {
-  var block = this.$().find('#modal > .modal-body').first();
-  block.css('max-height', $(window).height() - block.offset().top - 300 + 
$(window).scrollTop()); // fix popup height
+  var block = $('#modal > .modal-body').first();
+  if(block.offset()) {
+block.css('max-height', $(window).height() - block.offset().top  - 300 
+ $(window).scrollTop()); // fix popup height
+  }
 }
 // If popup is opened from another popup it should be displayed above
-var existedPopups = $(document).find('.modal-backdrop');
+var existedPopups = $('.modal-backdrop');
 if (existedPopups) {
   var maxZindex = 1;
   existedPopups.each(function(index, popup) {
 if ($(popup).css('z-index') > maxZindex) {
   maxZindex = $(popup).css('z-index');
-}
+  }
   });
-  this.$().find('.modal-backdrop').css('z-index', maxZindex * 2);
-  this.$().find('.modal').css('z-index', maxZindex * 2 + 1);
+  $('.modal-backdrop').css('z-index', maxZindex * 2);
+  $('.modal').css('z-index', maxZindex * 2 + 1);
 }
+
+var firstInputElement = 
$('#modal').find(':input').not(':disabled').first();
+this.focusElement(firstInputElement);
+  },
+
+  focusElement: function(elem) {
+elem.focus();
   },
 
   fitHeight: function () {

http://git-wip-us.apache.org/repos/asf/ambari/blob/ad9b02b1/ambari-web/test/views/common/modal_popup_test.js
--
diff --git a/ambari-web/test/views/common/modal_popup_test.js 
b/ambari-web/test/views/common/modal_popup_test.js
new file mode 100644
index 000..cd473aa
--- /dev/null
+++ b/ambari-web/test/views/common/modal_popup_test.js
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CO

git commit: AMBARI-6700 Rest API Null Pointer Exception for non-existent host in HostRoles/host_name filter for host_components (rjaltare via jaoki)

2014-10-27 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk c0c738072 -> 148f5a9b8


AMBARI-6700 Rest API Null Pointer Exception for non-existent host in 
HostRoles/host_name filter for host_components (rjaltare via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/148f5a9b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/148f5a9b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/148f5a9b

Branch: refs/heads/trunk
Commit: 148f5a9b8ea0ba666b485c2080ace556a7dbf5b1
Parents: c0c7380
Author: Jun Aoki 
Authored: Mon Oct 27 12:58:05 2014 -0700
Committer: Jun Aoki 
Committed: Mon Oct 27 12:58:05 2014 -0700

--
 .../org/apache/ambari/server/state/cluster/ClustersImpl.java | 3 +++
 .../org/apache/ambari/server/state/cluster/ClustersTest.java | 8 
 2 files changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/148f5a9b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
--
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
index a57e9c2..4cfa308 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClustersImpl.java
@@ -286,6 +286,9 @@ public class ClustersImpl implements Clusters {
 checkLoaded();
 r.lock();
 try {
+  if(!hostClusterMap.containsKey(hostname)){
+throw new HostNotFoundException(hostname);
+  }
   if (LOG.isDebugEnabled()) {
 LOG.debug("Looking up clusters for hostname"
 + ", hostname=" + hostname

http://git-wip-us.apache.org/repos/asf/ambari/blob/148f5a9b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
--
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
index 2baa5c5..f90c2c5 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClustersTest.java
@@ -203,6 +203,7 @@ public class ClustersTest {
 String h1 = "h1";
 String h2 = "h2";
 String h3 = "h3";
+String h4 = "h4";
 
 try {
   clusters.mapHostToCluster(h1, c1);
@@ -235,6 +236,13 @@ public class ClustersTest {
 clusters.getHost(h2).persist();
 clusters.getHost(h3).persist();
 
+try {
+  clusters.getClustersForHost(h4);
+  fail("Expected exception for invalid host");
+} catch (HostNotFoundException e) {
+  // Expected
+}
+
 Set<Cluster> c = clusters.getClustersForHost(h3);
 Assert.assertEquals(0, c.size());
 



git commit: AMBARI-7875 Docker build on builds.a.o and support JDK1.6 and python2.6 (jaoki)

2014-10-27 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 148f5a9b8 -> b42c3c9be


AMBARI-7875 Docker build on builds.a.o and support JDK1.6 and python2.6 (jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b42c3c9b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b42c3c9b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b42c3c9b

Branch: refs/heads/trunk
Commit: b42c3c9bed122368d66e317d8943071a69534ac2
Parents: 148f5a9
Author: Jun Aoki 
Authored: Mon Oct 27 13:02:54 2014 -0700
Committer: Jun Aoki 
Committed: Mon Oct 27 13:02:54 2014 -0700

--
 dev-support/docker/README.md|  51 
 dev-support/docker/docker/Dockerfile|  79 +++
 dev-support/docker/docker/bin/__init__.py   |  12 +
 dev-support/docker/docker/bin/ambaribuild.py| 233 +++
 dev-support/docker/docker/bin/test/__init__.py  |  12 +
 .../docker/docker/bin/test/ambaribuild_test.py  |  70 ++
 .../single-node-HDP-2.1-blueprint1.json |  64 +
 .../blueprints/single-node-hostmapping1.json|  14 ++
 8 files changed, 535 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/b42c3c9b/dev-support/docker/README.md
--
diff --git a/dev-support/docker/README.md b/dev-support/docker/README.md
new file mode 100644
index 000..6613e0d
--- /dev/null
+++ b/dev-support/docker/README.md
@@ -0,0 +1,51 @@
+
+
+
+how to build
+
+
+```
+docker build -t ambari/build ./docker
+```
+
+how to run
+
+
+```
+# bash
+docker run --privileged -t -i -p 80:80 -p 5005:5005 -p 8080:8080 -h 
node1.mydomain.com --name ambari1 -v ${AMBARI_SRC:-$(pwd)}:/tmp/ambari 
ambari/build bash
+# where 5005 is the java debug port and 8080 is the default http port; without 
--privileged, ambari-server start fails due to access to /proc/??/exe
+# -t is required; otherwise, sudo commands do not run
+
+# build, install ambari and deploy hadoop in container
+cd {ambari src}
+docker rm ambari1
+docker run --privileged -t -p 80:80 -p 5005:5005 -p 8080:8080 -h 
node1.mydomain.com --name ambari1 -v ${AMBARI_SRC:-$(pwd)}:/tmp/ambari 
ambari/build /tmp/ambari-build-docker/bin/ambaribuild.py 
[test|server|agent|deploy] [-b] [-s [HDP|BIGTOP|PHD]]
+where
+test: mvn test
+server: install and run ambari-server
+agent: install and run ambari-server and ambari-agent
+deploy: install and run ambari-server and ambari-agent, and deploy a hadoop cluster
+-b option to rebuild ambari
+```
+
+how to run unit test
+
+```
+cd docker
+python -m bin.test.ambaribuild_test
+
+```
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/b42c3c9b/dev-support/docker/docker/Dockerfile
--
diff --git a/dev-support/docker/docker/Dockerfile 
b/dev-support/docker/docker/Dockerfile
new file mode 100644
index 000..96ba8ff
--- /dev/null
+++ b/dev-support/docker/docker/Dockerfile
@@ -0,0 +1,79 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+FROM centos:centos6
+
+RUN echo root:changeme | chpasswd
+
+## Install some basic utilities that aren't in the default image
+RUN yum -y install vim wget rpm-build sudo which telnet tar openssh-server 
openssh-clients ntp git python-setuptools httpd
+# phantomjs dependency
+RUN yum -y install fontconfig freetype libfreetype.so.6 libfontconfig.so.1 
libstdc++.so.6
+RUN rpm -e --nodeps --justdb glibc-common
+RUN yum -y install glibc-common
+
+ENV HOME /root
+
+#Install JAVA
+# RUN wget --no-check-certificate --no-cookies --header 
"Cookie:oraclelicense=accept-securebackup-cookie" 
http://download.oracle.com/otn-pub/java/jdk/7u55-b13/jdk-7u55-linux-x64.rpm -O 
jdk-7u55-linux-x64.rpm
+# RUN yum -y install jdk-7u55-linux-x64.rpm
+RUN wget --no-check-certificate --no-cookies --header 
"Cookie:oraclelicense=accept-securebackup-cookie" 
http://download.oracle.com/otn-pub/java/jdk/6u45-b06/jdk-6u45-linux-x64-rpm.bin 
-O jdk-6u45-linux-x64-rpm.bin
+RUN chmod +x jdk-6u45-linux-x64-rpm.bin
+RUN ./jdk-6u45-linux-x64-rpm.bin
+ENV JAVA_HOME /usr/java/default/
+
+#Install Maven
+RUN mkdir -p /opt/maven
+WORKDIR /opt/maven
+RUN wget 
http://apache.cs.ut

git commit: AMBARI-7875 Docker build on builds.a.o and support JDK1.6 and python2.6 (jaoki)

2014-10-27 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/branch-1.7.0 1d10aa5dc -> af6428a57


AMBARI-7875 Docker build on builds.a.o and support JDK1.6 and python2.6 (jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/af6428a5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/af6428a5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/af6428a5

Branch: refs/heads/branch-1.7.0
Commit: af6428a574bc8bff733771187410ef72914cbe4e
Parents: 1d10aa5
Author: Jun Aoki 
Authored: Mon Oct 27 13:07:28 2014 -0700
Committer: Jun Aoki 
Committed: Mon Oct 27 13:07:28 2014 -0700

--
 dev-support/docker/README.md|  51 
 dev-support/docker/docker/Dockerfile|  79 +++
 dev-support/docker/docker/bin/__init__.py   |  12 +
 dev-support/docker/docker/bin/ambaribuild.py| 233 +++
 dev-support/docker/docker/bin/test/__init__.py  |  12 +
 .../docker/docker/bin/test/ambaribuild_test.py  |  70 ++
 .../single-node-HDP-2.1-blueprint1.json |  64 +
 .../blueprints/single-node-hostmapping1.json|  14 ++
 8 files changed, 535 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/af6428a5/dev-support/docker/README.md
--
diff --git a/dev-support/docker/README.md b/dev-support/docker/README.md
new file mode 100644
index 000..6613e0d
--- /dev/null
+++ b/dev-support/docker/README.md
@@ -0,0 +1,51 @@
+
+
+
+how to build
+
+
+```
+docker build -t ambari/build ./docker
+```
+
+how to run
+
+
+```
+# bash
+docker run --privileged -t -i -p 80:80 -p 5005:5005 -p 8080:8080 -h 
node1.mydomain.com --name ambari1 -v ${AMBARI_SRC:-$(pwd)}:/tmp/ambari 
ambari/build bash
+# where 5005 is the java debug port and 8080 is the default http port; without 
--privileged, ambari-server start fails due to access to /proc/??/exe
+# -t is required; otherwise, sudo commands do not run
+
+# build, install ambari and deploy hadoop in container
+cd {ambari src}
+docker rm ambari1
+docker run --privileged -t -p 80:80 -p 5005:5005 -p 8080:8080 -h 
node1.mydomain.com --name ambari1 -v ${AMBARI_SRC:-$(pwd)}:/tmp/ambari 
ambari/build /tmp/ambari-build-docker/bin/ambaribuild.py 
[test|server|agent|deploy] [-b] [-s [HDP|BIGTOP|PHD]]
+where
+test: mvn test
+server: install and run ambari-server
+agent: install and run ambari-server and ambari-agent
+deploy: install and run ambari-server and ambari-agent, and deploy a hadoop cluster
+-b option to rebuild ambari
+```
+
+how to run unit test
+
+```
+cd docker
+python -m bin.test.ambaribuild_test
+
+```
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/af6428a5/dev-support/docker/docker/Dockerfile
--
diff --git a/dev-support/docker/docker/Dockerfile 
b/dev-support/docker/docker/Dockerfile
new file mode 100644
index 000..96ba8ff
--- /dev/null
+++ b/dev-support/docker/docker/Dockerfile
@@ -0,0 +1,79 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+FROM centos:centos6
+
+RUN echo root:changeme | chpasswd
+
+## Install some basic utilities that aren't in the default image
+RUN yum -y install vim wget rpm-build sudo which telnet tar openssh-server 
openssh-clients ntp git python-setuptools httpd
+# phantomjs dependency
+RUN yum -y install fontconfig freetype libfreetype.so.6 libfontconfig.so.1 
libstdc++.so.6
+RUN rpm -e --nodeps --justdb glibc-common
+RUN yum -y install glibc-common
+
+ENV HOME /root
+
+#Install JAVA
+# RUN wget --no-check-certificate --no-cookies --header 
"Cookie:oraclelicense=accept-securebackup-cookie" 
http://download.oracle.com/otn-pub/java/jdk/7u55-b13/jdk-7u55-linux-x64.rpm -O 
jdk-7u55-linux-x64.rpm
+# RUN yum -y install jdk-7u55-linux-x64.rpm
+RUN wget --no-check-certificate --no-cookies --header 
"Cookie:oraclelicense=accept-securebackup-cookie" 
http://download.oracle.com/otn-pub/java/jdk/6u45-b06/jdk-6u45-linux-x64-rpm.bin 
-O jdk-6u45-linux-x64-rpm.bin
+RUN chmod +x jdk-6u45-linux-x64-rpm.bin
+RUN ./jdk-6u45-linux-x64-rpm.bin
+ENV JAVA_HOME /usr/java/default/
+
+#Install Maven
+RUN mkdir -p /opt/maven
+WORKDIR /opt/maven
+RUN wget 
http:/

git commit: AMBARI-7663 Confirmation dialogues needs clearer messages regarding actions on services (salvi via jaoki)

2014-10-28 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 96517a9b1 -> db0aa3e14


AMBARI-7663 Confirmation dialogues needs clearer messages regarding actions on 
services (salvi via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/db0aa3e1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/db0aa3e1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/db0aa3e1

Branch: refs/heads/trunk
Commit: db0aa3e1411d341919590b09cca8e0346a66b46a
Parents: 96517a9
Author: Jun Aoki 
Authored: Tue Oct 28 12:58:58 2014 -0700
Committer: Jun Aoki 
Committed: Tue Oct 28 12:58:58 2014 -0700

--
 ambari-web/app/controllers/main/service.js  |  7 +-
 ambari-web/app/controllers/main/service/item.js |  4 +--
 ambari-web/app/messages.js  |  4 +++
 .../test/controllers/main/service/item_test.js  | 16 
 .../test/controllers/main/service_test.js   | 26 
 5 files changed, 54 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/db0aa3e1/ambari-web/app/controllers/main/service.js
--
diff --git a/ambari-web/app/controllers/main/service.js 
b/ambari-web/app/controllers/main/service.js
index a6d5983..c487931 100644
--- a/ambari-web/app/controllers/main/service.js
+++ b/ambari-web/app/controllers/main/service.js
@@ -117,9 +117,14 @@ App.MainServiceController = Em.ArrayController.extend({
   return null;
 }
 var self = this;
+var bodyMessage = Em.Object.create({
+  confirmMsg: state == 'INSTALLED' ? 
Em.I18n.t('services.service.stopAll.confirmMsg') : 
Em.I18n.t('services.service.startAll.confirmMsg'),
+  confirmButton: state == 'INSTALLED' ? 
Em.I18n.t('services.service.stop.confirmButton') : 
Em.I18n.t('services.service.start.confirmButton')
+});
+
 return App.showConfirmationFeedBackPopup(function (query) {
   self.allServicesCall(state, query);
-});
+}, bodyMessage);
   },
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/db0aa3e1/ambari-web/app/controllers/main/service/item.js
--
diff --git a/ambari-web/app/controllers/main/service/item.js 
b/ambari-web/app/controllers/main/service/item.js
index 7339e77..2666032 100644
--- a/ambari-web/app/controllers/main/service/item.js
+++ b/ambari-web/app/controllers/main/service/item.js
@@ -165,8 +165,8 @@ App.MainServiceItemController = Em.Controller.extend({
 var bodyMessage = Em.Object.create({
   putInMaintenance: (serviceHealth == 'INSTALLED' && isMaintenanceOFF) || 
(serviceHealth == 'STARTED' && !isMaintenanceOFF),
   turnOnMmMsg: serviceHealth == 'INSTALLED' ? 
Em.I18n.t('passiveState.turnOnFor').format(serviceDisplayName) : 
Em.I18n.t('passiveState.turnOffFor').format(serviceDisplayName),
-  confirmMsg: serviceHealth == 'INSTALLED'? 
Em.I18n.t('services.service.stop.confirmMsg').format(serviceDisplayName) : 
Em.I18n.t('question.sure'),
-  confirmButton: serviceHealth == 'INSTALLED'? 
Em.I18n.t('services.service.stop.confirmButton') : Em.I18n.t('ok'),
+  confirmMsg: serviceHealth == 'INSTALLED'? 
Em.I18n.t('services.service.stop.confirmMsg').format(serviceDisplayName) : 
Em.I18n.t('services.service.start.confirmMsg').format(serviceDisplayName),
+  confirmButton: serviceHealth == 'INSTALLED'? 
Em.I18n.t('services.service.stop.confirmButton') : 
Em.I18n.t('services.service.start.confirmButton'),
   additionalWarningMsg:  isMaintenanceOFF && serviceHealth == 'INSTALLED'? 
Em.I18n.t('services.service.stop.warningMsg.turnOnMM').format(serviceDisplayName)
 : null
 });
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/db0aa3e1/ambari-web/app/messages.js
--
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 99a429c..d084630 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -1460,8 +1460,12 @@ Em.I18n.translations = {
   'services.service.add':'Add Service',
   'services.service.startAll':'Start All',
   'services.service.stopAll':'Stop All',
+  'services.service.startAll.confirmMsg' : 'You are about to start all 
services',
+  'services.service.stopAll.confirmMsg' : 'You are about to stop all services',
+  'services.service.start.confirmMsg' : '

git commit: AMBARI-7934 For the config groups dropdown selector, the combobox's text field should extend the dropdown selection (apenniston via jaoki)

2014-10-28 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 6742635d7 -> 552792b93


AMBARI-7934 For the config groups dropdown selector, the combobox's text field 
should extend the dropdown selection (apenniston via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/552792b9
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/552792b9
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/552792b9

Branch: refs/heads/trunk
Commit: 552792b932ec3804b2a411b8c164eabba3d97382
Parents: 6742635
Author: Jun Aoki 
Authored: Tue Oct 28 17:16:49 2014 -0700
Committer: Jun Aoki 
Committed: Tue Oct 28 17:16:49 2014 -0700

--
 ambari-web/app/templates/common/configs/service_config.hbs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/552792b9/ambari-web/app/templates/common/configs/service_config.hbs
--
diff --git a/ambari-web/app/templates/common/configs/service_config.hbs 
b/ambari-web/app/templates/common/configs/service_config.hbs
index 260b9c1..aeae30f 100644
--- a/ambari-web/app/templates/common/configs/service_config.hbs
+++ b/ambari-web/app/templates/common/configs/service_config.hbs
@@ -49,7 +49,7 @@

  {{t common.group}}  
  
- {{selectedConfigGroup.displayNameHosts}}
+ {{selectedConfigGroup.displayNameHosts}}
  

  



ambari git commit: AMBARI-15852: Changing HAWQ Ports through Ambari prevents HAWQ service from restarting

2016-04-15 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/branch-2.2 6c2697ad2 -> 96e6fcec3


AMBARI-15852: Changing HAWQ Ports through Ambari prevents HAWQ service from 
restarting


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/96e6fcec
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/96e6fcec
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/96e6fcec

Branch: refs/heads/branch-2.2
Commit: 96e6fcec3cd8808bca3cd170e49c0ec5ae81dacf
Parents: 6c2697a
Author: Jun Aoki 
Authored: Fri Apr 15 11:31:26 2016 -0700
Committer: Jun Aoki 
Committed: Fri Apr 15 11:31:26 2016 -0700

--
 .../HAWQ/2.0.0/package/scripts/common.py| 12 +---
 .../2.0.0/package/scripts/hawq_constants.py | 15 ++
 .../HAWQ/2.0.0/package/scripts/hawqmaster.py|  4 +--
 .../HAWQ/2.0.0/package/scripts/hawqsegment.py   |  2 +-
 .../HAWQ/2.0.0/package/scripts/hawqstandby.py   |  2 +-
 .../HAWQ/2.0.0/package/scripts/hawqstatus.py| 25 +++--
 .../HAWQ/2.0.0/package/scripts/utils.py |  8 +++---
 .../python/stacks/2.3/HAWQ/test_hawqmaster.py   |  4 ++-
 .../python/stacks/2.3/HAWQ/test_hawqsegment.py  |  5 ++--
 .../python/stacks/2.3/HAWQ/test_hawqstandby.py  |  4 ++-
 .../test/python/stacks/2.3/HAWQ/test_utils.py   | 29 
 11 files changed, 73 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/96e6fcec/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/common.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/common.py
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/common.py
index 413cf1a..b929430 100644
--- 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/common.py
+++ 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/common.py
@@ -31,6 +31,7 @@ import xml.etree.ElementTree as ET
 import utils
 import hawq_constants
 import custom_params
+import hawqstatus
 
 def setup_user():
   """
@@ -234,7 +235,7 @@ def __update_sysctl_file_suse():
 raise Fail("Failed to update sysctl.conf file ")
 
 
-def get_local_hawq_site_property(property_name):
+def get_local_hawq_site_property_value(property_name):
   """
   Fetches the value of the property specified, from the local hawq-site.xml.
   """
@@ -293,17 +294,20 @@ def start_component(component_name, port, data_dir):
   if os.path.exists(os.path.join(data_dir, 
hawq_constants.postmaster_opts_filename)):
 return utils.exec_hawq_operation(hawq_constants.START,
  "{0} -a -v".format(component_name),
- 
not_if=utils.chk_hawq_process_status_cmd(port))
+ 
not_if=utils.generate_hawq_process_status_cmd(component_name, port))
 
   utils.exec_hawq_operation(hawq_constants.INIT, "{0} -a 
-v".format(component_name))
 
-def stop_component(component_name, port, mode):
+def stop_component(component_name, mode):
   """
   Stops the component
+  Unlike start_component, the port is obtained from the local hawq-site.xml, as Ambari 
potentially has a new value through the UI.
   """
+  port_property_name = 
hawq_constants.COMPONENT_ATTRIBUTES_MAP[component_name]['port_property']
+  port_number = get_local_hawq_site_property_value(port_property_name)
   utils.exec_hawq_operation(hawq_constants.STOP,
 "{0} -M {1} -a -v".format(component_name, mode),
-only_if=utils.chk_hawq_process_status_cmd(port, 
component_name))
+
only_if=utils.generate_hawq_process_status_cmd(component_name, port_number))
 
 def __check_dfs_truncate_enforced():
   """

http://git-wip-us.apache.org/repos/asf/ambari/blob/96e6fcec/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawq_constants.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawq_constants.py
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawq_constants.py
index 3f6c371..4ce0c94 100644
--- 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawq_constants.py
+++ 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawq_constants.py
@@ -66,3 +66,18 @@ pxf_hdfs_test_dir = 
"/user/{0}/hawq_pxf_hdfs_service_check".format(hawq_user)
 # Timeouts
 default_exec_timeout = 600
 hawq_operation_exec_timeout = 900
+
+COMPONENT_ATTRIBUTES_MAP = {
+  MASTER: {
+'port_property': 'hawq_master_address_port',
+'process_name': 'postgres'
+  },
+  STANDBY: {
+

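The renamed helper get_local_hawq_site_property_value() is what makes the stop path survive a port change: it reads the property from the hawq-site.xml already on disk instead of from Ambari's desired configuration. A rough sketch of such a lookup, assuming the usual Hadoop-style XML layout; the file path and error handling are illustrative assumptions, not taken from the diff:

```
import xml.etree.ElementTree as ET

HAWQ_SITE_PATH = '/usr/local/hawq/etc/hawq-site.xml'  # assumed location, for illustration

def get_local_hawq_site_property_value(property_name, path=HAWQ_SITE_PATH):
    # hawq-site.xml follows the <configuration><property><name/><value/> layout
    root = ET.parse(path).getroot()
    for prop in root.findall('property'):
        if prop.findtext('name', '').strip() == property_name:
            return prop.findtext('value', '').strip()
    raise KeyError('{0} not found in {1}'.format(property_name, path))

# e.g. port_number = get_local_hawq_site_property_value('hawq_master_address_port')
```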
ambari git commit: AMBARI-15852: Changing HAWQ Ports through Ambari prevents HAWQ service from restarting

2016-04-15 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk a224f2657 -> d13aa5085


AMBARI-15852: Changing HAWQ Ports through Ambari prevents HAWQ service from 
restarting


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d13aa508
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d13aa508
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d13aa508

Branch: refs/heads/trunk
Commit: d13aa508569c2ee8afce4a746560e7a1d2b0e1a8
Parents: a224f26
Author: Jun Aoki 
Authored: Fri Apr 15 17:44:10 2016 -0700
Committer: Jun Aoki 
Committed: Fri Apr 15 17:44:10 2016 -0700

--
 .../HAWQ/2.0.0/package/scripts/common.py| 13 +
 .../2.0.0/package/scripts/hawq_constants.py | 15 ++
 .../HAWQ/2.0.0/package/scripts/hawqmaster.py|  4 +--
 .../HAWQ/2.0.0/package/scripts/hawqsegment.py   |  2 +-
 .../HAWQ/2.0.0/package/scripts/hawqstandby.py   |  2 +-
 .../HAWQ/2.0.0/package/scripts/hawqstatus.py| 25 +++--
 .../HAWQ/2.0.0/package/scripts/utils.py |  8 +++---
 .../python/stacks/2.3/HAWQ/test_hawqmaster.py   |  4 ++-
 .../python/stacks/2.3/HAWQ/test_hawqsegment.py  |  5 ++--
 .../python/stacks/2.3/HAWQ/test_hawqstandby.py  |  4 ++-
 .../test/python/stacks/2.3/HAWQ/test_utils.py   | 29 
 11 files changed, 73 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/d13aa508/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/common.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/common.py
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/common.py
index abd0992..e70a959 100644
--- 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/common.py
+++ 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/common.py
@@ -31,7 +31,7 @@ import xml.etree.ElementTree as ET
 import utils
 import hawq_constants
 import custom_params
-
+import hawqstatus
 
 def setup_user():
   """
@@ -235,7 +235,7 @@ def __update_sysctl_file_suse():
 raise Fail("Failed to update sysctl.conf file ")
 
 
-def get_local_hawq_site_property(property_name):
+def get_local_hawq_site_property_value(property_name):
   """
   Fetches the value of the property specified, from the local hawq-site.xml.
   """
@@ -294,16 +294,19 @@ def start_component(component_name, port, data_dir):
   if os.path.exists(os.path.join(data_dir, 
hawq_constants.postmaster_opts_filename)):
 return utils.exec_hawq_operation(hawq_constants.START,
  "{0} -a -v".format(component_name),
- 
not_if=utils.chk_hawq_process_status_cmd(port))
+ 
not_if=utils.generate_hawq_process_status_cmd(component_name, port))
   utils.exec_hawq_operation(hawq_constants.INIT, "{0} -a 
-v".format(component_name))
 
-def stop_component(component_name, port, mode):
+def stop_component(component_name, mode):
   """
   Stops the component
+  Unlike start_component, the port is obtained from the local hawq-site.xml, as Ambari 
potentially has a new value through the UI.
   """
+  port_property_name = 
hawq_constants.COMPONENT_ATTRIBUTES_MAP[component_name]['port_property']
+  port_number = get_local_hawq_site_property_value(port_property_name)
   utils.exec_hawq_operation(hawq_constants.STOP,
 "{0} -M {1} -a -v".format(component_name, mode),
-only_if=utils.chk_hawq_process_status_cmd(port, 
component_name))
+
only_if=utils.generate_hawq_process_status_cmd(component_name, port_number))
 
 def __check_dfs_truncate_enforced():
   """

http://git-wip-us.apache.org/repos/asf/ambari/blob/d13aa508/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawq_constants.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawq_constants.py
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawq_constants.py
index 3f6c371..4ce0c94 100644
--- 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawq_constants.py
+++ 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawq_constants.py
@@ -66,3 +66,18 @@ pxf_hdfs_test_dir = 
"/user/{0}/hawq_pxf_hdfs_service_check".format(hawq_user)
 # Timeouts
 default_exec_timeout = 600
 hawq_operation_exec_timeout = 900
+
+COMPONENT_ATTRIBUTES_MAP = {
+  MASTER: {
+'port_property': 'hawq_master_address_port',
+'process_name': 'postgres'
+  },
+  STANDBY: {
+'port_pro

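Both copies of the hawq_constants.py hunk are cut off mid-map, but stop_component() above shows how COMPONENT_ATTRIBUTES_MAP is consumed: a component name keys into the hawq-site.xml property that holds its port and into its process name. A hedged sketch of that lookup; the STANDBY values below are placeholders, since the real entries are truncated above:

```
MASTER, STANDBY = 'master', 'standby'  # stand-ins for the hawq_constants names

COMPONENT_ATTRIBUTES_MAP = {
    MASTER: {
        'port_property': 'hawq_master_address_port',  # as shown in the diff
        'process_name': 'postgres'                    # as shown in the diff
    },
    STANDBY: {
        'port_property': 'hawq_master_address_port',  # placeholder: real value truncated above
        'process_name': 'postgres'                    # placeholder: real value truncated above
    },
}

def port_property_for(component_name):
    # stop_component() uses this key to read the port from the local hawq-site.xml
    return COMPONENT_ATTRIBUTES_MAP[component_name]['port_property']
```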
ambari git commit: AMBARI-15926: HAWQ activate standby wizard fails after port number change but before restart.

2016-04-18 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/branch-2.2.2 0bdb4bc07 -> add3a20ca


AMBARI-15926: HAWQ activate standby wizard fails after port number change but 
before restart.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/add3a20c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/add3a20c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/add3a20c

Branch: refs/heads/branch-2.2.2
Commit: add3a20cabc77902de947b05b172c1b9eb832162
Parents: 0bdb4bc
Author: Jun Aoki 
Authored: Mon Apr 18 13:56:05 2016 -0700
Committer: Jun Aoki 
Committed: Mon Apr 18 13:56:05 2016 -0700

--
 .../common-services/HAWQ/2.0.0/package/scripts/hawqstandby.py| 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/add3a20c/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqstandby.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqstandby.py
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqstandby.py
index e2114d8..cdc58c1 100644
--- 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqstandby.py
+++ 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqstandby.py
@@ -58,6 +58,10 @@ class HawqStandby(Script):
 import utils
 Logger.info("Activating HAWQ standby...")
 utils.exec_hawq_operation(hawq_constants.ACTIVATE, "{0} -a -M {1} -v 
--ignore-bad-hosts".format(hawq_constants.STANDBY, hawq_constants.FAST))
+
+# Stop the newly promoted master, as the process might be running with an old 
port,
+# which would cause a failure at the Start HAWQ Service step in the Activate 
HAWQ Standby Master Wizard
+common.stop_component(hawq_constants.MASTER, hawq_constants.FAST)
 
 if __name__ == "__main__":
   HawqStandby().execute()



ambari git commit: AMBARI-15926: HAWQ activate standby wizard fails after port number change but before restart.

2016-04-18 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/branch-2.2 997aa90d6 -> 980a4d946


AMBARI-15926: HAWQ activate standby wizard fails after port number change but 
before restart.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/980a4d94
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/980a4d94
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/980a4d94

Branch: refs/heads/branch-2.2
Commit: 980a4d94686446be1bd04ec67cf74f9ec88762bb
Parents: 997aa90
Author: Jun Aoki 
Authored: Mon Apr 18 13:57:59 2016 -0700
Committer: Jun Aoki 
Committed: Mon Apr 18 13:57:59 2016 -0700

--
 .../common-services/HAWQ/2.0.0/package/scripts/hawqstandby.py| 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/980a4d94/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqstandby.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqstandby.py
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqstandby.py
index e2114d8..cdc58c1 100644
--- 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqstandby.py
+++ 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqstandby.py
@@ -58,6 +58,10 @@ class HawqStandby(Script):
 import utils
 Logger.info("Activating HAWQ standby...")
 utils.exec_hawq_operation(hawq_constants.ACTIVATE, "{0} -a -M {1} -v 
--ignore-bad-hosts".format(hawq_constants.STANDBY, hawq_constants.FAST))
+
+# Stop the newly promoted master, as the process might be running with an old 
port,
+# which would cause a failure at the Start HAWQ Service step in the Activate 
HAWQ Standby Master Wizard
+common.stop_component(hawq_constants.MASTER, hawq_constants.FAST)
 
 if __name__ == "__main__":
   HawqStandby().execute()



ambari git commit: AMBARI-15926: HAWQ activate standby wizard fails after port number change but before restart.

2016-04-19 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk d4ee11cc0 -> 9ed03583b


AMBARI-15926: HAWQ activate standby wizard fails after port number change but 
before restart.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9ed03583
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9ed03583
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9ed03583

Branch: refs/heads/trunk
Commit: 9ed03583b8bdce6be57f482cb88db62d185dd027
Parents: d4ee11c
Author: Jun Aoki 
Authored: Tue Apr 19 11:39:16 2016 -0700
Committer: Jun Aoki 
Committed: Tue Apr 19 11:39:16 2016 -0700

--
 .../common-services/HAWQ/2.0.0/package/scripts/hawqstandby.py| 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/9ed03583/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqstandby.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqstandby.py
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqstandby.py
index caba53f..45dcff0 100644
--- 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqstandby.py
+++ 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqstandby.py
@@ -59,5 +59,9 @@ class HawqStandby(Script):
 Logger.info("Activating HAWQ standby...")
 utils.exec_hawq_operation(hawq_constants.ACTIVATE, "{0} -a -M {1} -v 
--ignore-bad-hosts".format(hawq_constants.STANDBY, hawq_constants.FAST))
 
+# Stop the newly promoted master, as the process might be running with an old 
port,
+# which would cause a failure at the Start HAWQ Service step in the Activate 
HAWQ Standby Master Wizard
+common.stop_component(hawq_constants.MASTER, hawq_constants.FAST)
+
 if __name__ == "__main__":
   HawqStandby().execute()



ambari git commit: AMBARI-16174: Move RM wizard should update HAWQ related parameters

2016-05-03 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 15a7ea3d5 -> 7d0d33a49


AMBARI-16174: Move RM wizard should update HAWQ related parameters


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7d0d33a4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7d0d33a4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7d0d33a4

Branch: refs/heads/trunk
Commit: 7d0d33a49cfd4b8fb832ad6f0aa5f7e4b48ea048
Parents: 15a7ea3
Author: Jun Aoki 
Authored: Tue May 3 10:59:32 2016 -0700
Committer: Jun Aoki 
Committed: Tue May 3 10:59:32 2016 -0700

--
 .../main/service/reassign/step4_controller.js   | 42 -
 .../utils/configs/move_rm_config_initializer.js | 64 +++-
 .../service/reassign/step4_controller_test.js   |  9 ++-
 3 files changed, 107 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/7d0d33a4/ambari-web/app/controllers/main/service/reassign/step4_controller.js
--
diff --git 
a/ambari-web/app/controllers/main/service/reassign/step4_controller.js 
b/ambari-web/app/controllers/main/service/reassign/step4_controller.js
index 736f467..d8f0394 100644
--- a/ambari-web/app/controllers/main/service/reassign/step4_controller.js
+++ b/ambari-web/app/controllers/main/service/reassign/step4_controller.js
@@ -574,6 +574,13 @@ App.ReassignMasterWizardStep4Controller = 
App.HighAvailabilityProgressPageContro
 }
 }
 
+if (componentName === 'RESOURCEMANAGER') {
+if (App.Service.find().someProperty('serviceName', 'HAWQ')) {
+  urlParams.push('(type=hawq-site&tag=' + 
data.Clusters.desired_configs['hawq-site'].tag + ')');
+  urlParams.push('(type=yarn-client&tag=' + 
data.Clusters.desired_configs['yarn-client'].tag + ')');
+}
+}
+
 return urlParams;
   },
 
@@ -629,9 +636,13 @@ App.ReassignMasterWizardStep4Controller = 
App.HighAvailabilityProgressPageContro
*/
   _getRmAdditionalDependencies: function (configs) {
 var ret = {};
-var cfg = configs['yarn-site']['yarn.resourcemanager.hostname.rm1'];
-if (cfg) {
-  ret.rm1 = cfg;
+var rm1 = configs['yarn-site']['yarn.resourcemanager.hostname.rm1'];
+if (rm1) {
+  ret.rm1 = rm1;
+}
+var rm2 = configs['yarn-site']['yarn.resourcemanager.hostname.rm2'];
+if (rm2) {
+  ret.rm2 = rm2;
 }
 return ret;
   },
@@ -700,6 +711,31 @@ App.ReassignMasterWizardStep4Controller = 
App.HighAvailabilityProgressPageContro
   },
 
   onLoadConfigs: function (data) {
+// Find hawq-site.xml location
+var hawqSiteIndex = -1;
+for(var i = 0; i < data.items.length; i++){
+  if(data.items[i].type == 'hawq-site'){
+hawqSiteIndex = i;
+break;
+  }
+}
+
+// if certain services are deployed, include related site files to 
additionalConfigsMap and relatedServicesMap.
+if(hawqSiteIndex >= 0){ // if HAWQ is deployed
+  var hawqSiteProperties = {
+'hawq_rm_yarn_address': ':8050',
+'hawq_rm_yarn_scheduler_address': ':8030'
+  }
+
+  var rmComponent = 
this.get('additionalConfigsMap').findProperty('componentName', 
"RESOURCEMANAGER");
+  rmComponent.configs["hawq-site"] = hawqSiteProperties;
+
+  
if(data.items[hawqSiteIndex].properties["hawq_global_rm_type"].toLowerCase() 
=== "yarn"){
+this.get('relatedServicesMap')['RESOURCEMANAGER'].append('HAWQ');
+  }
+
+}
+
 var componentName = this.get('content.reassign.component_name');
 var targetHostName = this.get('content.reassignHosts.target');
 var configs = {};

http://git-wip-us.apache.org/repos/asf/ambari/blob/7d0d33a4/ambari-web/app/utils/configs/move_rm_config_initializer.js
--
diff --git a/ambari-web/app/utils/configs/move_rm_config_initializer.js 
b/ambari-web/app/utils/configs/move_rm_config_initializer.js
index f8800de..5388224 100644
--- a/ambari-web/app/utils/configs/move_rm_config_initializer.js
+++ b/ambari-web/app/utils/configs/move_rm_config_initializer.js
@@ -33,6 +33,13 @@ function getRmHaDependedConfig(rmHaShouldBeEnabled) {
   };
 }
 
+function getRmHaHawqConfig(rmHaShouldBeEnabled) {
+  return {
+type: 'rm_ha_hawq',
+rmHaShouldBeEnabled: Boolean(rmHaShouldBeEnabled)
+  };
+}
+
 /**
  * Initializer for configs which should be affected when Resource Manager is 
moved from one host to another
  * If Resource Manager HA-mode is already activated, several configs are also 
updated
@@ -42,13 +49,16 @@ function getRmHaDependedConfig(rmHaShouldBeEnabled) {
 App.MoveRmConfigInitializer = App.MoveComponentConfigInitializerClass.create({
 
   initializerTypes: [
-{name: 'rm_ha_depended', method: '_initAsRmHaDepended'}
+  

ambari git commit: AMBARI-16237: PXF alert: change the message to make it more meaningful when both namenodes are down on Secured HA cluster.

2016-05-05 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 1770820a7 -> be7f69625


AMBARI-16237: PXF alert: change the message to make it more meaningful when 
both namenodes are down on a secured HA cluster.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/be7f6962
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/be7f6962
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/be7f6962

Branch: refs/heads/trunk
Commit: be7f69625e354123facf0df369a29a3da85462ec
Parents: 1770820
Author: Jun Aoki 
Authored: Thu May 5 11:28:20 2016 -0700
Committer: Jun Aoki 
Committed: Thu May 5 11:28:20 2016 -0700

--
 .../libraries/functions/namenode_ha_utils.py| 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/be7f6962/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
--
diff --git 
a/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
 
b/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
index ee16c9b..919ccb5 100644
--- 
a/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
+++ 
b/ambari-common/src/main/python/resource_management/libraries/functions/namenode_ha_utils.py
@@ -17,7 +17,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express 
or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 '''
-from resource_management.libraries.script import UnknownConfiguration
 from resource_management.libraries.functions.is_empty import is_empty
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.jmx import get_value_from_jmx
@@ -140,8 +139,8 @@ def get_active_namenode(hdfs_site, security_enabled, 
run_user):
   active_namenodes = get_namenode_states(hdfs_site, security_enabled, 
run_user)[0]
   if active_namenodes:
 return active_namenodes[0]
-  else:
-return UnknownConfiguration('fs_root')
+
+  raise Fail('No active NameNode was found.')
   
 def get_property_for_active_namenode(hdfs_site, property_name, 
security_enabled, run_user):
   """

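One behavioral note on this change: callers that previously got back an UnknownConfiguration sentinel now see an exception, and the hunk does not show a Fail import being added, so the module presumably imports it elsewhere. A hedged caller-side sketch; resolve_fs_root and the import path are illustrative assumptions:

```
from resource_management.core.exceptions import Fail  # assumed import location

def resolve_fs_root(hdfs_site, security_enabled, run_user):
    try:
        # returns the address of the active NameNode, or raises Fail
        return get_active_namenode(hdfs_site, security_enabled, run_user)
    except Fail:
        # no active NameNode: report a clear error rather than returning
        # an UnknownConfiguration sentinel as before
        raise
```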


ambari git commit: AMBARI-8825 Print Button in Install Wizard Step 9 should not be in scrolling area (wangy6 via jaoki)

2015-01-12 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk e2c81b45b -> 6938ad5e5


AMBARI-8825 Print Button in Install Wizard Step 9 should not be in scrolling 
area (wangy6 via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6938ad5e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6938ad5e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6938ad5e

Branch: refs/heads/trunk
Commit: 6938ad5e5cb6c7a6f198a3a8094c6903be511acf
Parents: e2c81b4
Author: Jun Aoki 
Authored: Mon Jan 12 12:06:07 2015 -0800
Committer: Jun Aoki 
Committed: Mon Jan 12 12:06:07 2015 -0800

--
 ambari-web/app/styles/application.less| 3 +++
 ambari-web/app/templates/wizard/step8.hbs | 4 +---
 2 files changed, 4 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/6938ad5e/ambari-web/app/styles/application.less
--
diff --git a/ambari-web/app/styles/application.less 
b/ambari-web/app/styles/application.less
index 8a5bac9..783846e 100644
--- a/ambari-web/app/styles/application.less
+++ b/ambari-web/app/styles/application.less
@@ -680,6 +680,9 @@ h1 {
   }
   .btn-area {
 margin-top: 20px;
+.btn.btn-info {
+  margin-right: 10px;
+}
   }
   .wizard-content {
 padding: 25px;

http://git-wip-us.apache.org/repos/asf/ambari/blob/6938ad5e/ambari-web/app/templates/wizard/step8.hbs
--
diff --git a/ambari-web/app/templates/wizard/step8.hbs 
b/ambari-web/app/templates/wizard/step8.hbs
index 1fe7f22..2a0f50e 100644
--- a/ambari-web/app/templates/wizard/step8.hbs
+++ b/ambari-web/app/templates/wizard/step8.hbs
@@ -29,9 +29,6 @@
   {{/if}}
 
   
-
-  {{t common.print}} 
-
 
   {{#each item in controller.clusterInfo}}
 
@@ -76,5 +73,6 @@
 ← {{t 
common.back}}
 {{t common.deploy}} →
+{{t common.print}}
   
 
\ No newline at end of file



ambari git commit: AMBARI-8823 Install Wizard Step 5 should have a checkbox for 'Select All' instead of 'all/none' (wangy6 via jaoki)

2015-01-14 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk a6df75fd6 -> de18175ff


AMBARI-8823 Install Wizard Step 5 should have a checkbox for 'Select All' 
instead of 'all/none' (wangy6 via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/de18175f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/de18175f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/de18175f

Branch: refs/heads/trunk
Commit: de18175ff2cbb23603a33cc79f29e1650941d335
Parents: a6df75f
Author: Jun Aoki 
Authored: Wed Jan 14 13:45:02 2015 -0800
Committer: Jun Aoki 
Committed: Wed Jan 14 13:45:02 2015 -0800

--
 .../app/controllers/wizard/step4_controller.js  | 50 ++--
 ambari-web/app/styles/application.less  |  5 ++
 ambari-web/app/templates/wizard/step4.hbs   | 10 +---
 .../test/controllers/wizard/step4_test.js   | 30 ++--
 4 files changed, 25 insertions(+), 70 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/de18175f/ambari-web/app/controllers/wizard/step4_controller.js
--
diff --git a/ambari-web/app/controllers/wizard/step4_controller.js 
b/ambari-web/app/controllers/wizard/step4_controller.js
index 0f8b998..2620fae 100644
--- a/ambari-web/app/controllers/wizard/step4_controller.js
+++ b/ambari-web/app/controllers/wizard/step4_controller.js
@@ -30,6 +30,21 @@ App.WizardStep4Controller = Em.ArrayController.extend({
   content: [],
 
   /**
+   * Ember computed property backing the 'Select All' checkbox: with one argument it 
acts as a getter; with a value argument it acts as a setter that checks/unchecks all selectable services
+   * @type {bool}
+   */
+  isAllChecked: function(key, value) {
+if (arguments.length > 1) {
+  this.filterProperty('isInstalled', false).setEach('isSelected', value);
+  return value;
+} else {
+  return this.filterProperty('isInstalled', false).
+filterProperty('isHiddenOnSelectServicePage', false).
+everyProperty('isSelected', true);
+}
+  }.property('@each.isSelected'),
+
+  /**
* Is Submit button disabled
* @type {bool}
*/
@@ -46,46 +61,11 @@ App.WizardStep4Controller = Em.ArrayController.extend({
   errorStack: [],
 
   /**
-   * Check whether all properties are selected
-   * @type {bool}
-   */
-  isAll: function () {
-return this.filterProperty('isInstalled', false).
-  filterProperty('isHiddenOnSelectServicePage', false).
-  everyProperty('isSelected', true);
-  }.property('@each.isSelected'),
-
-  /**
-   * Check whether none properties(minimum) are selected
-   * @type {bool}
-   */
-  isMinimum: function () {
-return this.filterProperty('isInstalled', false).
-  filterProperty('isHiddenOnSelectServicePage', false).
-  everyProperty('isSelected', false);
-  }.property('@each.isSelected'),
-
-  /**
* Drop errorStack content on selected state changes.
**/
   clearErrors: function() {
 this.set('errorStack', []);
   }.observes('@each.isSelected'),
-  /**
-   * Onclick handler for select all link
-   * @method selectAll
-   */
-  selectAll: function () {
-this.filterProperty('isInstalled', false).setEach('isSelected', true);
-  },
-
-  /**
-   * Onclick handler for select minimum link
-   * @method selectMinimum
-   */
-  selectMinimum: function () {
-this.filterProperty('isInstalled', false).setEach('isSelected', false);
-  },
 
   /**
* Check if multiple distributed file systems were selected

http://git-wip-us.apache.org/repos/asf/ambari/blob/de18175f/ambari-web/app/styles/application.less
--
diff --git a/ambari-web/app/styles/application.less 
b/ambari-web/app/styles/application.less
index 8a28084..1c02346 100644
--- a/ambari-web/app/styles/application.less
+++ b/ambari-web/app/styles/application.less
@@ -853,6 +853,11 @@ h1 {
 i.icon-asterisks {
   color: #00688B;
 }
+th {
+  input {
+margin-right: 5px;
+  }
+}
   }
   #step6 {
 a.remove-link {

http://git-wip-us.apache.org/repos/asf/ambari/blob/de18175f/ambari-web/app/templates/wizard/step4.hbs
--
diff --git a/ambari-web/app/templates/wizard/step4.hbs 
b/ambari-web/app/templates/wizard/step4.hbs
index 68a7e0e..71835d5 100644
--- a/ambari-web/app/templates/wizard/step4.hbs
+++ b/ambari-web/app/templates/wizard/step4.hbs
@@ -25,14 +25,8 @@
   
 
 
-  {{t common.service}}
-
-  {{t all}}
-   |
-  {{t none}}
-
+ 

ambari git commit: AMBARI-9010 Nav bar dropdown does not highlight appropriate section (salvi via jaoki)

2015-01-15 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk c7528452c -> ad0d0b094


AMBARI-9010 Nav bar dropdown does not highlight appropriate section (salvi via 
jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ad0d0b09
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ad0d0b09
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ad0d0b09

Branch: refs/heads/trunk
Commit: ad0d0b094798b8cc143f7e369896b5e4803bfeac
Parents: c752845
Author: Jun Aoki 
Authored: Thu Jan 15 16:21:21 2015 -0800
Committer: Jun Aoki 
Committed: Thu Jan 15 16:21:21 2015 -0800

--
 ambari-web/app/templates/main/menu_item.hbs |  4 +++-
 ambari-web/app/views/main/menu.js   | 28 +---
 2 files changed, 23 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/ad0d0b09/ambari-web/app/templates/main/menu_item.hbs
--
diff --git a/ambari-web/app/templates/main/menu_item.hbs 
b/ambari-web/app/templates/main/menu_item.hbs
index a135f9e..769ff7e 100644
--- a/ambari-web/app/templates/main/menu_item.hbs
+++ b/ambari-web/app/templates/main/menu_item.hbs
@@ -33,7 +33,9 @@
 {{#if view.isAdminItem}}
   
 {{#each category in view.dropdownCategories}}
-{{category.label}}
+  {{#view view.AdminDropdownItemView itemBinding="category.name" }}
+{{category.label}}
+  {{/view}}
 {{/each}}
   
 {{/if}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ad0d0b09/ambari-web/app/views/main/menu.js
--
diff --git a/ambari-web/app/views/main/menu.js 
b/ambari-web/app/views/main/menu.js
index 205fe96..0108a12 100644
--- a/ambari-web/app/views/main/menu.js
+++ b/ambari-web/app/views/main/menu.js
@@ -105,13 +105,9 @@ App.MainMenuView = Em.CollectionView.extend({
   }
   App.router.route('main/' + event.context);
 },
-goToCategory: function (event) {
-  var itemName = this.get('content').routing;
-  // route to correct category of current menu item
-  if (itemName == 'admin') {
-App.router.route('main/admin/' + event.context);
-  }
-},
+
+selectedAdminItemBinding: 'App.router.mainAdminController.category',
+
 dropdownCategories: function () {
   var itemName = this.get('content').routing;
   var categories = [];
@@ -145,6 +141,22 @@ App.MainMenuView = Em.CollectionView.extend({
 }
   }
   return categories;
-}.property('')
+}.property(''),
+
+AdminDropdownItemView: Ember.View.extend({
+  tagName: 'li',
+  classNameBindings: 'isActive:active'.w(),
+  isActive: function () {
+return 
this.get('item').toLowerCase().contains(this.get('parentView.selectedAdminItem').toLowerCase());
+  }.property('item', 'parentView.selectedAdminItem'),
+
+  goToCategory: function (event) {
+var itemName = this.get('parentView').get('content').routing;
+// route to correct category of current menu item
+if (itemName == 'admin') {
+  App.router.route('main/admin/' + event.context);
+}
+  }
+})
   })
 });

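The isActive property above decides which dropdown entry to highlight by a case-insensitive substring match between the item name and the currently routed admin category (bound through selectedAdminItemBinding). The matching rule on its own, as a rough Python sketch with made-up sample names:

def is_active(item, selected_category):
    # Case-insensitive substring match, mirroring AdminDropdownItemView.isActive.
    return selected_category.lower() in item.lower()

assert is_active("stackAndUpgrade", "stack")
assert not is_active("serviceAccounts", "stack")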


ambari git commit: AMBARI-8826 Metrics Dropdown Dashboard Should Be Independent Dropdown (wangy6 via jaoki)

2015-01-19 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk a0c5d8f23 -> 8c940368c


AMBARI-8826 Metrics Dropdown Dashboard Should Be Independent Dropdown (wangy6 
via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8c940368
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8c940368
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8c940368

Branch: refs/heads/trunk
Commit: 8c940368cc54f19437fcfc9f809b4abe23ebd265
Parents: a0c5d8f
Author: Jun Aoki 
Authored: Mon Jan 19 13:02:12 2015 -0800
Committer: Jun Aoki 
Committed: Mon Jan 19 13:02:12 2015 -0800

--
 ambari-web/app/styles/application.less  | 9 +
 ambari-web/app/templates/main/dashboard/widgets.hbs | 5 -
 2 files changed, 5 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/8c940368/ambari-web/app/styles/application.less
--
diff --git a/ambari-web/app/styles/application.less 
b/ambari-web/app/styles/application.less
index 0891740..5f375d4 100644
--- a/ambari-web/app/styles/application.less
+++ b/ambari-web/app/styles/application.less
@@ -3010,14 +3010,7 @@ table.graphs {
   > ul.nav.nav-tabs {
 margin-bottom: 10px;
   }
-  > ul.nav.nav-tabs > li.active > a, .nav-pills > li.active > a {
-padding-right: 18px;
-  }
   #widgets-options-menu {
-position: relative;
-top: -39px;
-left: 62px;
-width: 15px;
 .dropdown-submenu {
   &> a:after {
 border-left-color: #333;
@@ -3058,7 +3051,7 @@ table.graphs {
   }
   .dashboard-widgets-box {
 position: relative;
-top: -20px;
+top: 4px;
   }
   h4{
 line-height: 30px;

http://git-wip-us.apache.org/repos/asf/ambari/blob/8c940368/ambari-web/app/templates/main/dashboard/widgets.hbs
--
diff --git a/ambari-web/app/templates/main/dashboard/widgets.hbs 
b/ambari-web/app/templates/main/dashboard/widgets.hbs
index c462b68..56e681d 100644
--- a/ambari-web/app/templates/main/dashboard/widgets.hbs
+++ b/ambari-web/app/templates/main/dashboard/widgets.hbs
@@ -18,7 +18,10 @@
 {{#if view.isDataLoaded}}
 
   
-
+
+Metric Actions
+
+
 
   
    {{t 
common.add}}



ambari git commit: AMBARI-5042 Ambari Repo URL validator rejecting valid yum repo file:/// URL. (jaoki)

2015-02-06 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 21bf7c330 -> 621303369


AMBARI-5042 Ambari Repo URL validator rejecting valid yum repo file:/// URL. 
(jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/62130336
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/62130336
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/62130336

Branch: refs/heads/trunk
Commit: 62130336971b2a8c6f84557b3931eaa48c2b4b4b
Parents: 21bf7c3
Author: Jun Aoki 
Authored: Fri Feb 6 10:22:04 2015 -0800
Committer: Jun Aoki 
Committed: Fri Feb 6 10:22:04 2015 -0800

--
 .../AmbariManagementControllerImpl.java | 41 ++-
 .../AmbariManagementControllerImplTest.java | 53 
 2 files changed, 81 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/62130336/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
--
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 7d78a4a..7e811e2 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -3074,26 +3074,41 @@ public class AmbariManagementControllerImpl implements 
AmbariManagementControlle
 String[] suffixes = configs.getRepoValidationSuffixes(request.getOsType());
 for (String suffix : suffixes) {
   String formatted_suffix = String.format(suffix, repoName);
-  String spec = request.getBaseUrl();
+  String spec = request.getBaseUrl().trim();
 
+  // Check whether the end of the base URL and/or the beginning of the suffix string (e.g. "/repodata/repomd.xml")
+  // has a slash ('/'), so that the two join into a well-formed URL.
+  // e.g. "http://baseurl.com/" + "/repodata/repomd.xml" becomes "http://baseurl.com/repodata/repomd.xml", not "http://baseurl.com//repodata/repomd.xml"
   if (spec.charAt(spec.length() - 1) != '/' && formatted_suffix.charAt(0) 
!= '/') {
-spec = request.getBaseUrl() + "/" + formatted_suffix;
+spec = spec + "/" + formatted_suffix;
   } else if (spec.charAt(spec.length() - 1) == '/' && 
formatted_suffix.charAt(0) == '/') {
-spec = request.getBaseUrl() + formatted_suffix.substring(1);
+spec = spec + formatted_suffix.substring(1);
   } else {
-spec = request.getBaseUrl() + formatted_suffix;
+spec = spec + formatted_suffix;
   }
 
-  try {
-IOUtils.readLines(usp.readFrom(spec));
-  } catch (IOException ioe) {
-errorMessage = "Could not access base url . " + request.getBaseUrl() + 
" . ";
-if (LOG.isDebugEnabled()) {
-  errorMessage += ioe;
-} else {
-  errorMessage += ioe.getMessage();
+  // If spec starts with "file://", check the local file system instead of fetching over the network.
+  final String FILE_SCHEME = "file://";
+  if(spec.toLowerCase().startsWith(FILE_SCHEME)){
+String filePath = spec.substring(FILE_SCHEME.length());
+File f = new File(filePath);
+if(!f.exists()){
+  errorMessage = "Could not access base url . " + spec + " . ";
+  break;
+}
+
+  }else{
+try {
+  IOUtils.readLines(usp.readFrom(spec));
+} catch (IOException ioe) {
+  errorMessage = "Could not access base url . " + request.getBaseUrl() 
+ " . ";
+  if (LOG.isDebugEnabled()) {
+errorMessage += ioe;
+  } else {
+errorMessage += ioe.getMessage();
+  }
+  break;
 }
-break;
   }
 }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/62130336/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
--
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index 6aee03b..208218c 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/cont

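Joining the base URL and the validation suffix is easy to get wrong at the slash boundary, and the new file:// branch checks the local filesystem instead of the network. Both behaviors, sketched stand-alone in Python (the function names are illustrative, not Ambari API):

import os

FILE_SCHEME = "file://"

def join_base_and_suffix(base_url, suffix):
    # Join so that exactly one '/' separates base and suffix.
    base_url = base_url.strip()
    if not base_url.endswith("/") and not suffix.startswith("/"):
        return base_url + "/" + suffix
    if base_url.endswith("/") and suffix.startswith("/"):
        return base_url + suffix[1:]
    return base_url + suffix

def repo_url_is_reachable(spec):
    # file:// URLs are validated on the local filesystem; the network
    # check for http(s) URLs is elided in this sketch.
    if spec.lower().startswith(FILE_SCHEME):
        return os.path.exists(spec[len(FILE_SCHEME):])
    return True

assert join_base_and_suffix("http://baseurl.com/", "/repodata/repomd.xml") == "http://baseurl.com/repodata/repomd.xml"
assert join_base_and_suffix("http://baseurl.com", "repodata/repomd.xml") == "http://baseurl.com/repodata/repomd.xml"
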
git commit: AMBARI-7528 Disabling javadoc check in the test-patch (jaoki)

2014-09-30 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 04688200d -> 63d507275


AMBARI-7528 Disabling javadoc check in the test-patch (jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/63d50727
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/63d50727
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/63d50727

Branch: refs/heads/trunk
Commit: 63d507275cbfc4e9f38a86894ed550ea1e0573d4
Parents: 0468820
Author: Jun Aoki 
Authored: Tue Sep 30 12:09:07 2014 -0700
Committer: Jun Aoki 
Committed: Tue Sep 30 12:09:07 2014 -0700

--
 dev-support/test-patch.sh | 62 --
 1 file changed, 62 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/63d50727/dev-support/test-patch.sh
--
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index c0b60d2..c416c4a 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -285,16 +285,6 @@ prebuildWithoutPatch () {
 return 1
   fi
 
-  echo "$MVN clean test javadoc:javadoc -DskipTests -Pdocs 
-D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunkJavadocWarnings.txt 2>&1"
-  $MVN clean test javadoc:javadoc -DskipTests -Pdocs 
-D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunkJavadocWarnings.txt 2>&1
-  if [[ $? != 0 ]] ; then
-echo "Trunk javadoc compilation is broken?"
-JIRA_COMMENT="$JIRA_COMMENT
-
-{color:red}-1 patch{color}.  Trunk compilation may be broken."
-return 1
-  fi
-
   return 0
 }
 
@@ -396,56 +386,6 @@ applyPatch () {
   return 0
 }
 
-###
-calculateJavadocWarnings() {
-WARNING_FILE="$1"
-RET=$(egrep "^[0-9]+ warnings$" "$WARNING_FILE" | awk '{sum+=$1} END 
{print sum}')
-}
-
-### Check there are no javadoc warnings
-checkJavadocWarnings () {
-  echo ""
-  echo ""
-  echo "=="
-  echo "=="
-  echo "Determining number of patched javadoc warnings."
-  echo "=="
-  echo "=="
-  echo ""
-  echo ""
-  echo "$MVN clean test javadoc:javadoc -DskipTests -Pdocs 
-D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1"
-  if [ -d hadoop-project ]; then
-(cd hadoop-project; $MVN install > /dev/null 2>&1)
-  fi
-  if [ -d hadoop-common-project/hadoop-annotations ]; then  
-(cd hadoop-common-project/hadoop-annotations; $MVN install > /dev/null 
2>&1)
-  fi
-  $MVN clean test javadoc:javadoc -DskipTests -Pdocs 
-D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1
-  calculateJavadocWarnings "$PATCH_DIR/trunkJavadocWarnings.txt"
-  numTrunkJavadocWarnings=$RET
-  calculateJavadocWarnings "$PATCH_DIR/patchJavadocWarnings.txt"
-  numPatchJavadocWarnings=$RET
-  grep -i warning "$PATCH_DIR/trunkJavadocWarnings.txt" > 
"$PATCH_DIR/trunkJavadocWarningsFiltered.txt"
-  grep -i warning "$PATCH_DIR/patchJavadocWarnings.txt" > 
"$PATCH_DIR/patchJavadocWarningsFiltered.txt"
-  diff -u "$PATCH_DIR/trunkJavadocWarningsFiltered.txt" \
-  "$PATCH_DIR/patchJavadocWarningsFiltered.txt" > \
-  "$PATCH_DIR/diffJavadocWarnings.txt"
-  rm -f "$PATCH_DIR/trunkJavadocWarningsFiltered.txt" 
"$PATCH_DIR/patchJavadocWarningsFiltered.txt"
-  echo "There appear to be $numTrunkJavadocWarnings javadoc warnings before 
the patch and $numPatchJavadocWarnings javadoc warnings after applying the 
patch."
-  if [[ $numTrunkJavadocWarnings != "" && $numPatchJavadocWarnings != "" ]] ; 
then
-if [[ $numPatchJavadocWarnings -gt $numTrunkJavadocWarnings ]] ; then
-  JIRA_COMMENT="$JIRA_COMMENT
-
-{color:red}-1 javadoc{color}.  The javadoc tool appears to have generated 
`expr $(($numPatchJavadocWarnings-$numTrunkJavadocWarnings))` warning messages.
-See $BUILD_URL/artifact/trunk/patchprocess/diffJavadocWarnings.txt for 
details."
-return 1
-fi
-  fi
-  JIRA_COMMENT="$JIRA_COMMENT
-
-{color:green}+1 javadoc{color}.  There were no new javadoc warning 
messages."
-  return 0
-}
 
 ###
 ### Check there are no changes in the number of Javac warnings
@@ -1041,8 +981,6 @@ if [[ $JAVAC_RET == 2 ]] ; then
   cleanupAndExit 1
 fi
 (( RESULT = RESULT + $JAVAC_RET ))
-checkJavadocWarnings
-(( RESULT = RESULT + $? ))
 ### Checkstyle not implemented yet
 #checkStyle
 #(( RESULT = RESULT + $? ))

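For readers following the removed shell: checkJavadocWarnings counted warnings by summing every 'N warnings' summary line in the javadoc log (the egrep/awk pipeline in calculateJavadocWarnings). The same computation as a Python sketch, with an assumed log file name:

import re

def count_javadoc_warnings(log_path):
    # Sum all lines of the form '<number> warnings', as the awk pipeline did.
    total = 0
    with open(log_path) as log:
        for line in log:
            match = re.match(r"(\d+) warnings$", line.strip())
            if match:
                total += int(match.group(1))
    return total

# e.g. count_javadoc_warnings("patchJavadocWarnings.txt")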


git commit: AMBARI-7442 ServiceCheck cannot be run if there is only one stack definition (adenisso via jaoki)

2014-10-06 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk e40a21889 -> a619219ff


AMBARI-7442 ServiceCheck cannot be run if there is only one stack definition 
(adenisso via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a619219f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a619219f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a619219f

Branch: refs/heads/trunk
Commit: a619219ff6e9a812ecc283751a5c1f3e12ed182e
Parents: e40a218
Author: Jun Aoki 
Authored: Mon Oct 6 11:50:29 2014 -0700
Committer: Jun Aoki 
Committed: Mon Oct 6 11:51:07 2014 -0700

--
 .../server/api/util/StackExtensionHelper.java   |  14 +-
 .../api/util/StackExtensionHelperTest.java  |  27 +++
 .../single_stack/ABC/1.0.0/metainfo.xml |  22 +++
 .../ABC/1.0.0/services/HDFS/metainfo.xml| 193 +++
 4 files changed, 251 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/a619219f/ambari-server/src/main/java/org/apache/ambari/server/api/util/StackExtensionHelper.java
--
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/api/util/StackExtensionHelper.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/api/util/StackExtensionHelper.java
index 1c3f9a7..35ae9a7 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/api/util/StackExtensionHelper.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/api/util/StackExtensionHelper.java
@@ -490,11 +490,6 @@ public class StackExtensionHelper {
   }
 }
 
-// add action for service check
-if(serviceInfo.getCommandScript() != null) {
-  actionMetadata.addServiceCheckAction(serviceInfo.getName());
-}
-
   }
   parentStack = currentStackInfo;
 }
@@ -536,6 +531,7 @@ public class StackExtensionHelper {
   ParserConfigurationException, SAXException,
   XPathExpressionException, IOException, JAXBException {
 List services = new ArrayList();
+
 File servicesFolder = new File(stackRoot.getAbsolutePath() + File
   .separator + stackInfo.getName() + File.separator + 
stackInfo.getVersion()
   + File.separator + AmbariMetaInfo.SERVICES_FOLDER_NAME);
@@ -602,6 +598,14 @@ public class StackExtensionHelper {
 }
 
 stackInfo.getServices().addAll(services);
+
+// add service check actions from the target stack
+for(ServiceInfo serviceInfo : stackInfo.getServices()) {
+  if(serviceInfo.getCommandScript() != null) {
+actionMetadata.addServiceCheckAction(serviceInfo.getName());
+  }
+}
+
   }
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a619219f/ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java
--
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java
index 7262dfb..84ba6aa 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java
@@ -21,6 +21,7 @@ package org.apache.ambari.server.api.util;
 import com.google.inject.AbstractModule;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
+
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.metadata.ActionMetadata;
 import org.apache.ambari.server.state.*;
@@ -32,6 +33,7 @@ import javax.xml.bind.JAXBException;
 import javax.xml.namespace.QName;
 import javax.xml.parsers.ParserConfigurationException;
 import javax.xml.xpath.XPathExpressionException;
+
 import java.io.File;
 import java.io.IOException;
 import java.util.*;
@@ -353,6 +355,31 @@ public class StackExtensionHelperTest {
 Map supports = config.get(keyword);
 supports.put(attributeName, value);
   }
+
+  /**
+   * Ensures the service check action is added to the action metadata when
+   * the stack has no parent and is the only stack in the stack family.
+   * @throws Exception
+   */
+  @Test
+  public void testGetServiceInfoFromSingleStack() throws Exception {
+File stackRoot = new File("./src/test/resources/single_stack".replace("/", 
File.separator));
+StackExtensionHelper helper = new StackExtensionHelper(injector, 
stackRoot);
+helper.fillInfo();
+List stackInfoList = helper.getAllAvailableStacks();
+assertEquals(1, stackInfoList.size());
+
+List serviceInfoList = 
helper.getAllApplicableServices(stackInfoList.get(0));
+for(

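The fix above moves service-check registration out of the per-parent loop and into a pass over the fully merged service list, so a stack with no parent (and therefore no inheritance iterations) still registers its checks. Schematically, in Python (the dicts stand in for ServiceInfo objects):

def register_service_checks(merged_services):
    # Runs once, after parent/child stack merging has produced the final list.
    actions = set()
    for service in merged_services:
        if service.get("command_script") is not None:
            actions.add(service["name"])
    return actions

assert register_service_checks(
    [{"name": "HDFS", "command_script": "scripts/service_check.py"},
     {"name": "GANGLIA", "command_script": None}]) == {"HDFS"}
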
git commit: AMBARI-7442 ServiceCheck cannot be run if there is only one stack definition (adenisso via jaoki)

2014-10-06 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/branch-1.7.0 5fbaece3d -> df0f496a7


AMBARI-7442 ServiceCheck cannot be run if there is only one stack definition 
(adenisso via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/df0f496a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/df0f496a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/df0f496a

Branch: refs/heads/branch-1.7.0
Commit: df0f496a71e5cc47d0e550eb027d5e3c0191d768
Parents: 5fbaece
Author: Jun Aoki 
Authored: Mon Oct 6 11:52:59 2014 -0700
Committer: Jun Aoki 
Committed: Mon Oct 6 11:52:59 2014 -0700

--
 .../server/api/util/StackExtensionHelper.java   |  14 +-
 .../api/util/StackExtensionHelperTest.java  |  27 +++
 .../single_stack/ABC/1.0.0/metainfo.xml |  22 +++
 .../ABC/1.0.0/services/HDFS/metainfo.xml| 193 +++
 4 files changed, 251 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/df0f496a/ambari-server/src/main/java/org/apache/ambari/server/api/util/StackExtensionHelper.java
--
diff --git 
a/ambari-server/src/main/java/org/apache/ambari/server/api/util/StackExtensionHelper.java
 
b/ambari-server/src/main/java/org/apache/ambari/server/api/util/StackExtensionHelper.java
index 1c3f9a7..35ae9a7 100644
--- 
a/ambari-server/src/main/java/org/apache/ambari/server/api/util/StackExtensionHelper.java
+++ 
b/ambari-server/src/main/java/org/apache/ambari/server/api/util/StackExtensionHelper.java
@@ -490,11 +490,6 @@ public class StackExtensionHelper {
   }
 }
 
-// add action for service check
-if(serviceInfo.getCommandScript() != null) {
-  actionMetadata.addServiceCheckAction(serviceInfo.getName());
-}
-
   }
   parentStack = currentStackInfo;
 }
@@ -536,6 +531,7 @@ public class StackExtensionHelper {
   ParserConfigurationException, SAXException,
   XPathExpressionException, IOException, JAXBException {
 List services = new ArrayList();
+
 File servicesFolder = new File(stackRoot.getAbsolutePath() + File
   .separator + stackInfo.getName() + File.separator + 
stackInfo.getVersion()
   + File.separator + AmbariMetaInfo.SERVICES_FOLDER_NAME);
@@ -602,6 +598,14 @@ public class StackExtensionHelper {
 }
 
 stackInfo.getServices().addAll(services);
+
+// add service check actions from the target stack
+for(ServiceInfo serviceInfo : stackInfo.getServices()) {
+  if(serviceInfo.getCommandScript() != null) {
+actionMetadata.addServiceCheckAction(serviceInfo.getName());
+  }
+}
+
   }
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/df0f496a/ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java
--
diff --git 
a/ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java
 
b/ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java
index 7262dfb..84ba6aa 100644
--- 
a/ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java
+++ 
b/ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java
@@ -21,6 +21,7 @@ package org.apache.ambari.server.api.util;
 import com.google.inject.AbstractModule;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
+
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.metadata.ActionMetadata;
 import org.apache.ambari.server.state.*;
@@ -32,6 +33,7 @@ import javax.xml.bind.JAXBException;
 import javax.xml.namespace.QName;
 import javax.xml.parsers.ParserConfigurationException;
 import javax.xml.xpath.XPathExpressionException;
+
 import java.io.File;
 import java.io.IOException;
 import java.util.*;
@@ -353,6 +355,31 @@ public class StackExtensionHelperTest {
 Map supports = config.get(keyword);
 supports.put(attributeName, value);
   }
+
+  /**
+   * Ensures the service check action is added to the action metadata when
+   * the stack has no parent and is the only stack in the stack family.
+   * @throws Exception
+   */
+  @Test
+  public void testGetServiceInfoFromSingleStack() throws Exception {
+File stackRoot = new File("./src/test/resources/single_stack".replace("/", 
File.separator));
+StackExtensionHelper helper = new StackExtensionHelper(injector, 
stackRoot);
+helper.fillInfo();
+List stackInfoList = helper.getAllAvailableStacks();
+assertEquals(1, stackInfoList.size());
+
+List serviceInfoList = 
helper.getAllApplicableServices(stackInf

git commit: AMBARI-7551 Create mahout definition for bigtop (rmeneses via jaoki)

2014-10-08 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk eb563008d -> c764a574f


AMBARI-7551 Create mahout definition for bigtop (rmeneses via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c764a574
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c764a574
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c764a574

Branch: refs/heads/trunk
Commit: c764a574fe8f5c6807a9677228da3ef2938d34ec
Parents: eb56300
Author: Jun Aoki 
Authored: Wed Oct 8 11:40:13 2014 -0700
Committer: Jun Aoki 
Committed: Wed Oct 8 11:40:13 2014 -0700

--
 .../stacks/BIGTOP/0.8/role_command_order.json   |  2 +
 .../BIGTOP/0.8/services/MAHOUT/metainfo.xml | 66 ++
 .../services/MAHOUT/package/scripts/mahout.py   | 66 ++
 .../MAHOUT/package/scripts/mahout_client.py | 36 
 .../services/MAHOUT/package/scripts/params.py   | 55 
 .../MAHOUT/package/scripts/service_check.py | 92 
 .../MAHOUT/package/templates/mahout-env.sh.j2   | 34 
 7 files changed, 351 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/c764a574/ambari-server/src/main/resources/stacks/BIGTOP/0.8/role_command_order.json
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/role_command_order.json 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/role_command_order.json
index 2604b6d..69fcdac 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/role_command_order.json
@@ -26,6 +26,7 @@
 "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", 
"HIVE_METASTORE-START"],
 "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
 "PIG_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", 
"RESOURCEMANAGER-START"],
+"MAHOUT_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", 
"RESOURCEMANAGER-START"],
 "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", 
"RESOURCEMANAGER-START"],
 "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
 "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
@@ -55,6 +56,7 @@
 "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", 
"RESOURCEMANAGER-START"],
 "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"],
 "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", 
"NODEMANAGER-START"],
+"MAHOUT_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", 
"NODEMANAGER-START"],
 "NAMENODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
 "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
 "DATANODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",

http://git-wip-us.apache.org/repos/asf/ambari/blob/c764a574/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/metainfo.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/metainfo.xml
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/metainfo.xml
new file mode 100644
index 000..058f047
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/metainfo.xml
@@ -0,0 +1,66 @@
+
+
+
+  2.0
+  
+
+  MAHOUT
+  Mahout
+  The Apache Mahout project's goal is to build a scalable machine 
learning library
+  0.9.666
+  
+
+  MAHOUT
+  Mahout Client
+  CLIENT
+  0+
+  
+scripts/mahout_client.py
+PYTHON
+600
+  
+
+  
+  
+
+  any
+  
+
+  mahout
+
+  
+
+  
+
+  
+scripts/service_check.py
+PYTHON
+600
+  
+
+  
+YARN
+  
+
+  
+global
+  
+
+
+  
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/c764a574/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/package/scripts/mahout.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/package/scripts/mahout.py
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/package/scripts/mahout.py

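Each entry in role_command_order.json names the commands that must have started before the keyed command may run, so the new MAHOUT_SERVICE_CHECK entries gate the check on YARN being up. How such a map gates scheduling, as a small Python sketch (names illustrative):

def can_run(command, completed, role_command_order):
    # A command is runnable once all of its listed prerequisites have completed.
    return all(dep in completed for dep in role_command_order.get(command, []))

order = {"MAHOUT_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"]}
assert not can_run("MAHOUT_SERVICE_CHECK-SERVICE_CHECK", {"NODEMANAGER-START"}, order)
assert can_run("MAHOUT_SERVICE_CHECK-SERVICE_CHECK", {"NODEMANAGER-START", "RESOURCEMANAGER-START"}, order)
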
git commit: AMBARI-7551 Create mahout definition for bigtop (rmeneses via jaoki)

2014-10-08 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/branch-1.7.0 6f5db3166 -> 7d0064d7c


AMBARI-7551 Create mahout definition for bigtop (rmeneses via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7d0064d7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7d0064d7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7d0064d7

Branch: refs/heads/branch-1.7.0
Commit: 7d0064d7cf9b2489f81b88ab7333cd9d421c89b3
Parents: 6f5db31
Author: Jun Aoki 
Authored: Wed Oct 8 11:41:12 2014 -0700
Committer: Jun Aoki 
Committed: Wed Oct 8 11:41:12 2014 -0700

--
 .../stacks/BIGTOP/0.8/role_command_order.json   |  2 +
 .../BIGTOP/0.8/services/MAHOUT/metainfo.xml | 66 ++
 .../services/MAHOUT/package/scripts/mahout.py   | 66 ++
 .../MAHOUT/package/scripts/mahout_client.py | 36 
 .../services/MAHOUT/package/scripts/params.py   | 55 
 .../MAHOUT/package/scripts/service_check.py | 92 
 .../MAHOUT/package/templates/mahout-env.sh.j2   | 34 
 7 files changed, 351 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/7d0064d7/ambari-server/src/main/resources/stacks/BIGTOP/0.8/role_command_order.json
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/role_command_order.json 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/role_command_order.json
index 2604b6d..69fcdac 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/role_command_order.json
@@ -26,6 +26,7 @@
 "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", 
"HIVE_METASTORE-START"],
 "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
 "PIG_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", 
"RESOURCEMANAGER-START"],
+"MAHOUT_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", 
"RESOURCEMANAGER-START"],
 "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", 
"RESOURCEMANAGER-START"],
 "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
 "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
@@ -55,6 +56,7 @@
 "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", 
"RESOURCEMANAGER-START"],
 "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"],
 "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", 
"NODEMANAGER-START"],
+"MAHOUT_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", 
"NODEMANAGER-START"],
 "NAMENODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
 "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
 "DATANODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7d0064d7/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/metainfo.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/metainfo.xml
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/metainfo.xml
new file mode 100644
index 000..058f047
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/metainfo.xml
@@ -0,0 +1,66 @@
+
+
+
+  2.0
+  
+
+  MAHOUT
+  Mahout
+  The Apache Mahout project's goal is to build a scalable machine 
learning library
+  0.9.666
+  
+
+  MAHOUT
+  Mahout Client
+  CLIENT
+  0+
+  
+scripts/mahout_client.py
+PYTHON
+600
+  
+
+  
+  
+
+  any
+  
+
+  mahout
+
+  
+
+  
+
+  
+scripts/service_check.py
+PYTHON
+600
+  
+
+  
+YARN
+  
+
+  
+global
+  
+
+
+  
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/7d0064d7/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/package/scripts/mahout.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/package/scripts/mahout.py
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/MAHOUT/pack

ambari git commit: AMBARI-7924 Background operation notification plurality should match number of operations (apenniston via jaoki)

2014-11-12 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 2f639afe7 -> a77f8bd1d


AMBARI-7924 Background operation notification plurality should match number of 
operations (apenniston via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a77f8bd1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a77f8bd1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a77f8bd1

Branch: refs/heads/trunk
Commit: a77f8bd1dc3588d9342dc6aabcf5b16dd2e9a01c
Parents: 2f639af
Author: Jun Aoki 
Authored: Wed Nov 12 12:40:07 2014 -0800
Committer: Jun Aoki 
Committed: Wed Nov 12 12:40:07 2014 -0800

--
 ambari-web/app/messages.js   | 1 +
 ambari-web/app/templates/application.hbs | 6 --
 ambari-web/app/utils/helper.js   | 6 +++---
 3 files changed, 8 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/a77f8bd1/ambari-web/app/messages.js
--
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index c57a854..f795909 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -60,6 +60,7 @@ Em.I18n.translations = {
   'yes':'Yes',
   'no':'No',
   'add': 'Add',
+  'op': 'op',
   'ops': 'ops',
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a77f8bd1/ambari-web/app/templates/application.hbs
--
diff --git a/ambari-web/app/templates/application.hbs 
b/ambari-web/app/templates/application.hbs
index 1796a27..4034b03 100644
--- a/ambari-web/app/templates/application.hbs
+++ b/ambari-web/app/templates/application.hbs
@@ -28,9 +28,11 @@
   {{clusterDisplayName}} 

   {{#with App.router.backgroundOperationsController}}
 {{#if allOperationsCount}}
-   {{allOperationsCount}} 
{{t ops}}
+  
+{{allOperationsCount}} {{pluralize allOperationsCount 
singular="t:op" plural="t:ops"}}
 {{else}}
-  {{allOperationsCount}} {{t 
ops}}
+  
+{{allOperationsCount}} {{pluralize allOperationsCount 
singular="t:op" plural="t:ops"}}
 {{/if}}
   {{/with}}
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/a77f8bd1/ambari-web/app/utils/helper.js
--
diff --git a/ambari-web/app/utils/helper.js b/ambari-web/app/utils/helper.js
index 5a25b8f..27efd87 100644
--- a/ambari-web/app/utils/helper.js
+++ b/ambari-web/app/utils/helper.js
@@ -596,10 +596,10 @@ App.registerBoundHelper('pluralize', Em.View.extend({
 if (!plural) plural = singular + 's';
 else plural = this.parseValue(plural);
 if (singular && plural) {
-  if (count > 1) {
-return plural;
-  } else {
+  if (count == 1) {
 return singular;
+  } else {
+return plural;
   }
 }
 return '';

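The flipped comparison means exactly one operation gets the singular label while zero and many both get the plural ('0 ops', '1 op', '2 ops'). The rule as a stand-alone helper, sketched in Python rather than the Ember bound helper:

def pluralize(count, singular, plural=None):
    # Default the plural to singular + 's', as the helper does.
    if plural is None:
        plural = singular + "s"
    return singular if count == 1 else plural

assert pluralize(1, "op") == "op"
assert pluralize(0, "op") == "ops"
assert pluralize(2, "op") == "ops"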


ambari git commit: AMBARI-8297 ambari hadoop deploy fails in docker (jaoki)

2014-11-18 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk d9eb57388 -> d531181da


AMBARI-8297 ambari hadoop deploy fails in docker (jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d531181d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d531181d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d531181d

Branch: refs/heads/trunk
Commit: d531181dad51878498336a243ec3fb0de7094a17
Parents: d9eb573
Author: Jun Aoki 
Authored: Tue Nov 18 11:35:31 2014 -0800
Committer: Jun Aoki 
Committed: Tue Nov 18 11:35:31 2014 -0800

--
 .../docker/blueprints/single-node-HDP-2.1-blueprint1.json | 10 --
 1 file changed, 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/d531181d/dev-support/docker/docker/blueprints/single-node-HDP-2.1-blueprint1.json
--
diff --git 
a/dev-support/docker/docker/blueprints/single-node-HDP-2.1-blueprint1.json 
b/dev-support/docker/docker/blueprints/single-node-HDP-2.1-blueprint1.json
index 8ac8b2e..f8ab56b 100644
--- a/dev-support/docker/docker/blueprints/single-node-HDP-2.1-blueprint1.json
+++ b/dev-support/docker/docker/blueprints/single-node-HDP-2.1-blueprint1.json
@@ -1,11 +1,4 @@
 {
-   "configurations" : [
-   {
-   "nagios-env" : {
-   "nagios_contact": "me@my-awesome-domain.example"
-   }
-   }
-   ],
   "host_groups" : [
 {
   "name" : "host_group_1",
@@ -48,9 +41,6 @@
 },
 {
   "name" : "GANGLIA_SERVER"
-},
-{
-  "name" : "NAGIOS_SERVER"
 }
   ],
   "cardinality" : "1"



[1/4] ambari git commit: AMBARI-7878 BIGTOP stack definition should be updated (adenisso via jaoki)

2014-11-18 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk c9e24d5a1 -> 32b1fc38e


http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/files/templetonSmoke.sh
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/files/templetonSmoke.sh
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/files/templetonSmoke.sh
deleted file mode 100644
index 2d07b8b..000
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/files/templetonSmoke.sh
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export ttonhost=$1
-export smoke_test_user=$2
-export smoke_user_keytab=$3
-export security_enabled=$4
-export kinit_path_local=$5
-export ttonurl="http://${ttonhost}:50111/templeton/v1"
-
-if [[ $security_enabled == "true" ]]; then
-  kinitcmd="${kinit_path_local}  -kt ${smoke_user_keytab} ${smoke_test_user}; "
-else
-  kinitcmd=""
-fi
-
-export no_proxy=$ttonhost
-cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'
$ttonurl/status 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (status cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit 1
-fi
-
-exit 0
-
-#try hcat ddl command
-echo "user.name=${smoke_test_user}&exec=show databases;" /tmp/show_db.post.txt
-cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  
\@${destdir}/show_db.post.txt  $ttonurl/ddl 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (ddl cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit  1
-fi
-
-# NOT SURE?? SUHAS
-if [[ $security_enabled == "true" ]]; then
-  echo "Templeton Pig Smoke Tests not run in secure mode"
-  exit 0
-fi
-
-#try pig query
-outname=${smoke_test_user}.`date +"%M%d%y"`.$$;
-ttonTestOutput="/tmp/idtest.${outname}.out";
-ttonTestInput="/tmp/idtest.${outname}.in";
-ttonTestScript="idtest.${outname}.pig"
-
-echo "A = load '$ttonTestInput' using PigStorage(':');"  > /tmp/$ttonTestScript
-echo "B = foreach A generate \$0 as id; " >> /tmp/$ttonTestScript
-echo "store B into '$ttonTestOutput';" >> /tmp/$ttonTestScript
-
-#copy pig script to hdfs
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /tmp/$ttonTestScript 
/tmp/$ttonTestScript"
-
-#copy input file to hdfs
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /etc/passwd 
$ttonTestInput"
-
-#create, copy post args file
-echo -n "user.name=${smoke_test_user}&file=/tmp/$ttonTestScript" > 
/tmp/pig_post.txt
-
-#submit pig query
-cmd="curl -s -w 'http_code <%{http_code}>' -d  \@${destdir}/pig_post.txt  
$ttonurl/pig 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit 1
-fi
-
-exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/__init__.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/__init__.py
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/__init__.py
deleted file mode 100644
index 35de4bb..000
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/WEBHCAT/package/scripts/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (

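The smoke test being removed tags each curl invocation with -w 'http_code <%{http_code}>' and then recovers the status code with sed, failing unless it sees 200. The extraction step on its own, as a Python sketch:

import re

def http_status_from_curl_output(output):
    # Pull the code out of curl's "http_code <NNN>" marker.
    match = re.search(r"http_code <(\d+)>", output)
    return int(match.group(1)) if match else None

assert http_status_from_curl_output("... http_code <200>") == 200
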
[2/4] ambari git commit: AMBARI-7878 BIGTOP stack definition should be updated (adenisso via jaoki)

2014-11-18 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat_service_check.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat_service_check.py
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat_service_check.py
new file mode 100644
index 000..8d15e47
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat_service_check.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+def webhcat_service_check():
+  import params
+  File(format("{tmp_dir}/templetonSmoke.sh"),
+   content= StaticFile('templetonSmoke.sh'),
+   mode=0755
+  )
+
+  cmd = format("{tmp_dir}/templetonSmoke.sh {webhcat_server_host[0]} 
{smokeuser} {smokeuser_keytab}"
+   " {security_param} {kinit_path_local}",
+   smokeuser_keytab=params.smoke_user_keytab if 
params.security_enabled else "no_keytab")
+
+  Execute(cmd,
+  tries=3,
+  try_sleep=5,
+  path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+  logoutput=True)
+
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/templates/hcat-env.sh.j2
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/templates/hcat-env.sh.j2
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/templates/hcat-env.sh.j2
deleted file mode 100644
index 0b9dcc3..000
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/templates/hcat-env.sh.j2
+++ /dev/null
@@ -1,43 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-JAVA_HOME={{java64_home}}
-HCAT_PID_DIR={{hcat_pid_dir}}/
-HCAT_LOG_DIR={{hcat_log_dir}}/
-HCAT_CONF_DIR={{hcat_conf_dir}}
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-#DBROOT is the path where the connector jars are downloaded
-DBROOT={{hcat_dbroot}}
-USER={{hcat_user}}
-METASTORE_PORT={{hive_metastore_port}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/templates/startHiveserver2.sh.j2
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/templates/startHiveserver2.sh.j2
 
b/amb

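Execute(cmd, tries=3, try_sleep=5) above retries the smoke script with a pause between attempts. Roughly the loop it implies, as a sketch (not the resource_management implementation):

import subprocess
import time

def execute_with_retries(cmd, tries=3, try_sleep=5):
    # Retry a shell command, sleeping between failed attempts.
    for attempt in range(1, tries + 1):
        if subprocess.call(cmd, shell=True) == 0:
            return
        if attempt < tries:
            time.sleep(try_sleep)
    raise RuntimeError("command failed after {0} tries: {1}".format(tries, cmd))
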
[3/4] ambari git commit: AMBARI-7878 BIGTOP stack definition should be updated (adenisso via jaoki)

2014-11-18 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/32b1fc38/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
index 0946d84..fc53b44 100644
--- 
a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
+++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
@@ -24,6 +24,26 @@ import os
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
+#RPM versioning support
+rpm_version = default("/configurations/cluster-env/rpm_version", None)
+
+#hadoop params
+if rpm_version:
+  mapreduce_libs_path = "/usr/bigtop/current/hadoop-mapreduce-client/*"
+  hadoop_libexec_dir = "/usr/bigtop/current/hadoop-client/libexec"
+  hadoop_bin = "/usr/bigtop/current/hadoop-client/sbin"
+  hadoop_bin_dir = "/usr/bigtop/current/hadoop-client/bin"
+else:
+  mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+  hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+  hadoop_bin = "/usr/lib/hadoop/sbin"
+  hadoop_bin_dir = "/usr/bin"
+
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+limits_conf_dir = "/etc/security/limits.d"
+
+execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
 ulimit_cmd = "ulimit -c unlimited; "
 
 #security params
@@ -96,9 +116,7 @@ user_group = 
config['configurations']['cluster-env']['user_group']
 proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
 
 #hadoop params
-hadoop_conf_dir = "/etc/hadoop/conf"
 hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
-hadoop_bin = "/usr/lib/hadoop/sbin"
 
 hdfs_log_dir_prefix = 
config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
 hadoop_root_logger = 
config['configurations']['hadoop-env']['hadoop_root_logger']
@@ -106,8 +124,6 @@ hadoop_root_logger = 
config['configurations']['hadoop-env']['hadoop_root_logger'
 dfs_domain_socket_path = 
config['configurations']['hdfs-site']['dfs.domain.socket.path']
 dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
 
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-
 jn_edits_dir = 
config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']
 
 dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
@@ -124,6 +140,13 @@ namenode_formatted_mark_dir = 
format("/var/lib/hdfs/namenode/formatted/")
 fs_checkpoint_dir = 
config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir']
 
 dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
+data_dir_mount_file = 
config['configurations']['hadoop-env']['dfs.datanode.data.dir.mount.file']
+
+dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
+dfs_dn_http_addr = 
default('/configurations/hdfs-site/dfs.datanode.http.address', None)
+dfs_dn_https_addr = 
default('/configurations/hdfs-site/dfs.datanode.https.address', None)
+dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
+
 # HDFS High Availability properties
 dfs_ha_enabled = False
 dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", 
None)
@@ -174,11 +197,10 @@ HdfsDirectory = functools.partial(
   hdfs_user=hdfs_user,
   security_enabled = security_enabled,
   keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local
+  kinit_path_local = kinit_path_local,
+  bin_dir = hadoop_bin_dir
 )
 
-limits_conf_dir = "/etc/security/limits.d"
-
 io_compression_codecs = 
config['configurations']['core-site']['io.compression.codecs']
 if not "com.hadoop.compression.lzo" in io_compression_codecs:
   exclude_packages = ["lzo", "hadoop-lzo", "hadoop-lzo-native", "liblzo2-2"]
@@ -187,14 +209,15 @@ else:
 name_node_params = default("/commandParams/namenode", None)
 
 #hadoop params
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-
 hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
 
 #hadoop-env.sh
 java_home = config['hostLevelParams']['java_home']
+stack_version = str(config['hostLevelParams']['stack_version'])
+
+stack_is_champlain_or_further = not (stack_version.startswith('2.0') or 
stack_version.startswith('2.1'))
 
-if str(config['hostLevelParams']['stack_version']).startswith('2.0') and 
System.get_instance().os_family != "suse":
+if stack_version.startswith('2.0') and System.get_instance().os_family != 
"suse":
   # deprecated rhel jsvc_path
   jsvc_path = "/usr/libexec/bigtop-utils"
 else:
@@ -214,5 +237,4 @@ ttnode_heapsize = "1024m"
 
 dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
 mapred_pid_dir_prefix = 
default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
 mapred_log

[4/4] ambari git commit: AMBARI-7878 BIGTOP stack definition should be updated (adenisso via jaoki)

2014-11-18 Thread jaoki
AMBARI-7878 BIGTOP stack definition should be updated (adenisso via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/32b1fc38
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/32b1fc38
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/32b1fc38

Branch: refs/heads/trunk
Commit: 32b1fc38e374bfc2285c37f8dd83c04b35ab3c92
Parents: c9e24d5
Author: Jun Aoki 
Authored: Tue Nov 18 14:38:06 2014 -0800
Committer: Jun Aoki 
Committed: Tue Nov 18 14:38:06 2014 -0800

--
 .../0.8/hooks/after-INSTALL/scripts/hook.py |   2 +-
 .../0.8/hooks/after-INSTALL/scripts/params.py   |  25 +-
 .../scripts/shared_initialization.py|  24 +-
 .../hooks/before-ANY/files/changeToSecureUid.sh |  50 
 .../BIGTOP/0.8/hooks/before-ANY/scripts/hook.py |   2 +
 .../0.8/hooks/before-ANY/scripts/params.py  | 105 ++-
 .../before-ANY/scripts/shared_initialization.py |  56 
 .../before-INSTALL/files/changeToSecureUid.sh   |  50 
 .../0.8/hooks/before-INSTALL/scripts/hook.py|   1 -
 .../0.8/hooks/before-INSTALL/scripts/params.py  |   6 +-
 .../scripts/shared_initialization.py|  41 +--
 .../hooks/before-START/files/checkForFormat.sh  |   5 +-
 .../0.8/hooks/before-START/scripts/hook.py  |   1 -
 .../0.8/hooks/before-START/scripts/params.py|  24 +-
 .../scripts/shared_initialization.py|  12 +-
 .../stacks/BIGTOP/0.8/repos/repoinfo.xml|   2 +-
 .../stacks/BIGTOP/0.8/role_command_order.json   |   3 +-
 .../services/FLUME/configuration/flume-env.xml  |  38 +++
 .../BIGTOP/0.8/services/FLUME/metainfo.xml  |   2 +-
 .../0.8/services/FLUME/package/scripts/flume.py |   9 +-
 .../FLUME/package/scripts/flume_check.py|   2 +-
 .../services/FLUME/package/scripts/params.py|  12 +-
 .../GANGLIA/configuration/ganglia-env.xml   |   5 +
 .../GANGLIA/package/files/startRrdcached.sh |   2 +-
 .../GANGLIA/package/scripts/ganglia_monitor.py  |   4 +-
 .../services/GANGLIA/package/scripts/params.py  |  31 +-
 .../services/HBASE/configuration/hbase-site.xml |  39 ---
 .../BIGTOP/0.8/services/HBASE/metainfo.xml  |   2 +-
 .../HBASE/package/files/hbaseSmokeVerify.sh |   3 +-
 .../HBASE/package/scripts/hbase_decommission.py |  62 ++--
 .../HBASE/package/scripts/hbase_service.py  |   2 +-
 .../services/HBASE/package/scripts/params.py|  33 ++-
 .../HBASE/package/scripts/service_check.py  |   6 +-
 .../services/HDFS/configuration/hadoop-env.xml  |  10 +-
 .../services/HDFS/configuration/hdfs-site.xml   |  94 +-
 .../BIGTOP/0.8/services/HDFS/metainfo.xml   |   2 +-
 .../HDFS/package/files/checkForFormat.sh|   4 +-
 .../0.8/services/HDFS/package/scripts/hdfs.py   |   9 +
 .../HDFS/package/scripts/hdfs_datanode.py   |  27 +-
 .../HDFS/package/scripts/hdfs_namenode.py   |  15 +-
 .../services/HDFS/package/scripts/namenode.py   |   2 +-
 .../0.8/services/HDFS/package/scripts/params.py |  44 ++-
 .../HDFS/package/scripts/service_check.py   |  35 ++-
 .../0.8/services/HDFS/package/scripts/utils.py  |  93 +-
 .../services/HIVE/configuration/hcat-env.xml|  57 
 .../services/HIVE/configuration/hive-env.xml|   2 +-
 .../services/HIVE/configuration/hive-site.xml   | 284 ---
 .../services/HIVE/configuration/webhcat-env.xml |  54 
 .../HIVE/configuration/webhcat-site.xml | 138 +
 .../BIGTOP/0.8/services/HIVE/metainfo.xml   | 153 +-
 .../HIVE/package/files/templetonSmoke.sh|  96 +++
 .../0.8/services/HIVE/package/scripts/hcat.py   |  21 +-
 .../HIVE/package/scripts/hcat_service_check.py  |  10 +-
 .../0.8/services/HIVE/package/scripts/hive.py   |  35 +--
 .../HIVE/package/scripts/hive_service.py|  34 +--
 .../HIVE/package/scripts/install_jars.py|  37 ++-
 .../0.8/services/HIVE/package/scripts/params.py | 108 +--
 .../HIVE/package/scripts/postgresql_server.py   |   8 +-
 .../HIVE/package/scripts/postgresql_service.py  |   2 +
 .../HIVE/package/scripts/service_check.py   |  13 +-
 .../HIVE/package/scripts/status_params.py   |   1 +
 .../services/HIVE/package/scripts/webhcat.py| 131 +
 .../HIVE/package/scripts/webhcat_server.py  |  53 
 .../HIVE/package/scripts/webhcat_service.py |  40 +++
 .../package/scripts/webhcat_service_check.py|  41 +++
 .../HIVE/package/templates/hcat-env.sh.j2   |  43 ---
 .../package/templates/startHiveserver2.sh.j2|   2 +-
 .../BIGTOP/0.8/services/MAHOUT/metainfo.xml |  66 -
 .../services/MAHOUT/package/scripts/mahout.py   |  66 -
 .../MAHOUT/package/scripts/mahout_client.py |  36 ---
 .../services/MAHOUT/package/scripts/params.py   |  55 
 .../MAHOUT/package/scripts/service_check.py |  92 --
 .../MAHOUT/package/templates/mahout-env.sh.j2   |  34 ---
 .../services/OOZIE/configuration/oozie

ambari git commit: AMBARI-8015 Dashboard: add a widget for YARN links (similar to the HDFS Links) (rpidva via jaoki)

2014-11-18 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 3d9962970 -> c0dc64ca3


AMBARI-8015 Dashboard: add a widget for YARN links (similar to the HDFS Links) 
(rpidva via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c0dc64ca
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c0dc64ca
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c0dc64ca

Branch: refs/heads/trunk
Commit: c0dc64ca3f9b634ed3e98fc614c0e7feb7fd5651
Parents: 3d99629
Author: Jun Aoki 
Authored: Tue Nov 18 15:28:16 2014 -0800
Committer: Jun Aoki 
Committed: Tue Nov 18 15:28:16 2014 -0800

--
 ambari-web/app/messages.js  |  1 +
 .../main/dashboard/widgets/yarn_links.hbs   | 84 
 ambari-web/app/views.js |  1 +
 ambari-web/app/views/main/dashboard/widgets.js  | 11 +--
 .../views/main/dashboard/widgets/yarn_links.js  | 29 +++
 5 files changed, 121 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/c0dc64ca/ambari-web/app/messages.js
--
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index ea16bef..33a36bd 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -2003,6 +2003,7 @@ Em.I18n.translations = {
   'dashboard.widgets.ResourceManagerUptime': 'ResourceManager Uptime',
   'dashboard.widgets.NodeManagersLive': 'NodeManagers Live',
   'dashboard.widgets.YARNMemory': 'YARN Memory',
+  'dashboard.widgets.YARNLinks': 'YARN Links',
 
   'dashboard': {
 'widgets': {

http://git-wip-us.apache.org/repos/asf/ambari/blob/c0dc64ca/ambari-web/app/templates/main/dashboard/widgets/yarn_links.hbs
--
diff --git a/ambari-web/app/templates/main/dashboard/widgets/yarn_links.hbs 
b/ambari-web/app/templates/main/dashboard/widgets/yarn_links.hbs
new file mode 100644
index 000..7cad4f1
--- /dev/null
+++ b/ambari-web/app/templates/main/dashboard/widgets/yarn_links.hbs
@@ -0,0 +1,84 @@
+{{!
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+}}
+
+
+  
+
+  
+
+  
+
+   {{view.title}}
+
+  
+
+  
+  
+
+   {{t dashboard.services.yarn.resourceManager}}
+
+  
+
+  
+  
+
+   {{view.model.nodeManagersTotal}} {{t dashboard.services.yarn.nodeManagers}}
+
+  
+
+  
+
+  
+{{#if view.model.quickLinks.length}}
+  {{#view App.QuickViewLinks contentBinding="view.model"}}
+
+  
+{{t common.more}}
+
+  
+  
+{{#if view.isLoaded}}
+  {{#if view.quickLinksArray}}
+
+{{#each quickLinks in view.quickLinksArray}}
+  
+{{quickLinks.publicHostNameLabel}}  
+
+  {{#each quickLinks}}
+{{label}}
+  {{/each}}
+
+  
+{{/each}}
+  {{else}}
+{{#each view.quickLinks}}
+  {{label}}
+{{/each}}
+  {{/if}}
+{{else}}
+  
+{{/if}}
+  
+
+  {{/view}}
+{{/if}}
+  
+
+  
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/c0dc64ca/ambari-web/app/views.js
--
diff --git a/ambari-web/app/views.js b/ambari-web/app/views.js
index 894d8b5..50f3147 100644
--- a/ambari-web/app/views.js
+++ b/ambari-web/app/views.js
@@ -143,6 +143,7 @@

[03/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/files/validateYarnComponentStatus.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/files/validateYarnComponentStatus.py
 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/files/validateYarnComponentStatus.py
new file mode 100644
index 000..33ed8b1
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/package/files/validateYarnComponentStatus.py
@@ -0,0 +1,170 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+import subprocess
+import json
+
+RESOURCEMANAGER = 'rm'
+NODEMANAGER = 'nm'
+HISTORYSERVER = 'hs'
+
+STARTED_STATE = 'STARTED'
+RUNNING_STATE = 'RUNNING'
+
+#Return the response for the given path and address
+def getResponse(path, address, ssl_enabled):
+
+  command = "curl"
+  httpGssnegotiate = "--negotiate"
+  userpswd = "-u:"
+  insecure = "-k"  # this is a smoke test, no need to verify the server's CA
+  if ssl_enabled:
+url = 'https://' + address + path
+  else:
+url = 'http://' + address + path
+  
+  command_with_flags = [command,httpGssnegotiate,userpswd,insecure,url]
+
+  proc = subprocess.Popen(command_with_flags, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+  (stdout, stderr) = proc.communicate()
+  response = json.loads(stdout)
+  if response == None:
+print 'There is no response for url: ' + str(url)
+raise Exception('There is no response for url: ' + str(url))
+  return response
+
+#Verify that the REST API is available for the given component
+def validateAvailability(component, path, addresses, ssl_enabled):
+  responses = {}
+  for address in addresses.split(','):
+try:
+  responses[address] = getResponse(path, address, ssl_enabled)
+except Exception as e:
+  print 'Error checking availability status of component.', e
+
+  if not responses:
+exit(1)
+
+  is_valid = validateAvailabilityResponse(component, responses.values()[0])
+  if not is_valid:
+exit(1)
+
+#Validate component-specific response
+def validateAvailabilityResponse(component, response):
+  try:
+if component == RESOURCEMANAGER:
+  rm_state = response['clusterInfo']['state']
+  if rm_state == STARTED_STATE:
+return True
+  else:
+print 'ResourceManager is not started'
+return False
+
+elif component == NODEMANAGER:
+  node_healthy = bool(response['nodeInfo']['nodeHealthy'])
+  if node_healthy:
+return True
+  else:
+return False
+elif component == HISTORYSERVER:
+  hs_start_time = response['historyInfo']['startedOn']
+  if hs_start_time > 0:
+return True
+  else:
+return False
+else:
+  return False
+  except Exception as e:
+print 'Error validating availability response for ' + str(component), e
+return False
+
+#Verify that the component has the required resources to work
+def validateAbility(component, path, addresses, ssl_enabled):
+  responses = {}
+  for address in addresses.split(','):
+try:
+  responses[address] = getResponse(path, address, ssl_enabled)
+except Exception as e:
+  print 'Error checking ability of component.', e
+
+  if not responses:
+exit(1)
+
+  is_valid = validateAbilityResponse(component, responses.values()[0])
+  if not is_valid:
+exit(1)
+
+#Validate from the component-specific response that it has the required resources to work
+def validateAbilityResponse(component, response):
+  try:
+if component == RESOURCEMANAGER:
+  nodes = []
+  if response.has_key('nodes') and response['nodes'] is not None and response['nodes'].has_key('node'):
+nodes = response['nodes']['node']
+  connected_nodes_count = len(nodes)
+  if connected_nodes_count == 0:
+print 'There are no nodemanagers connected to the resourcemanager'
+return False
+  active_nodes = filter(lambda x: x['state'] == RUNNING_STATE, nodes)
+  active_nodes_count = len(active_nodes)
+
+  if active_nodes_count == 0:
+print 'There is no connected active nodemanager

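For reference, the ResourceManager availability branch above reduces to one REST probe. A minimal Python 2 sketch of the same check; the endpoint path is an assumption here (the script's path constants sit outside the quoted excerpt) chosen to match the 'clusterInfo'/'state' keys the validator reads, and curl's -k behavior is not replicated:

import json
import urllib2

def rm_started(host_port, ssl_enabled=False):
  # assumed endpoint: the YARN RM cluster-info resource
  scheme = 'https' if ssl_enabled else 'http'
  url = '%s://%s/ws/v1/cluster/info' % (scheme, host_port)
  response = json.load(urllib2.urlopen(url))
  return response['clusterInfo']['state'] == 'STARTED'
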
[19/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/files/draining_servers.rb
--
diff --git 
a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/files/draining_servers.rb
 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/files/draining_servers.rb
new file mode 100644
index 000..5bcb5b6
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/package/files/draining_servers.rb
@@ -0,0 +1,164 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Add or remove servers from draining mode via zookeeper 
+
+require 'optparse'
+include Java
+
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.client.HBaseAdmin
+import org.apache.hadoop.hbase.zookeeper.ZKUtil
+import org.apache.commons.logging.Log
+import org.apache.commons.logging.LogFactory
+
+# Name of this script
+NAME = "draining_servers"
+
+# Do command-line parsing
+options = {}
+optparse = OptionParser.new do |opts|
+  opts.banner = "Usage: ./hbase org.jruby.Main #{NAME}.rb [options] add|remove|list <hostname>|<host:port>|<host,port,startCode> ..."
+  opts.separator 'Add, remove or list servers in draining mode. Can accept either a hostname to drain all region servers ' +
+ 'in that host, a host:port pair or a host,port,startCode triplet. More than one server can be given, separated by spaces'
+  opts.on('-h', '--help', 'Display usage information') do
+puts opts
+exit
+  end
+  options[:debug] = false
+  opts.on('-d', '--debug', 'Display extra debug logging') do
+options[:debug] = true
+  end
+end
+optparse.parse!
+
+# Return array of servernames where servername is hostname+port+startcode
+# comma-delimited
+def getServers(admin)
+  serverInfos = admin.getClusterStatus().getServerInfo()
+  servers = []
+  for server in serverInfos
+servers << server.getServerName()
+  end
+  return servers
+end
+
+def getServerNames(hostOrServers, config)
+  ret = []
+  
+  for hostOrServer in hostOrServers
+# check whether it is already serverName. No need to connect to cluster
+parts = hostOrServer.split(',')
+if parts.size() == 3
+  ret << hostOrServer
+else 
+  admin = HBaseAdmin.new(config) if not admin
+  servers = getServers(admin)
+
+  hostOrServer = hostOrServer.gsub(/:/, ",")
+  for server in servers 
+ret << server if server.start_with?(hostOrServer)
+  end
+end
+  end
+  
+  admin.close() if admin
+  return ret
+end
+
+def addServers(options, hostOrServers)
+  config = HBaseConfiguration.create()
+  servers = getServerNames(hostOrServers, config)
+  
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, 
"draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+  
+  begin
+for server in servers
+  node = ZKUtil.joinZNode(parentZnode, server)
+  ZKUtil.createAndFailSilent(zkw, node)
+end
+  ensure
+zkw.close()
+  end
+end
+
+def removeServers(options, hostOrServers)
+  config = HBaseConfiguration.create()
+  servers = getServerNames(hostOrServers, config)
+  
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, 
"draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+  
+  begin
+for server in servers
+  node = ZKUtil.joinZNode(parentZnode, server)
+  ZKUtil.deleteNodeFailSilent(zkw, node)
+end
+  ensure
+zkw.close()
+  end
+end
+
+# list servers in draining mode
+def listServers(options)
+  config = HBaseConfiguration.create()
+  
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, 
"draining_servers", nil)
+  parentZnode = zkw.drainingZNode
+
+  servers = ZKUtil.listChildrenNoWatch(zkw, parentZnode)
+  servers.each {|server| puts server}
+end
+
+hostOrServers = ARGV[1..ARGV.size()]
+
+# Create a logger and disable the DEBUG-level annoying client logging
+def configureLogging(options)
+  apacheLogger = LogFactory.getLog(NAME)
+  # Configure log4j to not spew so much
+  unless (options[:debug]) 
+logger = org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase")
+logger.setLevel(org.apache.log4j.Le

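The matching rule in getServerNames above is easy to miss: a full host,port,startCode triplet is taken verbatim, while a bare hostname or host:port pair is normalized to comma form and matched as a prefix of the live server names. The same rule as a small Python sketch (hostnames illustrative):

def matches(host_or_server, server_name):
  # server_name is 'host,port,startCode'
  parts = host_or_server.split(',')
  if len(parts) == 3:
    return host_or_server == server_name
  prefix = host_or_server.replace(':', ',')
  return server_name.startswith(prefix)

# e.g. matches('rs1.example.com:60020', 'rs1.example.com,60020,1416000000000') is True
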
[06/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/files/oozieSmoke2.sh
--
diff --git 
a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/files/oozieSmoke2.sh
 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/files/oozieSmoke2.sh
new file mode 100644
index 000..4e21aad
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/OOZIE/package/files/oozieSmoke2.sh
@@ -0,0 +1,112 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+os_family=$1
+shift
+
+function getValueFromField {
+  xmllint $1 | grep "$2" -C 2 | grep '<value>' | cut -d ">" -f2 | cut -d "<" -f1
+  return $?
+}
+
+function checkOozieJobStatus {
+  local job_id=$1
+  local num_of_tries=$2
+  #default num_of_tries to 10 if not present
+  num_of_tries=${num_of_tries:-10}
+  local i=0
+  local rc=1
+  local cmd="source ${oozie_conf_dir}/oozie-env.sh ; ${oozie_bin_dir}/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
+  su -s /bin/bash - ${smoke_test_user} -c "$cmd"
+  while [ $i -lt $num_of_tries ] ; do
+cmd_output=`su -s /bin/bash - ${smoke_test_user} -c "$cmd"`
+(IFS='';echo $cmd_output)
+act_status=$(IFS='';echo $cmd_output | grep ^Status | cut -d':' -f2 | sed 's| ||g')
+echo "workflow_status=$act_status"
+if [ "RUNNING" == "$act_status" ]; then
+  #increment the counter and get the status again after waiting for 15 secs
+  sleep 15
+  (( i++ ))
+  elif [ "SUCCEEDED" == "$act_status" ]; then
+rc=0;
+break;
+  else
+rc=1
+break;
+  fi
+done
+return $rc
+}
+
+export oozie_conf_dir=$1
+export oozie_bin_dir=$2
+export hadoop_conf_dir=$3
+export hadoop_bin_dir=$4
+export smoke_test_user=$5
+export security_enabled=$6
+export smoke_user_keytab=$7
+export kinit_path_local=$8
+
+export OOZIE_EXIT_CODE=0
+export JOBTRACKER=`getValueFromField ${hadoop_conf_dir}/yarn-site.xml yarn.resourcemanager.address`
+export NAMENODE=`getValueFromField ${hadoop_conf_dir}/core-site.xml fs.defaultFS`
+export OOZIE_SERVER=`getValueFromField ${oozie_conf_dir}/oozie-site.xml oozie.base.url | tr '[:upper:]' '[:lower:]'`
+
+if [ "$os_family" == "ubuntu" ] ; then
+  LIST_PACKAGE_FILES_CMD='dpkg-query -L'
+else
+  LIST_PACKAGE_FILES_CMD='rpm -ql'
+fi
+  
+
+export OOZIE_EXAMPLES_DIR=`$LIST_PACKAGE_FILES_CMD oozie-client | grep 'oozie-examples.tar.gz$' | xargs dirname`
+if [[ -z "$OOZIE_EXAMPLES_DIR" ]] ; then
+  export OOZIE_EXAMPLES_DIR='/usr/phd/current/oozie-client/doc/'
+fi
+cd $OOZIE_EXAMPLES_DIR
+
+tar -zxf oozie-examples.tar.gz
+sed -i "s|nameNode=hdfs://localhost:8020|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
+sed -i "s|nameNode=hdfs://localhost:9000|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
+sed -i "s|jobTracker=localhost:8021|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
+sed -i "s|jobTracker=localhost:9001|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
+sed -i "s|jobTracker=localhost:8032|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
+sed -i "s|oozie.wf.application.path=hdfs://localhost:9000|oozie.wf.application.path=$NAMENODE|g" examples/apps/map-reduce/job.properties
+
+if [[ $security_enabled == "True" ]]; then
+  kinitcmd="${kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user}; "
+else 
+  kinitcmd=""
+fi
+
+su -s /bin/bash - ${smoke_test_user} -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -rm -r examples"
+su -s /bin/bash - ${smoke_test_user} -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -rm -r input-data"
+su -s /bin/bash - ${smoke_test_user} -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
+su -s /bin/bash - ${smoke_test_user} -c "${hadoop_bin_dir}/hdfs --config ${hadoop_conf_dir} dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
+
+cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; ${oozie_bin_dir}/oozie 
-Doozie.auth.token.cache=false job 

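getValueFromField above scrapes a Hadoop *-site.xml with an xmllint/grep/cut pipeline; the same lookup is clearer with a real XML parser. A Python sketch of the equivalent, assuming the usual configuration/property/name/value layout of these files:

import xml.etree.ElementTree as ET

def get_value_from_field(site_xml, name):
  # return the <value> of the <property> whose <name> matches
  for prop in ET.parse(site_xml).getroot().findall('property'):
    if prop.findtext('name') == name:
      return prop.findtext('value')
  return None

# e.g. get_value_from_field('core-site.xml', 'fs.defaultFS')
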
[23/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/scripts/shared_initialization.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/scripts/shared_initialization.py
 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/scripts/shared_initialization.py
new file mode 100644
index 000..f70eee8
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/hooks/before-START/scripts/shared_initialization.py
@@ -0,0 +1,177 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management import *
+
+def setup_hadoop():
+  """
+  Setup hadoop files and directories
+  """
+  import params
+
+  Execute("/bin/echo 0 > /selinux/enforce",
+  only_if="test -f /selinux/enforce"
+  )
+
+  install_snappy()
+
+  #directories
+  if params.has_namenode:
+Directory(params.hdfs_log_dir_prefix,
+  recursive=True,
+  owner='root',
+  group=params.user_group,
+  mode=0775
+)
+Directory(params.hadoop_pid_dir_prefix,
+  recursive=True,
+  owner='root',
+  group='root'
+)
+  #this isn't needed with stack 1
+Directory(params.hadoop_tmp_dir,
+  recursive=True,
+  owner=params.hdfs_user,
+  )
+  #files
+if params.security_enabled:
+  tc_owner = "root"
+else:
+  tc_owner = params.hdfs_user
+
+File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
+ owner=tc_owner,
+ content=Template('commons-logging.properties.j2')
+)
+
+health_check_template = "health_check-v2" #for stack 1 use 'health_check'
+File(os.path.join(params.hadoop_conf_dir, "health_check"),
+ owner=tc_owner,
+ content=Template(health_check_template + ".j2")
+)
+
+log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
+if (params.log4j_props != None):
+  File(log4j_filename,
+   mode=0644,
+   group=params.user_group,
+   owner=params.hdfs_user,
+   content=params.log4j_props
+  )
+elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
+  File(log4j_filename,
+   mode=0644,
+   group=params.user_group,
+   owner=params.hdfs_user,
+  )
+
+File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
+ owner=params.hdfs_user,
+ content=Template("hadoop-metrics2.properties.j2")
+)
+
+def setup_database():
+  """
+  Load DB
+  """
+  import params
+  db_driver_dload_cmd = ""
+  environment = {
+"no_proxy": format("{ambari_server_hostname}")
+  }
+  if params.server_db_name == 'oracle' and params.oracle_driver_url != "":
+db_driver_dload_cmd = format(
+  "curl -kf -x \"\" --retry 5 {oracle_driver_symlink_url} -o {hadoop_lib_home}/{db_driver_filename}")
+  elif params.server_db_name == 'mysql' and params.mysql_driver_url != "":
+db_driver_dload_cmd = format(
+  "curl -kf -x \"\" --retry 5 {mysql_driver_symlink_url} -o {hadoop_lib_home}/{db_driver_filename}")
+
+  if db_driver_dload_cmd:
+Execute(db_driver_dload_cmd,
+not_if =format("test -e {hadoop_lib_home}/{db_driver_filename}"),
+environment = environment
+)
+
+
+def setup_configs():
+  """
+  Creates configs for services HDFS mapred
+  """
+  import params
+
+  if params.has_namenode:
+File(params.task_log4j_properties_location,
+ content=StaticFile("task-log4j.properties"),
+ mode=0755
+)
+
+if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
+  File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
+   owner=params.hdfs_user,
+   group=params.user_group
+  )
+if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
+  File(os.path.join(params.hadoop_conf_dir, 'masters'),
+owner=params.hdfs_user,
+group=params.user_group
+  )
+
+  generate_include_file()
+
+
+de

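A note on setup_database() above: the download is wrapped in not_if, so a rerun is a no-op once the driver jar exists. Stripped of the resource_management DSL, the pattern is just the following (sketch; fetch_driver is an illustrative name, not part of the hook):

import os
import subprocess

def fetch_driver(url, dest):
  # equivalent of Execute(..., not_if="test -e <dest>")
  if os.path.exists(dest):
    return
  subprocess.check_call(['curl', '-kf', '-x', '', '--retry', '5', url, '-o', dest])
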
[05/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration/yarn-env.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration/yarn-env.xml
 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration/yarn-env.xml
new file mode 100644
index 000..5730d4a
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/configuration/yarn-env.xml
@@ -0,0 +1,184 @@
+<?xml version="1.0"?>
+<configuration>
+  <property>
+    <name>yarn_log_dir_prefix</name>
+    <value>/var/log/hadoop-yarn</value>
+    <description>YARN Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>yarn_pid_dir_prefix</name>
+    <value>/var/run/hadoop-yarn</value>
+    <description>YARN PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>yarn_user</name>
+    <value>yarn</value>
+    <property-type>USER</property-type>
+    <description>YARN User</description>
+  </property>
+  <property>
+    <name>yarn_heapsize</name>
+    <value>1024</value>
+    <description>Max heapsize for all YARN components using a numerical value in the scale of MB</description>
+  </property>
+  <property>
+    <name>resourcemanager_heapsize</name>
+    <value>1024</value>
+    <description>Max heapsize for ResourceManager using a numerical value in the scale of MB</description>
+  </property>
+  <property>
+    <name>nodemanager_heapsize</name>
+    <value>1024</value>
+    <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
+  </property>
+  <property>
+    <name>min_user_id</name>
+    <value>1000</value>
+    <description>Set to 0 to disallow root from submitting jobs. Set to 1000 to disallow all superusers from submitting jobs</description>
+  </property>
+  <property>
+    <name>apptimelineserver_heapsize</name>
+    <value>1024</value>
+    <description>Max heapsize for AppTimelineServer using a numerical value in the scale of MB</description>
+  </property>
+
+  <!-- yarn-env.sh -->
+  <property>
+    <name>content</name>
+    <description>This is the jinja template for yarn-env.sh file</description>
+    <value>
+export HADOOP_YARN_HOME={{hadoop_yarn_home}}
+export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
+export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+export JAVA_HOME={{java64_home}}
+
+# User for YARN daemons
+export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+
+# resolve links - $0 may be a softlink
+export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
+
+# some Java parameters
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+if [ "$JAVA_HOME" != "" ]; then
+  #echo "run java in $JAVA_HOME"
+  JAVA_HOME=$JAVA_HOME
+fi
+
+if [ "$JAVA_HOME" = "" ]; then
+  echo "Error: JAVA_HOME is not set."
+  exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# For setting YARN specific HEAP sizes please use this
+# Parameter and set appropriately
+YARN_HEAPSIZE={{yarn_heapsize}}
+
+# check envvars which might override default args
+if [ "$YARN_HEAPSIZE" != "" ]; then
+  JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+fi
+
+# Resource Manager specific parameters
+
+# Specify the max Heapsize for the ResourceManager using a numerical value
+# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_RESOURCEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
+
+# Specify the JVM options to be used when starting the ResourceManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_RESOURCEMANAGER_OPTS=
+
+# Node Manager specific parameters
+
+# Specify the max Heapsize for the NodeManager using a numerical value
+# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_NODEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
+
+# Specify the max Heapsize for the HistoryManager using a numerical value
+# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+# the value to 1024.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_HISTORYSERVER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}
+
+# Specify the JVM options to be used when starting the NodeManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_NODEMANAGER_OPTS=
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+
+# default log directory and file
+if [ "$YARN_LOG_DIR" = "" ]; then
+  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+if [ "$YARN_LOGFILE" = "" ]; then
+  YARN_LOGFILE='yarn.log'
+fi
+
+# default policy file for service-level authorization
+if [ "$YARN_PO

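The <value> block above is a Jinja2 template: at deploy time the {{...}} placeholders are filled from the params defined in the first half of the file. A minimal illustration using the jinja2 package directly (Ambari renders through its own Template wrapper, so this only sketches the substitution):

from jinja2 import Template

snippet = Template("export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\nYARN_HEAPSIZE={{yarn_heapsize}}\n")
print snippet.render(yarn_log_dir_prefix='/var/log/hadoop-yarn', yarn_heapsize='1024')
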
[15/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/balancer-err.log
--
diff --git 
a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/balancer-err.log
 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/balancer-err.log
new file mode 100644
index 000..d7c6704
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/balancer-err.log
@@ -0,0 +1,1032 @@
+14/07/28 17:01:48 INFO balancer.Balancer: Using a threshold of 5.0
+14/07/28 17:01:48 INFO balancer.Balancer: namenodes = 
[hdfs://evhubudsd1aae.budapest.epam.com:8020]
+14/07/28 17:01:48 INFO balancer.Balancer: p = 
Balancer.Parameters[BalancingPolicy.Node, threshold=5.0]
+14/07/28 17:01:49 INFO balancer.Balancer: Block token params received from NN: 
keyUpdateInterval=600 min(s), tokenLifetime=600 min(s)
+14/07/28 17:01:49 INFO block.BlockTokenSecretManager: Setting block keys
+14/07/28 17:01:49 INFO balancer.Balancer: Balancer will update its block keys 
every 150 minute(s)
+14/07/28 17:01:49 INFO block.BlockTokenSecretManager: Setting block keys
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.130.9:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.129.224:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.130.8:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.130.4:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.129.225:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.129.223:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.130.3:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.130.1:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.130.2:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.130.0:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.130.11:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.130.6:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.130.10:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.130.7:50010
+14/07/28 17:01:49 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.130.5:50010
+14/07/28 17:01:49 INFO balancer.Balancer: 1 over-utilized: 
[Source[10.253.130.9:50010, utilization=34.887235026238486]]
+14/07/28 17:01:49 INFO balancer.Balancer: 1 underutilized: 
[BalancerDatanode[10.253.130.5:50010, utilization=21.178140109955496]]
+14/07/28 17:01:49 INFO balancer.Balancer: Need to move 5.74 GB to make the 
cluster balanced.
+14/07/28 17:01:49 INFO balancer.Balancer: Decided to move 9.79 GB bytes from 
10.253.130.9:50010 to 10.253.130.5:50010
+14/07/28 17:01:49 INFO balancer.Balancer: Will move 9.79 GB in this iteration
+14/07/28 17:01:57 INFO balancer.Balancer: Moving block 1073950748 from 
10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is 
succeeded.
+14/07/28 17:01:58 INFO balancer.Balancer: Moving block 1073939272 from 
10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is 
succeeded.
+14/07/28 17:02:06 INFO balancer.Balancer: Moving block 1073863504 from 
10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is 
succeeded.
+14/07/28 17:02:13 INFO balancer.Balancer: Moving block 1073863516 from 
10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.9:50010 is 
succeeded.
+14/07/28 17:02:31 INFO balancer.Balancer: Moving block 1073743089 from 
10.253.130.9:50010 to 10.253.130.5:50010 through 10.253.130.2:50010 is 
succeeded.
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.130.3:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.130.5:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.129.225:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.129.223:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.130.1:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.130.4:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.130.7:50010
+14/07/28 17:03:00 INFO net.NetworkTopology: Adding a new node: 
/default-rack/10.253.130.8:50010
+14/07/28 17:03:00 INFO net.NetworkTo

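To read the over-utilized/underutilized lines in this captured log: the balancer compares each datanode's utilization against the cluster average plus or minus the threshold (5.0 in this run). A hedged sketch of that classification, not the Balancer source:

def classify(utilization, cluster_avg, threshold=5.0):
  if utilization > cluster_avg + threshold:
    return 'over-utilized'
  if utilization < cluster_avg - threshold:
    return 'under-utilized'
  return 'within threshold'

# 34.89 vs 21.18 in the first iteration above straddle the average by more
# than 5 points, hence one node in each list.
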
[09/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/hdp_nagios_init.php
--
diff --git 
a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/hdp_nagios_init.php
 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/hdp_nagios_init.php
new file mode 100644
index 000..487eb43
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/hdp_nagios_init.php
@@ -0,0 +1,81 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Common functions called from other alerts
+ *
+ */
+ 
+ /*
+ * Function for kinit. Checks whether security is enabled and whether klist
+ * shows no ticket for this principal; makes a kinit call in that case.
+ */
+  function kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name) {
+if($security_enabled === 'true') {
+
+  $is_logined = is_logined($principal_name);
+  
+  if (!$is_logined)
+$status = kinit($kinit_path_local, $keytab_path, $principal_name);
+  else
+$status = array(0, '');
+} else {
+  $status = array(0, '');
+}
+  
+return $status;
+  }
+  
+  
+  /*
+  * Checks whether the user is logged in to Kerberos
+  */
+  function is_logined($principal_name) {
+$check_cmd = "klist|grep $principal_name 1> /dev/null 2>/dev/null ; [[ $? != 0 ]] && echo 1";
+$check_output =  shell_exec($check_cmd);
+
+if ($check_output)
+  return false;
+else
+  return true;
+  }
+
+  /*
+  * Runs kinit command.
+  */
+  function kinit($kinit_path_local, $keytab_path, $principal_name) {
+$init_cmd = "$kinit_path_local -kt $keytab_path $principal_name 2>&1";
+$kinit_output = shell_exec($init_cmd);
+if ($kinit_output) 
+  $status = array(1, $kinit_output);
+else
+  $status = array(0, '');
+  
+return $status;
+  }
+
+  function logout() {
+if (shell_exec("rm -f /tmp/krb5cc_".trim(shell_exec('id -u'))) == "" ) 
+  $status = true;
+else
+  $status = false;
+  
+return $status;
+  }
+ 
+ ?>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/mm_wrapper.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/mm_wrapper.py
 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/mm_wrapper.py
new file mode 100644
index 000..7a622b6
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/mm_wrapper.py
@@ -0,0 +1,326 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+import sys
+import subprocess
+import os
+
+N_SGN = 'NAGIOS_SERVICEGROUPNAME'
+N_SD = 'NAGIOS_SERVICEDESC'
+N_HOST = 'NAGIOS_HOSTNAME'
+
+LIST_SEPARATOR = "--"
+HOSTNAME_PLACEHOLDER = "^^"
+IGNORE_DAT_FILE = "/var/nagios/ignore.dat"
+
+# Mode constants
+OR = 0
+AND = 1
+ENV_ONLY = 2
+FILTER_MM = 3
+LEGACY_CHECK_WRAPPER = 4
+MODES = ['or', 'and', 'env_only', 'filter_mm', 'legacy_check_wrapper']
+
+
+def ignored_host_list(service, component):
+  """
+  :param service: current service
+  :param component: current component
+  :return: all hosts where specified host component is in ignored state
+  """
+  try:
+with open(IGNORE_DAT_FILE) as f:
+  lines = f.readlines()
+  except IOError:
+return []
+  result = []
+  if lines:
+for l in lines:
+  tokens = l.split(' ')
+  if len(tokens) == 3 and tokens[1] == service and tokens[2].strip() == component:
+result.append(tokens[0])
+  return result
+
+
+def get_real_service():
+  try:
+ser

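The kinit guard in hdp_nagios_init.php above, restated as a Python 2 sketch for clarity: only kinit when security is on and klist shows no ticket for the principal.

import subprocess

def kinit_if_needed(security_enabled, kinit_path_local, keytab_path, principal_name):
  if security_enabled != 'true':
    return 0
  have_ticket = subprocess.call(
      'klist | grep %s >/dev/null 2>&1' % principal_name, shell=True) == 0
  if have_ticket:
    return 0
  return subprocess.call([kinit_path_local, '-kt', keytab_path, principal_name])
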
[12/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/etc/hive-schema-0.12.0.oracle.sql
--
diff --git 
a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/etc/hive-schema-0.12.0.oracle.sql
 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/etc/hive-schema-0.12.0.oracle.sql
new file mode 100644
index 000..812b897
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/etc/hive-schema-0.12.0.oracle.sql
@@ -0,0 +1,718 @@
+-- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE SEQUENCE_TABLE
+(
+   SEQUENCE_NAME VARCHAR2(255) NOT NULL,
+   NEXT_VAL NUMBER NOT NULL
+);
+
+ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY 
(SEQUENCE_NAME);
+
+-- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
+-- This table is required if datanucleus.autoStartMechanism=SchemaTable
+-- NOTE: Some versions of SchemaTool do not automatically generate this table.
+-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+CREATE TABLE NUCLEUS_TABLES
+(
+   CLASS_NAME VARCHAR2(128) NOT NULL,
+   TABLE_NAME VARCHAR2(128) NOT NULL,
+   TYPE VARCHAR2(4) NOT NULL,
+   OWNER VARCHAR2(2) NOT NULL,
+   VERSION VARCHAR2(20) NOT NULL,
+   INTERFACE_NAME VARCHAR2(255) NULL
+);
+
+ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY 
(CLASS_NAME);
+
+-- Table PART_COL_PRIVS for classes 
[org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+CREATE TABLE PART_COL_PRIVS
+(
+PART_COLUMN_GRANT_ID NUMBER NOT NULL,
+"COLUMN_NAME" VARCHAR2(128) NULL,
+CREATE_TIME NUMBER (10) NOT NULL,
+GRANT_OPTION NUMBER (5) NOT NULL,
+GRANTOR VARCHAR2(128) NULL,
+GRANTOR_TYPE VARCHAR2(128) NULL,
+PART_ID NUMBER NULL,
+PRINCIPAL_NAME VARCHAR2(128) NULL,
+PRINCIPAL_TYPE VARCHAR2(128) NULL,
+PART_COL_PRIV VARCHAR2(128) NULL
+);
+
+ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY 
(PART_COLUMN_GRANT_ID);
+
+-- Table CDS.
+CREATE TABLE CDS
+(
+CD_ID NUMBER NOT NULL
+);
+
+ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+
+-- Table COLUMNS_V2 for join relationship
+CREATE TABLE COLUMNS_V2
+(
+CD_ID NUMBER NOT NULL,
+"COMMENT" VARCHAR2(256) NULL,
+"COLUMN_NAME" VARCHAR2(128) NOT NULL,
+TYPE_NAME VARCHAR2(4000) NOT NULL,
+INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY 
(CD_ID,"COLUMN_NAME");
+
+-- Table PARTITION_KEY_VALS for join relationship
+CREATE TABLE PARTITION_KEY_VALS
+(
+PART_ID NUMBER NOT NULL,
+PART_KEY_VAL VARCHAR2(256) NULL,
+INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY 
KEY (PART_ID,INTEGER_IDX);
+
+-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+CREATE TABLE DBS
+(
+DB_ID NUMBER NOT NULL,
+"DESC" VARCHAR2(4000) NULL,
+DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
+"NAME" VARCHAR2(128) NULL
+);
+
+ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+
+-- Table PARTITION_PARAMS for join relationship
+CREATE TABLE PARTITION_PARAMS
+(
+PART_ID NUMBER NOT NULL,
+PARAM_KEY VARCHAR2(256) NOT NULL,
+PARAM_VALUE VARCHAR2(4000) NULL
+);
+
+ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY 
(PART_ID,PARAM_KEY);
+
+-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+CREATE TABLE SERDES
+(
+SERDE_ID NUMBER NOT NULL,
+"NAME" VARCHAR2(128) NULL,
+SLIB VARCHAR2(4000) NULL
+);
+
+ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+
+-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+CREATE TABLE TYPES
+(
+TYPES_ID NUMBER NOT NULL,
+TYPE_NAME VARCHAR2(128) NULL,
+TYPE1 VARCHAR2(767) NULL,
+TYPE2 VARCHAR2(767) NULL
+);
+
+ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+
+-- Table PARTITION_KEYS for join relationship
+CREATE TABLE PARTITION_KEYS
+(
+TBL_ID NUMBER NOT NULL,
+PKEY_COMMENT VARCHAR2(4000) NULL,
+PKEY_NAME VARCHAR2(128) NOT NULL,
+PKEY_TYPE VARCHAR2(767) NOT NULL,
+INTEGER_IDX NUMBER(10) NOT NULL
+);
+
+ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY 
(TBL_ID,PKEY_NAME);
+
+-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+CREATE TABLE ROLES
+(
+ROLE_ID NUMBER NOT NULL,
+CREATE_TIME NUMBER (10) NOT NULL,
+OWNER_NAME VARCHAR2(128) NULL,
+ROLE_NAME VARCHAR2(128) NULL
+);
+
+ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+
+-- Table PARTITIONS for classes 
[org.apache.hadoop.hive.meta

[14/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/balancer.log
--
diff --git 
a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/balancer.log
 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/balancer.log
new file mode 100644
index 000..2010c02
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/balancer.log
@@ -0,0 +1,29 @@
+Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved
+Jul 28, 2014 5:01:49 PM           0                  0 B             5.74 GB            9.79 GB
+Jul 28, 2014 5:03:00 PM           1                  0 B             5.58 GB            9.79 GB
+Jul 28, 2014 5:04:07 PM           2                  0 B             5.40 GB            9.79 GB
+Jul 28, 2014 5:05:14 PM           3                  0 B             5.06 GB            9.79 GB
+Jul 28, 2014 5:05:50 PM           4                  0 B             5.06 GB            9.79 GB
+Jul 28, 2014 5:06:56 PM           5                  0 B             4.81 GB            9.79 GB
+Jul 28, 2014 5:07:33 PM           6                  0 B             4.80 GB            9.79 GB
+Jul 28, 2014 5:09:11 PM           7                  0 B             4.29 GB            9.79 GB
+Jul 28, 2014 5:09:47 PM           8                  0 B             4.29 GB            9.79 GB
+Jul 28, 2014 5:11:24 PM           9                  0 B             3.89 GB            9.79 GB
+Jul 28, 2014 5:12:00 PM          10                  0 B             3.86 GB            9.79 GB
+Jul 28, 2014 5:13:37 PM          11                  0 B             3.23 GB            9.79 GB
+Jul 28, 2014 5:15:13 PM          12                  0 B             2.53 GB            9.79 GB
+Jul 28, 2014 5:15:49 PM          13                  0 B             2.52 GB            9.79 GB
+Jul 28, 2014 5:16:25 PM          14                  0 B             2.51 GB            9.79 GB
+Jul 28, 2014 5:17:01 PM          15                  0 B             2.39 GB            9.79 GB
+Jul 28, 2014 5:17:37 PM          16                  0 B             2.38 GB            9.79 GB
+Jul 28, 2014 5:18:14 PM          17                  0 B             2.31 GB            9.79 GB
+Jul 28, 2014 5:18:50 PM          18                  0 B             2.30 GB            9.79 GB
+Jul 28, 2014 5:19:26 PM          19                  0 B             2.21 GB            9.79 GB
+Jul 28, 2014 5:20:02 PM          20                  0 B             2.10 GB            9.79 GB
+Jul 28, 2014 5:20:38 PM          21                  0 B             2.06 GB            9.79 GB
+Jul 28, 2014 5:22:14 PM          22                  0 B             1.68 GB            9.79 GB
+Jul 28, 2014 5:23:20 PM          23                  0 B             1.00 GB            9.79 GB
+Jul 28, 2014 5:23:56 PM          24                  0 B          1016.16 MB            9.79 GB
+Jul 28, 2014 5:25:33 PM          25                  0 B            30.55 MB            9.79 GB
+The cluster is balanced. Exiting...
+Balancing took 24.8580335 minutes

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py
 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py
new file mode 100644
index 000..0cce48c
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/scripts/balancer-emulator/hdfs-command.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+import time
+import sys
+from threading import Thread
+
+
+def write_function(path, han

[21/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_server.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_server.py
 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_server.py
new file mode 100644
index 000..c38366f
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_server.py
@@ -0,0 +1,119 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import sys
+import os
+from os import path
+from resource_management import *
+from ganglia import generate_daemon
+import ganglia
+import functions
+import ganglia_server_service
+
+
+class GangliaServer(Script):
+  def install(self, env):
+import params
+
+self.install_packages(env)
+env.set_params(params)
+self.configure(env)
+
+functions.turn_off_autostart(params.gmond_service_name) # since the package is installed as well
+functions.turn_off_autostart("gmetad")
+
+  def start(self, env):
+import params
+env.set_params(params)
+self.configure(env)
+ganglia_server_service.server("start")
+
+  def stop(self, env):
+import params
+
+env.set_params(params)
+ganglia_server_service.server("stop")
+
+  def status(self, env):
+import status_params
+env.set_params(status_params)
+pid_file = format("{pid_dir}/gmetad.pid")
+# Check the gmetad pid file for a live process
+check_process_status(pid_file)
+
+  def configure(self, env):
+import params
+env.set_params(params)
+
+ganglia.groups_and_users()
+ganglia.config()
+
+generate_daemon("gmetad",
+name = "gmetad",
+role = "server",
+owner = "root",
+group = params.user_group)
+
+change_permission()
+server_files()
+File(path.join(params.ganglia_dir, "gmetad.conf"),
+ owner="root",
+ group=params.user_group
+)
+
+
+def change_permission():
+  import params
+
+  Directory(params.dwoo_path,
+mode=0755,
+recursive=True
+  )
+  Execute(format("chown -R {web_user} {dwoo_path}"))
+
+def server_files():
+  import params
+
+  rrd_py_path = params.rrd_py_path
+  Directory(rrd_py_path,
+recursive=True
+  )
+  rrd_py_file_path = path.join(rrd_py_path, "rrd.py")
+  TemplateConfig(rrd_py_file_path,
+ owner="root",
+ group="root",
+ mode=0755
+  )
+  rrd_file_owner = params.gmetad_user
+
+  Directory(params.rrdcached_base_dir,
+owner=rrd_file_owner,
+group=rrd_file_owner,
+mode=0755,
+recursive=True
+  )
+  
+  if System.get_instance().os_family in ["ubuntu","suse"]:
+File( params.ganglia_apache_config_file,
+  content = Template("ganglia.conf.j2"),
+  mode = 0644
+)
+
+
+if __name__ == "__main__":
+  GangliaServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_server_service.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_server_service.py
 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_server_service.py
new file mode 100644
index 000..b93e3f8
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/package/scripts/ganglia_server_service.py
@@ -0,0 +1,27 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/li

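status() above delegates to check_process_status(pid_file). The essential behavior of such a pid-file probe is sketched below for readers unfamiliar with it (this is illustrative, not the resource_management implementation):

import os

def process_running(pid_file):
  with open(pid_file) as f:
    pid = int(f.read().strip())
  try:
    os.kill(pid, 0)  # signal 0 probes existence without signalling
    return True
  except OSError:
    return False
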
[02/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/params.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/params.py
 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/params.py
new file mode 100644
index 000..9fa99b5
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/params.py
@@ -0,0 +1,86 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+#RPM versioning support
+rpm_version = default("/configurations/cluster-env/rpm_version", None)
+
+#hadoop params
+if rpm_version:
+  zk_home = '/usr/phd/current/zookeeper-client'
+  zk_bin = '/usr/phd/current/zookeeper-client/bin'
+  smoke_script = '/usr/phd/current/zookeeper-client/bin/zkCli.sh'
+else:
+  zk_home = '/usr'
+  zk_bin = '/usr/lib/zookeeper/bin'
+  smoke_script = "/usr/lib/zookeeper/bin/zkCli.sh"
+
+config_dir = "/etc/zookeeper/conf"
+zk_user =  config['configurations']['zookeeper-env']['zk_user']
+hostname = config['hostname']
+user_group = config['configurations']['cluster-env']['user_group']
+zk_env_sh_template = config['configurations']['zookeeper-env']['content']
+
+zk_log_dir = config['configurations']['zookeeper-env']['zk_log_dir']
+zk_data_dir = config['configurations']['zookeeper-env']['zk_data_dir']
+zk_pid_dir = status_params.zk_pid_dir
+zk_pid_file = status_params.zk_pid_file
+zk_server_heapsize = "-Xmx1024m"
+
+tickTime = config['configurations']['zookeeper-env']['tickTime']
+initLimit = config['configurations']['zookeeper-env']['initLimit']
+syncLimit = config['configurations']['zookeeper-env']['syncLimit']
+clientPort = config['configurations']['zookeeper-env']['clientPort']
+
+if 'zoo.cfg' in config['configurations']:
+  zoo_cfg_properties_map = config['configurations']['zoo.cfg']
+else:
+  zoo_cfg_properties_map = {}
+zoo_cfg_properties_map_length = len(zoo_cfg_properties_map)
+
+zk_principal_name = default("/configurations/zookeeper-env/zookeeper_principal_name", "zookeeper@EXAMPLE.COM")
+zk_principal = zk_principal_name.replace('_HOST', hostname.lower())
+
+java64_home = config['hostLevelParams']['java_home']
+
+zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
+zookeeper_hosts.sort()
+
+zk_keytab_path = config['configurations']['zookeeper-env']['zookeeper_keytab_path']
+zk_server_jaas_file = format("{config_dir}/zookeeper_jaas.conf")
+zk_client_jaas_file = format("{config_dir}/zookeeper_client_jaas.conf")
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+
+#log4j.properties
+if (('zookeeper-log4j' in config['configurations']) and ('content' in config['configurations']['zookeeper-log4j'])):
+  log4j_props = config['configurations']['zookeeper-log4j']['content']
+else:
+  log4j_props = None

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/service_check.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/service_check.py
 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/service_check.py
new file mode 100644
index 000..87c13db
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/ZOOKEEPER/package/scripts/service_check.py
@@ -0,0 +1,46 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licens

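params.py above leans on default() for optional keys such as rpm_version: a path lookup over the nested config dict with a fallback. A sketch of that lookup, assuming the '/'-separated path semantics visible in the call sites (config being the dict returned by Script.get_config()):

def default(path, fallback):
  node = config
  for key in path.strip('/').split('/'):
    if not isinstance(node, dict) or key not in node:
      return fallback
    node = node[key]
  return node
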
[10/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/metainfo.xml
--
diff --git 
a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/metainfo.xml
 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/metainfo.xml
new file mode 100644
index 000..a41e261
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/metainfo.xml
@@ -0,0 +1,163 @@
+<?xml version="1.0"?>
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>NAGIOS</name>
+      <displayName>Nagios</displayName>
+      <comment>Nagios Monitoring and Alerting system</comment>
+      <version>3.5.0</version>
+      <components>
+        <component>
+          <name>NAGIOS_SERVER</name>
+          <displayName>Nagios Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>OOZIE/OOZIE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>HIVE/HCAT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/nagios_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package><name>perl</name></package>
+            <package><name>fping</name></package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>ubuntu12</osFamily>
+          <packages>
+            <package><name>nagios3</name></package>
+            <package><name>nagios3-common</name></package>
+            <package><name>nagios3-dbg</name></package>
+            <package><name>nagios3-doc</name></package>
+            <package><name>nagios-plugins-extra</name></package>
+            <package><name>php5-curl</name></package>
+            <package><name>libapache2-mod-php5</name></package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package><name>nagios-plugins-1.4.9</name></package>
+            <package><name>nagios-3.5.0-99</name></package>
+            <package><name>nagios-www-3.5.0-99</name></package>
+            <package><name>nagios-devel-3.5.0-99</name></package>
+            <package><name>php</name></package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>suse11</osFamily>
+          <packages>
+            <package><name>php5*-json</name></package>
+            <package><name>apache2?mod_php*</name></package>
+            <package><name>php-curl</name></package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5</osFamily>
+          <packages>
+            <package><name>php-pecl-json.x86_64</name></package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <configuration-dependencies>
+        <config-type>nagios-env</config-type>
+      </configuration-dependencies>
+      <monitoringService>true</monitoringService>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_aggregate.php
--
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_aggregate.php b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_aggregate.php
new file mode 100644
index 000..792b25b
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/files/check_aggregate.php
@@ -0,0 +1,248 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+define("PASSIVE_MODE_STR", "AMBARIPASSIVE=");
+
+  $options = getopt ("f:s:n:w:c:t:");
+  if (!array_key_exists('t', $options) || !array_key_exists('f', $options) || !array_key_exists('w', $options)
+      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
+usage();
+exit(3);
+  }
+  $status_file=$options['f'];
+  $status_code=$options['s'];
+  $type=$options['t'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+  if ($type == "service" && !array_key_exists('n', $options)) {
+echo "Service description not provided -n option\n";
+exit(3);
+  }
+  if ($type == "service") {
+$service_name=$options['n'];
+/* echo "DESC: " . $service_name . "\n"; */
+  }
+
+  $result = array();
+  $status_file_content = file_get_contents($status_file);
+
+  $counts;
+  if ($type == "service") {
+$counts=query_alert_count($status_file_c
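
The fragment above is cut off by the archive. To make the option parsing concrete, a hedged invocation sketch follows; it mirrors the check_aggregate command definition that appears later in this digest, but the service description and thresholds here are illustrative only:

  import subprocess
  subprocess.call(["php", "check_aggregate.php",
                   "-f", "/var/nagios/status.dat",       # Nagios status file (-f)
                   "-s", "1",                            # status code to count (-s)
                   "-t", "service",                      # aggregate type: service or host (-t)
                   "-n", "DATANODE::DataNode process",   # service description (-n), hypothetical
                   "-w", "10",                           # warn threshold, percent (-w)
                   "-c", "30"])                          # critical threshold, percent (-c)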

[11/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/pigSmoke.sh
--
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/pigSmoke.sh
new file mode 100644
index 000..2e90ac0
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/pigSmoke.sh
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+A = load 'passwd' using PigStorage(':');
+B = foreach A generate \$0 as id;
+store B into 'pigsmoke.out';
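
As a rough usage sketch (the input staging shown is an assumption; Ambari's actual Pig service check drives the equivalent steps through its own wrappers): the script loads colon-delimited rows, keeps the first field, and writes the result to pigsmoke.out.

  import subprocess
  subprocess.call(["hadoop", "fs", "-put", "/etc/passwd", "passwd"])  # stage sample input in HDFS
  subprocess.call(["pig", "pigSmoke.sh"])                             # run the three Pig statements above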

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/startMetastore.sh
--
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/startMetastore.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/startMetastore.sh
new file mode 100644
index 000..da0f60b
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/startMetastore.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+HIVE_CONF_DIR=$4 hive --service metastore -hiveconf hive.log.file=hivemetastore.log -hiveconf hive.log.dir=$5 > $1 2> $2 &
+echo $!|cat>$3
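
The positional arguments are easy to misread, so here is a hedged invocation sketch inferred from the one-liner above (all paths are illustrative): $1 stdout log, $2 stderr log, $3 pid file, $4 HIVE_CONF_DIR, $5 hive.log.dir.

  import subprocess
  subprocess.call(["bash", "startMetastore.sh",
                   "/var/log/hive/hive.out",   # $1: stdout redirect target
                   "/var/log/hive/hive.log",   # $2: stderr redirect target
                   "/var/run/hive/hive.pid",   # $3: file that receives the metastore pid
                   "/etc/hive/conf",           # $4: exported as HIVE_CONF_DIR
                   "/var/log/hive"])           # $5: hive.log.dir for hivemetastore.log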

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/templetonSmoke.sh
--
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/templetonSmoke.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/templetonSmoke.sh
new file mode 100644
index 000..e26148b
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/package/files/templetonSmoke.sh
@@ -0,0 +1,96 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export ttonhost=$1
+export smoke_test_user=$2
+export smoke_user_keytab=$3
+export security_enabled=$4
+export kinit_path_local=$5
+export ttonurl="http://${ttonhost}:50111/templeton/v1"
+
+if [[ $security_enabled == "true" ]]; then
+  kinitcmd="${kinit_path_local}  -kt ${smoke_user_keytab} ${smoke_test_user}; "
+else
+  kinitcmd=""
+fi
+
+export no_proxy=$ttonhost
+cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code

[07/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/nagios.cfg.j2
--
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/nagios.cfg.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/nagios.cfg.j2
new file mode 100644
index 000..bcff8ac
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/nagios.cfg.j2
@@ -0,0 +1,1365 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+##
+#
+# NAGIOS.CFG - Sample Main Config File for Nagios 3.2.3
+#
+# Read the documentation for more information on this configuration
+# file.  I've provided some comments here, but things may not be so
+# clear without further explanation.
+#
+# Last Modified: 12-14-2008
+#
+##
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+# LOG FILE
+# This is the main log file where service and host events are logged
+# for historical purposes.  This should be the first option specified 
+# in the config file!!!
+
+log_file=/var/log/nagios/nagios.log
+
+
+# OBJECT CONFIGURATION FILE(S)
+# These are the object configuration files in which you define hosts,
+# host groups, contacts, contact groups, services, etc.
+# You can split your object definitions across several config files
+# if you wish (as shown below), or keep them all in a single config file.
+
+{% for cfg_file in cfg_files %}
+cfg_file={{cfg_file}}
+{% endfor %}
+
+# Definitions for monitoring the local (Linux) host
+#cfg_file={{conf_dir}}/objects/localhost.cfg
+
+# Definitions for monitoring a Windows machine
+#cfg_file={{conf_dir}}/objects/windows.cfg
+
+# Definitions for monitoring a router/switch
+#cfg_file={{conf_dir}}/objects/switch.cfg
+
+# Definitions for monitoring a network printer
+#cfg_file={{conf_dir}}/objects/printer.cfg
+
+# Definitions for hadoop servers
+cfg_file={{nagios_host_cfg}}
+cfg_file={{nagios_hostgroup_cfg}}
+cfg_file={{nagios_servicegroup_cfg}}
+cfg_file={{nagios_service_cfg}}
+cfg_file={{nagios_command_cfg}}
+
+
+# You can also tell Nagios to process all config files (with a .cfg
+# extension) in a particular directory by using the cfg_dir
+# directive as shown below:
+
+#cfg_dir={{conf_dir}}/servers
+#cfg_dir={{conf_dir}}/printers
+#cfg_dir={{conf_dir}}/switches
+#cfg_dir={{conf_dir}}/routers
+
+
+
+
+# OBJECT CACHE FILE
+# This option determines where object definitions are cached when
+# Nagios starts/restarts.  The CGIs read object definitions from 
+# this cache file (rather than looking at the object config files
+# directly) in order to prevent inconsistencies that can occur
+# when the config files are modified after Nagios starts.
+
+object_cache_file=/var/nagios/objects.cache
+
+
+
+# PRE-CACHED OBJECT FILE
+# This option determines the location of the precached object file.
+# If you run Nagios with the -p command line option, it will preprocess
+# your object configuration file(s) and write the cached config to this
+# file.  You can then start Nagios with the -u option to have it read
+# object definitions from this precached file, rather than the standard
+# object configuration files (see the cfg

[18/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hdfs-log4j.xml
--
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hdfs-log4j.xml
new file mode 100644
index 000..08822eb
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/configuration/hdfs-log4j.xml
@@ -0,0 +1,201 @@
+<?xml version="1.0"?>
+<configuration>
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# Define some default values that can be overridden by system properties
+# To change daemon root logger use hadoop_root_logger in hadoop-env
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# mapred audit logging
+#
+map

[20/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/metrics.json
--
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/metrics.json b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/metrics.json
new file mode 100644
index 000..37f73bf
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HBASE/metrics.json
@@ -0,0 +1,13635 @@
+{
+  "HBASE_REGIONSERVER": {
+"Component": [
+  {
+"type": "ganglia",
+"metrics": {
+  "metrics/hbase/regionserver/compactionTime_avg_time": {
+"metric": "hbase.regionserver.compactionTime_avg_time",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/rpc/closeRegion_num_ops": {
+"metric": "rpc.rpc.closeRegion_num_ops",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+"metric": "regionserver.Server.mutationsWithoutWALSize",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/rpc/unassign_num_ops": {
+"metric": "rpc.rpc.unassign_num_ops",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/rpc/modifyTable_num_ops": {
+"metric": "rpc.rpc.modifyTable_num_ops",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/rpc/getProtocolVersion_avg_time": {
+"metric": "rpc.rpc.getProtocolVersion_avg_time",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time": {
+"metric": "rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/load/load_one": {
+"metric": "load_one",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/rpc/getClosestRowBefore_num_ops": {
+"metric": "rpc.rpc.getClosestRowBefore_num_ops",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/hbase/regionserver/slowAppendCount": {
+"metric": "regionserver.Server.slowAppendCount",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time": {
+"metric": "rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/rpc/lockRow_num_ops": {
+"metric": "rpc.rpc.lockRow_num_ops",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/rpc/flushRegion_avg_time": {
+"metric": "rpc.rpc.flushRegion_avg_time",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/memory/swap_total": {
+"metric": "swap_total",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/rpc/stopMaster_num_ops": {
+"metric": "rpc.rpc.stopMaster_num_ops",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/rpc/openRegions/aboveOneSec/_num_ops": {
+"metric": "rpc.rpc.openRegions.aboveOneSec._num_ops",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/rpc/balance_avg_time": {
+"metric": "rpc.rpc.balance_avg_time",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/process/proc_total": {
+"metric": "proc_total",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/disk/part_max_used": {
+"metric": "part_max_used",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/rpc/modifyColumn_avg_time": {
+"metric": "rpc.rpc.modifyColumn_avg_time",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/rpc/multi/aboveOneSec/_avg_time": {
+"metric": "rpc.rpc.multi.aboveOneSec._avg_time",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/hbase/regionserver/rootIndexSizeKB": {
+"metric": "hbase.regionserver.rootIndexSizeKB",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/rpc/getZooKeeper_num_ops": {
+"metric": "rpc.rpc.getZooKeeper_num_ops",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/hbase/regionserver/blockCac

[08/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
--
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-commands.cfg.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
new file mode 100644
index 000..c1a792c
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
@@ -0,0 +1,166 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+{% if check_cpu_on %}
+# 'check_cpu' check remote cpu load
+define command {
+        command_name    check_cpu
+        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- php $USER1$/check_cpu.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -u $ARG8$
+       }
+define command {
+        command_name    check_cpu_ha
+        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py or $ARG1$ -- php $USER1$/check_cpu_ha.php -h ^^ -p $ARG2$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -u $ARG9$
+       }
+{% endif %}
+
+# Check data node storage full
+define command {
+        command_name    check_datanode_storage
+        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- php $USER1$/check_datanode_storage.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -s $ARG8$
+       }
+
+define command{
+        command_name    check_hdfs_blocks
+        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py and $ARG1$ -- php $USER1$/check_hdfs_blocks.php -h ^^ -p $ARG2$ -s $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -u $ARG8$
+       }
+
+define command{
+        command_name    check_hdfs_capacity
+        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py and $ARG1$ -- php $USER1$/check_hdfs_capacity.php -h ^^ -p $ARG2$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
+       }
+
+define command{
+        command_name    check_aggregate
+        command_line    php $USER1$/check_aggregate.php -f /var/nagios/status.dat -s 1 -t service -n $ARG1$ -w $ARG2$ -c $ARG3$
+       }
+
+define command{
+        command_name    check_rpcq_latency
+        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- php $USER1$/check_rpcq_latency.php -h $HOSTADDRESS$ -p $ARG2$ -n $ARG1$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
+       }
+
+define command{
+        command_name    check_rpcq_latency_ha
+        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py or $ARG1$ -- php $USER1$/check_rpcq_latency_ha.php -h ^^ -p $ARG3$ -n $ARG2$ -w $ARG4$ -c $ARG5$ -e $ARG6$ -k $ARG7$ -r $ARG8$ -t $ARG9$ -s $ARG10$
+       }
+
+define command{
+        command_name    check_nagios
+        command_line    /var/lib/ambari-agent/ambari-python-wrap $USER1$/mm_wrapper.py legacy_check_wrapper -- $USER1$/check_nagios -e $ARG1$ -F $ARG2$ -C $ARG3$
+       }
+
+define command{
+  

[13/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hive-site.xml
--
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hive-site.xml
new file mode 100644
index 000..87940a7
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HIVE/configuration/hive-site.xml
@@ -0,0 +1,538 @@
+<?xml version="1.0"?>
+<configuration>
+
+  <property>
+    <name>hive.heapsize</name>
+    <value>1024</value>
+    <description>Hive Java heap size</description>
+  </property>
+
+  <property>
+    <name>ambari.hive.db.schema.name</name>
+    <value>hive</value>
+    <description>Database name used as the Hive Metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionURL</name>
+    <value>jdbc:postgresql://localhost/hive</value>
+    <description>JDBC connect string for a JDBC metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionDriverName</name>
+    <value>org.postgresql.Driver</value>
+    <description>Driver class name for a JDBC metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionUserName</name>
+    <value>hive</value>
+    <description>username to use against metastore database</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionPassword</name>
+    <value></value>
+    <property-type>PASSWORD</property-type>
+    <description>password to use against metastore database</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.warehouse.dir</name>
+    <value>/apps/hive/warehouse</value>
+    <description>location of default database for the warehouse</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.sasl.enabled</name>
+    <value>false</value>
+    <description>If true, the metastore thrift interface will be secured with SASL.
+     Clients must authenticate with Kerberos.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.cache.pinobjtypes</name>
+    <value>Table,Database,Type,FieldSchema,Order</value>
+    <description>List of comma separated metastore object types that should be pinned in the cache</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.uris</name>
+    <value>thrift://localhost:9083</value>
+    <description>URI for client to contact metastore server</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.client.socket.timeout</name>
+    <value>60</value>
+    <description>MetaStore Client socket timeout in seconds</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.execute.setugi</name>
+    <value>true</value>
+    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort. If the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.enabled</name>
+    <value>false</value>
+    <description>enable or disable the hive client authorization</description>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <description>the hive client authorization manager class name.
+    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.</description>
+  </property>
+
+  <property>
+    <name>hive.security.metastore.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <description>The authorization manager class name to be used in the metastore for authorization. The user-defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.pre.event.listeners</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
+    <description>Pre-event listener classes to be loaded on the metastore side to run code
+    whenever databases, tables, and partitions are created, altered, or dropped.
+    Set to org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener
+    if metastore-side authorization is desired.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.pre.event.listeners</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
+    <description>Pre-event listener classes to be loaded on the metastore side to run code
+    whenever databases, tables, and partitions are created, altered, or dropped.
+    Set to org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener
+    if metastore-side authorization is desired.</description>
+  </property>
+
+  <property>
+    <name>hive.security.authenticator.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
+    <description>Hive client authenticator manager class name. The user-defined authenticator class should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.enable.doAs</name>
+    <value>true</value>
+    <description>Impersonate the connected user. By default HiveServer2 performs the query processing as the user who
+    submitted the query. But if the parameter is set to false, the query will run as the user that the hiveserver2
+    process runs as.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.enable.impersonation</name>
+    <description>Enable user impersonation for HiveServer2</description>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hive.server2.authentication</name>
+    <description>Authentication mode, default NONE. Options are NONE, NOSASL, KERBEROS, LDAP, PAM and CUSTOM</description>
+    <value>NOSASL</value>
+  </property>
+
+  <property>
+    <name>fs.h

[01/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk bd32ef392 -> e7d070303


http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-web/app/data/PHD/site_properties.js
--
diff --git a/ambari-web/app/data/PHD/site_properties.js b/ambari-web/app/data/PHD/site_properties.js
new file mode 100644
index 000..2bc1092
--- /dev/null
+++ b/ambari-web/app/data/PHD/site_properties.js
@@ -0,0 +1,3725 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+var App = require('app');
+
+module.exports =
+{
+  "configProperties": [
+  //* PHD stack **
+  /**HDFS***/
+{
+  "id": "site property",
+  "name": "dfs.namenode.checkpoint.dir",
+  "displayName": "SecondaryNameNode Checkpoint directories",
+  "defaultDirectory": "/hadoop/hdfs/namesecondary",
+  "displayType": "directories",
+  "isOverridable": false,
+  "serviceName": "HDFS",
+  "category": "SECONDARY_NAMENODE",
+  "index": 1
+},
+{
+  "id": "site property",
+  "name": "dfs.namenode.checkpoint.period",
+  "displayName": "HDFS Maximum Checkpoint Delay",
+  "displayType": "int",
+  "unit": "seconds",
+  "category": "General",
+  "serviceName": "HDFS",
+  "index": 3
+},
+{
+  "id": "site property",
+  "name": "dfs.namenode.name.dir",
+  "displayName": "NameNode directories",
+  "defaultDirectory": "/hadoop/hdfs/namenode",
+  "displayType": "directories",
+  "isOverridable": false,
+  "serviceName": "HDFS",
+  "category": "NAMENODE",
+  "index": 1
+},
+{
+  "id": "site property",
+  "name": "dfs.webhdfs.enabled",
+  "displayName": "WebHDFS enabled",
+  "displayType": "checkbox",
+  "isOverridable": false,
+  "category": "General",
+  "serviceName": "HDFS",
+  "index": 0
+},
+{
+  "id": "site property",
+  "name": "dfs.datanode.failed.volumes.tolerated",
+  "displayName": "DataNode volumes failure toleration",
+  "displayType": "int",
+  "category": "DATANODE",
+  "serviceName": "HDFS",
+  "index": 3
+},
+{
+  "id": "site property",
+  "name": "dfs.datanode.data.dir.mount.file",
+  "displayName": "File that stores mount point for each data dir",
+  "description": "File path that contains the last known mount point for 
each data dir. This file is used to avoid creating a DFS data dir on the root 
drive (and filling it up) if a path was previously mounted on a drive.",
+  "defaultValue": "/etc/hadoop/conf/dfs_data_dir_mount.hist",
+  "displayType": "directory",
+  "isVisible": true,
+  "category": "DATANODE",
+  "serviceName": "HDFS",
+  "filename": "hadoop-env.xml",
+  "index": 4
+},
+{
+  "id": "site property",
+  "name": "dfs.datanode.data.dir",
+  "displayName": "DataNode directories",
+  "defaultDirectory": "/hadoop/hdfs/data",
+  "displayType": "directories",
+  "category": "DATANODE",
+  "serviceName": "HDFS",
+  "index": 1
+},
+{
+  "id": "site property",
+  "name": "dfs.datanode.data.dir.perm",
+  "displayName": "DataNode directories permission",
+  "displayType": "int",
+  "category": "DATANODE",
+  "serviceName": "HDFS"
+},
+{
+  "id": "site property",
+  "name": "dfs.replication",
+  "displayName": "Block replication",
+  "displayType": "int",
+  "category": "General",
+  "serviceName": "HDFS"
+},
+{
+  "id": "site property",
+  "name": "dfs.datanode.du.reserved",
+  "displayName": "Reserved space for HDFS",
+  "displayType": "int",
+  "unit": "bytes",
+  "category": "General",
+  "serviceName": "HDFS",
+  "index": 2
+},
+{
+  "id": "site property",
+  "name": "dfs.client.read.shortcircuit",
+  "displayName": "HDFS Short-circuit read",
+  "displayType": "checkbox",
+  "category": "Advanced hdfs-site",
+  "serviceName": "HDFS"
+},
+

[22/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/templates/log4j.properties.j2
--
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/templates/log4j.properties.j2 b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/templates/log4j.properties.j2
new file mode 100644
index 000..3b34db8
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/FLUME/package/templates/log4j.properties.j2
@@ -0,0 +1,67 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# Define some default values that can be overridden by system properties.
+#
+# For testing, it may also be convenient to specify
+# -Dflume.root.logger=DEBUG,console when launching flume.
+
+#flume.root.logger=DEBUG,console
+flume.root.logger=INFO,LOGFILE
+flume.log.dir={{flume_log_dir}}
+flume.log.file=flume-{{agent_name}}.log
+
+log4j.logger.org.apache.flume.lifecycle = INFO
+log4j.logger.org.jboss = WARN
+log4j.logger.org.mortbay = INFO
+log4j.logger.org.apache.avro.ipc.NettyTransceiver = WARN
+log4j.logger.org.apache.hadoop = INFO
+
+# Define the root logger to the system property "flume.root.logger".
+log4j.rootLogger=${flume.root.logger}
+
+
+# Stock log4j rolling file appender
+# Default log rotation configuration
+log4j.appender.LOGFILE=org.apache.log4j.RollingFileAppender
+log4j.appender.LOGFILE.MaxFileSize=100MB
+log4j.appender.LOGFILE.MaxBackupIndex=10
+log4j.appender.LOGFILE.File=${flume.log.dir}/${flume.log.file}
+log4j.appender.LOGFILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.LOGFILE.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss,SSS} %-5p [%t] (%C.%M:%L) %x - %m%n
+
+
+# Warning: If you enable the following appender it will fill up your disk if you don't have a cleanup job!
+# This uses the updated rolling file appender from log4j-extras that supports a reliable time-based rolling policy.
+# See http://logging.apache.org/log4j/companions/extras/apidocs/org/apache/log4j/rolling/TimeBasedRollingPolicy.html
+# Add "DAILY" to flume.root.logger above if you want to use this
+log4j.appender.DAILY=org.apache.log4j.rolling.RollingFileAppender
+log4j.appender.DAILY.rollingPolicy=org.apache.log4j.rolling.TimeBasedRollingPolicy
+log4j.appender.DAILY.rollingPolicy.ActiveFileName=${flume.log.dir}/${flume.log.file}
+log4j.appender.DAILY.rollingPolicy.FileNamePattern=${flume.log.dir}/${flume.log.file}.%d{yyyy-MM-dd}
+log4j.appender.DAILY.layout=org.apache.log4j.PatternLayout
+log4j.appender.DAILY.layout.ConversionPattern=%d{dd MMM yyyy HH:mm:ss,SSS} %-5p [%t] (%C.%M:%L) %x - %m%n
+
+
+# console
+# Add "console" to flume.root.logger above if you want to use this
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d (%t) [%p - %l] %m%n

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/configuration/ganglia-env.xml
--
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/configuration/ganglia-env.xml b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/configuration/ganglia-env.xml
new file mode 100644
index 000..3328acf
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/GANGLIA/configuration/ganglia-env.xml
@@ -0,0 +1,77 @@
+<?xml version="1.0"?>
+<configuration>
+  <property>
+    <name>ganglia_conf_dir</name>
+    <value>/etc/ganglia/hdp</value>
+    <description>Config directory for Ganglia</description>
+  </property>
+  <property>
+    <name>ganglia_runtime_dir</name>
+    <value>/var/run/ganglia/hdp</value>
+    <description>Run directories for Ganglia</description>
+  </property>
+  <property>
+    <name>gmetad_user</name>
+    <value>nobody</value>
+    <property-type>USER GROUP</property-type>
+    <description>User </description>
+  </property>
+  <property>
+    <name>gmond_user</name>
+    <value>nobody</value>
+    <property-type>USER GROUP</property-type>
+    <description>User </description>
+  </property>
+  <property>
+    <name>rrdcached_base_dir</name>
+    <value>/var/lib/ganglia/rrds</value>
+    <description>Default directory for saving the rrd files on ganglia server</description>
+  </property>
+  <property>
+    <name>rrdcached_timeout</name>
+    <value>3600</value>
+    <description>(-w) Data is written to disk every timeout seconds. If this option is not
[24/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, 
Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet 
Varakhedi via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e7d07030
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e7d07030
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e7d07030

Branch: refs/heads/trunk
Commit: e7d0703033f5b799e9237c1cb369b1b8557182af
Parents: bd32ef3
Author: Jun Aoki 
Authored: Mon Nov 24 15:43:40 2014 -0800
Committer: Jun Aoki 
Committed: Mon Nov 24 15:43:40 2014 -0800

--
 ambari-server/pom.xml   | 2 +
 .../3.0.0.0/blueprints/multinode-default.json   |   183 +
 .../3.0.0.0/blueprints/singlenode-default.json  |   137 +
 .../PHD/3.0.0.0/configuration/cluster-env.xml   |56 +
 .../3.0.0.0/hooks/after-INSTALL/scripts/hook.py |35 +
 .../hooks/after-INSTALL/scripts/params.py   |73 +
 .../scripts/shared_initialization.py|38 +
 .../hooks/before-ANY/files/changeToSecureUid.sh |50 +
 .../3.0.0.0/hooks/before-ANY/scripts/hook.py|35 +
 .../3.0.0.0/hooks/before-ANY/scripts/params.py  |   134 +
 .../before-ANY/scripts/shared_initialization.py |   114 +
 .../hooks/before-INSTALL/scripts/hook.py|38 +
 .../hooks/before-INSTALL/scripts/params.py  |   122 +
 .../scripts/repo_initialization.py  |57 +
 .../scripts/shared_initialization.py|63 +
 .../before-INSTALL/templates/repo_suse_rhel.j2  | 7 +
 .../before-INSTALL/templates/repo_ubuntu.j2 | 1 +
 .../hooks/before-RESTART/scripts/hook.py|29 +
 .../hooks/before-START/files/checkForFormat.sh  |65 +
 .../before-START/files/task-log4j.properties|   134 +
 .../3.0.0.0/hooks/before-START/scripts/hook.py  |37 +
 .../hooks/before-START/scripts/params.py|   158 +
 .../scripts/shared_initialization.py|   177 +
 .../templates/commons-logging.properties.j2 |43 +
 .../templates/exclude_hosts_list.j2 |21 +
 .../templates/hadoop-metrics2.properties.j2 |65 +
 .../before-START/templates/health_check-v2.j2   |81 +
 .../before-START/templates/health_check.j2  |   109 +
 .../templates/include_hosts_list.j2 |21 +
 .../resources/stacks/PHD/3.0.0.0/metainfo.xml   |22 +
 .../stacks/PHD/3.0.0.0/repos/repoinfo.xml   |33 +
 .../stacks/PHD/3.0.0.0/role_command_order.json  |75 +
 .../services/FLUME/configuration/flume-conf.xml |31 +
 .../services/FLUME/configuration/flume-env.xml  |78 +
 .../FLUME/configuration/flume-log4j.xml |31 +
 .../PHD/3.0.0.0/services/FLUME/metainfo.xml |69 +
 .../PHD/3.0.0.0/services/FLUME/metrics.json |   716 +
 .../services/FLUME/package/scripts/flume.py |   255 +
 .../FLUME/package/scripts/flume_check.py|40 +
 .../FLUME/package/scripts/flume_handler.py  |   121 +
 .../services/FLUME/package/scripts/params.py|70 +
 .../FLUME/package/templates/flume.conf.j2   |24 +
 .../FLUME/package/templates/log4j.properties.j2 |67 +
 .../GANGLIA/configuration/ganglia-env.xml   |77 +
 .../PHD/3.0.0.0/services/GANGLIA/metainfo.xml   |   127 +
 .../GANGLIA/package/files/checkGmetad.sh|37 +
 .../GANGLIA/package/files/checkGmond.sh |62 +
 .../GANGLIA/package/files/checkRrdcached.sh |34 +
 .../services/GANGLIA/package/files/gmetad.init  |73 +
 .../services/GANGLIA/package/files/gmetadLib.sh |   204 +
 .../services/GANGLIA/package/files/gmond.init   |73 +
 .../services/GANGLIA/package/files/gmondLib.sh  |   539 +
 .../GANGLIA/package/files/rrdcachedLib.sh   |47 +
 .../GANGLIA/package/files/setupGanglia.sh   |   141 +
 .../GANGLIA/package/files/startGmetad.sh|68 +
 .../GANGLIA/package/files/startGmond.sh |85 +
 .../GANGLIA/package/files/startRrdcached.sh |79 +
 .../GANGLIA/package/files/stopGmetad.sh |43 +
 .../services/GANGLIA/package/files/stopGmond.sh |54 +
 .../GANGLIA/package/files/stopRrdcached.sh  |41 +
 .../GANGLIA/package/files/teardownGanglia.sh|28 +
 .../GANGLIA/package/scripts/functions.py|31 +
 .../services/GANGLIA/package/scripts/ganglia.py |97 +
 .../GANGLIA/package/scripts/ganglia_monitor.py  |   236 +
 .../package/scripts/ganglia_monitor_service.py  |27 +
 .../GANGLIA/package/scripts/ganglia_server.py   |   119 +
 .../package/scripts/ganglia_server_service.py   |27 +
 .../services/GANGLIA/package/scripts/params.py  |   160 +
 .../GANGLIA/package/scripts/status_params.py|25 +
 .../GANGLIA/package/templates/ganglia.conf.j2   |34 +
 .../package/templates/gangliaClusters.conf.j2   |43 +
 .../GANGLIA/package/templates/gangliaEnv.sh.j2  |46 +
 .../GANGLIA/package

[16/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/files/checkForFormat.sh
--
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/files/checkForFormat.sh
new file mode 100644
index 000..be8c75f
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/files/checkForFormat.sh
@@ -0,0 +1,70 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export bin_dir=$1
+shift
+export old_mark_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  rm -f ${mark_file}
+  mkdir -p ${mark_dir}
+fi
+
+if [[ -d $old_mark_dir ]] ; then
+  mv ${old_mark_dir} ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+echo "NameNode Dirname = $dir"
+cmd="ls $dir | wc -l  | grep -q ^0$"
+eval $cmd
+if [[ $? -ne 0 ]] ; then
+  (( EXIT_CODE = $EXIT_CODE + 1 ))
+  list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+su -s /bin/bash - ${hdfs_user} -c "export PATH=$PATH:${bin_dir} ; yes Y | 
hadoop --config ${conf_dir} ${command}"
+  else
+echo "ERROR: Namenode directory(s) is non empty. Will not format the 
namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+
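
A hedged invocation sketch matching the export order at the top of the script (paths are illustrative); everything after the fifth argument is treated as the list of NameNode data dirs:

  import subprocess
  rc = subprocess.call(["bash", "checkForFormat.sh",
                        "hdfs",                                     # $1: hdfs_user
                        "/etc/hadoop/conf",                         # $2: conf_dir
                        "/usr/bin",                                 # $3: bin_dir
                        "/var/run/hadoop/hdfs/namenode-formatted",  # $4: old_mark_dir
                        "/var/lib/hdfs/namenode/formatted",         # $5: mark_dir (hypothetical)
                        "/hadoop/hdfs/namenode"])                   # name_dirs (comma-separated)
  print "format check exit code:", rc  # non-zero means some name dir was not empty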

http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/files/checkWebUI.py
--
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/files/checkWebUI.py b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/files/checkWebUI.py
new file mode 100644
index 000..f8e9c1a
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/package/files/checkWebUI.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+import httplib
+
+#
+# Main.
+#
+def main():
+  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
+  parser.add_option("-m", "--hosts", dest="hosts", help="Comma separated hosts 
list for WEB UI to check it availability")
+  parser.add_option("-p", "--port", dest="port", help="Port of WEB UI to check 
it availability")
+
+  (options, args) = parser.parse_args()
+  
+  hosts = options.hosts.split(',')
+  port = options.port
+
+  for host in hosts:
+    try:
+      conn = httplib.HTTPConnection(host, port)
+      # This can be modified to get a partial url part to be sent with request
+      conn.request("GET", "/")
+      httpCode = conn.getresponse().status
+      conn.close()
+    except Exception:
+      httpCode = 404
+
+    if httpCode != 200:
+      print "Cannot access WEB UI on: http://" + host + ":" + port
+      exit(1)
+  
+
+if __name__ == "__main__":
+  main()
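
A usage sketch for the checker above (hosts and port are illustrative); it exits 1 on the first UI that does not answer HTTP 200:

  import subprocess
  rc = subprocess.call(["python", "checkWebUI.py",
                        "-m", "nn1.example.com,nn2.example.com",  # comma-separated hosts
                        "-p", "50070"])                           # NameNode web UI port
  print "web UI check:", "OK" if rc == 0 else "FAILED"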

[04/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/metrics.json
--
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/metrics.json b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/metrics.json
new file mode 100644
index 000..5d8309d
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/YARN/metrics.json
@@ -0,0 +1,5360 @@
+{
+  "NODEMANAGER": {
+"Component": [
+  {
+"type": "ganglia",
+"metrics": {
+  "metrics/memory/mem_total": {
+"metric": "mem_total",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/jvm/memHeapCommittedM": {
+"metric": "jvm.JvmMetrics.MemHeapCommittedM",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/mapred/ShuffleOutputsFailed": {
+"metric": "mapred.ShuffleOutputsFailed",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/jvm/threadsRunnable": {
+"metric": "jvm.JvmMetrics.ThreadsRunnable",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/jvm/threadsNew": {
+"metric": "jvm.JvmMetrics.ThreadsNew",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/rpc/rpcAuthorizationFailures": {
+"metric": "rpc.metrics.RpcAuthorizationFailures",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/ugi/loginSuccess_avg_time": {
+"metric": "ugi.ugi.LoginSuccessAvgTime",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/rpc/RpcQueueTime_avg_time": {
+"metric": "rpc.rpc.RpcQueueTimeAvgTime",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/yarn/ContainersCompleted": {
+"metric": "yarn.NodeManagerMetrics.ContainersCompleted",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/rpc/SentBytes": {
+"metric": "rpc.rpc.SentBytes",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/jvm/memNonHeapUsedM": {
+"metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/yarn/ContainersKilled": {
+"metric": "yarn.NodeManagerMetrics.ContainersKilled",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/jvm/logWarn": {
+"metric": "jvm.JvmMetrics.LogWarn",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/jvm/threadsTimedWaiting": {
+"metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/jvm/gcCount": {
+"metric": "jvm.JvmMetrics.GcCount",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/process/proc_run": {
+"metric": "proc_run",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/memory/swap_total": {
+"metric": "swap_total",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/rpc/ReceivedBytes": {
+"metric": "rpc.rpc.ReceivedBytes",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/cpu/cpu_nice": {
+"metric": "cpu_nice",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/jvm/threadsBlocked": {
+"metric": "jvm.JvmMetrics.ThreadsBlocked",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/rpc/RpcQueueTime_num_ops": {
+"metric": "rpc.rpc.RpcQueueTimeNumOps",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/process/proc_total": {
+"metric": "proc_total",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/yarn/AllocatedGB": {
+"metric": "yarn.NodeManagerMetrics.AllocatedGB",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/disk/part_max_used": {
+"metric": "part_max_used",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/rpc/NumOpenConnections": {
+"metric": "rpc.rpc.NumOpenConnections",
+"pointInTime": false,
+"temporal": true
+  },
+

[17/24] ambari git commit: AMBARI-7872 Create stack definitions for PHD-3.0.0.0 (vasanm, adenisso, tyu, Boxiong Ding, rpidva, rmeneses, Sourabh Bansod, Ashvin Agrawal, Sujeet Varakhedi via jaoki)

2014-11-24 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/e7d07030/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/metrics.json
--
diff --git a/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/metrics.json b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/metrics.json
new file mode 100644
index 000..088626a
--- /dev/null
+++ 
b/ambari-server/src/main/resources/stacks/PHD/3.0.0.0/services/HDFS/metrics.json
@@ -0,0 +1,7840 @@
+{
+  "NAMENODE": {
+"Component": [
+  {
+"type": "ganglia",
+"metrics": {
+  "metrics/dfs/FSNamesystem/TotalLoad": {
+"metric": "dfs.FSNamesystem.TotalLoad",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/dfs/FSNamesystem/CapacityTotal": {
+"metric": "dfs.FSNamesystem.CapacityTotal",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/dfs/FSNamesystem/CapacityUsed": {
+"metric": "dfs.FSNamesystem.CapacityUsed",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/dfs/FSNamesystem/CapacityRemaining": {
+"metric": "dfs.FSNamesystem.CapacityRemaining",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
+"metric": "dfs.FSNamesystem.CapacityUsedNonDFS",
+"pointInTime": false,
+"temporal": true
+  },  
+  "metrics/dfs/FSNamesystem/BlockCapacity": {
+"metric": "dfs.FSNamesystem.BlockCapacity",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/dfs/namenode/GetListingOps": {
+"metric": "dfs.namenode.GetListingOps",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/dfs/namenode/FilesAppended": {
+"metric": "dfs.namenode.FilesAppended",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+"metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/rpcdetailed/fsync_avg_time": {
+"metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/ugi/loginSuccess_avg_time": {
+"metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/load/load_one": {
+"metric": "load_one",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/rpcdetailed/renewLease_num_ops": {
+"metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/rpcdetailed/getFileInfo_avg_time": {
+"metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/jvm/memNonHeapUsedM": {
+"metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/rpcdetailed/complete_avg_time": {
+"metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/rpcdetailed/setPermission_num_ops": {
+"metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+"metric": "dfs.FSNamesystem.CapacityTotalGB",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/rpcdetailed/setOwner_num_ops": {
+"metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/rpcdetailed/getBlockLocations_num_ops": {
+"metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/process/proc_run": {
+"metric": "proc_run",
+"pointInTime": true,
+"temporal": true
+  },
+  "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+"metric": "dfs.FSNamesystem.CapacityUsedGB",
+"pointInTime": false,
+"temporal": true
+  },
+  "metrics/dfs/namenode/AddBlockOps": {
+"metric": "dfs.namenode.AddBlockOps",
+"pointInTime"

ambari git commit: AMBARI-8438 ambari build in docker to open Java debug port. (jaoki)

2014-11-26 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk aebd5c0b3 -> 501785fa5


AMBARI-8438 ambari build in docker to open Java debug port. (jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/501785fa
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/501785fa
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/501785fa

Branch: refs/heads/trunk
Commit: 501785fa532525dbae61ca69f6e6a5098577862a
Parents: aebd5c0
Author: Jun Aoki 
Authored: Wed Nov 26 14:20:25 2014 -0800
Committer: Jun Aoki 
Committed: Wed Nov 26 14:20:25 2014 -0800

--
 dev-support/docker/README.md|  8 ++--
 dev-support/docker/docker/Dockerfile|  1 +
 dev-support/docker/docker/bin/ambaribuild.py| 42 ++--
 .../docker/docker/bin/test/ambaribuild_test.py  | 16 +++-
 4 files changed, 58 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/501785fa/dev-support/docker/README.md
--
diff --git a/dev-support/docker/README.md b/dev-support/docker/README.md
index 6613e0d..fedfd50 100644
--- a/dev-support/docker/README.md
+++ b/dev-support/docker/README.md
@@ -17,7 +17,7 @@ how to build
 
 
 ```
-docker build -t ambari/build ./docker
+docker build -t ambari/build ./dev-support/docker/docker
 ```
 
 how to run
@@ -32,19 +32,21 @@ docker run --privileged -t -i -p 80:80 -p 5005:5005 -p 8080:8080 -h node1.mydoma
 # build, install ambari and deploy hadoop in container
 cd {ambari src}
 docker rm ambari1
-docker run --privileged -t -p 80:80 -p 5005:5005 -p 8080:8080 -h node1.mydomain.com --name ambari1 -v ${AMBARI_SRC:-$(pwd)}:/tmp/ambari ambari/build /tmp/ambari-build-docker/bin/ambaribuild.py [test|server|agent|deploy] [-b] [-s [HDP|BIGTOP|PHD]]
+docker run --privileged -t -p 80:80 -p 5005:5005 -p 8080:8080 -h node1.mydomain.com --name ambari1 -v ${AMBARI_SRC:-$(pwd)}:/tmp/ambari ambari/build /tmp/ambari-build-docker/bin/ambaribuild.py [test|server|agent|deploy] [-b] [-s [HDP|BIGTOP|PHD]] [-d] [-c]
 where
 test: mvn test
 server: install and run ambari-server
 agent: install and run ambari-server and ambari-agent
 deploy: install and run ambari-server and ambari-agent, and deploy a hadoop
 -b option to rebuild ambari
+-d option to start ambari-server with --debug option
+-c option to clean local git repo. "git clean -xdf"
 ```
 
 how to run unit test
 
 ```
-cd docker
+cd dev-support/docker/docker
 python -m bin.test.ambaribuild_test
 
 ```

http://git-wip-us.apache.org/repos/asf/ambari/blob/501785fa/dev-support/docker/docker/Dockerfile
--
diff --git a/dev-support/docker/docker/Dockerfile 
b/dev-support/docker/docker/Dockerfile
index 96ba8ff..bd2c1f4 100644
--- a/dev-support/docker/docker/Dockerfile
+++ b/dev-support/docker/docker/Dockerfile
@@ -15,6 +15,7 @@ FROM centos:centos6
 RUN echo root:changeme | chpasswd
 
 ## Install some basic utilities that aren't in the default image
+RUN yum clean all -y && yum update -y
 RUN yum -y install vim wget rpm-build sudo which telnet tar openssh-server openssh-clients ntp git python-setuptools httpd
 # phantomjs dependency
 RUN yum -y install fontconfig freetype libfreetype.so.6 libfontconfig.so.1 libstdc++.so.6

http://git-wip-us.apache.org/repos/asf/ambari/blob/501785fa/dev-support/docker/docker/bin/ambaribuild.py
--
diff --git a/dev-support/docker/docker/bin/ambaribuild.py 
b/dev-support/docker/docker/bin/ambaribuild.py
index 66df31f..50db0e2 100755
--- a/dev-support/docker/docker/bin/ambaribuild.py
+++ b/dev-support/docker/docker/bin/ambaribuild.py
@@ -14,6 +14,7 @@
 
 import subprocess, time, sys
 import json
+import datetime
 from optparse import OptionParser
 
 SKIP_TEST="-DskipTests"
@@ -29,14 +30,12 @@ def git_deep_cleaning():
     return proc.wait()
 
 def ambariUnitTest():
-    git_deep_cleaning()
     proc = subprocess.Popen("mvn -fae clean install",
         shell=True,
         cwd="/tmp/ambari")
     return proc.wait()
 
 def buildAmbari(stack_distribution):
-    git_deep_cleaning()
     stack_distribution_param = ""
     if stack_distribution is not None:
         stack_distribution_param = "-Dstack.distribution=" + stack_distribution
@@ -134,35 +133,57 @@ def create_cluster():
 
 # Loop so that the Docker container does not exit
 def no_exit():
+    print ""
     print "looping so as not to exit the docker container..."
+    print ""
     while True:
         tim
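The hunks above remove git_deep_cleaning() from ambariUnitTest() and buildAmbari(), turning the clean-up into the opt-in -c flag documented in the README change. The actual option wiring is elided from this excerpt; a minimal optparse sketch of how -d and -c could be declared (hypothetical, not the committed code):

```python
from optparse import OptionParser

# Hypothetical sketch of the flag declarations, not the committed ambaribuild.py.
parser = OptionParser(usage="ambaribuild.py [test|server|agent|deploy] [options]")
parser.add_option("-b", "--rebuild", action="store_true", default=False,
                  help="rebuild ambari")
parser.add_option("-s", "--stack_distribution", dest="stack_distribution",
                  help="one of HDP, BIGTOP or PHD")
parser.add_option("-d", "--debug", action="store_true", default=False,
                  help="start ambari-server with the --debug option")
parser.add_option("-c", "--clean", action="store_true", default=False,
                  help="clean the local git repo first: git clean -xdf")

options, args = parser.parse_args(["deploy", "-b", "-s", "HDP", "-d", "-c"])
print options.debug, options.clean  # True True
```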

[07/10] ambari git commit: AMBARI-13442: stack advisor layout and validations for HAWQ service master components (adenissov via jaoki)

2015-10-26 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/46a20019/ambari-server/src/test/python/stacks/2.3/common/services-master_ambari_colo-3-hosts.json
--
diff --git 
a/ambari-server/src/test/python/stacks/2.3/common/services-master_ambari_colo-3-hosts.json
 
b/ambari-server/src/test/python/stacks/2.3/common/services-master_ambari_colo-3-hosts.json
new file mode 100644
index 000..78ba3aa
--- /dev/null
+++ 
b/ambari-server/src/test/python/stacks/2.3/common/services-master_ambari_colo-3-hosts.json
@@ -0,0 +1,2575 @@
+{
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/?fields=Versions/stack_name,Versions/stack_version,Versions/parent_stack_version,services/StackServices/service_name,services/StackServices/service_version,services/components/StackServiceComponents,services/components/dependencies,services/components/auto_deploy,services/configurations/StackConfigurations/property_depends_on,services/configurations/dependencies/StackConfigurationDependency/dependency_name,services/configurations/dependencies/StackConfigurationDependency/dependency_type,services/configurations/StackConfigurations/type&services/StackServices/service_name.in(HDFS,ZOOKEEPER,HAWQ)",
+  "Versions" : {
+"parent_stack_version" : "2.2",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"stack_hierarchy" : {
+  "stack_name" : "HDP",
+  "stack_versions" : [ "2.2", "2.1", "2.0.6" ]
+}
+  },
+  "services" : [ {
+"href" : "/api/v1/stacks/HDP/versions/2.3/services/HAWQ",
+"StackServices" : {
+  "service_name" : "HAWQ",
+  "service_version" : "2.0",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+},
+"components" : [ {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "1",
+"component_category" : "MASTER",
+"component_name" : "HAWQMASTER",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Master",
+"is_client" : false,
+"is_master" : true,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ "c6401.ambari.apache.org" ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER/dependencies/HDFS_CLIENT",
+"Dependencies" : {
+  "component_name" : "HDFS_CLIENT",
+  "dependent_component_name" : "HAWQMASTER",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  }, {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER/dependencies/NAMENODE",
+"Dependencies" : {
+  "component_name" : "NAMENODE",
+  "dependent_component_name" : "HAWQMASTER",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  } ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSEGMENT",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "1+",
+"component_category" : "SLAVE",
+"component_name" : "HAWQSEGMENT",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Segment",
+"is_client" : false,
+"is_master" : false,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ "c6403.ambari.apache.org" ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSEGMENT/dependencies/DATANODE",
+"Dependencies" : {
+  "component_name" : "DATANODE",
+  "dependent_component_name" : "HAWQSEGMENT",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  } ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSTANDBY",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "0-1",
+"component_category" : "MASTER",
+"component_name" : "HAWQSTANDBY",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Standby Master",
+"is_client" : false,
+"is_master" : true,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ "c6402.ambari.apache.org" ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSTANDBY/dependencies/HDFS_CLIENT",
+"Dependencies" : {
+  "component_name" : "HDFS_CLIENT",
+  "dependent_component_name" : "HAWQSTANDBY",
+  "dependent_service_name" : "HAWQ",
+   

[01/10] ambari git commit: AMBARI-13442: stack advisor layout and validations for HAWQ service master components (adenissov via jaoki)

2015-10-26 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 7a0aa253d -> 46a20019d


http://git-wip-us.apache.org/repos/asf/ambari/blob/46a20019/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
--
diff --git 
a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py 
b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
index b886117..cc9ef68 100644
--- a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
@@ -16,6 +16,7 @@ See the License for the specific language governing 
permissions and
 limitations under the License.
 '''
 
+import json
 import os
 import socket
 from unittest import TestCase
@@ -51,6 +52,12 @@ class TestHDP23StackAdvisor(TestCase):
 self.get_system_min_uid_real = self.stackAdvisor.get_system_min_uid
 self.stackAdvisor.get_system_min_uid = self.get_system_min_uid_magic
 
+  def load_json(self, filename):
+    file = os.path.join(self.testDirectory, filename)
+    with open(file, 'rb') as f:
+      data = json.load(f)
+    return data
+
   @patch('__builtin__.open')
   @patch('os.path.exists')
   def get_system_min_uid_magic(self, exists_mock, open_mock):
@@ -71,6 +78,174 @@ class TestHDP23StackAdvisor(TestCase):
 open_mock.return_value = MagicFile()
 return self.get_system_min_uid_real()
 
+
+  def test_createComponentLayoutRecommendations_hawq_1_Host(self):
+    """ Test that HAWQSTANDBY is not recommended on a single node cluster """
+
+    services = self.load_json("services-hawq-1-host.json")
+    componentsListList = [service["components"] for service in services["services"]]
+    componentsList = [item for sublist in componentsListList for item in sublist]
+    componentNames = [component["StackServiceComponents"]["component_name"] for component in componentsList]
+    self.assertTrue('HAWQSTANDBY' in componentNames)
+
+    hosts = self.load_json("hosts-1-host.json")
+    hostsList = [host["Hosts"]["host_name"] for host in hosts["items"]]
+    self.assertEquals(len(hostsList), 1)
+
+    recommendations = self.stackAdvisor.createComponentLayoutRecommendations(services, hosts)
+
+    recommendedComponentsListList = [hostgroup["components"] for hostgroup in recommendations["blueprint"]["host_groups"]]
+    recommendedComponents = [item["name"] for sublist in recommendedComponentsListList for item in sublist]
+    self.assertTrue('HAWQMASTER' in recommendedComponents)
+    self.assertFalse('HAWQSTANDBY' in recommendedComponents)
+    self.assertTrue('HAWQSEGMENT' in recommendedComponents)
+
+
+  def test_createComponentLayoutRecommendations_hawq_3_Hosts(self):
+    """ Test that HAWQSTANDBY is recommended on a 3-node cluster """
+
+    services = self.load_json("services-hawq-3-hosts.json")
+    componentsListList = [service["components"] for service in services["services"]]
+    componentsList = [item for sublist in componentsListList for item in sublist]
+    componentNames = [component["StackServiceComponents"]["component_name"] for component in componentsList]
+    self.assertTrue('HAWQSTANDBY' in componentNames)
+
+    hosts = self.load_json("hosts-3-hosts.json")
+    hostsList = [host["Hosts"]["host_name"] for host in hosts["items"]]
+    self.assertEquals(len(hostsList), 3)
+
+    recommendations = self.stackAdvisor.createComponentLayoutRecommendations(services, hosts)
+
+    recommendedComponentsListList = [hostgroup["components"] for hostgroup in recommendations["blueprint"]["host_groups"]]
+    recommendedComponents = [item["name"] for sublist in recommendedComponentsListList for item in sublist]
+    self.assertTrue('HAWQMASTER' in recommendedComponents)
+    self.assertTrue('HAWQSTANDBY' in recommendedComponents)
+    self.assertTrue('HAWQSEGMENT' in recommendedComponents)
+
+    # make sure master components are not collocated
+    for sublist in recommendedComponentsListList:
+      hostComponents = [item["name"] for item in sublist]
+      self.assertFalse(set(['HAWQMASTER', 'HAWQSTANDBY']).issubset(hostComponents))
+
+
+  def test_createComponentLayoutRecommendations_no_hawq_3_Hosts(self):
+    """ Test no failures when there are no HAWQ components """
+
+    services = self.load_json("services-nohawq-3-hosts.json")
+    componentsListList = [service["components"] for service in services["services"]]
+    componentsList = [item for sublist in componentsListList for item in sublist]
+    componentNames = [component["StackServiceComponents"]["component_name"] for component in componentsList]
+    self.assertFalse('HAWQMASTER' in componentNames)
+    self.assertFalse('HAWQSTANDBY' in componentNames)
+    self.assertFalse('HAWQSEGMENT' in componentNames)
+
+    hosts = self.load_json("hosts-3-hosts.json")
+    hostsList = [host["Hosts"]["host_name"] for host in hosts["items"]]
+    self.assertEqua
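These tests lean on one idiom throughout: flatten a list of component lists, then project out a field and assert membership. The same pattern standalone, with invented data, for readers skimming the fixtures:

```python
# Invented recommendation structure in the shape the tests expect.
recommendations = {"blueprint": {"host_groups": [
  {"components": [{"name": "HAWQMASTER"}, {"name": "HAWQSEGMENT"}]},
  {"components": [{"name": "HAWQSTANDBY"}]},
]}}

nested = [group["components"] for group in recommendations["blueprint"]["host_groups"]]
names = [component["name"] for sublist in nested for component in sublist]
print names  # ['HAWQMASTER', 'HAWQSEGMENT', 'HAWQSTANDBY']
```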

[10/10] ambari git commit: AMBARI-13442: stack advisor layout and validations for HAWQ service master components (adenissov via jaoki)

2015-10-26 Thread jaoki
AMBARI-13442: stack advisor layout and validations for HAWQ service master 
components (adenissov via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/46a20019
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/46a20019
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/46a20019

Branch: refs/heads/trunk
Commit: 46a20019de96f02437720f93f1d442fc33898585
Parents: 7a0aa25
Author: Jun Aoki 
Authored: Mon Oct 26 13:56:44 2015 -0700
Committer: Jun Aoki 
Committed: Mon Oct 26 13:56:44 2015 -0700

--
 .../stacks/HDP/2.3/services/stack_advisor.py|   70 +
 .../python/stacks/2.3/common/hosts-1-host.json  |   93 +
 .../python/stacks/2.3/common/hosts-3-hosts.json |  269 ++
 .../stacks/2.3/common/services-hawq-1-host.json | 2575 ++
 .../2.3/common/services-hawq-3-hosts.json   | 2575 ++
 .../services-master_ambari_colo-3-hosts.json| 2575 ++
 .../services-master_standby_colo-3-hosts.json   | 2575 ++
 .../2.3/common/services-nohawq-3-hosts.json | 2214 +++
 .../common/services-normal-hawq-3-hosts.json| 2575 ++
 .../common/services-normal-nohawq-3-hosts.json  | 2214 +++
 .../services-standby_ambari_colo-3-hosts.json   | 2575 ++
 .../stacks/2.3/common/test_stack_advisor.py |  175 ++
 12 files changed, 20485 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/46a20019/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py 
b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
index 464f9cc..2a26405 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
@@ -22,6 +22,76 @@ import socket
 
 class HDP23StackAdvisor(HDP22StackAdvisor):
 
+  def createComponentLayoutRecommendations(self, services, hosts):
+    parentComponentLayoutRecommendations = super(HDP23StackAdvisor, self).createComponentLayoutRecommendations(services, hosts)
+
+    # remove HAWQSTANDBY on a single node
+    hostsList = [host["Hosts"]["host_name"] for host in hosts["items"]]
+    if len(hostsList) == 1:
+      servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+      if "HAWQ" in servicesList:
+        components = parentComponentLayoutRecommendations["blueprint"]["host_groups"][0]["components"]
+        components = [ component for component in components if component["name"] != 'HAWQSTANDBY' ]
+        parentComponentLayoutRecommendations["blueprint"]["host_groups"][0]["components"] = components
+
+    return parentComponentLayoutRecommendations
+
+  def getComponentLayoutValidations(self, services, hosts):
+    parentItems = super(HDP23StackAdvisor, self).getComponentLayoutValidations(services, hosts)
+
+    if not "HAWQ" in [service["StackServices"]["service_name"] for service in services["services"]]:
+      return parentItems
+
+    childItems = []
+    hostsList = [host["Hosts"]["host_name"] for host in hosts["items"]]
+    hostsCount = len(hostsList)
+
+    componentsListList = [service["components"] for service in services["services"]]
+    componentsList = [item for sublist in componentsListList for item in sublist]
+    hawqMasterHosts = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "HAWQMASTER"]
+    hawqStandbyHosts = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "HAWQSTANDBY"]
+
+    # single node case is not analyzed because HAWQ Standby Master will not be present in single node topology due to logic in createComponentLayoutRecommendations()
+    if len(hawqMasterHosts) > 0 and len(hawqStandbyHosts) > 0:
+      commonHosts = [host for host in hawqMasterHosts[0] if host in hawqStandbyHosts[0]]
+      for host in commonHosts:
+        message = "HAWQ Standby Master and HAWQ Master should not be deployed on the same host."
+        childItems.append( { "type": 'host-component', "level": 'ERROR', "message":

[02/10] ambari git commit: AMBARI-13442: stack advisor layout and validations for HAWQ service master components (adenissov via jaoki)

2015-10-26 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/46a20019/ambari-server/src/test/python/stacks/2.3/common/services-standby_ambari_colo-3-hosts.json
--
diff --git 
a/ambari-server/src/test/python/stacks/2.3/common/services-standby_ambari_colo-3-hosts.json
 
b/ambari-server/src/test/python/stacks/2.3/common/services-standby_ambari_colo-3-hosts.json
new file mode 100644
index 000..08d1c36
--- /dev/null
+++ 
b/ambari-server/src/test/python/stacks/2.3/common/services-standby_ambari_colo-3-hosts.json
@@ -0,0 +1,2575 @@
+{
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/?fields=Versions/stack_name,Versions/stack_version,Versions/parent_stack_version,services/StackServices/service_name,services/StackServices/service_version,services/components/StackServiceComponents,services/components/dependencies,services/components/auto_deploy,services/configurations/StackConfigurations/property_depends_on,services/configurations/dependencies/StackConfigurationDependency/dependency_name,services/configurations/dependencies/StackConfigurationDependency/dependency_type,services/configurations/StackConfigurations/type&services/StackServices/service_name.in(HDFS,ZOOKEEPER,HAWQ)",
+  "Versions" : {
+"parent_stack_version" : "2.2",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"stack_hierarchy" : {
+  "stack_name" : "HDP",
+  "stack_versions" : [ "2.2", "2.1", "2.0.6" ]
+}
+  },
+  "services" : [ {
+"href" : "/api/v1/stacks/HDP/versions/2.3/services/HAWQ",
+"StackServices" : {
+  "service_name" : "HAWQ",
+  "service_version" : "2.0",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+},
+"components" : [ {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "1",
+"component_category" : "MASTER",
+"component_name" : "HAWQMASTER",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Master",
+"is_client" : false,
+"is_master" : true,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ "c6403.ambari.apache.org" ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER/dependencies/HDFS_CLIENT",
+"Dependencies" : {
+  "component_name" : "HDFS_CLIENT",
+  "dependent_component_name" : "HAWQMASTER",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  }, {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER/dependencies/NAMENODE",
+"Dependencies" : {
+  "component_name" : "NAMENODE",
+  "dependent_component_name" : "HAWQMASTER",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  } ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSEGMENT",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "1+",
+"component_category" : "SLAVE",
+"component_name" : "HAWQSEGMENT",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Segment",
+"is_client" : false,
+"is_master" : false,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ "c6403.ambari.apache.org" ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSEGMENT/dependencies/DATANODE",
+"Dependencies" : {
+  "component_name" : "DATANODE",
+  "dependent_component_name" : "HAWQSEGMENT",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  } ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSTANDBY",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "0-1",
+"component_category" : "MASTER",
+"component_name" : "HAWQSTANDBY",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Standby Master",
+"is_client" : false,
+"is_master" : true,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ "c6401.ambari.apache.org" ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSTANDBY/dependencies/HDFS_CLIENT",
+"Dependencies" : {
+  "component_name" : "HDFS_CLIENT",
+  "dependent_component_name" : "HAWQSTANDBY",
+  "dependent_service_name" : "HAWQ",
+   

[05/10] ambari git commit: AMBARI-13442: stack advisor layout and validations for HAWQ service master components (adenissov via jaoki)

2015-10-26 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/46a20019/ambari-server/src/test/python/stacks/2.3/common/services-nohawq-3-hosts.json
--
diff --git 
a/ambari-server/src/test/python/stacks/2.3/common/services-nohawq-3-hosts.json 
b/ambari-server/src/test/python/stacks/2.3/common/services-nohawq-3-hosts.json
new file mode 100644
index 000..a5fc45d
--- /dev/null
+++ 
b/ambari-server/src/test/python/stacks/2.3/common/services-nohawq-3-hosts.json
@@ -0,0 +1,2214 @@
+{
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/?fields=Versions/stack_name,Versions/stack_version,Versions/parent_stack_version,services/StackServices/service_name,services/StackServices/service_version,services/components/StackServiceComponents,services/components/dependencies,services/components/auto_deploy,services/configurations/StackConfigurations/property_depends_on,services/configurations/dependencies/StackConfigurationDependency/dependency_name,services/configurations/dependencies/StackConfigurationDependency/dependency_type,services/configurations/StackConfigurations/type&services/StackServices/service_name.in(HDFS,ZOOKEEPER)",
+  "Versions" : {
+"parent_stack_version" : "2.2",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"stack_hierarchy" : {
+  "stack_name" : "HDP",
+  "stack_versions" : [ "2.2", "2.1", "2.0.6" ]
+}
+  },
+  "services" : [ {
+"href" : "/api/v1/stacks/HDP/versions/2.3/services/HDFS",
+"StackServices" : {
+  "service_name" : "HDFS",
+  "service_version" : "2.7.1.2.3",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+},
+"components" : [ {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HDFS/components/DATANODE",
+  "StackServiceComponents" : {
+"advertise_version" : true,
+"cardinality" : "1+",
+"component_category" : "SLAVE",
+"component_name" : "DATANODE",
+"custom_commands" : [ ],
+"display_name" : "DataNode",
+"is_client" : false,
+"is_master" : false,
+"service_name" : "HDFS",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ ]
+  },
+  "dependencies" : [ ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HDFS/components/HDFS_CLIENT",
+  "StackServiceComponents" : {
+"advertise_version" : true,
+"cardinality" : "1+",
+"component_category" : "CLIENT",
+"component_name" : "HDFS_CLIENT",
+"custom_commands" : [ ],
+"display_name" : "HDFS Client",
+"is_client" : true,
+"is_master" : false,
+"service_name" : "HDFS",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ ]
+  },
+  "dependencies" : [ ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HDFS/components/JOURNALNODE",
+  "StackServiceComponents" : {
+"advertise_version" : true,
+"cardinality" : "0+",
+"component_category" : "SLAVE",
+"component_name" : "JOURNALNODE",
+"custom_commands" : [ ],
+"display_name" : "JournalNode",
+"is_client" : false,
+"is_master" : false,
+"service_name" : "HDFS",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HDFS/components/JOURNALNODE/dependencies/HDFS_CLIENT",
+"Dependencies" : {
+  "component_name" : "HDFS_CLIENT",
+  "dependent_component_name" : "JOURNALNODE",
+  "dependent_service_name" : "HDFS",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  } ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HDFS/components/NAMENODE",
+  "StackServiceComponents" : {
+"advertise_version" : true,
+"cardinality" : "1-2",
+"component_category" : "MASTER",
+"component_name" : "NAMENODE",
+"custom_commands" : [ "DECOMMISSION", "REBALANCEHDFS" ],
+"display_name" : "NameNode",
+"is_client" : false,
+"is_master" : true,
+"service_name" : "HDFS",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ ]
+  },
+  "dependencies" : [ ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HDFS/components/NFS_GATEWAY",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "0+",
+"component_category" : "SLAVE",
+"component_name" : "NFS_GATEWAY",
+"custom_commands" : [ ],
+"display_name" : "NFSGateway",
+"is_client" : false,
+"is_master" : false,
+"service_name" : "HDFS",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ ]
+  },
+  "dependencies

[04/10] ambari git commit: AMBARI-13442: stack advisor layout and validations for HAWQ service master components (adenissov via jaoki)

2015-10-26 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/46a20019/ambari-server/src/test/python/stacks/2.3/common/services-normal-hawq-3-hosts.json
--
diff --git 
a/ambari-server/src/test/python/stacks/2.3/common/services-normal-hawq-3-hosts.json
 
b/ambari-server/src/test/python/stacks/2.3/common/services-normal-hawq-3-hosts.json
new file mode 100644
index 000..2e76c3e
--- /dev/null
+++ 
b/ambari-server/src/test/python/stacks/2.3/common/services-normal-hawq-3-hosts.json
@@ -0,0 +1,2575 @@
+{
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/?fields=Versions/stack_name,Versions/stack_version,Versions/parent_stack_version,services/StackServices/service_name,services/StackServices/service_version,services/components/StackServiceComponents,services/components/dependencies,services/components/auto_deploy,services/configurations/StackConfigurations/property_depends_on,services/configurations/dependencies/StackConfigurationDependency/dependency_name,services/configurations/dependencies/StackConfigurationDependency/dependency_type,services/configurations/StackConfigurations/type&services/StackServices/service_name.in(HDFS,ZOOKEEPER,HAWQ)",
+  "Versions" : {
+"parent_stack_version" : "2.2",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"stack_hierarchy" : {
+  "stack_name" : "HDP",
+  "stack_versions" : [ "2.2", "2.1", "2.0.6" ]
+}
+  },
+  "services" : [ {
+"href" : "/api/v1/stacks/HDP/versions/2.3/services/HAWQ",
+"StackServices" : {
+  "service_name" : "HAWQ",
+  "service_version" : "2.0",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+},
+"components" : [ {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "1",
+"component_category" : "MASTER",
+"component_name" : "HAWQMASTER",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Master",
+"is_client" : false,
+"is_master" : true,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ "c6403.ambari.apache.org" ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER/dependencies/HDFS_CLIENT",
+"Dependencies" : {
+  "component_name" : "HDFS_CLIENT",
+  "dependent_component_name" : "HAWQMASTER",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  }, {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER/dependencies/NAMENODE",
+"Dependencies" : {
+  "component_name" : "NAMENODE",
+  "dependent_component_name" : "HAWQMASTER",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  } ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSEGMENT",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "1+",
+"component_category" : "SLAVE",
+"component_name" : "HAWQSEGMENT",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Segment",
+"is_client" : false,
+"is_master" : false,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ "c6402.ambari.apache.org", "c6403.ambari.apache.org", 
"c6401.ambari.apache.org" ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSEGMENT/dependencies/DATANODE",
+"Dependencies" : {
+  "component_name" : "DATANODE",
+  "dependent_component_name" : "HAWQSEGMENT",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  } ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSTANDBY",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "0-1",
+"component_category" : "MASTER",
+"component_name" : "HAWQSTANDBY",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Standby Master",
+"is_client" : false,
+"is_master" : true,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ "c6402.ambari.apache.org" ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSTANDBY/dependencies/HDFS_CLIENT",
+"Dependencies" : {
+  "component_name" : "HDFS_CLIENT",
+  "dependent_component_name" : "HAWQSTANDBY",
+  "dependent_servi

[06/10] ambari git commit: AMBARI-13442: stack advisor layout and validations for HAWQ service master components (adenissov via jaoki)

2015-10-26 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/46a20019/ambari-server/src/test/python/stacks/2.3/common/services-master_standby_colo-3-hosts.json
--
diff --git 
a/ambari-server/src/test/python/stacks/2.3/common/services-master_standby_colo-3-hosts.json
 
b/ambari-server/src/test/python/stacks/2.3/common/services-master_standby_colo-3-hosts.json
new file mode 100644
index 000..e380492
--- /dev/null
+++ 
b/ambari-server/src/test/python/stacks/2.3/common/services-master_standby_colo-3-hosts.json
@@ -0,0 +1,2575 @@
+{
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/?fields=Versions/stack_name,Versions/stack_version,Versions/parent_stack_version,services/StackServices/service_name,services/StackServices/service_version,services/components/StackServiceComponents,services/components/dependencies,services/components/auto_deploy,services/configurations/StackConfigurations/property_depends_on,services/configurations/dependencies/StackConfigurationDependency/dependency_name,services/configurations/dependencies/StackConfigurationDependency/dependency_type,services/configurations/StackConfigurations/type&services/StackServices/service_name.in(HDFS,ZOOKEEPER,HAWQ)",
+  "Versions" : {
+"parent_stack_version" : "2.2",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"stack_hierarchy" : {
+  "stack_name" : "HDP",
+  "stack_versions" : [ "2.2", "2.1", "2.0.6" ]
+}
+  },
+  "services" : [ {
+"href" : "/api/v1/stacks/HDP/versions/2.3/services/HAWQ",
+"StackServices" : {
+  "service_name" : "HAWQ",
+  "service_version" : "2.0",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+},
+"components" : [ {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "1",
+"component_category" : "MASTER",
+"component_name" : "HAWQMASTER",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Master",
+"is_client" : false,
+"is_master" : true,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ "c6403.ambari.apache.org" ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER/dependencies/HDFS_CLIENT",
+"Dependencies" : {
+  "component_name" : "HDFS_CLIENT",
+  "dependent_component_name" : "HAWQMASTER",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  }, {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER/dependencies/NAMENODE",
+"Dependencies" : {
+  "component_name" : "NAMENODE",
+  "dependent_component_name" : "HAWQMASTER",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  } ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSEGMENT",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "1+",
+"component_category" : "SLAVE",
+"component_name" : "HAWQSEGMENT",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Segment",
+"is_client" : false,
+"is_master" : false,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ "c6403.ambari.apache.org" ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSEGMENT/dependencies/DATANODE",
+"Dependencies" : {
+  "component_name" : "DATANODE",
+  "dependent_component_name" : "HAWQSEGMENT",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  } ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSTANDBY",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "0-1",
+"component_category" : "MASTER",
+"component_name" : "HAWQSTANDBY",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Standby Master",
+"is_client" : false,
+"is_master" : true,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ "c6403.ambari.apache.org" ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSTANDBY/dependencies/HDFS_CLIENT",
+"Dependencies" : {
+  "component_name" : "HDFS_CLIENT",
+  "dependent_component_name" : "HAWQSTANDBY",
+  "dependent_service_name" : "HAWQ",
+   

[03/10] ambari git commit: AMBARI-13442: stack advisor layout and validations for HAWQ service master components (adenissov via jaoki)

2015-10-26 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/46a20019/ambari-server/src/test/python/stacks/2.3/common/services-normal-nohawq-3-hosts.json
--
diff --git 
a/ambari-server/src/test/python/stacks/2.3/common/services-normal-nohawq-3-hosts.json
 
b/ambari-server/src/test/python/stacks/2.3/common/services-normal-nohawq-3-hosts.json
new file mode 100644
index 000..aec23c8
--- /dev/null
+++ 
b/ambari-server/src/test/python/stacks/2.3/common/services-normal-nohawq-3-hosts.json
@@ -0,0 +1,2214 @@
+{
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/?fields=Versions/stack_name,Versions/stack_version,Versions/parent_stack_version,services/StackServices/service_name,services/StackServices/service_version,services/components/StackServiceComponents,services/components/dependencies,services/components/auto_deploy,services/configurations/StackConfigurations/property_depends_on,services/configurations/dependencies/StackConfigurationDependency/dependency_name,services/configurations/dependencies/StackConfigurationDependency/dependency_type,services/configurations/StackConfigurations/type&services/StackServices/service_name.in(HDFS,ZOOKEEPER)",
+  "Versions" : {
+"parent_stack_version" : "2.2",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"stack_hierarchy" : {
+  "stack_name" : "HDP",
+  "stack_versions" : [ "2.2", "2.1", "2.0.6" ]
+}
+  },
+  "services" : [ {
+"href" : "/api/v1/stacks/HDP/versions/2.3/services/HDFS",
+"StackServices" : {
+  "service_name" : "HDFS",
+  "service_version" : "2.7.1.2.3",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+},
+"components" : [ {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HDFS/components/DATANODE",
+  "StackServiceComponents" : {
+"advertise_version" : true,
+"cardinality" : "1+",
+"component_category" : "SLAVE",
+"component_name" : "DATANODE",
+"custom_commands" : [ ],
+"display_name" : "DataNode",
+"is_client" : false,
+"is_master" : false,
+"service_name" : "HDFS",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ "c6402.ambari.apache.org", "c6403.ambari.apache.org", 
"c6401.ambari.apache.org" ]
+  },
+  "dependencies" : [ ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HDFS/components/HDFS_CLIENT",
+  "StackServiceComponents" : {
+"advertise_version" : true,
+"cardinality" : "1+",
+"component_category" : "CLIENT",
+"component_name" : "HDFS_CLIENT",
+"custom_commands" : [ ],
+"display_name" : "HDFS Client",
+"is_client" : true,
+"is_master" : false,
+"service_name" : "HDFS",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ "c6401.ambari.apache.org" ]
+  },
+  "dependencies" : [ ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HDFS/components/JOURNALNODE",
+  "StackServiceComponents" : {
+"advertise_version" : true,
+"cardinality" : "0+",
+"component_category" : "SLAVE",
+"component_name" : "JOURNALNODE",
+"custom_commands" : [ ],
+"display_name" : "JournalNode",
+"is_client" : false,
+"is_master" : false,
+"service_name" : "HDFS",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ "c6402.ambari.apache.org", "c6403.ambari.apache.org", 
"c6401.ambari.apache.org" ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HDFS/components/JOURNALNODE/dependencies/HDFS_CLIENT",
+"Dependencies" : {
+  "component_name" : "HDFS_CLIENT",
+  "dependent_component_name" : "JOURNALNODE",
+  "dependent_service_name" : "HDFS",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  } ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HDFS/components/NAMENODE",
+  "StackServiceComponents" : {
+"advertise_version" : true,
+"cardinality" : "1-2",
+"component_category" : "MASTER",
+"component_name" : "NAMENODE",
+"custom_commands" : [ "DECOMMISSION", "REBALANCEHDFS" ],
+"display_name" : "NameNode",
+"is_client" : false,
+"is_master" : true,
+"service_name" : "HDFS",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ "c6401.ambari.apache.org" ]
+  },
+  "dependencies" : [ ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HDFS/components/NFS_GATEWAY",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "0+",
+"component_category" : "SLAVE",
+"component_name" : "NFS_GATEWAY",
+"custom_commands" : [ ],
+"dis

[09/10] ambari git commit: AMBARI-13442: stack advisor layout and validations for HAWQ service master components (adenissov via jaoki)

2015-10-26 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/46a20019/ambari-server/src/test/python/stacks/2.3/common/services-hawq-1-host.json
--
diff --git 
a/ambari-server/src/test/python/stacks/2.3/common/services-hawq-1-host.json 
b/ambari-server/src/test/python/stacks/2.3/common/services-hawq-1-host.json
new file mode 100644
index 000..36852c4
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.3/common/services-hawq-1-host.json
@@ -0,0 +1,2575 @@
+{
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/?fields=Versions/stack_name,Versions/stack_version,Versions/parent_stack_version,services/StackServices/service_name,services/StackServices/service_version,services/components/StackServiceComponents,services/components/dependencies,services/components/auto_deploy,services/configurations/StackConfigurations/property_depends_on,services/configurations/dependencies/StackConfigurationDependency/dependency_name,services/configurations/dependencies/StackConfigurationDependency/dependency_type,services/configurations/StackConfigurations/type&services/StackServices/service_name.in(HDFS,ZOOKEEPER,HAWQ)",
+  "Versions" : {
+"parent_stack_version" : "2.2",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"stack_hierarchy" : {
+  "stack_name" : "HDP",
+  "stack_versions" : [ "2.2", "2.1", "2.0.6" ]
+}
+  },
+  "services" : [ {
+"href" : "/api/v1/stacks/HDP/versions/2.3/services/HAWQ",
+"StackServices" : {
+  "service_name" : "HAWQ",
+  "service_version" : "2.0",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+},
+"components" : [ {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "1",
+"component_category" : "MASTER",
+"component_name" : "HAWQMASTER",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Master",
+"is_client" : false,
+"is_master" : true,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER/dependencies/HDFS_CLIENT",
+"Dependencies" : {
+  "component_name" : "HDFS_CLIENT",
+  "dependent_component_name" : "HAWQMASTER",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  }, {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER/dependencies/NAMENODE",
+"Dependencies" : {
+  "component_name" : "NAMENODE",
+  "dependent_component_name" : "HAWQMASTER",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  } ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSEGMENT",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "1+",
+"component_category" : "SLAVE",
+"component_name" : "HAWQSEGMENT",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Segment",
+"is_client" : false,
+"is_master" : false,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSEGMENT/dependencies/DATANODE",
+"Dependencies" : {
+  "component_name" : "DATANODE",
+  "dependent_component_name" : "HAWQSEGMENT",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  } ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSTANDBY",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "0-1",
+"component_category" : "MASTER",
+"component_name" : "HAWQSTANDBY",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Standby Master",
+"is_client" : false,
+"is_master" : true,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSTANDBY/dependencies/HDFS_CLIENT",
+"Dependencies" : {
+  "component_name" : "HDFS_CLIENT",
+  "dependent_component_name" : "HAWQSTANDBY",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  } ]
+} ],
+"configurations" : [ {
+  "href" : 
"/a

[08/10] ambari git commit: AMBARI-13442: stack advisor layout and validations for HAWQ service master components (adenissov via jaoki)

2015-10-26 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/46a20019/ambari-server/src/test/python/stacks/2.3/common/services-hawq-3-hosts.json
--
diff --git 
a/ambari-server/src/test/python/stacks/2.3/common/services-hawq-3-hosts.json 
b/ambari-server/src/test/python/stacks/2.3/common/services-hawq-3-hosts.json
new file mode 100644
index 000..36852c4
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.3/common/services-hawq-3-hosts.json
@@ -0,0 +1,2575 @@
+{
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/?fields=Versions/stack_name,Versions/stack_version,Versions/parent_stack_version,services/StackServices/service_name,services/StackServices/service_version,services/components/StackServiceComponents,services/components/dependencies,services/components/auto_deploy,services/configurations/StackConfigurations/property_depends_on,services/configurations/dependencies/StackConfigurationDependency/dependency_name,services/configurations/dependencies/StackConfigurationDependency/dependency_type,services/configurations/StackConfigurations/type&services/StackServices/service_name.in(HDFS,ZOOKEEPER,HAWQ)",
+  "Versions" : {
+"parent_stack_version" : "2.2",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"stack_hierarchy" : {
+  "stack_name" : "HDP",
+  "stack_versions" : [ "2.2", "2.1", "2.0.6" ]
+}
+  },
+  "services" : [ {
+"href" : "/api/v1/stacks/HDP/versions/2.3/services/HAWQ",
+"StackServices" : {
+  "service_name" : "HAWQ",
+  "service_version" : "2.0",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+},
+"components" : [ {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "1",
+"component_category" : "MASTER",
+"component_name" : "HAWQMASTER",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Master",
+"is_client" : false,
+"is_master" : true,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER/dependencies/HDFS_CLIENT",
+"Dependencies" : {
+  "component_name" : "HDFS_CLIENT",
+  "dependent_component_name" : "HAWQMASTER",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  }, {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER/dependencies/NAMENODE",
+"Dependencies" : {
+  "component_name" : "NAMENODE",
+  "dependent_component_name" : "HAWQMASTER",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  } ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSEGMENT",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "1+",
+"component_category" : "SLAVE",
+"component_name" : "HAWQSEGMENT",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Segment",
+"is_client" : false,
+"is_master" : false,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSEGMENT/dependencies/DATANODE",
+"Dependencies" : {
+  "component_name" : "DATANODE",
+  "dependent_component_name" : "HAWQSEGMENT",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  } ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSTANDBY",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "0-1",
+"component_category" : "MASTER",
+"component_name" : "HAWQSTANDBY",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Standby Master",
+"is_client" : false,
+"is_master" : true,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSTANDBY/dependencies/HDFS_CLIENT",
+"Dependencies" : {
+  "component_name" : "HDFS_CLIENT",
+  "dependent_component_name" : "HAWQSTANDBY",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  } ]
+} ],
+"configurations" : [ {
+  "href" : 

ambari git commit: AMBARI-13589: Rename resource manager's GUC names in sync with apache hawq (bhuvnesh2703 via jaoki)

2015-10-28 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 2337e5390 -> 897703b72


AMBARI-13589: Rename resource manager's GUC names in sync with apache hawq 
(bhuvnesh2703 via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/897703b7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/897703b7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/897703b7

Branch: refs/heads/trunk
Commit: 897703b72f9a5274f01d277f894d30b3af87faac
Parents: 2337e53
Author: Jun Aoki 
Authored: Wed Oct 28 15:10:46 2015 -0700
Committer: Jun Aoki 
Committed: Wed Oct 28 15:10:46 2015 -0700

--
 .../HAWQ/2.0.0.0/configuration/hawq-site.xml| 21 +---
 .../HAWQ/2.0.0.0/package/scripts/common.py  |  6 +++---
 2 files changed, 21 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/897703b7/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/hawq-site.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/hawq-site.xml
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/hawq-site.xml
index a322a5b..3e9573e 100644
--- 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/hawq-site.xml
+++ 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/hawq-site.xml
@@ -81,7 +81,7 @@
 
   </property>
   <property>
-    <name>hawq_resourcemanager_server_type</name>
+    <name>hawq_global_rm_type</name>
     <value>none</value>
     <description>The resource manager type to start for allocating resource.
       'none' means HAWQ resource manager exclusively uses whole
@@ -94,7 +94,7 @@
     <name>hawq_resourcemanager_segment_limit_memory_use</name>
     <value>64GB</value>
     <description>The limit of memory usage in a HAWQ segment when
-      HAWQ is set 'none'.
+      hawq_global_rm_type is set 'none'.
     </description>
   </property>
 
@@ -102,7 +102,7 @@
     <name>hawq_resourcemanager_segment_limit_core_use</name>
     <value>16</value>
     <description>The limit of virtual core usage in a HAWQ segment when
-      HAWQ is set 'none'.
+      hawq_global_rm_type is set 'none'.
     </description>
   </property>
 
@@ -164,4 +164,19 @@
     <value>1.0</value>
   </property>
 
+  <property>
+    <name>hawq_resourcemanager_master_address_domainsocket_port</name>
+    <value>5436</value>
+  </property>
+
+  <property>
+    <name>hawq_rm_master_port</name>
+    <value>5437</value>
+  </property>
+
+  <property>
+    <name>hawq_rm_segment_port</name>
+    <value>5438</value>
+  </property>
+
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/897703b7/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/common.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/common.py
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/common.py
index 41a3196..8e363c1 100644
--- 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/common.py
+++ 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/common.py
@@ -276,8 +276,8 @@ def validate_configuration():
     raise Fail("Configurations does not contain hawq-site. Please include HAWQ")
 
   # If HAWQ is set to use YARN and YARN is not configured, error.
-  rm_type = params.config["configurations"]["hawq-site"].get("hawq_resourcemanager_server_type")
+  rm_type = params.config["configurations"]["hawq-site"].get("hawq_global_rm_type")
   if rm_type == "yarn" and "yarn-site" not in params.config["configurations"]:
     raise Fail("HAWQ is set to use YARN but YARN is not deployed. " +
-               "hawq_resourcemanager_server_type property in hawq-site is set to 'yarn' but YARN is not configured. " +
-               "Please deploy YARN before starting HAWQ or change the value of hawq_resourcemanager_server_type property to 'none'")
+               "hawq_global_rm_type property in hawq-site is set to 'yarn' but YARN is not configured. " +
+               "Please deploy YARN before starting HAWQ or change the value of hawq_global_rm_type property to 'none'")
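A condensed, standalone version of the renamed check (plain dicts and Exception stand in for Ambari's params.config and Fail):

```python
# Standalone sketch of the validation above, with plain-dict configurations.
def validate_rm_type(configurations):
  if "hawq-site" not in configurations:
    raise Exception("Configurations does not contain hawq-site. Please include HAWQ")
  rm_type = configurations["hawq-site"].get("hawq_global_rm_type")
  if rm_type == "yarn" and "yarn-site" not in configurations:
    raise Exception("hawq_global_rm_type is set to 'yarn' but YARN is not configured")

validate_rm_type({"hawq-site": {"hawq_global_rm_type": "none"}})                   # passes
validate_rm_type({"hawq-site": {"hawq_global_rm_type": "yarn"}, "yarn-site": {}})  # passes
```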



ambari git commit: AMBARI-13666: Align to HAWQ Resource manager GUC names HAWQ-111 (bhuvnesh2703 via jaoki)

2015-11-02 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk cc5b61066 -> 3bfd4cb49


AMBARI-13666: Align to HAWQ Resource manager GUC names HAWQ-111 (bhuvnesh2703 
via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3bfd4cb4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3bfd4cb4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3bfd4cb4

Branch: refs/heads/trunk
Commit: 3bfd4cb491c50a7735e05e77c25fbe66e7d36e2d
Parents: cc5b610
Author: Jun Aoki 
Authored: Mon Nov 2 16:51:12 2015 -0800
Committer: Jun Aoki 
Committed: Mon Nov 2 16:51:12 2015 -0800

--
 .../HAWQ/2.0.0.0/configuration/hawq-site.xml| 28 -
 .../HAWQ/2.0.0.0/package/scripts/common.py  |  4 +-
 .../stacks/2.3/common/services-hawq-1-host.json | 61 
 .../2.3/common/services-hawq-3-hosts.json   | 61 
 .../services-master_ambari_colo-3-hosts.json| 61 
 .../services-master_standby_colo-3-hosts.json   | 61 
 .../common/services-normal-hawq-3-hosts.json| 61 
 .../services-standby_ambari_colo-3-hosts.json   | 61 
 .../app/utils/configs/config_property_helper.js |  4 +-
 9 files changed, 166 insertions(+), 236 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/3bfd4cb4/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/hawq-site.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/hawq-site.xml
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/hawq-site.xml
index 3e9573e..41b10dc 100644
--- 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/hawq-site.xml
+++ 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/hawq-site.xml
@@ -91,7 +91,7 @@
   </property>
 
   <property>
-    <name>hawq_resourcemanager_segment_limit_memory_use</name>
+    <name>hawq_rm_memory_limit_perseg</name>
     <value>64GB</value>
     <description>The limit of memory usage in a HAWQ segment when
       hawq_global_rm_type is set 'none'.
@@ -99,7 +99,7 @@
   </property>
 
   <property>
-    <name>hawq_resourcemanager_segment_limit_core_use</name>
+    <name>hawq_rm_nvcore_limit_perseg</name>
     <value>16</value>
     <description>The limit of virtual core usage in a HAWQ segment when
       hawq_global_rm_type is set 'none'.
@@ -107,60 +107,56 @@
   </property>
 
   <property>
-    <name>hawq_resourcemanager_yarn_resourcemanager_address</name>
+    <name>hawq_rm_yarn_address</name>
     <value>localhost:8032</value>
     <description>The address of YARN resource manager server.</description>
   </property>
 
   <property>
-    <name>hawq_resourcemanager_yarn_resourcemanager_scheduler_address</name>
+    <name>hawq_rm_yarn_scheduler_address</name>
     <value>localhost:8030</value>
     <description>The address of YARN scheduler server.</description>
   </property>
 
   <property>
-    <name>hawq_resourcemanager_yarn_queue</name>
+    <name>hawq_rm_yarn_queue_name</name>
     <value>default</value>
     <description>The YARN queue name to register HAWQ resource manager.</description>
   </property>
 
   <property>
-    <name>hawq_resourcemanager_yarn_application_name</name>
+    <name>hawq_rm_yarn_app_name</name>
     <value>hawq</value>
     <description>The application name to register HAWQ resource manager in YARN.</description>
   </property>
-  <property>
-    <name>hawq_resourcemanager_log_level</name>
-    <value>10</value>
-  </property>
 
   <property>
-    <name>hawq_resourceenforcer_cpu_enable</name>
+    <name>hawq_re_cpu_enable</name>
     <value>false</value>
   </property>
 
   <property>
-    <name>hawq_resourceenforcer_cgroup_mount_point</name>
+    <name>hawq_re_cgroup_mount_point</name>
     <value>/sys/fs/cgroup</value>
   </property>
 
   <property>
-    <name>hawq_resourceenforcer_cgroup_hierarchy_name</name>
+    <name>hawq_re_cgroup_hierarchy_name</name>
     <value>hadoop-yarn</value>
   </property>
 
   <property>
-    <name>hawq_resourceenforcer_cleanup_period</name>
+    <name>hawq_re_cleanup_period</name>
     <value>180</value>
   </property>
 
   <property>
-    <name>hawq_resourceenforcer_cpu_weight</name>
+    <name>hawq_re_cpu_weight</name>
     <value>1024.0</value>
   </property>
 
   <property>
-    <name>hawq_resourceenforcer_vcore_pcore_ratio</name>
+    <name>hawq_re_vcore_pcore_ratio</name>
     <value>1.0</value>
   </property>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/3bfd4cb4/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/common.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/common.py
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/common.py
index 8e363c1..31ef2fa 100644
--- 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/common.py
+++ 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/package/scripts/common.py
@@ -107,8 +107,8 @@ def __substitute_hostnames_in_hawq_site():
   substituted_hawq_site = params.hawq_site.copy()
   hawq_site_property_map = {"hawq_master_address_host": params.hawqmaster_host,
 "hawq_standby_address_host": 
hawqstandby_host_desired_value,
-
"hawq_resourcemanager_yarn_resourcemanager_address": params.rm_host,
-
"hawq_resourcemanag

ambari git commit: AMBARI-13516: HAWQ service's order is not available in role_command_order.js (mithmatt via jaoki)

2015-11-02 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 94b8f7b25 -> 715437a5f


AMBARI-13516: HAWQ service's order is not available in role_command_order.js 
(mithmatt via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/715437a5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/715437a5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/715437a5

Branch: refs/heads/trunk
Commit: 715437a5fd519005c2d868ea8dd676d0389064a0
Parents: 94b8f7b
Author: Jun Aoki 
Authored: Mon Nov 2 17:13:17 2015 -0800
Committer: Jun Aoki 
Committed: Mon Nov 2 17:13:17 2015 -0800

--
 .../src/main/resources/stacks/HDP/2.3/role_command_order.json| 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/715437a5/ambari-server/src/main/resources/stacks/HDP/2.3/role_command_order.json
--
diff --git 
a/ambari-server/src/main/resources/stacks/HDP/2.3/role_command_order.json 
b/ambari-server/src/main/resources/stacks/HDP/2.3/role_command_order.json
index 9b1625d..ad195de 100755
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/role_command_order.json
@@ -8,6 +8,8 @@
 "RANGER_KMS_SERVICE_CHECK-SERVICE_CHECK" : ["RANGER_KMS_SERVER-START"],
 "PHOENIX_QUERY_SERVER-START": ["HBASE_MASTER-START"],
 "ATLAS_SERVICE_CHECK-SERVICE_CHECK": ["ATLAS_SERVER-START"],
-"SPARK_THRIFTSERVER-START" : ["NAMENODE-START"]
+"SPARK_THRIFTSERVER-START" : ["NAMENODE-START"],
+"HAWQMASTER-START" : 
["NAMENODE-START","DATANODE-START","HAWQSTANDBY-START"],
+"HAWQ_SERVICE_CHECK-SERVICE_CHECK" : ["HAWQMASTER-START"]
   }
 }
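For readers unfamiliar with this file, a minimal sketch of how such entries are consumed: each key is a role-command, and its value lists the role-commands that must finish first. This illustrates the data shape only, not Ambari's actual scheduler code:

# Illustration only: how the two entries added above are interpreted.
import json

ORDER_SNIPPET = """
{
  "HAWQMASTER-START": ["NAMENODE-START", "DATANODE-START", "HAWQSTANDBY-START"],
  "HAWQ_SERVICE_CHECK-SERVICE_CHECK": ["HAWQMASTER-START"]
}
"""
role_command_order = json.loads(ORDER_SNIPPET)

def prerequisites(role_command):
    # Role-commands without an entry carry no ordering constraint.
    return role_command_order.get(role_command, [])

assert "NAMENODE-START" in prerequisites("HAWQMASTER-START")
assert prerequisites("HAWQ_SERVICE_CHECK-SERVICE_CHECK") == ["HAWQMASTER-START"]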



ambari git commit: AMBARI-13618: Add configure / start / stop / status operations for Apache HAWQ's PXF (mithmatt via jaoki)

2015-11-03 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 1e86d690d -> 4763c759c


AMBARI-13618: Add configure / start / stop / status operations for Apache 
HAWQ's PXF (mithmatt via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4763c759
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4763c759
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4763c759

Branch: refs/heads/trunk
Commit: 4763c759cc728d9fd8f24402c90012ae527985e5
Parents: 1e86d69
Author: Jun Aoki 
Authored: Tue Nov 3 12:14:45 2015 -0800
Committer: Jun Aoki 
Committed: Tue Nov 3 12:14:45 2015 -0800

--
 .../PXF/3.0.0.0/configuration/pxf-site.xml  |  19 +++
 .../common-services/PXF/3.0.0.0/metainfo.xml|   2 +-
 .../PXF/3.0.0.0/package/scripts/params.py   |  42 +++
 .../PXF/3.0.0.0/package/scripts/pxf.py  | 120 +++
 .../PXF/3.0.0.0/package/scripts/pxfservice.py   |  41 ---
 .../PXF/3.0.0.0/package/templates/pxf-env.j2|  34 ++
 6 files changed, 216 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/4763c759/ambari-server/src/main/resources/common-services/PXF/3.0.0.0/configuration/pxf-site.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0.0/configuration/pxf-site.xml
 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0.0/configuration/pxf-site.xml
new file mode 100644
index 000..0b3a36e
--- /dev/null
+++ 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0.0/configuration/pxf-site.xml
@@ -0,0 +1,19 @@
+
+
+ 
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/4763c759/ambari-server/src/main/resources/common-services/PXF/3.0.0.0/metainfo.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0.0/metainfo.xml 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0.0/metainfo.xml
index 4df1bd6..f578d64 100644
--- a/ambari-server/src/main/resources/common-services/PXF/3.0.0.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/PXF/3.0.0.0/metainfo.xml
@@ -32,7 +32,7 @@
   SLAVE
   1+
   
-scripts/pxfservice.py
+scripts/pxf.py
 PYTHON
 600
   

http://git-wip-us.apache.org/repos/asf/ambari/blob/4763c759/ambari-server/src/main/resources/common-services/PXF/3.0.0.0/package/scripts/params.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0.0/package/scripts/params.py
 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0.0/package/scripts/params.py
new file mode 100644
index 000..a4986c9
--- /dev/null
+++ 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0.0/package/scripts/params.py
@@ -0,0 +1,42 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import Script
+
+config = Script.get_config()
+
+
+pxf_service_name = "pxf-service"
+stack_name = str(config["hostLevelParams"]["stack_name"])
+
+# Users and Groups
+pxf_user = "pxf"
+pxf_group = pxf_user
+hdfs_superuser_group = 
config["configurations"]["hdfs-site"]["dfs.permissions.superusergroup"]
+user_group = config["configurations"]["cluster-env"]["user_group"]
+tomcat_group = "tomcat"
+
+# Directories
+pxf_conf_dir = "/etc/pxf/conf"
+pxf_instance_dir = "/var/pxf"
+
+# Java home path
+java_home = config["hostLevelParams"]["java_home"] if "java_home" in 
config["hostLevelParams"] else None
+
+# Timeouts
+default_exec_timeout = 600

http://git-wip-us.apache.org/repos/asf/ambari/blob/4763c759/ambari-server/src/main/resources/common-services/PXF/3.0.0.0/package/scripts/pxf.py
-

[3/3] ambari git commit: AMBARI-13725: HAWQ and PXF to support 3 digit versioning instead of 4. (jaoki)

2015-11-06 Thread jaoki
AMBARI-13725: HAWQ and PXF to support 3 digit versioning instead of 4. (jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5dac27be
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5dac27be
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5dac27be

Branch: refs/heads/trunk
Commit: 5dac27bef4a89a049c7e77f49828a20370d0b518
Parents: 9cee9a2
Author: Jun Aoki 
Authored: Fri Nov 6 13:49:33 2015 -0800
Committer: Jun Aoki 
Committed: Fri Nov 6 13:49:33 2015 -0800

--
 .../HAWQ/2.0.0.0/configuration/gpcheck-env.xml  |  86 --
 .../2.0.0.0/configuration/hawq-limits-env.xml   |  46 ---
 .../HAWQ/2.0.0.0/configuration/hawq-site.xml| 178 
 .../2.0.0.0/configuration/hawq-sysctl-env.xml   | 247 
 .../common-services/HAWQ/2.0.0.0/metainfo.xml   | 129 -
 .../HAWQ/2.0.0.0/package/scripts/common.py  | 283 ---
 .../HAWQ/2.0.0.0/package/scripts/constants.py   |  61 
 .../HAWQ/2.0.0.0/package/scripts/hawqmaster.py  |  55 
 .../HAWQ/2.0.0.0/package/scripts/hawqsegment.py | 102 ---
 .../HAWQ/2.0.0.0/package/scripts/hawqstandby.py |  58 
 .../HAWQ/2.0.0.0/package/scripts/hawqstatus.py  |  64 -
 .../2.0.0.0/package/scripts/master_helper.py| 194 -
 .../HAWQ/2.0.0.0/package/scripts/params.py  |  92 --
 .../2.0.0.0/package/scripts/service_check.py| 102 ---
 .../HAWQ/2.0.0.0/package/scripts/utils.py   | 108 ---
 .../2.0.0.0/package/templates/hawq-hosts.j2 |   5 -
 .../package/templates/hawq-profile.sh.j2|   8 -
 .../HAWQ/2.0.0.0/package/templates/slaves.j2|   3 -
 .../HAWQ/2.0.0/configuration/gpcheck-env.xml|  86 ++
 .../2.0.0/configuration/hawq-limits-env.xml |  46 +++
 .../HAWQ/2.0.0/configuration/hawq-site.xml  | 178 
 .../2.0.0/configuration/hawq-sysctl-env.xml | 247 
 .../common-services/HAWQ/2.0.0/metainfo.xml | 129 +
 .../HAWQ/2.0.0/package/scripts/common.py| 283 +++
 .../HAWQ/2.0.0/package/scripts/constants.py |  61 
 .../HAWQ/2.0.0/package/scripts/hawqmaster.py|  55 
 .../HAWQ/2.0.0/package/scripts/hawqsegment.py   | 102 +++
 .../HAWQ/2.0.0/package/scripts/hawqstandby.py   |  58 
 .../HAWQ/2.0.0/package/scripts/hawqstatus.py|  64 +
 .../HAWQ/2.0.0/package/scripts/master_helper.py | 194 +
 .../HAWQ/2.0.0/package/scripts/params.py|  92 ++
 .../HAWQ/2.0.0/package/scripts/service_check.py | 102 +++
 .../HAWQ/2.0.0/package/scripts/utils.py | 108 +++
 .../HAWQ/2.0.0/package/templates/hawq-hosts.j2  |   5 +
 .../2.0.0/package/templates/hawq-profile.sh.j2  |   8 +
 .../HAWQ/2.0.0/package/templates/slaves.j2  |   3 +
 .../PXF/3.0.0.0/configuration/pxf-site.xml  |  19 --
 .../common-services/PXF/3.0.0.0/metainfo.xml|  71 -
 .../PXF/3.0.0.0/package/scripts/params.py   |  42 ---
 .../PXF/3.0.0.0/package/scripts/pxf.py  | 120 
 .../PXF/3.0.0.0/package/templates/pxf-env.j2|  34 ---
 .../PXF/3.0.0/configuration/pxf-site.xml|  19 ++
 .../common-services/PXF/3.0.0/metainfo.xml  |  71 +
 .../PXF/3.0.0/package/scripts/params.py |  42 +++
 .../PXF/3.0.0/package/scripts/pxf.py| 120 
 .../PXF/3.0.0/package/templates/pxf-env.j2  |  34 +++
 46 files changed, 2107 insertions(+), 2107 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/5dac27be/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/gpcheck-env.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/gpcheck-env.xml
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/gpcheck-env.xml
deleted file mode 100755
index a61a34f..000
--- 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/gpcheck-env.xml
+++ /dev/null
@@ -1,86 +0,0 @@
-
-
-
-
-
-  
-
-  content
-  Content
-  Contents of the configuration file 
/usr/local/hawq/etc/gpcheck.cnf. This file is used by 'hawq check' command, 
which can be run manually by gpadmin user on the HAWQ master host. This command 
validates the system parameters and HDFS parameters mentioned in this file to 
ensure optimal HAWQ operation.
-
-[global]
-configfile_version = 4
-
-[linux.mount]
-mount.points = /
-
-[linux.sysctl]
-sysctl.kernel.shmmax = 5
-sysctl.kernel.shmmni = 4096
-sysctl.kernel.shmall = 40
-sysctl.kernel.sem = 250 512000 100 2048
-sysctl.kernel.sysrq = 1
-sysctl.kernel.core_uses_pid = 1
-sysctl.kernel.msgmnb = 65536
-sysctl.kernel.msgmax = 65536
-sysctl.kernel.msgmni = 2048
-sysct

[2/3] ambari git commit: AMBARI-13725: HAWQ and PXF to support 3 digit versioning instead of 4. (jaoki)

2015-11-06 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/5dac27be/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/configuration/hawq-site.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/configuration/hawq-site.xml
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/configuration/hawq-site.xml
new file mode 100644
index 000..41b10dc
--- /dev/null
+++ 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/configuration/hawq-site.xml
@@ -0,0 +1,178 @@
+
+
+
+  
+hawq_master_address_host
+HAWQ Master
+localhost
+The host name of HAWQ master.
+  
+
+  
+hawq_standby_address_host
+HAWQ Standby Master
+localhost
+The host name of HAWQ standby.
+  
+
+  
+hawq_master_address_port
+HAWQ Master Port
+5432
+The port of HAWQ master.
+  
+
+  
+hawq_segment_address_port
+HAWQ Segment Port
+4
+The port of HAWQ segment.
+  
+
+  
+hawq_dfs_url
+HAWQ DFS URL
+localhost:8020/hawq_default
+URL for accessing HDFS.
+  
+
+  
+hawq_master_directory
+HAWQ Master Directory
+/data/hawq/master
+The directory of HAWQ master.
+  
+
+  
+hawq_segment_directory
+HAWQ Segment Directory
+/data/hawq/segment
+The directory of HAWQ segment.
+   
+
+  
+hawq_master_temp_directory
+HAWQ Master Temp Directory
+/tmp
+The temporary directory reserved for HAWQ 
master.
+  
+
+  
+hawq_segment_temp_directory
+HAWQ Segment Temp Directory
+/tmp
+The temporary directory reserved for HAWQ 
segment.
+  
+
+  
+  
+hawq_global_rm_type
+none
+The resource manager type to start for allocating resource. 
+  'none' means HAWQ resource manager exclusively uses whole
+  cluster; 'yarn' means HAWQ resource manager contacts YARN
+  resource manager to negotiate resource.
+
+  
+
+  
+hawq_rm_memory_limit_perseg
+64GB
+The limit of memory usage in a HAWQ segment when 
+  hawq_global_rm_type is set 'none'.
+
+  
+
+  
+hawq_rm_nvcore_limit_perseg
+16
+The limit of virtual core usage in a HAWQ segment when 
+  hawq_global_rm_type is set 'none'.
+
+  
+
+  
+hawq_rm_yarn_address
+localhost:8032
+The address of YARN resource manager server.
+  
+
+  
+hawq_rm_yarn_scheduler_address
+localhost:8030
+The address of YARN scheduler server.
+  
+
+  
+hawq_rm_yarn_queue_name
+default
+The YARN queue name to register HAWQ resource 
manager.
+  
+
+  
+hawq_rm_yarn_app_name
+hawq
+The application name to register HAWQ resource manager in 
YARN.
+  
+
+  
+hawq_re_cpu_enable
+false
+  
+
+  
+hawq_re_cgroup_mount_point
+/sys/fs/cgroup
+  
+
+  
+hawq_re_cgroup_hierarchy_name
+hadoop-yarn
+  
+
+  
+hawq_re_cleanup_period
+180
+  
+
+  
+hawq_re_cpu_weight
+1024.0
+  
+
+  
+hawq_re_vcore_pcore_ratio
+1.0
+  
+
+  
+hawq_resourcemanager_master_address_domainsocket_port
+5436
+  
+
+  
+hawq_rm_master_port
+5437
+  
+
+  
+hawq_rm_segment_port
+5438
+  
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/5dac27be/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/configuration/hawq-sysctl-env.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/configuration/hawq-sysctl-env.xml
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/configuration/hawq-sysctl-env.xml
new file mode 100644
index 000..32ae5a5
--- /dev/null
+++ 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/configuration/hawq-sysctl-env.xml
@@ -0,0 +1,247 @@
+
+
+
+  
+kernel.shmmax
+5
+Maximum size in bytes of a single shared memory segment that 
a Linux process can allocate in its
+  virtual address space
+
+  false
+  true
+
+  
+
+  
+kernel.shmmni
+4096
+System wide maximum number of shared memory 
segments
+
+  false
+  true
+
+  
+
+  
+kernel.shmall
+40
+Total amount of shared memory pages that can be used system 
wide
+
+  false
+  true
+
+  
+
+  
+kernel.sem
+250 512000 100 2048
+Parameter to define semaphore related values
+
+  false
+  true
+
+  
+
+  
+kernel.sysrq
+1
+Enable(1)/Disable(0) functions of sysrq
+
+  false
+  true
+
+  
+
+  
+kernel.core_uses_pid
+1
+Enable appending process id to the name of core dump file. 
Ex: core.PID
+
+  false
+  true
+
+  
+
+  
+kernel.msgmnb
+65536
+Default maximum size of a message in bytes
+
+  false
+  true
+
+  
+
+  
+kernel.msgmax
+65536
+Default maximum size of a message queue

+
+  false
+  true
+
+  
+
+  
+kernel.ms

[1/3] ambari git commit: AMBARI-13725: HAWQ and PXF to support 3 digit versioning instead of 4. (jaoki)

2015-11-06 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 9cee9a22e -> 5dac27bef


http://git-wip-us.apache.org/repos/asf/ambari/blob/5dac27be/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/pxf.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/pxf.py
 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/pxf.py
new file mode 100644
index 000..dd0031c
--- /dev/null
+++ 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/pxf.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import Script
+
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.core.resources.accounts import User
+from resource_management.core.resources.system import Directory, File, Execute
+from resource_management.core.source import Template
+
+
+
+class Pxf(Script):
+  """
+  Contains the interface definitions for methods like install,
+  start, stop, status, etc. for the PXF
+  """
+
+  def install(self, env):
+self.install_packages(env)
+self.configure(env)
+
+
+  def configure(self, env):
+import params
+env.set_params(params)
+self.__setup_user_group()
+self.__generate_config_files()
+# pxf-service init exits safely when it is already initialized
+self.__execute_service_command("init")
+
+
+  def start(self, env):
+self.configure(env)
+self.__grant_permissions()
+self.__execute_service_command("restart")
+
+
+  def stop(self, env):
+self.__execute_service_command("stop")
+
+
+  def status(self, env):
+try:
+  self.__execute_service_command("status")
+except Exception:
+  raise ComponentIsNotRunning()
+
+
+  def __execute_service_command(self, command):
+import params
+Execute("service {0} {1}".format(params.pxf_service_name, command),
+  timeout=params.default_exec_timeout,
+  logoutput=True)
+
+
+  def __setup_user_group(self):
+"""
+Creates PXF user with the required groups and bash as default shell
+"""
+import params
+User(params.pxf_user,
+ groups=[params.hdfs_superuser_group, params.user_group, 
params.tomcat_group],
+ shell="/bin/bash")
+
+
+  def __generate_config_files(self):
+"""
+Generates pxf-env.sh file from jinja template and sets the classpath for 
HDP
+"""
+import params
+import shutil
+
+hdp_stack = "HDP"
+
+# Create file pxf-env.sh from jinja template
+File("{0}/pxf-env.sh".format(params.pxf_conf_dir),
+ content = Template("pxf-env.j2"))
+
+# Classpath is set for PHD by default. If stack is HDP, set classpath for 
HDP
+if(params.stack_name == hdp_stack):
+  shutil.copy2("{0}/pxf-privatehdp.classpath".format(params.pxf_conf_dir),
+   "{0}/pxf-private.classpath".format(params.pxf_conf_dir))
+
+XmlConfig("pxf-site.xml",
+  conf_dir=params.pxf_conf_dir,
+  configurations=params.config['configurations']['pxf-site'],
+  
configuration_attributes=params.config['configuration_attributes']['pxf-site'])
+
+
+  def __grant_permissions(self):
+"""
+Grants permission to pxf:pxf for PXF instance directory
+"""
+import params
+Directory(params.pxf_instance_dir,
+  owner=params.pxf_user,
+  group=params.pxf_group,
+  recursive=True)
+
+
+if __name__ == "__main__":
+  Pxf().execute()
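One thing worth flagging in the listing above: status() raises ComponentIsNotRunning, but that name is never imported, so the except branch would fail with a NameError at runtime. Assuming the usual resource_management layout, the missing import would be:

# Assumed fix; resource_management keeps this exception in core.exceptions.
from resource_management.core.exceptions import ComponentIsNotRunning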

http://git-wip-us.apache.org/repos/asf/ambari/blob/5dac27be/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/templates/pxf-env.j2
--
diff --git 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/templates/pxf-env.j2
 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/templates/pxf-env.j2
new file mode 100644
index 000..03f2420
--- /dev/null
+++ 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/templates/pxf-env.j2
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+#Licensed to the Apache Software Foundation (ASF) u

[1/3] ambari git commit: AMBARI-13725: HAWQ and PXF to support 3 digit versioning instead of 4.(jaoki)

2015-11-09 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 86820bc92 -> d02ec979f


http://git-wip-us.apache.org/repos/asf/ambari/blob/d02ec979/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/params.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/params.py
 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/params.py
new file mode 100644
index 000..a4986c9
--- /dev/null
+++ 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/params.py
@@ -0,0 +1,42 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import Script
+
+config = Script.get_config()
+
+
+pxf_service_name = "pxf-service"
+stack_name = str(config["hostLevelParams"]["stack_name"])
+
+# Users and Groups
+pxf_user = "pxf"
+pxf_group = pxf_user
+hdfs_superuser_group = 
config["configurations"]["hdfs-site"]["dfs.permissions.superusergroup"]
+user_group = config["configurations"]["cluster-env"]["user_group"]
+tomcat_group = "tomcat"
+
+# Directories
+pxf_conf_dir = "/etc/pxf/conf"
+pxf_instance_dir = "/var/pxf"
+
+# Java home path
+java_home = config["hostLevelParams"]["java_home"] if "java_home" in 
config["hostLevelParams"] else None
+
+# Timeouts
+default_exec_timeout = 600

http://git-wip-us.apache.org/repos/asf/ambari/blob/d02ec979/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/pxf.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/pxf.py
 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/pxf.py
new file mode 100644
index 000..dd0031c
--- /dev/null
+++ 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/pxf.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import Script
+
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.core.resources.accounts import User
+from resource_management.core.resources.system import Directory, File, Execute
+from resource_management.core.source import Template
+
+
+
+class Pxf(Script):
+  """
+  Contains the interface definitions for methods like install,
+  start, stop, status, etc. for the PXF
+  """
+
+  def install(self, env):
+self.install_packages(env)
+self.configure(env)
+
+
+  def configure(self, env):
+import params
+env.set_params(params)
+self.__setup_user_group()
+self.__generate_config_files()
+# pxf-service init exits safely when it is already initialized
+self.__execute_service_command("init")
+
+
+  def start(self, env):
+self.configure(env)
+self.__grant_permissions()
+self.__execute_service_command("restart")
+
+
+  def stop(self, env):
+self.__execute_service_command("stop")
+
+
+  def status(self, env):
+try:
+  self.__execute_service_command("status")
+except Exception:
+  raise ComponentIsNotRunning()
+
+
+  def __execute_service_command(self, command):
+import params
+Execute("service {0} {1}".format(params.pxf_service_name, command),
+  timeout=params.default_exec_timeout,
+  logoutput=True)
+
+
+  def __setup_user_group(self):
+"""
+Creates PXF user with the required groups and bash as default shell
+"""
+import pa

[3/3] ambari git commit: AMBARI-13725: HAWQ and PXF to support 3 digit versioning instead of 4.(jaoki)

2015-11-09 Thread jaoki
AMBARI-13725: HAWQ and PXF to support 3 digit versioning instead of 4.(jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d02ec979
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d02ec979
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d02ec979

Branch: refs/heads/trunk
Commit: d02ec979f223e91efaa5ad9db1ad6e6377f86dd5
Parents: 86820bc
Author: jaoki 
Authored: Mon Nov 9 22:40:14 2015 -0800
Committer: jaoki 
Committed: Mon Nov 9 22:40:14 2015 -0800

--
 ambari-server/pom.xml   |   3 -
 .../HAWQ/2.0.0.0/configuration/gpcheck-env.xml  |  86 --
 .../2.0.0.0/configuration/hawq-limits-env.xml   |  46 ---
 .../HAWQ/2.0.0.0/configuration/hawq-site.xml| 178 
 .../2.0.0.0/configuration/hawq-sysctl-env.xml   | 247 
 .../common-services/HAWQ/2.0.0.0/metainfo.xml   | 129 -
 .../HAWQ/2.0.0.0/package/scripts/common.py  | 283 ---
 .../HAWQ/2.0.0.0/package/scripts/constants.py   |  61 
 .../HAWQ/2.0.0.0/package/scripts/hawqmaster.py  |  55 
 .../HAWQ/2.0.0.0/package/scripts/hawqsegment.py | 102 ---
 .../HAWQ/2.0.0.0/package/scripts/hawqstandby.py |  58 
 .../HAWQ/2.0.0.0/package/scripts/hawqstatus.py  |  64 -
 .../2.0.0.0/package/scripts/master_helper.py| 194 -
 .../HAWQ/2.0.0.0/package/scripts/params.py  |  92 --
 .../2.0.0.0/package/scripts/service_check.py| 102 ---
 .../HAWQ/2.0.0.0/package/scripts/utils.py   | 108 ---
 .../2.0.0.0/package/templates/hawq-hosts.j2 |   5 -
 .../package/templates/hawq-profile.sh.j2|   8 -
 .../HAWQ/2.0.0.0/package/templates/slaves.j2|   3 -
 .../HAWQ/2.0.0/configuration/gpcheck-env.xml|  86 ++
 .../2.0.0/configuration/hawq-limits-env.xml |  46 +++
 .../HAWQ/2.0.0/configuration/hawq-site.xml  | 178 
 .../2.0.0/configuration/hawq-sysctl-env.xml | 247 
 .../common-services/HAWQ/2.0.0/metainfo.xml | 129 +
 .../HAWQ/2.0.0/package/scripts/common.py| 283 +++
 .../HAWQ/2.0.0/package/scripts/constants.py |  61 
 .../HAWQ/2.0.0/package/scripts/hawqmaster.py|  55 
 .../HAWQ/2.0.0/package/scripts/hawqsegment.py   | 102 +++
 .../HAWQ/2.0.0/package/scripts/hawqstandby.py   |  58 
 .../HAWQ/2.0.0/package/scripts/hawqstatus.py|  64 +
 .../HAWQ/2.0.0/package/scripts/master_helper.py | 194 +
 .../HAWQ/2.0.0/package/scripts/params.py|  92 ++
 .../HAWQ/2.0.0/package/scripts/service_check.py | 102 +++
 .../HAWQ/2.0.0/package/scripts/utils.py | 108 +++
 .../HAWQ/2.0.0/package/templates/hawq-hosts.j2  |  24 ++
 .../2.0.0/package/templates/hawq-profile.sh.j2  |  27 ++
 .../HAWQ/2.0.0/package/templates/slaves.j2  |  22 ++
 .../PXF/3.0.0.0/configuration/pxf-site.xml  |  19 --
 .../common-services/PXF/3.0.0.0/metainfo.xml|  71 -
 .../PXF/3.0.0.0/package/scripts/params.py   |  42 ---
 .../PXF/3.0.0.0/package/scripts/pxf.py  | 120 
 .../PXF/3.0.0.0/package/templates/pxf-env.j2|  34 ---
 .../PXF/3.0.0/configuration/pxf-site.xml|  19 ++
 .../common-services/PXF/3.0.0/metainfo.xml  |  71 +
 .../PXF/3.0.0/package/scripts/params.py |  42 +++
 .../PXF/3.0.0/package/scripts/pxf.py| 120 
 .../PXF/3.0.0/package/templates/pxf-env.j2  |  34 +++
 47 files changed, 2164 insertions(+), 2110 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/d02ec979/ambari-server/pom.xml
--
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index 5a7ddc4..17e9ea9 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -203,9 +203,6 @@
 
src/main/resources/stacks/HDP/2.0._/services/HBASE/package/templates/regionservers.j2
 
src/main/resources/stacks/HDPWIN/2.1/services/*/configuration*/*
 
-
-
src/main/resources/common-services/HAWQ/2.0.0.0/package/templates/*.j2
-
 
 src/test/resources/TestAmbaryServer.samples/**
 src/test/resources/*.txt

http://git-wip-us.apache.org/repos/asf/ambari/blob/d02ec979/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/gpcheck-env.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/gpcheck-env.xml
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/gpcheck-env.xml
deleted file mode 100755
index a61a34f..000
--- 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0.0/configuration/gpcheck-env.xml
+++ /dev/null
@@ -1,86 +0,0

[2/3] ambari git commit: AMBARI-13725: HAWQ and PXF to support 3 digit versioning instead of 4.(jaoki)

2015-11-09 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/d02ec979/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/configuration/hawq-site.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/configuration/hawq-site.xml
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/configuration/hawq-site.xml
new file mode 100644
index 000..41b10dc
--- /dev/null
+++ 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/configuration/hawq-site.xml
@@ -0,0 +1,178 @@
+
+
+
+  
+hawq_master_address_host
+HAWQ Master
+localhost
+The host name of HAWQ master.
+  
+
+  
+hawq_standby_address_host
+HAWQ Standby Master
+localhost
+The host name of HAWQ standby.
+  
+
+  
+hawq_master_address_port
+HAWQ Master Port
+5432
+The port of HAWQ master.
+  
+
+  
+hawq_segment_address_port
+HAWQ Segment Port
+4
+The port of HAWQ segment.
+  
+
+  
+hawq_dfs_url
+HAWQ DFS URL
+localhost:8020/hawq_default
+URL for accessing HDFS.
+  
+
+  
+hawq_master_directory
+HAWQ Master Directory
+/data/hawq/master
+The directory of HAWQ master.
+  
+
+  
+hawq_segment_directory
+HAWQ Segment Directory
+/data/hawq/segment
+The directory of HAWQ segment.
+   
+
+  
+hawq_master_temp_directory
+HAWQ Master Temp Directory
+/tmp
+The temporary directory reserved for HAWQ 
master.
+  
+
+  
+hawq_segment_temp_directory
+HAWQ Segment Temp Directory
+/tmp
+The temporary directory reserved for HAWQ 
segment.
+  
+
+  
+  
+hawq_global_rm_type
+none
+The resource manager type to start for allocating resource. 
+  'none' means HAWQ resource manager exclusively uses whole
+  cluster; 'yarn' means HAWQ resource manager contacts YARN
+  resource manager to negotiate resource.
+
+  
+
+  
+hawq_rm_memory_limit_perseg
+64GB
+The limit of memory usage in a HAWQ segment when 
+  hawq_global_rm_type is set 'none'.
+
+  
+
+  
+hawq_rm_nvcore_limit_perseg
+16
+The limit of virtual core usage in a HAWQ segment when 
+  hawq_global_rm_type is set 'none'.
+
+  
+
+  
+hawq_rm_yarn_address
+localhost:8032
+The address of YARN resource manager server.
+  
+
+  
+hawq_rm_yarn_scheduler_address
+localhost:8030
+The address of YARN scheduler server.
+  
+
+  
+hawq_rm_yarn_queue_name
+default
+The YARN queue name to register HAWQ resource 
manager.
+  
+
+  
+hawq_rm_yarn_app_name
+hawq
+The application name to register HAWQ resource manager in 
YARN.
+  
+
+  
+hawq_re_cpu_enable
+false
+  
+
+  
+hawq_re_cgroup_mount_point
+/sys/fs/cgroup
+  
+
+  
+hawq_re_cgroup_hierarchy_name
+hadoop-yarn
+  
+
+  
+hawq_re_cleanup_period
+180
+  
+
+  
+hawq_re_cpu_weight
+1024.0
+  
+
+  
+hawq_re_vcore_pcore_ratio
+1.0
+  
+
+  
+hawq_resourcemanager_master_address_domainsocket_port
+5436
+  
+
+  
+hawq_rm_master_port
+5437
+  
+
+  
+hawq_rm_segment_port
+5438
+  
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/d02ec979/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/configuration/hawq-sysctl-env.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/configuration/hawq-sysctl-env.xml
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/configuration/hawq-sysctl-env.xml
new file mode 100644
index 000..32ae5a5
--- /dev/null
+++ 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/configuration/hawq-sysctl-env.xml
@@ -0,0 +1,247 @@
+
+
+
+  
+kernel.shmmax
+5
+Maximum size in bytes of a single shared memory segment that 
a Linux process can allocate in its
+  virtual address space
+
+  false
+  true
+
+  
+
+  
+kernel.shmmni
+4096
+System wide maximum number of shared memory 
segments
+
+  false
+  true
+
+  
+
+  
+kernel.shmall
+40
+Total amount of shared memory pages that can be used system 
wide
+
+  false
+  true
+
+  
+
+  
+kernel.sem
+250 512000 100 2048
+Parameter to define semaphore related values
+
+  false
+  true
+
+  
+
+  
+kernel.sysrq
+1
+Enable(1)/Disable(0) functions of sysrq
+
+  false
+  true
+
+  
+
+  
+kernel.core_uses_pid
+1
+Enable appending process id to the name of core dump file. 
Ex: core.PID
+
+  false
+  true
+
+  
+
+  
+kernel.msgmnb
+65536
+Default maximum size of a message in bytes
+
+  false
+  true
+
+  
+
+  
+kernel.msgmax
+65536
+Default maximum size of a message queue
+
+  false
+  true
+
+  
+
+  
+kernel.ms

ambari git commit: AMBARI-13759: PXF start fails after deploying through blueprint (mithmatt via jaoki)

2015-11-10 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 65629e789 -> 7870a4f24


AMBARI-13759: PXF start fails after deploying through blueprint (mithmatt via 
jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7870a4f2
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7870a4f2
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7870a4f2

Branch: refs/heads/trunk
Commit: 7870a4f246145465c329fd1cc0c38512220aaee5
Parents: 65629e7
Author: Jun Aoki 
Authored: Tue Nov 10 11:03:58 2015 -0800
Committer: Jun Aoki 
Committed: Tue Nov 10 11:03:58 2015 -0800

--
 .../common-services/PXF/3.0.0/configuration/pxf-site.xml| 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/7870a4f2/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-site.xml
--
diff --git 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-site.xml
 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-site.xml
index 0b3a36e..20ca06b 100644
--- 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-site.xml
+++ 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/configuration/pxf-site.xml
@@ -16,4 +16,9 @@
limitations under the License.
 -->
  
+   
+   pxf.service.kerberos.keytab
+   /etc/security/keytabs/pxf.service.keytab
+   Path to keytab file owned by pxf service with 
permissions 0400
+   
 
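As a purely hypothetical illustration of how a script might consume the property added above (the key and default come from this hunk; the variable name and lookup style are assumptions, mirroring the params.py pattern used elsewhere in PXF):

# Illustration only; not part of this commit.
from resource_management import Script

config = Script.get_config()
pxf_keytab_path = config["configurations"]["pxf-site"].get(
    "pxf.service.kerberos.keytab", "/etc/security/keytabs/pxf.service.keytab")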



ambari git commit: AMBARI-13825: Exchange of keys should be done during start phase (not during install) (bhuvnesh2703 via jaoki)

2015-11-12 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 113f3712b -> 545ea7493


AMBARI-13825: Exchange of keys should be done during start phase (not during 
install) (bhuvnesh2703 via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/545ea749
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/545ea749
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/545ea749

Branch: refs/heads/trunk
Commit: 545ea7493a58f3190b29ee394f18a3f88499e6c9
Parents: 113f371
Author: Jun Aoki 
Authored: Thu Nov 12 12:01:14 2015 -0800
Committer: Jun Aoki 
Committed: Thu Nov 12 12:01:14 2015 -0800

--
 .../common-services/HAWQ/2.0.0/package/scripts/master_helper.py | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/545ea749/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/master_helper.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/master_helper.py
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/master_helper.py
index 35f5112..7261a04 100644
--- 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/master_helper.py
+++ 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/master_helper.py
@@ -66,7 +66,6 @@ def configure_master():
   common.setup_user()
   common.setup_common_configurations()
   __setup_master_specific_conf_files()
-  __setup_passwordless_ssh()
   __setup_hawq_user_profile()
   __create_local_dirs()
 
@@ -165,6 +164,10 @@ def start_master():
 Fail("Host should be either active Hawq master or Hawq standby.")
 
   is_active_master = __is_active_master()
+  # Exchange ssh keys from active hawq master before starting.
+  if is_active_master:
+__setup_passwordless_ssh()
+
   if __is_local_initialized():
 __start_local_master()
 



ambari git commit: AMBARI-13827: hawq standby stop is not working (bhuvnesh2703 via jaoki)

2015-11-12 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/trunk 545ea7493 -> dd1741707


AMBARI-13827: hawq standby stop is not working (bhuvnesh2703 via jaoki)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/dd174170
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/dd174170
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/dd174170

Branch: refs/heads/trunk
Commit: dd17417072e576edd9d86ba5772ba786c72f835e
Parents: 545ea74
Author: Jun Aoki 
Authored: Thu Nov 12 12:03:43 2015 -0800
Committer: Jun Aoki 
Committed: Thu Nov 12 12:03:43 2015 -0800

--
 .../HAWQ/2.0.0/package/scripts/hawqsegment.py |  4 ++--
 .../HAWQ/2.0.0/package/scripts/master_helper.py   | 10 ++
 .../common-services/HAWQ/2.0.0/package/scripts/utils.py   |  7 ---
 3 files changed, 12 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/dd174170/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqsegment.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqsegment.py
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqsegment.py
index b4be502..0e349d2 100644
--- 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqsegment.py
+++ 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqsegment.py
@@ -50,7 +50,7 @@ class HawqSegment(Script):
 return utils.exec_hawq_operation(
   constants.START, 
   "{0} -a".format(constants.SEGMENT), 
-  
not_if=utils.chk_postgres_status_cmd(params.hawq_segment_address_port))
+  
not_if=utils.chk_hawq_process_status_cmd(params.hawq_segment_address_port))
 
   def start(self, env):
 self.configure(env)
@@ -67,7 +67,7 @@ class HawqSegment(Script):
   def stop(self, env):
 import params
 
-utils.exec_hawq_operation(constants.STOP, "{0} 
-a".format(constants.SEGMENT), only_if=utils.chk_postgres_status_cmd(
+utils.exec_hawq_operation(constants.STOP, "{0} 
-a".format(constants.SEGMENT), only_if=utils.chk_hawq_process_status_cmd(
 params.hawq_segment_address_port))
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/dd174170/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/master_helper.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/master_helper.py
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/master_helper.py
index 7261a04..fdfd7c9 100644
--- 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/master_helper.py
+++ 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/master_helper.py
@@ -121,10 +121,11 @@ def __start_local_master():
   Starts HAWQ Master or HAWQ Standby Master component on the host
   """
   import params
+  component_name = __get_component_name()
   utils.exec_hawq_operation(
 constants.START, 
-"{0} -a".format(__get_component_name()), 
-not_if=utils.chk_postgres_status_cmd(params.hawq_master_address_port))
+"{0} -a".format(component_name),
+
not_if=utils.chk_hawq_process_status_cmd(params.hawq_master_address_port, 
component_name))
 
   
 def __is_local_initialized():
@@ -183,10 +184,11 @@ def stop_master():
   Stops the HAWQ Master/Standby
   """
   import params
+  component_name = __get_component_name()
   utils.exec_hawq_operation(
 constants.STOP,
-"{0} -a".format(__get_component_name()),
-
only_if=utils.chk_postgres_status_cmd(params.hawq_master_address_port))
+"{0} -a".format(component_name),
+
only_if=utils.chk_hawq_process_status_cmd(params.hawq_master_address_port, 
component_name))
 
 
 def __is_active_master():

http://git-wip-us.apache.org/repos/asf/ambari/blob/dd174170/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/utils.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/utils.py
 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/utils.py
index da51c19..8e2b157 100644
--- 
a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/utils.py
+++ 
b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/utils.py
@@ -23,11 +23,12 @@ from resou
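The utils.py hunk is truncated here, so the body of the renamed helper is not visible. A hedged sketch of what chk_hawq_process_status_cmd plausibly does, given how the callers above use it as a not_if/only_if guard (the actual implementation in the commit may differ):

# Hypothetical sketch: build a shell command that succeeds when something is
# listening on the given port, optionally narrowed by component name.
def chk_hawq_process_status_cmd(port, component_name=None):
    cmd = "netstat -tupln | egrep ':{0}\\s'".format(port)
    if component_name:
        cmd += " | egrep {0}".format(component_name)
    return cmd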

[04/11] ambari git commit: AMBARI-13665: Enable HAWQ/PXF services in Ambari 2.1.3

2015-11-19 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/8857e63b/ambari-server/src/test/python/stacks/2.3/common/services-normal-hawq-3-hosts.json
--
diff --git 
a/ambari-server/src/test/python/stacks/2.3/common/services-normal-hawq-3-hosts.json
 
b/ambari-server/src/test/python/stacks/2.3/common/services-normal-hawq-3-hosts.json
new file mode 100644
index 000..9fab56d
--- /dev/null
+++ 
b/ambari-server/src/test/python/stacks/2.3/common/services-normal-hawq-3-hosts.json
@@ -0,0 +1,2564 @@
+{
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/?fields=Versions/stack_name,Versions/stack_version,Versions/parent_stack_version,services/StackServices/service_name,services/StackServices/service_version,services/components/StackServiceComponents,services/components/dependencies,services/components/auto_deploy,services/configurations/StackConfigurations/property_depends_on,services/configurations/dependencies/StackConfigurationDependency/dependency_name,services/configurations/dependencies/StackConfigurationDependency/dependency_type,services/configurations/StackConfigurations/type&services/StackServices/service_name.in(HDFS,ZOOKEEPER,HAWQ)",
+  "Versions" : {
+"parent_stack_version" : "2.2",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"stack_hierarchy" : {
+  "stack_name" : "HDP",
+  "stack_versions" : [ "2.2", "2.1", "2.0.6" ]
+}
+  },
+  "services" : [ {
+"href" : "/api/v1/stacks/HDP/versions/2.3/services/HAWQ",
+"StackServices" : {
+  "service_name" : "HAWQ",
+  "service_version" : "2.0",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+},
+"components" : [ {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "1",
+"component_category" : "MASTER",
+"component_name" : "HAWQMASTER",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Master",
+"is_client" : false,
+"is_master" : true,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ "c6403.ambari.apache.org" ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER/dependencies/HDFS_CLIENT",
+"Dependencies" : {
+  "component_name" : "HDFS_CLIENT",
+  "dependent_component_name" : "HAWQMASTER",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  }, {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER/dependencies/NAMENODE",
+"Dependencies" : {
+  "component_name" : "NAMENODE",
+  "dependent_component_name" : "HAWQMASTER",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  } ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSEGMENT",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "1+",
+"component_category" : "SLAVE",
+"component_name" : "HAWQSEGMENT",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Segment",
+"is_client" : false,
+"is_master" : false,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ "c6402.ambari.apache.org", "c6403.ambari.apache.org", 
"c6401.ambari.apache.org" ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSEGMENT/dependencies/DATANODE",
+"Dependencies" : {
+  "component_name" : "DATANODE",
+  "dependent_component_name" : "HAWQSEGMENT",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  } ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSTANDBY",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "0-1",
+"component_category" : "MASTER",
+"component_name" : "HAWQSTANDBY",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Standby Master",
+"is_client" : false,
+"is_master" : true,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ "c6402.ambari.apache.org" ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSTANDBY/dependencies/HDFS_CLIENT",
+"Dependencies" : {
+  "component_name" : "HDFS_CLIENT",
+  "dependent_component_name" : "HAWQSTANDBY",
+  "dependent_servi

[10/11] ambari git commit: AMBARI-13665: Enable HAWQ/PXF services in Ambari 2.1.3

2015-11-19 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/8857e63b/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/pxf.py
--
diff --git 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/pxf.py
 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/pxf.py
new file mode 100644
index 000..dd0031c
--- /dev/null
+++ 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/scripts/pxf.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import Script
+
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.core.resources.accounts import User
+from resource_management.core.resources.system import Directory, File, Execute
+from resource_management.core.source import Template
+
+
+
+class Pxf(Script):
+  """
+  Contains the interface definitions for methods like install,
+  start, stop, status, etc. for the PXF
+  """
+
+  def install(self, env):
+self.install_packages(env)
+self.configure(env)
+
+
+  def configure(self, env):
+import params
+env.set_params(params)
+self.__setup_user_group()
+self.__generate_config_files()
+# pxf-service init exits safely when it is already initialized
+self.__execute_service_command("init")
+
+
+  def start(self, env):
+self.configure(env)
+self.__grant_permissions()
+self.__execute_service_command("restart")
+
+
+  def stop(self, env):
+self.__execute_service_command("stop")
+
+
+  def status(self, env):
+try:
+  self.__execute_service_command("status")
+except Exception:
+  raise ComponentIsNotRunning()
+
+
+  def __execute_service_command(self, command):
+import params
+Execute("service {0} {1}".format(params.pxf_service_name, command),
+  timeout=params.default_exec_timeout,
+  logoutput=True)
+
+
+  def __setup_user_group(self):
+"""
+Creates PXF user with the required groups and bash as default shell
+"""
+import params
+User(params.pxf_user,
+ groups=[params.hdfs_superuser_group, params.user_group, 
params.tomcat_group],
+ shell="/bin/bash")
+
+
+  def __generate_config_files(self):
+"""
+Generates pxf-env.sh file from jinja template and sets the classpath for 
HDP
+"""
+import params
+import shutil
+
+hdp_stack = "HDP"
+
+# Create file pxf-env.sh from jinja template
+File("{0}/pxf-env.sh".format(params.pxf_conf_dir),
+ content = Template("pxf-env.j2"))
+
+# Classpath is set for PHD by default. If stack is HDP, set classpath for 
HDP
+if(params.stack_name == hdp_stack):
+  shutil.copy2("{0}/pxf-privatehdp.classpath".format(params.pxf_conf_dir),
+   "{0}/pxf-private.classpath".format(params.pxf_conf_dir))
+
+XmlConfig("pxf-site.xml",
+  conf_dir=params.pxf_conf_dir,
+  configurations=params.config['configurations']['pxf-site'],
+  
configuration_attributes=params.config['configuration_attributes']['pxf-site'])
+
+
+  def __grant_permissions(self):
+"""
+Grants permission to pxf:pxf for PXF instance directory
+"""
+import params
+Directory(params.pxf_instance_dir,
+  owner=params.pxf_user,
+  group=params.pxf_group,
+  recursive=True)
+
+
+if __name__ == "__main__":
+  Pxf().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/8857e63b/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/templates/pxf-env.j2
--
diff --git 
a/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/templates/pxf-env.j2
 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/templates/pxf-env.j2
new file mode 100644
index 000..03f2420
--- /dev/null
+++ 
b/ambari-server/src/main/resources/common-services/PXF/3.0.0/package/templates/pxf-env.j2
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+#Licensed to the Apache Software Foundation (ASF) under one
+#or more contributor license agreements.  See the NOTICE file
+#distrib

[02/11] ambari git commit: AMBARI-13665: Enable HAWQ/PXF services in Ambari 2.1.3

2015-11-19 Thread jaoki
http://git-wip-us.apache.org/repos/asf/ambari/blob/8857e63b/ambari-server/src/test/python/stacks/2.3/common/services-standby_ambari_colo-3-hosts.json
--
diff --git 
a/ambari-server/src/test/python/stacks/2.3/common/services-standby_ambari_colo-3-hosts.json
 
b/ambari-server/src/test/python/stacks/2.3/common/services-standby_ambari_colo-3-hosts.json
new file mode 100644
index 000..ca0637c
--- /dev/null
+++ 
b/ambari-server/src/test/python/stacks/2.3/common/services-standby_ambari_colo-3-hosts.json
@@ -0,0 +1,2564 @@
+{
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/?fields=Versions/stack_name,Versions/stack_version,Versions/parent_stack_version,services/StackServices/service_name,services/StackServices/service_version,services/components/StackServiceComponents,services/components/dependencies,services/components/auto_deploy,services/configurations/StackConfigurations/property_depends_on,services/configurations/dependencies/StackConfigurationDependency/dependency_name,services/configurations/dependencies/StackConfigurationDependency/dependency_type,services/configurations/StackConfigurations/type&services/StackServices/service_name.in(HDFS,ZOOKEEPER,HAWQ)",
+  "Versions" : {
+"parent_stack_version" : "2.2",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"stack_hierarchy" : {
+  "stack_name" : "HDP",
+  "stack_versions" : [ "2.2", "2.1", "2.0.6" ]
+}
+  },
+  "services" : [ {
+"href" : "/api/v1/stacks/HDP/versions/2.3/services/HAWQ",
+"StackServices" : {
+  "service_name" : "HAWQ",
+  "service_version" : "2.0",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+},
+"components" : [ {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "1",
+"component_category" : "MASTER",
+"component_name" : "HAWQMASTER",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Master",
+"is_client" : false,
+"is_master" : true,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ "c6403.ambari.apache.org" ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER/dependencies/HDFS_CLIENT",
+"Dependencies" : {
+  "component_name" : "HDFS_CLIENT",
+  "dependent_component_name" : "HAWQMASTER",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  }, {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQMASTER/dependencies/NAMENODE",
+"Dependencies" : {
+  "component_name" : "NAMENODE",
+  "dependent_component_name" : "HAWQMASTER",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  } ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSEGMENT",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "1+",
+"component_category" : "SLAVE",
+"component_name" : "HAWQSEGMENT",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Segment",
+"is_client" : false,
+"is_master" : false,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ "c6403.ambari.apache.org" ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSEGMENT/dependencies/DATANODE",
+"Dependencies" : {
+  "component_name" : "DATANODE",
+  "dependent_component_name" : "HAWQSEGMENT",
+  "dependent_service_name" : "HAWQ",
+  "stack_name" : "HDP",
+  "stack_version" : "2.3"
+}
+  } ]
+}, {
+  "href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSTANDBY",
+  "StackServiceComponents" : {
+"advertise_version" : false,
+"cardinality" : "0-1",
+"component_category" : "MASTER",
+"component_name" : "HAWQSTANDBY",
+"custom_commands" : [ ],
+"display_name" : "HAWQ Standby Master",
+"is_client" : false,
+"is_master" : true,
+"service_name" : "HAWQ",
+"stack_name" : "HDP",
+"stack_version" : "2.3",
+"hostnames" : [ "c6401.ambari.apache.org" ]
+  },
+  "dependencies" : [ {
+"href" : 
"/api/v1/stacks/HDP/versions/2.3/services/HAWQ/components/HAWQSTANDBY/dependencies/HDFS_CLIENT",
+"Dependencies" : {
+  "component_name" : "HDFS_CLIENT",
+  "dependent_component_name" : "HAWQSTANDBY",
+  "dependent_service_name" : "HAWQ",
+   

[01/11] ambari git commit: AMBARI-13665: Enable HAWQ/PXF services in Ambari 2.1.3

2015-11-19 Thread jaoki
Repository: ambari
Updated Branches:
  refs/heads/branch-2.1 12f9d0764 -> 8857e63b8


http://git-wip-us.apache.org/repos/asf/ambari/blob/8857e63b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
--
diff --git 
a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py 
b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
index 02753d4..99cfa6c 100644
--- a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
@@ -16,6 +16,7 @@ See the License for the specific language governing 
permissions and
 limitations under the License.
 '''
 
+import json
 import os
 import socket
 from unittest import TestCase
@@ -51,6 +52,12 @@ class TestHDP23StackAdvisor(TestCase):
 self.get_system_min_uid_real = self.stackAdvisor.get_system_min_uid
 self.stackAdvisor.get_system_min_uid = self.get_system_min_uid_magic
 
+  def load_json(self, filename):
+    file = os.path.join(self.testDirectory, filename)
+    with open(file, 'rb') as f:
+      data = json.load(f)
+    return data
+
   @patch('__builtin__.open')
   @patch('os.path.exists')
   def get_system_min_uid_magic(self, exists_mock, open_mock):
@@ -71,6 +78,174 @@ class TestHDP23StackAdvisor(TestCase):
     open_mock.return_value = MagicFile()
     return self.get_system_min_uid_real()
 
+
+  def test_createComponentLayoutRecommendations_hawq_1_Host(self):
+    """ Test that HAWQSTANDBY is not recommended on a single node cluster """
+
+    services = self.load_json("services-hawq-1-host.json")
+    componentsListList = [service["components"] for service in services["services"]]
+    componentsList = [item for sublist in componentsListList for item in sublist]
+    componentNames = [component["StackServiceComponents"]["component_name"] for component in componentsList]
+    self.assertTrue('HAWQSTANDBY' in componentNames)
+
+    hosts = self.load_json("hosts-1-host.json")
+    hostsList = [host["Hosts"]["host_name"] for host in hosts["items"]]
+    self.assertEquals(len(hostsList), 1)
+
+    recommendations = self.stackAdvisor.createComponentLayoutRecommendations(services, hosts)
+
+    recommendedComponentsListList = [hostgroup["components"] for hostgroup in recommendations["blueprint"]["host_groups"]]
+    recommendedComponents = [item["name"] for sublist in recommendedComponentsListList for item in sublist]
+    self.assertTrue('HAWQMASTER' in recommendedComponents)
+    self.assertFalse('HAWQSTANDBY' in recommendedComponents)
+    self.assertTrue('HAWQSEGMENT' in recommendedComponents)
+
+
+  def test_createComponentLayoutRecommendations_hawq_3_Hosts(self):
+    """ Test that HAWQSTANDBY is recommended on a 3-node cluster """
+
+    services = self.load_json("services-hawq-3-hosts.json")
+    componentsListList = [service["components"] for service in services["services"]]
+    componentsList = [item for sublist in componentsListList for item in sublist]
+    componentNames = [component["StackServiceComponents"]["component_name"] for component in componentsList]
+    self.assertTrue('HAWQSTANDBY' in componentNames)
+
+    hosts = self.load_json("hosts-3-hosts.json")
+    hostsList = [host["Hosts"]["host_name"] for host in hosts["items"]]
+    self.assertEquals(len(hostsList), 3)
+
+    recommendations = self.stackAdvisor.createComponentLayoutRecommendations(services, hosts)
+
+    recommendedComponentsListList = [hostgroup["components"] for hostgroup in recommendations["blueprint"]["host_groups"]]
+    recommendedComponents = [item["name"] for sublist in recommendedComponentsListList for item in sublist]
+    self.assertTrue('HAWQMASTER' in recommendedComponents)
+    self.assertTrue('HAWQSTANDBY' in recommendedComponents)
+    self.assertTrue('HAWQSEGMENT' in recommendedComponents)
+
+    # make sure master components are not collocated
+    for sublist in recommendedComponentsListList:
+      hostComponents = [item["name"] for item in sublist]
+      self.assertFalse(set(['HAWQMASTER', 'HAWQSTANDBY']).issubset(hostComponents))
+
+
+  def test_createComponentLayoutRecommendations_no_hawq_3_Hosts(self):
+    """ Test no failures when there are no HAWQ components """
+
+    services = self.load_json("services-nohawq-3-hosts.json")
+    componentsListList = [service["components"] for service in services["services"]]
+    componentsList = [item for sublist in componentsListList for item in sublist]
+    componentNames = [component["StackServiceComponents"]["component_name"] for component in componentsList]
+    self.assertFalse('HAWQMASTER' in componentNames)
+    self.assertFalse('HAWQSTANDBY' in componentNames)
+    self.assertFalse('HAWQSEGMENT' in componentNames)
+
+    hosts = self.load_json("hosts-3-hosts.json")
+    hostsList = [host["Hosts"]["host_name"] for host in hosts["items"]]
+    self.assertEquals(len(hostsList), 3)

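The layout tests above lean on two small idioms: flattening nested component lists with a double comprehension, and checking that HAWQMASTER and HAWQSTANDBY never land in the same host group. A standalone sketch of both follows; the host_groups sample is made up for demonstration and merely shaped like a blueprint recommendation:

# Illustrative host-group layout (not real advisor output).
host_groups = [
  {"components": [{"name": "HAWQMASTER"}, {"name": "HAWQSEGMENT"}]},
  {"components": [{"name": "HAWQSTANDBY"}, {"name": "HAWQSEGMENT"}]},
  {"components": [{"name": "HAWQSEGMENT"}]},
]

# Flatten the nested lists into one list of component names.
namesPerGroup = [[c["name"] for c in g["components"]] for g in host_groups]
allNames = [name for group in namesPerGroup for name in group]
assert "HAWQMASTER" in allNames and "HAWQSTANDBY" in allNames

# The masters must not be collocated: no single group contains both.
for group in namesPerGroup:
  assert not set(["HAWQMASTER", "HAWQSTANDBY"]).issubset(group)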