[ambari] branch trunk updated: AMBARI-24714. Avoid multi-threading and caching issues when aborting requests and processing agent reports at the same time (#2411)

2018-10-03 Thread smolnar
This is an automated email from the ASF dual-hosted git repository.

smolnar pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new af3f9ff  AMBARI-24714. Avoid multi-threading and caching issues when aborting requests and processing agent reports at the same time (#2411)
af3f9ff is described below

commit af3f9ff0b367340c432431384d5d805a72f8495a
Author: Sandor Molnar 
AuthorDate: Thu Oct 4 07:07:46 2018 +0200

    AMBARI-24714. Avoid multi-threading and caching issues when aborting requests and processing agent reports at the same time (#2411)
---
 .../server/actionmanager/ActionDBAccessorImpl.java | 171 +
 1 file changed, 103 insertions(+), 68 deletions(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessorImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessorImpl.java
index 94aee41..5c1fa66 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessorImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionDBAccessorImpl.java
@@ -28,6 +28,8 @@ import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.ambari.annotations.TransactionalLock;
 import org.apache.ambari.annotations.TransactionalLock.LockArea;
@@ -153,6 +155,10 @@ public class ActionDBAccessorImpl implements ActionDBAccessor {
   private Cache hostRoleCommandCache;
   private long cacheLimit; //may be exceeded to store tasks from one request
 
+  //We do lock for writing/reading when HRCs are manipulated/read by different threads
+  //For instance we do lock for writing when aborting all HRCs of a request to avoid reading the same HRCs by agent report processor (so that we lock there for reading too)
+  private final ReadWriteLock hrcOperationsLock = new ReentrantReadWriteLock();
+
   @Inject
   public ActionDBAccessorImpl(@Named("executionCommandCacheSize") long cacheLimit,
       AmbariEventPublisher eventPublisher) {
@@ -213,32 +219,35 @@ public class ActionDBAccessorImpl implements ActionDBAccessor {
    */
   @Override
   public Collection<HostRoleCommand> abortOperation(long requestId) {
-    Collection<HostRoleCommand> abortedHostRoleCommands = Collections.emptyList();
-    long now = System.currentTimeMillis();
-
-    // only request commands which actually need to be aborted; requesting all
-    // commands here can cause OOM problems during large requests like upgrades
-    List<HostRoleCommandEntity> commands = hostRoleCommandDAO.findByRequestIdAndStatuses(requestId,
-        HostRoleStatus.SCHEDULED_STATES);
-
-    for (HostRoleCommandEntity command : commands) {
-      command.setStatus(HostRoleStatus.ABORTED);
-      command.setEndTime(now);
-      LOG.info("Aborting command. Hostname " + command.getHostName()
-          + " role " + command.getRole()
-          + " requestId " + command.getRequestId()
-          + " taskId " + command.getTaskId()
-          + " stageId " + command.getStageId());
-
-      auditLog(command, requestId);
-    }
+    try {
+      hrcOperationsLock.writeLock().lock();
+      Collection<HostRoleCommand> abortedHostRoleCommands = new ArrayList<>();
+      long now = System.currentTimeMillis();
+
+      // only request commands which actually need to be aborted; requesting all
+      // commands here can cause OOM problems during large requests like upgrades
+      List<HostRoleCommandEntity> commands = hostRoleCommandDAO.findByRequestIdAndStatuses(requestId,
+          HostRoleStatus.SCHEDULED_STATES);
+
+      for (HostRoleCommandEntity command : commands) {
+        command.setStatus(HostRoleStatus.ABORTED);
+        command.setEndTime(now);
+        abortedHostRoleCommands.add(hostRoleCommandDAO.merge(command));
+        LOG.info("Aborted command. Hostname " + command.getHostName()
+            + " role " + command.getRole()
+            + " requestId " + command.getRequestId()
+            + " taskId " + command.getTaskId()
+            + " stageId " + command.getStageId());
+
+        auditLog(command, requestId);
+        cacheHostRoleCommand(hostRoleCommandFactory.createExisting(command));
+      }
 
-    // no need to merge if there's nothing to merge
-    if (!commands.isEmpty()) {
-      abortedHostRoleCommands = hostRoleCommandDAO.mergeAll(commands);
+      endRequest(requestId);
+      return abortedHostRoleCommands;
+    } finally {
+      hrcOperationsLock.writeLock().unlock();
     }
-    endRequest(requestId);
-    return abortedHostRoleCommands;
   }
 
   /* (non-Javadoc)
@@ -514,12 +523,21 @@ public class ActionDBAccessorImpl implements 
ActionDBAccessor {
 
 List requestsToCheck = new ArrayList<>();
 
-    List<HostRoleCommandEntity> commandEntities = hostRoleCommandDAO.findByPKs(taskReports.keySet());
-  

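For context on the pattern this commit introduces: abortOperation now takes the write side of a ReentrantReadWriteLock, while agent-report processing is meant to take the read side, so neither path observes half-updated host role commands. Below is a minimal, self-contained sketch of that read/write-lock idiom; the store and the string command states are simplified stand-ins, not Ambari's ActionDBAccessorImpl or HostRoleCommand classes.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class CommandStoreSketch {
  private final ReadWriteLock hrcLock = new ReentrantReadWriteLock();
  private final List<String> commandStates = new ArrayList<>();

  // Abort path: the write lock gives exclusive access while every command is rewritten.
  public void abortAll() {
    hrcLock.writeLock().lock();
    try {
      for (int i = 0; i < commandStates.size(); i++) {
        commandStates.set(i, "ABORTED");
      }
    } finally {
      hrcLock.writeLock().unlock();
    }
  }

  // Report-processing path: readers may run concurrently with each other,
  // but never while an abort holds the write lock.
  public List<String> readAll() {
    hrcLock.readLock().lock();
    try {
      return new ArrayList<>(commandStates);
    } finally {
      hrcLock.readLock().unlock();
    }
  }
}

ReentrantReadWriteLock allows any number of concurrent readers but gives a writer exclusive access, which is exactly what serializing an abort against report processing requires.
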
[ambari] branch trunk updated: AMBARI-24727 - Autoscaling based on metric alerts (#2416)

2018-10-03 Thread krisztiankasa
This is an automated email from the ASF dual-hosted git repository.

krisztiankasa pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 27d3aba  AMBARI-24727 - Autoscaling based on metric alerts (#2416)
27d3aba is described below

commit 27d3abace07c8cdd209b918a8981a4e08618d7dd
Author: kasakrisz <33458261+kasakr...@users.noreply.github.com>
AuthorDate: Thu Oct 4 06:47:58 2018 +0200

AMBARI-24727 - Autoscaling based on metric alerts (#2416)
---
 .../python/ambari_commons/ambari_metrics_helper.py | 163 -
 1 file changed, 162 insertions(+), 1 deletion(-)

diff --git a/ambari-common/src/main/python/ambari_commons/ambari_metrics_helper.py b/ambari-common/src/main/python/ambari_commons/ambari_metrics_helper.py
index 6444dfd..07e4831 100644
--- a/ambari-common/src/main/python/ambari_commons/ambari_metrics_helper.py
+++ b/ambari-common/src/main/python/ambari_commons/ambari_metrics_helper.py
@@ -18,13 +18,33 @@ See the License for the specific language governing permissions and
 limitations under the License.
 '''
 
+import ambari_commons.network as network
+import ambari_simplejson as json
+import logging
 import os
 import random
+import urllib
+from ambari_agent.AmbariConfig import AmbariConfig
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.is_empty import is_empty
+
+logger = logging.getLogger(__name__)
+
 
 DEFAULT_COLLECTOR_SUFFIX = '.sink.timeline.collector.hosts'
 DEFAULT_METRICS2_PROPERTIES_FILE_NAME = 'hadoop-metrics2.properties'
 
+AMS_METRICS_GET_URL = "/ws/v1/timeline/metrics?%s"
+
+METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY = '{{ams-site/timeline.metrics.service.webapp.address}}'
+METRICS_COLLECTOR_VIP_HOST_KEY = '{{cluster-env/metrics_collector_external_hosts}}'
+METRICS_COLLECTOR_VIP_PORT_KEY = '{{cluster-env/metrics_collector_external_port}}'
+AMS_METRICS_COLLECTOR_USE_SSL_KEY = '{{ams-site/timeline.metrics.service.http.policy}}'
+CONNECTION_TIMEOUT_KEY = 'http.connection.timeout'
+CONNECTION_TIMEOUT_DEFAULT = 5.0
+
+
 def select_metric_collector_for_sink(sink_name):
   # TODO check '*' sink_name
 
@@ -71,4 +91,145 @@ def load_properties_from_file(filepath, sep='=', comment_char='#'):
         key = key_value[0].strip()
         value = sep.join(key_value[1:]).strip('" \t')
         props[key] = value
-  return props
\ No newline at end of file
+  return props
+
+
+def get_ams_tokens():
+  return (METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY, AMS_METRICS_COLLECTOR_USE_SSL_KEY, METRICS_COLLECTOR_VIP_HOST_KEY, METRICS_COLLECTOR_VIP_PORT_KEY)
+
+
+def create_ams_client(alert_id, ams_app_id, configurations, parameters):
+  if METRICS_COLLECTOR_VIP_HOST_KEY in configurations and METRICS_COLLECTOR_VIP_PORT_KEY in configurations:
+    ams_collector_hosts = configurations[METRICS_COLLECTOR_VIP_HOST_KEY].split(',')
+    ams_collector_port = int(configurations[METRICS_COLLECTOR_VIP_PORT_KEY])
+  else:
+    # ams-site/timeline.metrics.service.webapp.address is required
+    if not METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY in configurations:
+      raise Exception('{0} is a required parameter for the script'.format(METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY))
+
+    collector_webapp_address = configurations[METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY].split(":")
+    if not _valid_collector_webapp_address(collector_webapp_address):
+      raise Exception('{0} value should be set as "fqdn_hostname:port", but set to {1}'.format(
+        METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY, configurations[METRICS_COLLECTOR_WEBAPP_ADDRESS_KEY]))
+
+    ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+    if not ams_collector_hosts:
+      raise Exception("Ambari metrics is not available: ams_collector_hosts is None")
+    ams_collector_port = int(collector_webapp_address[1])
+
+  use_ssl = False
+  if AMS_METRICS_COLLECTOR_USE_SSL_KEY in configurations:
+    use_ssl = configurations[AMS_METRICS_COLLECTOR_USE_SSL_KEY] == 'HTTPS_ONLY'
+
+  connection_timeout = CONNECTION_TIMEOUT_DEFAULT
+  if CONNECTION_TIMEOUT_KEY in parameters:
+    connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
+  return AmsClient(alert_id, ams_collector_hosts, ams_collector_port, use_ssl, connection_timeout, ams_app_id)
+
+def _valid_collector_webapp_address(webapp_address):
+  if len(webapp_address) == 2 \
+      and webapp_address[0] != '127.0.0.1' \
+      and webapp_address[1].isdigit():
+    return True
+
+  return False
+
+class AmsClient:
+
+  def __init__(self, alert_id, ams_collector_hosts, ams_collector_port, use_ssl, connection_timeout, ams_app_id):
+    self.alert_id = alert_id
+    self.ams_collector_hosts = ams_collector_hosts
+    self.ams_collector_port = ams_collector_port
+    self.use_ssl = use_ssl
+    self.connection_timeout = connection_timeout

[ambari] branch trunk updated: [AMBARI-24728] Orchestration Should Save Upgrade Pack for source or target (#2417)

2018-10-03 Thread ncole
This is an automated email from the ASF dual-hosted git repository.

ncole pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ce9cdfa  [AMBARI-24728] Orchestration Should Save Upgrade Pack for source or target (#2417)
ce9cdfa is described below

commit ce9cdfa2845208cdc3c210318907b636cc99b15a
Author: ncole 
AuthorDate: Wed Oct 3 16:14:08 2018 -0400

    [AMBARI-24728] Orchestration Should Save Upgrade Pack for source or target (#2417)
---
 .../internal/UpgradeResourceProvider.java  |   1 +
 .../ambari/server/orm/entities/UpgradeEntity.java  |  19 ++
 .../ambari/server/stack/upgrade/UpgradePack.java   |  22 +++
 .../stack/upgrade/orchestrate/UpgradeContext.java  |  76 ---
 .../stack/upgrade/orchestrate/UpgradeHelper.java   | 114 ---
 .../org/apache/ambari/server/state/StackInfo.java  |   6 +
 .../ambari/server/upgrade/UpgradeCatalog280.java   |  16 +-
 .../src/main/resources/Ambari-DDL-Derby-CREATE.sql |   1 +
 .../src/main/resources/Ambari-DDL-MySQL-CREATE.sql |   1 +
 .../main/resources/Ambari-DDL-Oracle-CREATE.sql|   1 +
 .../main/resources/Ambari-DDL-Postgres-CREATE.sql  |   1 +
 .../resources/Ambari-DDL-SQLAnywhere-CREATE.sql|   1 +
 .../main/resources/Ambari-DDL-SQLServer-CREATE.sql |   1 +
 .../internal/UpgradeResourceProviderTest.java  |   1 +
 .../UpgradeSummaryResourceProviderTest.java|   1 +
 .../ambari/server/orm/dao/UpgradeDAOTest.java  |  12 ++
 .../AbstractAuthenticationProviderTest.java|   2 +-
 .../upgrades/ComponentVersionCheckActionTest.java  |   2 +
 .../serveraction/upgrades/ConfigureActionTest.java |   1 +
 .../upgrades/CreateAndConfigureActionTest.java |   1 +
 .../serveraction/upgrades/UpgradeActionTest.java   |   2 +
 .../upgrade/orchestrate/UpgradeContextTest.java|  69 ---
 .../upgrade/orchestrate/UpgradeHelperTest.java |  19 ++
 .../services/RetryUpgradeActionServiceTest.java|   1 +
 .../state/stack/ConfigUpgradeValidityTest.java |   2 -
 .../server/upgrade/UpgradeCatalog280Test.java  |  11 +-
 .../stacks/HDP/2.2.0/upgrades/upgrade_from_211.xml | 220 +
 27 files changed, 511 insertions(+), 93 deletions(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 9d1398f..3c5ee2c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -760,6 +760,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     upgrade.setClusterId(cluster.getClusterId());
     upgrade.setDirection(direction);
     upgrade.setUpgradePackage(pack.getName());
+    upgrade.setUpgradePackStackId(pack.getOwnerStackId());
     upgrade.setUpgradeType(pack.getType());
     upgrade.setAutoSkipComponentFailures(upgradeContext.isComponentFailureAutoSkipped());
     upgrade.setAutoSkipServiceCheckFailures(upgradeContext.isServiceCheckFailureAutoSkipped());
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeEntity.java
index 9342b46..21d7f84 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/UpgradeEntity.java
@@ -43,6 +43,7 @@ import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.stack.upgrade.Direction;
 import org.apache.ambari.server.stack.upgrade.UpgradeType;
 import org.apache.ambari.server.state.RepositoryType;
+import org.apache.ambari.server.state.StackId;
 import org.apache.commons.lang.builder.EqualsBuilder;
 
 import com.google.common.base.Objects;
@@ -126,6 +127,9 @@ public class UpgradeEntity {
   @Column(name="upgrade_package", nullable = false)
   private String upgradePackage;
 
+  @Column(name="upgrade_package_stack", nullable = false)
+  private String upgradePackStack;
+
   @Column(name="upgrade_type", nullable = false)
   @Enumerated(value = EnumType.STRING)
   private UpgradeType upgradeType;
@@ -326,6 +330,21 @@ public class UpgradeEntity {
   }
 
   /**
+   * @return the stack that owns the upgrade pack
+   */
+  public StackId getUpgradePackStackId() {
+    return null == upgradePackStack ? null : new StackId(upgradePackStack);
+  }
+
+  /**
+   * @param stackId
+   *  the stack that owns the upgrade pack
+   */
+  public void setUpgradePackStackId(StackId stackId) {
+    upgradePackStack = stackId.toString();
+  }
+
+  /**
   * Gets whether skippable components that failed are automatically skipped.
   * They 

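The new upgrade_package_stack column simply persists the owning stack as its string form and rehydrates a StackId on read, mirroring the getter/setter added above. A rough sketch of that round-trip follows; this StackId is a simplified stand-in for org.apache.ambari.server.state.StackId, and UpgradeRecordSketch is illustrative, not the real entity.

// Simplified stand-in for org.apache.ambari.server.state.StackId.
class StackId {
  private final String name;
  private final String version;

  StackId(String stackId) {                  // e.g. "HDP-2.6"
    int dash = stackId.indexOf('-');
    this.name = stackId.substring(0, dash);
    this.version = stackId.substring(dash + 1);
  }

  @Override
  public String toString() {
    return name + "-" + version;
  }
}

// Illustrative record, not the real UpgradeEntity: the column stores only the string form.
class UpgradeRecordSketch {
  private String upgradePackStack;

  void setUpgradePackStackId(StackId stackId) {
    upgradePackStack = stackId.toString();   // persisted as plain text
  }

  StackId getUpgradePackStackId() {
    return upgradePackStack == null ? null : new StackId(upgradePackStack);
  }
}

Keeping the persisted value a plain string is what lets the schema change stay a single added column in each of the DDL scripts listed in the diffstat.
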
[GitHub] avijayanhwx commented on a change in pull request #5: [AMBARI-24723] Support wild cards in AppId and InstanceId fields in AMS GET API.

2018-10-03 Thread GitBox
avijayanhwx commented on a change in pull request #5: [AMBARI-24723] Support wild cards in AppId and InstanceId fields in AMS GET API.
URL: https://github.com/apache/ambari-metrics/pull/5#discussion_r222389041
 
 

 ##
 File path: ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/discovery/TimelineMetricMetadataManager.java
 ##
 @@ -667,13 +681,17 @@ public TimelineMetric getMetricFromUuid(byte[] uuid) {
   }
   TimelineMetric metric = new TimelineMetric();
   metric.setMetricName(metricName);
 -  metric.setAppId(appId);
 -  metric.setInstanceId(instanceId);
   for (String hostname : sanitizedHostNames) {
     metric.setHostName(hostname);
 -    byte[] uuid = getUuid(metric, false);
 -    if (uuid != null) {
 -      uuids.add(uuid);
 +    for (String a : sanitizedAppIds) {
 +      for (String i : sanitizedInstanceIds) {
 +        metric.setAppId(a);
 +        metric.setInstanceId(i);
 +        byte[] uuid = getUuid(metric, false);
 +        if (uuid != null) {
 
 Review comment:
   The else is not an ERROR. It just means we have not received a metric with this combination of IDs (name + appId + instanceId). Also, the misses are logged in the internal getUuid call. 


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services

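To make the thread above concrete: with wildcards, a single (appId, instanceId) pair becomes the cross product of every sanitized appId and instanceId for each host, and a combination that was never reported simply yields no UUID and is skipped, with the miss logged inside the lookup rather than surfaced as an error. A standalone sketch of that expansion is below; the class, map, and method names are illustrative only, not the actual TimelineMetricMetadataManager API.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.logging.Logger;

public class UuidLookupSketch {
  private static final Logger LOG = Logger.getLogger(UuidLookupSketch.class.getName());

  // Hypothetical registry of (metric|host|app|instance) combinations that were actually reported.
  private final Map<String, byte[]> knownUuids = new HashMap<>();

  public List<byte[]> expand(String metricName, List<String> hosts,
                             List<String> appIds, List<String> instanceIds) {
    List<byte[]> uuids = new ArrayList<>();
    for (String host : hosts) {
      for (String appId : appIds) {             // wildcard expands to every sanitized appId
        for (String instanceId : instanceIds) { // wildcard expands to every sanitized instanceId
          byte[] uuid = getUuid(metricName, host, appId, instanceId);
          if (uuid != null) {
            uuids.add(uuid);                    // keep only combinations that actually exist
          }
        }
      }
    }
    return uuids;
  }

  private byte[] getUuid(String metricName, String host, String appId, String instanceId) {
    byte[] uuid = knownUuids.get(metricName + "|" + host + "|" + appId + "|" + instanceId);
    if (uuid == null) {
      // A miss is not an error: this combination was simply never reported. Log and move on.
      LOG.fine("No UUID for " + metricName + " on " + host + " (" + appId + "/" + instanceId + ")");
    }
    return uuid;
  }
}
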


[GitHub] asfgit commented on issue #2: AMBARI-24725 - Infra Solr: manage autoscaling properties in Ambari

2018-10-03 Thread GitBox
asfgit commented on issue #2: AMBARI-24725 - Infra Solr: manage autoscaling 
properties in Ambari
URL: https://github.com/apache/ambari-infra/pull/2#issuecomment-426591627
 
 
   
   Refer to this link for build results (access rights to CI server needed): 
   https://builds.apache.org/job/Ambari-Infra-Github-PR-Builder/2/
   Test PASSed.
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] kasakrisz opened a new pull request #2: AMBARI-24725 - Infra Solr: manage autoscaling properties in Ambari

2018-10-03 Thread GitBox
kasakrisz opened a new pull request #2: AMBARI-24725 - Infra Solr: manage 
autoscaling properties in Ambari
URL: https://github.com/apache/ambari-infra/pull/2
 
 
   ## What changes were proposed in this pull request?
   
    Add the feature to upload autoscaling.json to the infra-solr znode to AmbariSolrCloudCLI.
   
   ## How was this patch tested?
   
   Manually:
   
   - Deploy Ambari
    - install ambari-infra-solr-client to a host where infra-solr will be installed using yum
   - install infra-solr using ambari
   - check the content of Cloud/Tree/autoscaling.json on Solr Admin page 
    - change some autoscaling settings on Ambari UI and check the content of Cloud/Tree/autoscaling.json on Solr Admin page again
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[ambari] branch trunk updated: AMBARI-23771. Customize Services - Alignment between Final/Set Recommended buttons and text boxes not proper in new tabs (akovalenko)

2018-10-03 Thread akovalenko
This is an automated email from the ASF dual-hosted git repository.

akovalenko pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 44436e1  AMBARI-23771. Customize Services - Alignment between Final/Set Recommended buttons and text boxes not proper in new tabs (akovalenko)
44436e1 is described below

commit 44436e1d5e0d28e07285c7b506cf596748ce85fc
Author: Aleksandr Kovalenko 
AuthorDate: Thu Sep 27 18:23:53 2018 +0300

    AMBARI-23771. Customize Services - Alignment between Final/Set Recommended buttons and text boxes not proper in new tabs (akovalenko)
---
 ambari-web/app/styles/service_configurations.less | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ambari-web/app/styles/service_configurations.less b/ambari-web/app/styles/service_configurations.less
index ea5d53b..7bde577 100644
--- a/ambari-web/app/styles/service_configurations.less
+++ b/ambari-web/app/styles/service_configurations.less
@@ -371,6 +371,7 @@
 .password-field-wrapper {
   width: 75%;
   display: inline-block;
+  float: left;
   .row {
 padding: 0;
   }



[ambari] branch branch-2.7 updated: AMBARI-24671. Workaround for non-atomic directory creation (#2407)

2018-10-03 Thread adoroszlai
This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch branch-2.7
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/branch-2.7 by this push:
 new ffe3aba  AMBARI-24671. Workaround for non-atomic directory creation (#2407)
ffe3aba is described below

commit ffe3aba719e90ddba3ef25eb425e353a08d5b560
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Wed Oct 3 09:02:19 2018 +0200

AMBARI-24671. Workaround for non-atomic directory creation (#2407)
---
 .../src/main/resources/scripts/Ambaripreupload.py  | 35 ++
 1 file changed, 35 insertions(+)

diff --git a/ambari-server/src/main/resources/scripts/Ambaripreupload.py b/ambari-server/src/main/resources/scripts/Ambaripreupload.py
index ab98c64..1986a50 100644
--- a/ambari-server/src/main/resources/scripts/Ambaripreupload.py
+++ b/ambari-server/src/main/resources/scripts/Ambaripreupload.py
@@ -36,6 +36,7 @@ from resource_management.core import File
 from resource_management.core import shell
 from resource_management.core.environment import Environment
 from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Directory
 from resource_management.core.resources.system import Execute
 from resource_management.core.source import StaticFile
 from resource_management.libraries import ConfigDictionary
@@ -325,6 +326,40 @@ with Environment() as env:
   env.set_params(params)
   hadoop_conf_dir = params.hadoop_conf_dir
 
+  Directory('/var/lib/ambari-agent/tmp/hadoop_java_io_tmpdir',
+            owner=params.hdfs_user,
+            group=params.user_group,
+            mode=01777
+  )
+  Directory('/var/log/hadoop',
+            create_parents = True,
+            owner='root',
+            group=params.user_group,
+            mode=0775,
+            cd_access='a',
+  )
+  Directory('/var/run/hadoop',
+            create_parents = True,
+            owner='root',
+            group='root',
+            cd_access='a',
+  )
+  Directory('/var/run/hadoop/hdfs',
+            owner=params.hdfs_user,
+            cd_access='a',
+  )
+  Directory('/tmp/hadoop-hdfs',
+            create_parents = True,
+            owner=params.hdfs_user,
+            cd_access='a',
+  )
+  Directory('/tmp/hbase-hbase',
+            owner='hbase',
+            mode=0775,
+            create_parents = True,
+            cd_access="a",
+  )
+
   oozie_libext_dir = params.oozie_libext_dir
   sql_driver_filename = os.path.basename(SQL_DRIVER_PATH)
   oozie_home=params.oozie_home



[GitHub] swagle commented on a change in pull request #5: [AMBARI-24723] Support wild cards in AppId and InstanceId fields in AMS GET API.

2018-10-03 Thread GitBox
swagle commented on a change in pull request #5: [AMBARI-24723] Support wild cards in AppId and InstanceId fields in AMS GET API.
URL: https://github.com/apache/ambari-metrics/pull/5#discussion_r222191550
 
 

 ##
 File path: ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/discovery/TimelineMetricMetadataManager.java
 ##
 @@ -87,6 +86,8 @@
   AtomicBoolean SYNC_HOSTED_APPS_METADATA = new AtomicBoolean(false);
   AtomicBoolean SYNC_HOSTED_INSTANCES_METADATA = new AtomicBoolean(false);
 
+  private Map> appInstanceMap = new ConcurrentHashMap<>();
 
 Review comment:
   Instead of this data structure why don't we do a Phoenix lookup with wildcard pass down?


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] swagle commented on a change in pull request #5: [AMBARI-24723] Support wild cards in AppId and InstanceId fields in AMS GET API.

2018-10-03 Thread GitBox
swagle commented on a change in pull request #5: [AMBARI-24723] Support wild cards in AppId and InstanceId fields in AMS GET API.
URL: https://github.com/apache/ambari-metrics/pull/5#discussion_r222190967
 
 

 ##
 File path: ambari-metrics-timelineservice/src/main/java/org/apache/ambari/metrics/core/timeline/discovery/TimelineMetricMetadataManager.java
 ##
 @@ -667,13 +681,17 @@ public TimelineMetric getMetricFromUuid(byte[] uuid) {
   }
   TimelineMetric metric = new TimelineMetric();
   metric.setMetricName(metricName);
 -  metric.setAppId(appId);
 -  metric.setInstanceId(instanceId);
   for (String hostname : sanitizedHostNames) {
     metric.setHostName(hostname);
 -    byte[] uuid = getUuid(metric, false);
 -    if (uuid != null) {
 -      uuids.add(uuid);
 +    for (String a : sanitizedAppIds) {
 +      for (String i : sanitizedInstanceIds) {
 +        metric.setAppId(a);
 +        metric.setInstanceId(i);
 +        byte[] uuid = getUuid(metric, false);
 +        if (uuid != null) {
 
 Review comment:
   Shouldn't this if have an else which logs an error?


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services