AMBARI-16094. Improvements to DB consistency check. (vbrodetskyi)
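At a glance, this patch replaces the injectable CheckDatabaseHelper with a static DatabaseConsistencyCheckHelper plus a thin DatabaseConsistencyChecker entry point, and AmbariServer now calls the helper's DB version check directly at startup. A minimal sketch of the check sequence the new entry point drives (class and method names are taken from the patch; the Guice bootstrap, persistence service start/stop, and connection setup performed by DatabaseConsistencyChecker.main() are elided here):

import org.apache.ambari.server.checks.DatabaseConsistencyCheckHelper;

public class ConsistencyCheckSketch {
  public static void main(String[] args) {
    // Order mirrors DatabaseConsistencyChecker.main() in this commit; the helper's
    // injector/connection are assumed to have been set up beforehand.
    DatabaseConsistencyCheckHelper.checkForNotMappedConfigsToCluster();
    DatabaseConsistencyCheckHelper.checkForConfigsSelectedMoreThanOnce();
    DatabaseConsistencyCheckHelper.checkForHostsWithoutState();
    DatabaseConsistencyCheckHelper.checkHostComponentStatesCountEqualsHostComponentsDesiredStates();
    DatabaseConsistencyCheckHelper.checkServiceConfigs();
    if (DatabaseConsistencyCheckHelper.isErrorAvailable()) {
      System.out.println("Consistency issues found; see the check-database log for details.");
    }
    DatabaseConsistencyCheckHelper.closeConnection();
  }
}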
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/cb8dca09 Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/cb8dca09 Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/cb8dca09 Branch: refs/heads/trunk Commit: cb8dca09c7794c93233b2546464f1327be261c60 Parents: 46abb14 Author: Vitaly Brodetskyi <[email protected]> Authored: Wed May 4 15:28:05 2016 +0300 Committer: Vitaly Brodetskyi <[email protected]> Committed: Wed May 4 15:28:05 2016 +0300 ---------------------------------------------------------------------- ambari-server/conf/unix/log4j.properties | 11 +- ambari-server/conf/windows/log4j.properties | 11 +- ambari-server/src/main/conf/log4j.properties | 11 +- .../server/checks/CheckDatabaseHelper.java | 578 ------------------ .../checks/DatabaseConsistencyCheckHelper.java | 579 +++++++++++++++++++ .../checks/DatabaseConsistencyChecker.java | 156 +++++ .../ambari/server/controller/AmbariServer.java | 15 +- .../server/controller/ControllerModule.java | 4 +- .../controller/utilities/DatabaseChecker.java | 3 + ambari-server/src/main/python/ambari-server.py | 4 +- .../main/python/ambari_server/checkDatabase.py | 15 +- .../python/ambari_server/serverConfiguration.py | 1 + .../src/main/python/ambari_server_main.py | 35 +- .../server/checks/CheckDatabaseHelperTest.java | 303 ---------- .../DatabaseConsistencyCheckHelperTest.java | 298 ++++++++++ .../utilities/DatabaseCheckerTest.java | 20 +- .../src/test/python/TestAmbariServer.py | 5 +- 17 files changed, 1128 insertions(+), 921 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/ambari/blob/cb8dca09/ambari-server/conf/unix/log4j.properties ---------------------------------------------------------------------- diff --git a/ambari-server/conf/unix/log4j.properties b/ambari-server/conf/unix/log4j.properties index 01bbc4c..a5b58bc 100644 --- a/ambari-server/conf/unix/log4j.properties +++ b/ambari-server/conf/unix/log4j.properties @@ -54,13 +54,20 @@ log4j.appender.alerts.layout=org.apache.log4j.PatternLayout log4j.appender.alerts.layout.ConversionPattern=%d{ISO8601} %m%n # Log database check process -log4j.logger.org.apache.ambari.server.checks.CheckDatabaseHelper=INFO, dbcheck -log4j.additivity.org.apache.ambari.server.checks.CheckDatabaseHelper=false +log4j.logger.org.apache.ambari.server.checks.DatabaseConsistencyChecker=INFO, dbcheck +log4j.additivity.org.apache.ambari.server.checks.DatabaseConsistencyChecker=false log4j.appender.dbcheck=org.apache.log4j.FileAppender log4j.appender.dbcheck.File=${ambari.log.dir}/${ambari.dbcheck.file} log4j.appender.dbcheck.layout=org.apache.log4j.PatternLayout log4j.appender.dbcheck.layout.ConversionPattern=%d{ISO8601} %5p - %m%n +log4j.logger.org.apache.ambari.server.checks.DatabaseConsistencyCheckHelper=INFO, dbcheckhelper +log4j.additivity.org.apache.ambari.server.checks.DatabaseConsistencyCheckHelper=false +log4j.appender.dbcheckhelper=org.apache.log4j.FileAppender +log4j.appender.dbcheckhelper.File=${ambari.log.dir}/${ambari.dbcheck.file} +log4j.appender.dbcheckhelper.layout=org.apache.log4j.PatternLayout +log4j.appender.dbcheckhelper.layout.ConversionPattern=%d{ISO8601} %5p - %m%n + # EclipsLink -> slf4j bridge log4j.logger.eclipselink=TRACE,eclipselink log4j.additivity.eclipselink=false http://git-wip-us.apache.org/repos/asf/ambari/blob/cb8dca09/ambari-server/conf/windows/log4j.properties 
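All three log4j.properties files receive the same two-part change: the existing dbcheck appender is re-pointed at the renamed DatabaseConsistencyChecker, and a second appender, dbcheckhelper, routes DatabaseConsistencyCheckHelper output into the same ${ambari.dbcheck.file}. A small sketch of how those logger names are resolved on the Java side (standard slf4j usage; the logger name matches the configuration above):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class DbCheckLoggingSketch {
  // Resolves to log4j.logger.org.apache.ambari.server.checks.DatabaseConsistencyCheckHelper,
  // so with additivity=false these messages go only to ${ambari.dbcheck.file}.
  static final Logger LOG =
      LoggerFactory.getLogger("org.apache.ambari.server.checks.DatabaseConsistencyCheckHelper");

  static void demo() {
    LOG.warn("You have config(s) that are not mapped to any cluster!");
  }
}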
---------------------------------------------------------------------- diff --git a/ambari-server/conf/windows/log4j.properties b/ambari-server/conf/windows/log4j.properties index 8beba68..4b768f3 100644 --- a/ambari-server/conf/windows/log4j.properties +++ b/ambari-server/conf/windows/log4j.properties @@ -79,13 +79,20 @@ log4j.appender.alerts.layout=org.apache.log4j.PatternLayout log4j.appender.alerts.layout.ConversionPattern=%d{ISO8601} %m%n # Log database check process -log4j.logger.org.apache.ambari.server.checks.CheckDatabaseHelper=INFO, dbcheck -log4j.additivity.org.apache.ambari.server.checks.CheckDatabaseHelper=false +log4j.logger.org.apache.ambari.server.checks.DatabaseConsistencyChecker=INFO, dbcheck +log4j.additivity.org.apache.ambari.server.checks.DatabaseConsistencyChecker=false log4j.appender.dbcheck=org.apache.log4j.FileAppender log4j.appender.dbcheck.File=${ambari.log.dir}/${ambari.dbcheck.file} log4j.appender.dbcheck.layout=org.apache.log4j.PatternLayout log4j.appender.dbcheck.layout.ConversionPattern=%d{ISO8601} %5p - %m%n +log4j.logger.org.apache.ambari.server.checks.DatabaseConsistencyCheckHelper=INFO, dbcheckhelper +log4j.additivity.org.apache.ambari.server.checks.DatabaseConsistencyCheckHelper=false +log4j.appender.dbcheckhelper=org.apache.log4j.FileAppender +log4j.appender.dbcheckhelper.File=${ambari.log.dir}/${ambari.dbcheck.file} +log4j.appender.dbcheckhelper.layout=org.apache.log4j.PatternLayout +log4j.appender.dbcheckhelper.layout.ConversionPattern=%d{ISO8601} %5p - %m%n + # EclipsLink -> slf4j bridge log4j.logger.eclipselink=TRACE,eclipselink log4j.additivity.eclipselink=false http://git-wip-us.apache.org/repos/asf/ambari/blob/cb8dca09/ambari-server/src/main/conf/log4j.properties ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/conf/log4j.properties b/ambari-server/src/main/conf/log4j.properties index 5021c77..680b4c4 100644 --- a/ambari-server/src/main/conf/log4j.properties +++ b/ambari-server/src/main/conf/log4j.properties @@ -79,13 +79,20 @@ log4j.appender.alerts.layout=org.apache.log4j.PatternLayout log4j.appender.alerts.layout.ConversionPattern=%d{ISO8601} %m%n # Log database check process -log4j.logger.org.apache.ambari.server.checks.CheckDatabaseHelper=INFO, dbcheck -log4j.additivity.org.apache.ambari.server.checks.CheckDatabaseHelper=false +log4j.logger.org.apache.ambari.server.checks.DatabaseConsistencyChecker=INFO, dbcheck +log4j.additivity.org.apache.ambari.server.checks.DatabaseConsistencyChecker=false log4j.appender.dbcheck=org.apache.log4j.FileAppender log4j.appender.dbcheck.File=${ambari.log.dir}/${ambari.dbcheck.file} log4j.appender.dbcheck.layout=org.apache.log4j.PatternLayout log4j.appender.dbcheck.layout.ConversionPattern=%d{ISO8601} %5p - %m%n +log4j.logger.org.apache.ambari.server.checks.DatabaseConsistencyCheckHelper=INFO, dbcheckhelper +log4j.additivity.org.apache.ambari.server.checks.DatabaseConsistencyCheckHelper=false +log4j.appender.dbcheckhelper=org.apache.log4j.FileAppender +log4j.appender.dbcheckhelper.File=${ambari.log.dir}/${ambari.dbcheck.file} +log4j.appender.dbcheckhelper.layout=org.apache.log4j.PatternLayout +log4j.appender.dbcheckhelper.layout.ConversionPattern=%d{ISO8601} %5p - %m%n + # EclipsLink -> slf4j bridge log4j.logger.eclipselink=TRACE,eclipselink log4j.additivity.eclipselink=false http://git-wip-us.apache.org/repos/asf/ambari/blob/cb8dca09/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDatabaseHelper.java 
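The deleted CheckDatabaseHelper below kept its DBAccessor, connection, and error/warning flags as injected instance state; the new helper reimplements the same checks as static methods that lazily resolve the connection from the injector. The core of each check is still a plain JDBC query. As a standalone illustration, here is a sketch of what checkForHostsWithoutState() boils down to (the query text is taken from the patch; try-with-resources is used here instead of the explicit finally blocks in the committed code):

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.HashSet;
import java.util.Set;

final class HostsWithoutStateSketch {
  // Same query the helper uses: hosts with no matching row in hoststate.
  private static final String QUERY =
      "select host_name from hosts where host_id not in (select host_id from hoststate)";

  static Set<String> find(Connection connection) throws SQLException {
    Set<String> hosts = new HashSet<>();
    try (Statement statement = connection.createStatement();
         ResultSet rs = statement.executeQuery(QUERY)) {
      while (rs.next()) {
        hosts.add(rs.getString("host_name"));
      }
    }
    return hosts;
  }
}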
---------------------------------------------------------------------- diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDatabaseHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDatabaseHelper.java deleted file mode 100644 index 6ed0c08..0000000 --- a/ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDatabaseHelper.java +++ /dev/null @@ -1,578 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.ambari.server.checks; - -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.Collection; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -import org.apache.ambari.server.AmbariException; -import org.apache.ambari.server.api.services.AmbariMetaInfo; -import org.apache.ambari.server.audit.AuditLoggerModule; -import org.apache.ambari.server.controller.ControllerModule; -import org.apache.ambari.server.orm.DBAccessor; -import org.apache.ambari.server.state.ServiceInfo; -import org.apache.ambari.server.utils.EventBusSynchronizer; -import org.apache.commons.lang.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.collect.HashMultimap; -import com.google.common.collect.Multimap; -import com.google.inject.Guice; -import com.google.inject.Inject; -import com.google.inject.Injector; -import com.google.inject.persist.PersistService; - -/* -* Class for database validation. -* Here we will check configs, services, components and etc. 
-*/ -public class CheckDatabaseHelper { - private static final Logger LOG = LoggerFactory.getLogger - (CheckDatabaseHelper.class); - - private static final String AUTHENTICATED_USER_NAME = "ambari-check-database"; - - private PersistService persistService; - private DBAccessor dbAccessor; - private Connection connection; - private AmbariMetaInfo ambariMetaInfo; - private Injector injector; - private boolean errorAvailable = false; - private boolean warningAvailable = false; - - @Inject - public CheckDatabaseHelper(DBAccessor dbAccessor, - Injector injector, - PersistService persistService) { - this.dbAccessor = dbAccessor; - this.injector = injector; - this.persistService = persistService; - } - - /** - * Extension of main controller module - */ - public static class CheckHelperControllerModule extends ControllerModule { - - public CheckHelperControllerModule() throws Exception { - } - - @Override - protected void configure() { - super.configure(); - EventBusSynchronizer.synchronizeAmbariEventPublisher(binder()); - } - } - - /** - * Extension of audit logger module - */ - public static class CheckHelperAuditModule extends AuditLoggerModule { - - public CheckHelperAuditModule() throws Exception { - } - - @Override - protected void configure() { - super.configure(); - } - - } - - /* - * init method to create connection - * */ - protected void init() { - connection = dbAccessor.getConnection(); - ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class); - } - - /* - * method to close connection - * */ - private void closeConnection() { - try { - connection.close(); - } catch (SQLException e) { - LOG.error("Exception occurred during connection close procedure: ", e); - } - } - - public void startPersistenceService() { - persistService.start(); - } - - public void stopPersistenceService() { - persistService.stop(); - } - - protected boolean isErrorAvailable() { - return errorAvailable; - } - - protected void setErrorAvailable(boolean errorAvailable) { - this.errorAvailable = errorAvailable; - } - - public boolean isWarningAvailable() { - return warningAvailable; - } - - public void setWarningAvailable(boolean warningAvailable) { - this.warningAvailable = warningAvailable; - } - - /* - * This method checks if all configurations that we have in clusterconfig table - * have at least one mapping in clusterconfigmapping table. If we found not mapped config - * then we are showing warning message for user. 
- * */ - protected void checkForNotMappedConfigsToCluster() { - String GET_NOT_MAPPED_CONFIGS_QUERY = "select type_name from clusterconfig where type_name not in (select type_name from clusterconfigmapping)"; - Set<String> nonSelectedConfigs = new HashSet<>(); - ResultSet rs = null; - try { - Statement statement = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE); - rs = statement.executeQuery(GET_NOT_MAPPED_CONFIGS_QUERY); - if (rs != null) { - while (rs.next()) { - nonSelectedConfigs.add(rs.getString("type_name")); - } - } - if (!nonSelectedConfigs.isEmpty()) { - LOG.warn("You have config(s): {} that is(are) not mapped (in clusterconfigmapping table) to any cluster!", StringUtils.join(nonSelectedConfigs, ",")); - warningAvailable = true; - } - } catch (SQLException e) { - LOG.error("Exception occurred during check for not mapped configs to cluster procedure: ", e); - } finally { - if (rs != null) { - try { - rs.close(); - } catch (SQLException e) { - LOG.error("Exception occurred during result set closing procedure: ", e); - } - } - } - } - - /* - * This method checks if any config type in clusterconfigmapping table, has - * more than one versions selected. If config version is selected(in selected column = 1), - * it means that this version of config is actual. So, if any config type has more - * than one selected version it's a bug and we are showing error message for user. - * */ - protected void checkForConfigsSelectedMoreThanOnce() { - String GET_CONFIGS_SELECTED_MORE_THAN_ONCE_QUERY = "select c.cluster_name, ccm.type_name from clusterconfigmapping ccm " + - "join clusters c on ccm.cluster_id=c.cluster_id " + - "group by c.cluster_name, ccm.type_name " + - "having sum(selected) > 1"; - Multimap<String, String> clusterConfigTypeMap = HashMultimap.create(); - ResultSet rs = null; - try { - Statement statement = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE); - rs = statement.executeQuery(GET_CONFIGS_SELECTED_MORE_THAN_ONCE_QUERY); - if (rs != null) { - while (rs.next()) { - clusterConfigTypeMap.put(rs.getString("cluster_name"), rs.getString("type_name")); - } - - for (String clusterName : clusterConfigTypeMap.keySet()) { - LOG.error("You have config(s), in cluster {}, that is(are) selected more than once in clusterconfigmapping table: {}", - clusterName ,StringUtils.join(clusterConfigTypeMap.get(clusterName), ",")); - errorAvailable = true; - } - } - - } catch (SQLException e) { - LOG.error("Exception occurred during check for config selected more than ones procedure: ", e); - } finally { - if (rs != null) { - try { - rs.close(); - } catch (SQLException e) { - LOG.error("Exception occurred during result set closing procedure: ", e); - } - } - } - } - - /* - * This method checks if all hosts from hosts table - * has related host state info in hoststate table. - * If not then we are showing error. 
- * */ - protected void checkForHostsWithoutState() { - String GET_HOSTS_WITHOUT_STATUS_QUERY = "select host_name from hosts where host_id not in (select host_id from hoststate)"; - Set<String> hostsWithoutStatus = new HashSet<>(); - ResultSet rs = null; - try { - Statement statement = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE); - rs = statement.executeQuery(GET_HOSTS_WITHOUT_STATUS_QUERY); - if (rs != null) { - while (rs.next()) { - hostsWithoutStatus.add(rs.getString("host_name")); - } - - if (!hostsWithoutStatus.isEmpty()) { - LOG.error("You have host(s) without state (in hoststate table): " + StringUtils.join(hostsWithoutStatus, ",")); - errorAvailable = true; - } - } - - } catch (SQLException e) { - LOG.error("Exception occurred during check for host without state procedure: ", e); - } finally { - if (rs != null) { - try { - rs.close(); - } catch (SQLException e) { - LOG.error("Exception occurred during result set closing procedure: ", e); - } - } - } - } - - /* - * This method checks if count of host component states equals count - * of desired host component states. According to ambari logic these - * two tables should have the same count of rows. If not then we are - * showing error for user. - * */ - protected void checkHostComponentStatesCountEqualsHostComponentsDesiredStates() { - String GET_HOST_COMPONENT_STATE_COUNT_QUERY = "select count(*) from hostcomponentstate"; - String GET_HOST_COMPONENT_DESIRED_STATE_COUNT_QUERY = "select count(*) from hostcomponentdesiredstate"; - String GET_MERGED_TABLE_ROW_COUNT_QUERY = "select count(*) FROM hostcomponentstate hcs " + - "JOIN hostcomponentdesiredstate hcds ON hcs.service_name=hcds.service_name AND hcs.component_name=hcds.component_name AND hcs.host_id=hcds.host_id"; - int hostComponentStateCount = 0; - int hostComponentDesiredStateCount = 0; - int mergedCount = 0; - ResultSet rs = null; - try { - Statement statement = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE); - - rs = statement.executeQuery(GET_HOST_COMPONENT_STATE_COUNT_QUERY); - if (rs != null) { - while (rs.next()) { - hostComponentStateCount = rs.getInt(1); - } - } - - rs = statement.executeQuery(GET_HOST_COMPONENT_DESIRED_STATE_COUNT_QUERY); - if (rs != null) { - while (rs.next()) { - hostComponentDesiredStateCount = rs.getInt(1); - } - } - - rs = statement.executeQuery(GET_MERGED_TABLE_ROW_COUNT_QUERY); - if (rs != null) { - while (rs.next()) { - mergedCount = rs.getInt(1); - } - } - - if (hostComponentStateCount != hostComponentDesiredStateCount || hostComponentStateCount != mergedCount) { - LOG.error("Your host component states (hostcomponentstate table) count not equals host component desired states (hostcomponentdesiredstate table) count!"); - errorAvailable = true; - } - - } catch (SQLException e) { - LOG.error("Exception occurred during check for same count of host component states and host component desired states: ", e); - } finally { - if (rs != null) { - try { - rs.close(); - } catch (SQLException e) { - LOG.error("Exception occurred during result set closing procedure: ", e); - } - } - } - - } - - - /* - * This method checks several potential problems for services: - * 1) Check if we have services in cluster which doesn't have service config id(not available in serviceconfig table). - * 2) Check if service has no mapped configs to it's service config id. - * 3) Check if service has all required configs mapped to it. 
- * 4) Check if service has config which is not selected(has no actual config version) in clusterconfigmapping table. - * If any issue was discovered, we are showing error message for user. - * */ - protected void checkServiceConfigs() { - String GET_SERVICES_WITHOUT_CONFIGS_QUERY = "select c.cluster_name, service_name from clusterservices cs " + - "join clusters c on cs.cluster_id=c.cluster_id " + - "where service_name not in (select service_name from serviceconfig sc where sc.cluster_id=cs.cluster_id and sc.service_name=cs.service_name and sc.group_id is null)"; - String GET_SERVICE_CONFIG_WITHOUT_MAPPING_QUERY = "select c.cluster_name, sc.service_name, sc.version from serviceconfig sc " + - "join clusters c on sc.cluster_id=c.cluster_id " + - "where service_config_id not in (select service_config_id from serviceconfigmapping) and group_id is null"; - String GET_STACK_NAME_VERSION_QUERY = "select c.cluster_name, s.stack_name, s.stack_version from clusters c " + - "join stack s on c.desired_stack_id = s.stack_id"; - String GET_SERVICES_WITH_CONFIGS_QUERY = "select c.cluster_name, cs.service_name, cc.type_name, sc.version from clusterservices cs " + - "join serviceconfig sc on cs.service_name=sc.service_name and cs.cluster_id=sc.cluster_id " + - "join serviceconfigmapping scm on sc.service_config_id=scm.service_config_id " + - "join clusterconfig cc on scm.config_id=cc.config_id and sc.cluster_id=cc.cluster_id " + - "join clusters c on cc.cluster_id=c.cluster_id and sc.stack_id=c.desired_stack_id " + - "where sc.group_id is null and sc.service_config_id=(select max(service_config_id) from serviceconfig sc2 where sc2.service_name=sc.service_name and sc2.cluster_id=sc.cluster_id) " + - "group by c.cluster_name, cs.service_name, cc.type_name, sc.version"; - String GET_NOT_SELECTED_SERVICE_CONFIGS_QUERY = "select c.cluster_name, cs.service_name, cc.type_name from clusterservices cs " + - "join serviceconfig sc on cs.service_name=sc.service_name and cs.cluster_id=sc.cluster_id " + - "join serviceconfigmapping scm on sc.service_config_id=scm.service_config_id " + - "join clusterconfig cc on scm.config_id=cc.config_id and cc.cluster_id=sc.cluster_id " + - "join clusterconfigmapping ccm on cc.type_name=ccm.type_name and cc.version_tag=ccm.version_tag and cc.cluster_id=ccm.cluster_id " + - "join clusters c on ccm.cluster_id=c.cluster_id " + - "where sc.group_id is null and sc.service_config_id = (select max(service_config_id) from serviceconfig sc2 where sc2.service_name=sc.service_name and sc2.cluster_id=sc.cluster_id) " + - "group by c.cluster_name, cs.service_name, cc.type_name " + - "having sum(ccm.selected) < 1"; - Multimap<String, String> clusterServiceMap = HashMultimap.create(); - Map<String, Map<String, String>> clusterStackInfo = new HashMap<>(); - Map<String, Multimap<String, String>> clusterServiceVersionMap = new HashMap<>(); - Map<String, Multimap<String, String>> clusterServiceConfigType = new HashMap<>(); - ResultSet rs = null; - - try { - Statement statement = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE); - - rs = statement.executeQuery(GET_SERVICES_WITHOUT_CONFIGS_QUERY); - if (rs != null) { - while (rs.next()) { - clusterServiceMap.put(rs.getString("cluster_name"), rs.getString("service_name")); - } - - for (String clusterName : clusterServiceMap.keySet()) { - LOG.error("Service(s): {}, from cluster {} has no config(s) in serviceconfig table!", StringUtils.join(clusterServiceMap.get(clusterName), ","), clusterName); - errorAvailable = 
true; - } - - } - - rs = statement.executeQuery(GET_SERVICE_CONFIG_WITHOUT_MAPPING_QUERY); - if (rs != null) { - String serviceName = null, version = null, clusterName = null; - while (rs.next()) { - serviceName = rs.getString("service_name"); - clusterName = rs.getString("cluster_name"); - version = rs.getString("version"); - - if (clusterServiceVersionMap.get(clusterName) != null) { - Multimap<String, String> serviceVersion = clusterServiceVersionMap.get(clusterName); - serviceVersion.put(serviceName, version); - } else { - Multimap<String, String> serviceVersion = HashMultimap.create();; - serviceVersion.put(serviceName, version); - clusterServiceVersionMap.put(clusterName, serviceVersion); - } - } - - for (String clName : clusterServiceVersionMap.keySet()) { - Multimap<String, String> serviceVersion = clusterServiceVersionMap.get(clName); - for (String servName : serviceVersion.keySet()) { - LOG.error("In cluster {}, service config mapping is unavailable (in table serviceconfigmapping) for service {} with version(s) {}! ", clName, servName, StringUtils.join(serviceVersion.get(servName), ",")); - errorAvailable = true; - } - } - - } - - //get stack info from db - rs = statement.executeQuery(GET_STACK_NAME_VERSION_QUERY); - if (rs != null) { - while (rs.next()) { - Map<String, String> stackInfoMap = new HashMap<>(); - stackInfoMap.put(rs.getString("stack_name"), rs.getString("stack_version")); - clusterStackInfo.put(rs.getString("cluster_name"), stackInfoMap); - } - } - - - Set<String> serviceNames = new HashSet<>(); - Map<String, Map<Integer, Multimap<String, String>>> dbClusterServiceVersionConfigs = new HashMap<>(); - Multimap<String, String> stackServiceConfigs = HashMultimap.create(); - - rs = statement.executeQuery(GET_SERVICES_WITH_CONFIGS_QUERY); - if (rs != null) { - String serviceName = null, configType = null, clusterName = null; - Integer serviceVersion = null; - while (rs.next()) { - clusterName = rs.getString("cluster_name"); - serviceName = rs.getString("service_name"); - configType = rs.getString("type_name"); - serviceVersion = rs.getInt("version"); - - serviceNames.add(serviceName); - - //collect data about mapped configs to services from db - if (dbClusterServiceVersionConfigs.get(clusterName) != null) { - Map<Integer, Multimap<String, String>> dbServiceVersionConfigs = dbClusterServiceVersionConfigs.get(clusterName); - - if (dbServiceVersionConfigs.get(serviceVersion) != null) { - dbServiceVersionConfigs.get(serviceVersion).put(serviceName, configType); - } else { - Multimap<String, String> dbServiceConfigs = HashMultimap.create(); - dbServiceConfigs.put(serviceName, configType); - dbServiceVersionConfigs.put(serviceVersion, dbServiceConfigs); - } - } else { - Map<Integer, Multimap<String, String>> dbServiceVersionConfigs = new HashMap<>(); - Multimap<String, String> dbServiceConfigs = HashMultimap.create(); - dbServiceConfigs.put(serviceName, configType); - dbServiceVersionConfigs.put(serviceVersion, dbServiceConfigs); - dbClusterServiceVersionConfigs.put(clusterName, dbServiceVersionConfigs); - } - } - } - - //compare service configs from stack with configs that we got from db - for (Map.Entry<String, Map<String, String>> clusterStackInfoEntry : clusterStackInfo.entrySet()) { - //collect required configs for all services from stack - String clusterName = clusterStackInfoEntry.getKey(); - Map<String, String> stackInfo = clusterStackInfoEntry.getValue(); - String stackName = stackInfo.keySet().iterator().next(); - String stackVersion = stackInfo.get(stackName); - 
Map<String, ServiceInfo> serviceInfoMap = ambariMetaInfo.getServices(stackName, stackVersion); - for (String serviceName : serviceNames) { - ServiceInfo serviceInfo = serviceInfoMap.get(serviceName); - Set<String> configTypes = serviceInfo.getConfigTypeAttributes().keySet(); - for (String configType : configTypes) { - stackServiceConfigs.put(serviceName, configType); - } - } - - //compare required service configs from stack with mapped service configs from db - Map<Integer, Multimap<String, String>> dbServiceVersionConfigs = dbClusterServiceVersionConfigs.get(clusterName); - for (Integer serviceVersion : dbServiceVersionConfigs.keySet()) { - Multimap<String, String> dbServiceConfigs = dbServiceVersionConfigs.get(serviceVersion); - for (String serviceName : dbServiceConfigs.keySet()) { - Collection<String> serviceConfigsFromStack = stackServiceConfigs.get(serviceName); - Collection<String> serviceConfigsFromDB = dbServiceConfigs.get(serviceName); - if (serviceConfigsFromDB != null && serviceConfigsFromStack != null) { - serviceConfigsFromStack.removeAll(serviceConfigsFromDB); - if (!serviceConfigsFromStack.isEmpty()) { - LOG.error("Required config(s): {} is(are) not available for service {} with service config version {} in cluster {}", - StringUtils.join(serviceConfigsFromStack, ","), serviceName, Integer.toString(serviceVersion), clusterName); - errorAvailable = true; - } - } - } - } - } - - //getting services which has mapped configs which are not selected in clusterconfigmapping - rs = statement.executeQuery(GET_NOT_SELECTED_SERVICE_CONFIGS_QUERY); - if (rs != null) { - String serviceName = null, configType = null, clusterName = null; - while (rs.next()) { - clusterName = rs.getString("cluster_name"); - serviceName = rs.getString("service_name"); - configType = rs.getString("type_name"); - - - if (clusterServiceConfigType.get(clusterName) != null) { - Multimap<String, String> serviceConfigs = clusterServiceConfigType.get(clusterName); - serviceConfigs.put(serviceName, configType); - } else { - - Multimap<String, String> serviceConfigs = HashMultimap.create(); - serviceConfigs.put(serviceName, configType); - clusterServiceConfigType.put(clusterName, serviceConfigs); - - } - - } - } - - for (String clusterName : clusterServiceConfigType.keySet()) { - Multimap<String, String> serviceConfig = clusterServiceConfigType.get(clusterName); - for (String serviceName : serviceConfig.keySet()) { - LOG.error("You have non selected configs: {} for service {} from cluster {}!", StringUtils.join(serviceConfig.get(serviceName), ","), serviceName, clusterName); - errorAvailable = true; - } - } - } catch (SQLException e) { - LOG.error("Exception occurred during complex service check procedure: ", e); - } catch (AmbariException e) { - LOG.error("Exception occurred during complex service check procedure: ", e); - } finally { - if (rs != null) { - try { - rs.close(); - } catch (SQLException e) { - LOG.error("Exception occurred during result set closing procedure: ", e); - } - } - } - - } - - /* - * Main method from which we are calling all checks - * */ - public static void main(String[] args) throws Exception { - CheckDatabaseHelper checkDatabaseHelper = null; - try { - LOG.info("******************************* Check database started *******************************"); - - Injector injector = Guice.createInjector(new CheckHelperControllerModule(), new CheckHelperAuditModule()); - checkDatabaseHelper = injector.getInstance(CheckDatabaseHelper.class); - - checkDatabaseHelper.startPersistenceService(); - - 
checkDatabaseHelper.init(); - - checkDatabaseHelper.checkForNotMappedConfigsToCluster(); - - checkDatabaseHelper.checkForConfigsSelectedMoreThanOnce(); - - checkDatabaseHelper.checkForHostsWithoutState(); - - checkDatabaseHelper.checkHostComponentStatesCountEqualsHostComponentsDesiredStates(); - - checkDatabaseHelper.checkServiceConfigs(); - - checkDatabaseHelper.stopPersistenceService(); - - LOG.info("******************************* Check database completed *******************************"); - } catch (Throwable e) { - if (e instanceof AmbariException) { - LOG.error("Exception occurred during database check:", e); - throw (AmbariException)e; - }else{ - LOG.error("Unexpected error, database check failed", e); - throw new Exception("Unexpected error, database check failed", e); - } - } finally { - if (checkDatabaseHelper != null) { - checkDatabaseHelper.closeConnection(); - if (checkDatabaseHelper.isErrorAvailable() || checkDatabaseHelper.isWarningAvailable()) { - System.out.print("Some error(s) or/and warning(s) was(were) found. Please check ambari-server-check-database.log for problem(s)."); - } else { - System.out.print("No erros were found."); - } - } - } - } -} http://git-wip-us.apache.org/repos/asf/ambari/blob/cb8dca09/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java new file mode 100644 index 0000000..d85b6f2 --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyCheckHelper.java @@ -0,0 +1,579 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.ambari.server.checks; + +import java.io.File; +import java.io.IOException; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Scanner; +import java.util.Set; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.api.services.AmbariMetaInfo; +import org.apache.ambari.server.configuration.Configuration; +import org.apache.ambari.server.orm.DBAccessor; +import org.apache.ambari.server.orm.dao.MetainfoDAO; +import org.apache.ambari.server.orm.entities.MetainfoEntity; +import org.apache.ambari.server.state.ServiceInfo; +import org.apache.ambari.server.utils.VersionUtils; +import org.apache.commons.lang.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.HashMultimap; +import com.google.common.collect.Multimap; +import com.google.inject.Inject; +import com.google.inject.Injector; + +public class DatabaseConsistencyCheckHelper { + + static Logger LOG = LoggerFactory.getLogger(DatabaseConsistencyCheckHelper.class); + + @Inject + private static Injector injector; + + private static MetainfoDAO metainfoDAO; + private static Connection connection; + private static AmbariMetaInfo ambariMetaInfo; + private static DBAccessor dbAccessor; + + + private static boolean errorAvailable = false; + private static boolean warningAvailable = false; + + public static boolean isErrorAvailable() { + return errorAvailable; + } + + public static void setErrorAvailable(boolean errorAvailable) { + errorAvailable = errorAvailable; + } + + public static boolean isWarningAvailable() { + return warningAvailable; + } + + public static void setWarningAvailable(boolean warningAvailable) { + warningAvailable = warningAvailable; + } + + public static void resetErrorWarningFlags() { + errorAvailable = false; + warningAvailable = false; + } + + protected static void setInjector(Injector injector) { + DatabaseConsistencyCheckHelper.injector = injector; + } + + public static void setConnection(Connection connection) { + DatabaseConsistencyCheckHelper.connection = connection; + } + + /* + * method to close connection + * */ + public static void closeConnection() { + try { + if (connection != null) { + connection.close(); + } + } catch (SQLException e) { + LOG.error("Exception occurred during connection close procedure: ", e); + } + } + + public static void checkDBVersionCompatible() throws AmbariException { + LOG.info("Checking DB store version"); + + if (metainfoDAO == null) { + metainfoDAO = injector.getInstance(MetainfoDAO.class); + } + + MetainfoEntity schemaVersionEntity = metainfoDAO.findByKey(Configuration.SERVER_VERSION_KEY); + String schemaVersion = null; + + if (schemaVersionEntity != null) { + schemaVersion = schemaVersionEntity.getMetainfoValue(); + } + + Configuration conf = injector.getInstance(Configuration.class); + File versionFile = new File(conf.getServerVersionFilePath()); + if (!versionFile.exists()) { + throw new AmbariException("Server version file does not exist."); + } + String serverVersion = null; + try (Scanner scanner = new Scanner(versionFile)) { + serverVersion = scanner.useDelimiter("\\Z").next(); + + } catch (IOException ioe) { + throw new AmbariException("Unable to read server version file."); + } + + if (schemaVersionEntity==null || VersionUtils.compareVersions(schemaVersion, serverVersion, 3) != 0) { + 
String error = "Current database store version is not compatible with " + + "current server version" + + ", serverVersion=" + serverVersion + + ", schemaVersion=" + schemaVersion; + LOG.error(error); + throw new AmbariException(error); + } + + LOG.info("DB store version is compatible"); + } + + public static void checkForNotMappedConfigsToCluster() { + LOG.info("Checking for configs not mapped to any cluster"); + + String GET_NOT_MAPPED_CONFIGS_QUERY = "select type_name from clusterconfig where type_name not in (select type_name from clusterconfigmapping)"; + Set<String> nonSelectedConfigs = new HashSet<>(); + ResultSet rs = null; + + if (connection == null) { + if (dbAccessor == null) { + dbAccessor = injector.getInstance(DBAccessor.class); + } + connection = dbAccessor.getConnection(); + } + + try { + Statement statement = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE); + rs = statement.executeQuery(GET_NOT_MAPPED_CONFIGS_QUERY); + if (rs != null) { + while (rs.next()) { + nonSelectedConfigs.add(rs.getString("type_name")); + } + } + if (!nonSelectedConfigs.isEmpty()) { + LOG.warn("You have config(s): {} that is(are) not mapped (in clusterconfigmapping table) to any cluster!", StringUtils.join(nonSelectedConfigs, ",")); + warningAvailable = true; + } + } catch (SQLException e) { + LOG.error("Exception occurred during check for not mapped configs to cluster procedure: ", e); + } finally { + if (rs != null) { + try { + rs.close(); + } catch (SQLException e) { + LOG.error("Exception occurred during result set closing procedure: ", e); + } + } + } + } + + /* + * This method checks if any config type in clusterconfigmapping table, has + * more than one versions selected. If config version is selected(in selected column = 1), + * it means that this version of config is actual. So, if any config type has more + * than one selected version it's a bug and we are showing error message for user. 
+ * */ + public static void checkForConfigsSelectedMoreThanOnce() { + LOG.info("Checking for configs selected more than once"); + + String GET_CONFIGS_SELECTED_MORE_THAN_ONCE_QUERY = "select c.cluster_name, ccm.type_name from clusterconfigmapping ccm " + + "join clusters c on ccm.cluster_id=c.cluster_id " + + "group by c.cluster_name, ccm.type_name " + + "having sum(selected) > 1"; + Multimap<String, String> clusterConfigTypeMap = HashMultimap.create(); + ResultSet rs = null; + + if (connection == null) { + if (dbAccessor == null) { + dbAccessor = injector.getInstance(DBAccessor.class); + } + connection = dbAccessor.getConnection(); + } + + try { + Statement statement = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE); + rs = statement.executeQuery(GET_CONFIGS_SELECTED_MORE_THAN_ONCE_QUERY); + if (rs != null) { + while (rs.next()) { + clusterConfigTypeMap.put(rs.getString("cluster_name"), rs.getString("type_name")); + } + + for (String clusterName : clusterConfigTypeMap.keySet()) { + LOG.error("You have config(s), in cluster {}, that is(are) selected more than once in clusterconfigmapping table: {}", + clusterName ,StringUtils.join(clusterConfigTypeMap.get(clusterName), ",")); + errorAvailable = true; + } + } + + } catch (SQLException e) { + LOG.error("Exception occurred during check for config selected more than ones procedure: ", e); + } finally { + if (rs != null) { + try { + rs.close(); + } catch (SQLException e) { + LOG.error("Exception occurred during result set closing procedure: ", e); + } + } + } + } + + /* + * This method checks if all hosts from hosts table + * has related host state info in hoststate table. + * If not then we are showing error. + * */ + public static void checkForHostsWithoutState() { + LOG.info("Checking for hosts without state"); + + String GET_HOSTS_WITHOUT_STATUS_QUERY = "select host_name from hosts where host_id not in (select host_id from hoststate)"; + Set<String> hostsWithoutStatus = new HashSet<>(); + ResultSet rs = null; + + if (connection == null) { + if (dbAccessor == null) { + dbAccessor = injector.getInstance(DBAccessor.class); + } + connection = dbAccessor.getConnection(); + } + + try { + Statement statement = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE); + rs = statement.executeQuery(GET_HOSTS_WITHOUT_STATUS_QUERY); + if (rs != null) { + while (rs.next()) { + hostsWithoutStatus.add(rs.getString("host_name")); + } + + if (!hostsWithoutStatus.isEmpty()) { + LOG.error("You have host(s) without state (in hoststate table): " + StringUtils.join(hostsWithoutStatus, ",")); + errorAvailable = true; + } + } + + } catch (SQLException e) { + LOG.error("Exception occurred during check for host without state procedure: ", e); + } finally { + if (rs != null) { + try { + rs.close(); + } catch (SQLException e) { + LOG.error("Exception occurred during result set closing procedure: ", e); + } + } + } + } + + /* + * This method checks if count of host component states equals count + * of desired host component states. According to ambari logic these + * two tables should have the same count of rows. If not then we are + * showing error for user. 
+ * */ + public static void checkHostComponentStatesCountEqualsHostComponentsDesiredStates() { + LOG.info("Checking host component states count equals host component desired states count"); + + String GET_HOST_COMPONENT_STATE_COUNT_QUERY = "select count(*) from hostcomponentstate"; + String GET_HOST_COMPONENT_DESIRED_STATE_COUNT_QUERY = "select count(*) from hostcomponentdesiredstate"; + String GET_MERGED_TABLE_ROW_COUNT_QUERY = "select count(*) FROM hostcomponentstate hcs " + + "JOIN hostcomponentdesiredstate hcds ON hcs.service_name=hcds.service_name AND hcs.component_name=hcds.component_name AND hcs.host_id=hcds.host_id"; + int hostComponentStateCount = 0; + int hostComponentDesiredStateCount = 0; + int mergedCount = 0; + ResultSet rs = null; + + if (connection == null) { + if (dbAccessor == null) { + dbAccessor = injector.getInstance(DBAccessor.class); + } + connection = dbAccessor.getConnection(); + } + + try { + Statement statement = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE); + + rs = statement.executeQuery(GET_HOST_COMPONENT_STATE_COUNT_QUERY); + if (rs != null) { + while (rs.next()) { + hostComponentStateCount = rs.getInt(1); + } + } + + rs = statement.executeQuery(GET_HOST_COMPONENT_DESIRED_STATE_COUNT_QUERY); + if (rs != null) { + while (rs.next()) { + hostComponentDesiredStateCount = rs.getInt(1); + } + } + + rs = statement.executeQuery(GET_MERGED_TABLE_ROW_COUNT_QUERY); + if (rs != null) { + while (rs.next()) { + mergedCount = rs.getInt(1); + } + } + + if (hostComponentStateCount != hostComponentDesiredStateCount || hostComponentStateCount != mergedCount) { + LOG.error("Your host component states (hostcomponentstate table) count not equals host component desired states (hostcomponentdesiredstate table) count!"); + errorAvailable = true; + } + + } catch (SQLException e) { + LOG.error("Exception occurred during check for same count of host component states and host component desired states: ", e); + } finally { + if (rs != null) { + try { + rs.close(); + } catch (SQLException e) { + LOG.error("Exception occurred during result set closing procedure: ", e); + } + } + } + + } + + + /* + * This method checks several potential problems for services: + * 1) Check if we have services in cluster which doesn't have service config id(not available in serviceconfig table). + * 2) Check if service has no mapped configs to it's service config id. + * 3) Check if service has all required configs mapped to it. + * 4) Check if service has config which is not selected(has no actual config version) in clusterconfigmapping table. + * If any issue was discovered, we are showing error message for user. 
+ * */ + public static void checkServiceConfigs() { + LOG.info("Checking services and their configs"); + + String GET_SERVICES_WITHOUT_CONFIGS_QUERY = "select c.cluster_name, service_name from clusterservices cs " + + "join clusters c on cs.cluster_id=c.cluster_id " + + "where service_name not in (select service_name from serviceconfig sc where sc.cluster_id=cs.cluster_id and sc.service_name=cs.service_name and sc.group_id is null)"; + String GET_SERVICE_CONFIG_WITHOUT_MAPPING_QUERY = "select c.cluster_name, sc.service_name, sc.version from serviceconfig sc " + + "join clusters c on sc.cluster_id=c.cluster_id " + + "where service_config_id not in (select service_config_id from serviceconfigmapping) and group_id is null"; + String GET_STACK_NAME_VERSION_QUERY = "select c.cluster_name, s.stack_name, s.stack_version from clusters c " + + "join stack s on c.desired_stack_id = s.stack_id"; + String GET_SERVICES_WITH_CONFIGS_QUERY = "select c.cluster_name, cs.service_name, cc.type_name, sc.version from clusterservices cs " + + "join serviceconfig sc on cs.service_name=sc.service_name and cs.cluster_id=sc.cluster_id " + + "join serviceconfigmapping scm on sc.service_config_id=scm.service_config_id " + + "join clusterconfig cc on scm.config_id=cc.config_id and sc.cluster_id=cc.cluster_id " + + "join clusters c on cc.cluster_id=c.cluster_id and sc.stack_id=c.desired_stack_id " + + "where sc.group_id is null and sc.service_config_id=(select max(service_config_id) from serviceconfig sc2 where sc2.service_name=sc.service_name and sc2.cluster_id=sc.cluster_id) " + + "group by c.cluster_name, cs.service_name, cc.type_name, sc.version"; + String GET_NOT_SELECTED_SERVICE_CONFIGS_QUERY = "select c.cluster_name, cs.service_name, cc.type_name from clusterservices cs " + + "join serviceconfig sc on cs.service_name=sc.service_name and cs.cluster_id=sc.cluster_id " + + "join serviceconfigmapping scm on sc.service_config_id=scm.service_config_id " + + "join clusterconfig cc on scm.config_id=cc.config_id and cc.cluster_id=sc.cluster_id " + + "join clusterconfigmapping ccm on cc.type_name=ccm.type_name and cc.version_tag=ccm.version_tag and cc.cluster_id=ccm.cluster_id " + + "join clusters c on ccm.cluster_id=c.cluster_id " + + "where sc.group_id is null and sc.service_config_id = (select max(service_config_id) from serviceconfig sc2 where sc2.service_name=sc.service_name and sc2.cluster_id=sc.cluster_id) " + + "group by c.cluster_name, cs.service_name, cc.type_name " + + "having sum(ccm.selected) < 1"; + Multimap<String, String> clusterServiceMap = HashMultimap.create(); + Map<String, Map<String, String>> clusterStackInfo = new HashMap<>(); + Map<String, Multimap<String, String>> clusterServiceVersionMap = new HashMap<>(); + Map<String, Multimap<String, String>> clusterServiceConfigType = new HashMap<>(); + ResultSet rs = null; + + if (connection == null) { + if (dbAccessor == null) { + dbAccessor = injector.getInstance(DBAccessor.class); + } + connection = dbAccessor.getConnection(); + } + + if (ambariMetaInfo == null) { + ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class); + } + + try { + Statement statement = connection.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, ResultSet.CONCUR_UPDATABLE); + + rs = statement.executeQuery(GET_SERVICES_WITHOUT_CONFIGS_QUERY); + if (rs != null) { + while (rs.next()) { + clusterServiceMap.put(rs.getString("cluster_name"), rs.getString("service_name")); + } + + for (String clusterName : clusterServiceMap.keySet()) { + LOG.error("Service(s): {}, from cluster {} has 
no config(s) in serviceconfig table!", StringUtils.join(clusterServiceMap.get(clusterName), ","), clusterName); + errorAvailable = true; + } + + } + + rs = statement.executeQuery(GET_SERVICE_CONFIG_WITHOUT_MAPPING_QUERY); + if (rs != null) { + String serviceName = null, version = null, clusterName = null; + while (rs.next()) { + serviceName = rs.getString("service_name"); + clusterName = rs.getString("cluster_name"); + version = rs.getString("version"); + + if (clusterServiceVersionMap.get(clusterName) != null) { + Multimap<String, String> serviceVersion = clusterServiceVersionMap.get(clusterName); + serviceVersion.put(serviceName, version); + } else { + Multimap<String, String> serviceVersion = HashMultimap.create();; + serviceVersion.put(serviceName, version); + clusterServiceVersionMap.put(clusterName, serviceVersion); + } + } + + for (String clName : clusterServiceVersionMap.keySet()) { + Multimap<String, String> serviceVersion = clusterServiceVersionMap.get(clName); + for (String servName : serviceVersion.keySet()) { + LOG.error("In cluster {}, service config mapping is unavailable (in table serviceconfigmapping) for service {} with version(s) {}! ", clName, servName, StringUtils.join(serviceVersion.get(servName), ",")); + errorAvailable = true; + } + } + + } + + //get stack info from db + rs = statement.executeQuery(GET_STACK_NAME_VERSION_QUERY); + if (rs != null) { + while (rs.next()) { + Map<String, String> stackInfoMap = new HashMap<>(); + stackInfoMap.put(rs.getString("stack_name"), rs.getString("stack_version")); + clusterStackInfo.put(rs.getString("cluster_name"), stackInfoMap); + } + } + + + Set<String> serviceNames = new HashSet<>(); + Map<String, Map<Integer, Multimap<String, String>>> dbClusterServiceVersionConfigs = new HashMap<>(); + Multimap<String, String> stackServiceConfigs = HashMultimap.create(); + + rs = statement.executeQuery(GET_SERVICES_WITH_CONFIGS_QUERY); + if (rs != null) { + String serviceName = null, configType = null, clusterName = null; + Integer serviceVersion = null; + while (rs.next()) { + clusterName = rs.getString("cluster_name"); + serviceName = rs.getString("service_name"); + configType = rs.getString("type_name"); + serviceVersion = rs.getInt("version"); + + serviceNames.add(serviceName); + + //collect data about mapped configs to services from db + if (dbClusterServiceVersionConfigs.get(clusterName) != null) { + Map<Integer, Multimap<String, String>> dbServiceVersionConfigs = dbClusterServiceVersionConfigs.get(clusterName); + + if (dbServiceVersionConfigs.get(serviceVersion) != null) { + dbServiceVersionConfigs.get(serviceVersion).put(serviceName, configType); + } else { + Multimap<String, String> dbServiceConfigs = HashMultimap.create(); + dbServiceConfigs.put(serviceName, configType); + dbServiceVersionConfigs.put(serviceVersion, dbServiceConfigs); + } + } else { + Map<Integer, Multimap<String, String>> dbServiceVersionConfigs = new HashMap<>(); + Multimap<String, String> dbServiceConfigs = HashMultimap.create(); + dbServiceConfigs.put(serviceName, configType); + dbServiceVersionConfigs.put(serviceVersion, dbServiceConfigs); + dbClusterServiceVersionConfigs.put(clusterName, dbServiceVersionConfigs); + } + } + } + + //compare service configs from stack with configs that we got from db + for (Map.Entry<String, Map<String, String>> clusterStackInfoEntry : clusterStackInfo.entrySet()) { + //collect required configs for all services from stack + String clusterName = clusterStackInfoEntry.getKey(); + Map<String, String> stackInfo = 
clusterStackInfoEntry.getValue(); + String stackName = stackInfo.keySet().iterator().next(); + String stackVersion = stackInfo.get(stackName); + Map<String, ServiceInfo> serviceInfoMap = ambariMetaInfo.getServices(stackName, stackVersion); + for (String serviceName : serviceNames) { + ServiceInfo serviceInfo = serviceInfoMap.get(serviceName); + Set<String> configTypes = serviceInfo.getConfigTypeAttributes().keySet(); + for (String configType : configTypes) { + stackServiceConfigs.put(serviceName, configType); + } + } + + //compare required service configs from stack with mapped service configs from db + Map<Integer, Multimap<String, String>> dbServiceVersionConfigs = dbClusterServiceVersionConfigs.get(clusterName); + for (Integer serviceVersion : dbServiceVersionConfigs.keySet()) { + Multimap<String, String> dbServiceConfigs = dbServiceVersionConfigs.get(serviceVersion); + for (String serviceName : dbServiceConfigs.keySet()) { + Collection<String> serviceConfigsFromStack = stackServiceConfigs.get(serviceName); + Collection<String> serviceConfigsFromDB = dbServiceConfigs.get(serviceName); + if (serviceConfigsFromDB != null && serviceConfigsFromStack != null) { + serviceConfigsFromStack.removeAll(serviceConfigsFromDB); + if (!serviceConfigsFromStack.isEmpty()) { + LOG.error("Required config(s): {} is(are) not available for service {} with service config version {} in cluster {}", + StringUtils.join(serviceConfigsFromStack, ","), serviceName, Integer.toString(serviceVersion), clusterName); + errorAvailable = true; + } + } + } + } + } + + //getting services which has mapped configs which are not selected in clusterconfigmapping + rs = statement.executeQuery(GET_NOT_SELECTED_SERVICE_CONFIGS_QUERY); + if (rs != null) { + String serviceName = null, configType = null, clusterName = null; + while (rs.next()) { + clusterName = rs.getString("cluster_name"); + serviceName = rs.getString("service_name"); + configType = rs.getString("type_name"); + + + if (clusterServiceConfigType.get(clusterName) != null) { + Multimap<String, String> serviceConfigs = clusterServiceConfigType.get(clusterName); + serviceConfigs.put(serviceName, configType); + } else { + + Multimap<String, String> serviceConfigs = HashMultimap.create(); + serviceConfigs.put(serviceName, configType); + clusterServiceConfigType.put(clusterName, serviceConfigs); + + } + + } + } + + for (String clusterName : clusterServiceConfigType.keySet()) { + Multimap<String, String> serviceConfig = clusterServiceConfigType.get(clusterName); + for (String serviceName : serviceConfig.keySet()) { + LOG.error("You have non selected configs: {} for service {} from cluster {}!", StringUtils.join(serviceConfig.get(serviceName), ","), serviceName, clusterName); + errorAvailable = true; + } + } + } catch (SQLException e) { + LOG.error("Exception occurred during complex service check procedure: ", e); + } catch (AmbariException e) { + LOG.error("Exception occurred during complex service check procedure: ", e); + } finally { + if (rs != null) { + try { + rs.close(); + } catch (SQLException e) { + LOG.error("Exception occurred during result set closing procedure: ", e); + } + } + } + + } + + +} http://git-wip-us.apache.org/repos/asf/ambari/blob/cb8dca09/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyChecker.java ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyChecker.java 
b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyChecker.java new file mode 100644 index 0000000..535d74f --- /dev/null +++ b/ambari-server/src/main/java/org/apache/ambari/server/checks/DatabaseConsistencyChecker.java @@ -0,0 +1,156 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ambari.server.checks; + +import java.util.Enumeration; + +import org.apache.ambari.server.AmbariException; +import org.apache.ambari.server.audit.AuditLoggerModule; +import org.apache.ambari.server.controller.ControllerModule; +import org.apache.ambari.server.orm.DBAccessor; +import org.apache.ambari.server.utils.EventBusSynchronizer; +import org.apache.log4j.FileAppender; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.slf4j.impl.Log4jLoggerAdapter; + +import com.google.inject.Guice; +import com.google.inject.Inject; +import com.google.inject.Injector; +import com.google.inject.persist.PersistService; + +public class DatabaseConsistencyChecker { + private static final Logger LOG = LoggerFactory.getLogger + (DatabaseConsistencyChecker.class); + + + private PersistService persistService; + private DBAccessor dbAccessor; + private Injector injector; + + + @Inject + public DatabaseConsistencyChecker(DBAccessor dbAccessor, + Injector injector, + PersistService persistService) { + this.dbAccessor = dbAccessor; + this.injector = injector; + this.persistService = persistService; + } + + /** + * Extension of main controller module + */ + public static class CheckHelperControllerModule extends ControllerModule { + + public CheckHelperControllerModule() throws Exception { + } + + @Override + protected void configure() { + super.configure(); + EventBusSynchronizer.synchronizeAmbariEventPublisher(binder()); + } + } + + /** + * Extension of audit logger module + */ + public static class CheckHelperAuditModule extends AuditLoggerModule { + + public CheckHelperAuditModule() throws Exception { + } + + @Override + protected void configure() { + super.configure(); + } + + } + + public void startPersistenceService() { + persistService.start(); + } + + public void stopPersistenceService() { + persistService.stop(); + } + + /* + * Main method from which we are calling all checks + * */ + public static void main(String[] args) throws Exception { + DatabaseConsistencyChecker databaseConsistencyChecker = null; + try { + LOG.info("******************************* Check database started *******************************"); + + Injector injector = Guice.createInjector(new CheckHelperControllerModule(), new CheckHelperAuditModule()); + databaseConsistencyChecker = injector.getInstance(DatabaseConsistencyChecker.class); + + databaseConsistencyChecker.startPersistenceService(); + + 
DatabaseConsistencyCheckHelper.checkForNotMappedConfigsToCluster(); + + DatabaseConsistencyCheckHelper.checkForConfigsSelectedMoreThanOnce(); + + DatabaseConsistencyCheckHelper.checkForHostsWithoutState(); + + DatabaseConsistencyCheckHelper.checkHostComponentStatesCountEqualsHostComponentsDesiredStates(); + + DatabaseConsistencyCheckHelper.checkServiceConfigs(); + + databaseConsistencyChecker.stopPersistenceService(); + + LOG.info("******************************* Check database completed *******************************"); + } catch (Throwable e) { + if (e instanceof AmbariException) { + LOG.error("Exception occurred during database check:", e); + throw (AmbariException)e; + }else{ + LOG.error("Unexpected error, database check failed", e); + throw new Exception("Unexpected error, database check failed", e); + } + } finally { + DatabaseConsistencyCheckHelper.closeConnection(); + if (DatabaseConsistencyCheckHelper.isErrorAvailable()) { + String ambariDBConsistencyCheckLog = "ambari-server-check-database.log"; + if (LOG instanceof Log4jLoggerAdapter) { + org.apache.log4j.Logger dbConsistencyCheckHelperLogger = org.apache.log4j.Logger.getLogger(DatabaseConsistencyCheckHelper.class); + Enumeration appenders = dbConsistencyCheckHelperLogger.getAllAppenders(); + while (appenders.hasMoreElements()) { + Object appender = appenders.nextElement(); + if (appender instanceof FileAppender) { + ambariDBConsistencyCheckLog = ((FileAppender) appender).getFile(); + break; + } + } + } + ambariDBConsistencyCheckLog = ambariDBConsistencyCheckLog.replace("//", "/"); + System.out.print(String.format("DB configs consistency check failed. Run \"ambari-server start --skip-database-check\" to skip. " + + "If you use this \"--skip-database-check\" option, do not make any changes to your cluster topology " + + "or perform a cluster upgrade until you correct the database consistency issues. 
See \"%s\" " + + "for more details on the consistency issues.", ambariDBConsistencyCheckLog)); + } else { + System.out.print("No errors were found."); + } + + } + } + + +} http://git-wip-us.apache.org/repos/asf/ambari/blob/cb8dca09/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java index 8476ec6..a071c9b 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java @@ -19,6 +19,9 @@ package org.apache.ambari.server.controller; +import javax.crypto.BadPaddingException; +import javax.servlet.DispatcherType; + import java.io.File; import java.io.IOException; import java.net.Authenticator; @@ -31,9 +34,6 @@ import java.util.HashMap; import java.util.Map; import java.util.logging.LogManager; -import javax.crypto.BadPaddingException; -import javax.servlet.DispatcherType; - import org.apache.ambari.eventdb.webservice.WorkflowJsonService; import org.apache.ambari.server.AmbariException; import org.apache.ambari.server.StateRecoveryManager; @@ -58,6 +58,7 @@ import org.apache.ambari.server.audit.AuditLogger; import org.apache.ambari.server.audit.AuditLoggerModule; import org.apache.ambari.server.audit.request.RequestAuditLogger; import org.apache.ambari.server.bootstrap.BootStrapImpl; +import org.apache.ambari.server.checks.DatabaseConsistencyCheckHelper; import org.apache.ambari.server.configuration.ComponentSSLConfiguration; import org.apache.ambari.server.configuration.Configuration; import org.apache.ambari.server.controller.internal.AbstractControllerResourceProvider; @@ -75,7 +76,6 @@ import org.apache.ambari.server.controller.internal.StackDependencyResourceProvi import org.apache.ambari.server.controller.internal.UserPrivilegeResourceProvider; import org.apache.ambari.server.controller.internal.ViewPermissionResourceProvider; import org.apache.ambari.server.controller.metrics.ThreadPoolEnabledPropertyProvider; -import org.apache.ambari.server.controller.utilities.DatabaseChecker; import org.apache.ambari.server.controller.utilities.KerberosChecker; import org.apache.ambari.server.orm.GuiceJpaInitializer; import org.apache.ambari.server.orm.PersistenceType; @@ -295,11 +295,6 @@ public class AmbariServer { setSystemProperties(configs); - if (System.getProperty("skipDatabaseConsistencyValidation") == null) { - DatabaseChecker.checkDBConsistency(); - DatabaseChecker.checkDBConfigsConsistency(); - } - try { ClassPathXmlApplicationContext parentSpringAppContext = new ClassPathXmlApplicationContext(); @@ -957,7 +952,7 @@ public class AmbariServer { setupProxyAuth(); injector.getInstance(GuiceJpaInitializer.class); - DatabaseChecker.checkDBVersion(); + DatabaseConsistencyCheckHelper.checkDBVersionCompatible(); server = injector.getInstance(AmbariServer.class); CertificateManager certMan = injector.getInstance(CertificateManager.class); certMan.initRootCert(); http://git-wip-us.apache.org/repos/asf/ambari/blob/cb8dca09/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java 
b/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java index 992f2d2..617553b 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java @@ -55,6 +55,7 @@ import org.apache.ambari.server.actionmanager.RequestFactory; import org.apache.ambari.server.actionmanager.StageFactory; import org.apache.ambari.server.actionmanager.StageFactoryImpl; import org.apache.ambari.server.checks.AbstractCheckDescriptor; +import org.apache.ambari.server.checks.DatabaseConsistencyCheckHelper; import org.apache.ambari.server.checks.UpgradeCheckRegistry; import org.apache.ambari.server.configuration.Configuration; import org.apache.ambari.server.configuration.Configuration.ConnectionPoolType; @@ -72,7 +73,6 @@ import org.apache.ambari.server.controller.internal.UpgradeResourceProvider; import org.apache.ambari.server.controller.metrics.timeline.cache.TimelineMetricCacheEntryFactory; import org.apache.ambari.server.controller.metrics.timeline.cache.TimelineMetricCacheProvider; import org.apache.ambari.server.controller.spi.ResourceProvider; -import org.apache.ambari.server.controller.utilities.DatabaseChecker; import org.apache.ambari.server.controller.utilities.KerberosChecker; import org.apache.ambari.server.notifications.DispatchFactory; import org.apache.ambari.server.notifications.NotificationDispatcher; @@ -367,7 +367,7 @@ public class ControllerModule extends AbstractModule { bind(AuthenticationEntryPoint.class).to(AmbariEntryPoint.class).in(Scopes.SINGLETON); - requestStaticInjection(DatabaseChecker.class); + requestStaticInjection(DatabaseConsistencyCheckHelper.class); requestStaticInjection(KerberosChecker.class); requestStaticInjection(AuthorizationHelper.class); http://git-wip-us.apache.org/repos/asf/ambari/blob/cb8dca09/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/DatabaseChecker.java ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/DatabaseChecker.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/DatabaseChecker.java index 1cc9684..d35fc1a 100644 --- a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/DatabaseChecker.java +++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/DatabaseChecker.java @@ -54,6 +54,9 @@ import org.slf4j.LoggerFactory; import com.google.inject.Inject; import com.google.inject.Injector; + +/*This class should not be used anymore +* now we will use DatabaseConsistencyChecker*/ public class DatabaseChecker { static Logger LOG = LoggerFactory.getLogger(DatabaseChecker.class); http://git-wip-us.apache.org/repos/asf/ambari/blob/cb8dca09/ambari-server/src/main/python/ambari-server.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/python/ambari-server.py b/ambari-server/src/main/python/ambari-server.py index d211e9c..f929042 100755 --- a/ambari-server/src/main/python/ambari-server.py +++ b/ambari-server/src/main/python/ambari-server.py @@ -330,7 +330,7 @@ def init_parser_options(parser): parser.add_option('--jdbc-driver', default=None, dest="jdbc_driver", help="Specifies the path to the JDBC driver JAR file") parser.add_option('--skip-properties-validation', action="store_true", default=False, help="Skip properties file validation", 
dest="skip_properties_validation") - parser.add_option('--skip-database-validation', action="store_true", default=False, help="Skip database consistency validation", dest="skip_database_validation") + parser.add_option('--skip-database-check', action="store_true", default=False, help="Skip database consistency check", dest="skip_database_check") parser.add_option('--mpack', default=None, help="Specified the path for management pack to be installed/upgraded", dest="mpack_path") @@ -404,7 +404,7 @@ def init_parser_options(parser): parser.add_option('--cluster-name', default=None, help="Cluster name", dest="cluster_name") parser.add_option('--version-display-name', default=None, help="Display name of desired repo version", dest="desired_repo_version") parser.add_option('--skip-properties-validation', action="store_true", default=False, help="Skip properties file validation", dest="skip_properties_validation") - parser.add_option('--skip-database-validation', action="store_true", default=False, help="Skip database consistency validation", dest="skip_database_validation") + parser.add_option('--skip-database-check', action="store_true", default=False, help="Skip database consistency check", dest="skip_database_check") parser.add_option('--force-version', action="store_true", default=False, help="Force version to current", dest="force_repo_version") parser.add_option('--version', dest="stack_versions", default=None, action="append", type="string", help="Specify stack version that needs to be enabled. All other stacks versions will be disabled") http://git-wip-us.apache.org/repos/asf/ambari/blob/cb8dca09/ambari-server/src/main/python/ambari_server/checkDatabase.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/python/ambari_server/checkDatabase.py b/ambari-server/src/main/python/ambari_server/checkDatabase.py index 2b5c735..831efde 100644 --- a/ambari-server/src/main/python/ambari_server/checkDatabase.py +++ b/ambari-server/src/main/python/ambari_server/checkDatabase.py @@ -39,7 +39,7 @@ from ambari_server.serverUtils import is_server_runing from ambari_server.userInput import get_YN_input CHECK_DATABASE_HELPER_CMD = "{0} -cp {1} " + \ - "org.apache.ambari.server.checks.CheckDatabaseHelper" + "org.apache.ambari.server.checks.DatabaseConsistencyChecker" def check_database(options): @@ -74,13 +74,20 @@ def check_database(options): environ = setupSecurity.generate_env(options, ambari_user, current_user) (retcode, stdout, stderr) = os_utils.run_os_command(command, env=environ) - print_info_msg("Return code from check database command, retcode = " + str(retcode)) + if retcode > 0: - print_error_msg("Database check failed to complete. Please check ambari-server.log and ambari-server-check-database.log for problem.") - raise FatalException(1, 'Database check failed.') + print str(stdout) + raise FatalException(1, 'Database check failed to complete. 
Please check ' + configDefaults.SERVER_LOG_FILE + + ' and ' + configDefaults.DB_CHECK_LOG + ' for more information.') else: print str(stdout) + if not stdout.startswith("No errors"): + print "Ambari Server 'check-database' completed" + sys.exit(1) + + + http://git-wip-us.apache.org/repos/asf/ambari/blob/cb8dca09/ambari-server/src/main/python/ambari_server/serverConfiguration.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/python/ambari_server/serverConfiguration.py b/ambari-server/src/main/python/ambari_server/serverConfiguration.py index d815365..c28909e 100644 --- a/ambari-server/src/main/python/ambari_server/serverConfiguration.py +++ b/ambari-server/src/main/python/ambari_server/serverConfiguration.py @@ -333,6 +333,7 @@ class ServerConfigDefaults(object): self.OUT_DIR = parse_log4j_file(get_conf_dir() + "/log4j.properties")['ambari.log.dir'].replace("//", "/") self.SERVER_OUT_FILE = os.path.join(self.OUT_DIR, "ambari-server.out") self.SERVER_LOG_FILE = os.path.join(self.OUT_DIR, "ambari-server.log") + self.DB_CHECK_LOG = os.path.join(self.OUT_DIR, "ambari-server-check-database.log") self.ROOT_FS_PATH = os.sep self.JDK_INSTALL_DIR = "" http://git-wip-us.apache.org/repos/asf/ambari/blob/cb8dca09/ambari-server/src/main/python/ambari_server_main.py ---------------------------------------------------------------------- diff --git a/ambari-server/src/main/python/ambari_server_main.py b/ambari-server/src/main/python/ambari_server_main.py index cd0b858..0664e32 100644 --- a/ambari-server/src/main/python/ambari_server_main.py +++ b/ambari-server/src/main/python/ambari_server_main.py @@ -26,7 +26,7 @@ from ambari_commons.logging_utils import get_debug_mode, print_warning_msg, prin set_debug_mode_from_options from ambari_commons.os_check import OSConst from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl -from ambari_commons.os_utils import is_root +from ambari_commons.os_utils import is_root, run_os_command from ambari_server.dbConfiguration import ensure_dbms_is_running, ensure_jdbc_driver_is_installed from ambari_server.serverConfiguration import configDefaults, find_jdk, get_ambari_properties, \ get_conf_dir, get_is_persisted, get_is_secure, get_java_exe_path, get_original_master_key, read_ambari_user, \ @@ -56,6 +56,7 @@ if ambari_provider_module is not None: jvm_args = os.getenv('AMBARI_JVM_ARGS', '-Xms512m -Xmx2048m -XX:MaxPermSize=128m') ENV_FOREGROUND_KEY = "AMBARI_SERVER_RUN_IN_FOREGROUND" +CHECK_DATABASE_HELPER_CMD = "{0} -cp {1} org.apache.ambari.server.checks.DatabaseConsistencyChecker" IS_FOREGROUND = ENV_FOREGROUND_KEY in os.environ and os.environ[ENV_FOREGROUND_KEY].lower() == "true" SERVER_START_CMD = "{0} " \ @@ -283,14 +284,34 @@ def server_process_main(options, scmStatus=None): suspend_start = (debug_mode & 2) or SUSPEND_START_MODE suspend_mode = 'y' if suspend_start else 'n' - if options.skip_database_validation: + environ = generate_env(options, ambari_user, current_user) + class_path = serverClassPath.get_full_ambari_classpath_escaped_for_shell(validate_classpath=True) + + if options.skip_database_check: global jvm_args - jvm_args += " -DskipDatabaseConsistencyValidation" + jvm_args += " -DskipDatabaseConsistencyCheck" + print "Ambari Server is starting with the database consistency check skipped. Do not make any changes to your cluster " \ + "topology or perform a cluster upgrade until you correct the database consistency issues. 
See \"" \ + + configDefaults.DB_CHECK_LOG + "\" for more details on the consistency issues." + else: + print "Ambari database consistency check started..." + command = CHECK_DATABASE_HELPER_CMD.format(java_exe, class_path) + + (retcode, stdout, stderr) = run_os_command(command, env=environ) + + if retcode > 0: + print str(stdout) + raise FatalException(1, 'Database check failed to complete. Please check ' + configDefaults.SERVER_LOG_FILE + + ' and ' + configDefaults.DB_CHECK_LOG + ' for more information.') + else: + print str(stdout) + print "Ambari database consistency check finished" + + if not stdout.startswith("No errors"): + sys.exit(1) + + param_list = generate_child_process_param_list(ambari_user, java_exe, class_path, debug_start, suspend_mode) - param_list = generate_child_process_param_list(ambari_user, java_exe, - serverClassPath.get_full_ambari_classpath_escaped_for_shell(validate_classpath=True), - debug_start, suspend_mode) - environ = generate_env(options, ambari_user, current_user) if not os.path.exists(configDefaults.PID_DIR): os.makedirs(configDefaults.PID_DIR, 0755)

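Editor's note: the core of the new checkServiceConfigs() comparison in DatabaseConsistencyCheckHelper is a per-cluster subtraction — build a Multimap of config types the stack requires for each service, remove the config types actually mapped in the DB for that service config version, and log whatever remains as missing. A minimal, self-contained sketch of that subtraction follows; the service and config-type names are made-up examples, and the sketch copies the stack view before removing so the source multimap is not mutated (the original removes in place).

import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

public class ServiceConfigComparisonSketch {
  public static void main(String[] args) {
    // Config types the stack definition requires, per service (example data).
    Multimap<String, String> stackServiceConfigs = HashMultimap.create();
    stackServiceConfigs.put("HDFS", "hdfs-site");
    stackServiceConfigs.put("HDFS", "core-site");

    // Config types actually mapped to the service config version in the DB (example data).
    Multimap<String, String> dbServiceConfigs = HashMultimap.create();
    dbServiceConfigs.put("HDFS", "hdfs-site");

    for (String serviceName : stackServiceConfigs.keySet()) {
      // Copy the stack view so the subtraction does not mutate stackServiceConfigs.
      Set<String> missing = new HashSet<>(stackServiceConfigs.get(serviceName));
      Collection<String> fromDb = dbServiceConfigs.get(serviceName); // empty, never null
      missing.removeAll(fromDb);
      if (!missing.isEmpty()) {
        System.out.println("Required config(s) " + missing
            + " are not mapped for service " + serviceName);
      }
    }
  }
}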
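Editor's note: the "non selected configs" part of the same check accumulates rows from GET_NOT_SELECTED_SERVICE_CONFIGS_QUERY into one Multimap per cluster, keyed by service name. A sketch of that accumulation, using the same column names the diff reads but not reproducing the query text, and using try-with-resources so both the Statement and the ResultSet are closed:

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.HashMap;
import java.util.Map;

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

public class NotSelectedConfigsSketch {

  // Returns: cluster name -> (service name -> config types mapped but not selected).
  static Map<String, Multimap<String, String>> collect(Connection connection, String query)
      throws SQLException {
    Map<String, Multimap<String, String>> clusterServiceConfigType = new HashMap<>();
    try (Statement statement = connection.createStatement();
         ResultSet rs = statement.executeQuery(query)) {
      while (rs.next()) {
        String clusterName = rs.getString("cluster_name");
        String serviceName = rs.getString("service_name");
        String configType = rs.getString("type_name");

        Multimap<String, String> serviceConfigs = clusterServiceConfigType.get(clusterName);
        if (serviceConfigs == null) {
          serviceConfigs = HashMultimap.create();
          clusterServiceConfigType.put(clusterName, serviceConfigs);
        }
        serviceConfigs.put(serviceName, configType);
      }
    }
    return clusterServiceConfigType;
  }
}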
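Editor's note: the finally block of DatabaseConsistencyChecker.main() decides which file to point the operator at by walking the log4j appenders attached to DatabaseConsistencyCheckHelper's logger — normally the dbcheckhelper FileAppender added to log4j.properties in this commit. A standalone sketch of that lookup, assuming log4j 1.x on the classpath and falling back to the default file name when no FileAppender is configured:

import java.util.Enumeration;

import org.apache.log4j.FileAppender;
import org.apache.log4j.Logger;

public class DbCheckLogResolverSketch {

  static String resolveDbCheckLog() {
    // Fallback used when no FileAppender is attached to the helper's logger.
    String logFile = "ambari-server-check-database.log";
    Logger logger =
        Logger.getLogger("org.apache.ambari.server.checks.DatabaseConsistencyCheckHelper");
    Enumeration<?> appenders = logger.getAllAppenders();
    while (appenders.hasMoreElements()) {
      Object appender = appenders.nextElement();
      if (appender instanceof FileAppender) {
        logFile = ((FileAppender) appender).getFile();
        break;
      }
    }
    // Normalize doubled path separators, as the original message formatting does.
    return logFile.replace("//", "/");
  }

  public static void main(String[] args) {
    System.out.println(resolveDbCheckLog());
  }
}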