AMBARI-9736 - Ambari Exhausts Connection Pool When Using MySQL Due To Invalid
Connections (jonathanhurley)
Conflicts:
ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java
ambari-server/src/main/resources/META-INF/persistence.xml
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3271d1d1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3271d1d1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3271d1d1
Branch: refs/heads/branch-1.7.0
Commit: 3271d1d1f5163390b1039080e4aff9cb7322c4a3
Parents: 00ee76d
Author: Jonathan Hurley <[email protected]>
Authored: Fri Feb 20 15:41:19 2015 -0500
Committer: Jonathan Hurley <[email protected]>
Committed: Fri Feb 20 17:10:19 2015 -0500
----------------------------------------------------------------------
.../server/configuration/Configuration.java | 348 ++++++++++++++-----
.../AmbariManagementControllerImpl.java | 19 +-
.../server/controller/ControllerModule.java | 75 +++-
.../orm/EclipseLinkSessionCustomizer.java | 58 ++++
.../scheduler/ExecutionSchedulerImpl.java | 21 +-
.../server/upgrade/AbstractUpgradeCatalog.java | 101 +++---
.../server/upgrade/UpgradeCatalog150.java | 113 +++---
.../server/upgrade/UpgradeCatalog151.java | 22 +-
.../server/upgrade/UpgradeCatalog160.java | 7 +-
.../server/upgrade/UpgradeCatalog161.java | 35 +-
.../server/upgrade/UpgradeCatalog170.java | 94 ++---
.../src/main/resources/META-INF/persistence.xml | 9 +-
.../server/configuration/ConfigurationTest.java | 53 +++
.../scheduler/ExecutionSchedulerTest.java | 23 +-
14 files changed, 665 insertions(+), 313 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/3271d1d1/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
----------------------------------------------------------------------
diff --git
a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
index 060c936..8c3760a 100644
---
a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+++
b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
@@ -21,6 +21,7 @@ import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
+import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
@@ -81,14 +82,11 @@ public class Configuration {
public static final String SRVR_CRT_NAME_KEY = "security.server.cert_name";
public static final String SRVR_CSR_NAME_KEY = "security.server.csr_name";
public static final String SRVR_KEY_NAME_KEY = "security.server.key_name";
- public static final String KSTR_NAME_KEY =
- "security.server.keystore_name";
- public static final String SRVR_CRT_PASS_FILE_KEY =
- "security.server.crt_pass_file";
+ public static final String KSTR_NAME_KEY = "security.server.keystore_name";
+ public static final String SRVR_CRT_PASS_FILE_KEY =
"security.server.crt_pass_file";
public static final String SRVR_CRT_PASS_KEY = "security.server.crt_pass";
public static final String SRVR_CRT_PASS_LEN_KEY =
"security.server.crt_pass.len";
- public static final String PASSPHRASE_ENV_KEY =
- "security.server.passphrase_env_var";
+ public static final String PASSPHRASE_ENV_KEY =
"security.server.passphrase_env_var";
public static final String PASSPHRASE_KEY = "security.server.passphrase";
public static final String SRVR_DISABLED_CIPHERS =
"security.server.disabled.ciphers";
public static final String SRVR_DISABLED_PROTOCOLS =
"security.server.disabled.protocols";
@@ -111,56 +109,45 @@ public class Configuration {
public static final String SERVER_DB_NAME_KEY = "server.jdbc.database_name";
public static final String SERVER_DB_NAME_DEFAULT = "ambari";
public static final String SERVER_JDBC_POSTGRES_SCHEMA_NAME =
"server.jdbc.postgres.schema";
- public static final String POSTGRES_DB_NAME = "postgres";
- public static final String ORACLE_DB_NAME = "oracle";
- public static final String MYSQL_DB_NAME = "mysql";
- public static final String DERBY_DB_NAME = "derby";
public static final String OJDBC_JAR_NAME_KEY = "db.oracle.jdbc.name";
public static final String OJDBC_JAR_NAME_DEFAULT = "ojdbc6.jar";
public static final String MYSQL_JAR_NAME_KEY = "db.mysql.jdbc.name";
public static final String MYSQL_JAR_NAME_DEFAULT =
"mysql-connector-java.jar";
public static final String IS_LDAP_CONFIGURED = "ambari.ldap.isConfigured";
public static final String LDAP_USE_SSL_KEY = "authentication.ldap.useSSL";
- public static final String LDAP_PRIMARY_URL_KEY =
- "authentication.ldap.primaryUrl";
- public static final String LDAP_SECONDARY_URL_KEY =
- "authentication.ldap.secondaryUrl";
- public static final String LDAP_BASE_DN_KEY =
- "authentication.ldap.baseDn";
- public static final String LDAP_BIND_ANONYMOUSLY_KEY =
- "authentication.ldap.bindAnonymously";
- public static final String LDAP_MANAGER_DN_KEY =
- "authentication.ldap.managerDn";
- public static final String LDAP_MANAGER_PASSWORD_KEY =
- "authentication.ldap.managerPassword";
- public static final String LDAP_USERNAME_ATTRIBUTE_KEY =
- "authentication.ldap.usernameAttribute";
- public static final String LDAP_USER_BASE_KEY =
- "authentication.ldap.userBase";
- public static final String LDAP_USER_OBJECT_CLASS_KEY =
- "authentication.ldap.userObjectClass";
- public static final String LDAP_GROUP_BASE_KEY =
- "authentication.ldap.groupBase";
- public static final String LDAP_GROUP_OBJECT_CLASS_KEY =
- "authentication.ldap.groupObjectClass";
- public static final String LDAP_GROUP_NAMING_ATTR_KEY =
- "authentication.ldap.groupNamingAttr";
- public static final String LDAP_GROUP_MEMEBERSHIP_ATTR_KEY =
- "authentication.ldap.groupMembershipAttr";
- public static final String LDAP_ADMIN_GROUP_MAPPING_RULES_KEY =
- "authorization.ldap.adminGroupMappingRules";
- public static final String LDAP_GROUP_SEARCH_FILTER_KEY =
- "authorization.ldap.groupSearchFilter";
+ public static final String LDAP_PRIMARY_URL_KEY =
"authentication.ldap.primaryUrl";
+ public static final String LDAP_SECONDARY_URL_KEY =
"authentication.ldap.secondaryUrl";
+ public static final String LDAP_BASE_DN_KEY = "authentication.ldap.baseDn";
+ public static final String LDAP_BIND_ANONYMOUSLY_KEY =
"authentication.ldap.bindAnonymously";
+ public static final String LDAP_MANAGER_DN_KEY =
"authentication.ldap.managerDn";
+ public static final String LDAP_MANAGER_PASSWORD_KEY =
"authentication.ldap.managerPassword";
+ public static final String LDAP_USERNAME_ATTRIBUTE_KEY =
"authentication.ldap.usernameAttribute";
+ public static final String LDAP_USER_BASE_KEY =
"authentication.ldap.userBase";
+ public static final String LDAP_USER_OBJECT_CLASS_KEY =
"authentication.ldap.userObjectClass";
+ public static final String LDAP_GROUP_BASE_KEY =
"authentication.ldap.groupBase";
+ public static final String LDAP_GROUP_OBJECT_CLASS_KEY =
"authentication.ldap.groupObjectClass";
+ public static final String LDAP_GROUP_NAMING_ATTR_KEY =
"authentication.ldap.groupNamingAttr";
+ public static final String LDAP_GROUP_MEMEBERSHIP_ATTR_KEY =
"authentication.ldap.groupMembershipAttr";
+ public static final String LDAP_ADMIN_GROUP_MAPPING_RULES_KEY =
"authorization.ldap.adminGroupMappingRules";
+ public static final String LDAP_GROUP_SEARCH_FILTER_KEY =
"authorization.ldap.groupSearchFilter";
public static final String SERVER_EC_CACHE_SIZE = "server.ecCacheSize";
- public static final String SERVER_STALE_CONFIG_CACHE_ENABLED_KEY =
- "server.cache.isStale.enabled";
+ public static final String SERVER_STALE_CONFIG_CACHE_ENABLED_KEY =
"server.cache.isStale.enabled";
public static final String SERVER_PERSISTENCE_TYPE_KEY =
"server.persistence.type";
public static final String SERVER_JDBC_USER_NAME_KEY =
"server.jdbc.user.name";
public static final String SERVER_JDBC_USER_PASSWD_KEY =
"server.jdbc.user.passwd";
public static final String SERVER_JDBC_DRIVER_KEY = "server.jdbc.driver";
public static final String SERVER_JDBC_URL_KEY = "server.jdbc.url";
public static final String SERVER_JDBC_PROPERTIES_PREFIX =
"server.jdbc.properties.";
- // public static final String SERVER_RCA_PERSISTENCE_TYPE_KEY =
"server.rca.persistence.type";
+
+ public static final String SERVER_JDBC_CONNECTION_POOL =
"server.jdbc.connection-pool";
+ public static final String SERVER_JDBC_CONNECTION_POOL_MIN_SIZE =
"server.jdbc.connection-pool.min-size";
+ public static final String SERVER_JDBC_CONNECTION_POOL_MAX_SIZE =
"server.jdbc.connection-pool.max-size";
+ public static final String SERVER_JDBC_CONNECTION_POOL_AQUISITION_SIZE =
"server.jdbc.connection-pool.acquisition-size";
+ public static final String SERVER_JDBC_CONNECTION_POOL_MAX_AGE =
"server.jdbc.connection-pool.max-age";
+ public static final String SERVER_JDBC_CONNECTION_POOL_MAX_IDLE_TIME =
"server.jdbc.connection-pool.max-idle-time";
+ public static final String SERVER_JDBC_CONNECTION_POOL_MAX_IDLE_TIME_EXCESS
= "server.jdbc.connection-pool.max-idle-time-excess";
+ public static final String SERVER_JDBC_CONNECTION_POOL_IDLE_TEST_INTERVAL =
"server.jdbc.connection-pool.idle-test-interval";
+
public static final String SERVER_JDBC_RCA_USER_NAME_KEY =
"server.jdbc.rca.user.name";
public static final String SERVER_JDBC_RCA_USER_PASSWD_KEY =
"server.jdbc.rca.user.passwd";
public static final String SERVER_JDBC_RCA_DRIVER_KEY =
"server.jdbc.rca.driver";
@@ -174,10 +161,8 @@ public class Configuration {
public static final String HOSTNAME_MACRO = "{hostname}";
public static final String JDBC_RCA_LOCAL_URL = "jdbc:postgresql://" +
HOSTNAME_MACRO + "/ambarirca";
public static final String JDBC_RCA_LOCAL_DRIVER = "org.postgresql.Driver";
- public static final String OS_VERSION_KEY =
- "server.os_type";
- public static final String SRVR_HOSTS_MAPPING =
- "server.hosts.mapping";
+ public static final String OS_VERSION_KEY = "server.os_type";
+ public static final String SRVR_HOSTS_MAPPING = "server.hosts.mapping";
// Command parameter names
public static final String UPGRADE_FROM_STACK = "source_stack_version";
public static final String UPGRADE_TO_STACK = "target_stack_version";
@@ -205,30 +190,21 @@ public class Configuration {
public static final String MAPREDUCE2_LOG4J_CONFIG_TAG = "mapreduce2-log4j";
public static final String RCA_ENABLED_PROPERTY = "rca_enabled";
public static final String HIVE_CONFIG_TAG = "hive-site";
- public static final String HIVE_METASTORE_PASSWORD_PROPERTY =
- "javax.jdo.option.ConnectionPassword";
- public static final String MASTER_KEY_PERSISTED = "security.master" +
- ".key.ispersisted";
- public static final String MASTER_KEY_LOCATION = "security.master.key" +
- ".location";
- public static final String MASTER_KEY_ENV_PROP =
- "AMBARI_SECURITY_MASTER_KEY";
+ public static final String HIVE_METASTORE_PASSWORD_PROPERTY =
"javax.jdo.option.ConnectionPassword";
+ public static final String MASTER_KEY_PERSISTED =
"security.master.key.ispersisted";
+ public static final String MASTER_KEY_LOCATION =
"security.master.key.location";
+ public static final String MASTER_KEY_ENV_PROP =
"AMBARI_SECURITY_MASTER_KEY";
public static final String MASTER_KEY_FILENAME_DEFAULT = "master";
/**
* Key for repo validation suffixes.
*/
public static final String REPO_SUFFIX_KEY_UBUNTU =
"repo.validation.suffixes.ubuntu";
public static final String REPO_SUFFIX_KEY_DEFAULT =
"repo.validation.suffixes.default";
- public static final String EXECUTION_SCHEDULER_CLUSTERED =
- "server.execution.scheduler.isClustered";
- public static final String EXECUTION_SCHEDULER_THREADS =
- "server.execution.scheduler.maxThreads";
- public static final String EXECUTION_SCHEDULER_CONNECTIONS =
- "server.execution.scheduler.maxDbConnections";
- public static final String EXECUTION_SCHEDULER_MISFIRE_TOLERATION =
- "server.execution.scheduler.misfire.toleration.minutes";
- public static final String EXECUTION_SCHEDULER_START_DELAY =
- "server.execution.scheduler.start.delay.seconds";
+ public static final String EXECUTION_SCHEDULER_CLUSTERED =
"server.execution.scheduler.isClustered";
+ public static final String EXECUTION_SCHEDULER_THREADS =
"server.execution.scheduler.maxThreads";
+ public static final String EXECUTION_SCHEDULER_CONNECTIONS =
"server.execution.scheduler.maxDbConnections";
+ public static final String EXECUTION_SCHEDULER_MISFIRE_TOLERATION =
"server.execution.scheduler.misfire.toleration.minutes";
+ public static final String EXECUTION_SCHEDULER_START_DELAY =
"server.execution.scheduler.start.delay.seconds";
public static final String DEFAULT_SCHEDULER_THREAD_COUNT = "5";
public static final String DEFAULT_SCHEDULER_MAX_CONNECTIONS = "5";
public static final String DEFAULT_EXECUTION_SCHEDULER_MISFIRE_TOLERATION =
"480";
@@ -239,17 +215,15 @@ public class Configuration {
public static final String EXTERNAL_SCRIPT_TIMEOUT_DEFAULT = "5000";
/**
* This key defines whether stages of parallel requests are executed in
- * parallel or sequentally. Only stages from different requests
- * running on not interfering host sets may be executed in parallel.
+ * parallel or sequentally. Only stages from different requests running on
not
+ * interfering host sets may be executed in parallel.
*/
- public static final String PARALLEL_STAGE_EXECUTION_KEY =
- "server.stages.parallel";
+ public static final String PARALLEL_STAGE_EXECUTION_KEY =
"server.stages.parallel";
public static final String AGENT_TASK_TIMEOUT_KEY = "agent.task.timeout";
public static final String AGENT_TASK_TIMEOUT_DEFAULT = "900";
public static final String CUSTOM_ACTION_DEFINITION_KEY =
"custom.action.definitions";
- private static final String CUSTOM_ACTION_DEFINITION_DEF_VALUE =
- "/var/lib/ambari-server/resources/custom_action_definitions";
+ private static final String CUSTOM_ACTION_DEFINITION_DEF_VALUE =
"/var/lib/ambari-server/resources/custom_action_definitions";
private static final long SERVER_EC_CACHE_SIZE_DEFAULT = 10000L;
private static final String SERVER_STALE_CONFIG_CACHE_ENABLED_DEFAULT =
"true";
@@ -265,34 +239,29 @@ public class Configuration {
private static final String SRVR_DISABLED_CIPHERS_DEFAULT = "";
private static final String SRVR_DISABLED_PROTOCOLS_DEFAULT = "";
private static final String PASSPHRASE_ENV_DEFAULT = "AMBARI_PASSPHRASE";
- private static final String RESOURCES_DIR_DEFAULT =
- "/var/lib/ambari-server/resources/";
+ private static final String RESOURCES_DIR_DEFAULT =
"/var/lib/ambari-server/resources/";
private static final String ANONYMOUS_AUDIT_NAME_KEY =
"anonymous.audit.name";
private static final int CLIENT_API_PORT_DEFAULT = 8080;
private static final int CLIENT_API_SSL_PORT_DEFAULT = 8443;
private static final String LDAP_BIND_ANONYMOUSLY_DEFAULT = "true";
- //TODO For embedded server only - should be removed later
+ // TODO For embedded server only - should be removed later
private static final String LDAP_PRIMARY_URL_DEFAULT = "localhost:33389";
private static final String LDAP_BASE_DN_DEFAULT =
"dc=ambari,dc=apache,dc=org";
private static final String LDAP_USERNAME_ATTRIBUTE_DEFAULT = "uid";
- private static final String LDAP_USER_BASE_DEFAULT =
- "ou=people,dc=ambari,dc=apache,dc=org";
+ private static final String LDAP_USER_BASE_DEFAULT =
"ou=people,dc=ambari,dc=apache,dc=org";
private static final String LDAP_USER_OBJECT_CLASS_DEFAULT = "person";
- private static final String LDAP_GROUP_BASE_DEFAULT =
- "ou=groups,dc=ambari,dc=apache,dc=org";
+ private static final String LDAP_GROUP_BASE_DEFAULT =
"ou=groups,dc=ambari,dc=apache,dc=org";
private static final String LDAP_GROUP_OBJECT_CLASS_DEFAULT = "group";
private static final String LDAP_GROUP_NAMING_ATTR_DEFAULT = "cn";
private static final String LDAP_GROUP_MEMBERSHIP_ATTR_DEFAULT = "member";
- private static final String LDAP_ADMIN_GROUP_MAPPING_RULES_DEFAULT =
- "Ambari Administrators";
+ private static final String LDAP_ADMIN_GROUP_MAPPING_RULES_DEFAULT = "Ambari
Administrators";
private static final String LDAP_GROUP_SEARCH_FILTER_DEFAULT = "";
private static final String IS_LDAP_CONFIGURED_DEFAULT = "false";
- //TODO for development purposes only, should be changed to 'false'
+ // TODO for development purposes only, should be changed to 'false'
private static final String SERVER_PERSISTENCE_TYPE_DEFAULT = "local";
- private static final String SERVER_CONNECTION_MAX_IDLE_TIME =
- "server.connection.max.idle.millis";
+ private static final String SERVER_CONNECTION_MAX_IDLE_TIME =
"server.connection.max.idle.millis";
private static final String UBUNTU_OS = "ubuntu12";
@@ -318,6 +287,15 @@ public class Configuration {
private static final String SERVER_HTTP_SESSION_INACTIVE_TIMEOUT =
"server.http.session.inactive_timeout";
+ // database pooling defaults
+ private static final String DEFAULT_JDBC_POOL_MIN_CONNECTIONS = "5";
+ private static final String DEFAULT_JDBC_POOL_MAX_CONNECTIONS = "32";
+ private static final String DEFAULT_JDBC_POOL_ACQUISITION_SIZE = "5";
+ private static final String DEFAULT_JDBC_POOL_MAX_IDLE_TIME_SECONDS =
"14400";
+ private static final String DEFAULT_JDBC_POOL_EXCESS_MAX_IDLE_TIME_SECONDS =
"0";
+ private static final String DEFAULT_JDBC_POOL_MAX_AGE_SECONDS = "0";
+ private static final String DEFAULT_JDBC_POOL_IDLE_TEST_INTERVAL = "7200";
+
private static final Logger LOG = LoggerFactory.getLogger(
Configuration.class);
private Properties properties;
@@ -326,6 +304,80 @@ public class Configuration {
private volatile boolean credentialProviderInitialized = false;
private Map<String, String> customDbProperties = null;
+ /**
+ * The {@link DatabaseType} enum represents the database being used.
+ */
+ public enum DatabaseType {
+ POSTGRES("postgres"), ORACLE("oracle"), MYSQL("mysql"), DERBY("derby"),
SQL_SERVER(
+ "sqlserver");
+
+ private static final Map<String, DatabaseType> m_mappedTypes = new
HashMap<String, Configuration.DatabaseType>(
+ 5);
+
+ static {
+ for (DatabaseType databaseType : EnumSet.allOf(DatabaseType.class)) {
+ m_mappedTypes.put(databaseType.getName(), databaseType);
+ }
+ }
+
+ /**
+ * The JDBC URL type name.
+ */
+ private String m_databaseType;
+
+ /**
+ * Constructor.
+ *
+ */
+ private DatabaseType(String databaseType) {
+ m_databaseType = databaseType;
+ }
+
+ /**
+ * Gets an internal name for this database type.
+ *
+ * @return the internal name for this database type.
+ */
+ public String getName() {
+ return m_databaseType;
+ }
+
+ public DatabaseType get(String databaseTypeName) {
+ return m_mappedTypes.get(databaseTypeName);
+ }
+ }
+
+ /**
+ * The {@link ConnectionPoolType} is used to define which pooling mechanism
+ * JDBC should use.
+ */
+ public enum ConnectionPoolType {
+ INTERNAL("internal"), C3P0("c3p0");
+
+ /**
+ * The connection pooling name.
+ */
+ private String m_name;
+
+ /**
+ * Constructor.
+ *
+ * @param name
+ */
+ private ConnectionPoolType(String name) {
+ m_name = name;
+ }
+
+ /**
+ * Gets an internal name for this connection pool type.
+ *
+ * @return the internal name for this connection pool type.
+ */
+ public String getName() {
+ return m_name;
+ }
+ }
+
public Configuration() {
this(readConfigFile());
}
@@ -1137,4 +1189,134 @@ public class Configuration {
SERVER_HTTP_SESSION_INACTIVE_TIMEOUT,
"1800"));
}
+
+ /**
+ * Gets the type of database by examining the {@link #getDatabaseUrl()} JDBC
+ * URL.
+ *
+ * @return the database type (never {@code null}).
+ * @throws RuntimeException
+ * if there is no known database type.
+ */
+ public DatabaseType getDatabaseType() {
+ String dbUrl = getDatabaseUrl();
+ DatabaseType databaseType;
+
+ if (dbUrl.contains(DatabaseType.POSTGRES.getName())) {
+ databaseType = DatabaseType.POSTGRES;
+ } else if (dbUrl.contains(DatabaseType.ORACLE.getName())) {
+ databaseType = DatabaseType.ORACLE;
+ } else if (dbUrl.contains(DatabaseType.MYSQL.getName())) {
+ databaseType = DatabaseType.MYSQL;
+ } else if (dbUrl.contains(DatabaseType.DERBY.getName())) {
+ databaseType = DatabaseType.DERBY;
+ } else {
+ throw new RuntimeException(
+ "The database type could be not determined from the JDBC URL "
+ + dbUrl);
+ }
+
+ return databaseType;
+ }
+
+ /**
+ * Gets the type of connection pool that EclipseLink should use.
+ *
+ * @return default of {@link ConnectionPoolType#INTERNAL}.
+ */
+ public ConnectionPoolType getConnectionPoolType() {
+ String connectionPoolType = properties.getProperty(
+ SERVER_JDBC_CONNECTION_POOL, ConnectionPoolType.INTERNAL.getName());
+
+ if (connectionPoolType.equals(ConnectionPoolType.C3P0.getName())) {
+ return ConnectionPoolType.C3P0;
+ }
+
+ return ConnectionPoolType.INTERNAL;
+ }
+
+ /**
+ * Gets the minimum number of connections that should always exist in the
+ * connection pool.
+ *
+ * @return default of {@value #DEFAULT_JDBC_POOL_MIN_CONNECTIONS}
+ */
+ public int getConnectionPoolMinimumSize() {
+ return Integer.parseInt(properties.getProperty(
+ SERVER_JDBC_CONNECTION_POOL_MIN_SIZE,
DEFAULT_JDBC_POOL_MIN_CONNECTIONS));
+ }
+
+ /**
+ * Gets the maximum number of connections that should ever exist in the
+ * connection pool.
+ *
+ * @return default of {@value #DEFAULT_JDBC_POOL_MAX_CONNECTIONS}
+ */
+ public int getConnectionPoolMaximumSize() {
+ return Integer.parseInt(properties.getProperty(
+ SERVER_JDBC_CONNECTION_POOL_MAX_SIZE,
DEFAULT_JDBC_POOL_MAX_CONNECTIONS));
+ }
+
+ /**
+ * Gets the maximum amount of time in seconds any connection, whether it has
+ * been idle or active, should ever be in the pool. This will terminate the
+ * connection after the expiration age and force new connections to be
+ * opened.
+ *
+ * @return default of {@value #DEFAULT_JDBC_POOL_MAX_AGE_SECONDS}
+ */
+ public int getConnectionPoolMaximumAge() {
+ return Integer.parseInt(properties.getProperty(
+ SERVER_JDBC_CONNECTION_POOL_MAX_AGE,
DEFAULT_JDBC_POOL_MAX_AGE_SECONDS));
+ }
+
+ /**
+ * Gets the maximum amount of time in seconds that an idle connection can
+ * remain in the pool. This should always be greater than the value returned
+ * from {@link #getConnectionPoolMaximumExcessIdle()}
+ *
+ * @return default of {@value #DEFAULT_JDBC_POOL_MAX_IDLE_TIME_SECONDS}
+ */
+ public int getConnectionPoolMaximumIdle() {
+ return Integer.parseInt(properties.getProperty(
+ SERVER_JDBC_CONNECTION_POOL_MAX_IDLE_TIME,
+ DEFAULT_JDBC_POOL_MAX_IDLE_TIME_SECONDS));
+ }
+
+ /**
+ * Gets the maximum amount of time in seconds that connections beyond the
+ * minimum pool size should remain in the pool. This should always be less
+ * than the value returned from {@link #getConnectionPoolMaximumIdle()}
+ *
+ * @return default of {@value
#DEFAULT_JDBC_POOL_EXCESS_MAX_IDLE_TIME_SECONDS}
+ */
+ public int getConnectionPoolMaximumExcessIdle() {
+ return Integer.parseInt(properties.getProperty(
+ SERVER_JDBC_CONNECTION_POOL_MAX_IDLE_TIME_EXCESS,
+ DEFAULT_JDBC_POOL_EXCESS_MAX_IDLE_TIME_SECONDS));
+ }
+
+ /**
+ * Gets the number of connections that should be retrieved when the pool size
+ * must increase. It's wise to set this higher than 1 since the assumption is
+ * that a pool that needs to grow should probably grow by more than 1.
+ *
+ * @return default of {@value #DEFAULT_JDBC_POOL_ACQUISITION_SIZE}
+ */
+ public int getConnectionPoolAcquisitionSize() {
+ return Integer.parseInt(properties.getProperty(
+ SERVER_JDBC_CONNECTION_POOL_AQUISITION_SIZE,
+ DEFAULT_JDBC_POOL_ACQUISITION_SIZE));
+ }
+
+ /**
+ * Gets the number of seconds in between testing each idle connection in the
+ * connection pool for validity.
+ *
+ * @return default of {@value #DEFAULT_JDBC_POOL_IDLE_TEST_INTERVAL}
+ */
+ public int getConnectionPoolIdleTestInternval() {
+ return Integer.parseInt(properties.getProperty(
+ SERVER_JDBC_CONNECTION_POOL_IDLE_TEST_INTERVAL,
+ DEFAULT_JDBC_POOL_IDLE_TEST_INTERVAL));
+ }
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/3271d1d1/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git
a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 6a7d22a..66de2d6 100644
---
a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++
b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -22,9 +22,11 @@ import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_DB
import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_DB_RCA_PASSWORD;
import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_DB_RCA_URL;
import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_DB_RCA_USERNAME;
+import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.CLIENTS_TO_UPDATE_CONFIGS;
import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.COMMAND_TIMEOUT;
import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_DRIVER_FILENAME;
import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_NAME;
+import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.GROUP_LIST;
import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOOKS_FOLDER;
import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JAVA_HOME;
import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JCE_NAME;
@@ -41,8 +43,6 @@ import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_R
import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_NAME;
import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_VERSION;
import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.USER_LIST;
-import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.GROUP_LIST;
-import static
org.apache.ambari.server.agent.ExecutionCommand.KeyNames.CLIENTS_TO_UPDATE_CONFIGS;
import java.io.File;
import java.io.IOException;
@@ -84,6 +84,7 @@ import org.apache.ambari.server.actionmanager.StageFactory;
import org.apache.ambari.server.agent.ExecutionCommand;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.configuration.Configuration.DatabaseType;
import org.apache.ambari.server.controller.internal.RequestOperationLevel;
import org.apache.ambari.server.controller.internal.RequestStageContainer;
import org.apache.ambari.server.controller.internal.URLStreamProvider;
@@ -670,8 +671,9 @@ public class AmbariManagementControllerImpl implements
AmbariManagementControlle
String configTypeName = configType.getKey();
Map<String, String> properties = configType.getValue();
- if(configTypeName.equals(Configuration.GLOBAL_CONFIG_TAG))
+ if(configTypeName.equals(Configuration.GLOBAL_CONFIG_TAG)) {
continue;
+ }
String tag;
if(cluster.getConfigsByType(configTypeName) == null) {
@@ -1320,6 +1322,7 @@ public class AmbariManagementControllerImpl implements
AmbariManagementControlle
return clusterUpdateCache.getIfPresent(clusterRequest);
}
+ @Override
public String getJobTrackerHost(Cluster cluster) {
try {
Service svc = cluster.getService("MAPREDUCE");
@@ -1658,11 +1661,10 @@ public class AmbariManagementControllerImpl implements
AmbariManagementControlle
String groupList = gson.toJson(groupSet);
hostParams.put(GROUP_LIST, groupList);
- if (configs.getServerDBName().equalsIgnoreCase(Configuration
- .ORACLE_DB_NAME)) {
+ DatabaseType databaseType = configs.getDatabaseType();
+ if (databaseType == DatabaseType.ORACLE) {
hostParams.put(DB_DRIVER_FILENAME, configs.getOjdbcJarName());
- } else if (configs.getServerDBName().equalsIgnoreCase(Configuration
- .MYSQL_DB_NAME)) {
+ } else if (databaseType == DatabaseType.MYSQL) {
hostParams.put(DB_DRIVER_FILENAME, configs.getMySQLJarName());
}
@@ -2414,6 +2416,7 @@ public class AmbariManagementControllerImpl implements
AmbariManagementControlle
}
}
+ @Override
public String findServiceName(Cluster cluster, String componentName) throws
AmbariException {
StackId stackId = cluster.getDesiredStackVersion();
String serviceName =
@@ -3754,7 +3757,7 @@ public class AmbariManagementControllerImpl implements
AmbariManagementControlle
}
}
- this.users.processLdapSync(batchInfo);
+ users.processLdapSync(batchInfo);
return batchInfo;
} finally {
ldapSyncInProgress = false;
http://git-wip-us.apache.org/repos/asf/ambari/blob/3271d1d1/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
----------------------------------------------------------------------
diff --git
a/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
b/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
index c395df6..5ce9107 100644
---
a/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
+++
b/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
@@ -30,8 +30,10 @@ import static
org.eclipse.persistence.config.PersistenceUnitProperties.JDBC_DRIV
import static
org.eclipse.persistence.config.PersistenceUnitProperties.JDBC_PASSWORD;
import static
org.eclipse.persistence.config.PersistenceUnitProperties.JDBC_URL;
import static
org.eclipse.persistence.config.PersistenceUnitProperties.JDBC_USER;
+import static
org.eclipse.persistence.config.PersistenceUnitProperties.NON_JTA_DATASOURCE;
import static
org.eclipse.persistence.config.PersistenceUnitProperties.THROW_EXCEPTIONS;
+import java.beans.PropertyVetoException;
import java.security.SecureRandom;
import java.util.Map;
import java.util.Map.Entry;
@@ -45,6 +47,8 @@ import
org.apache.ambari.server.actionmanager.HostRoleCommandFactoryImpl;
import org.apache.ambari.server.actionmanager.RequestFactory;
import org.apache.ambari.server.actionmanager.StageFactory;
import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.configuration.Configuration.ConnectionPoolType;
+import org.apache.ambari.server.configuration.Configuration.DatabaseType;
import org.apache.ambari.server.controller.internal.ComponentResourceProvider;
import
org.apache.ambari.server.controller.internal.HostComponentResourceProvider;
import org.apache.ambari.server.controller.internal.HostResourceProvider;
@@ -91,6 +95,8 @@ import org.eclipse.jetty.server.SessionIdManager;
import org.eclipse.jetty.server.SessionManager;
import org.eclipse.jetty.server.session.HashSessionIdManager;
import org.eclipse.jetty.server.session.HashSessionManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.springframework.security.crypto.password.PasswordEncoder;
import org.springframework.security.crypto.password.StandardPasswordEncoder;
import org.springframework.web.filter.DelegatingFilterProxy;
@@ -103,11 +109,13 @@ import
com.google.inject.assistedinject.FactoryModuleBuilder;
import com.google.inject.name.Names;
import com.google.inject.persist.PersistModule;
import com.google.inject.persist.jpa.AmbariJpaPersistModule;
+import com.mchange.v2.c3p0.ComboPooledDataSource;
/**
* Used for injection purposes.
*/
public class ControllerModule extends AbstractModule {
+ private static Logger LOG = LoggerFactory.getLogger(ControllerModule.class);
private final Configuration configuration;
private final HostsMap hostsMap;
@@ -136,13 +144,17 @@ public class ControllerModule extends AbstractModule {
* @return the configuration properties
*/
public static Properties getPersistenceProperties(Configuration
configuration) {
+ // log what database type has been calculated
+ DatabaseType databaseType = configuration.getDatabaseType();
+ LOG.info("Detected {} as the database type from the JDBC URL",
databaseType);
+
Properties properties = new Properties();
// custom jdbc properties
- Map<String, String> custom = configuration.getDatabaseCustomProperties();
+ Map<String, String> customProperties =
configuration.getDatabaseCustomProperties();
- if (0 != custom.size()) {
- for (Entry<String, String> entry : custom.entrySet()) {
+ if (0 != customProperties.size()) {
+ for (Entry<String, String> entry : customProperties.entrySet()) {
properties.setProperty("eclipselink.jdbc.property." + entry.getKey(),
entry.getValue());
}
@@ -163,6 +175,61 @@ public class ControllerModule extends AbstractModule {
properties.setProperty(JDBC_DRIVER, Configuration.JDBC_LOCAL_DRIVER);
break;
}
+
+ // determine the type of pool to use
+ boolean isConnectionPoolingExternal = false;
+ ConnectionPoolType connectionPoolType =
configuration.getConnectionPoolType();
+ if (connectionPoolType == ConnectionPoolType.C3P0) {
+ isConnectionPoolingExternal = true;
+ }
+
+ // force the use of c3p0 with MySQL
+ if (databaseType == DatabaseType.MYSQL) {
+ isConnectionPoolingExternal = true;
+ }
+
+ // use c3p0
+ if (isConnectionPoolingExternal) {
+ LOG.info("Using c3p0 {} as the EclipsLink DataSource",
+ ComboPooledDataSource.class.getSimpleName());
+
+ // Oracle requires a different validity query
+ String testQuery = "SELECT 1";
+ if (databaseType == DatabaseType.ORACLE) {
+ testQuery = "SELECT 1 FROM DUAL";
+ }
+
+ ComboPooledDataSource dataSource = new ComboPooledDataSource();
+
+ // attempt to load the driver; if this fails, warn and move on
+ try {
+ dataSource.setDriverClass(configuration.getDatabaseDriver());
+ } catch (PropertyVetoException pve) {
+ LOG.warn("Unable to initialize c3p0", pve);
+ return properties;
+ }
+
+ // basic configuration stuff
+ dataSource.setJdbcUrl(configuration.getDatabaseUrl());
+ dataSource.setUser(configuration.getDatabaseUser());
+ dataSource.setPassword(configuration.getDatabasePassword());
+
+ // pooling
+ dataSource.setMinPoolSize(configuration.getConnectionPoolMinimumSize());
+
dataSource.setInitialPoolSize(configuration.getConnectionPoolMinimumSize());
+ dataSource.setMaxPoolSize(configuration.getConnectionPoolMaximumSize());
+
dataSource.setAcquireIncrement(configuration.getConnectionPoolAcquisitionSize());
+
+ // validity
+
dataSource.setMaxConnectionAge(configuration.getConnectionPoolMaximumAge());
+ dataSource.setMaxIdleTime(configuration.getConnectionPoolMaximumIdle());
+
dataSource.setMaxIdleTimeExcessConnections(configuration.getConnectionPoolMaximumExcessIdle());
+ dataSource.setPreferredTestQuery(testQuery);
+
dataSource.setIdleConnectionTestPeriod(configuration.getConnectionPoolIdleTestInternval());
+
+ properties.put(NON_JTA_DATASOURCE, dataSource);
+ }
+
return properties;
}
@@ -231,7 +298,7 @@ public class ControllerModule extends AbstractModule {
PersistenceType persistenceType = configuration.getPersistenceType();
AmbariJpaPersistModule jpaPersistModule = new
AmbariJpaPersistModule(Configuration.JDBC_UNIT_NAME);
- Properties persistenceProperties = getPersistenceProperties(configuration);
+ Properties persistenceProperties =
ControllerModule.getPersistenceProperties(configuration);
if (!persistenceType.equals(PersistenceType.IN_MEMORY)) {
persistenceProperties.setProperty(JDBC_USER,
configuration.getDatabaseUser());
http://git-wip-us.apache.org/repos/asf/ambari/blob/3271d1d1/ambari-server/src/main/java/org/apache/ambari/server/orm/EclipseLinkSessionCustomizer.java
----------------------------------------------------------------------
diff --git
a/ambari-server/src/main/java/org/apache/ambari/server/orm/EclipseLinkSessionCustomizer.java
b/ambari-server/src/main/java/org/apache/ambari/server/orm/EclipseLinkSessionCustomizer.java
new file mode 100644
index 0000000..2f4aacb
--- /dev/null
+++
b/ambari-server/src/main/java/org/apache/ambari/server/orm/EclipseLinkSessionCustomizer.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.orm;
+
+import javax.activation.DataSource;
+
+import org.eclipse.persistence.config.SessionCustomizer;
+import org.eclipse.persistence.sessions.JNDIConnector;
+import org.eclipse.persistence.sessions.Session;
+
+/**
+ * The {@link EclipseLinkSessionCustomizer} is used as a way to quickly
override
+ * the way that EclipseLink interacts with the database. Some possible uses of
+ * this class are:
+ * <ul>
+ * <li>Setting runtime analysis properties such as log levels and
profilers</li>
+ * <li>Providing a custom {@link DataSource} via {@link JNDIConnector}</li>
+ * <li>Changing JDBC driver properties.</li>
+ * </ul>
+ * For example:
+ *
+ * <pre>
+ * DatabaseLogin login = (DatabaseLogin) session.getDatasourceLogin();
+ * ComboPooledDataSource source = new ComboPooledDataSource();
+ * source.setDriverClass(login.getDriverClassName());
+ * source.setMaxConnectionAge(100);
+ * ...
+ * login.setConnector(new JNDIConnector(source));
+ *
+ * </pre>
+ */
+public class EclipseLinkSessionCustomizer implements SessionCustomizer {
+
+ /**
+ * {@inheritDoc}
+ * <p/>
+ * Currently a NOOP, this class exists for quick customization purposes.
+ */
+ @Override
+ public void customize(Session session) throws Exception {
+ // NOOP
+ }
+}
http://git-wip-us.apache.org/repos/asf/ambari/blob/3271d1d1/ambari-server/src/main/java/org/apache/ambari/server/scheduler/ExecutionSchedulerImpl.java
----------------------------------------------------------------------
diff --git
a/ambari-server/src/main/java/org/apache/ambari/server/scheduler/ExecutionSchedulerImpl.java
b/ambari-server/src/main/java/org/apache/ambari/server/scheduler/ExecutionSchedulerImpl.java
index 324ee27..76ebac6 100644
---
a/ambari-server/src/main/java/org/apache/ambari/server/scheduler/ExecutionSchedulerImpl.java
+++
b/ambari-server/src/main/java/org/apache/ambari/server/scheduler/ExecutionSchedulerImpl.java
@@ -17,15 +17,13 @@
*/
package org.apache.ambari.server.scheduler;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.Singleton;
+import java.util.List;
+import java.util.Properties;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.configuration.Configuration.DatabaseType;
import org.apache.ambari.server.state.scheduler.GuiceJobFactory;
-import org.apache.ambari.server.state.scheduler.RequestExecution;
-import org.apache.ambari.server.state.scheduler.Schedule;
-import org.quartz.Job;
import org.quartz.JobDetail;
import org.quartz.JobKey;
import org.quartz.Scheduler;
@@ -35,8 +33,9 @@ import org.quartz.impl.StdSchedulerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.util.List;
-import java.util.Properties;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.Singleton;
@Singleton
public class ExecutionSchedulerImpl implements ExecutionScheduler {
@@ -134,13 +133,13 @@ public class ExecutionSchedulerImpl implements
ExecutionScheduler {
protected String[] getQuartzDbDelegateClassAndValidationQuery() {
- String dbUrl = configuration.getDatabaseUrl();
String dbDelegate = "org.quartz.impl.jdbcjobstore.StdJDBCDelegate";
String dbValidate = "select 0";
- if (dbUrl.contains(Configuration.POSTGRES_DB_NAME)) {
+ DatabaseType databaseType = configuration.getDatabaseType();
+ if (databaseType == DatabaseType.POSTGRES) {
dbDelegate = "org.quartz.impl.jdbcjobstore.PostgreSQLDelegate";
- } else if (dbUrl.contains(Configuration.ORACLE_DB_NAME)) {
+ } else if (databaseType == DatabaseType.ORACLE) {
dbDelegate = "org.quartz.impl.jdbcjobstore.oracle.OracleDelegate";
dbValidate = "select 0 from dual";
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/3271d1d1/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
----------------------------------------------------------------------
diff --git
a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
index d245b51..6070ecb 100644
---
a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
+++
b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/AbstractUpgradeCatalog.java
@@ -17,14 +17,20 @@
*/
package org.apache.ambari.server.upgrade;
-import com.google.common.collect.Maps;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.Provider;
-import com.google.inject.persist.Transactional;
+import java.sql.SQLException;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import javax.persistence.EntityManager;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.configuration.Configuration.DatabaseType;
import org.apache.ambari.server.controller.AmbariManagementController;
import org.apache.ambari.server.controller.ConfigurationRequest;
import org.apache.ambari.server.orm.DBAccessor;
@@ -35,24 +41,16 @@ import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Config;
import org.apache.ambari.server.state.ConfigHelper;
import org.apache.ambari.server.state.PropertyInfo;
-import org.apache.ambari.server.state.Service;
import org.apache.ambari.server.state.ServiceInfo;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.StackInfo;
import org.apache.ambari.server.utils.VersionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.persistence.EntityManager;
-
-import java.sql.SQLException;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.Map.Entry;
+import com.google.common.collect.Maps;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.Provider;
+import com.google.inject.persist.Transactional;
public abstract class AbstractUpgradeCatalog implements UpgradeCatalog {
@Inject
@@ -126,25 +124,6 @@ public abstract class AbstractUpgradeCatalog implements
UpgradeCatalog {
return rows;
}
- protected String getDbType() {
- String dbUrl = configuration.getDatabaseUrl();
- String dbType;
-
- if (dbUrl.contains(Configuration.POSTGRES_DB_NAME)) {
- dbType = Configuration.POSTGRES_DB_NAME;
- } else if (dbUrl.contains(Configuration.ORACLE_DB_NAME)) {
- dbType = Configuration.ORACLE_DB_NAME;
- } else if (dbUrl.contains(Configuration.MYSQL_DB_NAME)) {
- dbType = Configuration.MYSQL_DB_NAME;
- } else if (dbUrl.contains(Configuration.DERBY_DB_NAME)) {
- dbType = Configuration.DERBY_DB_NAME;
- } else {
- throw new RuntimeException("Unable to determine database type.");
- }
-
- return dbType;
- }
-
protected Provider<EntityManager> getEntityManagerProvider() {
return injector.getProvider(EntityManager.class);
}
@@ -183,11 +162,11 @@ public abstract class AbstractUpgradeCatalog implements
UpgradeCatalog {
dbAccessor.executeQuery(String.format("ALTER ROLE %s SET search_path to
'%s';", dbUser, schemaName));
}
}
-
+
public void addNewConfigurationsFromXml() throws AmbariException {
ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
AmbariManagementController controller =
injector.getInstance(AmbariManagementController.class);
-
+
Clusters clusters = controller.getClusters();
if (clusters == null) {
return;
@@ -197,23 +176,23 @@ public abstract class AbstractUpgradeCatalog implements
UpgradeCatalog {
if (clusterMap != null && !clusterMap.isEmpty()) {
for (Cluster cluster : clusterMap.values()) {
Map<String, Set<String>> newProperties = new HashMap<String,
Set<String>>();
-
+
Set<PropertyInfo> stackProperties =
configHelper.getStackProperties(cluster);
for(String serviceName: cluster.getServices().keySet()) {
Set<PropertyInfo> properties =
configHelper.getServiceProperties(cluster, serviceName);
-
+
if(properties == null) {
continue;
}
properties.addAll(stackProperties);
-
+
for(PropertyInfo property:properties) {
String configType =
ConfigHelper.fileNameToConfigType(property.getFilename());
Config clusterConfigs = cluster.getDesiredConfigByType(configType);
if(clusterConfigs == null ||
!clusterConfigs.getProperties().containsKey(property.getName())) {
LOG.info("Config " + property.getName() + " from " + configType
+ " from xml configurations" +
" is not found on the cluster. Adding it...");
-
+
if(!newProperties.containsKey(configType)) {
newProperties.put(configType, new HashSet<String>());
}
@@ -221,22 +200,22 @@ public abstract class AbstractUpgradeCatalog implements
UpgradeCatalog {
}
}
}
-
-
-
+
+
+
for (Entry<String, Set<String>> newProperty :
newProperties.entrySet()) {
updateConfigurationPropertiesWithValuesFromXml(newProperty.getKey(),
newProperty.getValue(), false, true);
}
}
}
}
-
+
/**
* Create a new cluster scoped configuration with the new properties added
* with the values from the corresponding xml files.
- *
+ *
* If xml owner service is not in the cluster, the configuration won't be
added.
- *
+ *
* @param configType Configuration type. (hdfs-site, etc.)
* @param properties Set property names.
*/
@@ -244,7 +223,7 @@ public abstract class AbstractUpgradeCatalog implements
UpgradeCatalog {
Set<String> propertyNames, boolean updateIfExists, boolean
createNewConfigType) throws AmbariException {
ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
AmbariManagementController controller =
injector.getInstance(AmbariManagementController.class);
-
+
Clusters clusters = controller.getClusters();
if (clusters == null) {
return;
@@ -254,43 +233,43 @@ public abstract class AbstractUpgradeCatalog implements
UpgradeCatalog {
if (clusterMap != null && !clusterMap.isEmpty()) {
for (Cluster cluster : clusterMap.values()) {
Map<String, String> properties = new HashMap<String, String>();
-
+
for(String propertyName:propertyNames) {
String propertyValue =
configHelper.getPropertyValueFromStackDefenitions(cluster, configType,
propertyName);
-
+
if(propertyValue == null) {
LOG.info("Config " + propertyName + " from " + configType + " is
not found in xml definitions." +
"Skipping configuration property update");
continue;
}
-
+
ServiceInfo propertyService =
configHelper.getPropertyOwnerService(cluster, configType, propertyName);
if(propertyService != null &&
!cluster.getServices().containsKey(propertyService.getName())) {
LOG.info("Config " + propertyName + " from " + configType + " with
value = " + propertyValue + " " +
"Is not added due to service " + propertyService.getName() + "
is not in the cluster.");
continue;
}
-
+
properties.put(propertyName, propertyValue);
}
-
+
updateConfigurationPropertiesForCluster(cluster, configType,
properties, updateIfExists, createNewConfigType);
}
}
}
-
+
protected void updateConfigurationPropertiesForCluster(Cluster cluster,
String configType,
Map<String, String> properties, boolean updateIfExists, boolean
createNewConfigType) throws AmbariException {
AmbariManagementController controller =
injector.getInstance(AmbariManagementController.class);
String newTag = "version" + System.currentTimeMillis();
-
+
if (properties != null) {
Map<String, Config> all = cluster.getConfigsByType(configType);
if (all == null || !all.containsKey(newTag) || properties.size() > 0) {
Map<String, String> oldConfigProperties;
Config oldConfig = cluster.getDesiredConfigByType(configType);
-
+
if (oldConfig == null && !createNewConfigType) {
LOG.info("Config " + configType + " not found. Assuming service not
installed. " +
"Skipping configuration properties update");
@@ -376,11 +355,13 @@ public abstract class AbstractUpgradeCatalog implements
UpgradeCatalog {
@Override
public void upgradeSchema() throws AmbariException, SQLException {
- if (getDbType().equals(Configuration.POSTGRES_DB_NAME)) {
+ DatabaseType databaseType = configuration.getDatabaseType();
+
+ if (databaseType == DatabaseType.POSTGRES) {
changePostgresSearchPath();
}
- this.executeDDLUpdates();
+ executeDDLUpdates();
}
@Override
http://git-wip-us.apache.org/repos/asf/ambari/blob/3271d1d1/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog150.java
----------------------------------------------------------------------
diff --git
a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog150.java
b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog150.java
index 2ccf16b..4ecfe44 100644
---
a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog150.java
+++
b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog150.java
@@ -17,14 +17,29 @@
*/
package org.apache.ambari.server.upgrade;
-import com.google.gson.Gson;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
+import java.io.File;
+import java.io.IOException;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import javax.persistence.EntityManager;
+import javax.persistence.Query;
+import javax.persistence.TypedQuery;
+import javax.persistence.criteria.CriteriaBuilder;
+import javax.persistence.criteria.CriteriaQuery;
+import javax.persistence.criteria.Expression;
+import javax.persistence.criteria.Predicate;
+import javax.persistence.criteria.Root;
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.actionmanager.HostRoleStatus;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.configuration.Configuration.DatabaseType;
import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
import org.apache.ambari.server.orm.dao.ClusterDAO;
import org.apache.ambari.server.orm.dao.ClusterServiceDAO;
@@ -57,20 +72,9 @@ import org.eclipse.persistence.jpa.JpaEntityManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.persistence.EntityManager;
-import javax.persistence.Query;
-import javax.persistence.TypedQuery;
-import javax.persistence.criteria.*;
-
-import java.io.File;
-import java.io.IOException;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Collection;
-import java.util.HashMap;
+import com.google.gson.Gson;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
public class UpgradeCatalog150 extends AbstractUpgradeCatalog {
private static final Logger LOG =
LoggerFactory.getLogger(UpgradeCatalog150.class);
@@ -86,7 +90,7 @@ public class UpgradeCatalog150 extends AbstractUpgradeCatalog
{
@Override
public void executeDDLUpdates() throws AmbariException, SQLException {
LOG.debug("Upgrading schema...");
- String dbType = getDbType();
+ DatabaseType databaseType = configuration.getDatabaseType();
List<DBColumnInfo> columns = new ArrayList<DBColumnInfo>();
// ========================================================================
@@ -276,7 +280,7 @@ public class UpgradeCatalog150 extends
AbstractUpgradeCatalog {
// Alter columns
- if (dbType.equals(Configuration.POSTGRES_DB_NAME)) {
+ if (databaseType == DatabaseType.POSTGRES) {
if (dbAccessor.tableExists("hostcomponentdesiredconfigmapping")) {
dbAccessor.executeQuery("ALTER TABLE hostcomponentdesiredconfigmapping
rename to hcdesiredconfigmapping", true);
}
@@ -284,8 +288,7 @@ public class UpgradeCatalog150 extends
AbstractUpgradeCatalog {
dbAccessor.executeQuery("ALTER TABLE users ALTER column ldap_user TYPE
INTEGER USING CASE WHEN ldap_user=true THEN 1 ELSE 0 END", true);
}
- if (Configuration.ORACLE_DB_NAME.equals(dbType) ||
- Configuration.POSTGRES_DB_NAME.equals(dbType)) {
+ if (databaseType == DatabaseType.ORACLE || databaseType ==
DatabaseType.POSTGRES) {
if (dbAccessor.tableHasColumn("hosts", "disks_info")) {
dbAccessor.executeQuery("ALTER TABLE hosts DROP COLUMN disks_info",
true);
}
@@ -293,7 +296,7 @@ public class UpgradeCatalog150 extends
AbstractUpgradeCatalog {
//Move tables from ambarirca db to ambari db; drop ambarirca; Mysql
- if (dbType.equals(Configuration.MYSQL_DB_NAME)) {
+ if (databaseType == DatabaseType.MYSQL) {
String dbName = configuration.getServerJDBCPostgresSchemaName();
moveRCATableInMySQL("workflow", dbName);
moveRCATableInMySQL("job", dbName);
@@ -314,11 +317,11 @@ public class UpgradeCatalog150 extends
AbstractUpgradeCatalog {
throw new AmbariException(msg);
} else if (!dbAccessor.tableHasData(tableName)) {
String query = null;
- if (dbType.equals(Configuration.POSTGRES_DB_NAME)) {
+ if (databaseType == DatabaseType.POSTGRES) {
query = getPostgresRequestUpgradeQuery();
- } else if (dbType.equals(Configuration.ORACLE_DB_NAME)) {
+ } else if (databaseType == DatabaseType.ORACLE) {
query = getOracleRequestUpgradeQuery();
- } else if (Configuration.MYSQL_DB_NAME.equals(dbType)) {
+ } else if (databaseType == DatabaseType.MYSQL) {
query = getMysqlRequestUpgradeQuery();
}
@@ -331,9 +334,9 @@ public class UpgradeCatalog150 extends
AbstractUpgradeCatalog {
// Drop old constraints
// ========================================================================
- if (Configuration.POSTGRES_DB_NAME.equals(dbType)
- || Configuration.MYSQL_DB_NAME.equals(dbType)
- || Configuration.DERBY_DB_NAME.equals(dbType)) {
+ if (databaseType == DatabaseType.POSTGRES
+ || databaseType == DatabaseType.MYSQL
+ || databaseType == DatabaseType.DERBY) {
//recreate old constraints to sync with oracle
dbAccessor.dropConstraint("clusterconfigmapping",
"FK_clusterconfigmapping_cluster_id");
@@ -410,7 +413,7 @@ public class UpgradeCatalog150 extends
AbstractUpgradeCatalog {
public void executeDMLUpdates() throws AmbariException, SQLException {
// Service Config mapping
String tableName = "serviceconfigmapping";
- String dbType = getDbType();
+ DatabaseType databaseType = configuration.getDatabaseType();
EntityManager em = getEntityManagerProvider().get();
@@ -420,7 +423,7 @@ public class UpgradeCatalog150 extends
AbstractUpgradeCatalog {
&& dbAccessor.tableHasData(tableName)
&& dbAccessor.tableExists("clusterconfigmapping")) {
- if (dbType.equals(Configuration.POSTGRES_DB_NAME)) {
+ if (databaseType == DatabaseType.POSTGRES) {
// Service config mapping entity object will be deleted so need to
// proceed with executing as query
@@ -430,13 +433,13 @@ public class UpgradeCatalog150 extends
AbstractUpgradeCatalog {
} else {
LOG.warn("Unsupported database for service config mapping query. " +
- "database = " + dbType);
+ "database = " + databaseType);
}
}
// Sequences
if (dbAccessor.tableExists("ambari_sequences")) {
- if (dbType.equals(Configuration.POSTGRES_DB_NAME)) {
+ if (databaseType == DatabaseType.POSTGRES) {
ResultSet resultSet = dbAccessor.executeSelect("select * from
ambari_sequences where sequence_name in " +
"('cluster_id_seq','user_id_seq','host_role_command_id_seq')");
@@ -539,7 +542,7 @@ public class UpgradeCatalog150 extends
AbstractUpgradeCatalog {
}
}
});
-
+
// add history server on the host where jobtracker is
executeInTransaction(new Runnable() {
@Override
@@ -574,46 +577,48 @@ public class UpgradeCatalog150 extends
AbstractUpgradeCatalog {
ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
ClusterServiceDAO clusterServiceDAO =
injector.getInstance(ClusterServiceDAO.class);
ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO =
injector.getInstance(ServiceComponentDesiredStateDAO.class);
-
+
List<ClusterEntity> clusterEntities = clusterDAO.findAll();
for (final ClusterEntity clusterEntity : clusterEntities) {
ServiceComponentDesiredStateEntityPK pkHS = new
ServiceComponentDesiredStateEntityPK();
pkHS.setComponentName("HISTORYSERVER");
pkHS.setClusterId(clusterEntity.getClusterId());
pkHS.setServiceName("MAPREDUCE");
-
+
ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntityHS
= serviceComponentDesiredStateDAO.findByPK(pkHS);
-
+
// already have historyserver
- if(serviceComponentDesiredStateEntityHS != null)
+ if(serviceComponentDesiredStateEntityHS != null) {
continue;
-
+ }
+
ServiceComponentDesiredStateEntityPK pkJT = new
ServiceComponentDesiredStateEntityPK();
pkJT.setComponentName("JOBTRACKER");
pkJT.setClusterId(clusterEntity.getClusterId());
pkJT.setServiceName("MAPREDUCE");
-
+
ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntityJT
= serviceComponentDesiredStateDAO.findByPK(pkJT);
-
+
// no jobtracker present probably mapreduce is not installed
- if(serviceComponentDesiredStateEntityJT == null)
+ if(serviceComponentDesiredStateEntityJT == null) {
continue;
+ }
+
-
HostComponentStateEntity jtHostComponentStateEntity =
serviceComponentDesiredStateEntityJT.getHostComponentStateEntities().iterator().next();
HostComponentDesiredStateEntity jtHostComponentDesiredStateEntity =
serviceComponentDesiredStateEntityJT.getHostComponentDesiredStateEntities().iterator().next();
String jtHostname = jtHostComponentStateEntity.getHostName();
State jtCurrState = jtHostComponentStateEntity.getCurrentState();
State jtHostComponentDesiredState =
jtHostComponentDesiredStateEntity.getDesiredState();
State jtServiceComponentDesiredState =
serviceComponentDesiredStateEntityJT.getDesiredState();
-
+
ClusterServiceEntityPK pk = new ClusterServiceEntityPK();
pk.setClusterId(clusterEntity.getClusterId());
pk.setServiceName("MAPREDUCE");
-
+
ClusterServiceEntity clusterServiceEntity =
clusterServiceDAO.findByPK(pk);
-
-
+
+
final ServiceComponentDesiredStateEntity
serviceComponentDesiredStateEntity = new ServiceComponentDesiredStateEntity();
serviceComponentDesiredStateEntity.setComponentName("HISTORYSERVER");
serviceComponentDesiredStateEntity.setDesiredStackVersion(clusterEntity.getDesiredStackVersion());
@@ -625,21 +630,21 @@ public class UpgradeCatalog150 extends
AbstractUpgradeCatalog {
stateEntity.setHostName(jtHostname);
stateEntity.setCurrentState(jtCurrState);
stateEntity.setCurrentStackVersion(clusterEntity.getDesiredStackVersion());
-
+
final HostComponentDesiredStateEntity desiredStateEntity = new
HostComponentDesiredStateEntity();
desiredStateEntity.setDesiredState(jtHostComponentDesiredState);
desiredStateEntity.setDesiredStackVersion(clusterEntity.getDesiredStackVersion());
-
+
persistComponentEntities(stateEntity, desiredStateEntity,
serviceComponentDesiredStateEntity);
}
}
-
+
private void persistComponentEntities(HostComponentStateEntity stateEntity,
HostComponentDesiredStateEntity desiredStateEntity,
ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity) {
ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO =
injector.getInstance(ServiceComponentDesiredStateDAO.class);
HostComponentStateDAO hostComponentStateDAO =
injector.getInstance(HostComponentStateDAO.class);
HostComponentDesiredStateDAO hostComponentDesiredStateDAO =
injector.getInstance(HostComponentDesiredStateDAO.class);
HostDAO hostDAO = injector.getInstance(HostDAO.class);
-
+
HostEntity hostEntity = hostDAO.findByName(stateEntity.getHostName());
hostEntity.getHostComponentStateEntities().add(stateEntity);
hostEntity.getHostComponentDesiredStateEntities().add(desiredStateEntity);
@@ -697,7 +702,7 @@ public class UpgradeCatalog150 extends
AbstractUpgradeCatalog {
for (String configType : configTypes) {
if (configType.contains(log4jConfigTypeContains)) {
ClusterConfigEntity configEntity =
clusterDAO.findConfig(clusterId, configType, defaultVersionTag);
-
+
if (configEntity == null) {
String filename = configType + ".xml";
Map<String, String> properties = new HashMap<String, String>();
@@ -856,12 +861,12 @@ public class UpgradeCatalog150 extends
AbstractUpgradeCatalog {
}
private void createQuartzTables() throws SQLException {
- String dbType = getDbType();
+ DatabaseType databaseType = configuration.getDatabaseType();
// Run script to create quartz tables
String scriptPath = configuration.getResourceDirPath() +
File.separator + "upgrade" + File.separator + "ddl" +
- File.separator + String.format(quartzScriptFilePattern, dbType);
+ File.separator + String.format(quartzScriptFilePattern,
databaseType.getName());
try {
dbAccessor.executeScript(scriptPath);
http://git-wip-us.apache.org/repos/asf/ambari/blob/3271d1d1/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog151.java
----------------------------------------------------------------------
diff --git
a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog151.java
b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog151.java
index 72305c5..afe27b5 100644
---
a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog151.java
+++
b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog151.java
@@ -17,21 +17,22 @@
*/
package org.apache.ambari.server.upgrade;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.orm.DBAccessor;
-
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.configuration.Configuration.DatabaseType;
+import org.apache.ambari.server.orm.DBAccessor;
+
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+
/**
* Upgrade catalog for version 1.5.1.
*/
public class UpgradeCatalog151 extends AbstractUpgradeCatalog {
-
+
//SourceVersion is only for book-keeping purpose
@Override
public String getSourceVersion() {
@@ -52,6 +53,7 @@ public class UpgradeCatalog151 extends AbstractUpgradeCatalog
{
@Override
protected void executeDDLUpdates() throws AmbariException, SQLException {
List<DBAccessor.DBColumnInfo> columns = new
ArrayList<DBAccessor.DBColumnInfo>();
+ DatabaseType databaseType = configuration.getDatabaseType();
// ========================================================================
// Create tables
@@ -108,12 +110,12 @@ public class UpgradeCatalog151 extends
AbstractUpgradeCatalog {
columns.add(new DBAccessor.DBColumnInfo("subResource_names", String.class,
255, null, true));
columns.add(new DBAccessor.DBColumnInfo("provider", String.class, 255,
null, true));
columns.add(new DBAccessor.DBColumnInfo("service", String.class, 255,
null, true));
- if (Configuration.MYSQL_DB_NAME.equals(getDbType())) {
+ if (databaseType == DatabaseType.MYSQL) {
columns.add(new DBAccessor.DBColumnInfo("`resource`", String.class, 255,
null, true));
//TODO incorrect name for MySQL
-
+
} else{
- columns.add(new DBAccessor.DBColumnInfo("\"resource\"", String.class,
255, null, true));
+ columns.add(new DBAccessor.DBColumnInfo("\"resource\"", String.class,
255, null, true));
//TODO incorrect name for oracle
}
dbAccessor.createTable("viewresource", columns, "view_name", "name");
http://git-wip-us.apache.org/repos/asf/ambari/blob/3271d1d1/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog160.java
----------------------------------------------------------------------
diff --git
a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog160.java
b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog160.java
index cd01779..a1f11d8 100644
---
a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog160.java
+++
b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog160.java
@@ -24,7 +24,7 @@ import java.util.Collections;
import java.util.List;
import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.configuration.Configuration.DatabaseType;
import org.apache.ambari.server.orm.DBAccessor;
import com.google.inject.Inject;
@@ -100,8 +100,9 @@ public class UpgradeCatalog160 extends
AbstractUpgradeCatalog {
protected void fixViewTablesForMysql() throws SQLException {
//fixes 1.5.1 issue for mysql with non-default db name
//view tables were not created
-
- if (!Configuration.MYSQL_DB_NAME.equals(getDbType()) ||
"ambari".equals(configuration.getServerDBName())) {
+ DatabaseType databaseType = configuration.getDatabaseType();
+ if (!(databaseType == DatabaseType.MYSQL)
+ || "ambari".equals(configuration.getServerDBName())) {
//no need to run for non-mysql dbms or default db name
return;
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/3271d1d1/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog161.java
----------------------------------------------------------------------
diff --git
a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog161.java
b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog161.java
index f5afb46..4ab0cb6 100644
---
a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog161.java
+++
b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog161.java
@@ -24,11 +24,12 @@ import java.sql.Types;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
+
import javax.persistence.EntityManager;
import javax.persistence.TypedQuery;
import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.configuration.Configuration.DatabaseType;
import org.apache.ambari.server.orm.DBAccessor;
import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
import org.apache.ambari.server.orm.entities.ClusterEntity;
@@ -68,7 +69,7 @@ public class UpgradeCatalog161 extends AbstractUpgradeCatalog
{
@Override
protected void executeDDLUpdates() throws AmbariException, SQLException {
- String dbType = getDbType();
+ DatabaseType databaseType = configuration.getDatabaseType();
List<DBColumnInfo> columns;
@@ -86,7 +87,7 @@ public class UpgradeCatalog161 extends AbstractUpgradeCatalog
{
// 1.6.0 initially shipped with restart_required as a BOOLEAN so some
// upgrades might be BOOLEAN but most are probably SMALLINT
- if (Configuration.POSTGRES_DB_NAME.equals(dbType)) {
+ if (databaseType == DatabaseType.POSTGRES) {
int columnType = dbAccessor.getColumnType("hostcomponentdesiredstate",
"restart_required");
@@ -97,7 +98,7 @@ public class UpgradeCatalog161 extends AbstractUpgradeCatalog
{
}
}
- if (Configuration.ORACLE_DB_NAME.equals(dbType)) {
+ if (databaseType == DatabaseType.ORACLE) {
dbAccessor.executeQuery(
"ALTER TABLE hostcomponentdesiredstate MODIFY (restart_required
DEFAULT 0)",
true);
@@ -125,12 +126,12 @@ public class UpgradeCatalog161 extends
AbstractUpgradeCatalog {
dbAccessor.dropConstraint("viewinstance", "FK_viewinst_view_name");
//modify primary key of viewinstancedata
- if (Configuration.ORACLE_DB_NAME.equals(dbType)
- || Configuration.MYSQL_DB_NAME.equals(dbType)
- || Configuration.DERBY_DB_NAME.equals(dbType)) {
+ if (databaseType == DatabaseType.ORACLE
+ || databaseType == DatabaseType.MYSQL
+ || databaseType == DatabaseType.DERBY) {
dbAccessor.executeQuery("ALTER TABLE viewinstance DROP PRIMARY KEY",
true);
dbAccessor.executeQuery("ALTER TABLE viewinstancedata DROP PRIMARY KEY",
true);
- } else if (Configuration.POSTGRES_DB_NAME.equals(dbType)) {
+ } else if (databaseType == DatabaseType.POSTGRES) {
dbAccessor.executeQuery("ALTER TABLE viewinstance DROP CONSTRAINT
viewinstance_pkey CASCADE", true);
dbAccessor.executeQuery("ALTER TABLE viewinstancedata DROP CONSTRAINT
viewinstancedata_pkey CASCADE", true);
}
@@ -140,7 +141,7 @@ public class UpgradeCatalog161 extends
AbstractUpgradeCatalog {
dbAccessor.addColumn("viewinstancedata",
new DBAccessor.DBColumnInfo("view_instance_id", Long.class, null, null,
true));
- if (Configuration.ORACLE_DB_NAME.equals(dbType)) {
+ if (databaseType == DatabaseType.ORACLE) {
//sequence looks to be simpler than rownum
if (dbAccessor.tableHasData("viewinstancedata")) {
dbAccessor.executeQuery("CREATE SEQUENCE TEMP_SEQ " +
@@ -153,12 +154,12 @@ public class UpgradeCatalog161 extends
AbstractUpgradeCatalog {
dbAccessor.executeQuery("UPDATE viewinstance SET view_instance_id =
TEMP_SEQ.NEXTVAL");
dbAccessor.dropSequence("TEMP_SEQ");
}
- } else if (Configuration.MYSQL_DB_NAME.equals(dbType)) {
+ } else if (databaseType == DatabaseType.MYSQL) {
if (dbAccessor.tableHasData("viewinstance")) {
dbAccessor.executeQuery("UPDATE viewinstance " +
"SET view_instance_id = (SELECT @a := @a + 1 FROM (SELECT @a := 1)
s)");
}
- } else if (Configuration.POSTGRES_DB_NAME.equals(dbType)) {
+ } else if (databaseType == DatabaseType.POSTGRES) {
if (dbAccessor.tableHasData("viewinstance")) {
//window functions like row_number were added in 8.4, workaround for
earlier versions (redhat/centos 5)
dbAccessor.executeQuery("CREATE SEQUENCE temp_seq START WITH 1");
@@ -169,7 +170,7 @@ public class UpgradeCatalog161 extends
AbstractUpgradeCatalog {
}
- if (Configuration.DERBY_DB_NAME.equals(dbType)) {
+ if (databaseType == DatabaseType.DERBY) {
dbAccessor.executeQuery("ALTER TABLE viewinstance ALTER COLUMN
view_instance_id DEFAULT 0");
dbAccessor.executeQuery("ALTER TABLE viewinstance ALTER COLUMN
view_instance_id NOT NULL");
dbAccessor.executeQuery("ALTER TABLE viewinstancedata ALTER COLUMN
view_instance_id DEFAULT 0");
@@ -188,14 +189,14 @@ public class UpgradeCatalog161 extends
AbstractUpgradeCatalog {
new String[]{"view_name", "view_instance_name"}, "viewinstance", new
String[]{"view_name", "name"}, true);
dbAccessor.addFKConstraint("viewinstance", "FK_viewinst_view_name",
"view_name", "viewmain", "view_name", true);
- if (Configuration.POSTGRES_DB_NAME.equals(dbType)) {
+ if (databaseType == DatabaseType.POSTGRES) {
dbAccessor.executeQuery("UPDATE viewinstancedata " +
"SET view_instance_id = vi.view_instance_id FROM viewinstance AS vi " +
"WHERE vi.name = viewinstancedata.view_instance_name AND vi.view_name
= viewinstancedata.view_name");
- } else if (Configuration.ORACLE_DB_NAME.equals(dbType)) {
+ } else if (databaseType == DatabaseType.ORACLE) {
dbAccessor.executeQuery("UPDATE viewinstancedata vid SET
view_instance_id = (" +
"SELECT view_instance_id FROM viewinstance vi WHERE vi.name =
vid.view_instance_name AND vi.view_name = vid.view_name)");
- } else if (Configuration.MYSQL_DB_NAME.equals(dbType)) {
+ } else if (databaseType == DatabaseType.MYSQL) {
dbAccessor.executeQuery("UPDATE viewinstancedata AS vid JOIN
viewinstance AS vi " +
"ON vi.name = vid.view_instance_name AND vi.view_name = vid.view_name
" +
"SET vid.view_instance_id = vi.view_instance_id");
@@ -221,8 +222,8 @@ public class UpgradeCatalog161 extends
AbstractUpgradeCatalog {
}
String valueColumnName = "\"value\"";
- if (Configuration.ORACLE_DB_NAME.equals(dbType)
- || Configuration.MYSQL_DB_NAME.equals(dbType)) {
+ if (databaseType == DatabaseType.ORACLE
+ || databaseType == DatabaseType.MYSQL) {
valueColumnName = "value";
}
http://git-wip-us.apache.org/repos/asf/ambari/blob/3271d1d1/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java
----------------------------------------------------------------------
diff --git
a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java
b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java
index 7c06500..cbbcfd6 100644
---
a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java
+++
b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java
@@ -18,11 +18,33 @@
package org.apache.ambari.server.upgrade;
-import com.google.common.reflect.TypeToken;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
+import java.lang.reflect.Type;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import javax.persistence.EntityManager;
+import javax.persistence.TypedQuery;
+import javax.persistence.criteria.CriteriaBuilder;
+import javax.persistence.criteria.CriteriaQuery;
+import javax.persistence.criteria.Expression;
+import javax.persistence.criteria.Predicate;
+import javax.persistence.criteria.Root;
+
import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.configuration.Configuration.DatabaseType;
import org.apache.ambari.server.controller.AmbariManagementController;
import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
import org.apache.ambari.server.orm.dao.ClusterDAO;
@@ -76,28 +98,9 @@ import org.apache.ambari.server.view.ViewRegistry;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import javax.persistence.EntityManager;
-import javax.persistence.TypedQuery;
-import javax.persistence.criteria.CriteriaBuilder;
-import javax.persistence.criteria.CriteriaQuery;
-import javax.persistence.criteria.Expression;
-import javax.persistence.criteria.Predicate;
-import javax.persistence.criteria.Root;
-import java.lang.reflect.Type;
-import java.sql.Connection;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
+import com.google.common.reflect.TypeToken;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
/**
* Upgrade catalog for version 1.7.0.
@@ -161,10 +164,11 @@ public class UpgradeCatalog170 extends
AbstractUpgradeCatalog {
@Override
protected void executeDDLUpdates() throws AmbariException, SQLException {
+ DatabaseType databaseType = configuration.getDatabaseType();
+
// needs to be executed first
renameSequenceValueColumnName();
- String dbType = getDbType();
List<DBColumnInfo> columns;
// add group and members tables
@@ -247,7 +251,7 @@ public class UpgradeCatalog170 extends
AbstractUpgradeCatalog {
dbAccessor.insertRow("adminprivilege", new String[]{"privilege_id",
"permission_id", "resource_id", "principal_id"}, new String[]{"1", "1", "1",
"1"}, true);
- if (dbType.equals(Configuration.ORACLE_DB_NAME)) {
+ if (databaseType == DatabaseType.ORACLE) {
dbAccessor.executeQuery("ALTER TABLE clusterconfig ADD config_attributes
CLOB NULL");
} else {
DBColumnInfo clusterConfigAttributesColumn = new DBColumnInfo(
@@ -317,17 +321,17 @@ public class UpgradeCatalog170 extends
AbstractUpgradeCatalog {
dbAccessor.dropConstraint("confgroupclusterconfigmapping", "FK_confg");
- if (Configuration.ORACLE_DB_NAME.equals(dbType)
- || Configuration.MYSQL_DB_NAME.equals(dbType)
- || Configuration.DERBY_DB_NAME.equals(dbType)) {
+ if (databaseType == DatabaseType.ORACLE
+ || databaseType == DatabaseType.MYSQL
+ || databaseType == DatabaseType.DERBY) {
dbAccessor.executeQuery("ALTER TABLE clusterconfig DROP PRIMARY KEY",
true);
- } else if (Configuration.POSTGRES_DB_NAME.equals(dbType)) {
+ } else if (databaseType == DatabaseType.POSTGRES) {
dbAccessor.executeQuery("ALTER TABLE clusterconfig DROP CONSTRAINT
clusterconfig_pkey CASCADE", true);
}
dbAccessor.addColumn("clusterconfig", new DBColumnInfo("config_id",
Long.class, null, null, true));
- if (Configuration.ORACLE_DB_NAME.equals(dbType)) {
+ if (databaseType == DatabaseType.ORACLE) {
//sequence looks to be simpler than rownum
if (dbAccessor.tableHasData("clusterconfig")) {
dbAccessor.executeQuery("CREATE SEQUENCE TEMP_SEQ " +
@@ -340,12 +344,12 @@ public class UpgradeCatalog170 extends
AbstractUpgradeCatalog {
dbAccessor.executeQuery("UPDATE clusterconfig SET config_id =
TEMP_SEQ.NEXTVAL");
dbAccessor.dropSequence("TEMP_SEQ");
}
- } else if (Configuration.MYSQL_DB_NAME.equals(dbType)) {
+ } else if (databaseType == DatabaseType.MYSQL) {
if (dbAccessor.tableHasData("clusterconfig")) {
dbAccessor.executeQuery("UPDATE clusterconfig " +
"SET config_id = (SELECT @a := @a + 1 FROM (SELECT @a := 1) s)");
}
- } else if (Configuration.POSTGRES_DB_NAME.equals(dbType)) {
+ } else if (databaseType == DatabaseType.POSTGRES) {
if (dbAccessor.tableHasData("clusterconfig")) {
//window functions like row_number were added in 8.4, workaround for
earlier versions (redhat/centos 5)
dbAccessor.executeQuery("CREATE SEQUENCE temp_seq START WITH 1");
@@ -355,20 +359,20 @@ public class UpgradeCatalog170 extends
AbstractUpgradeCatalog {
}
// alter view tables description columns size
- if (dbType.equals(Configuration.ORACLE_DB_NAME) ||
- dbType.equals(Configuration.MYSQL_DB_NAME)) {
+ if (databaseType == DatabaseType.ORACLE
+ || databaseType == DatabaseType.MYSQL) {
dbAccessor.executeQuery("ALTER TABLE viewinstance MODIFY description
VARCHAR(2048)");
dbAccessor.executeQuery("ALTER TABLE viewparameter MODIFY description
VARCHAR(2048)");
- } else if (Configuration.POSTGRES_DB_NAME.equals(dbType)) {
+ } else if (databaseType == DatabaseType.POSTGRES) {
dbAccessor.executeQuery("ALTER TABLE viewinstance ALTER COLUMN
description TYPE VARCHAR(2048)");
dbAccessor.executeQuery("ALTER TABLE viewparameter ALTER COLUMN
description TYPE VARCHAR(2048)");
- } else if (dbType.equals(Configuration.DERBY_DB_NAME)) {
+ } else if (databaseType == DatabaseType.DERBY) {
dbAccessor.executeQuery("ALTER TABLE viewinstance ALTER COLUMN
description SET DATA TYPE VARCHAR(2048)");
dbAccessor.executeQuery("ALTER TABLE viewparameter ALTER COLUMN
description SET DATA TYPE VARCHAR(2048)");
}
//upgrade unit test workaround
- if (Configuration.DERBY_DB_NAME.equals(dbType)) {
+ if (databaseType == DatabaseType.DERBY) {
dbAccessor.executeQuery("ALTER TABLE clusterconfig ALTER COLUMN
config_id DEFAULT 0");
dbAccessor.executeQuery("ALTER TABLE clusterconfig ALTER COLUMN
config_id NOT NULL");
}
@@ -385,7 +389,7 @@ public class UpgradeCatalog170 extends
AbstractUpgradeCatalog {
dbAccessor.executeQuery("ALTER TABLE clusterconfig ADD CONSTRAINT
UQ_config_type_tag UNIQUE (cluster_id, type_name, version_tag)", true);
dbAccessor.executeQuery("ALTER TABLE clusterconfig ADD CONSTRAINT
UQ_config_type_version UNIQUE (cluster_id, type_name, version)", true);
- if (!Configuration.ORACLE_DB_NAME.equals(dbType)) {
+ if (databaseType != DatabaseType.ORACLE) {
dbAccessor.alterColumn("clusterconfig", new DBColumnInfo("config_data",
char[].class, null, null, false));
dbAccessor.alterColumn("blueprint_configuration", new
DBColumnInfo("config_data", char[].class, null, null, false));
dbAccessor.alterColumn("hostgroup_configuration", new
DBColumnInfo("config_data", char[].class, null, null, false));
@@ -494,12 +498,12 @@ public class UpgradeCatalog170 extends
AbstractUpgradeCatalog {
* thus requires custom approach for every database type.
*/
private void renameSequenceValueColumnName() throws AmbariException,
SQLException {
- final String dbType = getDbType();
- if (Configuration.MYSQL_DB_NAME.equals(dbType)) {
+ final DatabaseType databaseType = configuration.getDatabaseType();
+ if (databaseType == DatabaseType.MYSQL) {
dbAccessor.executeQuery("ALTER TABLE ambari_sequences CHANGE value
sequence_value DECIMAL(38) NOT NULL");
- } else if (Configuration.DERBY_DB_NAME.equals(dbType)) {
+ } else if (databaseType == DatabaseType.DERBY) {
dbAccessor.executeQuery("RENAME COLUMN ambari_sequences.\"value\" to
sequence_value");
- } else if (Configuration.ORACLE_DB_NAME.equals(dbType)) {
+ } else if (databaseType == DatabaseType.ORACLE) {
dbAccessor.executeQuery("ALTER TABLE ambari_sequences RENAME COLUMN
value to sequence_value");
} else {
// Postgres
@@ -1113,7 +1117,7 @@ public class UpgradeCatalog170 extends
AbstractUpgradeCatalog {
Config oldConfig =
cluster.getDesiredConfigByType(PIG_PROPERTIES_CONFIG_TYPE);
if (oldConfig != null) {
Map<String, String> properties = oldConfig.getProperties();
-
+
if(!properties.containsKey(CONTENT_FIELD_NAME)) {
String value = properties.remove(PIG_CONTENT_FIELD_NAME);
properties.put(CONTENT_FIELD_NAME, value);
http://git-wip-us.apache.org/repos/asf/ambari/blob/3271d1d1/ambari-server/src/main/resources/META-INF/persistence.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/META-INF/persistence.xml
b/ambari-server/src/main/resources/META-INF/persistence.xml
index 2d8fedb..023b170 100644
--- a/ambari-server/src/main/resources/META-INF/persistence.xml
+++ b/ambari-server/src/main/resources/META-INF/persistence.xml
@@ -71,17 +71,12 @@
<class>org.apache.ambari.server.orm.entities.ServiceConfigApplicationEntity</class>
<properties>
- <!--<property name="javax.persistence.jdbc.url"
value="jdbc:postgresql://localhost/ambari" />-->
- <!--<property name="javax.persistence.jdbc.driver"
value="org.postgresql.Driver" />-->
<property name="eclipselink.cache.size.default" value="10000" />
<property name="eclipselink.jdbc.batch-writing" value="JDBC"/>
<property name="eclipselink.jdbc.batch-writing.size" value="4000"/>
- <property name="eclipselink.jdbc.sequence-connection-pool" value="true"
/>
+ <property name="eclipselink.connection-pool.sequence" value="true" />
<property name="eclipselink.weaving" value="static" />
-
- <!--<property name="eclipselink.logging.level.sql" value="FINEST" />-->
- <!--<property name="eclipselink.id-validation" value="NULL" />-->
-
+ <property name="eclipselink.session.customizer"
value="org.apache.ambari.server.orm.EclipseLinkSessionCustomizer"/>
</properties>
</persistence-unit>