AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/af1bf85c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/af1bf85c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/af1bf85c

Branch: refs/heads/trunk
Commit: af1bf85c6801293fce732e17c13bb4fd5377f0b1
Parents: 3acf908
Author: Dmytro Grinenko <hapyles...@apache.org>
Authored: Sun Jun 25 12:58:05 2017 +0300
Committer: Dmytro Grinenko <hapyles...@apache.org>
Committed: Sun Jun 25 12:59:06 2017 +0300

----------------------------------------------------------------------
 .../server/upgrade/SchemaUpgradeHelper.java     |   13 -
 .../server/upgrade/UpgradeCatalog200.java       |  613 ----
 .../server/upgrade/UpgradeCatalog210.java       | 1765 ----------
 .../server/upgrade/UpgradeCatalog211.java       |  295 --
 .../server/upgrade/UpgradeCatalog212.java       |  427 ---
 .../server/upgrade/UpgradeCatalog2121.java      |  206 --
 .../server/upgrade/UpgradeCatalog220.java       | 1404 --------
 .../server/upgrade/UpgradeCatalog221.java       |  456 ---
 .../server/upgrade/UpgradeCatalog222.java       |  781 -----
 .../server/upgrade/UpgradeCatalog230.java       |  402 ---
 .../server/upgrade/UpgradeCatalog240.java       | 3079 ------------------
 .../server/upgrade/UpgradeCatalog2402.java      |  121 -
 .../server/upgrade/UpgradeCatalog242.java       |  272 --
 .../server/upgrade/UpgradeCatalog250.java       | 1352 --------
 .../server/upgrade/UpgradeCatalog300.java       |    5 +-
 ambari-server/src/main/python/upgradeHelper.py  | 2338 -------------
 .../catalog/UpgradeCatalog_1.3_to_2.2.json      |  948 ------
 .../catalog/UpgradeCatalog_2.0_to_2.2.2.json    |  408 ---
 .../catalog/UpgradeCatalog_2.0_to_2.2.4.json    |  453 ---
 .../catalog/UpgradeCatalog_2.0_to_2.2.json      |  275 --
 .../catalog/UpgradeCatalog_2.1_to_2.2.2.json    |  465 ---
 .../catalog/UpgradeCatalog_2.1_to_2.2.4.json    |  499 ---
 .../catalog/UpgradeCatalog_2.1_to_2.2.json      |  292 --
 .../catalog/UpgradeCatalog_2.1_to_2.3.json      |  440 ---
 .../catalog/UpgradeCatalog_2.2_to_2.3.json      | 2234 -------------
 .../UpgradeCatalog_2.2_to_2.3_step2.json        |   81 -
 .../server/upgrade/UpgradeCatalog200Test.java   |  915 ------
 .../server/upgrade/UpgradeCatalog210Test.java   | 1360 --------
 .../server/upgrade/UpgradeCatalog211Test.java   |  446 ---
 .../server/upgrade/UpgradeCatalog2121Test.java  |  161 -
 .../server/upgrade/UpgradeCatalog212Test.java   |  694 ----
 .../server/upgrade/UpgradeCatalog220Test.java   | 1535 ---------
 .../server/upgrade/UpgradeCatalog221Test.java   |  614 ----
 .../server/upgrade/UpgradeCatalog222Test.java   | 1180 -------
 .../server/upgrade/UpgradeCatalog230Test.java   |  317 --
 .../server/upgrade/UpgradeCatalog240Test.java   | 2688 ---------------
 .../server/upgrade/UpgradeCatalog242Test.java   |  430 ---
 .../server/upgrade/UpgradeCatalog250Test.java   | 2129 ------------
 .../server/upgrade/UpgradeCatalog300Test.java   |    2 +-
 .../server/upgrade/UpgradeCatalogTest.java      |   13 +-
 40 files changed, 11 insertions(+), 32097 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
index dee05c3..bcc8328 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
@@ -176,19 +176,6 @@ public class SchemaUpgradeHelper {
       // Add binding to each newly created catalog
       Multibinder<UpgradeCatalog> catalogBinder =
         Multibinder.newSetBinder(binder(), UpgradeCatalog.class);
-      catalogBinder.addBinding().to(UpgradeCatalog200.class);
-      catalogBinder.addBinding().to(UpgradeCatalog210.class);
-      catalogBinder.addBinding().to(UpgradeCatalog211.class);
-      catalogBinder.addBinding().to(UpgradeCatalog212.class);
-      catalogBinder.addBinding().to(UpgradeCatalog2121.class);
-      catalogBinder.addBinding().to(UpgradeCatalog220.class);
-      catalogBinder.addBinding().to(UpgradeCatalog221.class);
-      catalogBinder.addBinding().to(UpgradeCatalog222.class);
-      catalogBinder.addBinding().to(UpgradeCatalog230.class);
-      catalogBinder.addBinding().to(UpgradeCatalog240.class);
-      catalogBinder.addBinding().to(UpgradeCatalog2402.class);
-      catalogBinder.addBinding().to(UpgradeCatalog242.class);
-      catalogBinder.addBinding().to(UpgradeCatalog250.class);
       catalogBinder.addBinding().to(UpgradeCatalog251.class);
       catalogBinder.addBinding().to(UpgradeCatalog252.class);
       catalogBinder.addBinding().to(UpgradeCatalog300.class);
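
The hunk above is Guice's Multibinder pattern at work: each addBinding()
contributes one catalog to an injectable Set<UpgradeCatalog> that
SchemaUpgradeHelper resolves and applies, so retiring a catalog is just a
matter of deleting its binding. The following is a minimal, self-contained
sketch of that pattern for reference; the UpgradeCatalog interface and the
catalog class in it are simplified stand-ins, not Ambari's actual types.

import java.util.Set;

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Key;
import com.google.inject.TypeLiteral;
import com.google.inject.multibindings.Multibinder;

public class MultibinderSketch {

  // Simplified stand-in for org.apache.ambari.server.upgrade.UpgradeCatalog.
  interface UpgradeCatalog {
    String getTargetVersion();
  }

  // Simplified stand-in for one of the catalogs that remains registered.
  static class UpgradeCatalog251 implements UpgradeCatalog {
    @Override
    public String getTargetVersion() {
      return "2.5.1";
    }
  }

  static class UpgradeHelperModule extends AbstractModule {
    @Override
    protected void configure() {
      // Each addBinding() contributes one element to the injectable set;
      // deleting the line (as this commit does for the pre-2.5 catalogs)
      // is enough to drop that catalog from the upgrade chain.
      Multibinder<UpgradeCatalog> catalogBinder =
          Multibinder.newSetBinder(binder(), UpgradeCatalog.class);
      catalogBinder.addBinding().to(UpgradeCatalog251.class);
    }
  }

  public static void main(String[] args) {
    Injector injector = Guice.createInjector(new UpgradeHelperModule());
    Set<UpgradeCatalog> catalogs =
        injector.getInstance(Key.get(new TypeLiteral<Set<UpgradeCatalog>>() {}));
    catalogs.forEach(c -> System.out.println(c.getTargetVersion()));
  }
}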

http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog200.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog200.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog200.java
deleted file mode 100644
index a9280a4..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog200.java
+++ /dev/null
@@ -1,613 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.ambari.annotations.Experimental;
-import org.apache.ambari.annotations.ExperimentalFeature;
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
-import org.apache.ambari.server.orm.dao.ClusterDAO;
-import org.apache.ambari.server.orm.dao.ClusterServiceDAO;
-import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
-import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
-import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
-import org.apache.ambari.server.orm.dao.ServiceDesiredStateDAO;
-import org.apache.ambari.server.orm.entities.ClusterEntity;
-import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
-import org.apache.ambari.server.orm.entities.ClusterServiceEntityPK;
-import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
-import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.PropertyInfo;
-import org.apache.ambari.server.state.RepositoryInfo;
-import org.apache.ambari.server.state.SecurityState;
-import org.apache.ambari.server.state.SecurityType;
-import org.apache.ambari.server.state.UpgradeState;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-
-/**
- * Upgrade catalog for version 2.0.0.
- */
-public class UpgradeCatalog200 extends AbstractUpgradeCatalog {
-
-  private static final String ALERT_DEFINITION_TABLE = "alert_definition";
-  private static final String ALERT_TARGET_TABLE = "alert_target";
-  private static final String ALERT_TARGET_STATES_TABLE = "alert_target_states";
-  private static final String ALERT_CURRENT_TABLE = "alert_current";
-  private static final String ARTIFACT_TABLE = "artifact";
-  private static final String KERBEROS_PRINCIPAL_TABLE = "kerberos_principal";
-  private static final String KERBEROS_PRINCIPAL_HOST_TABLE = "kerberos_principal_host";
-  private static final String TEZ_USE_CLUSTER_HADOOP_LIBS_PROPERTY = "tez.use.cluster.hadoop-libs";
-  private static final String FLUME_ENV_CONFIG = "flume-env";
-  private static final String CONTENT_PROPERTY = "content";
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getTargetVersion() {
-    return "2.0.0";
-  }
-
-  /**
-   * Logger.
-   */
-  private static final Logger LOG = LoggerFactory.getLogger
-      (UpgradeCatalog200.class);
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Don't forget to register new UpgradeCatalogs in {@link org.apache.ambari.server.upgrade.SchemaUpgradeHelper.UpgradeHelperModule#configure()}
-   * @param injector Guice injector to track dependencies and uses bindings to inject them.
-   */
-  @Inject
-  public UpgradeCatalog200(Injector injector) {
-    super(injector);
-    this.injector = injector;
-  }
-
-  // ----- AbstractUpgradeCatalog --------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDDLUpdates() throws AmbariException, SQLException {
-    prepareRollingUpgradesDDL();
-    executeAlertDDLUpdates();
-    createArtifactTable();
-    createKerberosPrincipalTables();
-
-    // add viewparameter columns
-    dbAccessor.addColumn("viewparameter", new DBColumnInfo("label",
-        String.class, 255, null, true));
-
-    dbAccessor.addColumn("viewparameter", new DBColumnInfo("placeholder",
-        String.class, 255, null, true));
-
-    dbAccessor.addColumn("viewparameter", new DBColumnInfo("default_value",
-        String.class, 2000, null, true));
-
-    // add security_type to clusters
-    dbAccessor.addColumn("clusters", new DBColumnInfo(
-        "security_type", String.class, 32, SecurityType.NONE.toString(), 
false));
-
-    // add security_state to various tables
-    dbAccessor.addColumn("hostcomponentdesiredstate", new DBColumnInfo(
-        "security_state", String.class, 32, 
SecurityState.UNSECURED.toString(), false));
-    dbAccessor.addColumn("hostcomponentstate", new DBColumnInfo(
-        "security_state", String.class, 32, 
SecurityState.UNSECURED.toString(), false));
-    dbAccessor.addColumn("servicedesiredstate", new DBColumnInfo(
-        "security_state", String.class, 32, 
SecurityState.UNSECURED.toString(), false));
-
-    // Alter column : make viewinstanceproperty.value & viewinstancedata.value
-    // nullable
-    dbAccessor.alterColumn("viewinstanceproperty", new DBColumnInfo("value",
-        String.class, 2000, null, true));
-    dbAccessor.alterColumn("viewinstancedata", new DBColumnInfo("value",
-        String.class, 2000, null, true));
-  }
-
-  /**
-   * Execute all of the alert DDL updates.
-   *
-   * @throws AmbariException
-   * @throws SQLException
-   */
-  private void executeAlertDDLUpdates() throws AmbariException, SQLException {
-    // add ignore_host column to alert_definition
-    dbAccessor.addColumn(ALERT_DEFINITION_TABLE, new DBColumnInfo(
-            "ignore_host", Short.class, 1, 0, false));
-
-    dbAccessor.addColumn(ALERT_DEFINITION_TABLE, new DBColumnInfo(
-            "description", char[].class, 32672, null, true));
-
-    // update alert target
-    dbAccessor.addColumn(ALERT_TARGET_TABLE, new DBColumnInfo("is_global",
-        Short.class, 1, 0, false));
-
-    // create alert_target_states table
-    ArrayList<DBColumnInfo> columns = new ArrayList<>();
-    columns.add(new DBColumnInfo("target_id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("alert_state", String.class, 255, null, 
false));
-    dbAccessor.createTable(ALERT_TARGET_STATES_TABLE, columns);
-    dbAccessor.addFKConstraint(ALERT_TARGET_STATES_TABLE,
-        "fk_alert_tgt_states_tgt_id", "target_id", ALERT_TARGET_TABLE,
-        "target_id", false);
-
-    // update alert current maintenance mode
-    dbAccessor.alterColumn(ALERT_CURRENT_TABLE, new DBColumnInfo(
-        "maintenance_state", String.class, 255, null, false));
-  }
-
-  /**
-   * Add any columns, tables, and keys needed for Rolling Upgrades.
-   * @throws SQLException
-   */
-  private void prepareRollingUpgradesDDL() throws SQLException {
-    List<DBAccessor.DBColumnInfo> columns = new ArrayList<>();
-
-    columns.add(new DBColumnInfo("repo_version_id", Long.class,    null,  
null, false));
-    columns.add(new DBColumnInfo("stack",           String.class,  255,   
null, false));
-    columns.add(new DBColumnInfo("version",         String.class,  255,   
null, false));
-    columns.add(new DBColumnInfo("display_name",    String.class,  128,   
null, false));
-    columns.add(new DBColumnInfo("upgrade_package", String.class,  255,   
null, false));
-    columns.add(new DBColumnInfo("repositories",    char[].class,  null,  
null, false));
-    dbAccessor.createTable("repo_version", columns, "repo_version_id");
-    addSequence("repo_version_id_seq", 0L, false);
-
-
-    dbAccessor.addUniqueConstraint("repo_version", 
"UQ_repo_version_display_name", "display_name");
-    dbAccessor.addUniqueConstraint("repo_version", 
"UQ_repo_version_stack_version", "stack", "version");
-
-    // New columns
-    dbAccessor.addColumn("hostcomponentstate", new 
DBAccessor.DBColumnInfo("upgrade_state",
-        String.class, 32, "NONE", false));
-
-    dbAccessor.addColumn("hostcomponentstate", new 
DBAccessor.DBColumnInfo("version",
-        String.class, 32, "UNKNOWN", false));
-
-    dbAccessor.addColumn("host_role_command", new 
DBAccessor.DBColumnInfo("retry_allowed",
-        Integer.class, 1, 0, false));
-
-    dbAccessor.addColumn("stage", new DBAccessor.DBColumnInfo("skippable",
-        Integer.class, 1, 0, false));
-
-    // New tables
-    columns = new ArrayList<>();
-    columns.add(new DBAccessor.DBColumnInfo("id", Long.class, null, null, 
false));
-    columns.add(new DBAccessor.DBColumnInfo("repo_version_id", Long.class, 
null, null, false));
-    columns.add(new DBAccessor.DBColumnInfo("cluster_id", Long.class, null, 
null, false));
-    columns.add(new DBAccessor.DBColumnInfo("state", String.class, 32, null, 
false));
-    columns.add(new DBAccessor.DBColumnInfo("start_time", Long.class, null, 
null, false));
-    columns.add(new DBAccessor.DBColumnInfo("end_time", Long.class, null, 
null, true));
-    columns.add(new DBAccessor.DBColumnInfo("user_name", String.class, 32, 
null, true));
-    dbAccessor.createTable("cluster_version", columns, "id");
-
-    columns = new ArrayList<>();
-    columns.add(new DBAccessor.DBColumnInfo("id", Long.class, null, null, 
false));
-    columns.add(new DBAccessor.DBColumnInfo("repo_version_id", Long.class, 
null, null, false));
-    columns.add(new DBAccessor.DBColumnInfo("host_name", String.class, 255, 
null, false));
-    columns.add(new DBAccessor.DBColumnInfo("state", String.class, 32, null, 
false));
-    dbAccessor.createTable("host_version", columns, "id");
-
-    // Foreign Key Constraints
-    dbAccessor.addFKConstraint("cluster_version", 
"FK_cluster_version_cluster_id", "cluster_id", "clusters", "cluster_id", false);
-    dbAccessor.addFKConstraint("cluster_version", 
"FK_cluster_version_repovers_id", "repo_version_id", "repo_version", 
"repo_version_id", false);
-    if (dbAccessor.tableHasColumn("host_version", "host_name")) {
-      dbAccessor.addFKConstraint("host_version", "FK_host_version_host_name", 
"host_name", "hosts", "host_name", false);
-    }
-    dbAccessor.addFKConstraint("host_version", "FK_host_version_repovers_id", 
"repo_version_id", "repo_version", "repo_version_id", false);
-
-    // New sequences
-    addSequence("cluster_version_id_seq", 0L, false);
-    addSequence("host_version_id_seq", 0L, false);
-
-    // upgrade tables
-    columns = new ArrayList<>();
-    columns.add(new DBAccessor.DBColumnInfo("upgrade_id", Long.class, null, 
null, false));
-    columns.add(new DBAccessor.DBColumnInfo("cluster_id", Long.class, null, 
null, false));
-    columns.add(new DBAccessor.DBColumnInfo("request_id", Long.class, null, 
null, false));
-    columns.add(new DBAccessor.DBColumnInfo("from_version", String.class, 255, 
"", false));
-    columns.add(new DBAccessor.DBColumnInfo("to_version", String.class, 255, 
"", false));
-    columns.add(new DBAccessor.DBColumnInfo("direction", String.class, 255, 
"UPGRADE", false));
-    dbAccessor.createTable("upgrade", columns, "upgrade_id");
-    dbAccessor.addFKConstraint("upgrade", "fk_upgrade_cluster_id", 
"cluster_id", "clusters", "cluster_id", false);
-    dbAccessor.addFKConstraint("upgrade", "fk_upgrade_request_id", 
"request_id", "request", "request_id", false);
-    addSequence("upgrade_id_seq", 0L, false);
-
-    columns = new ArrayList<>();
-    columns.add(new DBAccessor.DBColumnInfo("upgrade_group_id", Long.class, 
null, null, false));
-    columns.add(new DBAccessor.DBColumnInfo("upgrade_id", Long.class, null, 
null, false));
-    columns.add(new DBAccessor.DBColumnInfo("group_name", String.class, 255, 
"", false));
-    columns.add(new DBAccessor.DBColumnInfo("group_title", String.class, 1024, 
"", false));
-    dbAccessor.createTable("upgrade_group", columns, "upgrade_group_id");
-    dbAccessor.addFKConstraint("upgrade_group", "fk_upgrade_group_upgrade_id", 
"upgrade_id", "upgrade", "upgrade_id", false);
-    addSequence("upgrade_group_id_seq", 0L, false);
-
-
-    columns = new ArrayList<>();
-    columns.add(new DBAccessor.DBColumnInfo("upgrade_item_id", Long.class, 
null, null, false));
-    columns.add(new DBAccessor.DBColumnInfo("upgrade_group_id", Long.class, 
null, null, false));
-    columns.add(new DBAccessor.DBColumnInfo("stage_id", Long.class, null, 
null, false));
-    columns.add(new DBAccessor.DBColumnInfo("state", String.class, 255, 
UpgradeState.NONE.name(), false));
-    columns.add(new DBAccessor.DBColumnInfo("hosts", char[].class, 32672, 
null, true));
-    columns.add(new DBAccessor.DBColumnInfo("tasks", char[].class, 32672, 
null, true));
-    columns.add(new DBAccessor.DBColumnInfo("item_text", String.class, 1024, 
null, true));
-    dbAccessor.createTable("upgrade_item", columns, "upgrade_item_id");
-    dbAccessor.addFKConstraint("upgrade_item", "fk_upg_item_upgrade_group_id", 
"upgrade_group_id", "upgrade_group", "upgrade_group_id", false);
-    addSequence("upgrade_item_id_seq", 0L, false);
-  }
-
-  private void createArtifactTable() throws SQLException {
-    ArrayList<DBColumnInfo> columns = new ArrayList<>();
-    columns.add(new DBColumnInfo("artifact_name", String.class, 255, null, 
false));
-    columns.add(new DBColumnInfo("foreign_keys", String.class, 255, null, 
false));
-    columns.add(new DBColumnInfo("artifact_data", char[].class, null, null, 
false));
-    dbAccessor.createTable(ARTIFACT_TABLE, columns, "artifact_name", 
"foreign_keys");
-  }
-
-  private void createKerberosPrincipalTables() throws SQLException {
-    ArrayList<DBColumnInfo> columns;
-
-    columns = new ArrayList<>();
-    columns.add(new DBColumnInfo("principal_name", String.class, 255, null, 
false));
-    columns.add(new DBColumnInfo("is_service", Short.class, 1, 1, false));
-    columns.add(new DBColumnInfo("cached_keytab_path", String.class, 255, 
null, true));
-    dbAccessor.createTable(KERBEROS_PRINCIPAL_TABLE, columns, 
"principal_name");
-
-    columns = new ArrayList<>();
-    columns.add(new DBColumnInfo("principal_name", String.class, 255, null, 
false));
-    columns.add(new DBColumnInfo("host_name", String.class, 255, null, false));
-    dbAccessor.createTable(KERBEROS_PRINCIPAL_HOST_TABLE, columns, 
"principal_name", "host_name");
-    if (dbAccessor.tableHasColumn(KERBEROS_PRINCIPAL_HOST_TABLE, "host_name")) 
{
-      dbAccessor.addFKConstraint(KERBEROS_PRINCIPAL_HOST_TABLE, 
"FK_krb_pr_host_hostname", "host_name", "hosts", "host_name", true, false);
-    }
-    dbAccessor.addFKConstraint(KERBEROS_PRINCIPAL_HOST_TABLE, 
"FK_krb_pr_host_principalname", "principal_name", KERBEROS_PRINCIPAL_TABLE, 
"principal_name", true, false);
-  }
-
-  // ----- UpgradeCatalog ----------------------------------------------------
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public void executePreDMLUpdates() {
-    ;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDMLUpdates() throws AmbariException, SQLException {
-    // remove NAGIOS to make way for the new embedded alert framework
-    removeNagiosService();
-    addNewConfigurationsFromXml();
-    updateHiveDatabaseType();
-    updateTezConfiguration();
-    updateFlumeEnvConfig();
-    addMissingConfigs();
-    persistHDPRepo();
-    updateClusterEnvConfiguration();
-  }
-
-  @Experimental(feature=ExperimentalFeature.PATCH_UPGRADES,
-      comment = "the metainfo table of storing the latest repo will be 
removed")
-  protected void persistHDPRepo() throws AmbariException{
-    /*
-    AmbariManagementController amc = injector.getInstance(
-            AmbariManagementController.class);
-    AmbariMetaInfo ambariMetaInfo = amc.getAmbariMetaInfo();
-    Map<String, Cluster> clusterMap = amc.getClusters().getClusters();
-    for (Cluster cluster : clusterMap.values()) {
-      StackId stackId = cluster.getCurrentStackVersion();
-      String stackName = stackId.getStackName();
-      String stackVersion = stackId.getStackVersion();
-      String stackRepoId = stackName + "-" + stackVersion;
-
-      for (OperatingSystemInfo osi : ambariMetaInfo.getOperatingSystems(stackName, stackVersion)) {
-        MetainfoDAO metaInfoDAO = injector.getInstance(MetainfoDAO.class);
-        String repoMetaKey = AmbariMetaInfo.generateRepoMetaKey(stackName,stackVersion,osi.getOsType(),
-                stackRepoId,AmbariMetaInfo.REPOSITORY_XML_PROPERTY_BASEURL);
-        // Check if default repo is used and not persisted
-        if (metaInfoDAO.findByKey(repoMetaKey) == null) {
-          RepositoryInfo repositoryInfo = ambariMetaInfo.getRepository(stackName, stackVersion, osi.getOsType(), stackRepoId);
-          // We save default base url which has not changed during upgrade as base url
-          String baseUrl = repositoryInfo.getDefaultBaseUrl();
-          ambariMetaInfo.updateRepo(stackName, stackVersion, osi.getOsType(),
-              stackRepoId, baseUrl, null);
-        }
-      }
-
-      // Repositories that have been autoset may be unexpected for user
-      // (especially if they are taken from online json)
-      // We have to output to stdout here, and not to log
-      // to be sure that user sees this message
-      System.out.printf("Ambari has recorded the following repository base 
urls for cluster %s. Please verify the " +
-              "values and ensure that these are correct. If necessary, " +
-              "after starting Ambari Server, you can edit them using Ambari 
UI, " +
-              "Admin -> Stacks and Versions -> Versions Tab and editing the 
base urls for the current Repo. " +
-              "It is critical that these repo base urls are valid for your 
environment as they " +
-              "will be used for Add Host/Service operations.",
-        cluster.getClusterName());
-      System.out.println(repositoryTable(ambariMetaInfo.getStack(stackName, stackVersion).getRepositories()));
-    }
-    */
-  }
-
-  /**
-   * Formats a list of repositories for printing to console
-   * @param repositories list of repositories
-   * @return multi-line string
-   */
-  static String repositoryTable(List<RepositoryInfo> repositories) {
-    StringBuilder result = new StringBuilder();
-    for (RepositoryInfo repository : repositories) {
-      result.append(String.format(" %8s |", repository.getOsType()));
-      result.append(String.format(" %18s |", repository.getRepoId()));
-      result.append(String.format(" %48s ", repository.getBaseUrl()));
-      result.append("\n");
-    }
-    return result.toString();
-  }
-
-  protected void updateTezConfiguration() throws AmbariException {
-    updateConfigurationProperties("tez-site", 
Collections.singletonMap(TEZ_USE_CLUSTER_HADOOP_LIBS_PROPERTY, 
String.valueOf(false)), false, false);
-  }
-
-  protected void updateFlumeEnvConfig() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config flumeEnvConfig = cluster.getDesiredConfigByType(FLUME_ENV_CONFIG);
-      if (flumeEnvConfig != null) {
-        String content = flumeEnvConfig.getProperties().get(CONTENT_PROPERTY);
-        if (content != null && !content.contains("/usr/lib/flume/lib/ambari-metrics-flume-sink.jar")) {
-          String newPartOfContent = "\n\n" +
-            "# Note that the Flume conf directory is always included in the 
classpath.\n" +
-            "# Add flume sink to classpath\n" +
-            "if [ -e \"/usr/lib/flume/lib/ambari-metrics-flume-sink.jar\" ]; 
then\n" +
-            "  export 
FLUME_CLASSPATH=$FLUME_CLASSPATH:/usr/lib/flume/lib/ambari-metrics-flume-sink.jar\n"
 +
-            "fi\n";
-          content += newPartOfContent;
-          Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, content);
-          updateConfigurationPropertiesForCluster(cluster, FLUME_ENV_CONFIG, updates, true, false);
-        }
-      }
-    }
-  }
-
-  protected void updateHiveDatabaseType() throws AmbariException {
-    final String PROPERTY_NAME = "hive_database_type";
-    final String PROPERTY_VALUE_OLD = "postgresql";
-    final String PROPERTY_VALUE_NEW = "postgres";
-    final String PROPERTY_CONFIG_NAME = "hive-env";
-
-    AmbariManagementController ambariManagementController = injector.getInstance(
-            AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-      Map<String, String> prop = new HashMap<>();
-      String hive_database_type = null;
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          hive_database_type = null;
-
-          if (cluster.getDesiredConfigByType(PROPERTY_CONFIG_NAME) != null) {
-            hive_database_type = cluster.getDesiredConfigByType(
-                    PROPERTY_CONFIG_NAME).getProperties().get(PROPERTY_NAME);
-          }
-
-          if (hive_database_type != null && !hive_database_type.isEmpty() &&
-                  hive_database_type.equals(PROPERTY_VALUE_OLD)) {
-            prop.put(PROPERTY_NAME, PROPERTY_VALUE_NEW);
-            updateConfigurationPropertiesForCluster(cluster, PROPERTY_CONFIG_NAME, prop, true, false);
-          }
-        }
-      }
-
-    }
-  }
-
-  /**
-   * Removes Nagios and all associated components and states.
-   */
-  protected void removeNagiosService() {
-    executeInTransaction(new RemoveNagiosRunnable());
-  }
-
-  /**
-   * The RemoveNagiosRunnable is used to remove Nagios from the cluster. This
-   * runnable is expected to run inside of a transaction so that if any of the
-   * removals fails, Nagios is returned to a valid service state.
-   */
-  protected final class RemoveNagiosRunnable implements Runnable {
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void run() {
-      ClusterDAO clusterDao = injector.getInstance(ClusterDAO.class);
-      ClusterServiceDAO clusterServiceDao = injector.getInstance(ClusterServiceDAO.class);
-      ServiceComponentDesiredStateDAO componentDesiredStateDao = injector.getInstance(ServiceComponentDesiredStateDAO.class);
-      ServiceDesiredStateDAO desiredStateDao = injector.getInstance(ServiceDesiredStateDAO.class);
-      HostComponentDesiredStateDAO hostComponentDesiredStateDao = injector.getInstance(HostComponentDesiredStateDAO.class);
-      HostComponentStateDAO hostComponentStateDao = injector.getInstance(HostComponentStateDAO.class);
-
-      List<ClusterEntity> clusters = clusterDao.findAll();
-      if (null == clusters) {
-        return;
-      }
-
-      for (ClusterEntity cluster : clusters) {
-        ClusterServiceEntity nagios = clusterServiceDao.findByClusterAndServiceNames(
-            cluster.getClusterName(), "NAGIOS");
-
-        if (null == nagios) {
-          continue;
-        }
-
-        Collection<ServiceComponentDesiredStateEntity> serviceComponentDesiredStates = nagios.getServiceComponentDesiredStateEntities();
-        ServiceDesiredStateEntity serviceDesiredState = nagios.getServiceDesiredStateEntity();
-
-        // remove all component states
-        for (ServiceComponentDesiredStateEntity componentDesiredState : serviceComponentDesiredStates) {
-          Collection<HostComponentStateEntity> hostComponentStateEntities = componentDesiredState.getHostComponentStateEntities();
-          Collection<HostComponentDesiredStateEntity> hostComponentDesiredStateEntities = componentDesiredState.getHostComponentDesiredStateEntities();
-
-          // remove host states
-          for (HostComponentStateEntity hostComponentState : hostComponentStateEntities) {
-            hostComponentStateDao.remove(hostComponentState);
-          }
-
-          // remove host desired states
-          for (HostComponentDesiredStateEntity hostComponentDesiredState : hostComponentDesiredStateEntities) {
-            hostComponentDesiredStateDao.remove(hostComponentDesiredState);
-          }
-
-          // remove component state
-          componentDesiredStateDao.removeByName(nagios.getClusterId(),
-              componentDesiredState.getServiceName(), componentDesiredState.getComponentName());
-        }
-
-        // remove service state
-        desiredStateDao.remove(serviceDesiredState);
-
-        // remove service
-        cluster.getClusterServiceEntities().remove(nagios);
-        ClusterServiceEntityPK primaryKey = new ClusterServiceEntityPK();
-        primaryKey.setClusterId(nagios.getClusterId());
-        primaryKey.setServiceName(nagios.getServiceName());
-        clusterServiceDao.removeByPK(primaryKey);
-      }
-    }
-  }
-  protected void addMissingConfigs() throws AmbariException {
-    updateConfigurationProperties("hive-site", 
Collections.singletonMap("hive.server2.transport.mode", "binary"), false, 
false);
-  }
-
-  /**
-   * Update the cluster-env configuration (in all clusters) to add missing properties and remove
-   * obsolete properties.
-   *
-   * @throws org.apache.ambari.server.AmbariException
-   */
-  protected void updateClusterEnvConfiguration() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
-
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null) {
-        for (final Cluster cluster : clusterMap.values()) {
-          Config configClusterEnv = cluster.getDesiredConfigByType("cluster-env");
-
-          if (configClusterEnv != null) {
-            Map<String, String> properties = configClusterEnv.getProperties();
-
-            if (properties != null) {
-              // -----------------------------------------
-              // Add missing properties
-
-              if (!properties.containsKey("smokeuser_principal_name")) {
-                // Add smokeuser_principal_name, from cluster-env/smokeuser
-                // Ideally a realm should be added, but for now we can assume the default realm and
-                // leave it off
-                String smokeUser = properties.get("smokeuser");
-
-                if ((smokeUser == null) || smokeUser.isEmpty()) {
-                  // If the smokeuser property is not set in the current configuration set, grab
-                  // it from the stack defaults:
-                  Set<PropertyInfo> stackProperties = configHelper.getStackProperties(cluster);
-
-                  if (stackProperties != null) {
-                    for (PropertyInfo propertyInfo : stackProperties) {
-                      String filename = propertyInfo.getFilename();
-
-                      if ((filename != null) && "cluster-env".equals(ConfigHelper.fileNameToConfigType(filename))) {
-                        smokeUser = propertyInfo.getValue();
-                        break;
-                      }
-                    }
-                  }
-
-                  // If a default value for smokeuser was not found, force it to be "ambari-qa"
-                  if ((smokeUser == null) || smokeUser.isEmpty()) {
-                    smokeUser = "ambari-qa";
-                  }
-                }
-
-                properties.put("smokeuser_principal_name", smokeUser);
-              }
-
-              // Add missing properties (end)
-              // -----------------------------------------
-
-              // -----------------------------------------
-              // Remove obsolete properties
-
-              // Remove obsolete properties (end)
-              // -----------------------------------------
-
-              // -----------------------------------------
-              // Set the updated configuration
-
-              configHelper.createConfigType(cluster, cluster.getDesiredStackVersion(),
-                  ambariManagementController, "cluster-env", properties,
-                  AUTHENTICATED_USER_NAME, "Upgrading to Ambari 2.0");
-
-              // Set configuration (end)
-              // -----------------------------------------
-
-            }
-          }
-        }
-      }
-    }
-  }
-}
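
For reference, the deleted class above also shows the contract that the
surviving catalogs (2.5.1 and later) keep implementing: a Guice-injected
constructor, a target version, and DDL, pre-DML, and DML phases applied in
that order. Below is a condensed sketch of that shape, with
AbstractUpgradeCatalog reduced to just the members visible in this diff
(the real base class carries much more, e.g. dbAccessor and the config
helpers), and a hypothetical ExampleUpgradeCatalog standing in for a real
catalog.

import java.sql.SQLException;

// AbstractUpgradeCatalog reduced to the members visible in this diff; the
// real Ambari base class carries much more (dbAccessor, injector, helpers).
abstract class AbstractUpgradeCatalogSketch {
  public abstract String getTargetVersion();

  // Runs first: schema changes (addColumn, createTable, addFKConstraint, ...).
  protected abstract void executeDDLUpdates() throws SQLException;

  // Runs between DDL and DML, for fixes that must precede the data updates.
  public abstract void executePreDMLUpdates();

  // Runs last: data and configuration changes (config rewrites, service removal).
  protected abstract void executeDMLUpdates() throws SQLException;
}

// Hypothetical catalog with the same shape as the deleted UpgradeCatalog200.
class ExampleUpgradeCatalog extends AbstractUpgradeCatalogSketch {
  @Override
  public String getTargetVersion() {
    return "2.5.1";
  }

  @Override
  protected void executeDDLUpdates() throws SQLException {
    // schema work would go here, cf. prepareRollingUpgradesDDL() above
  }

  @Override
  public void executePreDMLUpdates() {
    // intentionally empty, as in the deleted class
  }

  @Override
  protected void executeDMLUpdates() throws SQLException {
    // data/config work would go here, cf. removeNagiosService() above
  }
}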
