Posted to commits@ambari.apache.org by ha...@apache.org on 2017/06/25 09:59:59 UTC

[23/25] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog211.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog211.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog211.java
deleted file mode 100644
index eb835ef..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog211.java
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.text.MessageFormat;
-import java.util.Collections;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.configuration.Configuration.DatabaseType;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
-import org.apache.ambari.server.orm.dao.DaoUtils;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.jdbc.support.JdbcUtils;
-
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-
-/**
- * Upgrade catalog for version 2.1.1.
- */
-public class UpgradeCatalog211 extends AbstractUpgradeCatalog {
-  private static final String HOST_COMPONENT_STATE_TABLE = "hostcomponentstate";
-  private static final String HOST_COMPONENT_STATE_ID_COLUMN = "id";
-  private static final String HOST_COMPONENT_STATE_INDEX = "idx_host_component_state";
-
-  /**
-   * Logger.
-   */
-  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog211.class);
-
-  // this "id holder" is a field only for a test that verifies "big" 4 digit+
-  // numbers are formatted correctly
-  private AtomicLong m_hcsId = new AtomicLong(1);
-
-
-  @Inject
-  DaoUtils daoUtils;
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Don't forget to register new UpgradeCatalogs in {@link SchemaUpgradeHelper.UpgradeHelperModule#configure()}
-   *
-   * @param injector Guice injector that tracks dependencies and uses bindings to inject them.
-   */
-  @Inject
-  public UpgradeCatalog211(Injector injector) {
-    super(injector);
-
-    daoUtils = injector.getInstance(DaoUtils.class);
-  }
-
-  // ----- UpgradeCatalog ----------------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getTargetVersion() {
-    return "2.1.1";
-  }
-
-  // ----- AbstractUpgradeCatalog --------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getSourceVersion() {
-    return "2.1.0";
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDDLUpdates() throws AmbariException, SQLException {
-    // change out the PK on hostcomponentstate
-    executeHostComponentStateDDLUpdates();
-
-    // make viewinstanceproperty.value & viewinstancedata.value nullable
-    dbAccessor.setColumnNullable("viewinstanceproperty", "value", true);
-    dbAccessor.setColumnNullable("viewinstancedata", "value", true);
-
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executePreDMLUpdates() throws AmbariException, SQLException {
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDMLUpdates() throws AmbariException, SQLException {
-    addNewConfigurationsFromXml();
-    updateExistingConfigurations();
-  }
-
-  // ----- UpgradeCatalog211 --------------------------------------------
-
-  /**
-   * Iterates over the set of clusters to call service-specific configuration
-   * update routines.
-   *
-   * @throws AmbariException
-   *           if an error occurs while updating the configurations
-   */
-  protected void updateExistingConfigurations() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if ((clusterMap != null) && !clusterMap.isEmpty()) {
-        // Iterate through the clusters and perform any configuration updates
-        for (final Cluster cluster : clusterMap.values()) {
-          updateKerberosConfigurations(cluster);
-
-          /* *********************************************************
-           * Add additional configuration update methods here
-           * ********************************************************* */
-        }
-      }
-    }
-  }
-
-  /**
-   * Updates the Kerberos configurations for the given cluster
-   * <p/>
-   * Performs the following updates:
-   * <ul>
-   * <li>Rename <code>create_attributes_template</code> to
-   * <code>ad_create_attributes_template</code></li>
-   * </ul>
-   *
-   * @param cluster
-   *          the cluster
-   * @throws AmbariException
-   *           if an error occurs while updating the configurations
-   */
-  protected void updateKerberosConfigurations(Cluster cluster) throws AmbariException {
-    Config config = cluster.getDesiredConfigByType("kerberos-env");
-
-    if (config != null) {
-      // Rename create_attributes_template to ad_create_attributes_template
-      String value = config.getProperties().get("create_attributes_template");
-      Map<String, String> updates = Collections.singletonMap("ad_create_attributes_template", value);
-      Set<String> removes = Collections.singleton("create_attributes_template");
-
-      updateConfigurationPropertiesForCluster(cluster, "kerberos-env", updates, removes, true, false);
-    }
-  }
-
-  /**
-   * Perform the DDL updates required to add a new Primary Key ID column to the
-   * {@code hostcomponentstate} table. This will perform the following actions:
-   * <ul>
-   * <li>Add a new column to hostcomponentstate named id</li>
-   * <li>Populate id with an incrementing long, then make it non-NULL</li>
-   * <li>Drop the existing PK on hostcomponentstate</li>
-   * <li>Add a new surrogate PK on hostcomponentstate on the id column</li>
-   * <li>Add an index on hostcomponentstate for host_id, component_name,
-   * service_name, cluster_id</li>
-   * </ul>
-   *
-   * @throws AmbariException
-   * @throws SQLException
-   */
-  private void executeHostComponentStateDDLUpdates() throws AmbariException, SQLException {
-    if (!dbAccessor.tableHasPrimaryKey(HOST_COMPONENT_STATE_TABLE, HOST_COMPONENT_STATE_ID_COLUMN)) {
-      // add the new column, nullable for now until we insert unique IDs
-      dbAccessor.addColumn(HOST_COMPONENT_STATE_TABLE,
-          new DBColumnInfo(HOST_COMPONENT_STATE_ID_COLUMN, Long.class, null, null, true));
-
-      Statement statement = null;
-      ResultSet resultSet = null;
-      try {
-        statement = dbAccessor.getConnection().createStatement();
-        if (statement != null) {
-          String selectSQL = MessageFormat.format(
-              "SELECT id, cluster_id, service_name, component_name, host_id FROM {0} ORDER BY {1} {2}",
-              HOST_COMPONENT_STATE_TABLE, "id", "DESC");
-
-          resultSet = statement.executeQuery(selectSQL);
-          while (resultSet.next()) {
-            final Long clusterId = resultSet.getLong("cluster_id");
-            final String serviceName = resultSet.getString("service_name");
-            final String componentName = resultSet.getString("component_name");
-            final Long hostId = resultSet.getLong("host_id");
-            final Long idKey = resultSet.getLong("id");
-
-            if (idKey != 0 && m_hcsId.get() == 1) {
-              m_hcsId.set(idKey);
-              m_hcsId.getAndIncrement();
-            } else if(idKey == 0) {
-              String updateSQL = MessageFormat.format(
-                  "UPDATE {0} SET {1} = {2,number,#} WHERE cluster_id = {3} AND service_name = ''{4}'' AND component_name = ''{5}'' and host_id = {6,number,#}",
-                  HOST_COMPONENT_STATE_TABLE, HOST_COMPONENT_STATE_ID_COLUMN, m_hcsId.getAndIncrement(),
-                  clusterId, serviceName, componentName, hostId);
-
-              dbAccessor.executeQuery(updateSQL);
-            }
-          }
-        }
-      } finally {
-        JdbcUtils.closeResultSet(resultSet);
-        JdbcUtils.closeStatement(statement);
-      }
-
-      // make the column NON NULL now
-      dbAccessor.alterColumn(HOST_COMPONENT_STATE_TABLE,
-          new DBColumnInfo(HOST_COMPONENT_STATE_ID_COLUMN, Long.class, null, null, false));
-
-      // Add sequence for hostcomponentstate id
-      addSequence("hostcomponentstate_id_seq", m_hcsId.get(), false);
-
-      // drop the current PK
-      String primaryKeyConstraintName = null;
-      Configuration.DatabaseType databaseType = configuration.getDatabaseType();
-      switch (databaseType) {
-        case POSTGRES: {
-          primaryKeyConstraintName = "hostcomponentstate_pkey";
-          break;
-        }
-        case ORACLE:
-        case SQL_SERVER: {
-          // Oracle and SQL Server require us to lookup the PK name
-          primaryKeyConstraintName = dbAccessor.getPrimaryKeyConstraintName(
-              HOST_COMPONENT_STATE_TABLE);
-
-          break;
-        }
-        default:
-          break;
-      }
-
-      if (databaseType == DatabaseType.MYSQL) {
-        String mysqlDropQuery = MessageFormat.format("ALTER TABLE {0} DROP PRIMARY KEY",
-            HOST_COMPONENT_STATE_TABLE);
-
-        dbAccessor.executeQuery(mysqlDropQuery, true);
-      } else {
-        // warn if we can't find it
-        if (null == primaryKeyConstraintName) {
-          LOG.warn("Unable to determine the primary key constraint name for {}",
-              HOST_COMPONENT_STATE_TABLE);
-        } else {
-          dbAccessor.dropPKConstraint(HOST_COMPONENT_STATE_TABLE, primaryKeyConstraintName, true);
-        }
-      }
-
-      // create a new PK, matching the name of the constraint found in the SQL
-      // files
-      dbAccessor.addPKConstraint(HOST_COMPONENT_STATE_TABLE, "pk_hostcomponentstate", "id");
-
-      // create index, ensuring column order matches that of the SQL files
-      dbAccessor.createIndex(HOST_COMPONENT_STATE_INDEX, HOST_COMPONENT_STATE_TABLE, "host_id",
-          "component_name", "service_name", "cluster_id");
-    }
-  }
-}

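The catalog deleted above performs a textbook surrogate-key migration on hostcomponentstate: add the id column as nullable, backfill every row with a unique value, tighten the column to NOT NULL, then swap the primary key and recreate the covering index. A minimal plain-JDBC sketch of the same sequence, assuming a PostgreSQL database and using the table, column, constraint, and index names from the code above (the connection details are illustrative; the real catalog goes through Ambari's DBAccessor and handles each supported database dialect):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.ResultSet;
    import java.sql.Statement;

    /** Sketch of the hostcomponentstate surrogate-PK swap, PostgreSQL dialect. */
    public final class SurrogatePkSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
            "jdbc:postgresql://localhost/ambari", "ambari", "secret")) {
          try (Statement ddl = conn.createStatement()) {
            // 1. Add the column nullable so the ALTER succeeds on a populated table.
            ddl.executeUpdate("ALTER TABLE hostcomponentstate ADD COLUMN id BIGINT");
          }
          // 2. Backfill a unique, incrementing id per row, keyed on the old natural key.
          long nextId = 1L;
          try (Statement select = conn.createStatement();
               ResultSet rs = select.executeQuery(
                   "SELECT cluster_id, service_name, component_name, host_id FROM hostcomponentstate");
               PreparedStatement update = conn.prepareStatement(
                   "UPDATE hostcomponentstate SET id = ? WHERE cluster_id = ? "
                       + "AND service_name = ? AND component_name = ? AND host_id = ?")) {
            while (rs.next()) {
              update.setLong(1, nextId++);
              update.setLong(2, rs.getLong("cluster_id"));
              update.setString(3, rs.getString("service_name"));
              update.setString(4, rs.getString("component_name"));
              update.setLong(5, rs.getLong("host_id"));
              update.executeUpdate();
            }
          }
          try (Statement ddl = conn.createStatement()) {
            // 3. Only now make the column mandatory and swap the primary key.
            ddl.executeUpdate("ALTER TABLE hostcomponentstate ALTER COLUMN id SET NOT NULL");
            // hostcomponentstate_pkey is the PostgreSQL constraint name used above;
            // Oracle and SQL Server require looking the name up first.
            ddl.executeUpdate("ALTER TABLE hostcomponentstate DROP CONSTRAINT hostcomponentstate_pkey");
            ddl.executeUpdate("ALTER TABLE hostcomponentstate "
                + "ADD CONSTRAINT pk_hostcomponentstate PRIMARY KEY (id)");
            // 4. Recreate the old lookup path as a plain index, same column order.
            ddl.executeUpdate("CREATE INDEX idx_host_component_state ON hostcomponentstate "
                + "(host_id, component_name, service_name, cluster_id)");
          }
        }
      }
    }

Unlike the MessageFormat string-building in the original, the sketch binds the natural-key values through a PreparedStatement, which avoids the doubled-quote escaping ('') the format strings needed.
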
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
deleted file mode 100644
index 8eb2654..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
+++ /dev/null
@@ -1,427 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.regex.Matcher;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
-import org.apache.ambari.server.orm.dao.DaoUtils;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.utils.VersionUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-
-/**
- * Upgrade catalog for version 2.1.2.
- */
-public class UpgradeCatalog212 extends AbstractUpgradeCatalog {
-  private static final String HIVE_SITE = "hive-site";
-  private static final String HIVE_ENV = "hive-env";
-  private static final String HBASE_ENV = "hbase-env";
-  private static final String HBASE_SITE = "hbase-site";
-  private static final String CLUSTER_ENV = "cluster-env";
-  private static final String OOZIE_ENV = "oozie-env";
-
-  private static final String TOPOLOGY_REQUEST_TABLE = "topology_request";
-  private static final String CLUSTERS_TABLE = "clusters";
-  private static final String CLUSTERS_TABLE_CLUSTER_ID_COLUMN = "cluster_id";
-  private static final String TOPOLOGY_REQUEST_CLUSTER_NAME_COLUMN = "cluster_name";
-  private static final String TOPOLOGY_REQUEST_CLUSTER_ID_COLUMN = "cluster_id";
-  private static final String TOPOLOGY_REQUEST_CLUSTER_ID_FK_CONSTRAINT_NAME = "FK_topology_request_cluster_id";
-
-  private static final String HOST_ROLE_COMMAND_TABLE = "host_role_command";
-  private static final String HOST_ROLE_COMMAND_SKIP_COLUMN = "auto_skip_on_failure";
-
-  /**
-   * Logger.
-   */
-  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog212.class);
-
-  @Inject
-  DaoUtils daoUtils;
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Don't forget to register new UpgradeCatalogs in {@link org.apache.ambari.server.upgrade.SchemaUpgradeHelper.UpgradeHelperModule#configure()}
-   *
-   * @param injector Guice injector that tracks dependencies and uses bindings to inject them.
-   */
-  @Inject
-  public UpgradeCatalog212(Injector injector) {
-    super(injector);
-
-    daoUtils = injector.getInstance(DaoUtils.class);
-  }
-
-  // ----- UpgradeCatalog ----------------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getTargetVersion() {
-    return "2.1.2";
-  }
-
-  // ----- AbstractUpgradeCatalog --------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getSourceVersion() {
-    return "2.1.1";
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDDLUpdates() throws AmbariException, SQLException {
-    executeTopologyDDLUpdates();
-    executeHostRoleCommandDDLUpdates();
-  }
-
-  private void executeTopologyDDLUpdates() throws AmbariException, SQLException {
-    dbAccessor.addColumn(TOPOLOGY_REQUEST_TABLE, new DBColumnInfo(TOPOLOGY_REQUEST_CLUSTER_ID_COLUMN,
-      Long.class, null, null, true));
-    // TOPOLOGY_REQUEST_CLUSTER_NAME_COLUMN will be deleted in PreDML. We need a cluster name to set cluster id.
-    // dbAccessor.dropColumn(TOPOLOGY_REQUEST_TABLE, TOPOLOGY_REQUEST_CLUSTER_NAME_COLUMN);
-    // dbAccessor.setColumnNullable(TOPOLOGY_REQUEST_TABLE, TOPOLOGY_REQUEST_CLUSTER_ID_COLUMN, false);
-    // dbAccessor.addFKConstraint(TOPOLOGY_REQUEST_TABLE, TOPOLOGY_REQUEST_CLUSTER_ID_FK_CONSTRAINT_NAME,
-    //     TOPOLOGY_REQUEST_CLUSTER_ID_COLUMN, CLUSTERS_TABLE, CLUSTERS_TABLE_CLUSTER_ID_COLUMN, false);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executePreDMLUpdates() throws AmbariException, SQLException {
-    if (dbAccessor.tableHasColumn(TOPOLOGY_REQUEST_TABLE, TOPOLOGY_REQUEST_CLUSTER_NAME_COLUMN)) {
-      addClusterIdToTopology();
-      finalizeTopologyDDL();
-    } else {
-      LOG.debug("The column: [ {} ] has already been dropped from table: [ {} ]. Skipping preDMLUpdate logic.",
-          TOPOLOGY_REQUEST_CLUSTER_NAME_COLUMN, TOPOLOGY_REQUEST_TABLE);
-    }
-  }
-
-  protected void finalizeTopologyDDL() throws AmbariException, SQLException {
-    dbAccessor.dropColumn(TOPOLOGY_REQUEST_TABLE, TOPOLOGY_REQUEST_CLUSTER_NAME_COLUMN);
-    dbAccessor.setColumnNullable(TOPOLOGY_REQUEST_TABLE, TOPOLOGY_REQUEST_CLUSTER_ID_COLUMN, false);
-    dbAccessor.addFKConstraint(TOPOLOGY_REQUEST_TABLE, TOPOLOGY_REQUEST_CLUSTER_ID_FK_CONSTRAINT_NAME,
-      TOPOLOGY_REQUEST_CLUSTER_ID_COLUMN, CLUSTERS_TABLE, CLUSTERS_TABLE_CLUSTER_ID_COLUMN, false);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDMLUpdates() throws AmbariException, SQLException {
-    addNewConfigurationsFromXml();
-    addMissingConfigs();
-  }
-
-  protected void addClusterIdToTopology() throws AmbariException, SQLException {
-    Map<String, Long> clusterNameIdMap = new HashMap<>();
-    try (Statement statement = dbAccessor.getConnection().createStatement();
-         ResultSet rs = statement.executeQuery("SELECT DISTINCT cluster_name, cluster_id FROM clusters");
-    ) {
-      while (rs.next()) {
-        long clusterId = rs.getLong("cluster_id");
-        String clusterName = rs.getString("cluster_name");
-        clusterNameIdMap.put(clusterName, clusterId);
-      }
-    }
-
-    for (String clusterName : clusterNameIdMap.keySet()) {
-      try (PreparedStatement preparedStatement = dbAccessor.getConnection().prepareStatement("UPDATE topology_request " +
-          "SET cluster_id=? WHERE cluster_name=?");
-      ) {
-        preparedStatement.setLong(1, clusterNameIdMap.get(clusterName));
-        preparedStatement.setString(2, clusterName);
-        preparedStatement.executeUpdate();
-      }
-    }
-
-    // Set cluster id for all null values.
-    // Useful if the cluster was renamed and the stored cluster name no longer matches.
-    if (clusterNameIdMap.entrySet().size() >= 1) {
-      try (PreparedStatement preparedStatement = dbAccessor.getConnection().prepareStatement("UPDATE topology_request " +
-          "SET cluster_id=? WHERE cluster_id IS NULL");
-      ) {
-        preparedStatement.setLong(1, clusterNameIdMap.entrySet().iterator().next().getValue());
-        preparedStatement.executeUpdate();
-      }
-    }
-    if (clusterNameIdMap.entrySet().size() == 0) {
-      LOG.warn("Cluster not found. topology_request.cluster_id is not set");
-    }
-    if (clusterNameIdMap.entrySet().size() > 1) {
-      LOG.warn("Found more than one cluster. topology_request.cluster_id can be incorrect if you have renamed the cluster.");
-    }
-  }
-
-  protected void addMissingConfigs() throws AmbariException {
-    updateHiveConfigs();
-    updateOozieConfigs();
-    updateHbaseAndClusterConfigurations();
-    updateKafkaConfigurations();
-    updateStormConfigs();
-    removeDataDirMountConfig();
-  }
-
-  protected void updateStormConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if ((clusterMap != null) && !clusterMap.isEmpty()) {
-        // Iterate through the clusters and perform any configuration updates
-        for (final Cluster cluster : clusterMap.values()) {
-          Set<String> removes = new HashSet<>();
-          removes.add("topology.metrics.consumer.register");
-          updateConfigurationPropertiesForCluster(cluster, "storm-site",
-            new HashMap<String, String>(), removes, false, false);
-        }
-      }
-    }
-  }
-
-  protected void updateKafkaConfigurations() throws AmbariException {
-    Map<String, String> properties = new HashMap<>();
-    properties.put("external.kafka.metrics.exclude.prefix",
-      "kafka.network.RequestMetrics,kafka.server.DelayedOperationPurgatory," +
-        "kafka.server.BrokerTopicMetrics.BytesRejectedPerSec");
-    properties.put("external.kafka.metrics.include.prefix",
-      "kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetCommit.98percentile," +
-        "kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Offsets.95percentile," +
-        "kafka.network.RequestMetrics.ResponseSendTimeMs.request.Fetch.95percentile," +
-        "kafka.network.RequestMetrics.RequestsPerSec.request");
-
-    updateConfigurationProperties("kafka-broker", properties, false, false);
-  }
-
-  protected void updateHbaseAndClusterConfigurations() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if ((clusterMap != null) && !clusterMap.isEmpty()) {
-        // Iterate through the clusters and perform any configuration updates
-        for (final Cluster cluster : clusterMap.values()) {
-          Config hbaseEnvProps = cluster.getDesiredConfigByType(HBASE_ENV);
-          Config hbaseSiteProps = cluster.getDesiredConfigByType(HBASE_SITE);
-
-          if (hbaseEnvProps != null) {
-            // Remove override_hbase_uid from hbase-env and add override_uid to cluster-env
-            String value = hbaseEnvProps.getProperties().get("override_hbase_uid");
-            if (value != null) {
-              Map<String, String> updates = new HashMap<>();
-              Set<String> removes = new HashSet<>();
-              updates.put("override_uid", value);
-              removes.add("override_hbase_uid");
-              updateConfigurationPropertiesForCluster(cluster, HBASE_ENV, new HashMap<String, String>(), removes, false, true);
-              updateConfigurationPropertiesForCluster(cluster, CLUSTER_ENV, updates, true, false);
-            } else {
-              updateOverrideUIDClusterConfig("false", cluster);
-            }
-          } else {
-            updateOverrideUIDClusterConfig("false", cluster);
-          }
-
-          if (hbaseSiteProps != null) {
-            String value = hbaseSiteProps.getProperties().get("hbase.bucketcache.size");
-            if (value != null) {
-              if (value.endsWith("m")) {
-                value = value.substring(0, value.length() - 1);
-                Map<String, String> updates = new HashMap<>();
-                updates.put("hbase.bucketcache.size", value);
-                updateConfigurationPropertiesForCluster(cluster, HBASE_SITE, updates, true, false);
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Set override_uid to false during the upgrade to retain UIDs already set on the cluster.
-   * This is necessary for upgrading a third-party Ambari/stack distribution from
-   * Ambari version 2.1.0 where HBase does not have override_hbase_uid.
-   */
-  private void updateOverrideUIDClusterConfig(String toOverride, Cluster cluster) throws AmbariException{
-    Map<String, String> updates = new HashMap<>();
-    updates.put("override_uid", toOverride);
-    updateConfigurationPropertiesForCluster(cluster, CLUSTER_ENV, updates, true, false);
-  }
-
-  protected void updateHiveConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(
-            AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          Service service = cluster.getServices().get("HIVE");
-
-          if (null == service) {
-            continue;
-          }
-
-          StackId stackId = service.getDesiredStackId();
-
-          String content = null;
-          Boolean isHiveSitePresent = cluster.getDesiredConfigByType(HIVE_SITE) != null;
-          Boolean isStackNotLess22 = (stackId != null && stackId.getStackName().equals("HDP") &&
-                  VersionUtils.compareVersions(stackId.getStackVersion(), "2.2") >= 0);
-
-          if (cluster.getDesiredConfigByType(HIVE_ENV) != null && isStackNotLess22) {
-            Map<String, String> hiveEnvProps = new HashMap<>();
-            content = cluster.getDesiredConfigByType(HIVE_ENV).getProperties().get("content");
-            if(content != null) {
-              content = updateHiveEnvContent(content);
-              hiveEnvProps.put("content", content);
-            }
-            updateConfigurationPropertiesForCluster(cluster, HIVE_ENV, hiveEnvProps, true, true);
-          }
-
-          if (isHiveSitePresent && isStackNotLess22) {
-            Set<String> hiveSiteRemoveProps = new HashSet<>();
-            hiveSiteRemoveProps.add("hive.heapsize");
-            hiveSiteRemoveProps.add("hive.optimize.mapjoin.mapreduce");
-            hiveSiteRemoveProps.add("hive.server2.enable.impersonation");
-            hiveSiteRemoveProps.add("hive.auto.convert.sortmerge.join.noconditionaltask");
-
-            updateConfigurationPropertiesForCluster(cluster, HIVE_SITE, new HashMap<String, String>(), hiveSiteRemoveProps, false, true);
-          }
-        }
-      }
-    }
-  }
-
-  protected void updateOozieConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          Config oozieEnv = cluster.getDesiredConfigByType(OOZIE_ENV);
-          if (oozieEnv != null) {
-            Map<String, String> oozieEnvProperties = oozieEnv.getProperties();
-
-            String hostname = oozieEnvProperties.get("oozie_hostname");
-            String db_type = oozieEnvProperties.get("oozie_database");
-            String final_db_host = null;
-            // fix for empty hostname after 1.7 -> 2.1.x+ upgrade
-            if (hostname != null && db_type != null && hostname.equals("")) {
-              switch (db_type.toUpperCase()) {
-                case "EXISTING MYSQL DATABASE":
-                  final_db_host = oozieEnvProperties.get("oozie_existing_mysql_host");
-                  break;
-                case "EXISTING POSTGRESQL DATABASE":
-                  final_db_host = oozieEnvProperties.get("oozie_existing_postgresql_host");
-                  break;
-                case "EXISTING ORACLE DATABASE":
-                  final_db_host = oozieEnvProperties.get("oozie_existing_oracle_host");
-                  break;
-                default:
-                  final_db_host = null;
-                  break;
-              }
-              if (final_db_host != null) {
-                Map<String, String> newProperties = new HashMap<>();
-                newProperties.put("oozie_hostname", final_db_host);
-                updateConfigurationPropertiesForCluster(cluster, OOZIE_ENV, newProperties, true, true);
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-
-  protected String updateHiveEnvContent(String hiveEnvContent) {
-    if(hiveEnvContent == null) {
-      return null;
-    }
-    String oldHeapSizeRegex = "export HADOOP_HEAPSIZE=\"\\{\\{hive_heapsize\\}\\}\"\\s*\\n" +
-            "export HADOOP_CLIENT_OPTS=\"-Xmx\\$\\{HADOOP_HEAPSIZE\\}m \\$HADOOP_CLIENT_OPTS\"";
-    String newAuxJarPath = "";
-    return hiveEnvContent.replaceAll(oldHeapSizeRegex, Matcher.quoteReplacement(newAuxJarPath));
-  }
-
-  /**
-   * DDL changes for {@link #HOST_ROLE_COMMAND_TABLE}.
-   *
-   * @throws AmbariException
-   * @throws SQLException
-   */
-  private void executeHostRoleCommandDDLUpdates() throws AmbariException, SQLException {
-    dbAccessor.addColumn(HOST_ROLE_COMMAND_TABLE,
-        new DBColumnInfo(HOST_ROLE_COMMAND_SKIP_COLUMN, Integer.class, 1, 0, false));
-  }
-
-  protected void removeDataDirMountConfig() throws AmbariException {
-    Set<String> properties = new HashSet<>();
-    properties.add("dfs.datanode.data.dir.mount.file");
-
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          removeConfigurationPropertiesFromCluster(cluster, "hadoop-env", properties);
-        }
-      }
-    }
-  }
-}

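The hive-env cleanup deleted above is a content rewrite driven by one regular expression: the pair of heap-size export lines emitted by older hive-env templates is stripped out of the config's "content" property. A self-contained sketch using the same regex, with a made-up content value for illustration:

    import java.util.regex.Matcher;

    public final class HiveEnvRegexSketch {
      public static void main(String[] args) {
        // The exact pattern from updateHiveEnvContent above: two consecutive export lines.
        String oldHeapSizeRegex = "export HADOOP_HEAPSIZE=\"\\{\\{hive_heapsize\\}\\}\"\\s*\\n"
            + "export HADOOP_CLIENT_OPTS=\"-Xmx\\$\\{HADOOP_HEAPSIZE\\}m \\$HADOOP_CLIENT_OPTS\"";

        // Hypothetical hive-env content; only the middle two lines should be removed.
        String content = "export HIVE_CONF_DIR=/etc/hive/conf\n"
            + "export HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\n"
            + "export HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n"
            + "export METASTORE_PORT={{hive_metastore_port}}";

        // quoteReplacement keeps '$' and '\' in the replacement from being read as
        // group references; with an empty replacement it is purely defensive.
        String updated = content.replaceAll(oldHeapSizeRegex, Matcher.quoteReplacement(""));
        System.out.println(updated);
      }
    }

The trailing newline of the matched block is not part of the pattern, so a blank line is left where the two exports used to be; the original code behaved the same way.
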
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog2121.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog2121.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog2121.java
deleted file mode 100644
index ab41b99..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog2121.java
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.upgrade;
-
-import java.sql.SQLException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.orm.dao.DaoUtils;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.DesiredConfig;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.StackId;
-import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-/**
- * Upgrade catalog for version 2.1.2.1
- */
-public class UpgradeCatalog2121 extends AbstractUpgradeCatalog {
-
-  /**
-   * Logger.
-   */
-  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog2121.class);
-
-  @Inject
-  DaoUtils daoUtils;
-
-  private static final String OOZIE_SITE_CONFIG = "oozie-site";
-  private static final String OOZIE_AUTHENTICATION_KERBEROS_NAME_RULES = "oozie.authentication.kerberos.name.rules";
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Don't forget to register new UpgradeCatalogs in {@link org.apache.ambari.server.upgrade.SchemaUpgradeHelper.UpgradeHelperModule#configure()}
-   *
-   * @param injector Guice injector that tracks dependencies and uses bindings to inject them.
-   */
-  @Inject
-  public UpgradeCatalog2121(Injector injector) {
-    super(injector);
-
-    daoUtils = injector.getInstance(DaoUtils.class);
-  }
-
-  // ----- UpgradeCatalog ----------------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getTargetVersion() {
-    return "2.1.2.1";
-  }
-
-  // ----- AbstractUpgradeCatalog --------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getSourceVersion() {
-    return "2.1.2";
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDDLUpdates() throws AmbariException, SQLException {
-
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executePreDMLUpdates() throws AmbariException, SQLException {
-
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDMLUpdates() throws AmbariException, SQLException {
-    updatePHDConfigs();
-    updateOozieConfigs();
-  }
-
-  /**
-   * Update PHD stack configs
-   * @throws AmbariException
-   */
-  protected void updatePHDConfigs() throws AmbariException {
-
-    Map<String, String> replacements = new LinkedHashMap<>();
-    replacements.put("-Dstack.name=\\{\\{\\s*stack_name\\s*\\}\\}\\s*", "");
-    replacements.put("-Dstack.name=\\$\\{stack.name\\}\\s*", "");
-    replacements.put("-Dstack.version=\\{\\{\\s*stack_version_buildnum\\s*\\}\\}", "-Dhdp.version=\\$HDP_VERSION");
-    replacements.put("-Dstack.version=\\$\\{stack.version\\}", "-Dhdp.version=\\$\\{hdp.version\\}");
-    replacements.put("\\{\\{\\s*stack_name\\s*\\}\\}", "phd");
-    replacements.put("\\$\\{stack.name\\}", "phd");
-    replacements.put("\\$\\{stack.version\\}", "\\$\\{hdp.version\\}");
-
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-      if ((clusterMap != null) && !clusterMap.isEmpty()) {
-        // Iterate through the clusters and perform any configuration updates
-        Set<StackId> stackIds = new HashSet<>();
-
-        for (final Cluster cluster : clusterMap.values()) {
-          for (Service service : cluster.getServices().values()) {
-            StackId currentStackVersion = service.getDesiredStackId();
-
-            if (stackIds.contains(currentStackVersion)) {
-              continue;
-            } else {
-              stackIds.add(currentStackVersion);
-            }
-
-            String currentStackName = currentStackVersion != null? currentStackVersion.getStackName() : null;
-            if (currentStackName != null && currentStackName.equalsIgnoreCase("PHD")) {
-              // Update configs only if PHD stack is deployed
-              Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs();
-              if(desiredConfigs != null && !desiredConfigs.isEmpty()) {
-                for (Map.Entry<String, DesiredConfig> dc : desiredConfigs.entrySet()) {
-                  String configType = dc.getKey();
-                  DesiredConfig desiredConfig = dc.getValue();
-                  String configTag = desiredConfig.getTag();
-                  Config config = cluster.getConfig(configType, configTag);
-
-                  Map<String, String> properties = config.getProperties();
-                  if(properties != null && !properties.isEmpty()) {
-                    Map<String, String> updates = new HashMap<>();
-                    for (Map.Entry<String, String> property : properties.entrySet()) {
-                      String propertyKey = property.getKey();
-                      String propertyValue = property.getValue();
-                      String modifiedPropertyValue = propertyValue;
-                      for (String regex : replacements.keySet()) {
-                        modifiedPropertyValue = modifiedPropertyValue.replaceAll(regex, replacements.get(regex));
-                      }
-                      if (!modifiedPropertyValue.equals(propertyValue)) {
-                        updates.put(propertyKey, modifiedPropertyValue);
-                      }
-                    }
-                    if (!updates.isEmpty()) {
-                      updateConfigurationPropertiesForCluster(cluster, configType, updates, true, false);
-                    }
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-
-  protected void updateOozieConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config oozieSiteProps = cluster.getDesiredConfigByType(OOZIE_SITE_CONFIG);
-      if (oozieSiteProps != null) {
-        // Remove oozie.authentication.kerberos.name.rules if empty
-        String oozieAuthKerbRules = oozieSiteProps.getProperties().get(OOZIE_AUTHENTICATION_KERBEROS_NAME_RULES);
-        if (StringUtils.isBlank(oozieAuthKerbRules)) {
-          Set<String> removeProperties = new HashSet<>();
-          removeProperties.add(OOZIE_AUTHENTICATION_KERBEROS_NAME_RULES);
-          updateConfigurationPropertiesForCluster(cluster, OOZIE_SITE_CONFIG, new HashMap<String, String>(), removeProperties, true, false);
-        }
-      }
-    }
-
-  }
-}
-
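The updatePHDConfigs routine deleted above walks every desired config type and applies an ordered list of regex rewrites, writing back only the properties whose value actually changed. The ordering is why it used a LinkedHashMap: the prefixed -Dstack.version patterns have to fire before the bare ${stack.version} fallback, or the flag name would never be rewritten. A trimmed, runnable sketch with a subset of the replacement map from the code above and a hypothetical property value:

    import java.util.HashMap;
    import java.util.LinkedHashMap;
    import java.util.Map;

    public final class OrderedConfigRewriteSketch {
      public static void main(String[] args) {
        // Subset of the replacements above; insertion order is significant.
        Map<String, String> replacements = new LinkedHashMap<>();
        replacements.put("-Dstack.name=\\$\\{stack.name\\}\\s*", "");
        replacements.put("-Dstack.version=\\$\\{stack.version\\}", "-Dhdp.version=\\$\\{hdp.version\\}");
        replacements.put("\\$\\{stack.version\\}", "\\$\\{hdp.version\\}");

        // Hypothetical PHD-era property; the real code iterates cluster.getDesiredConfigs().
        Map<String, String> properties = new HashMap<>();
        properties.put("opts",
            "-Dstack.name=${stack.name} -Dstack.version=${stack.version} -Dlog.dir=${stack.version}/logs");

        Map<String, String> updates = new HashMap<>();
        for (Map.Entry<String, String> property : properties.entrySet()) {
          String modified = property.getValue();
          for (Map.Entry<String, String> r : replacements.entrySet()) {
            modified = modified.replaceAll(r.getKey(), r.getValue());
          }
          if (!modified.equals(property.getValue())) {
            updates.put(property.getKey(), modified); // write back only what changed
          }
        }
        // Prints {opts=-Dhdp.version=${hdp.version} -Dlog.dir=${hdp.version}/logs}
        System.out.println(updates);
      }
    }

Running the bare ${stack.version} fallback first would leave "-Dstack.version=${hdp.version}" behind, which is exactly the mistake the insertion order guards against.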