Posted to commits@ambari.apache.org by rl...@apache.org on 2017/06/26 13:58:00 UTC

[26/34] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog221.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog221.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog221.java
deleted file mode 100644
index d9afec8..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog221.java
+++ /dev/null
@@ -1,456 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
-import org.apache.ambari.server.orm.dao.DaoUtils;
-import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.utils.VersionUtils;
-import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.gson.JsonArray;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParser;
-import com.google.gson.JsonPrimitive;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-/**
- * Upgrade catalog for version 2.2.1.
- */
-public class UpgradeCatalog221 extends AbstractUpgradeCatalog {
-
-  private static final String AMS_HBASE_SITE = "ams-hbase-site";
-  private static final String AMS_SITE = "ams-site";
-  private static final String AMS_HBASE_SECURITY_SITE = "ams-hbase-security-site";
-  private static final String AMS_ENV = "ams-env";
-  private static final String AMS_HBASE_ENV = "ams-hbase-env";
-  private static final String AMS_MODE = "timeline.metrics.service.operation.mode";
-  private static final String ZK_ZNODE_PARENT = "zookeeper.znode.parent";
-  private static final String ZK_CLIENT_PORT = "hbase.zookeeper.property.clientPort";
-  private static final String ZK_TICK_TIME = "hbase.zookeeper.property.tickTime";
-  private static final String CLUSTER_ENV = "cluster-env";
-  private static final String SECURITY_ENABLED = "security_enabled";
-  private static final String TOPOLOGY_HOST_INFO_TABLE = "topology_host_info";
-  private static final String TOPOLOGY_HOST_INFO_RACK_INFO_COLUMN = "rack_info";
-  private static final String TEZ_SITE = "tez-site";
-
-  @Inject
-  DaoUtils daoUtils;
-
-  /**
-   * Logger.
-   */
-  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog221.class);
-
-  private static final String OOZIE_SITE_CONFIG = "oozie-site";
-  private static final String OOZIE_SERVICE_HADOOP_CONFIGURATIONS_PROPERTY_NAME = "oozie.service.HadoopAccessorService.hadoop.configurations";
-  private static final String OLD_DEFAULT_HADOOP_CONFIG_PATH = "/etc/hadoop/conf";
-  private static final String NEW_DEFAULT_HADOOP_CONFIG_PATH = "{{hadoop_conf_dir}}";
-
-  private static final String BLUEPRINT_HOSTGROUP_COMPONENT_TABLE_NAME = "hostgroup_component";
-  private static final String BLUEPRINT_PROVISION_ACTION_COLUMN_NAME = "provision_action";
-
-  private static final String RANGER_KMS_DBKS_CONFIG = "dbks-site";
-  private static final String RANGER_KMS_DB_FLAVOR = "DB_FLAVOR";
-  private static final String RANGER_KMS_DB_HOST = "db_host";
-  private static final String RANGER_KMS_DB_NAME = "db_name";
-  private static final String RANGER_KMS_JDBC_URL = "ranger.ks.jpa.jdbc.url";
-  private static final String RANGER_KMS_JDBC_DRIVER = "ranger.ks.jpa.jdbc.driver";
-  private static final String RANGER_KMS_PROPERTIES = "kms-properties";
-
-  private static final String TEZ_COUNTERS_MAX = "tez.counters.max";
-  private static final String TEZ_COUNTERS_MAX_GROUPS = "tez.counters.max.groups";
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Don't forget to register new UpgradeCatalogs in {@link org.apache.ambari.server.upgrade.SchemaUpgradeHelper.UpgradeHelperModule#configure()}
-   *
-   * @param injector Guice injector used to track dependencies and inject them via bindings.
-   */
-  @Inject
-  public UpgradeCatalog221(Injector injector) {
-    super(injector);
-    this.injector = injector;
-  }
-
-  // ----- UpgradeCatalog ----------------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getTargetVersion() {
-    return "2.2.1";
-  }
-
-  // ----- AbstractUpgradeCatalog --------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getSourceVersion() {
-    return "2.2.0";
-  }
-
-
-  @Override
-  protected void executeDDLUpdates() throws AmbariException, SQLException {
-    // indices to improve request status calculation performance
-    dbAccessor.createIndex("idx_stage_request_id", "stage", "request_id");
-    dbAccessor.createIndex("idx_hrc_request_id", "host_role_command", "request_id");
-    dbAccessor.createIndex("idx_rsc_request_id", "role_success_criteria", "request_id");
-
-    executeBlueprintProvisionActionDDLUpdates();
-
-    dbAccessor.addColumn(TOPOLOGY_HOST_INFO_TABLE,
-        new DBAccessor.DBColumnInfo(TOPOLOGY_HOST_INFO_RACK_INFO_COLUMN, String.class, 255));
-
-  }
-
-  private void executeBlueprintProvisionActionDDLUpdates() throws AmbariException, SQLException {
-    // add provision_action column to the hostgroup_component table for Blueprints
-    dbAccessor.addColumn(BLUEPRINT_HOSTGROUP_COMPONENT_TABLE_NAME, new DBAccessor.DBColumnInfo(BLUEPRINT_PROVISION_ACTION_COLUMN_NAME,
-      String.class, 255, null, true));
-  }
-
-  @Override
-  protected void executePreDMLUpdates() throws AmbariException, SQLException {
-    // No pre-DML updates are required for this version.
-  }
-
-  @Override
-  protected void executeDMLUpdates() throws AmbariException, SQLException {
-    addNewConfigurationsFromXml();
-    updateAlerts();
-    updateOozieConfigs();
-    updateTezConfigs();
-    updateRangerKmsDbksConfigs();
-    updateAMSConfigs();
-  }
-
-  protected void updateAlerts() {
-    LOG.info("Updating alert definitions.");
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    AlertDefinitionDAO alertDefinitionDAO = injector.getInstance(AlertDefinitionDAO.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
-    for (final Cluster cluster : clusterMap.values()) {
-      long clusterID = cluster.getClusterId();
-      final AlertDefinitionEntity hiveMetastoreProcessAlertDefinitionEntity = alertDefinitionDAO.findByName(
-              clusterID, "hive_metastore_process");
-      final AlertDefinitionEntity hiveServerProcessAlertDefinitionEntity = alertDefinitionDAO.findByName(
-              clusterID, "hive_server_process");
-
-      List<AlertDefinitionEntity> hiveAlertDefinitions = new ArrayList<>();
-      if(hiveMetastoreProcessAlertDefinitionEntity != null) {
-        hiveAlertDefinitions.add(hiveMetastoreProcessAlertDefinitionEntity);
-      }
-      if(hiveServerProcessAlertDefinitionEntity != null) {
-        hiveAlertDefinitions.add(hiveServerProcessAlertDefinitionEntity);
-      }
-
-      for(AlertDefinitionEntity alertDefinition : hiveAlertDefinitions){
-        String source = alertDefinition.getSource();
-
-        alertDefinition.setScheduleInterval(3);
-        alertDefinition.setSource(addCheckCommandTimeoutParam(source));
-        alertDefinition.setHash(UUID.randomUUID().toString());
-
-        alertDefinitionDAO.merge(alertDefinition);
-      }
-
-      final AlertDefinitionEntity amsZookeeperProcessAlertDefinitionEntity = alertDefinitionDAO.findByName(
-        clusterID, "ams_metrics_collector_zookeeper_server_process");
-
-      if (amsZookeeperProcessAlertDefinitionEntity != null) {
-        LOG.info("Removing alert : ams_metrics_collector_zookeeper_server_process");
-        alertDefinitionDAO.remove(amsZookeeperProcessAlertDefinitionEntity);
-      }
-    }
-  }
-
-  protected String addCheckCommandTimeoutParam(String source) {
-    JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
-    JsonArray parametersJson = sourceJson.getAsJsonArray("parameters");
-
-    boolean parameterExists = parametersJson != null && !parametersJson.isJsonNull();
-
-    if (parameterExists) {
-      Iterator<JsonElement> jsonElementIterator = parametersJson.iterator();
-      while(jsonElementIterator.hasNext()) {
-        JsonElement element = jsonElementIterator.next();
-        JsonElement name = element.getAsJsonObject().get("name");
-        if (name != null && !name.isJsonNull() && name.getAsString().equals("check.command.timeout")) {
-          return sourceJson.toString();
-        }
-      }
-    }
-
-    JsonObject checkCommandTimeoutParamJson = new JsonObject();
-    checkCommandTimeoutParamJson.add("name", new JsonPrimitive("check.command.timeout"));
-    checkCommandTimeoutParamJson.add("display_name", new JsonPrimitive("Check command timeout"));
-    checkCommandTimeoutParamJson.add("value", new JsonPrimitive(60.0));
-    checkCommandTimeoutParamJson.add("type", new JsonPrimitive("NUMERIC"));
-    checkCommandTimeoutParamJson.add("description", new JsonPrimitive("The maximum time before check command will be killed by timeout"));
-    checkCommandTimeoutParamJson.add("units", new JsonPrimitive("seconds"));
-
-    if (!parameterExists) {
-      parametersJson = new JsonArray();
-      parametersJson.add(checkCommandTimeoutParamJson);
-      sourceJson.add("parameters", parametersJson);
-    } else {
-      parametersJson.add(checkCommandTimeoutParamJson);
-      sourceJson.remove("parameters");
-      sourceJson.add("parameters", parametersJson);
-    }
-
-    return sourceJson.toString();
-  }
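-  // For illustration (hypothetical alert source, not from the original file):
-  // given a source such as
-  //   {"type":"SCRIPT","path":"alert_hive_metastore.py"}
-  // addCheckCommandTimeoutParam() returns the same JSON with a "parameters"
-  // array appended:
-  //   {"type":"SCRIPT","path":"alert_hive_metastore.py","parameters":[
-  //     {"name":"check.command.timeout","display_name":"Check command timeout",
-  //      "value":60.0,"type":"NUMERIC",
-  //      "description":"The maximum time before the check command is killed by the timeout",
-  //      "units":"seconds"}]}
-  // A source that already contains a check.command.timeout parameter is
-  // returned unchanged.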
-
-  protected void updateAMSConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-
-          Config amsEnv = cluster.getDesiredConfigByType(AMS_ENV);
-          if (amsEnv != null) {
-            Map<String, String> amsEnvProperties = amsEnv.getProperties();
-            String content = amsEnvProperties.get("content");
-            Map<String, String> newProperties = new HashMap<>();
-            newProperties.put("content", updateAmsEnvContent(content));
-            updateConfigurationPropertiesForCluster(cluster, AMS_ENV, newProperties, true, true);
-          }
-
-          String znodeParent = null;
-          Config amsHbaseSecuritySite = cluster.getDesiredConfigByType(AMS_HBASE_SECURITY_SITE);
-          if (amsHbaseSecuritySite != null) {
-            Map<String, String> amsHbaseSecuritySiteProperties = amsHbaseSecuritySite.getProperties();
-            znodeParent = amsHbaseSecuritySiteProperties.get(ZK_ZNODE_PARENT);
-            LOG.info("Removing config zookeeper.znode.parent from ams-hbase-security-site");
-            removeConfigurationPropertiesFromCluster(cluster, AMS_HBASE_SECURITY_SITE, Collections.singleton(ZK_ZNODE_PARENT));
-          }
-
-          Config amsHbaseSite = cluster.getDesiredConfigByType(AMS_HBASE_SITE);
-          if (amsHbaseSite != null) {
-            Map<String, String> amsHbaseSiteProperties = amsHbaseSite.getProperties();
-            Map<String, String> newProperties = new HashMap<>();
-
-            if (!amsHbaseSiteProperties.containsKey(ZK_ZNODE_PARENT)) {
-              if (StringUtils.isEmpty(znodeParent) || "/hbase".equals(znodeParent)) {
-                boolean isSecurityEnabled = false;
-                Config clusterEnv = cluster.getDesiredConfigByType(CLUSTER_ENV);
-                if (clusterEnv != null) {
-                  Map<String,String> clusterEnvProperties = clusterEnv.getProperties();
-                  if (clusterEnvProperties.containsKey(SECURITY_ENABLED)) {
-                    isSecurityEnabled = Boolean.valueOf(clusterEnvProperties.get(SECURITY_ENABLED));
-                  }
-                }
-                znodeParent = "/ams-hbase-" + (isSecurityEnabled ? "secure" : "unsecure");
-              }
-
-              LOG.info("Adding config zookeeper.znode.parent=" + znodeParent + " to ams-hbase-site");
-              newProperties.put(ZK_ZNODE_PARENT, znodeParent);
-
-            }
-
-            boolean isDistributed = false;
-            Config amsSite = cluster.getDesiredConfigByType(AMS_SITE);
-            if (amsSite != null) {
-              if ("distributed".equals(amsSite.getProperties().get(AMS_MODE))) {
-                isDistributed = true;
-              }
-            }
-
-            // Skip override if custom port found in embedded mode.
-            if (amsHbaseSiteProperties.containsKey(ZK_CLIENT_PORT) &&
-               (isDistributed || amsHbaseSiteProperties.get(ZK_CLIENT_PORT).equals("61181"))) {
-              String newValue = "{{zookeeper_clientPort}}";
-              LOG.info("Replacing value of " + ZK_CLIENT_PORT + " from " +
-                amsHbaseSiteProperties.get(ZK_CLIENT_PORT) + " to " +
-                newValue + " in ams-hbase-site");
-
-              newProperties.put(ZK_CLIENT_PORT, newValue);
-            }
-
-            if (!amsHbaseSiteProperties.containsKey(ZK_TICK_TIME)) {
-              LOG.info("Adding config " + ZK_TICK_TIME + " to ams-hbase-site");
-              newProperties.put(ZK_TICK_TIME, "6000");
-            }
-
-            updateConfigurationPropertiesForCluster(cluster, AMS_HBASE_SITE, newProperties, true, true);
-          }
-
-          Config amsHbaseEnv = cluster.getDesiredConfigByType(AMS_HBASE_ENV);
-          if (amsHbaseEnv != null) {
-            Map<String, String> amsHbaseEnvProperties = amsHbaseEnv.getProperties();
-            String content = amsHbaseEnvProperties.get("content");
-            Map<String, String> newProperties = new HashMap<>();
-            newProperties.put("content", updateAmsHbaseEnvContent(content));
-            updateConfigurationPropertiesForCluster(cluster, AMS_HBASE_ENV, newProperties, true, true);
-          }
-        }
-      }
-    }
-  }
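-  // For illustration: on a cluster where cluster-env has security_enabled=true
-  // (a hypothetical value) and neither ams-hbase-site nor ams-hbase-security-site
-  // defines a custom znode parent, the logic above writes
-  // zookeeper.znode.parent=/ams-hbase-secure to ams-hbase-site; with
-  // security_enabled=false it writes /ams-hbase-unsecure.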
-
-  protected String updateAmsHbaseEnvContent(String content) {
-    if (content == null) {
-      return null;
-    }
-    String regSearch = "_jaas_config_file\\}\\} -Dzookeeper.sasl.client.username=\\{\\{zk_servicename\\}\\}";
-    String replacement = "_jaas_config_file}}";
-    content = content.replaceAll(regSearch, replacement);
-    return content;
-  }
-
-  protected String updateAmsEnvContent(String content) {
-
-    if (content == null) {
-      return null;
-    }
-    String regSearch = "-Djava.security.auth.login.config=\\{\\{ams_collector_jaas_config_file\\}\\} " +
-      "-Dzookeeper.sasl.client.username=\\{\\{zk_servicename\\}\\}";
-    String replacement = "-Djava.security.auth.login.config={{ams_collector_jaas_config_file}}";
-    content = content.replaceAll(regSearch, replacement);
-
-    return content;
-  }
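-  // For illustration (the surrounding export line is hypothetical; only the
-  // replaced fragment comes from the regex above): an ams-env content line like
-  //   export AMS_COLLECTOR_OPTS="-Djava.security.auth.login.config={{ams_collector_jaas_config_file}} -Dzookeeper.sasl.client.username={{zk_servicename}}"
-  // is rewritten to
-  //   export AMS_COLLECTOR_OPTS="-Djava.security.auth.login.config={{ams_collector_jaas_config_file}}"
-  // i.e. the -Dzookeeper.sasl.client.username flag is dropped.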
-
-  protected void updateOozieConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config oozieSiteProps = cluster.getDesiredConfigByType(OOZIE_SITE_CONFIG);
-      if (oozieSiteProps != null) {
-        // Update oozie.service.HadoopAccessorService.hadoop.configurations
-        Map<String, String> updateProperties = new HashMap<>();
-        String oozieHadoopConfigProperty = oozieSiteProps.getProperties().get(OOZIE_SERVICE_HADOOP_CONFIGURATIONS_PROPERTY_NAME);
-        if(oozieHadoopConfigProperty != null && oozieHadoopConfigProperty.contains(OLD_DEFAULT_HADOOP_CONFIG_PATH)) {
-          String updatedOozieHadoopConfigProperty = oozieHadoopConfigProperty.replaceAll(
-              OLD_DEFAULT_HADOOP_CONFIG_PATH, NEW_DEFAULT_HADOOP_CONFIG_PATH);
-          updateProperties.put(OOZIE_SERVICE_HADOOP_CONFIGURATIONS_PROPERTY_NAME, updatedOozieHadoopConfigProperty);
-          updateConfigurationPropertiesForCluster(cluster, OOZIE_SITE_CONFIG, updateProperties, true, false);
-        }
-      }
-    }
-  }
-
-  protected void updateTezConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Service service = cluster.getServices().get("TEZ");
-
-      if (null == service) {
-        continue;
-      }
-
-      Config tezSiteProps = cluster.getDesiredConfigByType(TEZ_SITE);
-      if (tezSiteProps != null) {
-
-        // Update tez.counters.max and tez.counters.max.groups configurations
-        String tezCountersMaxProperty = tezSiteProps.getProperties().get(TEZ_COUNTERS_MAX);
-        String tezCountersMaxGroupesProperty = tezSiteProps.getProperties().get(TEZ_COUNTERS_MAX_GROUPS);
-
-        StackId stackId = service.getDesiredStackId();
-        boolean isStackNotLess23 = (stackId.getStackName().equals("HDP") &&
-            VersionUtils.compareVersions(stackId.getStackVersion(), "2.3") >= 0);
-
-        if (isStackNotLess23) {
-          Map<String, String> updates = new HashMap<>();
-          if (tezCountersMaxProperty != null && tezCountersMaxProperty.equals("2000")) {
-            updates.put(TEZ_COUNTERS_MAX, "10000");
-          }
-          if (tezCountersMaxGroupesProperty != null && tezCountersMaxGroupesProperty.equals("1000")) {
-            updates.put(TEZ_COUNTERS_MAX_GROUPS, "3000");
-          }
-          if (!updates.isEmpty()) {
-            updateConfigurationPropertiesForCluster(cluster, TEZ_SITE, updates, true, false);
-          }
-        }
-      }
-    }
-  }
-
-  protected void updateRangerKmsDbksConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Map<String, String> newRangerKmsProps = new HashMap<>();
-      Config rangerKmsDbConfigs = cluster.getDesiredConfigByType(RANGER_KMS_PROPERTIES);
-      if (rangerKmsDbConfigs != null) {
-        String dbFlavor = rangerKmsDbConfigs.getProperties().get(RANGER_KMS_DB_FLAVOR);
-        String dbHost = rangerKmsDbConfigs.getProperties().get(RANGER_KMS_DB_HOST);
-        String dbName = rangerKmsDbConfigs.getProperties().get(RANGER_KMS_DB_NAME);
-        String dbConnectionString = null;
-        String dbDriver = null;
-
-        if (dbFlavor != null && dbHost != null && dbName != null) {
-          if ("MYSQL".equalsIgnoreCase(dbFlavor)) {
-            dbConnectionString = "jdbc:mysql://"+dbHost+"/"+dbName;
-            dbDriver = "com.mysql.jdbc.Driver";
-          } else if ("ORACLE".equalsIgnoreCase(dbFlavor)) {
-            dbConnectionString = "jdbc:oracle:thin:@//"+dbHost;
-            dbDriver = "oracle.jdbc.driver.OracleDriver";
-          } else if ("POSTGRES".equalsIgnoreCase(dbFlavor)) {
-            dbConnectionString = "jdbc:postgresql://"+dbHost+"/"+dbName;
-            dbDriver = "org.postgresql.Driver";
-          } else if ("MSSQL".equalsIgnoreCase(dbFlavor)) {
-            dbConnectionString = "jdbc:sqlserver://"+dbHost+";databaseName="+dbName;
-            dbDriver = "com.microsoft.sqlserver.jdbc.SQLServerDriver";
-          } else if ("SQLA".equalsIgnoreCase(dbFlavor)) {
-            dbConnectionString = "jdbc:sqlanywhere:database="+dbName+";host="+dbHost;
-            dbDriver = "sap.jdbc4.sqlanywhere.IDriver";
-          }
-          newRangerKmsProps.put(RANGER_KMS_JDBC_URL, dbConnectionString);
-          newRangerKmsProps.put(RANGER_KMS_JDBC_DRIVER, dbDriver);
-          updateConfigurationPropertiesForCluster(cluster, RANGER_KMS_DBKS_CONFIG, newRangerKmsProps, true, false);
-        }
-      }
-    }
-  }
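-  // For illustration, with kms-properties values DB_FLAVOR=MYSQL,
-  // db_host=kmsdb.example.com and db_name=rangerkms (hypothetical host and
-  // database names), the method above writes to dbks-site:
-  //   ranger.ks.jpa.jdbc.url    = jdbc:mysql://kmsdb.example.com/rangerkms
-  //   ranger.ks.jpa.jdbc.driver = com.mysql.jdbc.Driver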
-
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog222.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog222.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog222.java
deleted file mode 100644
index cc7dcb8..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog222.java
+++ /dev/null
@@ -1,781 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import java.io.File;
-import java.io.FileReader;
-import java.lang.reflect.Type;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
-import org.apache.ambari.server.orm.dao.DaoUtils;
-import org.apache.ambari.server.orm.dao.WidgetDAO;
-import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
-import org.apache.ambari.server.orm.entities.WidgetEntity;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.ServiceComponentHost;
-import org.apache.ambari.server.state.ServiceInfo;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.StackInfo;
-import org.apache.ambari.server.state.stack.WidgetLayout;
-import org.apache.ambari.server.state.stack.WidgetLayoutInfo;
-import org.apache.ambari.server.utils.VersionUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.reflect.TypeToken;
-import com.google.gson.Gson;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParser;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-/**
- * Upgrade catalog for version 2.2.2.
- */
-public class UpgradeCatalog222 extends AbstractUpgradeCatalog {
-
-  @Inject
-  DaoUtils daoUtils;
-
-  /**
-   * Logger.
-   */
-  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog222.class);
-  private static final String AMS_SITE = "ams-site";
-  private static final String AMS_HBASE_SITE = "ams-hbase-site";
-  private static final String HIVE_SITE_CONFIG = "hive-site";
-  private static final String ATLAS_APPLICATION_PROPERTIES_CONFIG = "application-properties";
-  private static final String ATLAS_HOOK_HIVE_MINTHREADS_PROPERTY = "atlas.hook.hive.minThreads";
-  private static final String ATLAS_HOOK_HIVE_MAXTHREADS_PROPERTY = "atlas.hook.hive.maxThreads";
-  private static final String ATLAS_CLUSTER_NAME_PROPERTY = "atlas.cluster.name";
-  private static final String ATLAS_ENABLETLS_PROPERTY = "atlas.enableTLS";
-  private static final String ATLAS_SERVER_HTTP_PORT_PROPERTY = "atlas.server.http.port";
-  private static final String ATLAS_SERVER_HTTPS_PORT_PROPERTY = "atlas.server.https.port";
-  private static final String ATLAS_REST_ADDRESS_PROPERTY = "atlas.rest.address";
-  private static final String HBASE_ENV_CONFIG = "hbase-env";
-  private static final String CONTENT_PROPERTY = "content";
-
-  private static final String UPGRADE_TABLE = "upgrade";
-  private static final String UPGRADE_SUSPENDED_COLUMN = "suspended";
-
-  private static final String HOST_AGGREGATOR_DAILY_CHECKPOINTCUTOFFMULTIPLIER_PROPERTY =
-    "timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier";
-  private static final String CLUSTER_AGGREGATOR_DAILY_CHECKPOINTCUTOFFMULTIPLIER_PROPERTY =
-    "timeline.metrics.cluster.aggregator.daily.checkpointCutOffMultiplier";
-  private static final String TIMELINE_METRICS_SERVICE_WATCHER_DISABLED_PROPERTY = "timeline.metrics.service.watcher.disabled";
-  private static final String AMS_MODE_PROPERTY = "timeline.metrics.service.operation.mode";
-  public static final String PRECISION_TABLE_TTL_PROPERTY = "timeline.metrics.host.aggregator.ttl";
-  public static final String CLUSTER_SECOND_TABLE_TTL_PROPERTY = "timeline.metrics.cluster.aggregator.second.ttl";
-  public static final String CLUSTER_MINUTE_TABLE_TTL_PROPERTY = "timeline.metrics.cluster.aggregator.minute.ttl";
-  public static final String AMS_WEBAPP_ADDRESS_PROPERTY = "timeline.metrics.service.webapp.address";
-  public static final String HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD_PROPERTY = "hbase.client.scanner.timeout.period";
-  public static final String HBASE_RPC_TIMEOUT_PROPERTY = "hbase.rpc.timeout";
-
-  public static final String PHOENIX_QUERY_TIMEOUT_PROPERTY = "phoenix.query.timeoutMs";
-  public static final String PHOENIX_QUERY_KEEPALIVE_PROPERTY = "phoenix.query.keepAliveMs";
-  public static final String TIMELINE_METRICS_CLUSTER_AGGREGATOR_INTERPOLATION_ENABLED
-    = "timeline.metrics.cluster.aggregator.interpolation.enabled";
-  public static final String TIMELINE_METRICS_SINK_COLLECTION_PERIOD = "timeline.metrics.sink.collection.period";
-
-  public static final String AMS_SERVICE_NAME = "AMBARI_METRICS";
-  public static final String AMS_COLLECTOR_COMPONENT_NAME = "METRICS_COLLECTOR";
-
-  protected static final String WIDGET_TABLE = "widget";
-  protected static final String WIDGET_DESCRIPTION = "description";
-  protected static final String WIDGET_NAME = "widget_name";
-  protected static final String WIDGET_CORRUPT_BLOCKS = "Corrupted Blocks";
-  protected static final String WIDGET_CORRUPT_REPLICAS = "Blocks With Corrupted Replicas";
-  protected static final String WIDGET_CORRUPT_REPLICAS_DESCRIPTION = "Number of data blocks with at least one " +
-    "corrupted replica (but not all of them). It is indicative of bad HDFS health.";
-  protected static final String WIDGET_VALUES = "widget_values";
-  protected static final String WIDGET_VALUES_VALUE =
-    "${Hadoop:service\\" +
-    "\\u003dNameNode,name\\" +
-    "\\u003dFSNamesystem.CorruptBlocks}";
-
-  public final static String HBASE_SITE_HBASE_COPROCESSOR_MASTER_CLASSES = "hbase.coprocessor.master.classes";
-  public final static String HBASE_SITE_HBASE_COPROCESSOR_REGION_CLASSES = "hbase.coprocessor.region.classes";
-  public final static String HBASE_SITE_HBASE_COPROCESSOR_REGIONSERVER_CLASSES = "hbase.coprocessor.regionserver.classes";
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Don't forget to register new UpgradeCatalogs in {@link org.apache.ambari.server.upgrade.SchemaUpgradeHelper.UpgradeHelperModule#configure()}
-   *
-   * @param injector Guice injector used to track dependencies and inject them via bindings.
-   */
-  @Inject
-  public UpgradeCatalog222(Injector injector) {
-    super(injector);
-    this.injector = injector;
-  }
-
-  // ----- UpgradeCatalog ----------------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getTargetVersion() {
-    return "2.2.2";
-  }
-
-  // ----- AbstractUpgradeCatalog --------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getSourceVersion() {
-    return "2.2.1";
-  }
-
-
-  @Override
-  protected void executeDDLUpdates() throws AmbariException, SQLException {
-    DBAccessor.DBColumnInfo columnInfo = new DBAccessor.DBColumnInfo("host_id", Long.class);
-    dbAccessor.addColumn("topology_host_info", columnInfo);
-    dbAccessor.addFKConstraint("topology_host_info", "FK_hostinfo_host_id", "host_id", "hosts", "host_id", true);
-    dbAccessor.executeUpdate("update topology_host_info set host_id = (select hosts.host_id from hosts where hosts.host_name = topology_host_info.fqdn)");
-
-    updateUpgradeTable();
-  }
-
-  @Override
-  protected void executePreDMLUpdates() throws AmbariException, SQLException {
-    // No pre-DML updates are required for this version.
-  }
-
-  @Override
-  protected void executeDMLUpdates() throws AmbariException, SQLException {
-    addNewConfigurationsFromXml();
-    updateAlerts();
-    updateStormConfigs();
-    updateAMSConfigs();
-    updateHiveConfig();
-    updateHostRoleCommands();
-    updateHDFSWidgetDefinition();
-    updateYARNWidgetDefinition();
-    updateHBASEWidgetDefinition();
-    updateHbaseEnvConfig();
-    updateCorruptedReplicaWidget();
-    updateZookeeperConfigs();
-    updateHBASEConfigs();
-    createNewSliderConfigVersion();
-    initializeStormAndKafkaWidgets();
-  }
-
-  protected void createNewSliderConfigVersion() {
-    // Create a new service config version for SLIDER to link the slider-client
-    // config to the SLIDER service in the serviceconfigmapping table. The config
-    // may be unmapped because of a bug we had a long time ago.
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(ambariManagementController.getClusters());
-
-    for (final Cluster cluster : clusterMap.values()) {
-      Service sliderService = null;
-      try {
-        sliderService = cluster.getService("SLIDER");
-      } catch(AmbariException ambariException) {
-        LOG.info("SLIDER service not found in cluster while creating new serviceconfig version for SLIDER service.");
-      }
-      if (sliderService != null) {
-        cluster.createServiceConfigVersion("SLIDER", AUTHENTICATED_USER_NAME, "Creating new service config version for SLIDER service.", null);
-      }
-    }
-  }
-
-  protected void updateZookeeperConfigs() throws  AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(ambariManagementController.getClusters());
-
-    for (final Cluster cluster : clusterMap.values()) {
-      Config zooEnv = cluster.getDesiredConfigByType("zookeeper-env");
-      if (zooEnv != null && zooEnv.getProperties().containsKey("zk_server_heapsize")) {
-        String heapSizeValue = zooEnv.getProperties().get("zk_server_heapsize");
-        if(!heapSizeValue.endsWith("m")) {
-          Map<String, String> updates = new HashMap<>();
-          updates.put("zk_server_heapsize", heapSizeValue+"m");
-          updateConfigurationPropertiesForCluster(cluster, "zookeeper-env", updates, true, false);
-        }
-
-      }
-    }
-  }
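-  // For illustration: a zookeeper-env with zk_server_heapsize=1024 (hypothetical
-  // value) is rewritten to zk_server_heapsize=1024m; values already ending in
-  // "m" are left unchanged.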
-
-  protected void updateHBASEConfigs() throws  AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(ambariManagementController.getClusters());
-
-    for (final Cluster cluster : clusterMap.values()) {
-
-      Service service = cluster.getServices().get("HBASE");
-
-      if (null == service) {
-        continue;
-      }
-
-      StackId stackId = service.getDesiredStackId();
-
-      Config hbaseSite = cluster.getDesiredConfigByType("hbase-site");
-      boolean rangerHbasePluginEnabled = isConfigEnabled(cluster,
-        AbstractUpgradeCatalog.CONFIGURATION_TYPE_RANGER_HBASE_PLUGIN_PROPERTIES,
-        AbstractUpgradeCatalog.PROPERTY_RANGER_HBASE_PLUGIN_ENABLED);
-      if (hbaseSite != null && rangerHbasePluginEnabled) {
-        Map<String, String> updates = new HashMap<>();
-        String stackVersion = stackId.getStackVersion();
-        if (VersionUtils.compareVersions(stackVersion, "2.2") == 0) {
-          if (hbaseSite.getProperties().containsKey(HBASE_SITE_HBASE_COPROCESSOR_MASTER_CLASSES)) {
-            updates.put(HBASE_SITE_HBASE_COPROCESSOR_MASTER_CLASSES,
-              "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor");
-          }
-          if (hbaseSite.getProperties().containsKey(HBASE_SITE_HBASE_COPROCESSOR_REGIONSERVER_CLASSES)) {
-            updates.put(HBASE_SITE_HBASE_COPROCESSOR_REGIONSERVER_CLASSES,
-              "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor");
-          }
-          if (hbaseSite.getProperties().containsKey(HBASE_SITE_HBASE_COPROCESSOR_REGION_CLASSES)) {
-            updates.put(HBASE_SITE_HBASE_COPROCESSOR_REGION_CLASSES,
-                "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint," +
-                    "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor");
-          }
-        } else if (VersionUtils.compareVersions(stackVersion, "2.3") == 0) {
-          if (hbaseSite.getProperties().containsKey(HBASE_SITE_HBASE_COPROCESSOR_MASTER_CLASSES)) {
-            updates.put(HBASE_SITE_HBASE_COPROCESSOR_MASTER_CLASSES,
-              "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor ");
-          }
-          if (hbaseSite.getProperties().containsKey(HBASE_SITE_HBASE_COPROCESSOR_REGIONSERVER_CLASSES)) {
-            updates.put(HBASE_SITE_HBASE_COPROCESSOR_REGIONSERVER_CLASSES,
-              "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor");
-          }
-          if (hbaseSite.getProperties().containsKey(HBASE_SITE_HBASE_COPROCESSOR_REGION_CLASSES)) {
-            updates.put(HBASE_SITE_HBASE_COPROCESSOR_REGION_CLASSES,
-              "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint," +
-                "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor");
-          }
-        }
-        if (! updates.isEmpty()) {
-          updateConfigurationPropertiesForCluster(cluster, "hbase-site", updates, true, false);
-        }
-      }
-    }
-  }
-
-  protected void updateStormConfigs() throws  AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(ambariManagementController.getClusters());
-
-    for (final Cluster cluster : clusterMap.values()) {
-      if (cluster.getDesiredConfigByType("storm-site") != null && cluster.getDesiredConfigByType("storm-site").getProperties().containsKey("storm.zookeeper.superACL")
-              && cluster.getDesiredConfigByType("storm-site").getProperties().get("storm.zookeeper.superACL").equals("sasl:{{storm_base_jaas_principal}}")) {
-        Map<String, String> newStormProps = new HashMap<>();
-        newStormProps.put("storm.zookeeper.superACL", "sasl:{{storm_bare_jaas_principal}}");
-        updateConfigurationPropertiesForCluster(cluster, "storm-site", newStormProps, true, false);
-      }
-    }
-  }
-
-  protected void updateAlerts() {
-    LOG.info("Updating alert definitions.");
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    AlertDefinitionDAO alertDefinitionDAO = injector.getInstance(AlertDefinitionDAO.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
-    for (final Cluster cluster : clusterMap.values()) {
-      long clusterID = cluster.getClusterId();
-
-      final AlertDefinitionEntity regionserverHealthSummaryDefinitionEntity = alertDefinitionDAO.findByName(
-              clusterID, "regionservers_health_summary");
-
-      final AlertDefinitionEntity atsWebAlert = alertDefinitionDAO.findByName(
-              clusterID, "yarn_app_timeline_server_webui");
-
-      if (regionserverHealthSummaryDefinitionEntity != null) {
-        alertDefinitionDAO.remove(regionserverHealthSummaryDefinitionEntity);
-      }
-
-      if (atsWebAlert != null) {
-        String source = atsWebAlert.getSource();
-        JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
-
-        JsonObject uriJson = sourceJson.get("uri").getAsJsonObject();
-        uriJson.remove("http");
-        uriJson.remove("https");
-        uriJson.addProperty("http", "{{yarn-site/yarn.timeline-service.webapp.address}}/ws/v1/timeline");
-        uriJson.addProperty("https", "{{yarn-site/yarn.timeline-service.webapp.https.address}}/ws/v1/timeline");
-
-        atsWebAlert.setSource(sourceJson.toString());
-        alertDefinitionDAO.merge(atsWebAlert);
-      }
-
-      //update Atlas alert
-      final AlertDefinitionEntity atlasMetadataServerWebUI = alertDefinitionDAO.findByName(
-              clusterID, "metadata_server_webui");
-      if (atlasMetadataServerWebUI != null) {
-        String source = atlasMetadataServerWebUI.getSource();
-        JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
-
-        JsonObject uriJson = sourceJson.get("uri").getAsJsonObject();
-        uriJson.remove("http");
-        uriJson.remove("https");
-        uriJson.addProperty("http", "{{application-properties/atlas.server.http.port}}");
-        uriJson.addProperty("https", "{{application-properties/atlas.server.https.port}}");
-
-        atlasMetadataServerWebUI.setSource(sourceJson.toString());
-        alertDefinitionDAO.merge(atlasMetadataServerWebUI);
-      }
-
-    }
-
-
-  }
-
-  protected void updateHostRoleCommands() throws SQLException {
-    dbAccessor.createIndex("idx_hrc_status_role", "host_role_command", "status", "role");
-  }
-
-  protected void updateAMSConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-
-          Config amsSite = cluster.getDesiredConfigByType(AMS_SITE);
-          if (amsSite != null) {
-            Map<String, String> amsSiteProperties = amsSite.getProperties();
-            Map<String, String> newProperties = new HashMap<>();
-
-            if (amsSiteProperties.containsKey(AMS_WEBAPP_ADDRESS_PROPERTY)) {
-              Set<String> collectorHostNames = cluster.getHosts(AMS_SERVICE_NAME, AMS_COLLECTOR_COMPONENT_NAME);
-              for (String collector: collectorHostNames) {
-                String currentValue = amsSiteProperties.get(AMS_WEBAPP_ADDRESS_PROPERTY);
-
-                if (currentValue.startsWith("0.0.0.0")) {
-                  newProperties.put(AMS_WEBAPP_ADDRESS_PROPERTY, currentValue.replace("0.0.0.0", collector));
-                } else if (currentValue.startsWith("localhost")) {
-                  newProperties.put(AMS_WEBAPP_ADDRESS_PROPERTY, currentValue.replace("localhost", collector));
-                }
-              }
-            }
-
-            if (amsSiteProperties.containsKey(HOST_AGGREGATOR_DAILY_CHECKPOINTCUTOFFMULTIPLIER_PROPERTY) &&
-              amsSiteProperties.get(HOST_AGGREGATOR_DAILY_CHECKPOINTCUTOFFMULTIPLIER_PROPERTY).equals("1")) {
-
-              LOG.info("Setting value of " + HOST_AGGREGATOR_DAILY_CHECKPOINTCUTOFFMULTIPLIER_PROPERTY + " : 2");
-              newProperties.put(HOST_AGGREGATOR_DAILY_CHECKPOINTCUTOFFMULTIPLIER_PROPERTY, String.valueOf(2));
-
-            }
-
-            if (amsSiteProperties.containsKey(CLUSTER_AGGREGATOR_DAILY_CHECKPOINTCUTOFFMULTIPLIER_PROPERTY) &&
-              amsSiteProperties.get(CLUSTER_AGGREGATOR_DAILY_CHECKPOINTCUTOFFMULTIPLIER_PROPERTY).equals("1")) {
-
-              LOG.info("Setting value of " + CLUSTER_AGGREGATOR_DAILY_CHECKPOINTCUTOFFMULTIPLIER_PROPERTY + " : 2");
-              newProperties.put(CLUSTER_AGGREGATOR_DAILY_CHECKPOINTCUTOFFMULTIPLIER_PROPERTY, String.valueOf(2));
-
-            }
-
-            if (!amsSiteProperties.containsKey(TIMELINE_METRICS_SERVICE_WATCHER_DISABLED_PROPERTY)) {
-              LOG.info("Adding config " + TIMELINE_METRICS_SERVICE_WATCHER_DISABLED_PROPERTY + " = false");
-              newProperties.put(TIMELINE_METRICS_SERVICE_WATCHER_DISABLED_PROPERTY, String.valueOf(false));
-            }
-
-            boolean isDistributed = false;
-            if ("distributed".equals(amsSite.getProperties().get(AMS_MODE_PROPERTY))) {
-              isDistributed = true;
-            }
-
-            if (amsSiteProperties.containsKey(PRECISION_TABLE_TTL_PROPERTY)) {
-              String oldTtl = amsSiteProperties.get(PRECISION_TABLE_TTL_PROPERTY);
-              String newTtl = oldTtl;
-              if (isDistributed) {
-                if ("86400".equals(oldTtl)) {
-                  newTtl = String.valueOf(3 * 86400); // 3 days
-                }
-              }
-              newProperties.put(PRECISION_TABLE_TTL_PROPERTY, newTtl);
-              LOG.info("Setting value of " + PRECISION_TABLE_TTL_PROPERTY + " : " + newTtl);
-            }
-
-            if (amsSiteProperties.containsKey(CLUSTER_SECOND_TABLE_TTL_PROPERTY)) {
-              String oldTtl = amsSiteProperties.get(CLUSTER_SECOND_TABLE_TTL_PROPERTY);
-              String newTtl = oldTtl;
-
-              if ("2592000".equals(oldTtl)) {
-                newTtl = String.valueOf(7 * 86400); // 7 days
-              }
-
-              newProperties.put(CLUSTER_SECOND_TABLE_TTL_PROPERTY, newTtl);
-              LOG.info("Setting value of " + CLUSTER_SECOND_TABLE_TTL_PROPERTY + " : " + newTtl);
-            }
-
-            if (amsSiteProperties.containsKey(CLUSTER_MINUTE_TABLE_TTL_PROPERTY)) {
-              String oldTtl = amsSiteProperties.get(CLUSTER_MINUTE_TABLE_TTL_PROPERTY);
-              String newTtl = oldTtl;
-
-              if ("7776000".equals(oldTtl)) {
-                newTtl = String.valueOf(30 * 86400); // 30 days
-              }
-
-              newProperties.put(CLUSTER_MINUTE_TABLE_TTL_PROPERTY, newTtl);
-              LOG.info("Setting value of " + CLUSTER_MINUTE_TABLE_TTL_PROPERTY + " : " + newTtl);
-            }
-
-            if (!amsSiteProperties.containsKey(TIMELINE_METRICS_CLUSTER_AGGREGATOR_INTERPOLATION_ENABLED)) {
-              LOG.info("Add config  " + TIMELINE_METRICS_CLUSTER_AGGREGATOR_INTERPOLATION_ENABLED + " = true");
-              newProperties.put(TIMELINE_METRICS_CLUSTER_AGGREGATOR_INTERPOLATION_ENABLED, String.valueOf(true));
-            }
-
-            if (!amsSiteProperties.containsKey(TIMELINE_METRICS_SINK_COLLECTION_PERIOD) ||
-              "60".equals(amsSiteProperties.get(TIMELINE_METRICS_SINK_COLLECTION_PERIOD))) {
-
-              newProperties.put(TIMELINE_METRICS_SINK_COLLECTION_PERIOD, "10");
-              LOG.info("Setting value of " + TIMELINE_METRICS_SINK_COLLECTION_PERIOD + " : 10");
-            }
-
-            updateConfigurationPropertiesForCluster(cluster, AMS_SITE, newProperties, true, true);
-          }
-
-          Config amsHbaseSite = cluster.getDesiredConfigByType(AMS_HBASE_SITE);
-          if (amsHbaseSite != null) {
-            Map<String, String> amsHbaseSiteProperties = amsHbaseSite.getProperties();
-            Map<String, String> newProperties = new HashMap<>();
-
-            if (!amsHbaseSiteProperties.containsKey(HBASE_RPC_TIMEOUT_PROPERTY)) {
-              newProperties.put(HBASE_RPC_TIMEOUT_PROPERTY, String.valueOf(300000));
-            }
-
-            if (!amsHbaseSiteProperties.containsKey(PHOENIX_QUERY_KEEPALIVE_PROPERTY)) {
-              newProperties.put(PHOENIX_QUERY_KEEPALIVE_PROPERTY, String.valueOf(300000));
-            }
-
-            if (!amsHbaseSiteProperties.containsKey(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD_PROPERTY) ||
-              amsHbaseSiteProperties.get(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD_PROPERTY).equals("900000")) {
-              newProperties.put(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD_PROPERTY, String.valueOf(300000));
-            }
-
-            if (!amsHbaseSiteProperties.containsKey(PHOENIX_QUERY_TIMEOUT_PROPERTY) ||
-              amsHbaseSiteProperties.get(PHOENIX_QUERY_TIMEOUT_PROPERTY).equals("1200000")) {
-              newProperties.put(PHOENIX_QUERY_TIMEOUT_PROPERTY, String.valueOf(300000));
-            }
-            updateConfigurationPropertiesForCluster(cluster, AMS_HBASE_SITE, newProperties, true, true);
-          }
-
-        }
-      }
-    }
-  }
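-  // For illustration, the TTL adjustments above map the old default values to:
-  //   timeline.metrics.host.aggregator.ttl            86400   -> 259200  (3 days, distributed mode only)
-  //   timeline.metrics.cluster.aggregator.second.ttl  2592000 -> 604800  (7 days)
-  //   timeline.metrics.cluster.aggregator.minute.ttl  7776000 -> 2592000 (30 days)
-  // Non-default TTL values are carried over unchanged.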
-
-  protected void updateHDFSWidgetDefinition() throws AmbariException {
-    LOG.info("Updating HDFS widget definition.");
-
-    Map<String, List<String>> widgetMap = new HashMap<>();
-    Map<String, String> sectionLayoutMap = new HashMap<>();
-
-    List<String> hdfsSummaryWidgets = new ArrayList<>(Arrays.asList("NameNode RPC", "NN Connection Load",
-      "NameNode GC count", "NameNode GC time", "NameNode Host Load"));
-    widgetMap.put("HDFS_SUMMARY", hdfsSummaryWidgets);
-    sectionLayoutMap.put("HDFS_SUMMARY", "default_hdfs_dashboard");
-
-    List<String> hdfsHeatmapWidgets = new ArrayList<>(Arrays.asList("HDFS Bytes Read", "HDFS Bytes Written",
-      "DataNode Process Disk I/O Utilization", "DataNode Process Network I/O Utilization"));
-    widgetMap.put("HDFS_HEATMAPS", hdfsHeatmapWidgets);
-    sectionLayoutMap.put("HDFS_HEATMAPS", "default_hdfs_heatmap");
-
-    updateWidgetDefinitionsForService("HDFS", widgetMap, sectionLayoutMap);
-  }
-
-  protected void updateYARNWidgetDefinition() throws AmbariException {
-    LOG.info("Updating YARN widget definition.");
-
-    Map<String, List<String>> widgetMap = new HashMap<>();
-    Map<String, String> sectionLayoutMap = new HashMap<>();
-
-    List<String> yarnSummaryWidgets = new ArrayList<>(Arrays.asList("Container Failures", "App Failures", "Cluster Memory"));
-    widgetMap.put("YARN_SUMMARY", yarnSummaryWidgets);
-    sectionLayoutMap.put("YARN_SUMMARY", "default_yarn_dashboard");
-
-    List<String> yarnHeatmapWidgets = new ArrayList<>(Arrays.asList("Container Failures"));
-    widgetMap.put("YARN_HEATMAPS", yarnHeatmapWidgets);
-    sectionLayoutMap.put("YARN_HEATMAPS", "default_yarn_heatmap");
-
-    updateWidgetDefinitionsForService("YARN", widgetMap, sectionLayoutMap);
-
-  }
-
-  protected void updateHBASEWidgetDefinition() throws AmbariException {
-
-    LOG.info("Updating HBASE widget definition.");
-
-    Map<String, List<String>> widgetMap = new HashMap<>();
-    Map<String, String> sectionLayoutMap = new HashMap<>();
-
-    List<String> hbaseSummaryWidgets = new ArrayList<>(Arrays.asList("Reads and Writes", "Blocked Updates"));
-    widgetMap.put("HBASE_SUMMARY", hbaseSummaryWidgets);
-    sectionLayoutMap.put("HBASE_SUMMARY", "default_hbase_dashboard");
-
-    updateWidgetDefinitionsForService("HBASE", widgetMap, sectionLayoutMap);
-  }
-
-
-  protected void updateHbaseEnvConfig() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config hbaseEnvConfig = cluster.getDesiredConfigByType(HBASE_ENV_CONFIG);
-      if (hbaseEnvConfig != null) {
-        Map<String, String> updates = getUpdatedHbaseEnvProperties(hbaseEnvConfig.getProperties().get(CONTENT_PROPERTY));
-        if (!updates.isEmpty()) {
-          updateConfigurationPropertiesForCluster(cluster, HBASE_ENV_CONFIG, updates, true, false);
-        }
-
-      }
-    }
-  }
-
-  protected Map<String, String> getUpdatedHbaseEnvProperties(String content) {
-    if (content != null) {
-      //Fix bad config added in Upgrade 2.2.0.
-      String badConfig = "export HBASE_OPTS=\"-Djava.io.tmpdir={{java_io_tmpdir}}\"";
-      String correctConfig = "export HBASE_OPTS=\"${HBASE_OPTS} -Djava.io.tmpdir={{java_io_tmpdir}}\"";
-
-      if (content.contains(badConfig)) {
-        content = content.replace(badConfig, correctConfig);
-        return Collections.singletonMap(CONTENT_PROPERTY, content);
-      }
-    }
-    return Collections.emptyMap();
-  }
-
-  @Override
-  protected void updateWidgetDefinitionsForService(String serviceName, Map<String, List<String>> widgetMap,
-                                                 Map<String, String> sectionLayoutMap) throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    AmbariMetaInfo ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
-    Type widgetLayoutType = new TypeToken<Map<String, List<WidgetLayout>>>(){}.getType();
-    Gson gson = injector.getInstance(Gson.class);
-    WidgetDAO widgetDAO = injector.getInstance(WidgetDAO.class);
-
-    Clusters clusters = ambariManagementController.getClusters();
-
-
-
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
-    for (final Cluster cluster : clusterMap.values()) {
-      long clusterID = cluster.getClusterId();
-
-
-      Set<StackId> stackIds = new HashSet<>();
-      for (Service service : cluster.getServices().values()) {
-        StackId stackId = service.getDesiredStackId();
-        if (stackIds.contains(stackId)) {
-          continue;
-        } else {
-          stackIds.add(stackId);
-        }
-
-        Map<String, Object> widgetDescriptor = null;
-        StackInfo stackInfo = ambariMetaInfo.getStack(stackId.getStackName(), stackId.getStackVersion());
-        ServiceInfo serviceInfo = stackInfo.getService(serviceName);
-        if (serviceInfo == null) {
-          LOG.info("Skipping updating widget definition, because " + serviceName +  " service is not present in cluster " +
-            "cluster_name= " + cluster.getClusterName());
-          continue;
-        }
-
-        for (String section : widgetMap.keySet()) {
-          List<String> widgets = widgetMap.get(section);
-          for (String widgetName : widgets) {
-            List<WidgetEntity> widgetEntities = widgetDAO.findByName(clusterID,
-              widgetName, "ambari", section);
-
-            if (widgetEntities != null && widgetEntities.size() > 0) {
-              WidgetEntity entityToUpdate = null;
-              if (widgetEntities.size() > 1) {
-                LOG.info("Found more that 1 entity with name = "+ widgetName +
-                  " for cluster = " + cluster.getClusterName() + ", skipping update.");
-              } else {
-                entityToUpdate = widgetEntities.iterator().next();
-              }
-              if (entityToUpdate != null) {
-                LOG.info("Updating widget: " + entityToUpdate.getWidgetName());
-                // Get the definition from widgets.json file
-                WidgetLayoutInfo targetWidgetLayoutInfo = null;
-                File widgetDescriptorFile = serviceInfo.getWidgetsDescriptorFile();
-                if (widgetDescriptorFile != null && widgetDescriptorFile.exists()) {
-                  try {
-                    widgetDescriptor = gson.fromJson(new FileReader(widgetDescriptorFile), widgetLayoutType);
-                  } catch (Exception ex) {
-                    String msg = "Error loading widgets from file: " + widgetDescriptorFile;
-                    LOG.error(msg, ex);
-                    widgetDescriptor = null;
-                  }
-                }
-                if (widgetDescriptor != null) {
-                  LOG.debug("Loaded widget descriptor: {}", widgetDescriptor);
-                  for (Object artifact : widgetDescriptor.values()) {
-                    List<WidgetLayout> widgetLayouts = (List<WidgetLayout>) artifact;
-                    for (WidgetLayout widgetLayout : widgetLayouts) {
-                      if (widgetLayout.getLayoutName().equals(sectionLayoutMap.get(section))) {
-                        for (WidgetLayoutInfo layoutInfo : widgetLayout.getWidgetLayoutInfoList()) {
-                          if (layoutInfo.getWidgetName().equals(widgetName)) {
-                            targetWidgetLayoutInfo = layoutInfo;
-                          }
-                        }
-                      }
-                    }
-                  }
-                }
-                if (targetWidgetLayoutInfo != null) {
-                  entityToUpdate.setMetrics(gson.toJson(targetWidgetLayoutInfo.getMetricsInfo()));
-                  entityToUpdate.setWidgetValues(gson.toJson(targetWidgetLayoutInfo.getValues()));
-                  if ("HBASE".equals(serviceName) && "Reads and Writes".equals(widgetName)) {
-                    entityToUpdate.setDescription(targetWidgetLayoutInfo.getDescription());
-                    LOG.info("Update description for HBase Reads and Writes widget");
-                  }
-                  widgetDAO.merge(entityToUpdate);
-                } else {
-                  LOG.warn("Unable to find widget layout info for " + widgetName +
-                    " in the stack: " + stackId);
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-
-  protected void updateHiveConfig() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config hiveSiteConfig = cluster.getDesiredConfigByType(HIVE_SITE_CONFIG);
-      Config atlasConfig = cluster.getDesiredConfigByType(ATLAS_APPLICATION_PROPERTIES_CONFIG);
-
-      Service service = cluster.getServices().get("ATLAS");
-
-      if (null == service) {
-        continue;
-      }
-
-      StackId stackId = service.getDesiredStackId();
-
-      boolean isStackNotLess23 = (stackId != null && stackId.getStackName().equals("HDP") &&
-        VersionUtils.compareVersions(stackId.getStackVersion(), "2.3") >= 0);
-
-      List<ServiceComponentHost> atlasHost = cluster.getServiceComponentHosts("ATLAS", "ATLAS_SERVER");
-      Map<String, String> updates = new HashMap<>();
-
-      if (isHdp23OrNewer && !atlasHost.isEmpty() && hiveSiteConfig != null) {
-
-        updates.put(ATLAS_HOOK_HIVE_MINTHREADS_PROPERTY, "1");
-        updates.put(ATLAS_HOOK_HIVE_MAXTHREADS_PROPERTY, "1");
-        updates.put(ATLAS_CLUSTER_NAME_PROPERTY, "primary");
-
-        if (atlasConfig != null && atlasConfig.getProperties().containsKey(ATLAS_ENABLETLS_PROPERTY)) {
-          String atlasEnableTLSProperty = atlasConfig.getProperties().get(ATLAS_ENABLETLS_PROPERTY);
-          String atlasScheme = "http";
-          String atlasServerHttpPortProperty = atlasConfig.getProperties().get(ATLAS_SERVER_HTTP_PORT_PROPERTY);
-          if (atlasEnableTLSProperty.equalsIgnoreCase("true")) {
-            atlasServerHttpPortProperty = atlasConfig.getProperties().get(ATLAS_SERVER_HTTPS_PORT_PROPERTY);
-            atlasScheme = "https";
-          }
-          updates.put(ATLAS_REST_ADDRESS_PROPERTY, String.format("%s://%s:%s", atlasScheme, atlasHost.get(0).getHostName(), atlasServerHttpPortProperty));
-        }
-        updateConfigurationPropertiesForCluster(cluster, HIVE_SITE_CONFIG, updates, false, false);
-      }
-    }
-  }
-
-  protected void updateCorruptedReplicaWidget() throws SQLException {
-    String widgetValues = String.format("[{\"name\": \"%s\", \"value\": \"%s\"}]",
-      WIDGET_CORRUPT_REPLICAS, WIDGET_VALUES_VALUE);
-    String updateStatement = "UPDATE %s SET %s='%s', %s='%s', %s='%s' WHERE %s='%s'";
-
-    LOG.info("Update widget definition for HDFS corrupted blocks metric");
-    dbAccessor.executeUpdate(String.format(updateStatement,
-      WIDGET_TABLE,
-      WIDGET_NAME, WIDGET_CORRUPT_REPLICAS,
-      WIDGET_DESCRIPTION, WIDGET_CORRUPT_REPLICAS_DESCRIPTION,
-      WIDGET_VALUES, widgetValues,
-      WIDGET_NAME, WIDGET_CORRUPT_BLOCKS
-    ));
-  }
-
-  /**
-   * Updates the {@value #UPGRADE_TABLE} in the following ways:
-   * <ul>
-   * <li>{@link #UPGRADE_SUSPENDED_COLUMN} is added</li>
-   * </ul>
-   *
-   * @throws AmbariException if an Ambari-level error occurs
-   * @throws SQLException if a database access error occurs
-   */
-  protected void updateUpgradeTable() throws AmbariException, SQLException {
-    dbAccessor.addColumn(UPGRADE_TABLE,
-      new DBAccessor.DBColumnInfo(UPGRADE_SUSPENDED_COLUMN, Short.class, 1, 0, false));
-  }
-
-  /**
-   * Copies cluster and service widgets for Storm and Kafka from the stack to the DB.
-   */
-  protected void initializeStormAndKafkaWidgets() throws AmbariException {
-    AmbariManagementController controller = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = controller.getClusters();
-    if (clusters == null) {
-      return;
-    }
-
-    Map<String, Cluster> clusterMap = clusters.getClusters();
-
-    if (clusterMap != null && !clusterMap.isEmpty()) {
-      for (Cluster cluster : clusterMap.values()) {
-
-        Map<String, Service> serviceMap = cluster.getServices();
-        if (serviceMap != null && !serviceMap.isEmpty()) {
-          for (Service service : serviceMap.values()) {
-            if ("STORM".equals(service.getName()) || "KAFKA".equals(service.getName())) {
-              controller.initializeWidgetsAndLayouts(cluster, service);
-            }
-          }
-        }
-      }
-    }
-  }
-
-}
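The catalogs removed by this commit follow a common template: declare the source and
target versions, apply schema (DDL) changes, then apply data (DML) fix-ups. The sketch
below is a minimal, hypothetical example of that shape, using only the
AbstractUpgradeCatalog and DBAccessor APIs already visible in this diff; the class name
ExampleUpgradeCatalog, the version strings, and the table and column names are
illustrative placeholders, not Ambari source.

package org.apache.ambari.server.upgrade;

import java.sql.SQLException;

import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.orm.DBAccessor;

import com.google.inject.Inject;
import com.google.inject.Injector;

public class ExampleUpgradeCatalog extends AbstractUpgradeCatalog {

  @Inject
  public ExampleUpgradeCatalog(Injector injector) {
    super(injector);
  }

  @Override
  public String getSourceVersion() {
    return "2.5.0"; // hypothetical version pair, for illustration only
  }

  @Override
  public String getTargetVersion() {
    return "2.6.0";
  }

  @Override
  protected void executeDDLUpdates() throws AmbariException, SQLException {
    // Schema changes run first, mirroring updateUpgradeTable() above:
    // a short column with default 0, not nullable.
    dbAccessor.addColumn("example_table",
        new DBAccessor.DBColumnInfo("example_column", Short.class, 1, 0, false));
  }

  @Override
  protected void executePreDMLUpdates() throws AmbariException, SQLException {
  }

  @Override
  protected void executeDMLUpdates() throws AmbariException, SQLException {
    // Data fix-ups run only after the new schema is in place.
  }
}

As the constructor javadoc of the deleted UpgradeCatalog230 below notes, any such
catalog must also be registered in SchemaUpgradeHelper.UpgradeHelperModule#configure().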

http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog230.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog230.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog230.java
deleted file mode 100644
index a53ac95..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog230.java
+++ /dev/null
@@ -1,402 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
-import org.apache.ambari.server.orm.dao.PermissionDAO;
-import org.apache.ambari.server.orm.dao.ResourceTypeDAO;
-import org.apache.ambari.server.orm.dao.RoleAuthorizationDAO;
-import org.apache.ambari.server.orm.entities.PermissionEntity;
-import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
-import org.apache.ambari.server.orm.entities.RoleAuthorizationEntity;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-
-/**
- * Upgrade catalog for version 2.3.0.
- */
-public class UpgradeCatalog230 extends AbstractUpgradeCatalog {
-  private static final String HOST_ROLE_COMMAND_TABLE = "host_role_command";
-  private static final String USERS_TABLE = "users";
-
-  private static final String HOST_ID_COL = "host_id";
-  private static final String USER_TYPE_COL = "user_type";
-
-  private static final String ADMIN_PERMISSION_TABLE = "adminpermission";
-  private static final String PERMISSION_ID_COL = "permission_id";
-  private static final String PERMISSION_NAME_COL = "permission_name";
-  private static final String PERMISSION_LABEL_COL = "permission_label";
-
-  private static final String ROLE_AUTHORIZATION_TABLE = "roleauthorization";
-  private static final String PERMISSION_ROLE_AUTHORIZATION_TABLE = "permission_roleauthorization";
-  private static final String ROLE_AUTHORIZATION_ID_COL = "authorization_id";
-  private static final String ROLE_AUTHORIZATION_NAME_COL = "authorization_name";
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getSourceVersion() {
-    return "2.2.1";
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getTargetVersion() {
-    return "2.3.0";
-  }
-
-  /**
-   * Logger.
-   */
-  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog230.class);
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Don't forget to register new UpgradeCatalogs in {@link org.apache.ambari.server.upgrade.SchemaUpgradeHelper.UpgradeHelperModule#configure()}
-   * @param injector Guice injector used to track dependencies and inject bound instances.
-   */
-  @Inject
-  public UpgradeCatalog230(Injector injector) {
-    super(injector);
-    this.injector = injector;
-  }
-
-  // ----- AbstractUpgradeCatalog --------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDDLUpdates() throws AmbariException, SQLException {
-
-    dbAccessor.alterColumn(HOST_ROLE_COMMAND_TABLE, new DBColumnInfo(HOST_ID_COL, Long.class, null, null, true));
-    dbAccessor.addColumn(USERS_TABLE, new DBColumnInfo(USER_TYPE_COL, String.class, null, "LOCAL", true));
-
-    dbAccessor.executeQuery("UPDATE users SET user_type='LDAP' WHERE ldap_user=1");
-
-    dbAccessor.addUniqueConstraint(USERS_TABLE, "UNQ_users_0", "user_name", "user_type");
-
-    updateAdminPermissionTable();
-    createRoleAuthorizationTables();
-  }
-
-  @Override
-  protected void executePreDMLUpdates() throws AmbariException, SQLException {
-  }
-
-  @Override
-  protected void executeDMLUpdates() throws AmbariException, SQLException {
-    setPermissionLabels();
-    updatePermissionNames();
-    addNewPermissions();
-    createRoleAuthorizations();
-    createPermissionRoleAuthorizationMap();
-  }
-
-  private void addNewPermissions() throws SQLException {
-    LOG.info("Adding new permissions: CLUSTER.OPERATOR, SERVICE.ADMINISTRATOR, SERVICE.OPERATOR");
-
-    PermissionDAO permissionDAO = injector.getInstance(PermissionDAO.class);
-    ResourceTypeDAO resourceTypeDAO = injector.getInstance(ResourceTypeDAO.class);
-    ResourceTypeEntity clusterResourceTypeEntity = resourceTypeDAO.findByName("CLUSTER");
-
-    // CLUSTER.OPERATOR: Cluster Operator
-    if (permissionDAO.findPermissionByNameAndType("CLUSTER.OPERATOR", clusterResourceTypeEntity) == null) {
-      PermissionEntity permissionEntity = new PermissionEntity();
-      permissionEntity.setId(null);
-      permissionEntity.setPermissionName("CLUSTER.OPERATOR");
-      permissionEntity.setPermissionLabel("Cluster Operator");
-      permissionEntity.setResourceType(clusterResourceTypeEntity);
-      permissionDAO.create(permissionEntity);
-    }
-
-    // SERVICE.ADMINISTRATOR: Service Administrator
-    if (permissionDAO.findPermissionByNameAndType("SERVICE.ADMINISTRATOR", clusterResourceTypeEntity) == null) {
-      PermissionEntity permissionEntity = new PermissionEntity();
-      permissionEntity.setId(null);
-      permissionEntity.setPermissionName("SERVICE.ADMINISTRATOR");
-      permissionEntity.setPermissionLabel("Service Administrator");
-      permissionEntity.setResourceType(clusterResourceTypeEntity);
-      permissionDAO.create(permissionEntity);
-    }
-
-    // SERVICE.OPERATOR: Service Operator
-    if (permissionDAO.findPermissionByNameAndType("SERVICE.OPERATOR", clusterResourceTypeEntity) == null) {
-      PermissionEntity permissionEntity = new PermissionEntity();
-      permissionEntity.setId(null);
-      permissionEntity.setPermissionName("SERVICE.OPERATOR");
-      permissionEntity.setPermissionLabel("Service Operator");
-      permissionEntity.setResourceType(clusterResourceTypeEntity);
-      permissionDAO.create(permissionEntity);
-    }
-  }
-
-
-  private void createRoleAuthorizations() throws SQLException {
-    LOG.info("Adding authorizations");
-
-    RoleAuthorizationDAO roleAuthorizationDAO = injector.getInstance(RoleAuthorizationDAO.class);
-
-    createRoleAuthorization(roleAuthorizationDAO, "VIEW.USE", "Use View");
-
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.VIEW_METRICS", "View metrics");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.VIEW_STATUS_INFO", "View status information");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.VIEW_CONFIGS", "View configurations");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.COMPARE_CONFIGS", "Compare configurations");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.VIEW_ALERTS", "View service-level alerts");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.START_STOP", "Start/Stop/Restart Service");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.DECOMMISSION_RECOMMISSION", "Decommission/recommission");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.RUN_SERVICE_CHECK", "Run service checks");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.TOGGLE_MAINTENANCE", "Turn on/off maintenance mode");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.RUN_CUSTOM_COMMAND", "Perform service-specific tasks");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.MODIFY_CONFIGS", "Modify configurations");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.MANAGE_CONFIG_GROUPS", "Manage configuration groups");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.MANAGE_ALERTS", "Manage service-level alerts");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.MOVE", "Move to another host");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.ENABLE_HA", "Enable HA");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.TOGGLE_ALERTS", "Enable/disable service-level alerts");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.ADD_DELETE_SERVICES", "Add/delete services");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.SET_SERVICE_USERS_GROUPS", "Set service users and groups");
-
-    createRoleAuthorization(roleAuthorizationDAO, "HOST.VIEW_METRICS", "View metrics");
-    createRoleAuthorization(roleAuthorizationDAO, "HOST.VIEW_STATUS_INFO", "View status information");
-    createRoleAuthorization(roleAuthorizationDAO, "HOST.VIEW_CONFIGS", "View configuration");
-    createRoleAuthorization(roleAuthorizationDAO, "HOST.TOGGLE_MAINTENANCE", "Turn on/off maintenance mode");
-    createRoleAuthorization(roleAuthorizationDAO, "HOST.ADD_DELETE_COMPONENTS", "Install components");
-    createRoleAuthorization(roleAuthorizationDAO, "HOST.ADD_DELETE_HOSTS", "Add/Delete hosts");
-
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.VIEW_METRICS", "View metrics");
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.VIEW_STATUS_INFO", "View status information");
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.VIEW_CONFIGS", "View configuration");
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.VIEW_STACK_DETAILS", "View stack version details");
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.VIEW_ALERTS", "View cluster-level alerts");
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.MANAGE_CREDENTIALS", "Manage external credentials");
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.MODIFY_CONFIGS", "Modify cluster configurations");
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.MANAGE_CONFIG_GROUPS", "Manage cluster configuration groups");
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.MANAGE_ALERTS", "Manage cluster-level alerts");
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.TOGGLE_ALERTS", "Enable/disable cluster-level alerts");
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.TOGGLE_KERBEROS", "Enable/disable Kerberos");
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.UPGRADE_DOWNGRADE_STACK", "Upgrade/downgrade stack");
-
-    createRoleAuthorization(roleAuthorizationDAO, "AMBARI.ADD_DELETE_CLUSTERS", "Create new clusters");
-    createRoleAuthorization(roleAuthorizationDAO, "AMBARI.RENAME_CLUSTER", "Rename clusters");
-    createRoleAuthorization(roleAuthorizationDAO, "AMBARI.MANAGE_USERS", "Manage users");
-    createRoleAuthorization(roleAuthorizationDAO, "AMBARI.MANAGE_GROUPS", "Manage groups");
-    createRoleAuthorization(roleAuthorizationDAO, "AMBARI.MANAGE_VIEWS", "Manage Ambari Views");
-    createRoleAuthorization(roleAuthorizationDAO, "AMBARI.ASSIGN_ROLES", "Assign roles");
-    createRoleAuthorization(roleAuthorizationDAO, "AMBARI.MANAGE_STACK_VERSIONS", "Manage stack versions");
-    createRoleAuthorization(roleAuthorizationDAO, "AMBARI.EDIT_STACK_REPOS", "Edit stack repository URLs");
-  }
-
-  private void createRoleAuthorization(RoleAuthorizationDAO roleAuthorizationDAO, String id, String name) {
-    if (roleAuthorizationDAO.findById(id) == null) {
-      RoleAuthorizationEntity roleAuthorizationEntity = new RoleAuthorizationEntity();
-      roleAuthorizationEntity.setAuthorizationId(id);
-      roleAuthorizationEntity.setAuthorizationName(name);
-      roleAuthorizationDAO.create(roleAuthorizationEntity);
-    }
-  }
-
-  private void createPermissionRoleAuthorizationMap() throws SQLException {
-    LOG.info("Creating permission to authorizations map");
-
-    // Determine the role entities
-    PermissionDAO permissionDAO = injector.getInstance(PermissionDAO.class);
-    ResourceTypeDAO resourceTypeDAO = injector.getInstance(ResourceTypeDAO.class);
-
-    ResourceTypeEntity ambariResource = resourceTypeDAO.findByName("AMBARI");
-    ResourceTypeEntity clusterResource = resourceTypeDAO.findByName("CLUSTER");
-    ResourceTypeEntity viewResource = resourceTypeDAO.findByName("VIEW");
-
-    PermissionEntity viewPermission = permissionDAO.findPermissionByNameAndType("VIEW.USER", viewResource);
-    PermissionEntity administratorPermission = permissionDAO.findPermissionByNameAndType("AMBARI.ADMINISTRATOR", ambariResource);
-    PermissionEntity clusterUserPermission = permissionDAO.findPermissionByNameAndType("CLUSTER.USER", clusterResource);
-    PermissionEntity clusterOperatorPermission = permissionDAO.findPermissionByNameAndType("CLUSTER.OPERATOR", clusterResource);
-    PermissionEntity clusterAdministratorPermission = permissionDAO.findPermissionByNameAndType("CLUSTER.ADMINISTRATOR", clusterResource);
-    PermissionEntity serviceAdministratorPermission = permissionDAO.findPermissionByNameAndType("SERVICE.ADMINISTRATOR", clusterResource);
-    PermissionEntity serviceOperatorPermission = permissionDAO.findPermissionByNameAndType("SERVICE.OPERATOR", clusterResource);
-
-    // Create role groups
-    Collection<PermissionEntity> viewUserAndAdministrator = Arrays.asList(viewPermission, administratorPermission);
-    Collection<PermissionEntity> clusterUserAndUp = Arrays.asList(
-        clusterUserPermission,
-        serviceOperatorPermission,
-        serviceAdministratorPermission,
-        clusterOperatorPermission,
-        clusterAdministratorPermission,
-        administratorPermission);
-    Collection<PermissionEntity> serviceOperatorAndUp = Arrays.asList(
-        serviceOperatorPermission,
-        serviceAdministratorPermission,
-        clusterOperatorPermission,
-        clusterAdministratorPermission,
-        administratorPermission);
-    Collection<PermissionEntity> serviceAdministratorAndUp = Arrays.asList(
-        serviceAdministratorPermission,
-        clusterOperatorPermission,
-        clusterAdministratorPermission,
-        administratorPermission);
-    Collection<PermissionEntity> clusterOperatorAndUp = Arrays.asList(
-        clusterOperatorPermission,
-        clusterAdministratorPermission,
-        administratorPermission);
-    Collection<PermissionEntity> clusterAdministratorAndUp = Arrays.asList(
-        clusterAdministratorPermission,
-        administratorPermission);
-    Collection<PermissionEntity> administratorOnly = Collections.singleton(administratorPermission);
-
-    // A map of the authorizations to the relevant roles
-    Map<String, Collection<PermissionEntity>> map = new HashMap<>();
-    map.put("VIEW.USE", viewUserAndAdministrator);
-    map.put("SERVICE.VIEW_METRICS", clusterUserAndUp);
-    map.put("SERVICE.VIEW_STATUS_INFO", clusterUserAndUp);
-    map.put("SERVICE.VIEW_CONFIGS", clusterUserAndUp);
-    map.put("SERVICE.COMPARE_CONFIGS", clusterUserAndUp);
-    map.put("SERVICE.VIEW_ALERTS", clusterUserAndUp);
-    map.put("SERVICE.START_STOP", serviceOperatorAndUp);
-    map.put("SERVICE.DECOMMISSION_RECOMMISSION", serviceOperatorAndUp);
-    map.put("SERVICE.RUN_SERVICE_CHECK", serviceOperatorAndUp);
-    map.put("SERVICE.TOGGLE_MAINTENANCE", serviceOperatorAndUp);
-    map.put("SERVICE.RUN_CUSTOM_COMMAND", serviceOperatorAndUp);
-    map.put("SERVICE.MODIFY_CONFIGS", serviceAdministratorAndUp);
-    map.put("SERVICE.MANAGE_CONFIG_GROUPS", serviceAdministratorAndUp);
-    map.put("CLUSTER.MANAGE_CONFIG_GROUPS", serviceAdministratorAndUp);
-    map.put("SERVICE.MANAGE_ALERTS", serviceAdministratorAndUp);
-    map.put("SERVICE.MOVE", serviceAdministratorAndUp);
-    map.put("SERVICE.ENABLE_HA", serviceAdministratorAndUp);
-    map.put("SERVICE.TOGGLE_ALERTS", serviceAdministratorAndUp);
-    map.put("SERVICE.ADD_DELETE_SERVICES", clusterAdministratorAndUp);
-    map.put("SERVICE.SET_SERVICE_USERS_GROUPS", clusterAdministratorAndUp);
-    map.put("HOST.VIEW_METRICS", clusterUserAndUp);
-    map.put("HOST.VIEW_STATUS_INFO", clusterUserAndUp);
-    map.put("HOST.VIEW_CONFIGS", clusterUserAndUp);
-    map.put("HOST.TOGGLE_MAINTENANCE", clusterOperatorAndUp);
-    map.put("HOST.ADD_DELETE_COMPONENTS", clusterOperatorAndUp);
-    map.put("HOST.ADD_DELETE_HOSTS", clusterOperatorAndUp);
-    map.put("CLUSTER.VIEW_METRICS", clusterUserAndUp);
-    map.put("CLUSTER.VIEW_STATUS_INFO", clusterUserAndUp);
-    map.put("CLUSTER.VIEW_CONFIGS", clusterUserAndUp);
-    map.put("CLUSTER.VIEW_STACK_DETAILS", clusterUserAndUp);
-    map.put("CLUSTER.VIEW_ALERTS", clusterUserAndUp);
-    map.put("CLUSTER.MANAGE_CREDENTIALS", clusterAdministratorAndUp);
-    map.put("CLUSTER.MODIFY_CONFIGS", clusterAdministratorAndUp);
-    map.put("CLUSTER.MANAGE_ALERTS", clusterAdministratorAndUp);
-    map.put("CLUSTER.TOGGLE_ALERTS", clusterAdministratorAndUp);
-    map.put("CLUSTER.TOGGLE_KERBEROS", clusterAdministratorAndUp);
-    map.put("CLUSTER.UPGRADE_DOWNGRADE_STACK", clusterAdministratorAndUp);
-    map.put("AMBARI.ADD_DELETE_CLUSTERS", administratorOnly);
-    map.put("AMBARI.RENAME_CLUSTER", administratorOnly);
-    map.put("AMBARI.MANAGE_USERS", administratorOnly);
-    map.put("AMBARI.MANAGE_GROUPS", administratorOnly);
-    map.put("AMBARI.MANAGE_VIEWS", administratorOnly);
-    map.put("AMBARI.ASSIGN_ROLES", administratorOnly);
-    map.put("AMBARI.MANAGE_STACK_VERSIONS", administratorOnly);
-    map.put("AMBARI.EDIT_STACK_REPOS", administratorOnly);
-
-    // Iterate over the map of authorizations to roles, and for each authorization add a
-    // record linking it to every role that grants it
-    for (Map.Entry<String, Collection<PermissionEntity>> entry : map.entrySet()) {
-      String authorizationId = entry.getKey();
-
-      for (PermissionEntity permission : entry.getValue()) {
-        addAuthorizationToRole(permission, authorizationId);
-      }
-    }
-  }
-
-
-  // ----- UpgradeCatalog ----------------------------------------------------
-
-  private void updateAdminPermissionTable() throws SQLException {
-    // Add the permission_label column to the adminpermission table
-    dbAccessor.addColumn(ADMIN_PERMISSION_TABLE, new DBColumnInfo(PERMISSION_LABEL_COL, String.class, 255, null, true));
-  }
-
-  private void createRoleAuthorizationTables() throws SQLException {
-
-    ArrayList<DBColumnInfo> columns;
-
-    //  Add roleauthorization table
-    LOG.info("Creating " + ROLE_AUTHORIZATION_TABLE + " table");
-    columns = new ArrayList<>();
-    columns.add(new DBColumnInfo(ROLE_AUTHORIZATION_ID_COL, String.class, 100, null, false));
-    columns.add(new DBColumnInfo(ROLE_AUTHORIZATION_NAME_COL, String.class, 255, null, false));
-    dbAccessor.createTable(ROLE_AUTHORIZATION_TABLE, columns, ROLE_AUTHORIZATION_ID_COL);
-
-    //  Add permission_roleauthorization table to map roleauthorizations to permissions (aka roles)
-    LOG.info("Creating " + PERMISSION_ROLE_AUTHORIZATION_TABLE + " table");
-    columns = new ArrayList<>();
-    columns.add(new DBColumnInfo(PERMISSION_ID_COL, Long.class, null, null, false));
-    columns.add(new DBColumnInfo(ROLE_AUTHORIZATION_ID_COL, String.class, 100, null, false));
-    dbAccessor.createTable(PERMISSION_ROLE_AUTHORIZATION_TABLE, columns, PERMISSION_ID_COL, ROLE_AUTHORIZATION_ID_COL);
-
-    dbAccessor.addFKConstraint(PERMISSION_ROLE_AUTHORIZATION_TABLE, "FK_permission_roleauth_pid",
-        PERMISSION_ID_COL, ADMIN_PERMISSION_TABLE, PERMISSION_ID_COL, false);
-
-    dbAccessor.addFKConstraint(PERMISSION_ROLE_AUTHORIZATION_TABLE, "FK_permission_roleauth_aid",
-        ROLE_AUTHORIZATION_ID_COL, ROLE_AUTHORIZATION_TABLE, ROLE_AUTHORIZATION_ID_COL, false);
-  }
-
-  private void setPermissionLabels() throws SQLException {
-    String updateStatement = "UPDATE " + ADMIN_PERMISSION_TABLE + " SET " + PERMISSION_LABEL_COL + "='%s' WHERE " + PERMISSION_ID_COL + "=%d";
-
-    LOG.info("Setting permission labels");
-    dbAccessor.executeUpdate(String.format(updateStatement,
-        "Ambari Administrator", PermissionEntity.AMBARI_ADMINISTRATOR_PERMISSION));
-    dbAccessor.executeUpdate(String.format(updateStatement,
-        "Cluster User", PermissionEntity.CLUSTER_USER_PERMISSION));
-    dbAccessor.executeUpdate(String.format(updateStatement,
-        "Cluster Administrator", PermissionEntity.CLUSTER_ADMINISTRATOR_PERMISSION));
-    dbAccessor.executeUpdate(String.format(updateStatement,
-        "View User", PermissionEntity.VIEW_USER_PERMISSION));
-  }
-
-  private void updatePermissionNames() throws SQLException {
-    String updateStatement = "UPDATE " + ADMIN_PERMISSION_TABLE + " SET " + PERMISSION_NAME_COL + "='%s' WHERE " + PERMISSION_ID_COL + "=%d";
-
-    // Update permissions names
-    LOG.info("Updating permission names");
-    dbAccessor.executeUpdate(String.format(updateStatement,
-        PermissionEntity.AMBARI_ADMINISTRATOR_PERMISSION_NAME, PermissionEntity.AMBARI_ADMINISTRATOR_PERMISSION));
-    dbAccessor.executeUpdate(String.format(updateStatement,
-        PermissionEntity.CLUSTER_USER_PERMISSION_NAME, PermissionEntity.CLUSTER_USER_PERMISSION));
-    dbAccessor.executeUpdate(String.format(updateStatement,
-        PermissionEntity.CLUSTER_ADMINISTRATOR_PERMISSION_NAME, PermissionEntity.CLUSTER_ADMINISTRATOR_PERMISSION));
-    dbAccessor.executeUpdate(String.format(updateStatement,
-        PermissionEntity.VIEW_USER_PERMISSION_NAME, PermissionEntity.VIEW_USER_PERMISSION));
-  }
-
-}
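The hand-assembled "role X and up" collections in createPermissionRoleAuthorizationMap()
repeat one strict role ordering across several lists. As a design note, the same groups
can be derived from a single ordered ladder; the plain-Java sketch below (not Ambari
source; class and variable names are illustrative) shows the idea, with role names
standing in for the PermissionEntity lookups.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class RoleLadderSketch {
  public static void main(String[] args) {
    // Weakest to strongest, matching the ordering implied by the collections above.
    List<String> ladder = Arrays.asList(
        "CLUSTER.USER",
        "SERVICE.OPERATOR",
        "SERVICE.ADMINISTRATOR",
        "CLUSTER.OPERATOR",
        "CLUSTER.ADMINISTRATOR",
        "AMBARI.ADMINISTRATOR");

    // "Role X and up" is simply the suffix of the ladder that starts at X.
    Map<String, List<String>> andUp = new HashMap<>();
    for (int i = 0; i < ladder.size(); i++) {
      andUp.put(ladder.get(i), ladder.subList(i, ladder.size()));
    }

    // Prints [SERVICE.ADMINISTRATOR, CLUSTER.OPERATOR, CLUSTER.ADMINISTRATOR,
    // AMBARI.ADMINISTRATOR], i.e. the serviceAdministratorAndUp group above.
    System.out.println(andUp.get("SERVICE.ADMINISTRATOR"));
  }
}

With entity lookups substituted for the strings, a call such as
map.put("SERVICE.MODIFY_CONFIGS", andUp.get("SERVICE.ADMINISTRATOR")) would replace the
corresponding Arrays.asList(...) block, keeping the role ordering defined in one place.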