Posted to commits@ambari.apache.org by jl...@apache.org on 2015/12/23 00:26:41 UTC

[1/4] ambari git commit: AMBARI-14357: UpgradeCatalogs need to be reversioned (jluniya)

Repository: ambari
Updated Branches:
  refs/heads/branch-2.2 52c969e42 -> b3ff65d39


http://git-wip-us.apache.org/repos/asf/ambari/blob/b3ff65d3/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java
new file mode 100644
index 0000000..0ee5828
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java
@@ -0,0 +1,1617 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.upgrade;
+
+import com.google.common.collect.Maps;
+import com.google.gson.Gson;
+import com.google.inject.AbstractModule;
+import com.google.inject.Binder;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.Module;
+import com.google.inject.Provider;
+import com.google.inject.persist.PersistService;
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.actionmanager.ActionManager;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.AmbariManagementControllerImpl;
+import org.apache.ambari.server.controller.ConfigurationRequest;
+import org.apache.ambari.server.controller.ConfigurationResponse;
+import org.apache.ambari.server.controller.KerberosHelper;
+import org.apache.ambari.server.controller.MaintenanceStateHelper;
+import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.GuiceJpaInitializer;
+import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
+import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
+import org.apache.ambari.server.orm.dao.ArtifactDAO;
+import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.dao.ClusterVersionDAO;
+import org.apache.ambari.server.orm.dao.DaoUtils;
+import org.apache.ambari.server.orm.dao.HostVersionDAO;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.dao.StackDAO;
+import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
+import org.apache.ambari.server.orm.entities.ArtifactEntity;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
+import org.apache.ambari.server.orm.entities.HostEntity;
+import org.apache.ambari.server.orm.entities.HostVersionEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.stack.StackManagerFactory;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.RepositoryVersionState;
+import org.apache.ambari.server.state.SecurityType;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.StackInfo;
+import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
+import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
+import org.apache.ambari.server.state.stack.OsFamily;
+import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
+import org.easymock.Capture;
+import org.easymock.EasyMock;
+import org.easymock.EasyMockSupport;
+import org.easymock.IMocksControl;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import javax.persistence.EntityManager;
+import java.io.File;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.net.URL;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static junit.framework.Assert.assertNotNull;
+import static junit.framework.Assert.assertNull;
+import static org.easymock.EasyMock.anyLong;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.capture;
+import static org.easymock.EasyMock.createMockBuilder;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.createStrictMock;
+import static org.easymock.EasyMock.eq;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.reset;
+import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * {@link org.apache.ambari.server.upgrade.UpgradeCatalog220} unit tests.
+ */
+public class UpgradeCatalog220Test {
+  private Injector injector;
+  private Provider<EntityManager> entityManagerProvider = createStrictMock(Provider.class);
+  private EntityManager entityManager = createNiceMock(EntityManager.class);
+  private UpgradeCatalogHelper upgradeCatalogHelper;
+  private StackEntity desiredStackEntity;
+  private AmbariManagementController amc = createNiceMock(AmbariManagementController.class);
+  private AmbariMetaInfo metaInfo = createNiceMock(AmbariMetaInfo.class);
+  private StackDAO stackDAO = createNiceMock(StackDAO.class);
+  private RepositoryVersionDAO repositoryVersionDAO = createNiceMock(RepositoryVersionDAO.class);
+  private ClusterVersionDAO clusterVersionDAO = createNiceMock(ClusterVersionDAO.class);
+  private HostVersionDAO hostVersionDAO = createNiceMock(HostVersionDAO.class);
+  private ClusterDAO clusterDAO = createNiceMock(ClusterDAO.class);
+
+  private IMocksControl mocksControl = EasyMock.createControl();
+
+  @Before
+  public void init() {
+    reset(entityManagerProvider);
+    expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
+    replay(entityManagerProvider);
+    injector = Guice.createInjector(new InMemoryDefaultTestModule());
+    injector.getInstance(GuiceJpaInitializer.class);
+
+    upgradeCatalogHelper = injector.getInstance(UpgradeCatalogHelper.class);
+    // inject AmbariMetaInfo to ensure that stacks get populated in the DB
+    injector.getInstance(AmbariMetaInfo.class);
+    // load the stack entity
+    StackDAO stackDAO = injector.getInstance(StackDAO.class);
+    desiredStackEntity = stackDAO.find("HDP", "2.2.0");
+  }
+
+  @After
+  public void tearDown() {
+    injector.getInstance(PersistService.class).stop();
+  }
+
+  @Test
+  public void testExecuteUpgradeDDLUpdates() throws Exception{
+    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
+
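+    // executeUpgradeDDLUpdates() is expected to add three new columns to the upgrade table.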
+    dbAccessor.addColumn(eq("upgrade"), anyObject(DBAccessor.DBColumnInfo.class));
+    expectLastCall().times(3);
+
+    replay(dbAccessor);
+    Module module = new Module() {
+      @Override
+      public void configure(Binder binder) {
+        binder.bind(DBAccessor.class).toInstance(dbAccessor);
+        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+        binder.bind(EntityManager.class).toInstance(entityManager);
+      }
+    };
+
+    Injector injector = Guice.createInjector(module);
+    UpgradeCatalog220 upgradeCatalog220 = injector.getInstance(UpgradeCatalog220.class);
+    upgradeCatalog220.executeUpgradeDDLUpdates();
+    verify(dbAccessor);
+  }
+
+  @Test
+  public void testExecuteStageDDLUpdates() throws Exception {
+    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
+
+    dbAccessor.addColumn(eq("stage"), anyObject(DBAccessor.DBColumnInfo.class));
+    expectLastCall().times(1);
+
+    replay(dbAccessor);
+    Module module = new Module() {
+      @Override
+      public void configure(Binder binder) {
+        binder.bind(DBAccessor.class).toInstance(dbAccessor);
+        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+        binder.bind(EntityManager.class).toInstance(entityManager);
+      }
+    };
+
+    Injector injector = Guice.createInjector(module);
+    UpgradeCatalog220 upgradeCatalog220 = injector.getInstance(UpgradeCatalog220.class);
+    upgradeCatalog220.executeStageDDLUpdates();
+    verify(dbAccessor);
+  }
+
+  @Test
+  public void testExecuteDMLUpdates() throws Exception {
+    // TODO AMBARI-13001, re-add this unit test section.
+    /*
+    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
+    Configuration configuration = createNiceMock(Configuration.class);
+    Connection connection = createNiceMock(Connection.class);
+    Statement statement = createNiceMock(Statement.class);
+    ResultSet resultSet = createNiceMock(ResultSet.class);
+    expect(configuration.getDatabaseUrl()).andReturn(Configuration.JDBC_IN_MEMORY_URL).anyTimes();
+    dbAccessor.getConnection();
+    expectLastCall().andReturn(connection).anyTimes();
+    connection.createStatement();
+    expectLastCall().andReturn(statement).anyTimes();
+    statement.executeQuery(anyObject(String.class));
+    expectLastCall().andReturn(resultSet).anyTimes();
+
+    // Technically, this is a DDL change, but it has to be run during the DML portion
+    // because it requires the persistence layer to be started.
+    UpgradeSectionDDL upgradeSectionDDL = new UpgradeSectionDDL();
+
+    // Execute any DDL schema changes
+    upgradeSectionDDL.execute(dbAccessor);
+
+    // Begin DML verifications
+    verifyBootstrapHDP21();
+
+    // Replay main sections
+    replay(dbAccessor, configuration, resultSet, connection, statement);
+
+
+    AbstractUpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor);
+    Class<?> c = AbstractUpgradeCatalog.class;
+    Field f = c.getDeclaredField("configuration");
+    f.setAccessible(true);
+    f.set(upgradeCatalog, configuration);
+    */
+
+    Method updateStormConfigs = UpgradeCatalog220.class.getDeclaredMethod("updateStormConfigs");
+    Method updateAMSConfigs = UpgradeCatalog220.class.getDeclaredMethod("updateAMSConfigs");
+    Method updateHDFSConfigs = UpgradeCatalog220.class.getDeclaredMethod("updateHDFSConfigs");
+    Method updateKafkaConfigs = UpgradeCatalog220.class.getDeclaredMethod("updateKafkaConfigs");
+    Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
+    Method updateHbaseEnvConfig = UpgradeCatalog220.class.getDeclaredMethod("updateHbaseEnvConfig");
+    Method updateFlumeEnvConfig = UpgradeCatalog220.class.getDeclaredMethod("updateFlumeEnvConfig");
+    Method updateZookeeperLog4j = UpgradeCatalog220.class.getDeclaredMethod("updateZookeeperLog4j");
+    Method updateHadoopEnvConfig = UpgradeCatalog220.class.getDeclaredMethod("updateHadoopEnv");
+    Method updateAlertDefinitions = UpgradeCatalog220.class.getDeclaredMethod("updateAlertDefinitions");
+    Method updateRangerEnvConfig = UpgradeCatalog220.class.getDeclaredMethod("updateRangerEnvConfig");
+    Method updateRangerUgsyncSiteConfig = UpgradeCatalog220.class.getDeclaredMethod("updateRangerUgsyncSiteConfig");
+    Method updateHiveConfig = UpgradeCatalog220.class.getDeclaredMethod("updateHiveConfig");
+    Method updateAccumuloConfigs = UpgradeCatalog220.class.getDeclaredMethod("updateAccumuloConfigs");
+    Method updateKerberosDescriptorArtifacts = AbstractUpgradeCatalog.class.getDeclaredMethod("updateKerberosDescriptorArtifacts");
+    Method updateKnoxTopology = UpgradeCatalog220.class.getDeclaredMethod("updateKnoxTopology");
+
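+    // Partially mock UpgradeCatalog220: executeDMLUpdates() runs for real while each
+    // config-update helper below is replaced with a one-time expectation.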
+    UpgradeCatalog220 upgradeCatalog220 = createMockBuilder(UpgradeCatalog220.class)
+      .addMockedMethod(updateAMSConfigs)
+      .addMockedMethod(updateHDFSConfigs)
+      .addMockedMethod(updateStormConfigs)
+      .addMockedMethod(addNewConfigurationsFromXml)
+      .addMockedMethod(updateHbaseEnvConfig)
+      .addMockedMethod(updateFlumeEnvConfig)
+      .addMockedMethod(updateAlertDefinitions)
+      .addMockedMethod(updateKafkaConfigs)
+      .addMockedMethod(updateZookeeperLog4j)
+      .addMockedMethod(updateHadoopEnvConfig)
+      .addMockedMethod(updateRangerEnvConfig)
+      .addMockedMethod(updateRangerUgsyncSiteConfig)
+      .addMockedMethod(updateHiveConfig)
+      .addMockedMethod(updateAccumuloConfigs)
+      .addMockedMethod(updateKerberosDescriptorArtifacts)
+      .addMockedMethod(updateKnoxTopology)
+      .createMock();
+
+    upgradeCatalog220.updateHbaseEnvConfig();
+    expectLastCall().once();
+    upgradeCatalog220.updateFlumeEnvConfig();
+    upgradeCatalog220.addNewConfigurationsFromXml();
+    expectLastCall().once();
+    upgradeCatalog220.updateStormConfigs();
+    expectLastCall().once();
+    upgradeCatalog220.updateHadoopEnv();
+    expectLastCall().once();
+    upgradeCatalog220.updateAMSConfigs();
+    expectLastCall().once();
+    upgradeCatalog220.updateAlertDefinitions();
+    expectLastCall().once();
+    upgradeCatalog220.updateKafkaConfigs();
+    expectLastCall().once();
+    upgradeCatalog220.updateHDFSConfigs();
+    expectLastCall().once();
+    upgradeCatalog220.updateZookeeperLog4j();
+    expectLastCall().once();
+    upgradeCatalog220.updateRangerEnvConfig();
+    expectLastCall().once();
+    upgradeCatalog220.updateRangerUgsyncSiteConfig();
+    expectLastCall().once();
+    upgradeCatalog220.updateHiveConfig();
+    expectLastCall().once();
+    upgradeCatalog220.updateAccumuloConfigs();
+    expectLastCall().once();
+    upgradeCatalog220.updateKnoxTopology();
+    expectLastCall().once();
+    upgradeCatalog220.updateKerberosDescriptorArtifacts();
+    expectLastCall().once();
+
+    replay(upgradeCatalog220);
+
+    upgradeCatalog220.executeDMLUpdates();
+
+    verify(upgradeCatalog220);
+  }
+
+  /**
+   * Verify that when bootstrapping HDP 2.1, records get inserted into the
+   * repo_version, cluster_version, and host_version tables.
+   * @throws AmbariException
+   */
+  private void verifyBootstrapHDP21() throws Exception {
+    final String stackName = "HDP";
+    final String stackVersion = "2.1";
+    final String stackNameAndVersion = stackName + "-" + stackVersion;
+    final String buildNumber = "2.1.0.0-0001";
+    final String stackAndBuild = stackName + "-" + buildNumber;
+    final String clusterName = "c1";
+
+    expect(amc.getAmbariMetaInfo()).andReturn(metaInfo);
+
+    // Mock the actions to bootstrap if using HDP 2.1
+    Clusters clusters = createNiceMock(Clusters.class);
+    expect(amc.getClusters()).andReturn(clusters);
+
+    Map<String, Cluster> clusterHashMap = new HashMap<String, Cluster>();
+    Cluster cluster = createNiceMock(Cluster.class);
+    clusterHashMap.put(clusterName, cluster);
+    expect(clusters.getClusters()).andReturn(clusterHashMap);
+
+    StackId stackId = new StackId(stackNameAndVersion);
+    expect(cluster.getCurrentStackVersion()).andReturn(stackId);
+
+    StackInfo stackInfo = new StackInfo();
+    stackInfo.setVersion(buildNumber);
+    expect(metaInfo.getStack(stackName, stackVersion)).andReturn(stackInfo);
+
+    StackEntity stackEntity = createNiceMock(StackEntity.class);
+    expect(stackEntity.getStackName()).andReturn(stackName);
+    expect(stackEntity.getStackVersion()).andReturn(stackVersion);
+
+    expect(stackDAO.find(stackName, stackVersion)).andReturn(stackEntity);
+
+    replay(amc, metaInfo, clusters, cluster, stackEntity, stackDAO);
+
+    // Mock more function calls
+    // Repository Version
+    RepositoryVersionEntity repositoryVersionEntity = createNiceMock(RepositoryVersionEntity.class);
+    expect(repositoryVersionDAO.findByDisplayName(stackAndBuild)).andReturn(null);
+    expect(repositoryVersionDAO.findMaxId("id")).andReturn(0L);
+    expect(repositoryVersionDAO.findAll()).andReturn(Collections.<RepositoryVersionEntity>emptyList());
+    expect(repositoryVersionDAO.create(anyObject(StackEntity.class), anyObject(String.class), anyObject(String.class), anyObject(String.class))).andReturn(repositoryVersionEntity);
+    expect(repositoryVersionEntity.getId()).andReturn(1L);
+    expect(repositoryVersionEntity.getVersion()).andReturn(buildNumber);
+    replay(repositoryVersionDAO, repositoryVersionEntity);
+
+    // Cluster Version
+    ClusterVersionEntity clusterVersionEntity = createNiceMock(ClusterVersionEntity.class);
+    expect(clusterVersionEntity.getId()).andReturn(1L);
+    expect(clusterVersionEntity.getState()).andReturn(RepositoryVersionState.CURRENT);
+    expect(clusterVersionEntity.getRepositoryVersion()).andReturn(repositoryVersionEntity);
+
+    expect(clusterVersionDAO.findByClusterAndStackAndVersion(anyObject(String.class), anyObject(StackId.class), anyObject(String.class))).andReturn(null);
+    expect(clusterVersionDAO.findMaxId("id")).andReturn(0L);
+    expect(clusterVersionDAO.findAll()).andReturn(Collections.<ClusterVersionEntity>emptyList());
+    expect(clusterVersionDAO.create(anyObject(ClusterEntity.class), anyObject(RepositoryVersionEntity.class), anyObject(RepositoryVersionState.class), anyLong(), anyLong(), anyObject(String.class))).andReturn(clusterVersionEntity);
+    replay(clusterVersionDAO, clusterVersionEntity);
+
+    // Host Version
+    ClusterEntity clusterEntity = createNiceMock(ClusterEntity.class);
+    expect(clusterEntity.getClusterName()).andReturn(clusterName).anyTimes();
+    expect(clusterDAO.findByName(anyObject(String.class))).andReturn(clusterEntity);
+
+    Collection<HostEntity> hostEntities = new ArrayList<HostEntity>();
+    HostEntity hostEntity1 = createNiceMock(HostEntity.class);
+    HostEntity hostEntity2 = createNiceMock(HostEntity.class);
+    expect(hostEntity1.getHostName()).andReturn("host1");
+    expect(hostEntity2.getHostName()).andReturn("host2");
+    hostEntities.add(hostEntity1);
+    hostEntities.add(hostEntity2);
+    expect(clusterEntity.getHostEntities()).andReturn(hostEntities);
+
+    expect(hostVersionDAO.findByClusterStackVersionAndHost(anyObject(String.class), anyObject(StackId.class), anyObject(String.class), anyObject(String.class))).andReturn(null);
+    expect(hostVersionDAO.findMaxId("id")).andReturn(0L);
+    expect(hostVersionDAO.findAll()).andReturn(Collections.<HostVersionEntity>emptyList());
+
+    replay(clusterEntity, clusterDAO, hostVersionDAO, hostEntity1, hostEntity2);
+  }
+
+  @Test
+  public void testExecuteUpgradePreDMLUpdates() throws Exception {
+    Method executeStackPreDMLUpdates = UpgradeCatalog220.class.getDeclaredMethod("executeUpgradePreDMLUpdates");
+    Method executeStackUpgradeDDLUpdates = UpgradeCatalog220.class.getDeclaredMethod("executeStackUpgradeDDLUpdates");
+    Method bootstrapRepoVersionForHDP21 = UpgradeCatalog220.class.getDeclaredMethod("bootstrapRepoVersionForHDP21");
+
+    final UpgradeCatalog220 upgradeCatalog220 = createMockBuilder(UpgradeCatalog220.class)
+      .addMockedMethod(executeStackUpgradeDDLUpdates)
+      .addMockedMethod(bootstrapRepoVersionForHDP21)
+      .addMockedMethod(executeStackPreDMLUpdates).createMock();
+
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(UpgradeCatalog220.class).toInstance(upgradeCatalog220);
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+        bind(EntityManager.class).toInstance(entityManager);
+      }
+    });
+
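+    // executePreDMLUpdates() is expected to call each of the mocked helpers exactly once.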
+    upgradeCatalog220.executeUpgradePreDMLUpdates();
+    expectLastCall().once();
+
+    upgradeCatalog220.executeStackUpgradeDDLUpdates();
+    expectLastCall().once();
+
+    upgradeCatalog220.bootstrapRepoVersionForHDP21();
+    expectLastCall().once();
+
+    replay(upgradeCatalog220);
+    mockInjector.getInstance(UpgradeCatalog220.class).executePreDMLUpdates();
+
+    verify(upgradeCatalog220);
+  }
+
+  @Test
+  public void testUpdateStormSiteConfigs() throws Exception {
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
+    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
+
+    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
+    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
+    final Map<String, String> propertiesStormSite = new HashMap<String, String>() {
+      {
+        put("nimbus.monitor.freq.secs", "10");
+        put("metrics.reporter.register", "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter");
+      }
+    };
+
+    final Config mockStormSite = easyMockSupport.createNiceMock(Config.class);
+    expect(mockStormSite.getProperties()).andReturn(propertiesStormSite).once();
+
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
+        bind(ConfigHelper.class).toInstance(mockConfigHelper);
+        bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
+
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+      }
+    });
+
+    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
+    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", mockClusterExpected);
+    }}).once();
+
+    expect(mockClusterExpected.getDesiredConfigByType("storm-site")).andReturn(mockStormSite).atLeastOnce();
+    expect(mockStormSite.getProperties()).andReturn(propertiesStormSite).atLeastOnce();
+
+    easyMockSupport.replayAll();
+    mockInjector.getInstance(UpgradeCatalog220.class).updateStormConfigs();
+    easyMockSupport.verifyAll();
+  }
+
+  @Test
+  public void testUpdateKerberosDescriptorArtifact() throws Exception {
+    final KerberosDescriptorFactory kerberosDescriptorFactory = new KerberosDescriptorFactory();
+
+    KerberosServiceDescriptor serviceDescriptor;
+
+    URL systemResourceURL = ClassLoader.getSystemResource("kerberos/test_kerberos_descriptor_2_1_3.json");
+    assertNotNull(systemResourceURL);
+
+    final KerberosDescriptor kerberosDescriptorOrig = kerberosDescriptorFactory.createInstance(new File(systemResourceURL.getFile()));
+    assertNotNull(kerberosDescriptorOrig);
+
+    serviceDescriptor = kerberosDescriptorOrig.getService("HDFS");
+    assertNotNull(serviceDescriptor);
+    assertNotNull(serviceDescriptor.getIdentity("hdfs"));
+
+    serviceDescriptor = kerberosDescriptorOrig.getService("OOZIE");
+    assertNotNull(serviceDescriptor);
+    assertNotNull(serviceDescriptor.getIdentity("/HDFS/hdfs"));
+
+    UpgradeCatalog220 upgradeMock = createMockBuilder(UpgradeCatalog220.class).createMock();
+
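+    // Capture the artifact data written back so the updated Kerberos descriptor can be
+    // re-parsed and its identities checked below.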
+    Capture<Map<String, Object>> updatedData = new Capture<Map<String, Object>>();
+
+    ArtifactEntity artifactEntity = createNiceMock(ArtifactEntity.class);
+    expect(artifactEntity.getArtifactData())
+        .andReturn(kerberosDescriptorOrig.toMap())
+        .once();
+
+    artifactEntity.setArtifactData(capture(updatedData));
+    expectLastCall().once();
+
+    replay(artifactEntity, upgradeMock);
+    upgradeMock.updateKerberosDescriptorArtifact(createNiceMock(ArtifactDAO.class), artifactEntity);
+    verify(artifactEntity, upgradeMock);
+
+    KerberosDescriptor kerberosDescriptorUpdated = new KerberosDescriptorFactory().createInstance(updatedData.getValue());
+    assertNotNull(kerberosDescriptorUpdated);
+
+    serviceDescriptor = kerberosDescriptorUpdated.getService("HDFS");
+    assertNotNull(serviceDescriptor);
+    assertNull(serviceDescriptor.getIdentity("hdfs"));
+
+    KerberosComponentDescriptor namenodeComponent = serviceDescriptor.getComponent("NAMENODE");
+    assertNotNull(namenodeComponent.getIdentity("hdfs"));
+
+    serviceDescriptor = kerberosDescriptorUpdated.getService("OOZIE");
+    assertNotNull(serviceDescriptor);
+    assertNull(serviceDescriptor.getIdentity("/HDFS/hdfs"));
+    assertNotNull(serviceDescriptor.getIdentity("/HDFS/NAMENODE/hdfs"));
+  }
+
+
+  @Test
+  public void testUpdateHbaseEnvConfig() throws AmbariException {
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
+    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
+    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
+    final Map<String, String> propertiesHbaseEnv = new HashMap<String, String>() {
+      {
+        put("content", "test");
+      }
+    };
+
+    final Config mockHbaseEnv = easyMockSupport.createNiceMock(Config.class);
+    expect(mockHbaseEnv.getProperties()).andReturn(propertiesHbaseEnv).once();
+
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
+        bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
+
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+      }
+    });
+
+    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
+    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", mockClusterExpected);
+    }}).atLeastOnce();
+    expect(mockClusterExpected.getCurrentStackVersion()).andReturn(new StackId("HDP", "2.2"));
+
+    expect(mockClusterExpected.getDesiredConfigByType("hbase-env")).andReturn(mockHbaseEnv).atLeastOnce();
+    expect(mockHbaseEnv.getProperties()).andReturn(propertiesHbaseEnv).atLeastOnce();
+
+    easyMockSupport.replayAll();
+    mockInjector.getInstance(UpgradeCatalog220.class).updateHbaseEnvConfig();
+    easyMockSupport.verifyAll();
+
+  }
+
+  @Test
+  public void testUpdateHDFSConfiguration() throws Exception {
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
+    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
+
+    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
+    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
+
+    final Config mockHdfsSite = easyMockSupport.createNiceMock(Config.class);
+
+    final Map<String, String> propertiesExpectedHdfs = new HashMap<String, String>();
+    propertiesExpectedHdfs.put("dfs.namenode.rpc-address", "nn.rpc.address");
+    propertiesExpectedHdfs.put("dfs.nameservices", "nn1");
+    propertiesExpectedHdfs.put("dfs.ha.namenodes.nn1", "value");
+
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
+        bind(ConfigHelper.class).toInstance(mockConfigHelper);
+        bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
+
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+      }
+    });
+
+    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
+    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", mockClusterExpected);
+    }}).once();
+
+    // Expected operation
+    expect(mockClusterExpected.getDesiredConfigByType("hdfs-site")).andReturn(mockHdfsSite).atLeastOnce();
+    expect(mockHdfsSite.getProperties()).andReturn(propertiesExpectedHdfs).anyTimes();
+
+    easyMockSupport.replayAll();
+    mockInjector.getInstance(UpgradeCatalog220.class).updateHDFSConfigs();
+    easyMockSupport.verifyAll();
+  }
+
+  @Test
+  public void testUpdateAmsHbaseEnvContent() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
+    Method updateAmsHbaseEnvContent = UpgradeCatalog220.class.getDeclaredMethod("updateAmsHbaseEnvContent", String.class);
+    UpgradeCatalog220 upgradeCatalog220 = new UpgradeCatalog220(injector);
+    String oldContent = "export HBASE_CLASSPATH=${HBASE_CLASSPATH}\n" +
+      "\n" +
+      "# The maximum amount of heap to use, in MB. Default is 1000.\n" +
+      "export HBASE_HEAPSIZE={{hbase_heapsize}}\n";
+
+    String expectedContent = "export HBASE_CLASSPATH=${HBASE_CLASSPATH}\n" +
+      "\n" +
+      "# The maximum amount of heap to use, in MB. Default is 1000.\n" +
+      "#export HBASE_HEAPSIZE={{hbase_heapsize}}\n" +
+      "\n" +
+      "# The maximum amount of heap to use for hbase shell.\n" +
+      "export HBASE_SHELL_OPTS=\"-Xmx256m\"\n";
+    String result = (String) updateAmsHbaseEnvContent.invoke(upgradeCatalog220, oldContent);
+    Assert.assertEquals(expectedContent, result);
+  }
+
+  @Test
+  public void testAmsSiteUpdateConfigs() throws Exception{
+
+    Map<String, String> oldPropertiesAmsSite = new HashMap<String, String>() {
+      {
+        //Including only those properties that might be present in an older version.
+        put("timeline.metrics.service.default.result.limit", String.valueOf(5760));
+        put("timeline.metrics.cluster.aggregator.minute.interval", String.valueOf(1000));
+        put("timeline.metrics.host.aggregator.minute.interval", String.valueOf(1000));
+        put("timeline.metrics.cluster.aggregator.minute.ttl", String.valueOf(1000));
+      }
+    };
+    Map<String, String> newPropertiesAmsSite = new HashMap<String, String>() {
+      {
+        put("timeline.metrics.service.default.result.limit", String.valueOf(15840));
+        put("timeline.metrics.cluster.aggregator.second.interval", String.valueOf(120));
+        put("timeline.metrics.cluster.aggregator.minute.interval", String.valueOf(300));
+        put("timeline.metrics.host.aggregator.minute.interval", String.valueOf(300));
+        put("timeline.metrics.cluster.aggregator.second.ttl", String.valueOf(2592000));
+        put("timeline.metrics.cluster.aggregator.minute.ttl", String.valueOf(7776000));
+        put("timeline.metrics.cluster.aggregator.second.checkpointCutOffMultiplier", String.valueOf(2));
+        put("timeline.metrics.cluster.aggregator.second.disabled", String.valueOf(false));
+        put("timeline.metrics.hbase.fifo.compaction.enabled", String.valueOf(true));
+      }
+    };
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+
+    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
+    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
+    Config mockAmsSite = easyMockSupport.createNiceMock(Config.class);
+
+    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", cluster);
+    }}).once();
+    expect(cluster.getDesiredConfigByType("ams-site")).andReturn(mockAmsSite).atLeastOnce();
+    expect(mockAmsSite.getProperties()).andReturn(oldPropertiesAmsSite).times(2);
+
+    Injector injector = easyMockSupport.createNiceMock(Injector.class);
+    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
+    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
+    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
+
+    replay(injector, clusters, mockAmsSite, cluster);
+
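+    // Partially mock the controller and capture the ConfigurationRequest handed to
+    // createConfiguration() so the rewritten ams-site properties can be asserted on.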
+    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
+      .addMockedMethod("createConfiguration")
+      .addMockedMethod("getClusters", new Class[] { })
+      .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
+      .createNiceMock();
+
+    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
+    Capture<ConfigurationRequest> configurationRequestCapture = EasyMock.newCapture();
+    ConfigurationResponse configurationResponseMock = easyMockSupport.createMock(ConfigurationResponse.class);
+
+    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
+    expect(controller.getClusters()).andReturn(clusters).anyTimes();
+    expect(controller.createConfiguration(capture(configurationRequestCapture))).andReturn(configurationResponseMock).once();
+
+    replay(controller, injector2, configurationResponseMock);
+    new UpgradeCatalog220(injector2).updateAMSConfigs();
+    easyMockSupport.verifyAll();
+
+    ConfigurationRequest configurationRequest = configurationRequestCapture.getValue();
+    Map<String, String> updatedProperties = configurationRequest.getProperties();
+    assertTrue(Maps.difference(newPropertiesAmsSite, updatedProperties).areEqual());
+
+  }
+
+  @Test
+  public void testAmsHbaseSiteUpdateConfigs() throws Exception{
+
+    Map<String, String> oldPropertiesAmsHbaseSite = new HashMap<String, String>() {
+      {
+        //Including only those properties that might be present in an older version.
+        put("zookeeper.session.timeout.localHBaseCluster", String.valueOf(20000));
+      }
+    };
+    Map<String, String> newPropertiesAmsSite = new HashMap<String, String>() {
+      {
+        put("zookeeper.session.timeout.localHBaseCluster", String.valueOf(120000));
+        put("hbase.normalizer.enabled", String.valueOf(true));
+        put("hbase.normalizer.period", String.valueOf(600000));
+        put("hbase.master.normalizer.class", "org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer");
+
+      }
+    };
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+
+    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
+    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
+    Config mockAmsHbaseSite = easyMockSupport.createNiceMock(Config.class);
+
+    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", cluster);
+    }}).once();
+    expect(cluster.getDesiredConfigByType("ams-hbase-site")).andReturn(mockAmsHbaseSite).atLeastOnce();
+    expect(mockAmsHbaseSite.getProperties()).andReturn(oldPropertiesAmsHbaseSite).atLeastOnce();
+
+    Injector injector = easyMockSupport.createNiceMock(Injector.class);
+    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
+    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
+    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
+
+    replay(injector, clusters, mockAmsHbaseSite, cluster);
+
+    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
+      .addMockedMethod("createConfiguration")
+      .addMockedMethod("getClusters", new Class[] { })
+      .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
+      .createNiceMock();
+
+    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
+    Capture<ConfigurationRequest> configurationRequestCapture = EasyMock.newCapture();
+    ConfigurationResponse configurationResponseMock = easyMockSupport.createMock(ConfigurationResponse.class);
+
+    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
+    expect(controller.getClusters()).andReturn(clusters).anyTimes();
+    expect(controller.createConfiguration(capture(configurationRequestCapture))).andReturn(configurationResponseMock).once();
+
+    replay(controller, injector2, configurationResponseMock);
+    new UpgradeCatalog220(injector2).updateAMSConfigs();
+    easyMockSupport.verifyAll();
+
+    ConfigurationRequest configurationRequest = configurationRequestCapture.getValue();
+    Map<String, String> updatedProperties = configurationRequest.getProperties();
+    assertTrue(Maps.difference(newPropertiesAmsSite, updatedProperties).areEqual());
+  }
+
+  @Test
+  public void testUpdateAlertDefinitions() {
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+    UpgradeCatalog220 upgradeCatalog220 = new UpgradeCatalog220(injector);
+    long clusterId = 1;
+
+    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
+    final AlertDefinitionDAO mockAlertDefinitionDAO = easyMockSupport.createNiceMock(AlertDefinitionDAO.class);
+    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
+    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
+    final AlertDefinitionEntity mockJournalNodeProcessAlertDefinitionEntity = easyMockSupport.createNiceMock(AlertDefinitionEntity.class);
+    final AlertDefinitionEntity mockHostDiskUsageAlertDefinitionEntity = easyMockSupport.createNiceMock(AlertDefinitionEntity.class);
+
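+    // Original PORT-based journalnode_process alert source and the WEB-based source that
+    // modifyJournalnodeProcessAlertSource() is expected to produce.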
+    final String journalNodeProcessAlertSource = "{\"uri\":\"{{hdfs-site/dfs.journalnode.http-address}}\",\"default_port\":8480," +
+        "\"type\":\"PORT\",\"reporting\":{\"ok\":{\"text\":\"TCP OK - {0:.3f}s response on port {1}\"}," +
+        "\"warning\":{\"text\":\"TCP OK - {0:.3f}s response on port {1}\",\"value\":1.5}," +
+        "\"critical\":{\"text\":\"Connection failed: {0} to {1}:{2}\",\"value\":5.0}}}";
+    final String journalNodeProcessAlertSourceExpected = "{\"reporting\":{\"ok\":{\"text\":\"HTTP {0} response in {2:.3f}s\"}," +
+        "\"warning\":{\"text\":\"HTTP {0} response from {1} in {2:.3f}s ({3})\"}," +
+        "\"critical\":{\"text\":\"Connection failed to {1} ({3})\"}},\"type\":\"WEB\"," +
+        "\"uri\":{\"http\":\"{{hdfs-site/dfs.journalnode.http-address}}\"," +
+        "\"https\":\"{{hdfs-site/dfs.journalnode.https-address}}\"," +
+        "\"kerberos_keytab\":\"{{hdfs-site/dfs.web.authentication.kerberos.keytab}}\","+
+        "\"kerberos_principal\":\"{{hdfs-site/dfs.web.authentication.kerberos.principal}}\"," +
+        "\"https_property\":\"{{hdfs-site/dfs.http.policy}}\"," +
+        "\"https_property_value\":\"HTTPS_ONLY\",\"connection_timeout\":5.0}}";
+
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
+        bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
+        bind(AlertDefinitionDAO.class).toInstance(mockAlertDefinitionDAO);
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+      }
+    });
+
+    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
+    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", mockClusterExpected);
+    }}).atLeastOnce();
+
+    expect(mockClusterExpected.getClusterId()).andReturn(clusterId).anyTimes();
+
+    expect(mockAlertDefinitionDAO.findByName(eq(clusterId), eq("journalnode_process"))).andReturn(mockJournalNodeProcessAlertDefinitionEntity).atLeastOnce();
+    expect(mockAlertDefinitionDAO.findByName(eq(clusterId), eq("ambari_agent_disk_usage"))).andReturn(mockHostDiskUsageAlertDefinitionEntity).atLeastOnce();
+
+    expect(mockJournalNodeProcessAlertDefinitionEntity.getSource()).andReturn(journalNodeProcessAlertSource).atLeastOnce();
+    Assert.assertEquals(journalNodeProcessAlertSourceExpected, upgradeCatalog220.modifyJournalnodeProcessAlertSource(journalNodeProcessAlertSource));
+
+    mockHostDiskUsageAlertDefinitionEntity.setDescription(eq("This host-level alert is triggered if the amount of disk space " +
+        "used goes above specific thresholds. The default threshold values are 50% for WARNING and 80% for CRITICAL."));
+    expectLastCall().atLeastOnce();
+    mockHostDiskUsageAlertDefinitionEntity.setLabel(eq("Host Disk Usage"));
+    expectLastCall().atLeastOnce();
+
+    easyMockSupport.replayAll();
+    mockInjector.getInstance(UpgradeCatalog220.class).updateAlertDefinitions();
+    easyMockSupport.verifyAll();
+  }
+
+  @Test
+  public void testUpdateAmsEnvContent() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
+    Method updateAmsEnvContent = UpgradeCatalog220.class.getDeclaredMethod("updateAmsEnvContent", String.class);
+    UpgradeCatalog220 upgradeCatalog220 = new UpgradeCatalog220(injector);
+    String oldContent = "some_content";
+
+    String expectedContent = "some_content" + "\n" +
+      "# AMS Collector GC options\n" +
+      "export AMS_COLLECTOR_GC_OPTS=\"-XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 " +
+      "-XX:+UseCMSInitiatingOccupancyOnly -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps " +
+      "-XX:+UseGCLogFileRotation -XX:GCLogFileSize=10M " +
+      "-Xloggc:{{ams_collector_log_dir}}/collector-gc.log-`date +'%Y%m%d%H%M'`\"\n" +
+      "export AMS_COLLECTOR_OPTS=\"$AMS_COLLECTOR_OPTS $AMS_COLLECTOR_GC_OPTS\"\n"+
+      "\n" +
+      "# HBase compaction policy enabled\n" +
+      "export HBASE_NORMALIZATION_ENABLED={{ams_hbase_normalizer_enabled}}\n" +
+      "\n" +
+      "# HBase compaction policy enabled\n" +
+      "export HBASE_FIFO_COMPACTION_POLICY_ENABLED={{ams_hbase_fifo_compaction_policy_enabled}}\n";
+
+    String result = (String) updateAmsEnvContent.invoke(upgradeCatalog220, oldContent);
+    Assert.assertEquals(expectedContent, result);
+  }
+
+  @Test
+  public void testUpdateKafkaConfigs() throws Exception {
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
+    final ConfigurationResponse mockConfigurationResponse = easyMockSupport.createMock(ConfigurationResponse.class);
+    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
+
+    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
+    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
+
+    final Map<String, String> propertiesKafkaEnv = new HashMap<String, String>() {
+      {
+        put("content", "test");
+      }
+    };
+    Map<String, String> updates = Collections.singletonMap("content", "test\n\nexport KAFKA_KERBEROS_PARAMS=\"$KAFKA_KERBEROS_PARAMS {{kafka_kerberos_params}}");
+
+    final Map<String, String> propertiesAmsEnv = new HashMap<String, String>() {
+      {
+        put("kafka.metrics.reporters", "{{kafka_metrics_reporters}}");
+      }
+    };
+    final Map<String, Service> installedServices = new HashMap<String, Service>() {
+      {
+        put("KAFKA", null);
+        put("AMBARI_METRICS", null);
+      }
+    };
+
+    final Config mockAmsEnv = easyMockSupport.createNiceMock(Config.class);
+    final Config mockKafkaEnv = easyMockSupport.createNiceMock(Config.class);
+
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
+        bind(ConfigHelper.class).toInstance(mockConfigHelper);
+        bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
+
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+      }
+    });
+
+    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
+    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", mockClusterExpected);
+    }}).once();
+
+    expect(mockClusterExpected.getServices()).andReturn(installedServices).atLeastOnce();
+    expect(mockClusterExpected.getDesiredConfigByType("kafka-broker")).andReturn(mockAmsEnv).atLeastOnce();
+    expect(mockAmsEnv.getProperties()).andReturn(propertiesAmsEnv).atLeastOnce();
+
+    expect(mockClusterExpected.getDesiredConfigByType("kafka-env")).andReturn(mockKafkaEnv).atLeastOnce();
+    expect(mockKafkaEnv.getProperties()).andReturn(propertiesKafkaEnv).atLeastOnce();
+
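+    // Partially mock the catalog to verify that the kafka-env "content" property gets the
+    // Kerberos params appended via updateConfigurationPropertiesForCluster().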
+    UpgradeCatalog220 upgradeCatalog220 = createMockBuilder(UpgradeCatalog220.class)
+        .withConstructor(Injector.class)
+        .withArgs(mockInjector)
+        .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
+            Map.class, boolean.class, boolean.class)
+        .createMock();
+    upgradeCatalog220.updateConfigurationPropertiesForCluster(mockClusterExpected,
+      "kafka-env", updates, true, false);
+    expectLastCall().once();
+
+    expect(mockAmbariManagementController.createConfiguration(EasyMock.<ConfigurationRequest>anyObject())).andReturn(mockConfigurationResponse);
+
+    easyMockSupport.replayAll();
+    mockInjector.getInstance(UpgradeCatalog220.class).updateKafkaConfigs();
+    easyMockSupport.verifyAll();
+  }
+
+  @Test
+  public void testUpdateFlumeEnvConfig() throws AmbariException {
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
+    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
+    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
+    final Map<String, String> propertiesFlumeEnv = new HashMap<String, String>() {
+      {
+        put("content", "test");
+      }
+    };
+
+    final Config mockFlumeEnv = easyMockSupport.createNiceMock(Config.class);
+    expect(mockFlumeEnv.getProperties()).andReturn(propertiesFlumeEnv).once();
+
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
+        bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
+
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+      }
+    });
+
+    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
+    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", mockClusterExpected);
+    }}).atLeastOnce();
+
+    expect(mockClusterExpected.getDesiredConfigByType("flume-env")).andReturn(mockFlumeEnv).atLeastOnce();
+    expect(mockFlumeEnv.getProperties()).andReturn(propertiesFlumeEnv).atLeastOnce();
+
+    easyMockSupport.replayAll();
+    mockInjector.getInstance(UpgradeCatalog220.class).updateFlumeEnvConfig();
+    easyMockSupport.verifyAll();
+  }
+
+  /**
+   * Builds an {@link UpgradeCatalog220} backed by a Guice injector that binds the
+   * supplied {@link DBAccessor} along with the other mocked dependencies.
+   *
+   * @param dbAccessor the mocked database accessor to bind into the injector
+   * @return the upgrade catalog instance under test
+   */
+  private AbstractUpgradeCatalog getUpgradeCatalog(final DBAccessor dbAccessor) {
+    Module module = new Module() {
+      @Override
+      public void configure(Binder binder) {
+        binder.bind(DBAccessor.class).toInstance(dbAccessor);
+        binder.bind(EntityManager.class).toInstance(entityManager);
+        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+        binder.bind(DaoUtils.class).toInstance(createNiceMock(DaoUtils.class));
+        binder.bind(ClusterDAO.class).toInstance(clusterDAO);
+        binder.bind(RepositoryVersionHelper.class).toInstance(createNiceMock(RepositoryVersionHelper.class));
+        binder.bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
+        binder.bind(AmbariManagementController.class).toInstance(amc);
+        binder.bind(AmbariMetaInfo.class).toInstance(metaInfo);
+        binder.bind(StackManagerFactory.class).toInstance(createNiceMock(StackManagerFactory.class));
+        binder.bind(StackDAO.class).toInstance(stackDAO);
+        binder.bind(RepositoryVersionDAO.class).toInstance(repositoryVersionDAO);
+        binder.bind(ClusterVersionDAO.class).toInstance(clusterVersionDAO);
+        binder.bind(HostVersionDAO.class).toInstance(hostVersionDAO);
+      }
+    };
+
+    Injector injector = Guice.createInjector(module);
+    return injector.getInstance(UpgradeCatalog220.class);
+  }
+
+  @Test
+  public void testUpdateZookeeperLog4jConfig() throws AmbariException {
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
+    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
+    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
+    final Map<String, String> propertiesZookeeperLog4j = new HashMap<String, String>() {
+      {
+        put("content", "log4j.rootLogger=INFO, CONSOLE");
+      }
+    };
+
+    final Config mockZookeeperLog4j = easyMockSupport.createNiceMock(Config.class);
+    expect(mockZookeeperLog4j.getProperties()).andReturn(propertiesZookeeperLog4j).once();
+
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
+        bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
+
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+      }
+    });
+
+    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
+    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", mockClusterExpected);
+    }}).atLeastOnce();
+
+    expect(mockClusterExpected.getDesiredConfigByType("zookeeper-log4j")).andReturn(mockZookeeperLog4j).atLeastOnce();
+    expect(mockZookeeperLog4j.getProperties()).andReturn(propertiesZookeeperLog4j).atLeastOnce();
+
+    easyMockSupport.replayAll();
+    mockInjector.getInstance(UpgradeCatalog220.class).updateZookeeperLog4j();
+    easyMockSupport.verifyAll();
+
+  }
+
+  @Test
+  public void testUpdateRangerEnvConfig() throws Exception {
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
+    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
+    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
+    final Map<String, String> propertiesHiveEnv = new HashMap<String, String>() {{
+        put("hive_security_authorization", "Ranger");
+    }};
+    final Map<String, String> propertiesRangerHdfsPlugin = new HashMap<String, String>() {{
+      put("ranger-hdfs-plugin-enabled", "Yes");
+    }};
+    final Map<String, String> propertiesRangerHbasePlugin = new HashMap<String, String>() {{
+      put("ranger-hbase-plugin-enabled", "Yes");
+    }};
+    final Map<String, String> propertiesRangerKafkaPlugin = new HashMap<String, String>() {{
+      put("ranger-kafka-plugin-enabled", "Yes");
+    }};
+    final Map<String, String> propertiesRangerYarnPlugin = new HashMap<String, String>() {{
+      put("ranger-yarn-plugin-enabled", "No");
+    }};
+
+    final Config mockHiveEnvConf = easyMockSupport.createNiceMock(Config.class);
+    final Config mockRangerHdfsPluginConf = easyMockSupport.createNiceMock(Config.class);
+    final Config mockRangerHbasePluginConf = easyMockSupport.createNiceMock(Config.class);
+    final Config mockRangerKafkaPluginConf = easyMockSupport.createNiceMock(Config.class);
+    final Config mockRangerYarnPluginConf = easyMockSupport.createNiceMock(Config.class);
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
+        bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
+
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+      }
+    });
+
+    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
+    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", mockClusterExpected);
+    }}).atLeastOnce();
+    expect(mockClusterExpected.getDesiredConfigByType("hive-env")).andReturn(mockHiveEnvConf).atLeastOnce();
+    expect(mockClusterExpected.getDesiredConfigByType("ranger-hdfs-plugin-properties")).andReturn(mockRangerHdfsPluginConf).atLeastOnce();
+    expect(mockClusterExpected.getDesiredConfigByType("ranger-hbase-plugin-properties")).andReturn(mockRangerHbasePluginConf).atLeastOnce();
+    expect(mockClusterExpected.getDesiredConfigByType("ranger-kafka-plugin-properties")).andReturn(mockRangerKafkaPluginConf).atLeastOnce();
+    expect(mockClusterExpected.getDesiredConfigByType("ranger-yarn-plugin-properties")).andReturn(mockRangerYarnPluginConf).atLeastOnce();
+
+    expect(mockHiveEnvConf.getProperties()).andReturn(propertiesHiveEnv).times(2);
+    expect(mockRangerHdfsPluginConf.getProperties()).andReturn(propertiesRangerHdfsPlugin).times(2);
+    expect(mockRangerHbasePluginConf.getProperties()).andReturn(propertiesRangerHbasePlugin).times(2);
+    expect(mockRangerKafkaPluginConf.getProperties()).andReturn(propertiesRangerKafkaPlugin).times(2);
+    expect(mockRangerYarnPluginConf.getProperties()).andReturn(propertiesRangerYarnPlugin).times(2);
+
+    easyMockSupport.replayAll();
+    mockInjector.getInstance(UpgradeCatalog220.class).updateRangerEnvConfig();
+    easyMockSupport.verifyAll();
+
+  }
+
+  @Test
+  public void testGetSourceVersion() {
+    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
+    UpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor);
+    Assert.assertEquals("2.1.2.1", upgradeCatalog.getSourceVersion());
+  }
+
+  @Test
+  public void testGetTargetVersion() throws Exception {
+    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
+    UpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor);
+
+    Assert.assertEquals("2.2.0", upgradeCatalog.getTargetVersion());
+  }
+
+  // *********** Inner Classes that represent sections of the DDL ***********
+  // ************************************************************************
+
+  /**
+   * Verify that the upgrade table has two columns added to it.
+   */
+  class UpgradeSectionDDL implements SectionDDL {
+
+    Capture<DBAccessor.DBColumnInfo> upgradeTablePackageNameColumnCapture = new Capture<DBAccessor.DBColumnInfo>();
+    Capture<DBAccessor.DBColumnInfo> upgradeTableUpgradeTypeColumnCapture = new Capture<DBAccessor.DBColumnInfo>();
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public void execute(DBAccessor dbAccessor) throws SQLException {
+      // Add columns
+      dbAccessor.addColumn(eq("upgrade"), capture(upgradeTablePackageNameColumnCapture));
+      dbAccessor.addColumn(eq("upgrade"), capture(upgradeTableUpgradeTypeColumnCapture));
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override
+    public void verify(DBAccessor dbAccessor) throws SQLException {
+      // Verification section
+      DBAccessor.DBColumnInfo packageNameCol = upgradeTablePackageNameColumnCapture.getValue();
+      Assert.assertEquals(String.class, packageNameCol.getType());
+      Assert.assertEquals("upgrade_package", packageNameCol.getName());
+
+      DBAccessor.DBColumnInfo upgradeTypeCol = upgradeTableUpgradeTypeColumnCapture.getValue();
+      Assert.assertEquals(String.class, upgradeTypeCol.getType());
+      Assert.assertEquals("upgrade_type", upgradeTypeCol.getName());
+    }
+  }
+
+  @Test
+  public void testUpdateRangerUgsyncSiteConfig() throws Exception {
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
+    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
+    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
+    final Map<String, String> propertiesRangerUgsyncSite = new HashMap<String, String>() {{
+        put("ranger.usersync.source.impl.class", "ldap");
+    }};
+
+    final Config mockRangerUgsyncSite = easyMockSupport.createNiceMock(Config.class);
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
+        bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
+
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+      }
+    });
+
+    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
+    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", mockClusterExpected);
+    }}).atLeastOnce();
+    expect(mockClusterExpected.getDesiredConfigByType("ranger-ugsync-site")).andReturn(mockRangerUgsyncSite).atLeastOnce();
+
+    expect(mockRangerUgsyncSite.getProperties()).andReturn(propertiesRangerUgsyncSite).atLeastOnce();
+
+    Map<String, String> updates = Collections.singletonMap("ranger.usersync.source.impl.class", "org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder");
+    UpgradeCatalog220 upgradeCatalog220 = createMockBuilder(UpgradeCatalog220.class)
+            .withConstructor(Injector.class)
+            .withArgs(mockInjector)
+            .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
+                    Map.class, boolean.class, boolean.class)
+            .createMock();
+    upgradeCatalog220.updateConfigurationPropertiesForCluster(mockClusterExpected,
+            "ranger-ugsync-site", updates, true, false);
+    expectLastCall().once();
+
+    easyMockSupport.replayAll();
+    replay(upgradeCatalog220);
+    upgradeCatalog220.updateRangerUgsyncSiteConfig();
+    easyMockSupport.verifyAll();
+  }
+
+  @Test
+  public void testShouldDDLsBeExecutedOnUpgrade() throws Exception {
+    // GIVEN
+    Injector mockedInjector = mocksControl.createMock(Injector.class);
+    DBAccessor mockedDbAccessor = mocksControl.createMock(DBAccessor.class);
+    DaoUtils mockedDaoUtils = mocksControl.createMock(DaoUtils.class);
+    Configuration mockedConfiguration = mocksControl.createMock(Configuration.class);
+    StackUpgradeUtil mockedStackUpgradeUtil = mocksControl.createMock(StackUpgradeUtil.class);
+
+    Capture<String> capturedTableName = EasyMock.newCapture();
+    Capture<String> capturedPKColumn = EasyMock.newCapture();
+    Capture<List<DBAccessor.DBColumnInfo>> capturedColumns = EasyMock.newCapture();
+    Capture<DBAccessor.DBColumnInfo> capturedColumn = EasyMock.newCapture();
+    Capture<DBAccessor.DBColumnInfo> capturedHostRoleCommandColumn = EasyMock.newCapture();
+
+    Capture<String> capturedBlueprintTableName = EasyMock.newCapture();
+    Capture<DBAccessor.DBColumnInfo> capturedNewBlueprintColumn1 = EasyMock.newCapture();
+    Capture<DBAccessor.DBColumnInfo> capturedNewBlueprintColumn2 = EasyMock.newCapture();
+
+    Capture<DBAccessor.DBColumnInfo> stageSkipColumnCapture = EasyMock.newCapture();
+
+    EasyMock.expect(mockedInjector.getInstance(DaoUtils.class)).andReturn(mockedDaoUtils);
+    mockedInjector.injectMembers(anyObject(UpgradeCatalog.class));
+    EasyMock.expect(mockedConfiguration.getDatabaseType()).andReturn(Configuration.DatabaseType.POSTGRES).anyTimes();
+    EasyMock.expect(mockedConfiguration.getDatabaseUser()).andReturn("ambari");
+    EasyMock.expect(mockedConfiguration.getServerJDBCPostgresSchemaName()).andReturn("fo");
+
+
+    mockedDbAccessor.executeQuery("ALTER SCHEMA fo OWNER TO \"ambari\";");
+    mockedDbAccessor.executeQuery("ALTER ROLE \"ambari\" SET search_path to 'fo';");
+
+    // executeUpgradeDDLUpdates
+    mockedDbAccessor.addColumn(eq("upgrade"), capture(capturedColumn));
+    mockedDbAccessor.addColumn(eq("upgrade"), capture(capturedColumn));
+    mockedDbAccessor.addColumn(eq("upgrade"), capture(capturedColumn));
+
+    // addKerberosDescriptorTable
+    mockedDbAccessor.createTable(capture(capturedTableName), capture(capturedColumns), capture(capturedPKColumn));
+    mockedDbAccessor.alterColumn(eq("host_role_command"), capture(capturedHostRoleCommandColumn));
+
+    mockedDbAccessor.addColumn(capture(capturedBlueprintTableName), capture(capturedNewBlueprintColumn1));
+    mockedDbAccessor.addColumn(capture(capturedBlueprintTableName), capture(capturedNewBlueprintColumn2));
+
+    mockedDbAccessor.addColumn(eq("stage"), capture(stageSkipColumnCapture));
+
+    mocksControl.replay();
+
+    UpgradeCatalog220 testSubject = new UpgradeCatalog220(mockedInjector);
+    EasyMockSupport.injectMocks(testSubject);
+
+    // TODO: refactor the DI approach; don't directly access these members
+    testSubject.stackUpgradeUtil = mockedStackUpgradeUtil;
+    testSubject.dbAccessor = mockedDbAccessor;
+    testSubject.configuration = mockedConfiguration;
+
+    // WHEN
+    testSubject.upgradeSchema();
+
+    // THEN
+    Assert.assertEquals("The table name is wrong!", "kerberos_descriptor", capturedTableName.getValue());
+    Assert.assertEquals("The primary key is wrong!", "kerberos_descriptor_name", capturedPKColumn.getValue());
+    Assert.assertEquals("The number of columns is wrong!", 2, capturedColumns.getValue().size());
+
+    Assert.assertEquals("The table name is wrong!", "blueprint", capturedBlueprintTableName.getValue());
+
+    Assert.assertEquals("The column name is wrong!", "security_type", capturedNewBlueprintColumn1.getValue().getName());
+    Assert.assertEquals("The column name is wrong!", "security_descriptor_reference", capturedNewBlueprintColumn2
+      .getValue().getName());
+
+    Assert.assertEquals("The column name is wrong!", "supports_auto_skip_failure",
+        stageSkipColumnCapture.getValue().getName());
+  }
+
+  @Test
+  public void testUpdateHiveConfig() throws Exception {
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
+    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
+    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
+    final Map<String, String> propertiesHiveSite = new HashMap<String, String>() {{
+      put("hive.server2.logging.operation.log.location", "${system:java.io.tmpdir}/${system:user.name}/operation_logs");
+    }};
+    final Map<String, String> propertiesHiveSiteExpected = new HashMap<String, String>() {{
+      put("hive.server2.logging.operation.log.location", "/tmp/hive/operation_logs");
+    }};
+    final Map<String, String> propertiesHiveEnv = new HashMap<String, String>() {{
+      put("content", "test content");
+    }};
+    final Config hiveSiteConf = easyMockSupport.createNiceMock(Config.class);
+    final Config hiveEnvConf = easyMockSupport.createNiceMock(Config.class);
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
+        bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
+
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+      }
+    });
+    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
+    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", mockClusterExpected);
+    }}).atLeastOnce();
+    expect(mockClusterExpected.getDesiredConfigByType("hive-site")).andReturn(hiveSiteConf).atLeastOnce();
+    expect(mockClusterExpected.getDesiredConfigByType("hive-env")).andReturn(hiveEnvConf).atLeastOnce();
+
+    expect(hiveSiteConf.getProperties()).andReturn(propertiesHiveSite).once();
+    expect(hiveEnvConf.getProperties()).andReturn(propertiesHiveEnv).once();
+
+    UpgradeCatalog220 upgradeCatalog220 = createMockBuilder(UpgradeCatalog220.class)
+            .withConstructor(Injector.class)
+            .withArgs(mockInjector)
+            .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
+                    Map.class, boolean.class, boolean.class)
+            .createMock();
+    upgradeCatalog220.updateConfigurationPropertiesForCluster(mockClusterExpected,
+            "hive-site", propertiesHiveSiteExpected, true, false);
+    expectLastCall().once();
+    upgradeCatalog220.updateConfigurationPropertiesForCluster(mockClusterExpected,
+            "hive-env", propertiesHiveEnv, true, true);
+    expectLastCall().once();
+
+    easyMockSupport.replayAll();
+    replay(upgradeCatalog220);
+    upgradeCatalog220.updateHiveConfig();
+    easyMockSupport.verifyAll();
+
+  }
+
+  @Test
+  public void testUpdateHiveEnvContentHDP23() throws Exception {
+    UpgradeCatalog220 upgradeCatalog220 = new UpgradeCatalog220(injector);
+    String testContent = "# The heap size of the jvm stared by hive shell script can be controlled via:\n" +
+            "\n" +
+            "# Larger heap size may be required when running queries over large number of files or partitions.\n";
+    String expectedResult = "# The heap size of the jvm stared by hive shell script can be controlled via:\n" +
+            "\n" +
+            "if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
+            "  export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore\n" +
+            "else\n" +
+            "  export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\n" +
+            "fi\n" +
+            "\n" +
+            "export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"\n" +
+            "\n" +
+            "# Larger heap size may be required when running queries over large number of files or partitions.\n";
+    Assert.assertEquals(expectedResult, upgradeCatalog220.updateHiveEnvContentHDP23(testContent));
+  }
+
+
+  @Test
+  public void testUpdateHiveEnvContent() throws Exception {
+    UpgradeCatalog220 upgradeCatalog220 = new UpgradeCatalog220(injector);
+    // Test first case
+    String testContent = "# The heap size of the jvm stared by hive shell script can be controlled via:\n" +
+            "\n" +
+            "if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
+            "  export HADOOP_HEAPSIZE=\"{{hive_metastore_heapsize}}\"\n" +
+            "else\n" +
+            "  export HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\n" +
+            "fi\n" +
+            "\n" +
+            "export HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n" +
+            "\n" +
+            "# Larger heap size may be required when running queries over large number of files or partitions.\n";
+    String expectedResult = "# The heap size of the jvm stared by hive shell script can be controlled via:\n" +
+            "\n" +
+            "if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
+            "  export HADOOP_HEAPSIZE=\"{{hive_metastore_heapsize}}\"\n" +
+            "else\n" +
+            "  export HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\n" +
+            "fi\n" +
+            "\n" +
+            "export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"\n" +
+            "\n" +
+            "# Larger heap size may be required when running queries over large number of files or partitions.\n";
+    Assert.assertEquals(expectedResult, upgradeCatalog220.updateHiveEnvContent(testContent));
+    // Test second case
+    testContent = "# The heap size of the jvm stared by hive shell script can be controlled via:\n" +
+            "export SERVICE=$SERVICE\n" +
+            "if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
+            "  export HADOOP_HEAPSIZE=\"{{hive_metastore_heapsize}}\"\n" +
+            "else\n" +
+            "  export HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\n" +
+            "fi\n" +
+            "\n" +
+            "# Larger heap size may be required when running queries over large number of files or partitions.\n";
+    expectedResult = "# The heap size of the jvm stared by hive shell script can be controlled via:\n" +
+            "export SERVICE=$SERVICE\n" +
+            "if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
+            "  export HADOOP_HEAPSIZE=\"{{hive_metastore_heapsize}}\"\n" +
+            "else\n" +
+            "  export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\n" +
+            "fi\n" +
+            "\n" +
+            "export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"\n" +
+            "# Larger heap size may be required when running queries over large number of files or partitions.\n";
+    Assert.assertEquals(expectedResult, upgradeCatalog220.updateHiveEnvContent(testContent));
+  }
+
+  @Test
+  public void testUpdateKnoxTopology_NoRangerPlugin() throws Exception {
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
+    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
+    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
+    final Map<String, String> propertiesTopologyWithoutAuthorizationProvider = new HashMap<String, String>() {{
+      put("content", "<topology> <gateway>  </gateway> </topology>");
+    }};
+    final Map<String, String> propertiesTopologyExpected = new HashMap<String, String>() {{
+      put("content", "<topology> <gateway>  <provider>\n" +
+              "               <role>authorization</role>\n" +
+              "               <name>AclsAuthz</name>\n" +
+              "               <enabled>true</enabled>\n" +
+              "          </provider>\n" +
+              "     </gateway> </topology>\n");
+    }};
+    final Config mockTopologyConf = easyMockSupport.createNiceMock(Config.class);
+
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
+        bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
+
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+      }
+    });
+    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
+    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("cl1", mockClusterExpected);
+    }}).atLeastOnce();
+    expect(mockClusterExpected.getDesiredConfigByType("topology")).andReturn(mockTopologyConf).atLeastOnce();
+    expect(mockTopologyConf.getProperties()).andReturn(propertiesTopologyWithoutAuthorizationProvider).once();
+
+
+    UpgradeCatalog220 upgradeCatalog220 = createMockBuilder(UpgradeCatalog220.class)
+            .withConstructor(Injector.class)
+            .withArgs(mockInjector)
+            .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
+                    Map.class, boolean.class, boolean.class)
+            .createMock();
+    upgradeCatalog220.updateConfigurationPropertiesForCluster(mockClusterExpected,
+            "topology", propertiesTopologyExpected, true, false);
+    expectLastCall().once();
+
+    easyMockSupport.replayAll();
+    replay(upgradeCatalog220);
+    upgradeCatalog220.updateKnoxTopology();
+    easyMockSupport.verifyAll();
+  }
+
+  @Test
+  public void testUpdateKnoxTopology_ProviderAlreadyExists() throws Exception {
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
+    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
+    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
+    final Map<String, String> propertiesTopologyWithAuthorizationProvider = new HashMap<String, String>() {{
+      put("content", "<topology> <gateway>  <provider>" +
+              "<role>authorization</role>" +
+              "<name>AclsAuthz</name>" +
+              "<enabled>true</enabled>" +
+              "</provider>" +
+              "</gateway> </topology>\n");
+    }};
+
+    final Config mockTopologyConf = easyMockSupport.createNiceMock(Config.class);
+
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
+        bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
+
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+      }
+    });
+    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
+    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("cl1", mockClusterExpected);
+    }}).atLeastOnce();
+    expect(mockClusterExpected.getDesiredConfigByType("topology")).andReturn(mockTopologyConf).atLeastOnce();
+    expect(mockTopologyConf.getProperties()).andReturn(propertiesTopologyWithAuthorizationProvider).once();
+
+    // ATTENTION: this mock must not be called at all. If it is, something is wrong with the code.
+    UpgradeCatalog220 upgradeCatalog220 = createMockBuilder(UpgradeCatalog220.class)
+            .withConstructor(Injector.class)
+            .withArgs(mockInjector)
+            .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
+                    Map.class, boolean.class, boolean.class)
+            .createMock();
+
+
+
+    easyMockSupport.replayAll();
+    replay(upgradeCatalog220);
+    upgradeCatalog220.updateKnoxTopology();
+    easyMockSupport.verifyAll();
+  }
+
+  @Test
+  public void testUpdateKnoxTopology_RangerPluginAvailable() throws Exception {
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
+    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
+    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
+    final Map<String, String> propertiesTopologyWithoutAuthorizationProvider = new HashMap<String, String>() {{
+      put("content", "<topology> <gateway>  </gateway> </topology>");
+    }};
+    final Map<String, String> propertiesRangerKnoxPluginProperties = new HashMap<String, String>() {{
+      put("ranger-knox-plugin-enabled", "Yes");
+    }};
+    final Map<String, String> propertiesTopologyExpected = new HashMap<String, String>() {{
+      put("content", "<topology> <gateway>  <provider>\n" +
+              "               <role>authorization</role>\n" +
+              "               <name>XASecurePDPKnox</name>\n" +
+              "               <enabled>true</enabled>\n" +
+              "          </provider>\n" +
+              "     </gateway> </topology>\n");
+    }};
+    final Config mockTopologyConf = easyMockSupport.createNiceMock(Config.class);
+    final Config mockRangerKnoxPluginConf = easyMockSupport.createNiceMock(Config.class);
+
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
+        bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
+
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+      }
+    });
+    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
+    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("cl1", mockClusterExpected);
+    }}).atLeastOnce();
+    expect(mockClusterExpected.getDesiredConfigByType("topology")).andReturn(mockTopologyConf).atLeastOnce();
+    expect(mockClusterExpected.getDesiredConfigByType("ranger-knox-plugin-properties")).andReturn(mockRangerKnoxPluginConf).atLeastOnce();
+    expect(mockTopologyConf.getProperties()).andReturn(propertiesTopologyWithoutAuthorizationProvider).once();
+    expect(mockRangerKnoxPluginConf.getProperties()).andReturn(propertiesRangerKnoxPluginProperties).once();
+
+
+    UpgradeCatalog220 upgradeCatalog220 = createMockBuilder(UpgradeCatalog220.class)
+            .withConstructor(Injector.class)
+            .withArgs(mockInjector)
+            .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
+                    Map.class, boolean.class, boolean.class)
+            .createMock();
+    upgradeCatalog220.updateConfigurationPropertiesForCluster(mockClusterExpected,
+            "topology", propertiesTopologyExpected, true, false);
+    expectLastCall().once();
+
+    easyMockSupport.replayAll();
+    replay(upgradeCatalog220);
+    upgradeCatalog220.updateKnoxTopology();
+    easyMockSupport.verifyAll();
+
+  }
+
+  @Test
+  public void testUpdateAccumuloConfigs() throws Exception {
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
+    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
+    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
+
+    // We start with no client properties (< 2.2.0).
+    final Map<String, String> originalClientProperties = new HashMap<String, String>();
+    // And should get the following property on upgrade.
+    final Map<String, String> updatedClientProperties = new HashMap<String, String>() {
+      {
+        put("kerberos.server.primary", "{{bare_accumulo_principal}}");
+      }
+    };
+    
+    final Config clientConfig = easyMockSupport.createNiceMock(Config.class);
+
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
+        bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
+
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+      }
+    });
+
+    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
+    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", mockClusterExpected);
+    }}).once();
+
+    // Enable KERBEROS
+    expect(mockClusterExpected.getSecurityType()).andReturn(SecurityType.KERBEROS).once();
+    // Mock out our empty original properties
+    expect(mockClusterExpected.getDesiredConfigByType("client")).andReturn(clientConfig).atLeastOnce();
+    expect(clientConfig.getProperties()).andReturn(originalClientProperties).atLeastOnce();
+
+    UpgradeCatalog220 upgradeCatalog220 = createMockBuilder(UpgradeCatalog220.class)
+            .withConstructor(Injector.class)
+            .withArgs(mockInjector)
+            .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
+                    Map.class, boolean.class, boolean.class)
+            .createMock();
+    // Verify that we get this method called with the updated properties
+    upgradeCatalog220.updateConfigurationPropertiesForCluster(mockClusterExpected,
+            "client", updatedClientProperties, true, false);
+    expectLastCall().once();
+
+    // Run it
+    easyMockSupport.replayAll();
+    replay(upgradeCatalog220);
+    upgradeCatalog220.updateAccumuloConfigs();
+    easyMockSupport.verifyAll();
+  }
+}
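
The config-update tests above all follow the same partial-mock idiom: build a Guice child injector that binds only mocks, partially mock the catalog so that updateConfigurationPropertiesForCluster is intercepted, then run the production method and verify. The stand-alone sketch below distills that idiom; the test class name is invented for illustration, and the bindings, config type and property values are taken from the tests above (it assumes only what those tests already rely on).

    package org.apache.ambari.server.upgrade;

    import static org.easymock.EasyMock.createMockBuilder;
    import static org.easymock.EasyMock.createNiceMock;
    import static org.easymock.EasyMock.expect;
    import static org.easymock.EasyMock.expectLastCall;
    import static org.easymock.EasyMock.replay;

    import java.util.Collections;
    import java.util.Map;

    import javax.persistence.EntityManager;

    import org.apache.ambari.server.controller.AmbariManagementController;
    import org.apache.ambari.server.orm.DBAccessor;
    import org.apache.ambari.server.state.Cluster;
    import org.apache.ambari.server.state.Clusters;
    import org.apache.ambari.server.state.Config;
    import org.apache.ambari.server.state.stack.OsFamily;
    import org.easymock.EasyMockSupport;
    import org.junit.Test;

    import com.google.inject.AbstractModule;
    import com.google.inject.Guice;
    import com.google.inject.Injector;

    // Hypothetical class, for illustration only.
    public class UpgradeCatalogConfigUpdateIdiomTest {

      @Test
      public void illustrateConfigUpdateIdiom() throws Exception {
        EasyMockSupport easyMockSupport = new EasyMockSupport();
        final AmbariManagementController controller = easyMockSupport.createNiceMock(AmbariManagementController.class);
        final Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
        final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
        final Config rangerUgsyncSite = easyMockSupport.createNiceMock(Config.class);

        // Child injector that feeds the catalog nothing but mocks.
        final Injector injector = Guice.createInjector(new AbstractModule() {
          @Override
          protected void configure() {
            bind(AmbariManagementController.class).toInstance(controller);
            bind(Clusters.class).toInstance(clusters);
            bind(EntityManager.class).toInstance(createNiceMock(EntityManager.class));
            bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
            bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
          }
        });

        // controller -> clusters -> cluster -> desired config, as the catalog walks it.
        expect(controller.getClusters()).andReturn(clusters).once();
        expect(clusters.getClusters()).andReturn(Collections.singletonMap("c1", cluster)).atLeastOnce();
        expect(cluster.getDesiredConfigByType("ranger-ugsync-site")).andReturn(rangerUgsyncSite).atLeastOnce();
        expect(rangerUgsyncSite.getProperties()).andReturn(
            Collections.singletonMap("ranger.usersync.source.impl.class", "ldap")).atLeastOnce();

        // Partial-mock the catalog so only the persistence call is intercepted and asserted on.
        UpgradeCatalog220 catalog = createMockBuilder(UpgradeCatalog220.class)
            .withConstructor(Injector.class)
            .withArgs(injector)
            .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
                Map.class, boolean.class, boolean.class)
            .createMock();
        catalog.updateConfigurationPropertiesForCluster(cluster, "ranger-ugsync-site",
            Collections.singletonMap("ranger.usersync.source.impl.class",
                "org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder"),
            true, false);
        expectLastCall().once();

        easyMockSupport.replayAll();
        replay(catalog);
        catalog.updateRangerUgsyncSiteConfig();
        easyMockSupport.verifyAll();
      }
    }
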

http://git-wip-us.apache.org/repos/asf/ambari/blob/b3ff65d3/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog221Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog221Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog221Test.java
index 7cf386e..5eb3c14 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog221Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog221Test.java
@@ -19,22 +19,33 @@
 package org.apache.ambari.server.upgrade;
 
 
+import com.google.inject.AbstractModule;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
 import com.google.inject.Provider;
 import com.google.inject.persist.PersistService;
 import junit.framework.Assert;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.orm.DBAccessor;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.dao.StackDAO;
 import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.state.stack.OsFamily;
+import org.easymock.EasyMockSupport;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
 import javax.persistence.EntityManager;
 import java.lang.reflect.Method;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
 
 import static org.easymock.EasyMock.createMockBuilder;
 import static org.easymock.EasyMock.createNiceMock;
@@ -79,18 +90,20 @@ public class UpgradeCatalog221Test {
   public void testExecuteDMLUpdates() throws Exception {
     Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
     Method updateAlerts = UpgradeCatalog221.class.getDeclaredMethod("updateAlerts");
-
-
+    Method updateOozieConfigs = UpgradeCatalog221.class.getDeclaredMethod("updateOozieConfigs");
 
     UpgradeCatalog221 upgradeCatalog221 = createMockBuilder(UpgradeCatalog221.class)
-            .addMockedMethod(addNewConfigurationsFromXml)
-            .addMockedMethod(updateAlerts)
-            .createMock();
+      .addMockedMethod(addNewConfigurationsFromXml)
+      .addMockedMethod(updateAlerts)
+      .addMockedMethod(updateOozieConfigs)
+      .createMock();
 
     upgradeCatalog221.addNewConfigurationsFromXml();
     expectLastCall().once();
     upgradeCatalog221.updateAlerts();
     expectLastCall().once();
+    upgradeCatalog221.updateOozieConfigs();
+    expectLastCall().once();
 
 
     replay(upgradeCatalog221);
@@ -133,4 +146,52 @@ public class UpgradeCatalog221Test {
     Assert.assertEquals(result, expectedSource);
   }
 
+  @Test
+  public void testUpdateOozieConfigs() throws Exception {
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
+    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
+    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
+
+    final Config oozieSiteConf = easyMockSupport.createNiceMock(Config.class);
+    final Map<String, String> propertiesOozieSite = new HashMap<String, String>() {{
+      put("oozie.service.HadoopAccessorService.hadoop.configurations", "*=/etc/hadoop/conf");
+    }};
+
+    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
+      @Override
+      protected void configure() {
+        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
+        bind(Clusters.class).toInstance(mockClusters);
+        bind(EntityManager.class).toInstance(entityManager);
+
+        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
+        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
+      }
+    });
+
+    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
+    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", mockClusterExpected);
+    }}).atLeastOnce();
+    expect(mockClusterExpected.getDesiredConfigByType("oozie-site")).andReturn(oozieSiteConf).atLeastOnce();
+    expect(oozieSiteConf.getProperties()).andReturn(propertiesOozieSite).once();
+
+    UpgradeCatalog221 upgradeCatalog221 = createMockBuilder(UpgradeCatalog221.class)
+        .withConstructor(Injector.class)
+        .withArgs(mockInjector)
+        .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
+            Map.class, boolean.class, boolean.class)
+        .createMock();
+    upgradeCatalog221.updateConfigurationPropertiesForCluster(mockClusterExpected, "oozie-site",
+        Collections.singletonMap("oozie.service.HadoopAccessorService.hadoop.configurations", "*={{hadoop_conf_dir}}"),
+        true, false);
+    expectLastCall().once();
+
+    easyMockSupport.replayAll();
+    replay(upgradeCatalog221);
+    upgradeCatalog221.updateOozieConfigs();
+    easyMockSupport.verifyAll();
+  }
+
 }


[3/4] ambari git commit: AMBARI-14357: UpgradeCatalogs need to be reversioned (jluniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/b3ff65d3/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java
new file mode 100644
index 0000000..1e39143
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java
@@ -0,0 +1,1371 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.upgrade;
+
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParser;
+import com.google.inject.Inject;
+import com.google.inject.Injector;
+import com.google.inject.persist.Transactional;
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Config;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
+import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
+import org.apache.ambari.server.orm.dao.ArtifactDAO;
+import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.dao.ClusterVersionDAO;
+import org.apache.ambari.server.orm.dao.DaoUtils;
+import org.apache.ambari.server.orm.dao.HostVersionDAO;
+import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
+import org.apache.ambari.server.orm.dao.StackDAO;
+import org.apache.ambari.server.orm.dao.UpgradeDAO;
+import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
+import org.apache.ambari.server.orm.entities.ArtifactEntity;
+import org.apache.ambari.server.orm.entities.ClusterEntity;
+import org.apache.ambari.server.orm.entities.ClusterVersionEntity;
+import org.apache.ambari.server.orm.entities.HostEntity;
+import org.apache.ambari.server.orm.entities.HostVersionEntity;
+import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
+import org.apache.ambari.server.orm.entities.UpgradeEntity;
+
+
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.RepositoryVersionState;
+import org.apache.ambari.server.state.SecurityType;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.StackInfo;
+import org.apache.ambari.server.state.alert.SourceType;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
+import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
+import org.apache.ambari.server.state.stack.upgrade.Direction;
+import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
+import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
+import org.apache.ambari.server.utils.VersionUtils;
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+
+import javax.xml.transform.OutputKeys;
+import javax.xml.transform.Transformer;
+import javax.xml.transform.TransformerConfigurationException;
+import javax.xml.transform.TransformerException;
+import javax.xml.transform.TransformerFactory;
+import javax.xml.transform.dom.DOMSource;
+import javax.xml.transform.stream.StreamResult;
+import java.io.StringWriter;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.regex.Matcher;
+
+/**
+ * Upgrade catalog for version 2.2.0.
+ */
+public class UpgradeCatalog220 extends AbstractUpgradeCatalog {
+
+  private static final String UPGRADE_TABLE = "upgrade";
+  private static final String STORM_SITE = "storm-site";
+  private static final String HDFS_SITE_CONFIG = "hdfs-site";
+  private static final String TOPOLOGY_CONFIG = "topology";
+  private static final String KAFKA_BROKER = "kafka-broker";
+  private static final String KAFKA_ENV_CONFIG = "kafka-env";
+  private static final String KAFKA_ENV_CONTENT_KERBEROS_PARAMS =
+    "export KAFKA_KERBEROS_PARAMS={{kafka_kerberos_params}}";
+  private static final String AMS_ENV = "ams-env";
+  private static final String AMS_HBASE_ENV = "ams-hbase-env";
+  private static final String AMS_SITE = "ams-site";
+  private static final String AMS_HBASE_SITE = "ams-hbase-site";
+  private static final String AMS_HBASE_SITE_ZK_TIMEOUT_PROPERTY =
+    "zookeeper.session.timeout.localHBaseCluster";
+  private static final String AMS_HBASE_SITE_NORMALIZER_ENABLED_PROPERTY = "hbase.normalizer.enabled";
+  private static final String AMS_HBASE_SITE_NORMALIZER_PERIOD_PROPERTY = "hbase.normalizer.period";
+  private static final String AMS_HBASE_SITE_NORMALIZER_CLASS_PROPERTY = "hbase.master.normalizer.class";
+  private static final String TIMELINE_METRICS_HBASE_FIFO_COMPACTION_ENABLED = "timeline.metrics.hbase.fifo.compaction.enabled";
+  private static final String HBASE_ENV_CONFIG = "hbase-env";
+  private static final String FLUME_ENV_CONFIG = "flume-env";
+  private static final String HIVE_SITE_CONFIG = "hive-site";
+  private static final String HIVE_ENV_CONFIG = "hive-env";
+  private static final String RANGER_ENV_CONFIG = "ranger-env";
+  private static final String RANGER_UGSYNC_SITE_CONFIG = "ranger-ugsync-site";
+  private static final String ZOOKEEPER_LOG4J_CONFIG = "zookeeper-log4j";
+  private static final String NIMBS_MONITOR_FREQ_SECS_PROPERTY = "nimbus.monitor.freq.secs";
+  private static final String STORM_METRICS_REPORTER = "metrics.reporter.register";
+  private static final String HIVE_SERVER2_OPERATION_LOG_LOCATION_PROPERTY = "hive.server2.logging.operation.log.location";
+  private static final String HADOOP_ENV_CONFIG = "hadoop-env";
+  private static final String CONTENT_PROPERTY = "content";
+  private static final String HADOOP_ENV_CONTENT_TO_APPEND = "\n{% if is_datanode_max_locked_memory_set %}\n" +
+    "# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n" +
+    "# Makes sense to fix only when runing DN as root \n" +
+    "if [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then\n" +
+    "  ulimit -l {{datanode_max_locked_memory}}\n" +
+    "fi\n" +
+    "{% endif %}\n";
+
+  private static final String DOWNGRADE_ALLOWED_COLUMN = "downgrade_allowed";
+  private static final String UPGRADE_SKIP_FAILURE_COLUMN = "skip_failures";
+  private static final String UPGRADE_SKIP_SC_FAILURE_COLUMN = "skip_sc_failures";
+  public static final String UPGRADE_PACKAGE_COL = "upgrade_package";
+  public static final String UPGRADE_TYPE_COL = "upgrade_type";
+  public static final String REPO_VERSION_TABLE = "repo_version";
+
+  private static final String HOST_ROLE_COMMAND_TABLE = "host_role_command";
+  private static final String HOST_ID_COL = "host_id";
+
+  private static final String KERBEROS_DESCRIPTOR_TABLE = "kerberos_descriptor";
+  private static final String KERBEROS_DESCRIPTOR_NAME_COLUMN = "kerberos_descriptor_name";
+  private static final String KERBEROS_DESCRIPTOR_COLUMN = "kerberos_descriptor";
+  private static final String RANGER_HDFS_PLUGIN_ENABLED_PROPERTY = "ranger-hdfs-plugin-enabled";
+  private static final String RANGER_HIVE_PLUGIN_ENABLED_PROPERTY = "ranger-hive-plugin-enabled";
+  private static final String RANGER_HBASE_PLUGIN_ENABLED_PROPERTY = "ranger-hbase-plugin-enabled";
+  private static final String RANGER_STORM_PLUGIN_ENABLED_PROPERTY = "ranger-storm-plugin-enabled";
+  private static final String RANGER_KNOX_PLUGIN_ENABLED_PROPERTY = "ranger-knox-plugin-enabled";
+  private static final String RANGER_YARN_PLUGIN_ENABLED_PROPERTY = "ranger-yarn-plugin-enabled";
+  private static final String RANGER_KAFKA_PLUGIN_ENABLED_PROPERTY = "ranger-kafka-plugin-enabled";
+
+  private static final String RANGER_USERSYNC_SOURCE_IMPL_CLASS_PROPERTY = "ranger.usersync.source.impl.class";
+
+  private static final String BLUEPRINT_TABLE = "blueprint";
+  private static final String SECURITY_TYPE_COLUMN = "security_type";
+  private static final String SECURITY_DESCRIPTOR_REF_COLUMN = "security_descriptor_reference";
+
+  private static final String STAGE_TABLE = "stage";
+
+  private static final String KNOX_SERVICE = "KNOX";
+
+  /**
+   * Logger.
+   */
+  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog220.class);
+
+  private static final String OOZIE_SITE_CONFIG = "oozie-site";
+  private static final String OOZIE_SERVICE_HADOOP_CONFIGURATIONS_PROPERTY_NAME = "oozie.service.HadoopAccessorService.hadoop.configurations";
+  private static final String OLD_DEFAULT_HADOOP_CONFIG_PATH = "/etc/hadoop/conf";
+  private static final String NEW_DEFAULT_HADOOP_CONFIG_PATH = "{{hadoop_conf_dir}}";
+
+  @Inject
+  DaoUtils daoUtils;
+
+  @Inject
+  private RepositoryVersionDAO repositoryVersionDAO;
+
+  @Inject
+  private ClusterDAO clusterDAO;
+
+  // ----- Constructors ------------------------------------------------------
+
+  /**
+   * Don't forget to register new UpgradeCatalogs in {@link org.apache.ambari.server.upgrade.SchemaUpgradeHelper.UpgradeHelperModule#configure()}
+   *
+   * @param injector Guice injector to track dependencies and uses bindings to inject them.
+   */
+  @Inject
+  public UpgradeCatalog220(Injector injector) {
+    super(injector);
+    this.injector = injector;
+  }
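+
+  // Note: the corresponding registration in SchemaUpgradeHelper.UpgradeHelperModule#configure()
+  // is assumed to follow the existing multibinder pattern for upgrade catalogs, roughly:
+  //
+  //   Multibinder<UpgradeCatalog> catalogBinder =
+  //       Multibinder.newSetBinder(binder(), UpgradeCatalog.class);
+  //   catalogBinder.addBinding().to(UpgradeCatalog220.class);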
+
+  // ----- UpgradeCatalog ----------------------------------------------------
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String getTargetVersion() {
+    return "2.2.0";
+  }
+
+  // ----- AbstractUpgradeCatalog --------------------------------------------
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String getSourceVersion() {
+    return "2.1.2.1";
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  protected void executeDDLUpdates() throws AmbariException, SQLException {
+    executeUpgradeDDLUpdates();
+
+    // Alter the host_role_command table to allow host_id to be nullable
+    dbAccessor.alterColumn(HOST_ROLE_COMMAND_TABLE, new DBColumnInfo(HOST_ID_COL, Long.class, null, null, true));
+
+    addKerberosDescriptorTable();
+    executeBlueprintDDLUpdates();
+    executeStageDDLUpdates();
+  }
+
+  protected void executeUpgradeDDLUpdates() throws AmbariException, SQLException {
+    updateUpgradesDDL();
+  }
+
+  private void addKerberosDescriptorTable() throws SQLException {
+    List<DBAccessor.DBColumnInfo> columns = new ArrayList<DBAccessor.DBColumnInfo>();
+    columns.add(new DBAccessor.DBColumnInfo(KERBEROS_DESCRIPTOR_NAME_COLUMN, String.class, 255, null, false));
+    columns.add(new DBAccessor.DBColumnInfo(KERBEROS_DESCRIPTOR_COLUMN, char[].class, null, null, false));
+
+    LOG.debug("Creating table [ {} ] with columns [ {} ] and primary key: [ {} ]", KERBEROS_DESCRIPTOR_TABLE, columns, KERBEROS_DESCRIPTOR_NAME_COLUMN);
+    dbAccessor.createTable(KERBEROS_DESCRIPTOR_TABLE, columns, KERBEROS_DESCRIPTOR_NAME_COLUMN);
+  }
+
+  private void executeBlueprintDDLUpdates() throws AmbariException, SQLException {
+    dbAccessor.addColumn(BLUEPRINT_TABLE, new DBAccessor.DBColumnInfo(SECURITY_TYPE_COLUMN,
+      String.class, 32, "NONE", false));
+    dbAccessor.addColumn(BLUEPRINT_TABLE, new DBAccessor.DBColumnInfo(SECURITY_DESCRIPTOR_REF_COLUMN,
+      String.class, null, null, true));
+  }
+
+  /**
+   * Updates the {@code stage} table by:
+   * <ul>
+   * <li>Adding the {@code supports_auto_skip_failure} column</li>
+   * </ul>
+   *
+   * @throws SQLException
+   */
+  protected void executeStageDDLUpdates() throws SQLException {
+    dbAccessor.addColumn(STAGE_TABLE,
+      new DBAccessor.DBColumnInfo("supports_auto_skip_failure", Integer.class, 1, 0, false));
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  protected void executePreDMLUpdates() throws AmbariException, SQLException {
+    // execute DDL updates
+    executeStackUpgradeDDLUpdates();
+
+    // DDL and DML mixed code, double check here
+    bootstrapRepoVersionForHDP21();
+
+    // execute DML updates, no DDL things after this line
+    executeUpgradePreDMLUpdates();
+  }
+
+  /**
+   * Updates the following columns on the {@value #UPGRADE_TABLE} table to
+   * default values:
+   * <ul>
+   * <li>{@link #DOWNGRADE_ALLOWED_COLUMN}</li>
+   * <li>{@link #UPGRADE_SKIP_FAILURE_COLUMN}</li>
+   * <li>{@link #UPGRADE_SKIP_SC_FAILURE_COLUMN}</li>
+   * </ul>
+   *
+   * @throws AmbariException
+   * @throws SQLException
+   */
+  protected void executeUpgradePreDMLUpdates() throws AmbariException, SQLException {
+    UpgradeDAO upgradeDAO = injector.getInstance(UpgradeDAO.class);
+    List<UpgradeEntity> upgrades = upgradeDAO.findAll();
+    for (UpgradeEntity upgrade: upgrades){
+      if (upgrade.isDowngradeAllowed() == null) {
+        upgrade.setDowngradeAllowed(true);
+      }
+
+      // ensure that these are set to false for existing upgrades
+      upgrade.setAutoSkipComponentFailures(false);
+      upgrade.setAutoSkipServiceCheckFailures(false);
+
+      // apply changes
+      upgradeDAO.merge(upgrade);
+
+      LOG.info(String.format("Updated upgrade id %s, upgrade pack %s from version %s to %s",
+        upgrade.getId(), upgrade.getUpgradePackage(), upgrade.getFromVersion(),
+        upgrade.getToVersion()));
+    }
+
+    // make the columns nullable now that they have defaults
+    dbAccessor.setColumnNullable(UPGRADE_TABLE, DOWNGRADE_ALLOWED_COLUMN, false);
+    dbAccessor.setColumnNullable(UPGRADE_TABLE, UPGRADE_SKIP_FAILURE_COLUMN, false);
+    dbAccessor.setColumnNullable(UPGRADE_TABLE, UPGRADE_SKIP_SC_FAILURE_COLUMN, false);
+  }
+
+  @Override
+  protected void executeDMLUpdates() throws AmbariException, SQLException {
+    addNewConfigurationsFromXml();
+    updateAlertDefinitions();
+    updateStormConfigs();
+    updateAMSConfigs();
+    updateHDFSConfigs();
+    updateHbaseEnvConfig();
+    updateFlumeEnvConfig();
+    updateHadoopEnv();
+    updateKafkaConfigs();
+    updateRangerEnvConfig();
+    updateRangerUgsyncSiteConfig();
+    updateZookeeperLog4j();
+    updateHiveConfig();
+    updateAccumuloConfigs();
+    updateKerberosDescriptorArtifacts();
+    updateKnoxTopology();
+  }
+
+  protected void updateKnoxTopology() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
+      Config topology = cluster.getDesiredConfigByType(TOPOLOGY_CONFIG);
+      if (topology != null) {
+        String content = topology.getProperties().get(CONTENT_PROPERTY);
+        if (content != null) {
+          Document topologyXml = convertStringToDocument(content);
+          if (topologyXml != null) {
+            Element root = topologyXml.getDocumentElement();
+            if (root != null)  {
+              NodeList providerNodes = root.getElementsByTagName("provider");
+              boolean authorizationProviderExists = false;
+              try {
+                for (int i = 0; i < providerNodes.getLength(); i++) {
+                  Node providerNode = providerNodes.item(i);
+                  NodeList childNodes = providerNode.getChildNodes();
+                  for (int k = 0; k < childNodes.getLength(); k++) {
+                    Node child = childNodes.item(k);
+                    child.normalize();
+                    String childTextContent = child.getTextContent();
+                    if (childTextContent != null && childTextContent.toLowerCase().equals("authorization")) {
+                      authorizationProviderExists = true;
+                      break;
+                    }
+                  }
+                  if (authorizationProviderExists) {
+                    break;
+                  }
+                }
+              } catch (Exception e) {
+                e.printStackTrace();
+                LOG.error("Error occurred while checking whether an 'authorization' provider already exists in the topology. " + e);
+                return;
+              }
+              if (!authorizationProviderExists) {
+                NodeList nodeList = root.getElementsByTagName("gateway");
+                if (nodeList != null && nodeList.getLength() > 0) {
+                  boolean rangerPluginEnabled = isRangerPluginEnabled(cluster);
+
+                  Node gatewayNode = nodeList.item(0);
+                  Element newProvider = topologyXml.createElement("provider");
+
+                  Element role = topologyXml.createElement("role");
+                  role.appendChild(topologyXml.createTextNode("authorization"));
+                  newProvider.appendChild(role);
+
+                  Element name = topologyXml.createElement("name");
+                  if (rangerPluginEnabled) {
+                    name.appendChild(topologyXml.createTextNode("XASecurePDPKnox"));
+                  } else {
+                    name.appendChild(topologyXml.createTextNode("AclsAuthz"));
+                  }
+                  newProvider.appendChild(name);
+
+                  Element enabled = topologyXml.createElement("enabled");
+                  enabled.appendChild(topologyXml.createTextNode("true"));
+                  newProvider.appendChild(enabled);
+
+
+                  gatewayNode.appendChild(newProvider);
+
+                  DOMSource topologyDomSource = new DOMSource(root);
+                  StringWriter writer = new StringWriter();
+                  try {
+                    Transformer transformer = TransformerFactory.newInstance().newTransformer();
+                    transformer.setOutputProperty(OutputKeys.ENCODING, "UTF-8");
+                    transformer.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes");
+                    transformer.setOutputProperty(OutputKeys.METHOD, "xml");
+                    transformer.setOutputProperty(OutputKeys.INDENT, "yes");
+                    transformer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "5");
+                    transformer.transform(topologyDomSource, new StreamResult(writer));
+                  } catch (TransformerConfigurationException e) {
+                    e.printStackTrace();
+                    LOG.error("Unable to create transformer instance, to convert Document(XML) to String. " + e);
+                    return;
+                  } catch (TransformerException e) {
+                    e.printStackTrace();
+                    LOG.error("Unable to transform Document(XML) to StringWriter. " + e);
+                    return;
+                  }
+
+                  content = writer.toString();
+                  Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, content);
+                  updateConfigurationPropertiesForCluster(cluster, TOPOLOGY_CONFIG, updates, true, false);
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+  /**
+   * Moves the upgrade_package column from the repo_version table to the upgrade table as follows:
+   * <ul>
+   * <li>add the upgrade_package column to the upgrade table as String(255), nullable</li>
+   * <li>populate the column in the upgrade table</li>
+   * <li>drop the column from the repo_version table</li>
+   * <li>make the column in the upgrade table non-nullable</li>
+   * </ul>
+   * This has to be called as part of DML and not DDL since the persistence service has to be started.
+   * @throws AmbariException
+   * @throws SQLException
+   */
+  @Transactional
+  protected void executeStackUpgradeDDLUpdates() throws SQLException, AmbariException {
+    final Configuration.DatabaseType databaseType = configuration.getDatabaseType();
+
+    // Add columns
+    if (!dbAccessor.tableHasColumn(UPGRADE_TABLE, UPGRADE_PACKAGE_COL)) {
+      LOG.info("Adding upgrade_package column to upgrade table.");
+      dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_PACKAGE_COL, String.class, 255, null, true));
+    }
+    if (!dbAccessor.tableHasColumn(UPGRADE_TABLE, UPGRADE_TYPE_COL)) {
+      LOG.info("Adding upgrade_type column to upgrade table.");
+      dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_TYPE_COL, String.class, 32, null, true));
+    }
+
+    // Populate values in upgrade table.
+    boolean success = populateUpgradeTable();
+
+    if (!success) {
+      throw new AmbariException("Errors found while populating the upgrade table with values for columns upgrade_type and upgrade_package.");
+    }
+
+    if (dbAccessor.tableHasColumn(REPO_VERSION_TABLE, UPGRADE_PACKAGE_COL)) {
+      LOG.info("Dropping upgrade_package column from repo_version table.");
+      dbAccessor.dropColumn(REPO_VERSION_TABLE, UPGRADE_PACKAGE_COL);
+
+      // Now, make the added column non-nullable
+      // Make the hosts id non-null after all the values are populated
+      LOG.info("Making upgrade_package column in the upgrade table non-nullable.");
+      if (databaseType == Configuration.DatabaseType.DERBY) {
+        // This is a workaround for UpgradeTest.java unit test
+        dbAccessor.executeQuery("ALTER TABLE " + UPGRADE_TABLE + " ALTER column " + UPGRADE_PACKAGE_COL + " NOT NULL");
+      } else {
+        dbAccessor.alterColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_PACKAGE_COL, String.class, 255, null, false));
+      }
+    }
+
+    if (dbAccessor.tableHasColumn(REPO_VERSION_TABLE, UPGRADE_TYPE_COL)) {
+      // Now, make the added column non-nullable
+      // Make the hosts id non-null after all the values are populated
+      LOG.info("Making upgrade_type column in the upgrade table non-nullable.");
+      if (databaseType == Configuration.DatabaseType.DERBY) {
+        // This is a workaround for UpgradeTest.java unit test
+        dbAccessor.executeQuery("ALTER TABLE " + UPGRADE_TABLE + " ALTER column " + UPGRADE_TYPE_COL + " NOT NULL");
+      } else {
+        dbAccessor.alterColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_TYPE_COL, String.class, 32, null, false));
+      }
+    }
+  }
+
+  /**
+   * Populate the upgrade table with values for the columns upgrade_type and upgrade_package.
+   * The upgrade_type will default to {@code org.apache.ambari.server.state.stack.upgrade.UpgradeType.ROLLING}
+   * whereas the upgrade_package will be calculated.
+   * @return {@code true} on success, {@code false} otherwise.
+   */
+  private boolean populateUpgradeTable() {
+    boolean success = true;
+    Statement statement = null;
+    ResultSet rs = null;
+    try {
+      statement = dbAccessor.getConnection().createStatement();
+      if (statement != null) {
+        // Need to use SQL since the schema is changing and some of the columns have not yet been added.
+        rs = statement.executeQuery("SELECT upgrade_id, cluster_id, from_version, to_version, direction, upgrade_package, upgrade_type FROM upgrade");
+        if (rs != null) {
+          try {
+            while (rs.next()) {
+              final long upgradeId = rs.getLong("upgrade_id");
+              final long clusterId = rs.getLong("cluster_id");
+              final String fromVersion = rs.getString("from_version");
+              final String toVersion = rs.getString("to_version");
+              final Direction direction = Direction.valueOf(rs.getString("direction"));
+              // These two values are likely null.
+              String upgradePackage = rs.getString("upgrade_package");
+              String upgradeType = rs.getString("upgrade_type");
+
+              LOG.info(MessageFormat.format("Populating rows for the upgrade table record with " +
+                  "upgrade_id: {0,number,#}, cluster_id: {1,number,#}, from_version: {2}, to_version: {3}, direction: {4}",
+                upgradeId, clusterId, fromVersion, toVersion, direction));
+
+              // Set all upgrades that have been done so far to type "rolling"
+              if (StringUtils.isEmpty(upgradeType)) {
+                LOG.info("Updating the record's upgrade_type to " + UpgradeType.ROLLING);
+                dbAccessor.executeQuery("UPDATE upgrade SET upgrade_type = '" + UpgradeType.ROLLING + "' WHERE upgrade_id = " + upgradeId);
+              }
+
+              if (StringUtils.isEmpty(upgradePackage)) {
+                String version = null;
+                StackEntity stack = null;
+
+                if (direction == Direction.UPGRADE) {
+                  version = toVersion;
+                } else if (direction == Direction.DOWNGRADE) {
+                  // TODO AMBARI-12698, this is going to be a problem.
+                  // During a downgrade, the "to_version" is overwritten to the source version, but the "from_version"
+                  // doesn't swap. E.g.,
+                  //  upgrade_id | from_version |  to_version  | direction
+                  // ------------+--------------+--------------+----------
+                  //           1 | 2.2.6.0-2800 | 2.3.0.0-2557 | UPGRADE
+                  //           2 | 2.2.6.0-2800 | 2.2.6.0-2800 | DOWNGRADE
+                  version = fromVersion;
+                }
+
+                ClusterEntity cluster = clusterDAO.findById(clusterId);
+
+                if (null != cluster) {
+                  stack = cluster.getDesiredStack();
+                  upgradePackage = calculateUpgradePackage(stack, version);
+                } else {
+                  LOG.error("Could not find a cluster with cluster_id " + clusterId);
+                }
+
+                if (!StringUtils.isEmpty(upgradePackage)) {
+                  LOG.info("Updating the record's upgrade_package to " + upgradePackage);
+                  dbAccessor.executeQuery("UPDATE upgrade SET upgrade_package = '" + upgradePackage + "' WHERE upgrade_id = " + upgradeId);
+                } else {
+                  success = false;
+                  LOG.error("Unable to populate column upgrade_package for record in table upgrade with id " + upgradeId);
+                }
+              }
+            }
+          } catch (Exception e) {
+            success = false;
+            LOG.error("Unable to populate the upgrade_type and upgrade_package columns of the upgrade table.", e);
+          }
+        }
+      }
+    } catch (Exception e) {
+      success = false;
+      LOG.error("Failed to retrieve records from the upgrade table to populate the upgrade_type and upgrade_package columns.", e);
+    } finally {
+      try {
+        if (rs != null) {
+          rs.close();
+        }
+        if (statement != null) {
+          statement.close();
+        }
+      } catch (SQLException e) {
+        // Ignore failures while closing the result set and statement.
+      }
+    }
+    return success;
+  }
+
+  /**
+   * Find the single Repo Version for the given stack and version, and return its upgrade_package column.
+   * Because the upgrade_package column is going to be removed from this entity, must use raw SQL
+   * instead of the entity class.
+   * @param stack Stack
+   * @param version Stack version
+   * @return The value of the upgrade_package column, or null if not found.
+   */
+  private String calculateUpgradePackage(StackEntity stack, String version) {
+    String upgradePackage = null;
+    // Find the corresponding repo_version, and extract its upgrade_package
+    if (null != version && null != stack) {
+      RepositoryVersionEntity repoVersion = repositoryVersionDAO.findByStackNameAndVersion(stack.getStackName(), version);
+      if (null == repoVersion) {
+        LOG.error("Could not find a repo_version record for stack " + stack.getStackName() + " and version " + version);
+        return null;
+      }
+
+      Statement statement = null;
+      ResultSet rs = null;
+      try {
+        statement = dbAccessor.getConnection().createStatement();
+        if (statement != null) {
+          // Need to use SQL since the schema is changing and the entity will no longer have the upgrade_package column.
+          rs = statement.executeQuery("SELECT upgrade_package FROM repo_version WHERE repo_version_id = " + repoVersion.getId());
+          if (rs != null && rs.next()) {
+            upgradePackage = rs.getString("upgrade_package");
+          }
+        }
+      } catch (Exception e) {
+        LOG.error("Failed to retrieve upgrade_package for repo_version record with id " + repoVersion.getId() + ". Exception: " + e.getMessage());
+      } finally {
+        try {
+          if (rs != null) {
+            rs.close();
+          }
+          if (statement != null) {
+            statement.close();
+          }
+        } catch (SQLException e) {
+          // Ignore failures while closing the result set and statement.
+        }
+      }
+    }
+    return upgradePackage;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  protected void updateKerberosDescriptorArtifact(ArtifactDAO artifactDAO, ArtifactEntity artifactEntity) throws AmbariException {
+    if (artifactEntity != null) {
+      Map<String, Object> data = artifactEntity.getArtifactData();
+
+      if (data != null) {
+        final KerberosDescriptor kerberosDescriptor = new KerberosDescriptorFactory().createInstance(data);
+
+        if (kerberosDescriptor != null) {
+          KerberosServiceDescriptor hdfsService = kerberosDescriptor.getService("HDFS");
+          if(hdfsService != null) {
+            // Before Ambari 2.2.0 the hdfs identity was expected to be defined in the HDFS service;
+            // move it to the NAMENODE component.
+            KerberosIdentityDescriptor hdfsIdentity = hdfsService.getIdentity("hdfs");
+            KerberosComponentDescriptor namenodeComponent = hdfsService.getComponent("NAMENODE");
+            hdfsIdentity.setName("hdfs");
+            hdfsService.removeIdentity("hdfs");
+            namenodeComponent.putIdentity(hdfsIdentity);
+          }
+          updateKerberosDescriptorIdentityReferences(kerberosDescriptor, "/HDFS/hdfs", "/HDFS/NAMENODE/hdfs");
+          updateKerberosDescriptorIdentityReferences(kerberosDescriptor.getServices(), "/HDFS/hdfs", "/HDFS/NAMENODE/hdfs");
+
+          artifactEntity.setArtifactData(kerberosDescriptor.toMap());
+          artifactDAO.merge(artifactEntity);
+        }
+      }
+    }
+  }
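+  // Sketch of the descriptor change applied above: the "hdfs" identity that previously lived at
+  // /HDFS/hdfs is moved under the NAMENODE component, and existing identity references are rewritten
+  // from /HDFS/hdfs to /HDFS/NAMENODE/hdfs.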
+
+  /**
+   * If still on HDP 2.1, then no repo versions exist, so need to bootstrap the HDP 2.1 repo version,
+   * and mark it as CURRENT in the cluster_version table for the cluster, as well as the host_version table
+   * for all hosts.
+   */
+  @Transactional
+  public void bootstrapRepoVersionForHDP21() throws AmbariException, SQLException {
+    final String hardcodedInitialVersion = "2.1.0.0-0001";
+    AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
+    AmbariMetaInfo ambariMetaInfo = amc.getAmbariMetaInfo();
+    StackDAO stackDAO = injector.getInstance(StackDAO.class);
+    RepositoryVersionHelper repositoryVersionHelper = injector.getInstance(RepositoryVersionHelper.class);
+    RepositoryVersionDAO repositoryVersionDAO = injector.getInstance(RepositoryVersionDAO.class);
+    ClusterVersionDAO clusterVersionDAO = injector.getInstance(ClusterVersionDAO.class);
+    HostVersionDAO hostVersionDAO = injector.getInstance(HostVersionDAO.class);
+
+    Clusters clusters = amc.getClusters();
+    if (clusters == null) {
+      LOG.error("Unable to get Clusters entity.");
+      return;
+    }
+
+    for (Cluster cluster : clusters.getClusters().values()) {
+      ClusterEntity clusterEntity = clusterDAO.findByName(cluster.getClusterName());
+      final StackId stackId = cluster.getCurrentStackVersion();
+      LOG.info(MessageFormat.format("Analyzing cluster {0}, currently at stack {1} and version {2}",
+        cluster.getClusterName(), stackId.getStackName(), stackId.getStackVersion()));
+
+      if (stackId.getStackName().equalsIgnoreCase("HDP") && stackId.getStackVersion().equalsIgnoreCase("2.1")) {
+        final StackInfo stackInfo = ambariMetaInfo.getStack(stackId.getStackName(), stackId.getStackVersion());
+        StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
+
+        LOG.info("Bootstrapping the versions since using HDP-2.1");
+
+        // The actual repo version is not known, so use the hardcoded initial version in the display name.
+        String displayName = stackId.getStackName() + "-" + hardcodedInitialVersion;
+
+        // However, the Repo URLs should be correct.
+        String operatingSystems = repositoryVersionHelper.serializeOperatingSystems(stackInfo.getRepositories());
+
+        // Create the Repo Version if it doesn't already exist.
+        RepositoryVersionEntity repoVersionEntity = repositoryVersionDAO.findByDisplayName(displayName);
+        if (null != repoVersionEntity) {
+          LOG.info(MessageFormat.format("A Repo Version already exists with Display Name: {0}", displayName));
+        } else {
+          final long repoVersionIdSeq = repositoryVersionDAO.findMaxId("id");
+          // Safe to attempt to add the sequence if it doesn't exist already.
+          addSequence("repo_version_id_seq", repoVersionIdSeq, false);
+
+          repoVersionEntity = repositoryVersionDAO.create(
+            stackEntity, hardcodedInitialVersion, displayName, operatingSystems);
+          LOG.info(MessageFormat.format("Created Repo Version with ID: {0,number,#}\n, Display Name: {1}, Repo URLs: {2}\n",
+            repoVersionEntity.getId(), displayName, operatingSystems));
+        }
+
+        // Create the Cluster Version if it doesn't already exist.
+        ClusterVersionEntity clusterVersionEntity = clusterVersionDAO.findByClusterAndStackAndVersion(cluster.getClusterName(),
+          stackId, hardcodedInitialVersion);
+
+        if (null != clusterVersionEntity) {
+          LOG.info(MessageFormat.format("A Cluster Version version for cluster: {0}, version: {1}, already exists; its state is {2}.",
+            cluster.getClusterName(), clusterVersionEntity.getRepositoryVersion().getVersion(), clusterVersionEntity.getState()));
+
+          // If there are no CURRENT cluster versions, make this one the CURRENT one.
+          if (clusterVersionEntity.getState() != RepositoryVersionState.CURRENT &&
+            clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.CURRENT).isEmpty()) {
+            clusterVersionEntity.setState(RepositoryVersionState.CURRENT);
+            clusterVersionDAO.merge(clusterVersionEntity);
+          }
+        } else {
+          final long clusterVersionIdSeq = clusterVersionDAO.findMaxId("id");
+          // Safe to attempt to add the sequence if it doesn't exist already.
+          addSequence("cluster_version_id_seq", clusterVersionIdSeq, false);
+
+          clusterVersionEntity = clusterVersionDAO.create(clusterEntity, repoVersionEntity, RepositoryVersionState.CURRENT,
+            System.currentTimeMillis(), System.currentTimeMillis(), "admin");
+          LOG.info(MessageFormat.format("Created Cluster Version with ID: {0,number,#}, cluster: {1}, version: {2}, state: {3}.",
+            clusterVersionEntity.getId(), cluster.getClusterName(), clusterVersionEntity.getRepositoryVersion().getVersion(),
+            clusterVersionEntity.getState()));
+        }
+
+        // Create the Host Versions if they don't already exist.
+        Collection<HostEntity> hosts = clusterEntity.getHostEntities();
+        boolean addedAtLeastOneHost = false;
+        if (null != hosts && !hosts.isEmpty()) {
+          for (HostEntity hostEntity : hosts) {
+            HostVersionEntity hostVersionEntity = hostVersionDAO.findByClusterStackVersionAndHost(cluster.getClusterName(),
+              stackId, hardcodedInitialVersion, hostEntity.getHostName());
+
+            if (null != hostVersionEntity) {
+              LOG.info(MessageFormat.format("A Host Version version for cluster: {0}, version: {1}, host: {2}, already exists; its state is {3}.",
+                cluster.getClusterName(), hostVersionEntity.getRepositoryVersion().getVersion(),
+                hostEntity.getHostName(), hostVersionEntity.getState()));
+
+              if (hostVersionEntity.getState() != RepositoryVersionState.CURRENT &&
+                hostVersionDAO.findByClusterHostAndState(cluster.getClusterName(), hostEntity.getHostName(),
+                  RepositoryVersionState.CURRENT).isEmpty()) {
+                hostVersionEntity.setState(RepositoryVersionState.CURRENT);
+                hostVersionDAO.merge(hostVersionEntity);
+              }
+            } else {
+              // This should only be done the first time.
+              if (!addedAtLeastOneHost) {
+                final long hostVersionIdSeq = hostVersionDAO.findMaxId("id");
+                // Safe to attempt to add the sequence if it doesn't exist already.
+                addSequence("host_version_id_seq", hostVersionIdSeq, false);
+                addedAtLeastOneHost = true;
+              }
+
+              hostVersionEntity = new HostVersionEntity(hostEntity, repoVersionEntity, RepositoryVersionState.CURRENT);
+              hostVersionDAO.create(hostVersionEntity);
+              LOG.info(MessageFormat.format("Created Host Version with ID: {0,number,#}, cluster: {1}, version: {2}, host: {3}, state: {4}.",
+                hostVersionEntity.getId(), cluster.getClusterName(), hostVersionEntity.getRepositoryVersion().getVersion(),
+                hostEntity.getHostName(), hostVersionEntity.getState()));
+            }
+          }
+        } else {
+          LOG.info(MessageFormat.format("Not inserting any Host Version records since cluster {0} does not have any hosts.",
+            cluster.getClusterName()));
+        }
+      }
+    }
+  }
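+  // Sketch of what the bootstrap above creates for an HDP-2.1 cluster: a repo_version row for
+  // 2.1.0.0-0001 (display name "HDP-2.1.0.0-0001"), a cluster_version row in state CURRENT, and a
+  // CURRENT host_version row for every host; existing rows are only promoted to CURRENT when the
+  // cluster or host has no CURRENT version yet.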
+
+  /**
+   * Adds the following columns to the {@value #UPGRADE_TABLE} table:
+   * <ul>
+   * <li>{@value #DOWNGRADE_ALLOWED_COLUMN}</li>
+   * <li>{@value #UPGRADE_SKIP_FAILURE_COLUMN}</li>
+   * <li>{@value #UPGRADE_SKIP_SC_FAILURE_COLUMN}</li>
+   * </ul>
+   *
+   * @throws SQLException
+   */
+  protected void updateUpgradesDDL() throws SQLException{
+    dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(DOWNGRADE_ALLOWED_COLUMN, Short.class, 1, null, true));
+    dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_SKIP_FAILURE_COLUMN, Short.class, 1, null, true));
+    dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_SKIP_SC_FAILURE_COLUMN, Short.class, 1, null, true));
+  }
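+  // Roughly equivalent DDL (sketch only; the real column names come from the constants referenced
+  // above and the SQL type chosen for Short.class depends on the database dialect):
+  //   ALTER TABLE upgrade ADD <DOWNGRADE_ALLOWED_COLUMN> SMALLINT NULL;
+  //   ALTER TABLE upgrade ADD <UPGRADE_SKIP_FAILURE_COLUMN> SMALLINT NULL;
+  //   ALTER TABLE upgrade ADD <UPGRADE_SKIP_SC_FAILURE_COLUMN> SMALLINT NULL;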
+
+  /**
+   * Modifies the JSON of some of the alert definitions which have changed
+   * between Ambari versions.
+   */
+  protected void updateAlertDefinitions() {
+    LOG.info("Updating alert definitions.");
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    AlertDefinitionDAO alertDefinitionDAO = injector.getInstance(AlertDefinitionDAO.class);
+    Clusters clusters = ambariManagementController.getClusters();
+
+    Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
+    for (final Cluster cluster : clusterMap.values()) {
+      long clusterID = cluster.getClusterId();
+
+      final AlertDefinitionEntity journalNodeProcessAlertDefinitionEntity = alertDefinitionDAO.findByName(
+        clusterID, "journalnode_process");
+      final AlertDefinitionEntity hostDiskUsageAlertDefinitionEntity = alertDefinitionDAO.findByName(
+          clusterID, "ambari_agent_disk_usage");
+
+      if (journalNodeProcessAlertDefinitionEntity != null) {
+        String source = journalNodeProcessAlertDefinitionEntity.getSource();
+
+        journalNodeProcessAlertDefinitionEntity.setSource(modifyJournalnodeProcessAlertSource(source));
+        journalNodeProcessAlertDefinitionEntity.setSourceType(SourceType.WEB);
+        journalNodeProcessAlertDefinitionEntity.setHash(UUID.randomUUID().toString());
+
+        alertDefinitionDAO.merge(journalNodeProcessAlertDefinitionEntity);
+        LOG.info("journalnode_process alert definition was updated.");
+      }
+
+      if (hostDiskUsageAlertDefinitionEntity != null) {
+        hostDiskUsageAlertDefinitionEntity.setDescription("This host-level alert is triggered if the amount of disk space " +
+            "used goes above specific thresholds. The default threshold values are 50% for WARNING and 80% for CRITICAL.");
+        hostDiskUsageAlertDefinitionEntity.setLabel("Host Disk Usage");
+
+        alertDefinitionDAO.merge(hostDiskUsageAlertDefinitionEntity);
+        LOG.info("ambari_agent_disk_usage alert definition was updated.");
+      }
+
+    }
+  }
+
+  /**
+   * Modifies type of the journalnode_process alert to WEB.
+   * Changes reporting text and uri according to the WEB type.
+   * Removes default_port property.
+   */
+  String modifyJournalnodeProcessAlertSource(String source) {
+    JsonObject rootJson = new JsonParser().parse(source).getAsJsonObject();
+
+    rootJson.remove("type");
+    rootJson.addProperty("type", "WEB");
+
+    rootJson.remove("default_port");
+
+    rootJson.remove("uri");
+    JsonObject uriJson = new JsonObject();
+    uriJson.addProperty("http", "{{hdfs-site/dfs.journalnode.http-address}}");
+    uriJson.addProperty("https", "{{hdfs-site/dfs.journalnode.https-address}}");
+    uriJson.addProperty("kerberos_keytab", "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}");
+    uriJson.addProperty("kerberos_principal", "{{hdfs-site/dfs.web.authentication.kerberos.principal}}");
+    uriJson.addProperty("https_property", "{{hdfs-site/dfs.http.policy}}");
+    uriJson.addProperty("https_property_value", "HTTPS_ONLY");
+    uriJson.addProperty("connection_timeout", 5.0);
+    rootJson.add("uri", uriJson);
+
+    rootJson.getAsJsonObject("reporting").getAsJsonObject("ok").remove("text");
+    rootJson.getAsJsonObject("reporting").getAsJsonObject("ok").addProperty(
+      "text", "HTTP {0} response in {2:.3f}s");
+
+    rootJson.getAsJsonObject("reporting").getAsJsonObject("warning").remove("text");
+    rootJson.getAsJsonObject("reporting").getAsJsonObject("warning").addProperty(
+      "text", "HTTP {0} response from {1} in {2:.3f}s ({3})");
+    rootJson.getAsJsonObject("reporting").getAsJsonObject("warning").remove("value");
+
+    rootJson.getAsJsonObject("reporting").getAsJsonObject("critical").remove("text");
+    rootJson.getAsJsonObject("reporting").getAsJsonObject("critical").addProperty("text",
+      "Connection failed to {1} ({3})");
+    rootJson.getAsJsonObject("reporting").getAsJsonObject("critical").remove("value");
+
+    return rootJson.toString();
+  }
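+  // Resulting alert source after the rewrite above (sketch of the relevant fields):
+  //   {
+  //     "type": "WEB",
+  //     "uri": {
+  //       "http":  "{{hdfs-site/dfs.journalnode.http-address}}",
+  //       "https": "{{hdfs-site/dfs.journalnode.https-address}}",
+  //       "https_property": "{{hdfs-site/dfs.http.policy}}",
+  //       "https_property_value": "HTTPS_ONLY",
+  //       "connection_timeout": 5.0, ...
+  //     },
+  //     "reporting": { "ok": { "text": "HTTP {0} response in {2:.3f}s" }, ... }
+  //   }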
+
+  protected void updateHadoopEnv() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+
+    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
+      Config hadoopEnvConfig = cluster.getDesiredConfigByType(HADOOP_ENV_CONFIG);
+      if (hadoopEnvConfig != null) {
+        String content = hadoopEnvConfig.getProperties().get(CONTENT_PROPERTY);
+        if (content != null) {
+          content += HADOOP_ENV_CONTENT_TO_APPEND;
+          Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, content);
+          updateConfigurationPropertiesForCluster(cluster, HADOOP_ENV_CONFIG, updates, true, false);
+        }
+      }
+    }
+  }
+
+  protected void updateHDFSConfigs() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(
+      AmbariManagementController.class);
+    Map<String, Cluster> clusterMap = getCheckedClusterMap(ambariManagementController.getClusters());
+
+    for (final Cluster cluster : clusterMap.values()) {
+      // Remove dfs.namenode.rpc-address property when NN HA is enabled
+      if (cluster.getDesiredConfigByType(HDFS_SITE_CONFIG) != null && isNNHAEnabled(cluster)) {
+        Set<String> removePropertiesSet = new HashSet<>();
+        removePropertiesSet.add("dfs.namenode.rpc-address");
+        removeConfigurationPropertiesFromCluster(cluster, HDFS_SITE_CONFIG, removePropertiesSet);
+      }
+    }
+  }
+
+  protected void updateZookeeperLog4j() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+
+    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
+      Config zookeeperLog4jConfig = cluster.getDesiredConfigByType(ZOOKEEPER_LOG4J_CONFIG);
+      if (zookeeperLog4jConfig != null) {
+        String content = zookeeperLog4jConfig.getProperties().get(CONTENT_PROPERTY);
+        if (content != null) {
+          content = content.replaceAll("[\n^]\\s*log4j\\.rootLogger\\s*=\\s*INFO\\s*,\\s*CONSOLE", "\nlog4j.rootLogger=INFO, ROLLINGFILE");
+          Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, content);
+          updateConfigurationPropertiesForCluster(cluster, ZOOKEEPER_LOG4J_CONFIG, updates, true, false);
+        }
+      }
+    }
+  }
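+  // Example of the replacement above: a zookeeper-log4j content line
+  //   log4j.rootLogger=INFO, CONSOLE
+  // becomes
+  //   log4j.rootLogger=INFO, ROLLINGFILE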
+
+  protected void updateStormConfigs() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
+      Config stormSiteProps = cluster.getDesiredConfigByType(STORM_SITE);
+      if (stormSiteProps != null) {
+        Map<String, String> updates = new HashMap<>();
+
+        String nimbusMonitorFreqSecs = stormSiteProps.getProperties().get(NIMBS_MONITOR_FREQ_SECS_PROPERTY);
+        if (nimbusMonitorFreqSecs != null && nimbusMonitorFreqSecs.equals("10")) {
+          updates.put(NIMBS_MONITOR_FREQ_SECS_PROPERTY, "120");
+        }
+
+        Service amsService = null;
+        try {
+          amsService = cluster.getService("AMBARI_METRICS");
+        } catch(AmbariException ambariException) {
+          LOG.info("AMBARI_METRICS service not found in cluster while updating storm-site properties");
+        }
+        String metricsReporter = stormSiteProps.getProperties().get(STORM_METRICS_REPORTER);
+        if (amsService != null && StringUtils.isEmpty(metricsReporter)) {
+          updates.put(STORM_METRICS_REPORTER, "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter");
+        }
+
+        updateConfigurationPropertiesForCluster(cluster, STORM_SITE, updates, true, false);
+      }
+    }
+  }
+
+  protected void updateHiveConfig() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
+      Config hiveSiteConfig = cluster.getDesiredConfigByType(HIVE_SITE_CONFIG);
+      if (hiveSiteConfig != null) {
+        String hiveServer2OperationLogLocation = hiveSiteConfig.getProperties().get(HIVE_SERVER2_OPERATION_LOG_LOCATION_PROPERTY);
+        if (hiveServer2OperationLogLocation != null && hiveServer2OperationLogLocation.equals("${system:java.io.tmpdir}/${system:user.name}/operation_logs")) {
+          Map<String, String> updates = Collections.singletonMap(HIVE_SERVER2_OPERATION_LOG_LOCATION_PROPERTY, "/tmp/hive/operation_logs");
+          updateConfigurationPropertiesForCluster(cluster, HIVE_SITE_CONFIG, updates, true, false);
+        }
+      }
+      StackId stackId = cluster.getCurrentStackVersion();
+      boolean isStackNotLess23 = (stackId != null && stackId.getStackName().equals("HDP") &&
+              VersionUtils.compareVersions(stackId.getStackVersion(), "2.3") >= 0);
+
+      Config hiveEnvConfig = cluster.getDesiredConfigByType(HIVE_ENV_CONFIG);
+      if (hiveEnvConfig != null) {
+        Map<String, String> hiveEnvProps = new HashMap<String, String>();
+        String content = hiveEnvConfig.getProperties().get(CONTENT_PROPERTY);
+        // For HDP-2.3 we need to add hive heap size management to content,
+        // for others we need to update content
+        if(content != null) {
+          if(isStackNotLess23) {
+            content = updateHiveEnvContentHDP23(content);
+          } else {
+            content = updateHiveEnvContent(content);
+          }
+          hiveEnvProps.put(CONTENT_PROPERTY, content);
+          updateConfigurationPropertiesForCluster(cluster, HIVE_ENV_CONFIG, hiveEnvProps, true, true);
+        }
+      }
+
+    }
+  }
+
+  protected void updateHbaseEnvConfig() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
+      // Track per cluster whether the hbase-env content needs to be persisted.
+      boolean updateConfig = false;
+      StackId stackId = cluster.getCurrentStackVersion();
+      Config hbaseEnvConfig = cluster.getDesiredConfigByType(HBASE_ENV_CONFIG);
+      if (hbaseEnvConfig != null) {
+        String content = hbaseEnvConfig.getProperties().get(CONTENT_PROPERTY);
+        if (content != null) {
+          if (!content.contains("-Djava.io.tmpdir")) {
+            content += "\n\nexport HBASE_OPTS=\"-Djava.io.tmpdir={{java_io_tmpdir}}\"";
+            updateConfig = true;
+          }
+          if (stackId != null && stackId.getStackName().equals("HDP") &&
+              VersionUtils.compareVersions(stackId.getStackVersion(), "2.2") >= 0) {
+            if (!content.contains("MaxDirectMemorySize={{hbase_max_direct_memory_size}}m")) {
+              String newPartOfContent = "\n\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %}\"\n\n";
+              content += newPartOfContent;
+              updateConfig = true;
+            }
+          }
+          // Persist the content if any of the changes above applied.
+          if (updateConfig) {
+            Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, content);
+            updateConfigurationPropertiesForCluster(cluster, HBASE_ENV_CONFIG, updates, true, false);
+          }
+        }
+      }
+    }
+  }
+
+  protected void updateFlumeEnvConfig() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+
+    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
+      Config flumeEnvConfig = cluster.getDesiredConfigByType(FLUME_ENV_CONFIG);
+      if (flumeEnvConfig != null) {
+        String content = flumeEnvConfig.getProperties().get(CONTENT_PROPERTY);
+        if (content != null && !content.contains("/usr/lib/flume/lib/ambari-metrics-flume-sink.jar")) {
+          String newPartOfContent = "\n\n" +
+            "# Note that the Flume conf directory is always included in the classpath.\n" +
+            "# Add flume sink to classpath\n" +
+            "if [ -e \"/usr/lib/flume/lib/ambari-metrics-flume-sink.jar\" ]; then\n" +
+            "  export FLUME_CLASSPATH=$FLUME_CLASSPATH:/usr/lib/flume/lib/ambari-metrics-flume-sink.jar\n" +
+            "fi\n";
+          content += newPartOfContent;
+          Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, content);
+          updateConfigurationPropertiesForCluster(cluster, FLUME_ENV_CONFIG, updates, true, false);
+        }
+      }
+    }
+  }
+
+  protected void updateAMSConfigs() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    Clusters clusters = ambariManagementController.getClusters();
+
+    if (clusters != null) {
+      Map<String, Cluster> clusterMap = clusters.getClusters();
+
+      if (clusterMap != null && !clusterMap.isEmpty()) {
+        for (final Cluster cluster : clusterMap.values()) {
+
+          Config amsHbaseEnv = cluster.getDesiredConfigByType(AMS_HBASE_ENV);
+          if (amsHbaseEnv != null) {
+            Map<String, String> amsHbaseEnvProperties = amsHbaseEnv.getProperties();
+            String content = amsHbaseEnvProperties.get("content");
+            Map<String, String> newProperties = new HashMap<>();
+            newProperties.put("content", updateAmsHbaseEnvContent(content));
+            updateConfigurationPropertiesForCluster(cluster, AMS_HBASE_ENV, newProperties, true, true);
+          }
+
+          Config amsEnv = cluster.getDesiredConfigByType(AMS_ENV);
+          if (amsEnv != null) {
+            Map<String, String> amsEnvProperties = amsEnv.getProperties();
+            String content = amsEnvProperties.get("content");
+            Map<String, String> newProperties = new HashMap<>();
+            newProperties.put("content", updateAmsEnvContent(content));
+            updateConfigurationPropertiesForCluster(cluster, AMS_ENV, newProperties, true, true);
+          }
+
+          Config amsSite = cluster.getDesiredConfigByType(AMS_SITE);
+          if (amsSite != null) {
+            Map<String, String> currentAmsSiteProperties = amsSite.getProperties();
+            Map<String, String> newProperties = new HashMap<>();
+
+            //Changed AMS result set limit from 5760 to 15840.
+            if(currentAmsSiteProperties.containsKey("timeline.metrics.service.default.result.limit") &&
+              currentAmsSiteProperties.get("timeline.metrics.service.default.result.limit").equals(String.valueOf(5760))) {
+              LOG.info("Updating timeline.metrics.service.default.result.limit to 15840");
+              newProperties.put("timeline.metrics.service.default.result.limit", String.valueOf(15840));
+            }
+
+            //Interval
+            newProperties.put("timeline.metrics.cluster.aggregator.second.interval", String.valueOf(120));
+            newProperties.put("timeline.metrics.cluster.aggregator.minute.interval", String.valueOf(300));
+            newProperties.put("timeline.metrics.host.aggregator.minute.interval", String.valueOf(300));
+
+            //ttl
+            newProperties.put("timeline.metrics.cluster.aggregator.second.ttl", String.valueOf(2592000));
+            newProperties.put("timeline.metrics.cluster.aggregator.minute.ttl", String.valueOf(7776000));
+
+            //checkpoint
+            newProperties.put("timeline.metrics.cluster.aggregator.second.checkpointCutOffMultiplier", String.valueOf(2));
+
+            //disabled
+            newProperties.put("timeline.metrics.cluster.aggregator.second.disabled", String.valueOf(false));
+
+            //Add compaction policy property
+            newProperties.put(TIMELINE_METRICS_HBASE_FIFO_COMPACTION_ENABLED, String.valueOf(true));
+
+            updateConfigurationPropertiesForCluster(cluster, AMS_SITE, newProperties, true, true);
+          }
+
+          Config amsHbaseSite = cluster.getDesiredConfigByType(AMS_HBASE_SITE);
+          if (amsHbaseSite != null) {
+            Map<String, String> amsHbaseSiteProperties = amsHbaseSite.getProperties();
+            Map<String, String> newProperties = new HashMap<>();
+
+            String zkTimeout = amsHbaseSiteProperties.get(AMS_HBASE_SITE_ZK_TIMEOUT_PROPERTY);
+            // if old default, set new default
+            if ("20000".equals(zkTimeout)) {
+              newProperties.put(AMS_HBASE_SITE_ZK_TIMEOUT_PROPERTY, "120000");
+            }
+
+            // Add the region normalizer properties if they are missing.
+            if(!amsHbaseSiteProperties.containsKey(AMS_HBASE_SITE_NORMALIZER_ENABLED_PROPERTY)) {
+              LOG.info("Enabling " + AMS_HBASE_SITE_NORMALIZER_ENABLED_PROPERTY);
+              newProperties.put(AMS_HBASE_SITE_NORMALIZER_ENABLED_PROPERTY, String.valueOf(true));
+            }
+
+            if(!amsHbaseSiteProperties.containsKey(AMS_HBASE_SITE_NORMALIZER_PERIOD_PROPERTY)) {
+              LOG.info("Updating " + AMS_HBASE_SITE_NORMALIZER_PERIOD_PROPERTY);
+              newProperties.put(AMS_HBASE_SITE_NORMALIZER_PERIOD_PROPERTY, String.valueOf(600000));
+            }
+
+            if(!amsHbaseSiteProperties.containsKey(AMS_HBASE_SITE_NORMALIZER_CLASS_PROPERTY)) {
+              LOG.info("Updating " + AMS_HBASE_SITE_NORMALIZER_CLASS_PROPERTY);
+              newProperties.put(AMS_HBASE_SITE_NORMALIZER_CLASS_PROPERTY,
+                "org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer");
+            }
+            updateConfigurationPropertiesForCluster(cluster, AMS_HBASE_SITE, newProperties, true, true);
+          }
+        }
+      }
+    }
+
+  }
+
+  protected String updateAmsHbaseEnvContent(String content) {
+    if (content == null) {
+      return null;
+    }
+    String regSearch = "export HBASE_HEAPSIZE=";
+    String replacement = "#export HBASE_HEAPSIZE=";
+    content = content.replaceAll(regSearch, replacement);
+    content += "\n" +
+      "# The maximum amount of heap to use for hbase shell.\n" +
+      "export HBASE_SHELL_OPTS=\"-Xmx256m\"\n";
+    return content;
+  }
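+  // Example effect (sketch, heap size value is illustrative): an ams-hbase-env content line
+  //   export HBASE_HEAPSIZE=1024
+  // is commented out as
+  //   #export HBASE_HEAPSIZE=1024
+  // and an export of HBASE_SHELL_OPTS="-Xmx256m" is appended to the content.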
+
+  protected String updateAmsEnvContent(String content) {
+    if (content == null) {
+      return null;
+    }
+    if (!content.contains("AMS_COLLECTOR_GC_OPTS")) {
+      content += "\n" +
+        "# AMS Collector GC options\n" +
+        "export AMS_COLLECTOR_GC_OPTS=\"-XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 " +
+        "-XX:+UseCMSInitiatingOccupancyOnly -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps " +
+        "-XX:+UseGCLogFileRotation -XX:GCLogFileSize=10M " +
+        "-Xloggc:{{ams_collector_log_dir}}/collector-gc.log-`date +'%Y%m%d%H%M'`\"\n" +
+        "export AMS_COLLECTOR_OPTS=\"$AMS_COLLECTOR_OPTS $AMS_COLLECTOR_GC_OPTS\"\n";
+    }
+
+    if (!content.contains("HBASE_NORMALIZATION_ENABLED")) {
+      content += "\n" +
+        "# HBase compaction policy enabled\n" +
+        "export HBASE_NORMALIZATION_ENABLED={{ams_hbase_normalizer_enabled}}\n";
+    }
+
+    if (!content.contains("HBASE_FIFO_COMPACTION_POLICY_ENABLED")) {
+      content += "\n" +
+        "# HBase compaction policy enabled\n" +
+        "export HBASE_FIFO_COMPACTION_POLICY_ENABLED={{ams_hbase_fifo_compaction_policy_enabled}}\n";
+    }
+
+    return content;
+  }
+
+  protected void updateKafkaConfigs() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    Clusters clusters = ambariManagementController.getClusters();
+
+    if (clusters != null) {
+      Map<String, Cluster> clusterMap = clusters.getClusters();
+      if (clusterMap != null && !clusterMap.isEmpty()) {
+        for (final Cluster cluster : clusterMap.values()) {
+          Set<String> installedServices = cluster.getServices().keySet();
+          Config kafkaBroker = cluster.getDesiredConfigByType(KAFKA_BROKER);
+          if (kafkaBroker != null) {
+            Map<String, String> newProperties = new HashMap<>();
+            Map<String, String> kafkaBrokerProperties = kafkaBroker.getProperties();
+            String kafkaMetricsReporters = kafkaBrokerProperties.get("kafka.metrics.reporters");
+            if (kafkaMetricsReporters == null ||
+              "{{kafka_metrics_reporters}}".equals(kafkaMetricsReporters)) {
+
+              if (installedServices.contains("AMBARI_METRICS")) {
+                newProperties.put("kafka.metrics.reporters", "org.apache.hadoop.metrics2.sink.kafka.KafkaTimelineMetricsReporter");
+              } else if (installedServices.contains("GANGLIA")) {
+                newProperties.put("kafka.metrics.reporters", "kafka.ganglia.KafkaGangliaMetricsReporter");
+              } else {
+                newProperties.put("kafka.metrics.reporters", " ");
+              }
+
+            }
+            if (!newProperties.isEmpty()) {
+              updateConfigurationPropertiesForCluster(cluster, KAFKA_BROKER, newProperties, true, true);
+            }
+          }
+
+          Config kafkaEnv = cluster.getDesiredConfigByType(KAFKA_ENV_CONFIG);
+          if (kafkaEnv != null) {
+            String kafkaEnvContent = kafkaEnv.getProperties().get(CONTENT_PROPERTY);
+            if (kafkaEnvContent != null && !kafkaEnvContent.contains(KAFKA_ENV_CONTENT_KERBEROS_PARAMS)) {
+              kafkaEnvContent += "\n\nexport KAFKA_KERBEROS_PARAMS=\"$KAFKA_KERBEROS_PARAMS {{kafka_kerberos_params}}\"";
+              Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, kafkaEnvContent);
+              updateConfigurationPropertiesForCluster(cluster, KAFKA_ENV_CONFIG, updates, true, false);
+            }
+          }
+        }
+      }
+    }
+  }
+
+  protected void updateRangerEnvConfig() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+
+    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
+      Map<String, String> newRangerEnvProps = new HashMap<>();
+      Config rangerHdfsPluginProperties = cluster.getDesiredConfigByType("ranger-hdfs-plugin-properties");
+      if (rangerHdfsPluginProperties != null && rangerHdfsPluginProperties.getProperties().containsKey(RANGER_HDFS_PLUGIN_ENABLED_PROPERTY)) {
+        newRangerEnvProps.put(RANGER_HDFS_PLUGIN_ENABLED_PROPERTY, rangerHdfsPluginProperties.getProperties().get(RANGER_HDFS_PLUGIN_ENABLED_PROPERTY));
+      }
+      Config hiveEnvProperties = cluster.getDesiredConfigByType("hive-env");
+      if (hiveEnvProperties != null && hiveEnvProperties.getProperties().containsKey("hive_security_authorization")
+              && hiveEnvProperties.getProperties().get("hive_security_authorization").toLowerCase().equals("ranger")) {
+        newRangerEnvProps.put(RANGER_HIVE_PLUGIN_ENABLED_PROPERTY, "Yes");
+      }
+      Config rangerHbasePluginProperties = cluster.getDesiredConfigByType("ranger-hbase-plugin-properties");
+      if (rangerHbasePluginProperties != null && rangerHbasePluginProperties.getProperties().containsKey(RANGER_HBASE_PLUGIN_ENABLED_PROPERTY)) {
+        newRangerEnvProps.put(RANGER_HBASE_PLUGIN_ENABLED_PROPERTY, rangerHbasePluginProperties.getProperties().get(RANGER_HBASE_PLUGIN_ENABLED_PROPERTY));
+      }
+
+      Config rangerStormPluginProperties = cluster.getDesiredConfigByType("ranger-storm-plugin-properties");
+      if (rangerStormPluginProperties != null && rangerStormPluginProperties.getProperties().containsKey(RANGER_STORM_PLUGIN_ENABLED_PROPERTY)) {
+        newRangerEnvProps.put(RANGER_STORM_PLUGIN_ENABLED_PROPERTY, rangerStormPluginProperties.getProperties().get(RANGER_STORM_PLUGIN_ENABLED_PROPERTY));
+      }
+      Config rangerKnoxPluginProperties = cluster.getDesiredConfigByType("ranger-knox-plugin-properties");
+      if (rangerKnoxPluginProperties != null && rangerKnoxPluginProperties.getProperties().containsKey(RANGER_KNOX_PLUGIN_ENABLED_PROPERTY)) {
+        newRangerEnvProps.put(RANGER_KNOX_PLUGIN_ENABLED_PROPERTY, rangerKnoxPluginProperties.getProperties().get(RANGER_KNOX_PLUGIN_ENABLED_PROPERTY));
+      }
+      Config rangerYarnPluginProperties = cluster.getDesiredConfigByType("ranger-yarn-plugin-properties");
+      if (rangerYarnPluginProperties != null && rangerYarnPluginProperties.getProperties().containsKey(RANGER_YARN_PLUGIN_ENABLED_PROPERTY)) {
+        newRangerEnvProps.put(RANGER_YARN_PLUGIN_ENABLED_PROPERTY, rangerYarnPluginProperties.getProperties().get(RANGER_YARN_PLUGIN_ENABLED_PROPERTY));
+      }
+      Config rangerKafkaPluginProperties = cluster.getDesiredConfigByType("ranger-kafka-plugin-properties");
+      if (rangerKafkaPluginProperties != null && rangerKafkaPluginProperties.getProperties().containsKey(RANGER_KAFKA_PLUGIN_ENABLED_PROPERTY)) {
+        newRangerEnvProps.put(RANGER_KAFKA_PLUGIN_ENABLED_PROPERTY, rangerKafkaPluginProperties.getProperties().get(RANGER_KAFKA_PLUGIN_ENABLED_PROPERTY));
+      }
+      if (!newRangerEnvProps.isEmpty()) {
+        updateConfigurationPropertiesForCluster(cluster, RANGER_ENV_CONFIG, newRangerEnvProps, true, true);
+      }
+    }
+  }
+
+  protected void updateRangerUgsyncSiteConfig() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+
+    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
+      Config rangerUgsyncSiteProperties = cluster.getDesiredConfigByType(RANGER_UGSYNC_SITE_CONFIG);
+      if (rangerUgsyncSiteProperties != null && rangerUgsyncSiteProperties.getProperties().containsKey(RANGER_USERSYNC_SOURCE_IMPL_CLASS_PROPERTY)) {
+        String sourceClassValue = rangerUgsyncSiteProperties.getProperties().get(RANGER_USERSYNC_SOURCE_IMPL_CLASS_PROPERTY);
+        if (sourceClassValue != null) {
+          if ("ldap".equals(sourceClassValue)) {
+            Map<String, String> updates = Collections.singletonMap(RANGER_USERSYNC_SOURCE_IMPL_CLASS_PROPERTY,
+                "org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder");
+            updateConfigurationPropertiesForCluster(cluster, RANGER_UGSYNC_SITE_CONFIG, updates, true, false);
+          } else if ("unix".equals(sourceClassValue)) {
+            Map<String, String> updates = Collections.singletonMap(RANGER_USERSYNC_SOURCE_IMPL_CLASS_PROPERTY,
+                "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder");
+            updateConfigurationPropertiesForCluster(cluster, RANGER_UGSYNC_SITE_CONFIG, updates, true, false);
+          } else if ("file".equals(sourceClassValue)) {
+            Map<String, String> updates = Collections.singletonMap(RANGER_USERSYNC_SOURCE_IMPL_CLASS_PROPERTY,
+                "org.apache.ranger.unixusersync.process.FileSourceUserGroupBuilder");
+            updateConfigurationPropertiesForCluster(cluster, RANGER_UGSYNC_SITE_CONFIG, updates, true, false);
+          }
+        }
+      }
+    }
+  }
+
+  protected String updateHiveEnvContent(String hiveEnvContent) {
+    if(hiveEnvContent == null) {
+      return null;
+    }
+    // There are two cases here
+    // We do not have "export HADOOP_CLIENT_OPTS" and we need to add it
+    // We have "export HADOOP_CLIENT_OPTS" with wrong order
+    String exportHadoopClientOpts = "(?s).*export\\s*HADOOP_CLIENT_OPTS.*";
+    if (hiveEnvContent.matches(exportHadoopClientOpts)) {
+      String oldHeapSizeRegex = "export\\s*HADOOP_CLIENT_OPTS=\"-Xmx\\$\\{HADOOP_HEAPSIZE\\}m\\s*\\$HADOOP_CLIENT_OPTS\"";
+      String newHeapSizeRegex = "export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"";
+      return hiveEnvContent.replaceAll(oldHeapSizeRegex, Matcher.quoteReplacement(newHeapSizeRegex));
+    } else {
+      String oldHeapSizeRegex = "export\\s*HADOOP_HEAPSIZE\\s*=\\s*\"\\{\\{hive_heapsize\\}\\}\"\\.*\\n\\s*fi\\s*\\n";
+      String newHeapSizeRegex = "export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\n" +
+              "fi\n" +
+              "\n" +
+              "export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"\n";
+      return hiveEnvContent.replaceAll(oldHeapSizeRegex, Matcher.quoteReplacement(newHeapSizeRegex));
+    }
+  }
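+  // Example of the reordering above (sketch): an existing hive-env line such as
+  //   export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+  // is rewritten to
+  //   export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
+  // so the -Xmx flag derived from HADOOP_HEAPSIZE is appended last.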
+
+  protected String updateHiveEnvContentHDP23(String hiveEnvContent) {
+    if(hiveEnvContent == null) {
+      return null;
+    }
+    String oldHeapSizeRegex = "# The heap size of the jvm stared by hive shell script can be controlled via:\\s*\\n";
+    String newHeapSizeRegex = "# The heap size of the jvm stared by hive shell script can be controlled via:\n" +
+            "\n" +
+            "if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
+            "  export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore\n" +
+            "else\n" +
+            "  export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\n" +
+            "fi\n" +
+            "\n" +
+            "export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"\n" +
+            "\n";
+    return hiveEnvContent.replaceFirst(oldHeapSizeRegex, Matcher.quoteReplacement(newHeapSizeRegex));
+  }
+
+  protected void updateAccumuloConfigs() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
+      // If security type is set to Kerberos, update Kerberos-related configs
+      if(cluster.getSecurityType() == SecurityType.KERBEROS) {
+        Config clientProps = cluster.getDesiredConfigByType("client");
+        if (clientProps != null) {
+          Map<String, String> properties = clientProps.getProperties();
+          if (properties == null) {
+            properties = new HashMap<String, String>();
+          }
+          // <2.2.0 did not account for a custom service principal.
+          // Need to ensure that the client knows the server's principal (the primary) to properly authenticate.
+          properties.put("kerberos.server.primary", "{{bare_accumulo_principal}}");
+          updateConfigurationPropertiesForCluster(cluster, "client", properties, true, false);
+        }
+      } // else -- no special client-configuration is necessary.
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b3ff65d3/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog221.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog221.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog221.java
index a27a2b2..21f601e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog221.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog221.java
@@ -32,11 +32,13 @@ import org.apache.ambari.server.orm.dao.DaoUtils;
 import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Config;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.sql.SQLException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -55,6 +57,11 @@ public class UpgradeCatalog221 extends AbstractUpgradeCatalog {
    */
   private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog221.class);
 
+  private static final String OOZIE_SITE_CONFIG = "oozie-site";
+  private static final String OOZIE_SERVICE_HADOOP_CONFIGURATIONS_PROPERTY_NAME = "oozie.service.HadoopAccessorService.hadoop.configurations";
+  private static final String OLD_DEFAULT_HADOOP_CONFIG_PATH = "/etc/hadoop/conf";
+  private static final String NEW_DEFAULT_HADOOP_CONFIG_PATH = "{{hadoop_conf_dir}}";
+
 
   // ----- Constructors ------------------------------------------------------
 
@@ -104,6 +111,7 @@ public class UpgradeCatalog221 extends AbstractUpgradeCatalog {
   protected void executeDMLUpdates() throws AmbariException, SQLException {
     addNewConfigurationsFromXml();
     updateAlerts();
+    updateOozieConfigs();
   }
 
   protected void updateAlerts() {
@@ -175,5 +183,22 @@ public class UpgradeCatalog221 extends AbstractUpgradeCatalog {
     return sourceJson.toString();
   }
 
+  protected void updateOozieConfigs() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
+      Config oozieSiteProps = cluster.getDesiredConfigByType(OOZIE_SITE_CONFIG);
+      if (oozieSiteProps != null) {
+        // Update oozie.service.HadoopAccessorService.hadoop.configurations
+        Map<String, String> updateProperties = new HashMap<>();
+        String oozieHadoopConfigProperty = oozieSiteProps.getProperties().get(OOZIE_SERVICE_HADOOP_CONFIGURATIONS_PROPERTY_NAME);
+        if(oozieHadoopConfigProperty != null && oozieHadoopConfigProperty.contains(OLD_DEFAULT_HADOOP_CONFIG_PATH)) {
+          String updatedOozieHadoopConfigProperty = oozieHadoopConfigProperty.replaceAll(
+              OLD_DEFAULT_HADOOP_CONFIG_PATH, NEW_DEFAULT_HADOOP_CONFIG_PATH);
+          updateProperties.put(OOZIE_SERVICE_HADOOP_CONFIGURATIONS_PROPERTY_NAME, updatedOozieHadoopConfigProperty);
+          updateConfigurationPropertiesForCluster(cluster, OOZIE_SITE_CONFIG, updateProperties, true, false);
+        }
+      }
+    }
+  }
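+  // Example (the value format is illustrative): an oozie-site value such as
+  //   *=/etc/hadoop/conf
+  // for oozie.service.HadoopAccessorService.hadoop.configurations becomes
+  //   *={{hadoop_conf_dir}}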
 
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/b3ff65d3/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog200Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog200Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog200Test.java
index 32445a1..8ff23f8 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog200Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog200Test.java
@@ -449,7 +449,7 @@ public class UpgradeCatalog200Test {
     expect(mockFlumeEnv.getProperties()).andReturn(propertiesFlumeEnv).atLeastOnce();
 
     easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog213.class).updateFlumeEnvConfig();
+    mockInjector.getInstance(UpgradeCatalog200.class).updateFlumeEnvConfig();
     easyMockSupport.verifyAll();
   }
 


[2/4] ambari git commit: AMBARI-14357: UpgradeCatalogs need to be reversioned (jluniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/b3ff65d3/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java
deleted file mode 100644
index 425eb1d..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog213Test.java
+++ /dev/null
@@ -1,1633 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import com.google.common.collect.Maps;
-import com.google.gson.Gson;
-import com.google.inject.AbstractModule;
-import com.google.inject.Binder;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.Module;
-import com.google.inject.Provider;
-import com.google.inject.persist.PersistService;
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.actionmanager.ActionManager;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.AmbariManagementControllerImpl;
-import org.apache.ambari.server.controller.ConfigurationRequest;
-import org.apache.ambari.server.controller.ConfigurationResponse;
-import org.apache.ambari.server.controller.KerberosHelper;
-import org.apache.ambari.server.controller.MaintenanceStateHelper;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.dao.*;
-import org.apache.ambari.server.orm.entities.*;
-import org.apache.ambari.server.stack.StackManagerFactory;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.RepositoryVersionState;
-import org.apache.ambari.server.state.SecurityType;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.StackInfo;
-import org.apache.ambari.server.state.kerberos.*;
-import org.apache.ambari.server.state.stack.OsFamily;
-import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
-import org.easymock.Capture;
-import org.easymock.EasyMock;
-import org.easymock.EasyMockSupport;
-import org.easymock.IMocksControl;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import javax.persistence.EntityManager;
-import java.io.File;
-import java.lang.reflect.Field;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.net.URL;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static junit.framework.Assert.assertNotNull;
-import static junit.framework.Assert.assertNull;
-import static org.easymock.EasyMock.anyLong;
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.createMockBuilder;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.createStrictMock;
-import static org.easymock.EasyMock.eq;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-import static org.easymock.EasyMock.verify;
-import static org.junit.Assert.assertTrue;
-
-/**
- * {@link org.apache.ambari.server.upgrade.UpgradeCatalog213} unit tests.
- */
-public class UpgradeCatalog213Test {
-  private Injector injector;
-  private Provider<EntityManager> entityManagerProvider = createStrictMock(Provider.class);
-  private EntityManager entityManager = createNiceMock(EntityManager.class);
-  private UpgradeCatalogHelper upgradeCatalogHelper;
-  private StackEntity desiredStackEntity;
-  private AmbariManagementController amc = createNiceMock(AmbariManagementController.class);
-  private AmbariMetaInfo metaInfo = createNiceMock(AmbariMetaInfo.class);
-  private StackDAO stackDAO = createNiceMock(StackDAO.class);
-  private RepositoryVersionDAO repositoryVersionDAO = createNiceMock(RepositoryVersionDAO.class);
-  private ClusterVersionDAO clusterVersionDAO = createNiceMock(ClusterVersionDAO.class);
-  private HostVersionDAO hostVersionDAO = createNiceMock(HostVersionDAO.class);
-  private ClusterDAO clusterDAO = createNiceMock(ClusterDAO.class);
-
-  private IMocksControl mocksControl = EasyMock.createControl();
-
-  @Before
-  public void init() {
-    reset(entityManagerProvider);
-    expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
-    replay(entityManagerProvider);
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-
-    upgradeCatalogHelper = injector.getInstance(UpgradeCatalogHelper.class);
-    // inject AmbariMetaInfo to ensure that stacks get populated in the DB
-    injector.getInstance(AmbariMetaInfo.class);
-    // load the stack entity
-    StackDAO stackDAO = injector.getInstance(StackDAO.class);
-    desiredStackEntity = stackDAO.find("HDP", "2.2.0");
-  }
-
-  @After
-  public void tearDown() {
-    injector.getInstance(PersistService.class).stop();
-  }
-
-  @Test
-  public void testExecuteDDLUpdates() throws Exception {
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    UpgradeCatalog213 upgradeCatalog = (UpgradeCatalog213) getUpgradeCatalog(dbAccessor);
-
-    Configuration configuration = createNiceMock(Configuration.class);
-    expect(configuration.getDatabaseUrl()).andReturn(Configuration.JDBC_IN_MEMORY_URL).anyTimes();
-
-    Capture<DBAccessor.DBColumnInfo> columnCapture = EasyMock.newCapture();
-    dbAccessor.alterColumn(eq("host_role_command"), capture(columnCapture));
-    expectLastCall();
-
-    replay(dbAccessor, configuration);
-    Class<?> c = AbstractUpgradeCatalog.class;
-    Field f = c.getDeclaredField("configuration");
-    f.setAccessible(true);
-    f.set(upgradeCatalog, configuration);
-
-    upgradeCatalog.executeDDLUpdates();
-    verify(dbAccessor, configuration);
-
-    Assert.assertTrue(columnCapture.getValue().isNullable());
-  }
-
-  @Test
-  public void testExecuteUpgradeDDLUpdates() throws Exception{
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-
-    dbAccessor.addColumn(eq("upgrade"), anyObject(DBAccessor.DBColumnInfo.class));
-    expectLastCall().times(3);
-
-    replay(dbAccessor);
-    Module module = new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(DBAccessor.class).toInstance(dbAccessor);
-        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        binder.bind(EntityManager.class).toInstance(entityManager);
-      }
-    };
-
-    Injector injector = Guice.createInjector(module);
-    UpgradeCatalog213 upgradeCatalog213 = injector.getInstance(UpgradeCatalog213.class);
-    upgradeCatalog213.executeUpgradeDDLUpdates();
-    verify(dbAccessor);
-  }
-
-  @Test
-  public void testExecuteStageDDLUpdates() throws Exception {
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-
-    dbAccessor.addColumn(eq("stage"), anyObject(DBAccessor.DBColumnInfo.class));
-    expectLastCall().times(1);
-
-    replay(dbAccessor);
-    Module module = new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(DBAccessor.class).toInstance(dbAccessor);
-        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        binder.bind(EntityManager.class).toInstance(entityManager);
-      }
-    };
-
-    Injector injector = Guice.createInjector(module);
-    UpgradeCatalog213 upgradeCatalog213 = injector.getInstance(UpgradeCatalog213.class);
-    upgradeCatalog213.executeStageDDLUpdates();
-    verify(dbAccessor);
-  }
-
-  @Test
-  public void testExecuteDMLUpdates() throws Exception {
-    // TODO AMBARI-13001, re-add the unit test section.
-    /*
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    Configuration configuration = createNiceMock(Configuration.class);
-    Connection connection = createNiceMock(Connection.class);
-    Statement statement = createNiceMock(Statement.class);
-    ResultSet resultSet = createNiceMock(ResultSet.class);
-    expect(configuration.getDatabaseUrl()).andReturn(Configuration.JDBC_IN_MEMORY_URL).anyTimes();
-    dbAccessor.getConnection();
-    expectLastCall().andReturn(connection).anyTimes();
-    connection.createStatement();
-    expectLastCall().andReturn(statement).anyTimes();
-    statement.executeQuery(anyObject(String.class));
-    expectLastCall().andReturn(resultSet).anyTimes();
-
-    // Technically, this is a DDL change, but it has to be run during the DML portion
-    // because it requires the persistence layer to be started.
-    UpgradeSectionDDL upgradeSectionDDL = new UpgradeSectionDDL();
-
-    // Execute any DDL schema changes
-    upgradeSectionDDL.execute(dbAccessor);
-
-    // Begin DML verifications
-    verifyBootstrapHDP21();
-
-    // Replay main sections
-    replay(dbAccessor, configuration, resultSet, connection, statement);
-
-
-    AbstractUpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor);
-    Class<?> c = AbstractUpgradeCatalog.class;
-    Field f = c.getDeclaredField("configuration");
-    f.setAccessible(true);
-    f.set(upgradeCatalog, configuration);
-    */
-
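-    // Look up each upgrade helper method via reflection so it can be selectively mocked on the catalog below.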
-    Method updateAlertDefinitions = UpgradeCatalog213.class.getDeclaredMethod("updateAlertDefinitions");
-    Method updateStormConfigs = UpgradeCatalog213.class.getDeclaredMethod("updateStormConfigs");
-    Method updateAMSConfigs = UpgradeCatalog213.class.getDeclaredMethod("updateAMSConfigs");
-    Method updateHDFSConfigs = UpgradeCatalog213.class.getDeclaredMethod("updateHDFSConfigs");
-    Method updateKafkaConfigs = UpgradeCatalog213.class.getDeclaredMethod("updateKafkaConfigs");
-    Method updateHbaseEnvConfig = UpgradeCatalog213.class.getDeclaredMethod("updateHbaseEnvConfig");
-    Method updateFlumeEnvConfig = UpgradeCatalog213.class.getDeclaredMethod("updateFlumeEnvConfig");
-    Method updateHadoopEnv = UpgradeCatalog213.class.getDeclaredMethod("updateHadoopEnv");
-    Method updateZookeeperLog4j = UpgradeCatalog213.class.getDeclaredMethod("updateZookeeperLog4j");
-    Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
-    Method updateRangerEnvConfig = UpgradeCatalog213.class.getDeclaredMethod("updateRangerEnvConfig");
-    Method updateRangerUgsyncSiteConfig = UpgradeCatalog213.class.getDeclaredMethod("updateRangerUgsyncSiteConfig");
-    Method updateHiveConfig = UpgradeCatalog213.class.getDeclaredMethod("updateHiveConfig");
-    Method updateAccumuloConfigs = UpgradeCatalog213.class.getDeclaredMethod("updateAccumuloConfigs");
-    Method updateKerberosDescriptorArtifacts = AbstractUpgradeCatalog.class.getDeclaredMethod("updateKerberosDescriptorArtifacts");
-    Method updateKnoxTopology = UpgradeCatalog213.class.getDeclaredMethod("updateKnoxTopology");
-
-
-    UpgradeCatalog213 upgradeCatalog213 = createMockBuilder(UpgradeCatalog213.class)
-        .addMockedMethod(addNewConfigurationsFromXml)
-        .addMockedMethod(updateAMSConfigs)
-        .addMockedMethod(updateHDFSConfigs)
-        .addMockedMethod(updateAlertDefinitions)
-        .addMockedMethod(updateStormConfigs)
-        .addMockedMethod(updateHbaseEnvConfig)
-        .addMockedMethod(updateFlumeEnvConfig)
-        .addMockedMethod(updateKafkaConfigs)
-        .addMockedMethod(updateHadoopEnv)
-        .addMockedMethod(updateZookeeperLog4j)
-        .addMockedMethod(updateRangerEnvConfig)
-        .addMockedMethod(updateRangerUgsyncSiteConfig)
-        .addMockedMethod(updateHiveConfig)
-        .addMockedMethod(updateAccumuloConfigs)
-        .addMockedMethod(updateKerberosDescriptorArtifacts)
-        .addMockedMethod(updateKnoxTopology)
-        .createMock();
-
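-    // executeDMLUpdates() is expected to invoke every mocked helper exactly once.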
-    upgradeCatalog213.addNewConfigurationsFromXml();
-    expectLastCall().once();
-    upgradeCatalog213.updateStormConfigs();
-    expectLastCall().once();
-    upgradeCatalog213.updateHbaseEnvConfig();
-    expectLastCall().once();
-    upgradeCatalog213.updateFlumeEnvConfig();
-    expectLastCall().once();
-    upgradeCatalog213.updateHadoopEnv();
-    expectLastCall().once();
-    upgradeCatalog213.updateAMSConfigs();
-    expectLastCall().once();
-    upgradeCatalog213.updateAlertDefinitions();
-    expectLastCall().once();
-    upgradeCatalog213.updateKafkaConfigs();
-    expectLastCall().once();
-    upgradeCatalog213.updateHDFSConfigs();
-    expectLastCall().once();
-    upgradeCatalog213.updateZookeeperLog4j();
-    expectLastCall().once();
-    upgradeCatalog213.updateRangerEnvConfig();
-    expectLastCall().once();
-    upgradeCatalog213.updateRangerUgsyncSiteConfig();
-    expectLastCall().once();
-    upgradeCatalog213.updateHiveConfig();
-    expectLastCall().once();
-    upgradeCatalog213.updateAccumuloConfigs();
-    expectLastCall().once();
-    upgradeCatalog213.updateKerberosDescriptorArtifacts();
-    expectLastCall().once();
-    upgradeCatalog213.updateKnoxTopology();
-    expectLastCall().once();
-
-    replay(upgradeCatalog213);
-
-    upgradeCatalog213.executeDMLUpdates();
-
-    verify(upgradeCatalog213);
-
-    //verify(dbAccessor, configuration, resultSet, connection, statement);
-
-    // Verify sections
-    //upgradeSectionDDL.verify(dbAccessor);
-  }
-
-  @Test
-  public void testUpdateKerberosDescriptorArtifact() throws Exception {
-    final KerberosDescriptorFactory kerberosDescriptorFactory = new KerberosDescriptorFactory();
-
-    KerberosServiceDescriptor serviceDescriptor;
-
-    URL systemResourceURL = ClassLoader.getSystemResource("kerberos/test_kerberos_descriptor_2_1_3.json");
-    assertNotNull(systemResourceURL);
-
-    final KerberosDescriptor kerberosDescriptorOrig = kerberosDescriptorFactory.createInstance(new File(systemResourceURL.getFile()));
-    assertNotNull(kerberosDescriptorOrig);
-
-    serviceDescriptor = kerberosDescriptorOrig.getService("HDFS");
-    assertNotNull(serviceDescriptor);
-    assertNotNull(serviceDescriptor.getIdentity("hdfs"));
-
-    serviceDescriptor = kerberosDescriptorOrig.getService("OOZIE");
-    assertNotNull(serviceDescriptor);
-    assertNotNull(serviceDescriptor.getIdentity("/HDFS/hdfs"));
-
-    UpgradeCatalog213 upgradeMock = createMockBuilder(UpgradeCatalog213.class).createMock();
-
-    Capture<Map<String, Object>> updatedData = EasyMock.newCapture();
-
-    ArtifactEntity artifactEntity = createNiceMock(ArtifactEntity.class);
-    expect(artifactEntity.getArtifactData())
-        .andReturn(kerberosDescriptorOrig.toMap())
-        .once();
-
-    artifactEntity.setArtifactData(capture(updatedData));
-    expectLastCall().once();
-
-    replay(artifactEntity, upgradeMock);
-    upgradeMock.updateKerberosDescriptorArtifact(createNiceMock(ArtifactDAO.class), artifactEntity);
-    verify(artifactEntity, upgradeMock);
-
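-    // After the upgrade, the hdfs identity should have moved from the HDFS service level to the
-    // NAMENODE component, and the OOZIE reference should point at /HDFS/NAMENODE/hdfs instead.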
-    KerberosDescriptor kerberosDescriptorUpdated = new KerberosDescriptorFactory().createInstance(updatedData.getValue());
-    assertNotNull(kerberosDescriptorUpdated);
-
-    serviceDescriptor = kerberosDescriptorUpdated.getService("HDFS");
-    assertNotNull(serviceDescriptor);
-    assertNull(serviceDescriptor.getIdentity("hdfs"));
-
-    KerberosComponentDescriptor namenodeComponent = serviceDescriptor.getComponent("NAMENODE");
-    assertNotNull(namenodeComponent.getIdentity("hdfs"));
-
-    serviceDescriptor = kerberosDescriptorUpdated.getService("OOZIE");
-    assertNotNull(serviceDescriptor);
-    assertNull(serviceDescriptor.getIdentity("/HDFS/hdfs"));
-    assertNotNull(serviceDescriptor.getIdentity("/HDFS/NAMENODE/hdfs"));
-  }
-
-  @Test
-  public void testUpdateHbaseEnvConfig() throws AmbariException {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Map<String, String> propertiesHbaseEnv = new HashMap<String, String>() {
-      {
-        put("content", "test");
-      }
-    };
-
-    final Config mockHbaseEnv = easyMockSupport.createNiceMock(Config.class);
-    expect(mockHbaseEnv.getProperties()).andReturn(propertiesHbaseEnv).once();
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getCurrentStackVersion()).andReturn(new StackId("HDP", "2.2"));
-
-    expect(mockClusterExpected.getDesiredConfigByType("hbase-env")).andReturn(mockHbaseEnv).atLeastOnce();
-    expect(mockHbaseEnv.getProperties()).andReturn(propertiesHbaseEnv).atLeastOnce();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog213.class).updateHbaseEnvConfig();
-    easyMockSupport.verifyAll();
-
-  }
-
-  @Test
-  public void testUpdateFlumeEnvConfig() throws AmbariException {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Map<String, String> propertiesFlumeEnv = new HashMap<String, String>() {
-      {
-        put("content", "test");
-      }
-    };
-
-    final Config mockFlumeEnv = easyMockSupport.createNiceMock(Config.class);
-    expect(mockFlumeEnv.getProperties()).andReturn(propertiesFlumeEnv).once();
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-
-    expect(mockClusterExpected.getDesiredConfigByType("flume-env")).andReturn(mockFlumeEnv).atLeastOnce();
-    expect(mockFlumeEnv.getProperties()).andReturn(propertiesFlumeEnv).atLeastOnce();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog213.class).updateFlumeEnvConfig();
-    easyMockSupport.verifyAll();
-  }
-
-  /**
-   * Verify that when bootstrapping HDP 2.1, records get inserted into the
-   * repo_version, cluster_version, and host_version tables.
-   * @throws Exception if the mock expectations cannot be set up
-   */
-  private void verifyBootstrapHDP21() throws Exception {
-    final String stackName = "HDP";
-    final String stackVersion = "2.1";
-    final String stackNameAndVersion = stackName + "-" + stackVersion;
-    final String buildNumber = "2.1.0.0-0001";
-    final String stackAndBuild = stackName + "-" + buildNumber;
-    final String clusterName = "c1";
-
-    expect(amc.getAmbariMetaInfo()).andReturn(metaInfo);
-
-    // Mock the actions to bootstrap if using HDP 2.1
-    Clusters clusters = createNiceMock(Clusters.class);
-    expect(amc.getClusters()).andReturn(clusters);
-
-    Map<String, Cluster> clusterHashMap = new HashMap<String, Cluster>();
-    Cluster cluster = createNiceMock(Cluster.class);
-    clusterHashMap.put(clusterName, cluster);
-    expect(clusters.getClusters()).andReturn(clusterHashMap);
-
-    StackId stackId = new StackId(stackNameAndVersion);
-    expect(cluster.getCurrentStackVersion()).andReturn(stackId);
-
-    StackInfo stackInfo = new StackInfo();
-    stackInfo.setVersion(buildNumber);
-    expect(metaInfo.getStack(stackName, stackVersion)).andReturn(stackInfo);
-
-    StackEntity stackEntity = createNiceMock(StackEntity.class);
-    expect(stackEntity.getStackName()).andReturn(stackName);
-    expect(stackEntity.getStackVersion()).andReturn(stackVersion);
-
-    expect(stackDAO.find(stackName, stackVersion)).andReturn(stackEntity);
-
-    replay(amc, metaInfo, clusters, cluster, stackEntity, stackDAO);
-
-    // Mock more function calls
-    // Repository Version
-    RepositoryVersionEntity repositoryVersionEntity = createNiceMock(RepositoryVersionEntity.class);
-    expect(repositoryVersionDAO.findByDisplayName(stackAndBuild)).andReturn(null);
-    expect(repositoryVersionDAO.findMaxId("id")).andReturn(0L);
-    expect(repositoryVersionDAO.findAll()).andReturn(Collections.<RepositoryVersionEntity>emptyList());
-    expect(repositoryVersionDAO.create(anyObject(StackEntity.class), anyObject(String.class), anyObject(String.class), anyObject(String.class))).andReturn(repositoryVersionEntity);
-    expect(repositoryVersionEntity.getId()).andReturn(1L);
-    expect(repositoryVersionEntity.getVersion()).andReturn(buildNumber);
-    replay(repositoryVersionDAO, repositoryVersionEntity);
-
-    // Cluster Version
-    ClusterVersionEntity clusterVersionEntity = createNiceMock(ClusterVersionEntity.class);
-    expect(clusterVersionEntity.getId()).andReturn(1L);
-    expect(clusterVersionEntity.getState()).andReturn(RepositoryVersionState.CURRENT);
-    expect(clusterVersionEntity.getRepositoryVersion()).andReturn(repositoryVersionEntity);
-
-    expect(clusterVersionDAO.findByClusterAndStackAndVersion(anyObject(String.class), anyObject(StackId.class), anyObject(String.class))).andReturn(null);
-    expect(clusterVersionDAO.findMaxId("id")).andReturn(0L);
-    expect(clusterVersionDAO.findAll()).andReturn(Collections.<ClusterVersionEntity>emptyList());
-    expect(clusterVersionDAO.create(anyObject(ClusterEntity.class), anyObject(RepositoryVersionEntity.class), anyObject(RepositoryVersionState.class), anyLong(), anyLong(), anyObject(String.class))).andReturn(clusterVersionEntity);
-    replay(clusterVersionDAO, clusterVersionEntity);
-
-    // Host Version
-    ClusterEntity clusterEntity = createNiceMock(ClusterEntity.class);
-    expect(clusterEntity.getClusterName()).andReturn(clusterName).anyTimes();
-    expect(clusterDAO.findByName(anyObject(String.class))).andReturn(clusterEntity);
-
-    Collection<HostEntity> hostEntities = new ArrayList<HostEntity>();
-    HostEntity hostEntity1 = createNiceMock(HostEntity.class);
-    HostEntity hostEntity2 = createNiceMock(HostEntity.class);
-    expect(hostEntity1.getHostName()).andReturn("host1");
-    expect(hostEntity2.getHostName()).andReturn("host2");
-    hostEntities.add(hostEntity1);
-    hostEntities.add(hostEntity2);
-    expect(clusterEntity.getHostEntities()).andReturn(hostEntities);
-
-    expect(hostVersionDAO.findByClusterStackVersionAndHost(anyObject(String.class), anyObject(StackId.class), anyObject(String.class), anyObject(String.class))).andReturn(null);
-    expect(hostVersionDAO.findMaxId("id")).andReturn(0L);
-    expect(hostVersionDAO.findAll()).andReturn(Collections.<HostVersionEntity>emptyList());
-
-    replay(clusterEntity, clusterDAO, hostVersionDAO, hostEntity1, hostEntity2);
-  }
-
-  @Test
-  public void testExecuteUpgradePreDMLUpdates() throws Exception {
-    Method executeStackPreDMLUpdates = UpgradeCatalog213.class.getDeclaredMethod("executeUpgradePreDMLUpdates");
-    Method executeStackUpgradeDDLUpdates = UpgradeCatalog213.class.getDeclaredMethod("executeStackUpgradeDDLUpdates");
-    Method bootstrapRepoVersionForHDP21 = UpgradeCatalog213.class.getDeclaredMethod("bootstrapRepoVersionForHDP21");
-
-    final UpgradeCatalog213 upgradeCatalog213 = createMockBuilder(UpgradeCatalog213.class)
-      .addMockedMethod(executeStackUpgradeDDLUpdates)
-      .addMockedMethod(bootstrapRepoVersionForHDP21)
-      .addMockedMethod(executeStackPreDMLUpdates).createMock();
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(UpgradeCatalog213.class).toInstance(upgradeCatalog213);
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        bind(EntityManager.class).toInstance(entityManager);
-      }
-    });
-
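-    // executePreDMLUpdates() should run the stack-upgrade DDL, the HDP 2.1 repo bootstrap, and the pre-DML updates once each.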
-    upgradeCatalog213.executeUpgradePreDMLUpdates();
-    expectLastCall().once();
-
-    upgradeCatalog213.executeStackUpgradeDDLUpdates();
-    expectLastCall().once();
-
-    upgradeCatalog213.bootstrapRepoVersionForHDP21();
-    expectLastCall().once();
-
-    replay(upgradeCatalog213);
-    mockInjector.getInstance(UpgradeCatalog213.class).executePreDMLUpdates();
-
-    verify(upgradeCatalog213);
-  }
-
-  @Test
-  public void testUpdateStormSiteConfigs() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Map<String, String> propertiesStormSite = new HashMap<String, String>() {
-      {
-        put("nimbus.monitor.freq.secs", "10");
-        put("metrics.reporter.register", "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter");
-      }
-    };
-
-    final Config mockStormSite = easyMockSupport.createNiceMock(Config.class);
-    expect(mockStormSite.getProperties()).andReturn(propertiesStormSite).once();
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    expect(mockClusterExpected.getDesiredConfigByType("storm-site")).andReturn(mockStormSite).atLeastOnce();
-    expect(mockStormSite.getProperties()).andReturn(propertiesStormSite).atLeastOnce();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog213.class).updateStormConfigs();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testUpdateZookeeperLog4jConfig() throws AmbariException {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Map<String, String> propertiesZookeeperLog4j = new HashMap<String, String>() {
-      {
-        put("content", "log4j.rootLogger=INFO, CONSOLE");
-      }
-    };
-
-    final Config mockZookeeperLog4j = easyMockSupport.createNiceMock(Config.class);
-    expect(mockZookeeperLog4j.getProperties()).andReturn(propertiesZookeeperLog4j).once();
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-
-    expect(mockClusterExpected.getDesiredConfigByType("zookeeper-log4j")).andReturn(mockZookeeperLog4j).atLeastOnce();
-    expect(mockZookeeperLog4j.getProperties()).andReturn(propertiesZookeeperLog4j).atLeastOnce();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog213.class).updateZookeeperLog4j();
-    easyMockSupport.verifyAll();
-
-  }
-
-  @Test
-  public void testUpdateHDFSConfiguration() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-
-    final Config mockHdfsSite = easyMockSupport.createNiceMock(Config.class);
-
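-    // hdfs-site is populated with HA-style properties (dfs.nameservices, dfs.ha.namenodes.*) for the update.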
-    final Map<String, String> propertiesExpectedHdfs = new HashMap<String, String>();
-    propertiesExpectedHdfs.put("dfs.namenode.rpc-address", "nn.rpc.address");
-    propertiesExpectedHdfs.put("dfs.nameservices", "nn1");
-    propertiesExpectedHdfs.put("dfs.ha.namenodes.nn1", "value");
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    // Expected operation
-    expect(mockClusterExpected.getDesiredConfigByType("hdfs-site")).andReturn(mockHdfsSite).atLeastOnce();
-    expect(mockHdfsSite.getProperties()).andReturn(propertiesExpectedHdfs).anyTimes();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog213.class).updateHDFSConfigs();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testUpdateAmsHbaseEnvContent() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
-    Method updateAmsHbaseEnvContent = UpgradeCatalog213.class.getDeclaredMethod("updateAmsHbaseEnvContent", String.class);
-    UpgradeCatalog213 upgradeCatalog213 = new UpgradeCatalog213(injector);
-    String oldContent = "export HBASE_CLASSPATH=${HBASE_CLASSPATH}\n" +
-      "\n" +
-      "# The maximum amount of heap to use, in MB. Default is 1000.\n" +
-      "export HBASE_HEAPSIZE={{hbase_heapsize}}\n";
-
-    String expectedContent = "export HBASE_CLASSPATH=${HBASE_CLASSPATH}\n" +
-      "\n" +
-      "# The maximum amount of heap to use, in MB. Default is 1000.\n" +
-      "#export HBASE_HEAPSIZE={{hbase_heapsize}}\n" +
-      "\n" +
-      "# The maximum amount of heap to use for hbase shell.\n" +
-      "export HBASE_SHELL_OPTS=\"-Xmx256m\"\n";
-    String result = (String) updateAmsHbaseEnvContent.invoke(upgradeCatalog213, oldContent);
-    Assert.assertEquals(expectedContent, result);
-  }
-
-  @Test
-  public void testAmsSiteUpdateConfigs() throws Exception{
-
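-    // The legacy ams-site properties below are expected to be replaced by the new aggregator settings asserted at the end.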
-    Map<String, String> oldPropertiesAmsSite = new HashMap<String, String>() {
-      {
-        //Including only those properties that might be present in an older version.
-        put("timeline.metrics.service.default.result.limit", String.valueOf(5760));
-        put("timeline.metrics.cluster.aggregator.minute.interval", String.valueOf(1000));
-        put("timeline.metrics.host.aggregator.minute.interval", String.valueOf(1000));
-        put("timeline.metrics.cluster.aggregator.minute.ttl", String.valueOf(1000));
-      }
-    };
-    Map<String, String> newPropertiesAmsSite = new HashMap<String, String>() {
-      {
-        put("timeline.metrics.service.default.result.limit", String.valueOf(15840));
-        put("timeline.metrics.cluster.aggregator.second.interval", String.valueOf(120));
-        put("timeline.metrics.cluster.aggregator.minute.interval", String.valueOf(300));
-        put("timeline.metrics.host.aggregator.minute.interval", String.valueOf(300));
-        put("timeline.metrics.cluster.aggregator.second.ttl", String.valueOf(2592000));
-        put("timeline.metrics.cluster.aggregator.minute.ttl", String.valueOf(7776000));
-        put("timeline.metrics.cluster.aggregator.second.checkpointCutOffMultiplier", String.valueOf(2));
-        put("timeline.metrics.cluster.aggregator.second.disabled", String.valueOf(false));
-        put("timeline.metrics.hbase.fifo.compaction.enabled", String.valueOf(true));
-      }
-    };
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
-    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
-    Config mockAmsSite = easyMockSupport.createNiceMock(Config.class);
-
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).once();
-    expect(cluster.getDesiredConfigByType("ams-site")).andReturn(mockAmsSite).atLeastOnce();
-    expect(mockAmsSite.getProperties()).andReturn(oldPropertiesAmsSite).times(2);
-
-    Injector injector = easyMockSupport.createNiceMock(Injector.class);
-    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
-
-    replay(injector, clusters, mockAmsSite, cluster);
-
-    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
-      .addMockedMethod("createConfiguration")
-      .addMockedMethod("getClusters", new Class[] { })
-      .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
-      .createNiceMock();
-
-    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
-    Capture<ConfigurationRequest> configurationRequestCapture = EasyMock.newCapture();
-    ConfigurationResponse configurationResponseMock = easyMockSupport.createMock(ConfigurationResponse.class);
-
-    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(controller.createConfiguration(capture(configurationRequestCapture))).andReturn(configurationResponseMock).once();
-
-    replay(controller, injector2, configurationResponseMock);
-    new UpgradeCatalog213(injector2).updateAMSConfigs();
-    easyMockSupport.verifyAll();
-
-    ConfigurationRequest configurationRequest = configurationRequestCapture.getValue();
-    Map<String, String> updatedProperties = configurationRequest.getProperties();
-    assertTrue(Maps.difference(newPropertiesAmsSite, updatedProperties).areEqual());
-
-  }
-
-  @Test
-  public void testAmsHbaseSiteUpdateConfigs() throws Exception{
-
-    Map<String, String> oldPropertiesAmsHbaseSite = new HashMap<String, String>() {
-      {
-        //Including only those properties that might be present in an older version.
-        put("zookeeper.session.timeout.localHBaseCluster", String.valueOf(20000));
-      }
-    };
-    Map<String, String> newPropertiesAmsSite = new HashMap<String, String>() {
-      {
-        put("zookeeper.session.timeout.localHBaseCluster", String.valueOf(120000));
-        put("hbase.normalizer.enabled", String.valueOf(true));
-        put("hbase.normalizer.period", String.valueOf(600000));
-        put("hbase.master.normalizer.class", "org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer");
-
-      }
-    };
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
-    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
-    Config mockAmsHbaseSite = easyMockSupport.createNiceMock(Config.class);
-
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).once();
-    expect(cluster.getDesiredConfigByType("ams-hbase-site")).andReturn(mockAmsHbaseSite).atLeastOnce();
-    expect(mockAmsHbaseSite.getProperties()).andReturn(oldPropertiesAmsHbaseSite).atLeastOnce();
-
-    Injector injector = easyMockSupport.createNiceMock(Injector.class);
-    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
-
-    replay(injector, clusters, mockAmsHbaseSite, cluster);
-
-    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
-      .addMockedMethod("createConfiguration")
-      .addMockedMethod("getClusters", new Class[] { })
-      .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
-      .createNiceMock();
-
-    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
-    Capture<ConfigurationRequest> configurationRequestCapture = EasyMock.newCapture();
-    ConfigurationResponse configurationResponseMock = easyMockSupport.createMock(ConfigurationResponse.class);
-
-    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(controller.createConfiguration(capture(configurationRequestCapture))).andReturn(configurationResponseMock).once();
-
-    replay(controller, injector2, configurationResponseMock);
-    new UpgradeCatalog213(injector2).updateAMSConfigs();
-    easyMockSupport.verifyAll();
-
-    ConfigurationRequest configurationRequest = configurationRequestCapture.getValue();
-    Map<String, String> updatedProperties = configurationRequest.getProperties();
-    assertTrue(Maps.difference(newPropertiesAmsSite, updatedProperties).areEqual());
-  }
-
-  @Test
-  public void testUpdateAlertDefinitions() {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    UpgradeCatalog213 upgradeCatalog213 = new UpgradeCatalog213(injector);
-    long clusterId = 1;
-
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final AlertDefinitionDAO mockAlertDefinitionDAO = easyMockSupport.createNiceMock(AlertDefinitionDAO.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final AlertDefinitionEntity mockJournalNodeProcessAlertDefinitionEntity = easyMockSupport.createNiceMock(AlertDefinitionEntity.class);
-    final AlertDefinitionEntity mockHostDiskUsageAlertDefinitionEntity = easyMockSupport.createNiceMock(AlertDefinitionEntity.class);
-
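-    // modifyJournalnodeProcessAlertSource() should convert the legacy PORT-based alert source below into the WEB-based source that follows.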
-    final String journalNodeProcessAlertSource = "{\"uri\":\"{{hdfs-site/dfs.journalnode.http-address}}\",\"default_port\":8480," +
-        "\"type\":\"PORT\",\"reporting\":{\"ok\":{\"text\":\"TCP OK - {0:.3f}s response on port {1}\"}," +
-        "\"warning\":{\"text\":\"TCP OK - {0:.3f}s response on port {1}\",\"value\":1.5}," +
-        "\"critical\":{\"text\":\"Connection failed: {0} to {1}:{2}\",\"value\":5.0}}}";
-    final String journalNodeProcessAlertSourceExpected = "{\"reporting\":{\"ok\":{\"text\":\"HTTP {0} response in {2:.3f}s\"}," +
-        "\"warning\":{\"text\":\"HTTP {0} response from {1} in {2:.3f}s ({3})\"}," +
-        "\"critical\":{\"text\":\"Connection failed to {1} ({3})\"}},\"type\":\"WEB\"," +
-        "\"uri\":{\"http\":\"{{hdfs-site/dfs.journalnode.http-address}}\"," +
-        "\"https\":\"{{hdfs-site/dfs.journalnode.https-address}}\"," +
-        "\"kerberos_keytab\":\"{{hdfs-site/dfs.web.authentication.kerberos.keytab}}\"," +
-        "\"kerberos_principal\":\"{{hdfs-site/dfs.web.authentication.kerberos.principal}}\"," +
-        "\"https_property\":\"{{hdfs-site/dfs.http.policy}}\"," +
-        "\"https_property_value\":\"HTTPS_ONLY\",\"connection_timeout\":5.0}}";
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-        bind(AlertDefinitionDAO.class).toInstance(mockAlertDefinitionDAO);
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-
-    expect(mockClusterExpected.getClusterId()).andReturn(clusterId).anyTimes();
-
-    expect(mockAlertDefinitionDAO.findByName(eq(clusterId), eq("journalnode_process"))).andReturn(mockJournalNodeProcessAlertDefinitionEntity).atLeastOnce();
-    expect(mockAlertDefinitionDAO.findByName(eq(clusterId), eq("ambari_agent_disk_usage"))).andReturn(mockHostDiskUsageAlertDefinitionEntity).atLeastOnce();
-
-    expect(mockJournalNodeProcessAlertDefinitionEntity.getSource()).andReturn(journalNodeProcessAlertSource).atLeastOnce();
-    Assert.assertEquals(journalNodeProcessAlertSourceExpected, upgradeCatalog213.modifyJournalnodeProcessAlertSource(journalNodeProcessAlertSource));
-
-    mockHostDiskUsageAlertDefinitionEntity.setDescription(eq("This host-level alert is triggered if the amount of disk" +
-        " space used goes above specific thresholds. The default threshold values are 50% for WARNING and 80% for CRITICAL"));
-    expectLastCall().atLeastOnce();
-    mockHostDiskUsageAlertDefinitionEntity.setLabel(eq("Host Disk Usage"));
-    expectLastCall().atLeastOnce();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog213.class).updateAlertDefinitions();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testUpdateAmsEnvContent() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
-    Method updateAmsEnvContent = UpgradeCatalog213.class.getDeclaredMethod("updateAmsEnvContent", String.class);
-    UpgradeCatalog213 upgradeCatalog213 = new UpgradeCatalog213(injector);
-    String oldContent = "some_content";
-
-    String expectedContent = "some_content" + "\n" +
-      "# AMS Collector GC options\n" +
-      "export AMS_COLLECTOR_GC_OPTS=\"-XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 " +
-      "-XX:+UseCMSInitiatingOccupancyOnly -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps " +
-      "-XX:+UseGCLogFileRotation -XX:GCLogFileSize=10M " +
-      "-Xloggc:{{ams_collector_log_dir}}/collector-gc.log-`date +'%Y%m%d%H%M'`\"\n" +
-      "export AMS_COLLECTOR_OPTS=\"$AMS_COLLECTOR_OPTS $AMS_COLLECTOR_GC_OPTS\"\n"+
-      "\n" +
-      "# HBase compaction policy enabled\n" +
-      "export HBASE_NORMALIZATION_ENABLED={{ams_hbase_normalizer_enabled}}\n" +
-      "\n" +
-      "# HBase compaction policy enabled\n" +
-      "export HBASE_FIFO_COMPACTION_POLICY_ENABLED={{ams_hbase_fifo_compaction_policy_enabled}}\n";
-
-    String result = (String) updateAmsEnvContent.invoke(upgradeCatalog213, oldContent);
-    Assert.assertEquals(expectedContent, result);
-  }
-
-  @Test
-  public void testUpdateKafkaConfigs() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final ConfigurationResponse mockConfigurationResponse = easyMockSupport.createMock(ConfigurationResponse.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-
-    final Map<String, String> propertiesKafkaEnv = new HashMap<String, String>() {
-      {
-        put("content", "test");
-      }
-    };
-    Map<String, String> updates = Collections.singletonMap("content", "test\n\nexport KAFKA_KERBEROS_PARAMS=\"$KAFKA_KERBEROS_PARAMS {{kafka_kerberos_params}}");
-
-    final Map<String, String> propertiesAmsEnv = new HashMap<String, String>() {
-      {
-        put("kafka.metrics.reporters", "{{kafka_metrics_reporters}}");
-      }
-    };
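-    // The cluster reports both KAFKA and AMBARI_METRICS installed, and kafka-env content is expected to gain the Kafka Kerberos params.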
-    final Map<String, Service> installedServices = new HashMap<String, Service>() {
-      {
-        put("KAFKA", null);
-        put("AMBARI_METRICS", null);
-      }
-    };
-
-    final Config mockAmsEnv = easyMockSupport.createNiceMock(Config.class);
-    final Config mockKafkaEnv = easyMockSupport.createNiceMock(Config.class);
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    expect(mockClusterExpected.getServices()).andReturn(installedServices).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("kafka-broker")).andReturn(mockAmsEnv).atLeastOnce();
-    expect(mockAmsEnv.getProperties()).andReturn(propertiesAmsEnv).atLeastOnce();
-
-    expect(mockClusterExpected.getDesiredConfigByType("kafka-env")).andReturn(mockKafkaEnv).atLeastOnce();
-    expect(mockKafkaEnv.getProperties()).andReturn(propertiesKafkaEnv).atLeastOnce();
-
-    UpgradeCatalog213 upgradeCatalog213 = createMockBuilder(UpgradeCatalog213.class)
-        .withConstructor(Injector.class)
-        .withArgs(mockInjector)
-        .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
-            Map.class, boolean.class, boolean.class)
-        .createMock();
-    upgradeCatalog213.updateConfigurationPropertiesForCluster(mockClusterExpected,
-      "kafka-env", updates, true, false);
-    expectLastCall().once();
-
-    expect(mockAmbariManagementController.createConfiguration(EasyMock.<ConfigurationRequest>anyObject())).andReturn(mockConfigurationResponse);
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog213.class).updateKafkaConfigs();
-    easyMockSupport.verifyAll();
-  }
-
-  /**
-   * Builds an {@link UpgradeCatalog213} wired against mocked dependencies.
-   *
-   * @param dbAccessor the mocked database accessor to bind into the injector
-   * @return the upgrade catalog instance under test
-   */
-  private AbstractUpgradeCatalog getUpgradeCatalog(final DBAccessor dbAccessor) {
-    Module module = new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(DBAccessor.class).toInstance(dbAccessor);
-        binder.bind(EntityManager.class).toInstance(entityManager);
-        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        binder.bind(DaoUtils.class).toInstance(createNiceMock(DaoUtils.class));
-        binder.bind(ClusterDAO.class).toInstance(clusterDAO);
-        binder.bind(RepositoryVersionHelper.class).toInstance(createNiceMock(RepositoryVersionHelper.class));
-        binder.bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
-        binder.bind(AmbariManagementController.class).toInstance(amc);
-        binder.bind(AmbariMetaInfo.class).toInstance(metaInfo);
-        binder.bind(StackManagerFactory.class).toInstance(createNiceMock(StackManagerFactory.class));
-        binder.bind(StackDAO.class).toInstance(stackDAO);
-        binder.bind(RepositoryVersionDAO.class).toInstance(repositoryVersionDAO);
-        binder.bind(ClusterVersionDAO.class).toInstance(clusterVersionDAO);
-        binder.bind(HostVersionDAO.class).toInstance(hostVersionDAO);
-      }
-    };
-    Injector injector = Guice.createInjector(module);
-    return injector.getInstance(UpgradeCatalog213.class);
-  }
-
-  @Test
-  public void testUpdateRangerEnvConfig() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Map<String, String> propertiesHiveEnv = new HashMap<String, String>() {{
-        put("hive_security_authorization", "Ranger");
-    }};
-    final Map<String, String> propertiesRangerHdfsPlugin = new HashMap<String, String>() {{
-      put("ranger-hdfs-plugin-enabled", "Yes");
-    }};
-    final Map<String, String> propertiesRangerHbasePlugin = new HashMap<String, String>() {{
-      put("ranger-hbase-plugin-enabled", "Yes");
-    }};
-    final Map<String, String> propertiesRangerKafkaPlugin = new HashMap<String, String>() {{
-      put("ranger-kafka-plugin-enabled", "Yes");
-    }};
-    final Map<String, String> propertiesRangerYarnPlugin = new HashMap<String, String>() {{
-      put("ranger-yarn-plugin-enabled", "No");
-    }};
-
-    final Config mockHiveEnvConf = easyMockSupport.createNiceMock(Config.class);
-    final Config mockRangerHdfsPluginConf = easyMockSupport.createNiceMock(Config.class);
-    final Config mockRangerHbasePluginConf = easyMockSupport.createNiceMock(Config.class);
-    final Config mockRangerKafkaPluginConf = easyMockSupport.createNiceMock(Config.class);
-    final Config mockRangerYarnPluginConf = easyMockSupport.createNiceMock(Config.class);
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("hive-env")).andReturn(mockHiveEnvConf).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("ranger-hdfs-plugin-properties")).andReturn(mockRangerHdfsPluginConf).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("ranger-hbase-plugin-properties")).andReturn(mockRangerHbasePluginConf).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("ranger-kafka-plugin-properties")).andReturn(mockRangerKafkaPluginConf).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("ranger-yarn-plugin-properties")).andReturn(mockRangerYarnPluginConf).atLeastOnce();
-
-    expect(mockHiveEnvConf.getProperties()).andReturn(propertiesHiveEnv).times(2);
-    expect(mockRangerHdfsPluginConf.getProperties()).andReturn(propertiesRangerHdfsPlugin).times(2);
-    expect(mockRangerHbasePluginConf.getProperties()).andReturn(propertiesRangerHbasePlugin).times(2);
-    expect(mockRangerKafkaPluginConf.getProperties()).andReturn(propertiesRangerKafkaPlugin).times(2);
-    expect(mockRangerYarnPluginConf.getProperties()).andReturn(propertiesRangerYarnPlugin).times(2);
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog213.class).updateRangerEnvConfig();
-    easyMockSupport.verifyAll();
-
-  }
-
-  @Test
-  public void testUpdateRangerUgsyncSiteConfig() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Map<String, String> propertiesRangerUgsyncSite = new HashMap<String, String>() {{
-        put("ranger.usersync.source.impl.class", "ldap");
-    }};
-
-    final Config mockRangerUgsyncSite = easyMockSupport.createNiceMock(Config.class);
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("ranger-ugsync-site")).andReturn(mockRangerUgsyncSite).atLeastOnce();
-
-    expect(mockRangerUgsyncSite.getProperties()).andReturn(propertiesRangerUgsyncSite).atLeastOnce();
-
-    Map<String, String> updates = Collections.singletonMap("ranger.usersync.source.impl.class", "org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder");
-    UpgradeCatalog213 upgradeCatalog213 = createMockBuilder(UpgradeCatalog213.class)
-            .withConstructor(Injector.class)
-            .withArgs(mockInjector)
-            .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
-                    Map.class, boolean.class, boolean.class)
-            .createMock();
-    upgradeCatalog213.updateConfigurationPropertiesForCluster(mockClusterExpected,
-            "ranger-ugsync-site", updates, true, false);
-    expectLastCall().once();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog213.class).updateRangerUgsyncSiteConfig();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testGetSourceVersion() {
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    UpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor);
-    Assert.assertEquals("2.1.2.1", upgradeCatalog.getSourceVersion());
-  }
-
-  @Test
-  public void testGetTargetVersion() throws Exception {
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    UpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor);
-
-    Assert.assertEquals("2.1.3", upgradeCatalog.getTargetVersion());
-  }
-
-  // *********** Inner Classes that represent sections of the DDL ***********
-  // ************************************************************************
-
-  /**
-   * Verify that the upgrade table has two columns added to it.
-   */
-  class UpgradeSectionDDL implements SectionDDL {
-
-    Capture<DBAccessor.DBColumnInfo> upgradeTablePackageNameColumnCapture = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> upgradeTableUpgradeTypeColumnCapture = EasyMock.newCapture();
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void execute(DBAccessor dbAccessor) throws SQLException {
-      // Add columns
-      dbAccessor.addColumn(eq("upgrade"), capture(upgradeTablePackageNameColumnCapture));
-      dbAccessor.addColumn(eq("upgrade"), capture(upgradeTableUpgradeTypeColumnCapture));
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void verify(DBAccessor dbAccessor) throws SQLException {
-      // Verification section
-      DBAccessor.DBColumnInfo packageNameCol = upgradeTablePackageNameColumnCapture.getValue();
-      Assert.assertEquals(String.class, packageNameCol.getType());
-      Assert.assertEquals("upgrade_package", packageNameCol.getName());
-
-      DBAccessor.DBColumnInfo upgradeTypeCol = upgradeTableUpgradeTypeColumnCapture.getValue();
-      Assert.assertEquals(String.class, upgradeTypeCol.getType());
-      Assert.assertEquals("upgrade_type", upgradeTypeCol.getName());
-    }
-  }
-
-  @Test
-  public void testShouldDDLsBeExecutedOnUpgrade() throws Exception {
-    // GIVEN
-    Injector mockedInjector = mocksControl.createMock(Injector.class);
-    DBAccessor mockedDbAccessor = mocksControl.createMock(DBAccessor.class);
-    DaoUtils mockedDaoUtils = mocksControl.createMock(DaoUtils.class);
-    Configuration mockedConfiguration = mocksControl.createMock(Configuration.class);
-    StackUpgradeUtil mockedStackUpgradeUtil = mocksControl.createMock(StackUpgradeUtil.class);
-
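-    // Capture the DDL arguments so the created table, primary key, and new columns can be asserted on after upgradeSchema() runs.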
-    Capture<String> capturedTableName = EasyMock.newCapture();
-    Capture<String> capturedPKColumn = EasyMock.newCapture();
-    Capture<List<DBAccessor.DBColumnInfo>> capturedColumns = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> capturedColumn = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> capturedHostRoleCommandColumn = EasyMock.newCapture();
-
-    Capture<String> capturedBlueprintTableName = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> capturedNewBlueprintColumn1 = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> capturedNewBlueprintColumn2 = EasyMock.newCapture();
-
-    Capture<DBAccessor.DBColumnInfo> stageSkipColumnCapture = EasyMock.newCapture();
-
-    EasyMock.expect(mockedInjector.getInstance(DaoUtils.class)).andReturn(mockedDaoUtils);
-    mockedInjector.injectMembers(anyObject(UpgradeCatalog.class));
-    EasyMock.expect(mockedConfiguration.getDatabaseType()).andReturn(Configuration.DatabaseType.POSTGRES).anyTimes();
-    EasyMock.expect(mockedConfiguration.getDatabaseUser()).andReturn("ambari");
-    EasyMock.expect(mockedConfiguration.getServerJDBCPostgresSchemaName()).andReturn("fo");
-
-
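-    // For a Postgres database, upgradeSchema() is expected to issue the schema-ownership and role search_path fixes.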
-    mockedDbAccessor.executeQuery("ALTER SCHEMA fo OWNER TO \"ambari\";");
-    mockedDbAccessor.executeQuery("ALTER ROLE \"ambari\" SET search_path to 'fo';");
-
-    // executeUpgradeDDLUpdates
-    mockedDbAccessor.addColumn(eq("upgrade"), capture(capturedColumn));
-    mockedDbAccessor.addColumn(eq("upgrade"), capture(capturedColumn));
-    mockedDbAccessor.addColumn(eq("upgrade"), capture(capturedColumn));
-
-    // addKerberosDescriptorTable
-    mockedDbAccessor.createTable(capture(capturedTableName), capture(capturedColumns), capture(capturedPKColumn));
-    mockedDbAccessor.alterColumn(eq("host_role_command"), capture(capturedHostRoleCommandColumn));
-
-    mockedDbAccessor.addColumn(capture(capturedBlueprintTableName), capture(capturedNewBlueprintColumn1));
-    mockedDbAccessor.addColumn(capture(capturedBlueprintTableName), capture(capturedNewBlueprintColumn2));
-
-    mockedDbAccessor.addColumn(eq("stage"), capture(stageSkipColumnCapture));
-
-    mocksControl.replay();
-
-    UpgradeCatalog213 testSubject = new UpgradeCatalog213(mockedInjector);
-    EasyMockSupport.injectMocks(testSubject);
-
-    // TODO: refactor the DI approach; don't access these members directly.
-    testSubject.stackUpgradeUtil = mockedStackUpgradeUtil;
-    testSubject.dbAccessor = mockedDbAccessor;
-    testSubject.configuration = mockedConfiguration;
-
-    // WHEN
-    testSubject.upgradeSchema();
-
-    // THEN
-    Assert.assertEquals("The table name is wrong!", "kerberos_descriptor", capturedTableName.getValue());
-    Assert.assertEquals("The primary key is wrong!", "kerberos_descriptor_name", capturedPKColumn.getValue());
-    Assert.assertTrue("Ther number of columns is wrong!", capturedColumns.getValue().size() == 2);
-
-    Assert.assertEquals("The table name is wrong!", "blueprint", capturedBlueprintTableName.getValue());
-
-    Assert.assertEquals("The column name is wrong!", "security_type", capturedNewBlueprintColumn1.getValue().getName());
-    Assert.assertEquals("The column name is wrong!", "security_descriptor_reference", capturedNewBlueprintColumn2
-      .getValue().getName());
-
-    Assert.assertEquals("The column name is wrong!", "supports_auto_skip_failure",
-        stageSkipColumnCapture.getValue().getName());
-  }
-
-  @Test
-  public void testUpdateHiveConfig() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Map<String, String> propertiesHiveSite = new HashMap<String, String>() {{
-      put("hive.server2.logging.operation.log.location", "${system:java.io.tmpdir}/${system:user.name}/operation_logs");
-    }};
-    final Map<String, String> propertiesHiveSiteExpected = new HashMap<String, String>() {{
-      put("hive.server2.logging.operation.log.location", "/tmp/hive/operation_logs");
-    }};
-    final Map<String, String> propertiesHiveEnv = new HashMap<String, String>() {{
-      put("content", "test content");
-    }};
-    final Config hiveSiteConf = easyMockSupport.createNiceMock(Config.class);
-    final Config hiveEnvConf = easyMockSupport.createNiceMock(Config.class);
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("hive-site")).andReturn(hiveSiteConf).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("hive-env")).andReturn(hiveEnvConf).atLeastOnce();
-
-    expect(hiveSiteConf.getProperties()).andReturn(propertiesHiveSite).once();
-    expect(hiveEnvConf.getProperties()).andReturn(propertiesHiveEnv).once();
-
-    UpgradeCatalog213 upgradeCatalog213 = createMockBuilder(UpgradeCatalog213.class)
-            .withConstructor(Injector.class)
-            .withArgs(mockInjector)
-            .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
-                    Map.class, boolean.class, boolean.class)
-            .createMock();
-    upgradeCatalog213.updateConfigurationPropertiesForCluster(mockClusterExpected,
-            "hive-site", propertiesHiveSiteExpected, true, false);
-    expectLastCall().once();
-    upgradeCatalog213.updateConfigurationPropertiesForCluster(mockClusterExpected,
-            "hive-env", propertiesHiveEnv, true, true);
-    expectLastCall().once();
-
-    easyMockSupport.replayAll();
-    replay(upgradeCatalog213);
-    upgradeCatalog213.updateHiveConfig();
-    easyMockSupport.verifyAll();
-
-  }
-
-  @Test
-  public void testupdateKnoxTopology_NoRangerPlugin() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Map<String, String> propertiesTopologyWithoutAuthorizationProvider = new HashMap<String, String>() {{
-      put("content", "<topology> <gateway>  </gateway> </topology>");
-    }};
-    final Map<String, String> propertiesTopologyExpected = new HashMap<String, String>() {{
-      put("content", "<topology> <gateway>  <provider>\n" +
-              "               <role>authorization</role>\n" +
-              "               <name>AclsAuthz</name>\n" +
-              "               <enabled>true</enabled>\n" +
-              "          </provider>\n" +
-              "     </gateway> </topology>\n");
-    }};
-    final Config mockTopologyConf = easyMockSupport.createNiceMock(Config.class);
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("cl1", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("topology")).andReturn(mockTopologyConf).atLeastOnce();
-    expect(mockTopologyConf.getProperties()).andReturn(propertiesTopologyWithoutAuthorizationProvider).once();
-
-
-    UpgradeCatalog213 upgradeCatalog213 = createMockBuilder(UpgradeCatalog213.class)
-            .withConstructor(Injector.class)
-            .withArgs(mockInjector)
-            .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
-                    Map.class, boolean.class, boolean.class)
-            .createMock();
-    upgradeCatalog213.updateConfigurationPropertiesForCluster(mockClusterExpected,
-            "topology", propertiesTopologyExpected, true, false);
-    expectLastCall().once();
-
-    easyMockSupport.replayAll();
-    replay(upgradeCatalog213);
-    upgradeCatalog213.updateKnoxTopology();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testupdateKnoxTopology_ProviderAlreadyExists() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Map<String, String> propertiesTopologyWithAuthorizationProvider = new HashMap<String, String>() {{
-      put("content", "<topology> <gateway>  <provider>" +
-              "<role>authorization</role>" +
-              "<name>AclsAuthz</name>" +
-              "<enabled>true</enabled>" +
-              "</provider>" +
-              "</gateway> </topology>\n");
-    }};
-
-    final Config mockTopologyConf = easyMockSupport.createNiceMock(Config.class);
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("cl1", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("topology")).andReturn(mockTopologyConf).atLeastOnce();
-    expect(mockTopologyConf.getProperties()).andReturn(propertiesTopologyWithAuthorizationProvider).once();
-
-    // ATTENTION: this mock must not be called at all. If it is, something is wrong with the code under test.
-    UpgradeCatalog213 upgradeCatalog213 = createMockBuilder(UpgradeCatalog213.class)
-            .withConstructor(Injector.class)
-            .withArgs(mockInjector)
-            .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
-                    Map.class, boolean.class, boolean.class)
-            .createMock();
-
-
-
-    easyMockSupport.replayAll();
-    replay(upgradeCatalog213);
-    upgradeCatalog213.updateKnoxTopology();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testupdateKnoxTopology_RangerPluginAvailable() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Map<String, String> propertiesTopologyWithoutAuthorizationProvider = new HashMap<String, String>() {{
-      put("content", "<topology> <gateway>  </gateway> </topology>");
-    }};
-    final Map<String, String> propertiesRangerKnoxPluginProperties = new HashMap<String, String>() {{
-      put("ranger-knox-plugin-enabled", "Yes");
-    }};
-    final Map<String, String> propertiesTopologyExpected = new HashMap<String, String>() {{
-      put("content", "<topology> <gateway>  <provider>\n" +
-              "               <role>authorization</role>\n" +
-              "               <name>XASecurePDPKnox</name>\n" +
-              "               <enabled>true</enabled>\n" +
-              "          </provider>\n" +
-              "     </gateway> </topology>\n");
-    }};
-    final Config mockTopologyConf = easyMockSupport.createNiceMock(Config.class);
-    final Config mockRangerKnoxPluginConf = easyMockSupport.createNiceMock(Config.class);
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("cl1", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("topology")).andReturn(mockTopologyConf).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("ranger-knox-plugin-properties")).andReturn(mockRangerKnoxPluginConf).atLeastOnce();
-    expect(mockTopologyConf.getProperties()).andReturn(propertiesTopologyWithoutAuthorizationProvider).once();
-    expect(mockRangerKnoxPluginConf.getProperties()).andReturn(propertiesRangerKnoxPluginProperties).once();
-
-
-    UpgradeCatalog213 upgradeCatalog213 = createMockBuilder(UpgradeCatalog213.class)
-            .withConstructor(Injector.class)
-            .withArgs(mockInjector)
-            .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
-                    Map.class, boolean.class, boolean.class)
-            .createMock();
-    upgradeCatalog213.updateConfigurationPropertiesForCluster(mockClusterExpected,
-            "topology", propertiesTopologyExpected, true, false);
-    expectLastCall().once();
-
-    easyMockSupport.replayAll();
-    replay(upgradeCatalog213);
-    upgradeCatalog213.updateKnoxTopology();
-    easyMockSupport.verifyAll();
-
-  }
-
-
-  @Test
-  public void testUpdateHiveEnvContentHDP23() throws Exception {
-    UpgradeCatalog213 upgradeCatalog213 = new UpgradeCatalog213(injector);
-    String testContent = "# The heap size of the jvm stared by hive shell script can be controlled via:\n" +
-            "\n" +
-            "# Larger heap size may be required when running queries over large number of files or partitions.\n";
-    String expectedResult = "# The heap size of the jvm stared by hive shell script can be controlled via:\n" +
-            "\n" +
-            "if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
-            "  export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore\n" +
-            "else\n" +
-            "  export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\n" +
-            "fi\n" +
-            "\n" +
-            "export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"\n" +
-            "\n" +
-            "# Larger heap size may be required when running queries over large number of files or partitions.\n";
-    Assert.assertEquals(expectedResult, upgradeCatalog213.updateHiveEnvContentHDP23(testContent));
-  }
-
-
-  @Test
-  public void testUpdateHiveEnvContent() throws Exception {
-    UpgradeCatalog213 upgradeCatalog213 = new UpgradeCatalog213(injector);
-    // Test first case
-    String testContent = "# The heap size of the jvm stared by hive shell script can be controlled via:\n" +
-            "\n" +
-            "if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
-            "  export HADOOP_HEAPSIZE=\"{{hive_metastore_heapsize}}\"\n" +
-            "else\n" +
-            "  export HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\n" +
-            "fi\n" +
-            "\n" +
-            "export HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n" +
-            "\n" +
-            "# Larger heap size may be required when running queries over large number of files or partitions.\n";
-    String expectedResult = "# The heap size of the jvm stared by hive shell script can be controlled via:\n" +
-            "\n" +
-            "if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
-            "  export HADOOP_HEAPSIZE=\"{{hive_metastore_heapsize}}\"\n" +
-            "else\n" +
-            "  export HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\n" +
-            "fi\n" +
-            "\n" +
-            "export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"\n" +
-            "\n" +
-            "# Larger heap size may be required when running queries over large number of files or partitions.\n";
-    Assert.assertEquals(expectedResult, upgradeCatalog213.updateHiveEnvContent(testContent));
-    // Test second case
-    testContent = "# The heap size of the jvm stared by hive shell script can be controlled via:\n" +
-            "export SERVICE=$SERVICE\n" +
-            "if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
-            "  export HADOOP_HEAPSIZE=\"{{hive_metastore_heapsize}}\"\n" +
-            "else\n" +
-            "  export HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\n" +
-            "fi\n" +
-            "\n" +
-            "# Larger heap size may be required when running queries over large number of files or partitions.\n";
-    expectedResult = "# The heap size of the jvm stared by hive shell script can be controlled via:\n" +
-            "export SERVICE=$SERVICE\n" +
-            "if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
-            "  export HADOOP_HEAPSIZE=\"{{hive_metastore_heapsize}}\"\n" +
-            "else\n" +
-            "  export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\n" +
-            "fi\n" +
-            "\n" +
-            "export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"\n" +
-            "# Larger heap size may be required when running queries over large number of files or partitions.\n";
-    Assert.assertEquals(expectedResult, upgradeCatalog213.updateHiveEnvContent(testContent));
-  }
-
-  @Test
-  public void testUpdateAccumuloConfigs() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-
-    // We start with no client properties (< 2.1.3).
-    final Map<String, String> originalClientProperties = new HashMap<String, String>();
-    // And should get the following property on upgrade.
-    final Map<String, String> updatedClientProperties = new HashMap<String, String>() {
-      {
-        put("kerberos.server.primary", "{{bare_accumulo_principal}}");
-      }
-    };
-    
-    final Config clientConfig = easyMockSupport.createNiceMock(Config.class);
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    // Enable KERBEROS
-    expect(mockClusterExpected.getSecurityType()).andReturn(SecurityType.KERBEROS).once();
-    // Mock out our empty original properties
-    expect(mockClusterExpected.getDesiredConfigByType("client")).andReturn(clientConfig).atLeastOnce();
-    expect(clientConfig.getProperties()).andReturn(originalClientProperties).atLeastOnce();
-
-    UpgradeCatalog213 upgradeCatalog213 = createMockBuilder(UpgradeCatalog213.class)
-            .withConstructor(Injector.class)
-            .withArgs(mockInjector)
-            .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
-                    Map.class, boolean.class, boolean.class)
-            .createMock();
-    // Verify that we get this method called with the updated properties
-    upgradeCatalog213.updateConfigurationPropertiesForCluster(mockClusterExpected,
-            "client", updatedClientProperties, true, false);
-    expectLastCall().once();
-
-    // Run it
-    easyMockSupport.replayAll();
-    replay(upgradeCatalog213);
-    upgradeCatalog213.updateAccumuloConfigs();
-    easyMockSupport.verifyAll();
-  }
-}
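
Note on the test pattern above: the DDL tests (for example testShouldDDLsBeExecutedOnUpgrade) follow the same EasyMock cycle of record expectations with argument captures on a mocked DBAccessor, replay, run the catalog method, then verify and assert on the captured DBColumnInfo values. A minimal, self-contained sketch of that capture/replay/verify cycle is shown here; the "example" table and "example_flag" column are illustrative placeholders, not names taken from the catalog.

import static org.easymock.EasyMock.capture;
import static org.easymock.EasyMock.eq;

import org.apache.ambari.server.orm.DBAccessor;
import org.easymock.Capture;
import org.easymock.EasyMock;
import org.junit.Assert;
import org.junit.Test;

public class DdlCapturePatternSketch {

  @Test
  public void capturesColumnAddedDuringUpgrade() throws Exception {
    // 1. Record: expect addColumn on a hypothetical "example" table and capture the column definition.
    DBAccessor dbAccessor = EasyMock.createMock(DBAccessor.class);
    Capture<DBAccessor.DBColumnInfo> addedColumn = EasyMock.newCapture();
    dbAccessor.addColumn(eq("example"), capture(addedColumn));
    EasyMock.replay(dbAccessor);

    // 2. Act: production code would reach this call through the catalog's dbAccessor field.
    dbAccessor.addColumn("example",
        new DBAccessor.DBColumnInfo("example_flag", Integer.class, 1, 0, false));

    // 3. Verify the interaction and inspect the captured column metadata.
    EasyMock.verify(dbAccessor);
    Assert.assertEquals("example_flag", addedColumn.getValue().getName());
    Assert.assertEquals(Integer.class, addedColumn.getValue().getType());
  }
}

The captured values are exactly what the assertions at the end of testShouldDDLsBeExecutedOnUpgrade inspect, for example the kerberos_descriptor table name and the supports_auto_skip_failure column added to the stage table.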


[4/4] ambari git commit: AMBARI-14357: UpgradeCatalogs need to be reversioned (jluniya)

Posted by jl...@apache.org.
AMBARI-14357: UpgradeCatalogs need to be reversioned (jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b3ff65d3
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b3ff65d3
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b3ff65d3

Branch: refs/heads/branch-2.2
Commit: b3ff65d39488f01dcef0c83b1f22de6ef1f51448
Parents: 52c969e
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Tue Dec 22 15:26:27 2015 -0800
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Tue Dec 22 15:26:27 2015 -0800

----------------------------------------------------------------------
 .../server/upgrade/SchemaUpgradeHelper.java     |    2 +-
 .../server/upgrade/UpgradeCatalog213.java       | 1340 --------------
 .../server/upgrade/UpgradeCatalog220.java       | 1371 +++++++++++++++
 .../server/upgrade/UpgradeCatalog221.java       |   25 +
 .../server/upgrade/UpgradeCatalog200Test.java   |    2 +-
 .../server/upgrade/UpgradeCatalog213Test.java   | 1633 ------------------
 .../server/upgrade/UpgradeCatalog220Test.java   | 1617 +++++++++++++++++
 .../server/upgrade/UpgradeCatalog221Test.java   |   71 +-
 8 files changed, 3081 insertions(+), 2980 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b3ff65d3/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
index 3ad8328..4bb6843 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
@@ -180,7 +180,7 @@ public class SchemaUpgradeHelper {
       catalogBinder.addBinding().to(UpgradeCatalog211.class);
       catalogBinder.addBinding().to(UpgradeCatalog212.class);
       catalogBinder.addBinding().to(UpgradeCatalog2121.class);
-      catalogBinder.addBinding().to(UpgradeCatalog213.class);
+      catalogBinder.addBinding().to(UpgradeCatalog220.class);
       catalogBinder.addBinding().to(UpgradeCatalog221.class);
       catalogBinder.addBinding().to(FinalUpgradeCatalog.class);
 

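The catalogBinder lines above belong to the Guice Multibinder that SchemaUpgradeHelper.UpgradeHelperModule uses to register upgrade catalogs; this commit swaps UpgradeCatalog213 for UpgradeCatalog220 in that set. A minimal sketch of the Multibinder registration pattern follows; the Catalog, Catalog220 and Catalog221 types and their version strings are hypothetical stand-ins for illustration only, not the real Ambari classes.

import java.util.Set;

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Key;
import com.google.inject.TypeLiteral;
import com.google.inject.multibindings.Multibinder;

// Hypothetical stand-ins; only the binding pattern mirrors SchemaUpgradeHelper.
interface Catalog { String targetVersion(); }
class Catalog220 implements Catalog { public String targetVersion() { return "2.2.0"; } }
class Catalog221 implements Catalog { public String targetVersion() { return "2.2.1"; } }

public class CatalogBindingSketch {
  public static void main(String[] args) {
    Injector injector = Guice.createInjector(new AbstractModule() {
      @Override
      protected void configure() {
        // Each catalog is added to a Multibinder set, mirroring catalogBinder.addBinding().to(...) above.
        Multibinder<Catalog> catalogBinder = Multibinder.newSetBinder(binder(), Catalog.class);
        catalogBinder.addBinding().to(Catalog220.class);
        catalogBinder.addBinding().to(Catalog221.class);
      }
    });

    // The upgrade helper can then request the whole set and apply the catalogs in version order.
    Set<Catalog> catalogs = injector.getInstance(Key.get(new TypeLiteral<Set<Catalog>>() {}));
    for (Catalog catalog : catalogs) {
      System.out.println("Registered catalog targeting " + catalog.targetVersion());
    }
  }
}

Because the whole set is resolved at injection time, adding a new catalog is a one-line binding change, which is why the UpgradeCatalog213 javadoc below reminds implementers to register new catalogs in SchemaUpgradeHelper.UpgradeHelperModule#configure().
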
http://git-wip-us.apache.org/repos/asf/ambari/blob/b3ff65d3/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
deleted file mode 100644
index 2ee63ed..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
+++ /dev/null
@@ -1,1340 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParser;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.persist.Transactional;
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
-import org.apache.ambari.server.orm.dao.*;
-import org.apache.ambari.server.orm.entities.*;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.RepositoryVersionState;
-import org.apache.ambari.server.state.SecurityType;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.StackInfo;
-import org.apache.ambari.server.state.alert.SourceType;
-import org.apache.ambari.server.state.kerberos.*;
-import org.apache.ambari.server.state.stack.upgrade.Direction;
-import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
-import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
-import org.apache.ambari.server.utils.VersionUtils;
-import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-
-import javax.xml.transform.OutputKeys;
-import javax.xml.transform.Transformer;
-import javax.xml.transform.TransformerConfigurationException;
-import javax.xml.transform.TransformerException;
-import javax.xml.transform.TransformerFactory;
-import javax.xml.transform.dom.DOMSource;
-import javax.xml.transform.stream.StreamResult;
-import java.io.StringWriter;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.text.MessageFormat;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.UUID;
-import java.util.regex.Matcher;
-
-/**
- * Upgrade catalog for version 2.1.3.
- */
-public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
-
-  private static final String UPGRADE_TABLE = "upgrade";
-  private static final String STORM_SITE = "storm-site";
-  private static final String HDFS_SITE_CONFIG = "hdfs-site";
-  private static final String TOPOLOGY_CONFIG = "topology";
-  private static final String KAFKA_BROKER = "kafka-broker";
-  private static final String KAFKA_ENV_CONFIG = "kafka-env";
-  private static final String KAFKA_ENV_CONTENT_KERBEROS_PARAMS =
-    "export KAFKA_KERBEROS_PARAMS={{kafka_kerberos_params}}";
-  private static final String AMS_ENV = "ams-env";
-  private static final String AMS_HBASE_ENV = "ams-hbase-env";
-  private static final String AMS_SITE = "ams-site";
-  private static final String AMS_HBASE_SITE = "ams-hbase-site";
-  private static final String AMS_HBASE_SITE_ZK_TIMEOUT_PROPERTY =
-    "zookeeper.session.timeout.localHBaseCluster";
-  private static final String AMS_HBASE_SITE_NORMALIZER_ENABLED_PROPERTY = "hbase.normalizer.enabled";
-  private static final String AMS_HBASE_SITE_NORMALIZER_PERIOD_PROPERTY = "hbase.normalizer.period";
-  private static final String AMS_HBASE_SITE_NORMALIZER_CLASS_PROPERTY = "hbase.master.normalizer.class";
-  private static final String TIMELINE_METRICS_HBASE_FIFO_COMPACTION_ENABLED = "timeline.metrics.hbase.fifo.compaction.enabled";
-  private static final String HBASE_ENV_CONFIG = "hbase-env";
-  private static final String FLUME_ENV_CONFIG = "flume-env";
-  private static final String HIVE_SITE_CONFIG = "hive-site";
-  private static final String HIVE_ENV_CONFIG = "hive-env";
-  private static final String RANGER_ENV_CONFIG = "ranger-env";
-  private static final String RANGER_UGSYNC_SITE_CONFIG = "ranger-ugsync-site";
-  private static final String ZOOKEEPER_LOG4J_CONFIG = "zookeeper-log4j";
-  private static final String HADOOP_ENV_CONFIG = "hadoop-env";
-  private static final String NIMBS_MONITOR_FREQ_SECS_PROPERTY = "nimbus.monitor.freq.secs";
-  private static final String STORM_METRICS_REPORTER = "metrics.reporter.register";
-  private static final String HIVE_SERVER2_OPERATION_LOG_LOCATION_PROPERTY = "hive.server2.logging.operation.log.location";
-  private static final String CONTENT_PROPERTY = "content";
-  private static final String HADOOP_ENV_CONTENT_TO_APPEND = "\n{% if is_datanode_max_locked_memory_set %}\n" +
-                                    "# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n" +
-                                    "# Makes sense to fix only when runing DN as root \n" +
-                                    "if [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then\n" +
-                                    "  ulimit -l {{datanode_max_locked_memory}}\n" +
-                                    "fi\n" +
-                                    "{% endif %}\n";
-
-  private static final String DOWNGRADE_ALLOWED_COLUMN = "downgrade_allowed";
-  private static final String UPGRADE_SKIP_FAILURE_COLUMN = "skip_failures";
-  private static final String UPGRADE_SKIP_SC_FAILURE_COLUMN = "skip_sc_failures";
-  public static final String UPGRADE_PACKAGE_COL = "upgrade_package";
-  public static final String UPGRADE_TYPE_COL = "upgrade_type";
-  public static final String REPO_VERSION_TABLE = "repo_version";
-
-  private static final String HOST_ROLE_COMMAND_TABLE = "host_role_command";
-  private static final String HOST_ID_COL = "host_id";
-
-  private static final String KERBEROS_DESCRIPTOR_TABLE = "kerberos_descriptor";
-  private static final String KERBEROS_DESCRIPTOR_NAME_COLUMN = "kerberos_descriptor_name";
-  private static final String KERBEROS_DESCRIPTOR_COLUMN = "kerberos_descriptor";
-  private static final String RANGER_HDFS_PLUGIN_ENABLED_PROPERTY = "ranger-hdfs-plugin-enabled";
-  private static final String RANGER_HIVE_PLUGIN_ENABLED_PROPERTY = "ranger-hive-plugin-enabled";
-  private static final String RANGER_HBASE_PLUGIN_ENABLED_PROPERTY = "ranger-hbase-plugin-enabled";
-  private static final String RANGER_STORM_PLUGIN_ENABLED_PROPERTY = "ranger-storm-plugin-enabled";
-  private static final String RANGER_KNOX_PLUGIN_ENABLED_PROPERTY = "ranger-knox-plugin-enabled";
-  private static final String RANGER_YARN_PLUGIN_ENABLED_PROPERTY = "ranger-yarn-plugin-enabled";
-  private static final String RANGER_KAFKA_PLUGIN_ENABLED_PROPERTY = "ranger-kafka-plugin-enabled";
-
-  private static final String RANGER_USERSYNC_SOURCE_IMPL_CLASS_PROPERTY = "ranger.usersync.source.impl.class";
-
-  private static final String BLUEPRINT_TABLE = "blueprint";
-  private static final String SECURITY_TYPE_COLUMN = "security_type";
-  private static final String SECURITY_DESCRIPTOR_REF_COLUMN = "security_descriptor_reference";
-
-  private static final String STAGE_TABLE = "stage";
-
-  private static final String KNOX_SERVICE = "KNOX";
-
-  /**
-   * Logger.
-   */
-  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog213.class);
-
-  @Inject
-  DaoUtils daoUtils;
-
-  @Inject
-  private RepositoryVersionDAO repositoryVersionDAO;
-
-  @Inject
-  private ClusterDAO clusterDAO;
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Don't forget to register new UpgradeCatalogs in {@link org.apache.ambari.server.upgrade.SchemaUpgradeHelper.UpgradeHelperModule#configure()}
-   *
-   * @param injector Guice injector to track dependencies and uses bindings to inject them.
-   */
-  @Inject
-  public UpgradeCatalog213(Injector injector) {
-    super(injector);
-    this.injector = injector;
-  }
-
-  // ----- UpgradeCatalog ----------------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getTargetVersion() {
-    return "2.1.3";
-  }
-
-  // ----- AbstractUpgradeCatalog --------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getSourceVersion() {
-    return "2.1.2.1";
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDDLUpdates() throws AmbariException, SQLException {
-    executeUpgradeDDLUpdates();
-
-    // Alter the host_role_command table to allow host_id to be nullable
-    dbAccessor.alterColumn(HOST_ROLE_COMMAND_TABLE, new DBColumnInfo(HOST_ID_COL, Long.class, null, null, true));
-
-    addKerberosDescriptorTable();
-    executeBlueprintDDLUpdates();
-    executeStageDDLUpdates();
-  }
-
-  protected void executeUpgradeDDLUpdates() throws AmbariException, SQLException {
-    updateUpgradesDDL();
-  }
-
-  private void addKerberosDescriptorTable() throws SQLException {
-    List<DBAccessor.DBColumnInfo> columns = new ArrayList<DBAccessor.DBColumnInfo>();
-    columns.add(new DBAccessor.DBColumnInfo(KERBEROS_DESCRIPTOR_NAME_COLUMN, String.class, 255, null, false));
-    columns.add(new DBAccessor.DBColumnInfo(KERBEROS_DESCRIPTOR_COLUMN, char[].class, null, null, false));
-
-    LOG.debug("Creating table [ {} ] with columns [ {} ] and primary key: [ {} ]", KERBEROS_DESCRIPTOR_TABLE, columns, KERBEROS_DESCRIPTOR_NAME_COLUMN);
-    dbAccessor.createTable(KERBEROS_DESCRIPTOR_TABLE, columns, KERBEROS_DESCRIPTOR_NAME_COLUMN);
-  }
-
-  private void executeBlueprintDDLUpdates() throws AmbariException, SQLException {
-    dbAccessor.addColumn(BLUEPRINT_TABLE, new DBAccessor.DBColumnInfo(SECURITY_TYPE_COLUMN,
-      String.class, 32, "NONE", false));
-    dbAccessor.addColumn(BLUEPRINT_TABLE, new DBAccessor.DBColumnInfo(SECURITY_DESCRIPTOR_REF_COLUMN,
-      String.class, null, null, true));
-  }
-
-  /**
-   * Updates the {@code stage} table by:
-   * <ul>
-   * <li>Adding the {@code supports_auto_skip_failure} column</li>
-   * </ul>
-   *
-   * @throws SQLException
-   */
-  protected void executeStageDDLUpdates() throws SQLException {
-    dbAccessor.addColumn(STAGE_TABLE,
-      new DBAccessor.DBColumnInfo("supports_auto_skip_failure", Integer.class, 1, 0, false));
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executePreDMLUpdates() throws AmbariException, SQLException {
-    // execute DDL updates
-    executeStackUpgradeDDLUpdates();
-
-    // DDL and DML mixed code, double check here
-    bootstrapRepoVersionForHDP21();
-
-    // execute DML updates, no DDL things after this line
-    executeUpgradePreDMLUpdates();
-  }
-
-  /**
-   * Updates the following columns on the {@value #UPGRADE_TABLE} table to
-   * default values:
-   * <ul>
-   * <li>{@value #DOWNGRADE_ALLOWED_COLUMN}</li>
-   * <li>{@value #UPGRADE_SKIP_FAILURE_COLUMN}</li>
-   * <li>{@value #UPGRADE_SKIP_SC_FAILURE_COLUMN}</li>
-   * </ul>
-   *
-   * @throws AmbariException
-   * @throws SQLException
-   */
-  protected void executeUpgradePreDMLUpdates() throws AmbariException, SQLException {
-    UpgradeDAO upgradeDAO = injector.getInstance(UpgradeDAO.class);
-    List<UpgradeEntity> upgrades = upgradeDAO.findAll();
-    for (UpgradeEntity upgrade: upgrades){
-      if (upgrade.isDowngradeAllowed() == null) {
-        upgrade.setDowngradeAllowed(true);
-      }
-
-      // ensure that these are set to false for existing upgrades
-      upgrade.setAutoSkipComponentFailures(false);
-      upgrade.setAutoSkipServiceCheckFailures(false);
-
-      // apply changes
-      upgradeDAO.merge(upgrade);
-
-      LOG.info(String.format("Updated upgrade id %s, upgrade pack %s from version %s to %s",
-          upgrade.getId(), upgrade.getUpgradePackage(), upgrade.getFromVersion(),
-          upgrade.getToVersion()));
-    }
-
-    // make the columns nullable now that they have defaults
-    dbAccessor.setColumnNullable(UPGRADE_TABLE, DOWNGRADE_ALLOWED_COLUMN, false);
-    dbAccessor.setColumnNullable(UPGRADE_TABLE, UPGRADE_SKIP_FAILURE_COLUMN, false);
-    dbAccessor.setColumnNullable(UPGRADE_TABLE, UPGRADE_SKIP_SC_FAILURE_COLUMN, false);
-  }
-
-  @Override
-  protected void executeDMLUpdates() throws AmbariException, SQLException {
-    addNewConfigurationsFromXml();
-    updateHadoopEnv();
-    updateStormConfigs();
-    updateAMSConfigs();
-    updateHDFSConfigs();
-    updateHbaseEnvConfig();
-    updateFlumeEnvConfig();
-    updateAlertDefinitions();
-    updateKafkaConfigs();
-    updateRangerEnvConfig();
-    updateRangerUgsyncSiteConfig();
-    updateZookeeperLog4j();
-    updateHiveConfig();
-    updateAccumuloConfigs();
-    updateKerberosDescriptorArtifacts();
-    updateKnoxTopology();
-  }
-
-  protected void updateKnoxTopology() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config topology = cluster.getDesiredConfigByType(TOPOLOGY_CONFIG);
-      if (topology != null) {
-        String content = topology.getProperties().get(CONTENT_PROPERTY);
-        if (content != null) {
-          Document topologyXml = convertStringToDocument(content);
-          if (topologyXml != null) {
-            Element root = topologyXml.getDocumentElement();
-            if (root != null)  {
-              NodeList providerNodes = root.getElementsByTagName("provider");
-              boolean authorizationProviderExists = false;
-              try {
-                for (int i = 0; i < providerNodes.getLength(); i++) {
-                  Node providerNode = providerNodes.item(i);
-                  NodeList childNodes = providerNode.getChildNodes();
-                  for (int k = 0; k < childNodes.getLength(); k++) {
-                    Node child = childNodes.item(k);
-                    child.normalize();
-                    String childTextContent = child.getTextContent();
-                    if (childTextContent != null && childTextContent.toLowerCase().equals("authorization")) {
-                      authorizationProviderExists = true;
-                      break;
-                    }
-                  }
-                  if (authorizationProviderExists) {
-                    break;
-                  }
-                }
-              } catch(Exception e) {
-                e.printStackTrace();
-                LOG.error("Error occurred during check 'authorization' provider already exists in topology." + e);
-                return;
-              }
-              if (!authorizationProviderExists) {
-                NodeList nodeList = root.getElementsByTagName("gateway");
-                if (nodeList != null && nodeList.getLength() > 0) {
-                  boolean rangerPluginEnabled = isRangerPluginEnabled(cluster);
-
-                  Node gatewayNode = nodeList.item(0);
-                  Element newProvider = topologyXml.createElement("provider");
-
-                  Element role = topologyXml.createElement("role");
-                  role.appendChild(topologyXml.createTextNode("authorization"));
-                  newProvider.appendChild(role);
-
-                  Element name = topologyXml.createElement("name");
-                  if (rangerPluginEnabled) {
-                    name.appendChild(topologyXml.createTextNode("XASecurePDPKnox"));
-                  } else {
-                    name.appendChild(topologyXml.createTextNode("AclsAuthz"));
-                  }
-                  newProvider.appendChild(name);
-
-                  Element enabled = topologyXml.createElement("enabled");
-                  enabled.appendChild(topologyXml.createTextNode("true"));
-                  newProvider.appendChild(enabled);
-
-
-                  gatewayNode.appendChild(newProvider);
-
-                  DOMSource topologyDomSource = new DOMSource(root);
-                  StringWriter writer = new StringWriter();
-                  try {
-                    Transformer transformer = TransformerFactory.newInstance().newTransformer();
-                    transformer.setOutputProperty(OutputKeys.ENCODING, "UTF-8");
-                    transformer.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes");
-                    transformer.setOutputProperty(OutputKeys.METHOD, "xml");
-                    transformer.setOutputProperty(OutputKeys.INDENT, "yes");
-                    transformer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "5");
-                    transformer.transform(topologyDomSource, new StreamResult(writer));
-                  } catch (TransformerConfigurationException e) {
-                    e.printStackTrace();
-                    LOG.error("Unable to create transformer instance, to convert Document(XML) to String. " + e);
-                    return;
-                  } catch (TransformerException e) {
-                    e.printStackTrace();
-                    LOG.error("Unable to transform Document(XML) to StringWriter. " + e);
-                    return;
-                  }
-
-                  content = writer.toString();
-                  Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, content);
-                  updateConfigurationPropertiesForCluster(cluster, TOPOLOGY_CONFIG, updates, true, false);
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Moves the upgrade_package column from the repo_version table to the upgrade table as follows:
-   * <ul>
-   * <li>add the upgrade_package column to the upgrade table as String 255, nullable</li>
-   * <li>populate the column in the upgrade table</li>
-   * <li>drop the column from the repo_version table</li>
-   * <li>make the column in the upgrade table non-nullable</li>
-   * </ul>
-   * This has to be called as part of DML and not DDL since the persistence service has to be started.
-   * @throws AmbariException
-   * @throws SQLException
-   */
-  @Transactional
-  protected void executeStackUpgradeDDLUpdates() throws SQLException, AmbariException {
-    final Configuration.DatabaseType databaseType = configuration.getDatabaseType();
-
-    // Add columns
-    if (!dbAccessor.tableHasColumn(UPGRADE_TABLE, UPGRADE_PACKAGE_COL)) {
-      LOG.info("Adding upgrade_package column to upgrade table.");
-      dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_PACKAGE_COL, String.class, 255, null, true));
-    }
-    if (!dbAccessor.tableHasColumn(UPGRADE_TABLE, UPGRADE_TYPE_COL)) {
-      LOG.info("Adding upgrade_type column to upgrade table.");
-      dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_TYPE_COL, String.class, 32, null, true));
-    }
-
-    // Populate values in upgrade table.
-    boolean success = populateUpgradeTable();
-
-    if (!success) {
-      throw new AmbariException("Errors found while populating the upgrade table with values for columns upgrade_type and upgrade_package.");
-    }
-
-    if (dbAccessor.tableHasColumn(REPO_VERSION_TABLE, UPGRADE_PACKAGE_COL)) {
-      LOG.info("Dropping upgrade_package column from repo_version table.");
-      dbAccessor.dropColumn(REPO_VERSION_TABLE, UPGRADE_PACKAGE_COL);
-
-      // Now that the values are populated, make the added column non-nullable
-      LOG.info("Making upgrade_package column in the upgrade table non-nullable.");
-      if (databaseType == Configuration.DatabaseType.DERBY) {
-        // This is a workaround for UpgradeTest.java unit test
-        dbAccessor.executeQuery("ALTER TABLE " + UPGRADE_TABLE + " ALTER column " + UPGRADE_PACKAGE_COL + " NOT NULL");
-      } else {
-        dbAccessor.alterColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_PACKAGE_COL, String.class, 255, null, false));
-      }
-    }
-
-    if (dbAccessor.tableHasColumn(REPO_VERSION_TABLE, UPGRADE_TYPE_COL)) {
-      // Now that the values are populated, make the added column non-nullable
-      LOG.info("Making upgrade_type column in the upgrade table non-nullable.");
-      if (databaseType == Configuration.DatabaseType.DERBY) {
-        // This is a workaround for UpgradeTest.java unit test
-        dbAccessor.executeQuery("ALTER TABLE " + UPGRADE_TABLE + " ALTER column " + UPGRADE_TYPE_COL + " NOT NULL");
-      } else {
-        dbAccessor.alterColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_TYPE_COL, String.class, 32, null, false));
-      }
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void updateKerberosDescriptorArtifact(ArtifactDAO artifactDAO, ArtifactEntity artifactEntity) throws AmbariException {
-    if (artifactEntity != null) {
-      Map<String, Object> data = artifactEntity.getArtifactData();
-
-      if (data != null) {
-        final KerberosDescriptor kerberosDescriptor = new KerberosDescriptorFactory().createInstance(data);
-
-        if (kerberosDescriptor != null) {
-          KerberosServiceDescriptor hdfsService = kerberosDescriptor.getService("HDFS");
-          if(hdfsService != null) {
-            // before 2.1.3 the hdfs identity was expected to be defined in the HDFS service
-            KerberosIdentityDescriptor hdfsIdentity = hdfsService.getIdentity("hdfs");
-            KerberosComponentDescriptor namenodeComponent = hdfsService.getComponent("NAMENODE");
-            hdfsIdentity.setName("hdfs");
-            hdfsService.removeIdentity("hdfs");
-            namenodeComponent.putIdentity(hdfsIdentity);
-          }
-          updateKerberosDescriptorIdentityReferences(kerberosDescriptor, "/HDFS/hdfs", "/HDFS/NAMENODE/hdfs");
-          updateKerberosDescriptorIdentityReferences(kerberosDescriptor.getServices(), "/HDFS/hdfs", "/HDFS/NAMENODE/hdfs");
-
-          artifactEntity.setArtifactData(kerberosDescriptor.toMap());
-          artifactDAO.merge(artifactEntity);
-        }
-      }
-    }
-  }
-
-  /**
-   * Populate the upgrade table with values for the columns upgrade_type and upgrade_package.
-   * The upgrade_type will default to {@code org.apache.ambari.server.state.stack.upgrade.UpgradeType.ROLLING}
-   * whereas the upgrade_package will be calculated.
-   * @return {@code true} on success, and {@code false} otherwise.
-   */
-  private boolean populateUpgradeTable() {
-    boolean success = true;
-    Statement statement = null;
-    ResultSet rs = null;
-    try {
-      statement = dbAccessor.getConnection().createStatement();
-      if (statement != null) {
-        // Need to use SQL since the schema is changing and some of the columns have not yet been added..
-        rs = statement.executeQuery("SELECT upgrade_id, cluster_id, from_version, to_version, direction, upgrade_package, upgrade_type FROM upgrade");
-        if (rs != null) {
-          try {
-            while (rs.next()) {
-              final long upgradeId = rs.getLong("upgrade_id");
-              final long clusterId = rs.getLong("cluster_id");
-              final String fromVersion = rs.getString("from_version");
-              final String toVersion = rs.getString("to_version");
-              final Direction direction = Direction.valueOf(rs.getString("direction"));
-              // These two values are likely null.
-              String upgradePackage = rs.getString("upgrade_package");
-              String upgradeType = rs.getString("upgrade_type");
-
-              LOG.info(MessageFormat.format("Populating rows for the upgrade table record with " +
-                      "upgrade_id: {0,number,#}, cluster_id: {1,number,#}, from_version: {2}, to_version: {3}, direction: {4}",
-                  upgradeId, clusterId, fromVersion, toVersion, direction));
-
-              // Set all upgrades that have been done so far to type "rolling"
-              if (StringUtils.isEmpty(upgradeType)) {
-                LOG.info("Updating the record's upgrade_type to " + UpgradeType.ROLLING);
-                dbAccessor.executeQuery("UPDATE upgrade SET upgrade_type = '" + UpgradeType.ROLLING + "' WHERE upgrade_id = " + upgradeId);
-              }
-
-              if (StringUtils.isEmpty(upgradePackage)) {
-                String version = null;
-                StackEntity stack = null;
-
-                if (direction == Direction.UPGRADE) {
-                  version = toVersion;
-                } else if (direction == Direction.DOWNGRADE) {
-                  // TODO AMBARI-12698, this is going to be a problem.
-                  // During a downgrade, the "to_version" is overwritten to the source version, but the "from_version"
-                  // doesn't swap. E.g.,
-                  //  upgrade_id | from_version |  to_version  | direction
-                  // ------------+--------------+--------------+----------
-                  //           1 | 2.2.6.0-2800 | 2.3.0.0-2557 | UPGRADE
-                  //           2 | 2.2.6.0-2800 | 2.2.6.0-2800 | DOWNGRADE
-                  version = fromVersion;
-                }
-
-                ClusterEntity cluster = clusterDAO.findById(clusterId);
-
-                if (null != cluster) {
-                  stack = cluster.getDesiredStack();
-                  upgradePackage = calculateUpgradePackage(stack, version);
-                } else {
-                  LOG.error("Could not find a cluster with cluster_id " + clusterId);
-                }
-
-                if (!StringUtils.isEmpty(upgradePackage)) {
-                  LOG.info("Updating the record's upgrade_package to " + upgradePackage);
-                  dbAccessor.executeQuery("UPDATE upgrade SET upgrade_package = '" + upgradePackage + "' WHERE upgrade_id = " + upgradeId);
-                } else {
-                  success = false;
-                  LOG.error("Unable to populate column upgrade_package for record in table upgrade with id " + upgradeId);
-                }
-              }
-            }
-          } catch (Exception e) {
-            success = false;
-            e.printStackTrace();
-            LOG.error("Unable to populate the upgrade_type and upgrade_package columns of the upgrade table. " + e);
-          }
-        }
-      }
-    } catch (Exception e) {
-      success = false;
-      e.printStackTrace();
-      LOG.error("Failed to retrieve records from the upgrade table to populate the upgrade_type and upgrade_package columns. Exception: " + e);
-    } finally {
-      try {
-        if (rs != null) {
-          rs.close();
-        }
-        if (statement != null) {
-          statement.close();
-        }
-      } catch (SQLException e) {
-        ;
-      }
-    }
-    return success;
-  }
-
-  /**
-   * Find the single Repo Version for the given stack and version, and return its upgrade_package column.
-   * Because the upgrade_package column is going to be removed from this entity, must use raw SQL
-   * instead of the entity class.
-   * @param stack Stack
-   * @param version Stack version
-   * @return The value of the upgrade_package column, or null if not found.
-   */
-
-  private String calculateUpgradePackage(StackEntity stack, String version) {
-    String upgradePackage = null;
-    // Find the corresponding repo_version, and extract its upgrade_package
-    if (null != version && null != stack) {
-      RepositoryVersionEntity repoVersion = repositoryVersionDAO.findByStackNameAndVersion(stack.getStackName(), version);
-
-      Statement statement = null;
-      ResultSet rs = null;
-      try {
-        statement = dbAccessor.getConnection().createStatement();
-        if (statement != null) {
-          // Need to use SQL since the schema is changing and the entity will no longer have the upgrade_package column.
-          rs = statement.executeQuery("SELECT upgrade_package FROM repo_version WHERE repo_version_id = " + repoVersion.getId());
-          if (rs != null && rs.next()) {
-            upgradePackage = rs.getString("upgrade_package");
-          }
-        }
-      } catch (Exception e) {
-        LOG.error("Failed to retrieve upgrade_package for repo_version record with id " + repoVersion.getId() + ". Exception: " + e.getMessage());
-      } finally {
-        try {
-          if (rs != null) {
-            rs.close();
-          }
-          if (statement != null) {
-            statement.close();
-          }
-        } catch (SQLException e) {
-          ;
-        }
-      }
-    }
-    return upgradePackage;
-  }
-
-  /**
-   * If still on HDP 2.1, then no repo versions exist, so need to bootstrap the HDP 2.1 repo version,
-   * and mark it as CURRENT in the cluster_version table for the cluster, as well as the host_version table
-   * for all hosts.
-   */
-  @Transactional
-  public void bootstrapRepoVersionForHDP21() throws AmbariException, SQLException {
-    final String hardcodedInitialVersion = "2.1.0.0-0001";
-    AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
-    AmbariMetaInfo ambariMetaInfo = amc.getAmbariMetaInfo();
-    StackDAO stackDAO = injector.getInstance(StackDAO.class);
-    RepositoryVersionHelper repositoryVersionHelper = injector.getInstance(RepositoryVersionHelper.class);
-    RepositoryVersionDAO repositoryVersionDAO = injector.getInstance(RepositoryVersionDAO.class);
-    ClusterVersionDAO clusterVersionDAO = injector.getInstance(ClusterVersionDAO.class);
-    HostVersionDAO hostVersionDAO = injector.getInstance(HostVersionDAO.class);
-
-    Clusters clusters = amc.getClusters();
-    if (clusters == null) {
-      LOG.error("Unable to get Clusters entity.");
-      return;
-    }
-
-    for (Cluster cluster : clusters.getClusters().values()) {
-      ClusterEntity clusterEntity = clusterDAO.findByName(cluster.getClusterName());
-      final StackId stackId = cluster.getCurrentStackVersion();
-      LOG.info(MessageFormat.format("Analyzing cluster {0}, currently at stack {1} and version {2}",
-          cluster.getClusterName(), stackId.getStackName(), stackId.getStackVersion()));
-
-      if (stackId.getStackName().equalsIgnoreCase("HDP") && stackId.getStackVersion().equalsIgnoreCase("2.1")) {
-        final StackInfo stackInfo = ambariMetaInfo.getStack(stackId.getStackName(), stackId.getStackVersion());
-        StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
-
-        LOG.info("Bootstrapping the versions since using HDP-2.1");
-
-        // The actual value is not known, so use this.
-        String displayName = stackId.getStackName() + "-" + hardcodedInitialVersion;
-
-        // However, the Repo URLs should be correct.
-        String operatingSystems = repositoryVersionHelper.serializeOperatingSystems(stackInfo.getRepositories());
-
-        // Create the Repo Version if it doesn't already exist.
-        RepositoryVersionEntity repoVersionEntity = repositoryVersionDAO.findByDisplayName(displayName);
-        if (null != repoVersionEntity) {
-          LOG.info(MessageFormat.format("A Repo Version already exists with Display Name: {0}", displayName));
-        } else {
-          final long repoVersionIdSeq = repositoryVersionDAO.findMaxId("id");
-          // Safe to attempt to add the sequence if it doesn't exist already.
-          addSequence("repo_version_id_seq", repoVersionIdSeq, false);
-
-          repoVersionEntity = repositoryVersionDAO.create(
-              stackEntity, hardcodedInitialVersion, displayName, operatingSystems);
-          LOG.info(MessageFormat.format("Created Repo Version with ID: {0,number,#}\n, Display Name: {1}, Repo URLs: {2}\n",
-              repoVersionEntity.getId(), displayName, operatingSystems));
-        }
-
-        // Create the Cluster Version if it doesn't already exist.
-        ClusterVersionEntity clusterVersionEntity = clusterVersionDAO.findByClusterAndStackAndVersion(cluster.getClusterName(),
-            stackId, hardcodedInitialVersion);
-
-        if (null != clusterVersionEntity) {
-          LOG.info(MessageFormat.format("A Cluster Version version for cluster: {0}, version: {1}, already exists; its state is {2}.",
-              cluster.getClusterName(), clusterVersionEntity.getRepositoryVersion().getVersion(), clusterVersionEntity.getState()));
-
-          // If there are no CURRENT cluster versions, make this one the CURRENT one.
-          if (clusterVersionEntity.getState() != RepositoryVersionState.CURRENT &&
-              clusterVersionDAO.findByClusterAndState(cluster.getClusterName(), RepositoryVersionState.CURRENT).isEmpty()) {
-            clusterVersionEntity.setState(RepositoryVersionState.CURRENT);
-            clusterVersionDAO.merge(clusterVersionEntity);
-          }
-        } else {
-          final long clusterVersionIdSeq = clusterVersionDAO.findMaxId("id");
-          // Safe to attempt to add the sequence if it doesn't exist already.
-          addSequence("cluster_version_id_seq", clusterVersionIdSeq, false);
-
-          clusterVersionEntity = clusterVersionDAO.create(clusterEntity, repoVersionEntity, RepositoryVersionState.CURRENT,
-              System.currentTimeMillis(), System.currentTimeMillis(), "admin");
-          LOG.info(MessageFormat.format("Created Cluster Version with ID: {0,number,#}, cluster: {1}, version: {2}, state: {3}.",
-              clusterVersionEntity.getId(), cluster.getClusterName(), clusterVersionEntity.getRepositoryVersion().getVersion(),
-              clusterVersionEntity.getState()));
-        }
-
-        // Create the Host Versions if they don't already exist.
-        Collection<HostEntity> hosts = clusterEntity.getHostEntities();
-        boolean addedAtLeastOneHost = false;
-        if (null != hosts && !hosts.isEmpty()) {
-          for (HostEntity hostEntity : hosts) {
-            HostVersionEntity hostVersionEntity = hostVersionDAO.findByClusterStackVersionAndHost(cluster.getClusterName(),
-                stackId, hardcodedInitialVersion, hostEntity.getHostName());
-
-            if (null != hostVersionEntity) {
-              LOG.info(MessageFormat.format("A Host Version version for cluster: {0}, version: {1}, host: {2}, already exists; its state is {3}.",
-                  cluster.getClusterName(), hostVersionEntity.getRepositoryVersion().getVersion(),
-                  hostEntity.getHostName(), hostVersionEntity.getState()));
-
-              if (hostVersionEntity.getState() != RepositoryVersionState.CURRENT &&
-                  hostVersionDAO.findByClusterHostAndState(cluster.getClusterName(), hostEntity.getHostName(),
-                      RepositoryVersionState.CURRENT).isEmpty()) {
-                hostVersionEntity.setState(RepositoryVersionState.CURRENT);
-                hostVersionDAO.merge(hostVersionEntity);
-              }
-            } else {
-              // Only add the sequence once, before creating the first host version.
-              if (!addedAtLeastOneHost) {
-                final long hostVersionIdSeq = hostVersionDAO.findMaxId("id");
-                // Safe to attempt to add the sequence if it doesn't exist already.
-                addSequence("host_version_id_seq", hostVersionIdSeq, false);
-                addedAtLeastOneHost = true;
-              }
-
-              hostVersionEntity = new HostVersionEntity(hostEntity, repoVersionEntity, RepositoryVersionState.CURRENT);
-              hostVersionDAO.create(hostVersionEntity);
-              LOG.info(MessageFormat.format("Created Host Version with ID: {0,number,#}, cluster: {1}, version: {2}, host: {3}, state: {4}.",
-                  hostVersionEntity.getId(), cluster.getClusterName(), hostVersionEntity.getRepositoryVersion().getVersion(),
-                  hostEntity.getHostName(), hostVersionEntity.getState()));
-            }
-          }
-        } else {
-          LOG.info(MessageFormat.format("Not inserting any Host Version records since cluster {0} does not have any hosts.",
-              cluster.getClusterName()));
-        }
-      }
-    }
-  }
-
-  /**
-   * Adds the following columns to the {@value #UPGRADE_TABLE} table:
-   * <ul>
-   * <li>{@value #DOWNGRADE_ALLOWED_COLUMN}</li>
-   * <li>{@value #UPGRADE_SKIP_FAILURE_COLUMN}</li>
-   * <li>{@value #UPGRADE_SKIP_SC_FAILURE_COLUMN}</li>
-   * </ul>
-   *
-   * @throws SQLException
-   */
-  protected void updateUpgradesDDL() throws SQLException {
-    dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(DOWNGRADE_ALLOWED_COLUMN, Short.class, 1, null, true));
-    dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_SKIP_FAILURE_COLUMN, Short.class, 1, null, true));
-    dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_SKIP_SC_FAILURE_COLUMN, Short.class, 1, null, true));
-  }
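-
-  // Illustration only, not executed: on most SQL dialects the three addColumn calls above
-  // amount to DDL roughly of this shape (the table and column names are whatever
-  // UPGRADE_TABLE and the *_COLUMN constants resolve to; exact types and syntax are
-  // chosen by DBAccessor, so treat this as a sketch):
-  //
-  //   ALTER TABLE <upgrade_table> ADD <downgrade_allowed_column>       SMALLINT NULL;
-  //   ALTER TABLE <upgrade_table> ADD <upgrade_skip_failure_column>    SMALLINT NULL;
-  //   ALTER TABLE <upgrade_table> ADD <upgrade_skip_sc_failure_column> SMALLINT NULL;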
-
-  /**
-   * Modifies the JSON of some of the alert definitions which have changed
-   * between Ambari versions.
-   */
-  protected void updateAlertDefinitions() {
-    LOG.info("Updating alert definitions.");
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    AlertDefinitionDAO alertDefinitionDAO = injector.getInstance(AlertDefinitionDAO.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
-    for (final Cluster cluster : clusterMap.values()) {
-      long clusterID = cluster.getClusterId();
-      final AlertDefinitionEntity journalNodeProcessAlertDefinitionEntity = alertDefinitionDAO.findByName(
-        clusterID, "journalnode_process");
-      final AlertDefinitionEntity hostDiskUsageAlertDefinitionEntity = alertDefinitionDAO.findByName(
-          clusterID, "ambari_agent_disk_usage");
-
-      if (journalNodeProcessAlertDefinitionEntity != null) {
-        String source = journalNodeProcessAlertDefinitionEntity.getSource();
-
-        journalNodeProcessAlertDefinitionEntity.setSource(modifyJournalnodeProcessAlertSource(source));
-        journalNodeProcessAlertDefinitionEntity.setSourceType(SourceType.WEB);
-        journalNodeProcessAlertDefinitionEntity.setHash(UUID.randomUUID().toString());
-
-        alertDefinitionDAO.merge(journalNodeProcessAlertDefinitionEntity);
-        LOG.info("journalnode_process alert definition was updated.");
-      }
-
-      if (hostDiskUsageAlertDefinitionEntity != null) {
-        hostDiskUsageAlertDefinitionEntity.setDescription("This host-level alert is triggered if the amount of disk " +
-          "space used goes above specific thresholds. The default threshold values are 50% for WARNING and 80% for CRITICAL.");
-        hostDiskUsageAlertDefinitionEntity.setLabel("Host Disk Usage");
-
-        alertDefinitionDAO.merge(hostDiskUsageAlertDefinitionEntity);
-        LOG.info("ambari_agent_disk_usage alert definition was updated.");
-      }
-    }
-  }
-
-  /**
-   * Changes the type of the journalnode_process alert to WEB, updates the
-   * reporting text and uri for the WEB type, and removes the default_port
-   * property.
-   */
-  String modifyJournalnodeProcessAlertSource(String source) {
-    JsonObject rootJson = new JsonParser().parse(source).getAsJsonObject();
-
-    rootJson.remove("type");
-    rootJson.addProperty("type", "WEB");
-
-    rootJson.remove("default_port");
-
-    rootJson.remove("uri");
-    JsonObject uriJson = new JsonObject();
-    uriJson.addProperty("http", "{{hdfs-site/dfs.journalnode.http-address}}");
-    uriJson.addProperty("https", "{{hdfs-site/dfs.journalnode.https-address}}");
-    uriJson.addProperty("kerberos_keytab", "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}");
-    uriJson.addProperty("kerberos_principal", "{{hdfs-site/dfs.web.authentication.kerberos.principal}}");
-    uriJson.addProperty("https_property", "{{hdfs-site/dfs.http.policy}}");
-    uriJson.addProperty("https_property_value", "HTTPS_ONLY");
-    uriJson.addProperty("connection_timeout", 5.0);
-    rootJson.add("uri", uriJson);
-
-    rootJson.getAsJsonObject("reporting").getAsJsonObject("ok").remove("text");
-    rootJson.getAsJsonObject("reporting").getAsJsonObject("ok").addProperty(
-            "text", "HTTP {0} response in {2:.3f}s");
-
-    rootJson.getAsJsonObject("reporting").getAsJsonObject("warning").remove("text");
-    rootJson.getAsJsonObject("reporting").getAsJsonObject("warning").addProperty(
-            "text", "HTTP {0} response from {1} in {2:.3f}s ({3})");
-    rootJson.getAsJsonObject("reporting").getAsJsonObject("warning").remove("value");
-
-    rootJson.getAsJsonObject("reporting").getAsJsonObject("critical").remove("text");
-    rootJson.getAsJsonObject("reporting").getAsJsonObject("critical").addProperty("text",
-            "Connection failed to {1} ({3})");
-    rootJson.getAsJsonObject("reporting").getAsJsonObject("critical").remove("value");
-
-    return rootJson.toString();
-  }
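-
-  // Illustrative example only (not used by the upgrade): assuming an original PORT-style
-  // source along the lines of
-  //   {"type":"PORT","default_port":8480,"uri":"{{hdfs-site/dfs.journalnode.http-address}}",
-  //    "reporting":{"ok":{"text":"..."},"warning":{"text":"...","value":1.5},"critical":{"text":"...","value":5.0}}}
-  // modifyJournalnodeProcessAlertSource() rewrites it into a WEB-style source:
-  //   {"type":"WEB",
-  //    "uri":{"http":"{{hdfs-site/dfs.journalnode.http-address}}",
-  //           "https":"{{hdfs-site/dfs.journalnode.https-address}}",
-  //           "kerberos_keytab":"{{hdfs-site/dfs.web.authentication.kerberos.keytab}}",
-  //           "kerberos_principal":"{{hdfs-site/dfs.web.authentication.kerberos.principal}}",
-  //           "https_property":"{{hdfs-site/dfs.http.policy}}",
-  //           "https_property_value":"HTTPS_ONLY",
-  //           "connection_timeout":5.0},
-  //    "reporting":{"ok":{"text":"HTTP {0} response in {2:.3f}s"},
-  //                 "warning":{"text":"HTTP {0} response from {1} in {2:.3f}s ({3})"},
-  //                 "critical":{"text":"Connection failed to {1} ({3})"}}}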
-
-  protected void updateZookeeperLog4j() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config zookeeperLog4jConfig = cluster.getDesiredConfigByType(ZOOKEEPER_LOG4J_CONFIG);
-      if (zookeeperLog4jConfig != null) {
-        String content = zookeeperLog4jConfig.getProperties().get(CONTENT_PROPERTY);
-        if (content != null) {
-          content = content.replaceAll("[\n^]\\s*log4j\\.rootLogger\\s*=\\s*INFO\\s*,\\s*CONSOLE", "\nlog4j.rootLogger=INFO, ROLLINGFILE");
-          Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, content);
-          updateConfigurationPropertiesForCluster(cluster, ZOOKEEPER_LOG4J_CONFIG, updates, true, false);
-        }
-      }
-    }
-  }
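-
-  // Sketch of the intended rewrite (the actual content comes from the cluster's
-  // zookeeper-log4j desired config): a content line such as
-  //   log4j.rootLogger=INFO, CONSOLE
-  // is replaced by
-  //   log4j.rootLogger=INFO, ROLLINGFILE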
-
-  protected void updateHadoopEnv() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config hadoopEnvConfig = cluster.getDesiredConfigByType(HADOOP_ENV_CONFIG);
-      if (hadoopEnvConfig != null) {
-        String content = hadoopEnvConfig.getProperties().get(CONTENT_PROPERTY);
-        if (content != null) {
-          content += HADOOP_ENV_CONTENT_TO_APPEND;
-          Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, content);
-          updateConfigurationPropertiesForCluster(cluster, HADOOP_ENV_CONFIG, updates, true, false);
-        }
-      }
-    }
-  }
-
-  protected void updateHDFSConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(
-            AmbariManagementController.class);
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(ambariManagementController.getClusters());
-
-    for (final Cluster cluster : clusterMap.values()) {
-      // Remove dfs.namenode.rpc-address property when NN HA is enabled
-      if (cluster.getDesiredConfigByType(HDFS_SITE_CONFIG) != null && isNNHAEnabled(cluster)) {
-        Set<String> removePropertiesSet = new HashSet<>();
-        removePropertiesSet.add("dfs.namenode.rpc-address");
-        removeConfigurationPropertiesFromCluster(cluster, HDFS_SITE_CONFIG, removePropertiesSet);
-      }
-    }
-  }
-
-  protected void updateStormConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config stormSiteProps = cluster.getDesiredConfigByType(STORM_SITE);
-      if (stormSiteProps != null) {
-        Map<String, String> updates = new HashMap<>();
-
-        String nimbusMonitorFreqSecs = stormSiteProps.getProperties().get(NIMBS_MONITOR_FREQ_SECS_PROPERTY);
-        if (nimbusMonitorFreqSecs != null && nimbusMonitorFreqSecs.equals("10")) {
-          updates.put(NIMBS_MONITOR_FREQ_SECS_PROPERTY, "120");
-        }
-
-        Service amsService = null;
-        try {
-          amsService = cluster.getService("AMBARI_METRICS");
-        } catch(AmbariException ambariException) {
-          LOG.info("AMBARI_METRICS service not found in cluster while updating storm-site properties");
-        }
-        String metricsReporter = stormSiteProps.getProperties().get(STORM_METRICS_REPORTER);
-        if (amsService != null && StringUtils.isEmpty(metricsReporter)) {
-          updates.put(STORM_METRICS_REPORTER, "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter");
-        }
-
-        updateConfigurationPropertiesForCluster(cluster, STORM_SITE, updates, true, false);
-      }
-    }
-  }
-
-  protected void updateHiveConfig() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config hiveSiteConfig = cluster.getDesiredConfigByType(HIVE_SITE_CONFIG);
-      if (hiveSiteConfig != null) {
-        String hiveServer2OperationLogLocation = hiveSiteConfig.getProperties().get(HIVE_SERVER2_OPERATION_LOG_LOCATION_PROPERTY);
-        if (hiveServer2OperationLogLocation != null && hiveServer2OperationLogLocation.equals("${system:java.io.tmpdir}/${system:user.name}/operation_logs")) {
-          Map<String, String> updates = Collections.singletonMap(HIVE_SERVER2_OPERATION_LOG_LOCATION_PROPERTY, "/tmp/hive/operation_logs");
-          updateConfigurationPropertiesForCluster(cluster, HIVE_SITE_CONFIG, updates, true, false);
-        }
-      }
-      StackId stackId = cluster.getCurrentStackVersion();
-      boolean isStackAtLeast23 = (stackId != null && stackId.getStackName().equals("HDP") &&
-              VersionUtils.compareVersions(stackId.getStackVersion(), "2.3") >= 0);
-
-      Config hiveEnvConfig = cluster.getDesiredConfigByType(HIVE_ENV_CONFIG);
-      if (hiveEnvConfig != null) {
-        Map<String, String> hiveEnvProps = new HashMap<>();
-        String content = hiveEnvConfig.getProperties().get(CONTENT_PROPERTY);
-        // For HDP-2.3 and later we add Hive heap size management to the content;
-        // for earlier stacks we only update the existing content.
-        if (content != null) {
-          if (isStackAtLeast23) {
-            content = updateHiveEnvContentHDP23(content);
-          } else {
-            content = updateHiveEnvContent(content);
-          }
-          hiveEnvProps.put(CONTENT_PROPERTY, content);
-          updateConfigurationPropertiesForCluster(cluster, HIVE_ENV_CONFIG, hiveEnvProps, true, true);
-        }
-      }
-
-    }
-  }
-
-  protected void updateHbaseEnvConfig() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      StackId stackId = cluster.getCurrentStackVersion();
-      Config hbaseEnvConfig = cluster.getDesiredConfigByType(HBASE_ENV_CONFIG);
-      if (hbaseEnvConfig != null) {
-        String content = hbaseEnvConfig.getProperties().get(CONTENT_PROPERTY);
-        if (content != null) {
-          // Track per cluster whether the content actually changed before persisting.
-          boolean updateConfig = false;
-          if (!content.contains("-Djava.io.tmpdir")) {
-            content += "\n\nexport HBASE_OPTS=\"-Djava.io.tmpdir={{java_io_tmpdir}}\"";
-            updateConfig = true;
-          }
-          if (stackId != null && stackId.getStackName().equals("HDP") &&
-              VersionUtils.compareVersions(stackId.getStackVersion(), "2.2") >= 0) {
-            if (!content.contains("MaxDirectMemorySize={{hbase_max_direct_memory_size}}m")) {
-              String newPartOfContent = "\n\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %}\"\n\n";
-              content += newPartOfContent;
-              updateConfig = true;
-            }
-          }
-          if (updateConfig) {
-            Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, content);
-            updateConfigurationPropertiesForCluster(cluster, HBASE_ENV_CONFIG, updates, true, false);
-          }
-        }
-      }
-    }
-  }
-
-  protected void updateFlumeEnvConfig() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config flumeEnvConfig = cluster.getDesiredConfigByType(FLUME_ENV_CONFIG);
-      if (flumeEnvConfig != null) {
-        String content = flumeEnvConfig.getProperties().get(CONTENT_PROPERTY);
-        if (content != null && !content.contains("/usr/lib/flume/lib/ambari-metrics-flume-sink.jar")) {
-          String newPartOfContent = "\n\n" +
-            "# Note that the Flume conf directory is always included in the classpath.\n" +
-            "# Add flume sink to classpath\n" +
-            "if [ -e \"/usr/lib/flume/lib/ambari-metrics-flume-sink.jar\" ]; then\n" +
-            "  export FLUME_CLASSPATH=$FLUME_CLASSPATH:/usr/lib/flume/lib/ambari-metrics-flume-sink.jar\n" +
-            "fi\n";
-          content += newPartOfContent;
-          Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, content);
-          updateConfigurationPropertiesForCluster(cluster, FLUME_ENV_CONFIG, updates, true, false);
-        }
-      }
-    }
-  }
-
-  protected void updateAMSConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-
-          Config amsHbaseEnv = cluster.getDesiredConfigByType(AMS_HBASE_ENV);
-          if (amsHbaseEnv != null) {
-            Map<String, String> amsHbaseEnvProperties = amsHbaseEnv.getProperties();
-            String content = amsHbaseEnvProperties.get("content");
-            Map<String, String> newProperties = new HashMap<>();
-            newProperties.put("content", updateAmsHbaseEnvContent(content));
-            updateConfigurationPropertiesForCluster(cluster, AMS_HBASE_ENV, newProperties, true, true);
-          }
-
-          Config amsEnv = cluster.getDesiredConfigByType(AMS_ENV);
-          if (amsEnv != null) {
-            Map<String, String> amsEnvProperties = amsEnv.getProperties();
-            String content = amsEnvProperties.get("content");
-            Map<String, String> newProperties = new HashMap<>();
-            newProperties.put("content", updateAmsEnvContent(content));
-            updateConfigurationPropertiesForCluster(cluster, AMS_ENV, newProperties, true, true);
-          }
-
-          Config amsSite = cluster.getDesiredConfigByType(AMS_SITE);
-          if (amsSite != null) {
-            Map<String, String> currentAmsSiteProperties = amsSite.getProperties();
-            Map<String, String> newProperties = new HashMap<>();
-
-            //Changed AMS result set limit from 5760 to 15840.
-            if(currentAmsSiteProperties.containsKey("timeline.metrics.service.default.result.limit") &&
-              currentAmsSiteProperties.get("timeline.metrics.service.default.result.limit").equals(String.valueOf(5760))) {
-              LOG.info("Updating timeline.metrics.service.default.result.limit to 15840");
-              newProperties.put("timeline.metrics.service.default.result.limit", String.valueOf(15840));
-            }
-
-            //Interval
-            newProperties.put("timeline.metrics.cluster.aggregator.second.interval", String.valueOf(120));
-            newProperties.put("timeline.metrics.cluster.aggregator.minute.interval", String.valueOf(300));
-            newProperties.put("timeline.metrics.host.aggregator.minute.interval", String.valueOf(300));
-
-            //ttl
-            newProperties.put("timeline.metrics.cluster.aggregator.second.ttl", String.valueOf(2592000));
-            newProperties.put("timeline.metrics.cluster.aggregator.minute.ttl", String.valueOf(7776000));
-
-            //checkpoint
-            newProperties.put("timeline.metrics.cluster.aggregator.second.checkpointCutOffMultiplier", String.valueOf(2));
-
-            //disabled
-            newProperties.put("timeline.metrics.cluster.aggregator.second.disabled", String.valueOf(false));
-
-            //Add compaction policy property
-            newProperties.put(TIMELINE_METRICS_HBASE_FIFO_COMPACTION_ENABLED, String.valueOf(true));
-
-            updateConfigurationPropertiesForCluster(cluster, AMS_SITE, newProperties, true, true);
-          }
-
-          Config amsHbaseSite = cluster.getDesiredConfigByType(AMS_HBASE_SITE);
-          if (amsHbaseSite != null) {
-            Map<String, String> amsHbaseSiteProperties = amsHbaseSite.getProperties();
-            Map<String, String> newProperties = new HashMap<>();
-
-            String zkTimeout = amsHbaseSiteProperties.get(AMS_HBASE_SITE_ZK_TIMEOUT_PROPERTY);
-            // if old default, set new default
-            if ("20000".equals(zkTimeout)) {
-              newProperties.put(AMS_HBASE_SITE_ZK_TIMEOUT_PROPERTY, "120000");
-            }
-
-            //Add HBase region normalizer settings (enabled, period, class) if not already configured
-            if(!amsHbaseSiteProperties.containsKey(AMS_HBASE_SITE_NORMALIZER_ENABLED_PROPERTY)) {
-              LOG.info("Enabling " + AMS_HBASE_SITE_NORMALIZER_ENABLED_PROPERTY);
-              newProperties.put(AMS_HBASE_SITE_NORMALIZER_ENABLED_PROPERTY, String.valueOf(true));
-            }
-
-            if(!amsHbaseSiteProperties.containsKey(AMS_HBASE_SITE_NORMALIZER_PERIOD_PROPERTY)) {
-              LOG.info("Updating " + AMS_HBASE_SITE_NORMALIZER_PERIOD_PROPERTY);
-              newProperties.put(AMS_HBASE_SITE_NORMALIZER_PERIOD_PROPERTY, String.valueOf(600000));
-            }
-
-            if(!amsHbaseSiteProperties.containsKey(AMS_HBASE_SITE_NORMALIZER_CLASS_PROPERTY)) {
-              LOG.info("Updating " + AMS_HBASE_SITE_NORMALIZER_CLASS_PROPERTY);
-              newProperties.put(AMS_HBASE_SITE_NORMALIZER_CLASS_PROPERTY,
-                "org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer");
-            }
-            updateConfigurationPropertiesForCluster(cluster, AMS_HBASE_SITE, newProperties, true, true);
-          }
-        }
-      }
-    }
-
-  }
-
-  protected String updateAmsHbaseEnvContent(String content) {
-    if (content == null) {
-      return null;
-    }
-    String regSearch = "export HBASE_HEAPSIZE=";
-    String replacement = "#export HBASE_HEAPSIZE=";
-    content = content.replaceAll(regSearch, replacement);
-    content += "\n" +
-      "# The maximum amount of heap to use for hbase shell.\n" +
-      "export HBASE_SHELL_OPTS=\"-Xmx256m\"\n";
-    return content;
-  }
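-
-  // For example (illustrative input only; the real content comes from the cluster's
-  // ams-hbase-env config), content containing
-  //   export HBASE_HEAPSIZE=1024m
-  // is rewritten by updateAmsHbaseEnvContent() to comment out the heap setting and
-  // append a shell heap setting:
-  //   #export HBASE_HEAPSIZE=1024m
-  //   # The maximum amount of heap to use for hbase shell.
-  //   export HBASE_SHELL_OPTS="-Xmx256m"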
-
-  protected String updateAmsEnvContent(String content) {
-    if (content == null) {
-      return null;
-    }
-    if (!content.contains("AMS_COLLECTOR_GC_OPTS")) {
-      content += "\n" +
-        "# AMS Collector GC options\n" +
-        "export AMS_COLLECTOR_GC_OPTS=\"-XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 " +
-        "-XX:+UseCMSInitiatingOccupancyOnly -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps " +
-        "-XX:+UseGCLogFileRotation -XX:GCLogFileSize=10M " +
-        "-Xloggc:{{ams_collector_log_dir}}/collector-gc.log-`date +'%Y%m%d%H%M'`\"\n" +
-        "export AMS_COLLECTOR_OPTS=\"$AMS_COLLECTOR_OPTS $AMS_COLLECTOR_GC_OPTS\"\n";
-    }
-
-    if (!content.contains("HBASE_NORMALIZATION_ENABLED")) {
-      content += "\n" +
-        "# HBase compaction policy enabled\n" +
-        "export HBASE_NORMALIZATION_ENABLED={{ams_hbase_normalizer_enabled}}\n";
-    }
-
-    if (!content.contains("HBASE_FIFO_COMPACTION_POLICY_ENABLED")) {
-      content += "\n" +
-        "# HBase compaction policy enabled\n" +
-        "export HBASE_FIFO_COMPACTION_POLICY_ENABLED={{ams_hbase_fifo_compaction_policy_enabled}}\n";
-    }
-
-    return content;
-  }
-
-  protected void updateKafkaConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          Set<String> installedServices = cluster.getServices().keySet();
-          Config kafkaBroker = cluster.getDesiredConfigByType(KAFKA_BROKER);
-          if (kafkaBroker != null) {
-            Map<String, String> newProperties = new HashMap<>();
-            Map<String, String> kafkaBrokerProperties = kafkaBroker.getProperties();
-            String kafkaMetricsReporters = kafkaBrokerProperties.get("kafka.metrics.reporters");
-            if (kafkaMetricsReporters == null ||
-              "{{kafka_metrics_reporters}}".equals(kafkaMetricsReporters)) {
-
-              if (installedServices.contains("AMBARI_METRICS")) {
-                newProperties.put("kafka.metrics.reporters", "org.apache.hadoop.metrics2.sink.kafka.KafkaTimelineMetricsReporter");
-              } else if (installedServices.contains("GANGLIA")) {
-                newProperties.put("kafka.metrics.reporters", "kafka.ganglia.KafkaGangliaMetricsReporter");
-              } else {
-                newProperties.put("kafka.metrics.reporters", " ");
-              }
-
-            }
-            if (!newProperties.isEmpty()) {
-              updateConfigurationPropertiesForCluster(cluster, KAFKA_BROKER, newProperties, true, true);
-            }
-          }
-          Config kafkaEnv = cluster.getDesiredConfigByType(KAFKA_ENV_CONFIG);
-          if (kafkaEnv != null) {
-            String kafkaEnvContent = kafkaEnv.getProperties().get(CONTENT_PROPERTY);
-            if (kafkaEnvContent != null && !kafkaEnvContent.contains(KAFKA_ENV_CONTENT_KERBEROS_PARAMS)) {
-              kafkaEnvContent += "\n\nexport KAFKA_KERBEROS_PARAMS=\"$KAFKA_KERBEROS_PARAMS {{kafka_kerberos_params}}\"";
-              Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, kafkaEnvContent);
-              updateConfigurationPropertiesForCluster(cluster, KAFKA_ENV_CONFIG, updates, true, false);
-            }
-          }
-        }
-      }
-    }
-
-  }
-
-  protected void updateRangerEnvConfig() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Map<String, String> newRangerEnvProps = new HashMap<>();
-      Config rangerHdfsPluginProperties = cluster.getDesiredConfigByType("ranger-hdfs-plugin-properties");
-      if (rangerHdfsPluginProperties != null && rangerHdfsPluginProperties.getProperties().containsKey(RANGER_HDFS_PLUGIN_ENABLED_PROPERTY)) {
-        newRangerEnvProps.put(RANGER_HDFS_PLUGIN_ENABLED_PROPERTY, rangerHdfsPluginProperties.getProperties().get(RANGER_HDFS_PLUGIN_ENABLED_PROPERTY));
-      }
-      Config hiveEnvProperties = cluster.getDesiredConfigByType("hive-env");
-      if (hiveEnvProperties != null && hiveEnvProperties.getProperties().containsKey("hive_security_authorization")
-              && hiveEnvProperties.getProperties().get("hive_security_authorization").equalsIgnoreCase("ranger")) {
-        newRangerEnvProps.put(RANGER_HIVE_PLUGIN_ENABLED_PROPERTY, "Yes");
-      }
-      Config rangerHbasePluginProperties = cluster.getDesiredConfigByType("ranger-hbase-plugin-properties");
-      if (rangerHbasePluginProperties != null && rangerHbasePluginProperties.getProperties().containsKey(RANGER_HBASE_PLUGIN_ENABLED_PROPERTY)) {
-        newRangerEnvProps.put(RANGER_HBASE_PLUGIN_ENABLED_PROPERTY, rangerHbasePluginProperties.getProperties().get(RANGER_HBASE_PLUGIN_ENABLED_PROPERTY));
-      }
-
-      Config rangerStormPluginProperties = cluster.getDesiredConfigByType("ranger-storm-plugin-properties");
-      if (rangerStormPluginProperties != null && rangerStormPluginProperties.getProperties().containsKey(RANGER_STORM_PLUGIN_ENABLED_PROPERTY)) {
-        newRangerEnvProps.put(RANGER_STORM_PLUGIN_ENABLED_PROPERTY, rangerStormPluginProperties.getProperties().get(RANGER_STORM_PLUGIN_ENABLED_PROPERTY));
-      }
-      Config rangerKnoxPluginProperties = cluster.getDesiredConfigByType("ranger-knox-plugin-properties");
-      if (rangerKnoxPluginProperties != null && rangerKnoxPluginProperties.getProperties().containsKey(RANGER_KNOX_PLUGIN_ENABLED_PROPERTY)) {
-        newRangerEnvProps.put(RANGER_KNOX_PLUGIN_ENABLED_PROPERTY, rangerKnoxPluginProperties.getProperties().get(RANGER_KNOX_PLUGIN_ENABLED_PROPERTY));
-      }
-      Config rangerYarnPluginProperties = cluster.getDesiredConfigByType("ranger-yarn-plugin-properties");
-      if (rangerYarnPluginProperties != null && rangerYarnPluginProperties.getProperties().containsKey(RANGER_YARN_PLUGIN_ENABLED_PROPERTY)) {
-        newRangerEnvProps.put(RANGER_YARN_PLUGIN_ENABLED_PROPERTY, rangerYarnPluginProperties.getProperties().get(RANGER_YARN_PLUGIN_ENABLED_PROPERTY));
-      }
-      Config rangerKafkaPluginProperties = cluster.getDesiredConfigByType("ranger-kafka-plugin-properties");
-      if (rangerKafkaPluginProperties != null && rangerKafkaPluginProperties.getProperties().containsKey(RANGER_KAFKA_PLUGIN_ENABLED_PROPERTY)) {
-        newRangerEnvProps.put(RANGER_KAFKA_PLUGIN_ENABLED_PROPERTY, rangerKafkaPluginProperties.getProperties().get(RANGER_KAFKA_PLUGIN_ENABLED_PROPERTY));
-      }
-      if (!newRangerEnvProps.isEmpty()) {
-        updateConfigurationPropertiesForCluster(cluster, RANGER_ENV_CONFIG, newRangerEnvProps, true, true);
-      }
-    }
-  }
-
-  protected void updateRangerUgsyncSiteConfig() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config rangerUgsyncSiteProperties = cluster.getDesiredConfigByType(RANGER_UGSYNC_SITE_CONFIG);
-      if (rangerUgsyncSiteProperties != null && rangerUgsyncSiteProperties.getProperties().containsKey(RANGER_USERSYNC_SOURCE_IMPL_CLASS_PROPERTY)) {
-        String sourceClassValue = rangerUgsyncSiteProperties.getProperties().get(RANGER_USERSYNC_SOURCE_IMPL_CLASS_PROPERTY);
-        if (sourceClassValue != null) {
-          if ("ldap".equals(sourceClassValue)) {
-            Map<String, String> updates = Collections.singletonMap(RANGER_USERSYNC_SOURCE_IMPL_CLASS_PROPERTY,
-                "org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder");
-            updateConfigurationPropertiesForCluster(cluster, RANGER_UGSYNC_SITE_CONFIG, updates, true, false);
-          } else if ("unix".equals(sourceClassValue)) {
-            Map<String, String> updates = Collections.singletonMap(RANGER_USERSYNC_SOURCE_IMPL_CLASS_PROPERTY,
-                "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder");
-            updateConfigurationPropertiesForCluster(cluster, RANGER_UGSYNC_SITE_CONFIG, updates, true, false);
-          } else if ("file".equals(sourceClassValue)) {
-            Map<String, String> updates = Collections.singletonMap(RANGER_USERSYNC_SOURCE_IMPL_CLASS_PROPERTY,
-                "org.apache.ranger.unixusersync.process.FileSourceUserGroupBuilder");
-            updateConfigurationPropertiesForCluster(cluster, RANGER_UGSYNC_SITE_CONFIG, updates, true, false);
-          }
-        }
-      }
-    }
-  }
-
-  protected String updateHiveEnvContent(String hiveEnvContent) {
-    if(hiveEnvContent == null) {
-      return null;
-    }
-    // There are two cases here:
-    // 1) "export HADOOP_CLIENT_OPTS" is already present, but with the heap size flag in the wrong order.
-    // 2) "export HADOOP_CLIENT_OPTS" is missing and needs to be added after the HADOOP_HEAPSIZE export.
-    String exportHadoopClientOpts = "(?s).*export\\s*HADOOP_CLIENT_OPTS.*";
-    if (hiveEnvContent.matches(exportHadoopClientOpts)) {
-      String oldHeapSizeRegex = "export\\s*HADOOP_CLIENT_OPTS=\"-Xmx\\$\\{HADOOP_HEAPSIZE\\}m\\s*\\$HADOOP_CLIENT_OPTS\"";
-      String newHeapSizeRegex = "export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"";
-      return hiveEnvContent.replaceAll(oldHeapSizeRegex, Matcher.quoteReplacement(newHeapSizeRegex));
-    } else {
-      String oldHeapSizeRegex = "export\\s*HADOOP_HEAPSIZE\\s*=\\s*\"\\{\\{hive_heapsize\\}\\}\"\\.*\\n\\s*fi\\s*\\n";
-      String newHeapSizeRegex = "export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\n" +
-              "fi\n" +
-              "\n" +
-              "export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"\n";
-      return hiveEnvContent.replaceAll(oldHeapSizeRegex, Matcher.quoteReplacement(newHeapSizeRegex));
-    }
-  }
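-
-  // Sketch of the two cases above, using hypothetical hive-env content:
-  // 1) heap flag ordered before the existing opts:
-  //      export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-  //    becomes
-  //      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
-  // 2) HADOOP_CLIENT_OPTS missing entirely:
-  //      export HADOOP_HEAPSIZE="{{hive_heapsize}}"
-  //      fi
-  //    becomes
-  //      export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client
-  //      fi
-  //
-  //      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"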
-
-  protected String updateHiveEnvContentHDP23(String hiveEnvContent) {
-    if(hiveEnvContent == null) {
-      return null;
-    }
-    String oldHeapSizeRegex = "# The heap size of the jvm stared by hive shell script can be controlled via:\\s*\\n";
-    String newHeapSizeRegex = "# The heap size of the jvm stared by hive shell script can be controlled via:\n" +
-            "\n" +
-            "if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
-            "  export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore\n" +
-            "else\n" +
-            "  export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\n" +
-            "fi\n" +
-            "\n" +
-            "export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"\n" +
-            "\n";
-    return hiveEnvContent.replaceFirst(oldHeapSizeRegex, Matcher.quoteReplacement(newHeapSizeRegex));
-  }
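-
-  // Sketch for HDP-2.3 and later, assuming the stack's hive-env template contains the
-  // comment line matched by oldHeapSizeRegex: that line is kept and the metastore /
-  // HiveServer2 heap block is inserted right after it, e.g.
-  //   # The heap size of the jvm stared by hive shell script can be controlled via:
-  //
-  //   if [ "$SERVICE" = "metastore" ]; then
-  //     export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
-  //   else
-  //     export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client
-  //   fi
-  //
-  //   export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"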
-
-  protected void updateAccumuloConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      // If security type is set to Kerberos, update Kerberos-related configs
-      if(cluster.getSecurityType() == SecurityType.KERBEROS) {
-        Config clientProps = cluster.getDesiredConfigByType("client");
-        if (clientProps != null) {
-          Map<String, String> properties = clientProps.getProperties();
-          if (properties == null) {
-            properties = new HashMap<String, String>();
-          }
-          // Versions earlier than 2.1.3 did not account for a custom service principal.
-          // Need to ensure that the client knows the server's principal (the primary) to properly authenticate.
-          properties.put("kerberos.server.primary", "{{bare_accumulo_principal}}");
-          updateConfigurationPropertiesForCluster(cluster, "client", properties, true, false);
-        }
-      } // else -- no special client-configuration is necessary.
-    }
-  }
-}