Posted to commits@ambari.apache.org by ab...@apache.org on 2017/06/30 13:37:51 UTC

[01/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Repository: ambari
Updated Branches:
  refs/heads/branch-feature-logsearch-ui ed660940a -> 0b6679afc


http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
index 782cf2c..a342baa 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog300Test.java
@@ -179,7 +179,7 @@ public class UpgradeCatalog300Test {
 
     // component table
     Capture<DBAccessor.DBColumnInfo> componentStateColumn = newCapture();
-    dbAccessor.addColumn(eq(UpgradeCatalog250.COMPONENT_TABLE), capture(componentStateColumn));
+    dbAccessor.addColumn(eq(UpgradeCatalog300.COMPONENT_TABLE), capture(componentStateColumn));
 
     replay(dbAccessor, configuration);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalogTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalogTest.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalogTest.java
index fa40db9..aee334b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalogTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalogTest.java
@@ -82,8 +82,9 @@ public class UpgradeCatalogTest {
       Multibinder<UpgradeCatalog> catalogBinder =
         Multibinder.newSetBinder(binder(), UpgradeCatalog.class);
       catalogBinder.addBinding().to(UpgradeCatalog201.class);
-      catalogBinder.addBinding().to(UpgradeCatalog200.class);
-      catalogBinder.addBinding().to(UpgradeCatalog210.class);
+      catalogBinder.addBinding().to(UpgradeCatalog251.class);
+      catalogBinder.addBinding().to(UpgradeCatalog252.class);
+      catalogBinder.addBinding().to(UpgradeCatalog300.class);
     }
   }
 
@@ -113,13 +114,13 @@ public class UpgradeCatalogTest {
     Set<UpgradeCatalog> upgradeCatalogSet = schemaUpgradeHelper.getAllUpgradeCatalogs();
 
     Assert.assertNotNull(upgradeCatalogSet);
-    Assert.assertEquals(3, upgradeCatalogSet.size());
+    Assert.assertEquals(4, upgradeCatalogSet.size());
 
-    List<UpgradeCatalog> upgradeCatalogs = schemaUpgradeHelper.getUpgradePath(null, "2.0.1");
+    List<UpgradeCatalog> upgradeCatalogs = schemaUpgradeHelper.getUpgradePath(null, "2.5.1");
 
     Assert.assertNotNull(upgradeCatalogs);
     Assert.assertEquals(2, upgradeCatalogs.size());
-    Assert.assertEquals("2.0.0", upgradeCatalogs.get(0).getTargetVersion());
-    Assert.assertEquals("2.0.1", upgradeCatalogs.get(1).getTargetVersion());
+    Assert.assertEquals("2.0.1", upgradeCatalogs.get(0).getTargetVersion());
+    Assert.assertEquals("2.5.1", upgradeCatalogs.get(1).getTargetVersion());
   }
 }
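
For readers skimming the test change above: upgrade catalogs are contributed to a single injected Set through a Guice Multibinder, and SchemaUpgradeHelper.getUpgradePath(...) then selects the catalogs whose target versions fall between the requested source and target versions. Below is a minimal, self-contained sketch of that registration pattern; the UpgradeCatalog interface and the Catalog251/Catalog300 classes are simplified stand-ins for the real Ambari types, not the actual classes.

import java.util.Set;

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Key;
import com.google.inject.TypeLiteral;
import com.google.inject.multibindings.Multibinder;

public class UpgradeCatalogBindingSketch {

  /** Simplified stand-in for org.apache.ambari.server.upgrade.UpgradeCatalog. */
  interface UpgradeCatalog {
    String getTargetVersion();
  }

  static class Catalog251 implements UpgradeCatalog {
    @Override public String getTargetVersion() { return "2.5.1"; }
  }

  static class Catalog300 implements UpgradeCatalog {
    @Override public String getTargetVersion() { return "3.0.0"; }
  }

  static class CatalogModule extends AbstractModule {
    @Override protected void configure() {
      // Each addBinding() contributes one catalog to the injected Set<UpgradeCatalog>,
      // mirroring the bindings added in UpgradeCatalogTest above.
      Multibinder<UpgradeCatalog> catalogBinder =
          Multibinder.newSetBinder(binder(), UpgradeCatalog.class);
      catalogBinder.addBinding().to(Catalog251.class);
      catalogBinder.addBinding().to(Catalog300.class);
    }
  }

  public static void main(String[] args) {
    Injector injector = Guice.createInjector(new CatalogModule());
    Set<UpgradeCatalog> catalogs =
        injector.getInstance(Key.get(new TypeLiteral<Set<UpgradeCatalog>>() {}));
    catalogs.forEach(c -> System.out.println(c.getTargetVersion()));
  }
}

With Guice on the classpath (older Guice versions also need the guice-multibindings jar), the main method prints the target version of every registered catalog, which is the set the updated size assertions count.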


[41/63] [abbrv] ambari git commit: AMBARI-21317. Config update API should not need to have a unique tag, BE can auto add the tag when it is missing. (vbrodetskyi)

Posted by ab...@apache.org.
AMBARI-21317. Config update API should not need to have a unique tag, BE can auto add the tag when it is missing. (vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/40e6352b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/40e6352b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/40e6352b

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 40e6352b0e704ca1af7b0e88a267b03bde5cea59
Parents: 8634718
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Tue Jun 27 22:44:28 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Tue Jun 27 22:44:28 2017 +0300

----------------------------------------------------------------------
 .../controller/AmbariManagementController.java  |  4 +++
 .../AmbariManagementControllerImpl.java         | 29 +++++++++++++++---
 .../server/controller/ConfigGroupResponse.java  | 10 +++++++
 .../internal/ConfigGroupResourceProvider.java   | 31 ++++++++++++++++++--
 .../apache/ambari/server/state/ConfigImpl.java  |  3 +-
 .../AmbariManagementControllerImplTest.java     | 16 ++++++++--
 .../ConfigGroupResourceProviderTest.java        |  2 ++
 7 files changed, 85 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/40e6352b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
index bb1c95e..f0f13e1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
@@ -911,5 +911,9 @@ public interface AmbariManagementController {
    */
   QuickLinkVisibilityController getQuicklinkVisibilityController();
 
+  ConfigGroupResponse getConfigGroupUpdateResults(ConfigGroupRequest configGroupRequest);
+
+  void saveConfigGroupUpdate(ConfigGroupRequest configGroupRequest, ConfigGroupResponse configGroupResponse);
+
 }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/40e6352b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 6781f65..77883e3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -65,6 +65,7 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 
 import javax.persistence.RollbackException;
@@ -346,6 +347,8 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
   private Cache<ClusterRequest, ClusterResponse> clusterUpdateCache =
       CacheBuilder.newBuilder().expireAfterWrite(5, TimeUnit.MINUTES).build();
+  private Cache<ConfigGroupRequest, ConfigGroupResponse> configGroupUpdateCache =
+          CacheBuilder.newBuilder().expireAfterWrite(5, TimeUnit.MINUTES).build();
 
   @Inject
   private AmbariCustomCommandExecutionHelper customCommandExecutionHelper;
@@ -1632,6 +1635,15 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       cluster = clusters.getClusterById(request.getClusterId());
     }
 
+    List<ConfigurationRequest> desiredConfigs = request.getDesiredConfig();
+    if (desiredConfigs != null) {
+      for (ConfigurationRequest configurationRequest : desiredConfigs) {
+        if (StringUtils.isEmpty(configurationRequest.getVersionTag())) {
+          configurationRequest.setVersionTag(UUID.randomUUID().toString());
+        }
+      }
+    }
+
     // Ensure the user has access to update this cluster
     AuthorizationHelper.verifyAuthorization(ResourceType.CLUSTER, cluster.getResourceId(), RoleAuthorization.AUTHORIZATIONS_UPDATE_CLUSTER);
 
@@ -1640,7 +1652,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       new LinkedList<>();
     ServiceConfigVersionResponse serviceConfigVersionResponse = null;
 
-    if (request.getDesiredConfig() != null && request.getServiceConfigVersionRequest() != null) {
+    if (desiredConfigs != null && request.getServiceConfigVersionRequest() != null) {
       String msg = "Unable to set desired configs and rollback at same time, request = " + request;
       LOG.error(msg);
       throw new IllegalArgumentException(msg);
@@ -1661,8 +1673,8 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
     //check if desired configs are available in request and they were changed
     boolean isConfigurationCreationNeeded = false;
-    if (request.getDesiredConfig() != null) {
-      for (ConfigurationRequest desiredConfig : request.getDesiredConfig()) {
+    if (desiredConfigs != null) {
+      for (ConfigurationRequest desiredConfig : desiredConfigs) {
         Map<String, String> requestConfigProperties = desiredConfig.getProperties();
         Map<String,Map<String,String>> requestConfigAttributes = desiredConfig.getPropertiesAttributes();
 
@@ -1739,7 +1751,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
     // set or create configuration mapping (and optionally create the map of properties)
     if (isConfigurationCreationNeeded) {
-      List<ConfigurationRequest> desiredConfigs = request.getDesiredConfig();
 
       if (!desiredConfigs.isEmpty()) {
         Set<Config> configs = new HashSet<>();
@@ -2073,6 +2084,16 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
   }
 
   @Override
+  public ConfigGroupResponse getConfigGroupUpdateResults(ConfigGroupRequest configGroupRequest) {
+    return configGroupUpdateCache.getIfPresent(configGroupRequest);
+  }
+
+  @Override
+  public void saveConfigGroupUpdate(ConfigGroupRequest configGroupRequest, ConfigGroupResponse configGroupResponse) {
+    configGroupUpdateCache.put(configGroupRequest, configGroupResponse);
+  }
+
+  @Override
   public String getJobTrackerHost(Cluster cluster) {
     try {
       Service svc = cluster.getService("MAPREDUCE");

http://git-wip-us.apache.org/repos/asf/ambari/blob/40e6352b/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigGroupResponse.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigGroupResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigGroupResponse.java
index 58c680d..937df46 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigGroupResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ConfigGroupResponse.java
@@ -17,6 +17,7 @@
  */
 package org.apache.ambari.server.controller;
 
+import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
@@ -28,6 +29,7 @@ public class ConfigGroupResponse {
   private String description;
   private Set<Map<String, Object>> hosts;
   private Set<Map<String, Object>> configVersions;
+  private Set<Map<String, Object>> versionTags =  new HashSet<Map<String, Object>>();
 
   public ConfigGroupResponse(Long id, String clusterName,
           String groupName, String tag, String description,
@@ -97,4 +99,12 @@ public class ConfigGroupResponse {
   public void setConfigurations(Set<Map<String, Object>> configurations) {
     this.configVersions = configurations;
   }
+
+  public Set<Map<String, Object>> getVersionTags() {
+    return versionTags;
+  }
+
+  public void setVersionTags(Set<Map<String, Object>> versionTags) {
+    this.versionTags = versionTags;
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/40e6352b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
index c2b998c..25af9d2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProvider.java
@@ -96,6 +96,8 @@ public class ConfigGroupResourceProvider extends
     .getPropertyId("ConfigGroup", "hosts");
   public static final String CONFIGGROUP_CONFIGS_PROPERTY_ID =
     PropertyHelper.getPropertyId("ConfigGroup", "desired_configs");
+  public static final String CONFIGGROUP_VERSION_TAGS_PROPERTY_ID =
+    PropertyHelper.getPropertyId("ConfigGroup", "version_tags");
 
   private static Set<String> pkPropertyIds = new HashSet<>(Arrays
     .asList(new String[]{CONFIGGROUP_ID_PROPERTY_ID}));
@@ -217,9 +219,23 @@ public class ConfigGroupResourceProvider extends
 
     RequestStatus status = updateResources(requests);
 
+    Set<Resource> associatedResources = new HashSet<>();
+    for (ConfigGroupRequest configGroupRequest : requests) {
+      ConfigGroupResponse configGroupResponse = getManagementController().getConfigGroupUpdateResults(configGroupRequest);
+      Resource resource = new ResourceImpl(Resource.Type.ConfigGroup);
+
+      resource.setProperty(CONFIGGROUP_ID_PROPERTY_ID, configGroupResponse.getId());
+      resource.setProperty(CONFIGGROUP_CLUSTER_NAME_PROPERTY_ID, configGroupResponse.getClusterName());
+      resource.setProperty(CONFIGGROUP_NAME_PROPERTY_ID, configGroupResponse.getGroupName());
+      resource.setProperty(CONFIGGROUP_TAG_PROPERTY_ID, configGroupResponse.getTag());
+      resource.setProperty(CONFIGGROUP_VERSION_TAGS_PROPERTY_ID, configGroupResponse.getVersionTags());
+
+      associatedResources.add(resource);
+    }
+
     notifyUpdate(Resource.Type.ConfigGroup, request, predicate);
 
-    return status;
+    return getRequestStatus(null, associatedResources);
   }
 
   @Override
@@ -701,7 +717,18 @@ public class ConfigGroupResourceProvider extends
 
       if (serviceName != null) {
         cluster.createServiceConfigVersion(serviceName, getManagementController().getAuthName(),
-          request.getServiceConfigVersionNote(), configGroup);
+                request.getServiceConfigVersionNote(), configGroup);
+
+        ConfigGroupResponse configGroupResponse = new ConfigGroupResponse(configGroup.getId(), cluster.getClusterName(), configGroup.getName(),
+                request.getTag(), "", new HashSet<Map<String, Object>>(), new HashSet<Map<String, Object>>());
+        Set<Map<String, Object>> versionTags = new HashSet<Map<String, Object>>();
+        Map<String, Object> tagsMap = new HashMap<String, Object>();
+        for (Config config : configGroup.getConfigurations().values()) {
+          tagsMap.put(config.getType(), config.getTag());
+        }
+        versionTags.add(tagsMap);
+        configGroupResponse.setVersionTags(versionTags);
+        getManagementController().saveConfigGroupUpdate(request, configGroupResponse);
       } else {
         LOG.warn("Could not determine service name for config group {}, service config version not created",
             configGroup.getId());

http://git-wip-us.apache.org/repos/asf/ambari/blob/40e6352b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
index 65b7863..cfcadd4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ConfigImpl.java
@@ -22,6 +22,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.UUID;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.locks.ReadWriteLock;
 
@@ -133,7 +134,7 @@ public class ConfigImpl implements Config {
     version = cluster.getNextConfigVersion(type);
 
     // tag is nullable from factory but not in the DB, so ensure we generate something
-    tag = StringUtils.isBlank(tag) ? GENERATED_TAG_PREFIX + version : tag;
+    tag = StringUtils.isBlank(tag) ? UUID.randomUUID().toString() : tag;
     this.tag = tag;
 
     ClusterEntity clusterEntity = clusterDAO.findById(cluster.getClusterId());

http://git-wip-us.apache.org/repos/asf/ambari/blob/40e6352b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index c0e3ef1..eadc678 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
@@ -561,9 +561,14 @@ public class AmbariManagementControllerImplTest {
     Cluster cluster = createNiceMock(Cluster.class);
     ActionManager actionManager = createNiceMock(ActionManager.class);
     ClusterRequest clusterRequest = createNiceMock(ClusterRequest.class);
+    ConfigurationRequest configurationRequest = createNiceMock(ConfigurationRequest.class);
 
     // requests
-    Set<ClusterRequest> setRequests = Collections.singleton(clusterRequest);
+    Set<ClusterRequest> setRequests = new HashSet<ClusterRequest>();
+    setRequests.add(clusterRequest);
+
+    List<ConfigurationRequest> configRequests = new ArrayList<>();
+    configRequests.add(configurationRequest);
 
     KerberosHelper kerberosHelper = createStrictMock(KerberosHelper.class);
     // expectations
@@ -573,6 +578,8 @@ public class AmbariManagementControllerImplTest {
     expect(injector.getInstance(KerberosHelper.class)).andReturn(kerberosHelper);
     expect(clusterRequest.getClusterName()).andReturn("clusterNew").times(3);
     expect(clusterRequest.getClusterId()).andReturn(1L).times(6);
+    expect(clusterRequest.getDesiredConfig()).andReturn(configRequests);
+    expect(configurationRequest.getVersionTag()).andReturn(null).times(1);
     expect(clusters.getClusterById(1L)).andReturn(cluster).times(2);
     expect(cluster.getClusterName()).andReturn("clusterOld").times(1);
 
@@ -582,8 +589,11 @@ public class AmbariManagementControllerImplTest {
     cluster.setClusterName("clusterNew");
     expectLastCall();
 
+    configurationRequest.setVersionTag(EasyMock.anyObject(String.class));
+    expectLastCall();
+
     // replay mocks
-    replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager);
+    replay(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, configurationRequest);
 
     // test
     AmbariManagementController controller = new AmbariManagementControllerImpl(actionManager, clusters, injector);
@@ -591,7 +601,7 @@ public class AmbariManagementControllerImplTest {
 
     // assert and verify
     assertSame(controller, controllerCapture.getValue());
-    verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager);
+    verify(actionManager, cluster, clusters, injector, clusterRequest, sessionManager, configurationRequest);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/40e6352b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java
index 12cbadf..6dd0748 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ConfigGroupResourceProviderTest.java
@@ -495,6 +495,8 @@ public class ConfigGroupResourceProviderTest {
     expect(hostEntity2.getHostId()).andReturn(2L).atLeastOnce();
     expect(h1.getHostId()).andReturn(1L).anyTimes();
     expect(h2.getHostId()).andReturn(2L).anyTimes();
+    expect(managementController.getConfigGroupUpdateResults((ConfigGroupRequest)anyObject())).
+            andReturn(new ConfigGroupResponse(1L, "", "", "", "", new HashSet<Map<String, Object>>(), new HashSet<Map<String, Object>>())).atLeastOnce();
 
     expect(configGroup.getName()).andReturn("test-1").anyTimes();
     expect(configGroup.getId()).andReturn(25L).anyTimes();
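
The behavioral core of this commit is small: when a desired configuration arrives without a version tag, the backend now fills one in with a random UUID (see the UUID.randomUUID() calls in AmbariManagementControllerImpl and ConfigImpl above) instead of requiring the caller to invent a unique tag. A minimal sketch of that rule, with ConfigurationRequest reduced to a plain holder class rather than the real Ambari request type:

import java.util.UUID;

public class AutoTagSketch {

  /** Simplified stand-in for the real ConfigurationRequest. */
  static class ConfigurationRequest {
    private String versionTag;
    String getVersionTag() { return versionTag; }
    void setVersionTag(String versionTag) { this.versionTag = versionTag; }
  }

  /** Ensure every configuration request carries a non-empty version tag. */
  static void ensureVersionTag(ConfigurationRequest request) {
    String tag = request.getVersionTag();
    if (tag == null || tag.trim().isEmpty()) {
      request.setVersionTag(UUID.randomUUID().toString());
    }
  }

  public static void main(String[] args) {
    ConfigurationRequest request = new ConfigurationRequest(); // no tag supplied by the client
    ensureVersionTag(request);
    System.out.println("generated tag: " + request.getVersionTag());
  }
}

The generated tags are then surfaced back to the caller through the new version_tags field on the config group update response, as shown in ConfigGroupResourceProvider above.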


[47/63] [abbrv] ambari git commit: AMBARI-21352. Workflow Manager view build failure (Venkata Sairam)

Posted by ab...@apache.org.
AMBARI-21352. Workflow Manager view build failure (Venkata Sairam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/aac9fe6e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/aac9fe6e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/aac9fe6e

Branch: refs/heads/branch-feature-logsearch-ui
Commit: aac9fe6e2e358b4ad1a1cfb0b86c3231897e38f1
Parents: 5e50042
Author: Venkata Sairam <ve...@gmail.com>
Authored: Wed Jun 28 14:56:09 2017 +0530
Committer: Venkata Sairam <ve...@gmail.com>
Committed: Wed Jun 28 14:56:09 2017 +0530

----------------------------------------------------------------------
 contrib/views/wfmanager/src/main/resources/ui/bower.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/aac9fe6e/contrib/views/wfmanager/src/main/resources/ui/bower.json
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/bower.json b/contrib/views/wfmanager/src/main/resources/ui/bower.json
index 3f9de44..9812fa6 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/bower.json
+++ b/contrib/views/wfmanager/src/main/resources/ui/bower.json
@@ -21,7 +21,7 @@
     "abdmob/x2js": "~1.2.0",
     "datatables": "~1.10.11",
     "vkBeautify": "https://github.com/vkiryukhin/vkBeautify.git",
-    "cytoscape": "2.7.20",
+    "cytoscape": "2.7.18",
     "cytoscape-dagre": "~1.3.0",
     "cytoscape-panzoom": "~2.4.0",
     "codemirror": "~5.15.0",


[30/63] [abbrv] ambari git commit: AMBARI-21334 Ability to disable Container metrics in AMS (dsen)

Posted by ab...@apache.org.
AMBARI-21334 Ability to disable Container metrics in AMS (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6eaabc12
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6eaabc12
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6eaabc12

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 6eaabc120a2604b3e15c41f77dd88da52b3f6dd0
Parents: a2464b9
Author: Dmytro Sen <ds...@apache.org>
Authored: Mon Jun 26 19:52:14 2017 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Mon Jun 26 19:52:14 2017 +0300

----------------------------------------------------------------------
 .../metrics/timeline/HBaseTimelineMetricStore.java       |  9 ++++++++-
 .../metrics/timeline/TimelineMetricConfiguration.java    | 11 +++++++++++
 .../metrics/timeline/TestTimelineMetricStore.java        |  1 +
 3 files changed, 20 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6eaabc12/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
index 12c27a4..ad1fd67 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
@@ -82,6 +82,7 @@ public class HBaseTimelineMetricStore extends AbstractService implements Timelin
   private TimelineMetricMetadataManager metricMetadataManager;
   private Integer defaultTopNHostsLimit;
   private MetricCollectorHAController haController;
+  private boolean containerMetricsDisabled = false;
 
   /**
    * Construct the service.
@@ -188,7 +189,7 @@ public class HBaseTimelineMetricStore extends AbstractService implements Timelin
         LOG.info("Started watchdog for timeline metrics store with initial " +
           "delay = " + initDelay + ", delay = " + delay);
       }
-
+      containerMetricsDisabled = configuration.isContainerMetricsDisabled();
       isInitialized = true;
     }
 
@@ -363,6 +364,12 @@ public class HBaseTimelineMetricStore extends AbstractService implements Timelin
   @Override
   public TimelinePutResponse putContainerMetrics(List<ContainerMetric> metrics)
       throws SQLException, IOException {
+
+    if (containerMetricsDisabled) {
+      LOG.debug("Ignoring submitted container metrics according to configuration. Values will not be stored.");
+      return new TimelinePutResponse();
+    }
+
     hBaseAccessor.insertContainerMetrics(metrics);
     return new TimelinePutResponse();
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/6eaabc12/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
index 006a403..44073ab 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TimelineMetricConfiguration.java
@@ -191,6 +191,9 @@ public class TimelineMetricConfiguration {
   public static final String TIMELINE_SERVICE_RPC_ADDRESS =
     "timeline.metrics.service.rpc.address";
 
+  public static final String TIMELINE_SERVICE_DISABLE_CONTAINER_METRICS =
+    "timeline.metrics.service.container.metrics.disabled";
+
   public static final String CLUSTER_AGGREGATOR_APP_IDS =
     "timeline.metrics.service.cluster.aggregator.appIds";
 
@@ -507,4 +510,12 @@ public class TimelineMetricConfiguration {
 
     return whitelist;
   }
+
+  public boolean isContainerMetricsDisabled() {
+    try {
+      return metricsConf != null && Boolean.parseBoolean(metricsConf.get(TIMELINE_SERVICE_DISABLE_CONTAINER_METRICS, "false"));
+    } catch (Exception e) {
+      return false;
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/6eaabc12/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
index ac2f9d7..8abcd83 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/TestTimelineMetricStore.java
@@ -111,4 +111,5 @@ public class TestTimelineMetricStore implements TimelineMetricStore {
   public List<String> getLiveInstances() {
     return Collections.emptyList();
   }
+  
 }
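
The new switch is read once during service initialization (isContainerMetricsDisabled()) and then checked in putContainerMetrics(), which returns an empty TimelinePutResponse without writing anything when the flag is set. A minimal sketch of the flag handling, using java.util.Properties in place of the Hadoop Configuration object AMS actually reads; the property name is the one added in TimelineMetricConfiguration above, everything else is illustrative:

import java.util.Properties;

public class ContainerMetricsToggleSketch {

  static final String DISABLE_CONTAINER_METRICS =
      "timeline.metrics.service.container.metrics.disabled";

  /** Missing or unparsable values fall back to "enabled", matching the default of "false". */
  static boolean isContainerMetricsDisabled(Properties metricsConf) {
    try {
      return metricsConf != null
          && Boolean.parseBoolean(metricsConf.getProperty(DISABLE_CONTAINER_METRICS, "false"));
    } catch (Exception e) {
      return false;
    }
  }

  public static void main(String[] args) {
    Properties conf = new Properties();
    conf.setProperty(DISABLE_CONTAINER_METRICS, "true");
    if (isContainerMetricsDisabled(conf)) {
      // This is the point at which HBaseTimelineMetricStore.putContainerMetrics()
      // drops the submitted container metrics and returns an empty response.
      System.out.println("Container metrics submissions will be ignored.");
    }
  }
}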


[03/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog242Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog242Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog242Test.java
deleted file mode 100644
index 2d3704d..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog242Test.java
+++ /dev/null
@@ -1,430 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import static org.easymock.EasyMock.anyString;
-import static org.easymock.EasyMock.aryEq;
-import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.createMockBuilder;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.createStrictMock;
-import static org.easymock.EasyMock.eq;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.newCapture;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-import static org.easymock.EasyMock.verify;
-
-import java.lang.reflect.Method;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import javax.persistence.EntityManager;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.H2DatabaseCleaner;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.dao.PermissionDAO;
-import org.apache.ambari.server.orm.dao.PrincipalDAO;
-import org.apache.ambari.server.orm.dao.PrincipalTypeDAO;
-import org.apache.ambari.server.orm.dao.PrivilegeDAO;
-import org.apache.ambari.server.orm.dao.ResourceTypeDAO;
-import org.apache.ambari.server.orm.dao.RoleAuthorizationDAO;
-import org.apache.ambari.server.orm.dao.StackDAO;
-import org.apache.ambari.server.orm.entities.PermissionEntity;
-import org.apache.ambari.server.orm.entities.PrincipalEntity;
-import org.apache.ambari.server.orm.entities.PrincipalTypeEntity;
-import org.apache.ambari.server.orm.entities.PrivilegeEntity;
-import org.apache.ambari.server.orm.entities.ResourceEntity;
-import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
-import org.apache.ambari.server.orm.entities.RoleAuthorizationEntity;
-import org.apache.ambari.server.state.stack.OsFamily;
-import org.easymock.Capture;
-import org.easymock.EasyMockSupport;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.google.inject.Binder;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.Module;
-import com.google.inject.Provider;
-
-import junit.framework.Assert;
-/**
- * {@link org.apache.ambari.server.upgrade.UpgradeCatalog242} unit tests.
- */
-public class UpgradeCatalog242Test {
-  private Injector injector;
-  private Provider<EntityManager> entityManagerProvider = createStrictMock(Provider.class);
-  private EntityManager entityManager = createNiceMock(EntityManager.class);
-
-
-  @Before
-  public void init() {
-    reset(entityManagerProvider);
-    expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
-    replay(entityManagerProvider);
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-
-    // inject AmbariMetaInfo to ensure that stacks get populated in the DB
-    injector.getInstance(AmbariMetaInfo.class);
-    // load the stack entity
-    StackDAO stackDAO = injector.getInstance(StackDAO.class);
-  }
-
-  @After
-  public void tearDown() throws AmbariException, SQLException {
-    H2DatabaseCleaner.clearDatabaseAndStopPersistenceService(injector);
-  }
-
-  @Test
-  public void testUpdateTablesForMysql() throws Exception{
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    final Configuration configuration = createNiceMock(Configuration.class);
-
-    Capture<DBAccessor.DBColumnInfo> extensionExtensionNameColumnChangeSize = newCapture();
-    Capture<DBAccessor.DBColumnInfo> extensionExtensionVersionColumnChangeSize = newCapture();
-    Capture<DBAccessor.DBColumnInfo> usersUserTypeColumnChangeSize = newCapture();
-    Capture<DBAccessor.DBColumnInfo> usersUserNameColumnChangeSize = newCapture();
-    Capture<DBAccessor.DBColumnInfo> hostRoleCommandRoleColumnChangeSize = newCapture();
-    Capture<DBAccessor.DBColumnInfo> hostRoleCommandStatusColumnChangeSize = newCapture();
-    Capture<DBAccessor.DBColumnInfo> blueprintBlueprintNameColumnChangeSize = newCapture();
-
-
-    expect(configuration.getDatabaseType()).andReturn(Configuration.DatabaseType.MYSQL).once();
-
-    dbAccessor.alterColumn(eq(UpgradeCatalog242.EXTENSION_TABLE), capture(extensionExtensionNameColumnChangeSize));
-    dbAccessor.alterColumn(eq(UpgradeCatalog242.EXTENSION_TABLE), capture(extensionExtensionVersionColumnChangeSize));
-
-    dbAccessor.alterColumn(eq(UpgradeCatalog242.USERS_TABLE), capture(usersUserTypeColumnChangeSize));
-    dbAccessor.alterColumn(eq(UpgradeCatalog242.USERS_TABLE), capture(usersUserNameColumnChangeSize));
-
-    dbAccessor.alterColumn(eq(UpgradeCatalog242.HOST_ROLE_COMMAND_TABLE), capture(hostRoleCommandRoleColumnChangeSize));
-    dbAccessor.alterColumn(eq(UpgradeCatalog242.HOST_ROLE_COMMAND_TABLE), capture(hostRoleCommandStatusColumnChangeSize));
-
-    dbAccessor.dropFKConstraint(eq(UpgradeCatalog242.HOST_GROUP_TABLE), eq("FK_hg_blueprint_name"));
-    dbAccessor.dropFKConstraint(eq(UpgradeCatalog242.HOST_GROUP_TABLE), eq("FK_hostgroup_blueprint_name"));
-    dbAccessor.dropFKConstraint(eq(UpgradeCatalog242.BLUEPRINT_CONFIGURATION), eq("FK_cfg_blueprint_name"));
-    dbAccessor.dropFKConstraint(eq(UpgradeCatalog242.BLUEPRINT_CONFIGURATION), eq("FK_blueprint_configuration_blueprint_name"));
-    dbAccessor.dropFKConstraint(eq(UpgradeCatalog242.BLUEPRINT_SETTING), eq("FK_blueprint_setting_blueprint_name"));
-    dbAccessor.dropFKConstraint(eq(UpgradeCatalog242.BLUEPRINT_SETTING), eq("FK_blueprint_setting_name"));
-
-    dbAccessor.alterColumn(eq(UpgradeCatalog242.BLUEPRINT_TABLE), capture(blueprintBlueprintNameColumnChangeSize));
-
-    dbAccessor.addFKConstraint(eq(UpgradeCatalog242.HOST_GROUP_TABLE), eq("FK_hg_blueprint_name"),
-            aryEq(new String[]{"blueprint_name"}), eq(UpgradeCatalog242.BLUEPRINT_TABLE), aryEq(new String[]{"blueprint_name"}), eq(false));
-
-    dbAccessor.addFKConstraint(eq(UpgradeCatalog242.BLUEPRINT_CONFIGURATION), eq("FK_cfg_blueprint_name"),
-            aryEq(new String[]{"blueprint_name"}), eq(UpgradeCatalog242.BLUEPRINT_TABLE), aryEq(new String[]{"blueprint_name"}), eq(false));
-
-    dbAccessor.addFKConstraint(eq(UpgradeCatalog242.BLUEPRINT_SETTING), eq("FK_blueprint_setting_name"),
-            aryEq(new String[]{"blueprint_name"}), eq(UpgradeCatalog242.BLUEPRINT_TABLE), aryEq(new String[]{"blueprint_name"}), eq(false));
-
-    replay(dbAccessor, configuration);
-    Module module = new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(DBAccessor.class).toInstance(dbAccessor);
-        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        binder.bind(EntityManager.class).toInstance(entityManager);
-        binder.bind(Configuration.class).toInstance(configuration);
-      }
-    };
-
-    Injector injector = Guice.createInjector(module);
-    UpgradeCatalog242 upgradeCatalog242 = injector.getInstance(UpgradeCatalog242.class);
-    upgradeCatalog242.executeDDLUpdates();
-
-    DBAccessor.DBColumnInfo extensionExtensionNameInfo = extensionExtensionNameColumnChangeSize.getValue();
-    Assert.assertNotNull(extensionExtensionNameInfo);
-    Assert.assertEquals(UpgradeCatalog242.EXTENSION_NAME_COLUMN, extensionExtensionNameInfo.getName());
-    Assert.assertEquals(new Integer(100), extensionExtensionNameInfo.getLength());
-    Assert.assertEquals(String.class, extensionExtensionNameInfo.getType());
-    Assert.assertEquals(null, extensionExtensionNameInfo.getDefaultValue());
-    Assert.assertEquals(false, extensionExtensionNameInfo.isNullable());
-
-    DBAccessor.DBColumnInfo extensionExtensionVersionInfo = extensionExtensionVersionColumnChangeSize.getValue();
-    Assert.assertNotNull(extensionExtensionVersionInfo);
-    Assert.assertEquals(UpgradeCatalog242.EXTENSION_VERSION_COLUMN, extensionExtensionVersionInfo.getName());
-    Assert.assertEquals(new Integer(100), extensionExtensionVersionInfo.getLength());
-    Assert.assertEquals(String.class, extensionExtensionVersionInfo.getType());
-    Assert.assertEquals(null, extensionExtensionVersionInfo.getDefaultValue());
-    Assert.assertEquals(false, extensionExtensionVersionInfo.isNullable());
-
-    DBAccessor.DBColumnInfo usersUserTypeInfo = usersUserTypeColumnChangeSize.getValue();
-    Assert.assertNotNull(usersUserTypeInfo);
-    Assert.assertEquals(UpgradeCatalog242.USER_TYPE_COLUMN, usersUserTypeInfo.getName());
-    Assert.assertEquals(new Integer(100), usersUserTypeInfo.getLength());
-    Assert.assertEquals(String.class, usersUserTypeInfo.getType());
-    Assert.assertEquals(null, usersUserTypeInfo.getDefaultValue());
-    Assert.assertEquals(false, usersUserTypeInfo.isNullable());
-
-    DBAccessor.DBColumnInfo usersUserNameInfo = usersUserNameColumnChangeSize.getValue();
-    Assert.assertNotNull(usersUserNameInfo);
-    Assert.assertEquals(UpgradeCatalog242.USER_NAME_COLUMN, usersUserNameInfo.getName());
-    Assert.assertEquals(new Integer(100), usersUserNameInfo.getLength());
-    Assert.assertEquals(String.class, usersUserNameInfo.getType());
-    Assert.assertEquals(null, usersUserNameInfo.getDefaultValue());
-    Assert.assertEquals(false, usersUserNameInfo.isNullable());
-
-    DBAccessor.DBColumnInfo hostRoleCommandRoleInfo = hostRoleCommandRoleColumnChangeSize.getValue();
-    Assert.assertNotNull(hostRoleCommandRoleInfo);
-    Assert.assertEquals(UpgradeCatalog242.ROLE_COLUMN, hostRoleCommandRoleInfo.getName());
-    Assert.assertEquals(new Integer(100), hostRoleCommandRoleInfo.getLength());
-    Assert.assertEquals(String.class, hostRoleCommandRoleInfo.getType());
-    Assert.assertEquals(null, hostRoleCommandRoleInfo.getDefaultValue());
-    Assert.assertEquals(true, hostRoleCommandRoleInfo.isNullable());
-
-    DBAccessor.DBColumnInfo hostRoleCommandStatusInfo = hostRoleCommandStatusColumnChangeSize.getValue();
-    Assert.assertNotNull(hostRoleCommandStatusInfo);
-    Assert.assertEquals(UpgradeCatalog242.STATUS_COLUMN, hostRoleCommandStatusInfo.getName());
-    Assert.assertEquals(new Integer(100), hostRoleCommandStatusInfo.getLength());
-    Assert.assertEquals(String.class, hostRoleCommandStatusInfo.getType());
-    Assert.assertEquals(null, hostRoleCommandStatusInfo.getDefaultValue());
-    Assert.assertEquals(true, hostRoleCommandStatusInfo.isNullable());
-
-    DBAccessor.DBColumnInfo blueprintBlueprintNameInfo = blueprintBlueprintNameColumnChangeSize.getValue();
-    Assert.assertNotNull(blueprintBlueprintNameInfo);
-    Assert.assertEquals(UpgradeCatalog242.BLUEPRINT_NAME_COLUMN, blueprintBlueprintNameInfo.getName());
-    Assert.assertEquals(new Integer(100), blueprintBlueprintNameInfo.getLength());
-    Assert.assertEquals(String.class, blueprintBlueprintNameInfo.getType());
-    Assert.assertEquals(null, blueprintBlueprintNameInfo.getDefaultValue());
-    Assert.assertEquals(false, blueprintBlueprintNameInfo.isNullable());
-
-
-    verify(dbAccessor, configuration);
-  }
-
-  @Test
-  public void testExecuteDMLUpdates() throws Exception {
-    Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
-    Method convertRolePrincipals = UpgradeCatalog242.class.getDeclaredMethod("convertRolePrincipals");
-    Method createRoleAuthorizations = UpgradeCatalog242.class.getDeclaredMethod("createRoleAuthorizations");
-
-    UpgradeCatalog242 upgradeCatalog242 = createMockBuilder(UpgradeCatalog242.class)
-        .addMockedMethod(addNewConfigurationsFromXml)
-        .addMockedMethod(convertRolePrincipals)
-        .addMockedMethod(createRoleAuthorizations)
-        .createMock();
-
-
-    upgradeCatalog242.addNewConfigurationsFromXml();
-    expectLastCall().once();
-
-    upgradeCatalog242.createRoleAuthorizations();
-    expectLastCall().once();
-
-    upgradeCatalog242.convertRolePrincipals();
-    expectLastCall().once();
-
-    replay(upgradeCatalog242);
-
-    upgradeCatalog242.executeDMLUpdates();
-
-    verify(upgradeCatalog242);
-  }
-
-  @Test
-  public void testConvertRolePrincipals() throws AmbariException, SQLException {
-
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    PrincipalEntity clusterAdministratorPrincipalEntity = easyMockSupport.createMock(PrincipalEntity.class);
-
-    PermissionEntity clusterAdministratorPermissionEntity = easyMockSupport.createMock(PermissionEntity.class);
-    expect(clusterAdministratorPermissionEntity.getPrincipal())
-        .andReturn(clusterAdministratorPrincipalEntity)
-        .once();
-
-    PrincipalTypeEntity allClusterAdministratorPrincipalTypeEntity = easyMockSupport.createMock(PrincipalTypeEntity.class);
-
-    PermissionDAO permissionDAO = easyMockSupport.createMock(PermissionDAO.class);
-    expect(permissionDAO.findByName("CLUSTER.ADMINISTRATOR"))
-        .andReturn(clusterAdministratorPermissionEntity)
-        .once();
-    expect(permissionDAO.findByName(anyString()))
-        .andReturn(null)
-        .anyTimes();
-
-    PrincipalTypeDAO principalTypeDAO = easyMockSupport.createMock(PrincipalTypeDAO.class);
-    expect(principalTypeDAO.findByName("ALL.CLUSTER.ADMINISTRATOR"))
-        .andReturn(allClusterAdministratorPrincipalTypeEntity)
-        .once();
-    expect(principalTypeDAO.findByName(anyString()))
-        .andReturn(null)
-        .anyTimes();
-    principalTypeDAO.remove(allClusterAdministratorPrincipalTypeEntity);
-    expectLastCall().once();
-
-    ResourceEntity allClusterAdministratorPrivilege1Resource = easyMockSupport.createMock(ResourceEntity.class);
-    expect(allClusterAdministratorPrivilege1Resource.getId()).andReturn(1L).once();
-
-    PrincipalEntity allClusterAdministratorPrivilege1Principal = easyMockSupport.createMock(PrincipalEntity.class);
-    expect(allClusterAdministratorPrivilege1Principal.getId()).andReturn(1L).once();
-
-    PermissionEntity allClusterAdministratorPrivilege1Permission = easyMockSupport.createMock(PermissionEntity.class);
-    expect(allClusterAdministratorPrivilege1Permission.getId()).andReturn(1).once();
-
-    PrivilegeEntity allClusterAdministratorPrivilege1  = easyMockSupport.createMock(PrivilegeEntity.class);
-    expect(allClusterAdministratorPrivilege1.getId()).andReturn(1).atLeastOnce();
-    expect(allClusterAdministratorPrivilege1.getResource()).andReturn(allClusterAdministratorPrivilege1Resource).once();
-    expect(allClusterAdministratorPrivilege1.getPrincipal()).andReturn(allClusterAdministratorPrivilege1Principal).once();
-    expect(allClusterAdministratorPrivilege1.getPermission()).andReturn(allClusterAdministratorPrivilege1Permission).once();
-    allClusterAdministratorPrivilege1.setPrincipal(clusterAdministratorPrincipalEntity);
-    expectLastCall().once();
-
-    ResourceEntity allClusterAdministratorPrivilege2Resource = easyMockSupport.createMock(ResourceEntity.class);
-    expect(allClusterAdministratorPrivilege2Resource.getId()).andReturn(2L).once();
-
-    PrincipalEntity allClusterAdministratorPrivilege2Principal = easyMockSupport.createMock(PrincipalEntity.class);
-    expect(allClusterAdministratorPrivilege2Principal.getId()).andReturn(2L).once();
-
-    PermissionEntity allClusterAdministratorPrivilege2Permission = easyMockSupport.createMock(PermissionEntity.class);
-    expect(allClusterAdministratorPrivilege2Permission.getId()).andReturn(2).once();
-
-    PrivilegeEntity allClusterAdministratorPrivilege2  = easyMockSupport.createMock(PrivilegeEntity.class);
-    expect(allClusterAdministratorPrivilege2.getId()).andReturn(2).atLeastOnce();
-    expect(allClusterAdministratorPrivilege2.getResource()).andReturn(allClusterAdministratorPrivilege2Resource).once();
-    expect(allClusterAdministratorPrivilege2.getPrincipal()).andReturn(allClusterAdministratorPrivilege2Principal).once();
-    expect(allClusterAdministratorPrivilege2.getPermission()).andReturn(allClusterAdministratorPrivilege2Permission).once();
-    allClusterAdministratorPrivilege2.setPrincipal(clusterAdministratorPrincipalEntity);
-    expectLastCall().once();
-
-    Set<PrivilegeEntity> allClusterAdministratorPrivileges = new HashSet<>();
-    allClusterAdministratorPrivileges.add(allClusterAdministratorPrivilege1);
-    allClusterAdministratorPrivileges.add(allClusterAdministratorPrivilege2);
-
-    PrincipalEntity allClusterAdministratorPrincipalEntity = easyMockSupport.createMock(PrincipalEntity.class);
-    expect(allClusterAdministratorPrincipalEntity.getPrivileges())
-        .andReturn(allClusterAdministratorPrivileges)
-        .once();
-
-    List<PrincipalEntity> allClusterAdministratorPrincipals = new ArrayList<>();
-    allClusterAdministratorPrincipals.add(allClusterAdministratorPrincipalEntity);
-
-    PrincipalDAO principalDAO = easyMockSupport.createMock(PrincipalDAO.class);
-    expect(principalDAO.findByPrincipalType("ALL.CLUSTER.ADMINISTRATOR"))
-        .andReturn(allClusterAdministratorPrincipals)
-        .once();
-    principalDAO.remove(allClusterAdministratorPrincipalEntity);
-    expectLastCall().once();
-
-
-    PrivilegeDAO privilegeDAO = easyMockSupport.createMock(PrivilegeDAO.class);
-    expect(privilegeDAO.merge(allClusterAdministratorPrivilege1))
-        .andReturn(allClusterAdministratorPrivilege1)
-        .once();
-    expect(privilegeDAO.merge(allClusterAdministratorPrivilege2))
-        .andReturn(allClusterAdministratorPrivilege2)
-        .once();
-
-    Injector injector = easyMockSupport.createNiceMock(Injector.class);
-    expect(injector.getInstance(PrincipalTypeDAO.class)).andReturn(principalTypeDAO).atLeastOnce();
-    expect(injector.getInstance(PrincipalDAO.class)).andReturn(principalDAO).atLeastOnce();
-    expect(injector.getInstance(PermissionDAO.class)).andReturn(permissionDAO).atLeastOnce();
-    expect(injector.getInstance(PrivilegeDAO.class)).andReturn(privilegeDAO).atLeastOnce();
-
-    easyMockSupport.replayAll();
-    UpgradeCatalog242 upgradeCatalog = new UpgradeCatalog242(injector);
-    injector.injectMembers(upgradeCatalog);
-    upgradeCatalog.convertRolePrincipals();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testCreateRoleAuthorizations() throws AmbariException, SQLException {
-
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    ResourceTypeEntity ambariResourceTypeEntity = easyMockSupport.createMock(ResourceTypeEntity.class);
-
-    ResourceTypeEntity clusterResourceTypeEntity = easyMockSupport.createMock(ResourceTypeEntity.class);
-
-    PermissionEntity clusterAdministratorPermissionEntity = new PermissionEntity();
-    clusterAdministratorPermissionEntity.setId(1);
-    PermissionEntity ambariAdministratorPermissionEntity = new PermissionEntity();
-    ambariAdministratorPermissionEntity.setId(2);
-
-    PermissionDAO permissionDAO = easyMockSupport.createMock(PermissionDAO.class);
-    expect(permissionDAO.findPermissionByNameAndType("AMBARI.ADMINISTRATOR", ambariResourceTypeEntity))
-        .andReturn(ambariAdministratorPermissionEntity)
-        .times(2);
-    expect(permissionDAO.findPermissionByNameAndType("CLUSTER.ADMINISTRATOR", clusterResourceTypeEntity))
-        .andReturn(clusterAdministratorPermissionEntity)
-        .times(1);
-    expect(permissionDAO.merge(ambariAdministratorPermissionEntity))
-        .andReturn(ambariAdministratorPermissionEntity)
-        .times(2);
-    expect(permissionDAO.merge(clusterAdministratorPermissionEntity))
-        .andReturn(clusterAdministratorPermissionEntity)
-        .times(1);
-
-    ResourceTypeDAO resourceTypeDAO = easyMockSupport.createMock(ResourceTypeDAO.class);
-    expect(resourceTypeDAO.findByName("AMBARI")).andReturn(ambariResourceTypeEntity).times(2);
-    expect(resourceTypeDAO.findByName("CLUSTER")).andReturn(clusterResourceTypeEntity).times(1);
-
-    RoleAuthorizationDAO roleAuthorizationDAO = easyMockSupport.createMock(RoleAuthorizationDAO.class);
-    expect(roleAuthorizationDAO.findById("CLUSTER.RUN_CUSTOM_COMMAND")).andReturn(null).times(1);
-    expect(roleAuthorizationDAO.findById("AMBARI.RUN_CUSTOM_COMMAND")).andReturn(null).times(1);
-
-    Capture<RoleAuthorizationEntity> captureClusterRunCustomCommandEntity = newCapture();
-    roleAuthorizationDAO.create(capture(captureClusterRunCustomCommandEntity));
-    expectLastCall().times(1);
-
-    Capture<RoleAuthorizationEntity> captureAmbariRunCustomCommandEntity = newCapture();
-    roleAuthorizationDAO.create(capture(captureAmbariRunCustomCommandEntity));
-    expectLastCall().times(1);
-
-    Injector injector = easyMockSupport.createNiceMock(Injector.class);
-    expect(injector.getInstance(RoleAuthorizationDAO.class)).andReturn(roleAuthorizationDAO).atLeastOnce();
-    expect(injector.getInstance(PermissionDAO.class)).andReturn(permissionDAO).atLeastOnce();
-    expect(injector.getInstance(ResourceTypeDAO.class)).andReturn(resourceTypeDAO).atLeastOnce();
-
-    easyMockSupport.replayAll();
-    new UpgradeCatalog242(injector).createRoleAuthorizations();
-    easyMockSupport.verifyAll();
-
-    RoleAuthorizationEntity ambariRunCustomCommandEntity = captureAmbariRunCustomCommandEntity.getValue();
-    RoleAuthorizationEntity clusterRunCustomCommandEntity = captureClusterRunCustomCommandEntity.getValue();
-
-    Assert.assertEquals("AMBARI.RUN_CUSTOM_COMMAND", ambariRunCustomCommandEntity.getAuthorizationId());
-    Assert.assertEquals("Perform custom administrative actions", ambariRunCustomCommandEntity.getAuthorizationName());
-
-    Assert.assertEquals("CLUSTER.RUN_CUSTOM_COMMAND", clusterRunCustomCommandEntity.getAuthorizationId());
-    Assert.assertEquals("Perform custom cluster-level actions", clusterRunCustomCommandEntity.getAuthorizationName());
-
-    Assert.assertEquals(2, ambariAdministratorPermissionEntity.getAuthorizations().size());
-    Assert.assertTrue(ambariAdministratorPermissionEntity.getAuthorizations().contains(clusterRunCustomCommandEntity));
-    Assert.assertTrue(ambariAdministratorPermissionEntity.getAuthorizations().contains(ambariRunCustomCommandEntity));
-
-    Assert.assertEquals(1, clusterAdministratorPermissionEntity.getAuthorizations().size());
-    Assert.assertTrue(clusterAdministratorPermissionEntity.getAuthorizations().contains(clusterRunCustomCommandEntity));
-  }
-}


[27/63] [abbrv] ambari git commit: AMBARI-21304. Zeppelin should have a preconfigured %jdbc(spark) interpreter (Prabhjyot Singh via Venkata Sairam)

Posted by ab...@apache.org.
AMBARI-21304. Zeppelin should have a preconfigured %jdbc(spark) interpreter (Prabhjyot Singh via Venkata Sairam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8da634c7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8da634c7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8da634c7

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 8da634c70042e6bbd99a93dd08211021798875a7
Parents: 16dc405
Author: Venkata Sairam <ve...@gmail.com>
Authored: Mon Jun 26 12:23:43 2017 +0530
Committer: Venkata Sairam <ve...@gmail.com>
Committed: Mon Jun 26 12:23:43 2017 +0530

----------------------------------------------------------------------
 .../0.6.0.2.5/package/scripts/master.py         | 19 ++++++++++++++++
 .../0.6.0.2.5/package/scripts/params.py         | 23 ++++++++++++++++++++
 2 files changed, 42 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8da634c7/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
index 75ca6a5..8b5f821 100644
--- a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
@@ -377,6 +377,25 @@ class Master(Script):
                                                     params.hive_server_interactive_hosts + \
                                                     ':' + params.hive_server_port
 
+        if params.spark_thrift_server_hosts:
+          interpreter['properties']['spark.driver'] = 'org.apache.hive.jdbc.HiveDriver'
+          interpreter['properties']['spark.user'] = 'hive'
+          interpreter['properties']['spark.password'] = ''
+          interpreter['properties']['spark.proxy.user.property'] = 'hive.server2.proxy.user'
+          interpreter['properties']['spark.url'] = 'jdbc:hive2://' + \
+              params.spark_thrift_server_hosts + ':' + params.spark_hive_thrift_port + '/'
+          if params.spark_hive_principal:
+            interpreter['properties']['spark.url'] += ';principal=' + params.spark_hive_principal
+
+        if params.spark2_thrift_server_hosts:
+          interpreter['properties']['spark2.driver'] = 'org.apache.hive.jdbc.HiveDriver'
+          interpreter['properties']['spark2.user'] = 'hive'
+          interpreter['properties']['spark2.password'] = ''
+          interpreter['properties']['spark2.proxy.user.property'] = 'hive.server2.proxy.user'
+          interpreter['properties']['spark2.url'] = 'jdbc:hive2://' + \
+              params.spark2_thrift_server_hosts + ':' + params.spark2_hive_thrift_port + '/'
+          if params.spark_hive_principal:
+            interpreter['properties']['spark2.url'] += ';principal=' + params.spark2_hive_principal
 
         if params.zookeeper_znode_parent \
                 and params.hbase_zookeeper_quorum:

http://git-wip-us.apache.org/repos/asf/ambari/blob/8da634c7/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/params.py
index 2340df9..f1ecbb1 100644
--- a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/params.py
@@ -152,6 +152,29 @@ if 'hive_server_interactive_hosts' in master_configs and len(master_configs['hiv
     hive_zookeeper_quorum = config['configurations']['hive-site']['hive.zookeeper.quorum']
     hive_server2_support_dynamic_service_discovery = config['configurations']['hive-site']['hive.server2.support.dynamic.service.discovery']
 
+spark_thrift_server_hosts = None
+spark_hive_thrift_port = None
+spark_hive_principal = None
+if 'spark_thriftserver_hosts' in master_configs and len(master_configs['spark_thriftserver_hosts']) != 0:
+  spark_thrift_server_hosts = str(master_configs['spark_thriftserver_hosts'][0])
+  if config['configurations']['spark-hive-site-override']:
+    spark_hive_thrift_port = config['configurations']['spark-hive-site-override']['hive.server2.thrift.port']
+  if config['configurations']['spark-thrift-sparkconf'] and \
+      'spark.sql.hive.hiveserver2.jdbc.url.principal' in config['configurations']['spark-thrift-sparkconf']:
+    spark_hive_principal = config['configurations']['spark-thrift-sparkconf']['spark.sql.hive.hiveserver2.jdbc.url.principal']
+
+spark2_thrift_server_hosts = None
+spark2_hive_thrift_port = None
+spark2_hive_principal = None
+if 'spark2_thriftserver_hosts' in master_configs and len(master_configs['spark2_thriftserver_hosts']) != 0:
+  spark2_thrift_server_hosts = str(master_configs['spark2_thriftserver_hosts'][0])
+  if config['configurations']['spark2-hive-site-override']:
+    spark2_hive_thrift_port = config['configurations']['spark2-hive-site-override']['hive.server2.thrift.port']
+  if config['configurations']['spark2-thrift-sparkconf'] and \
+      'spark.sql.hive.hiveserver2.jdbc.url.principal' in config['configurations']['spark2-thrift-sparkconf']:
+    spark2_hive_principal = config['configurations']['spark2-thrift-sparkconf']['spark.sql.hive.hiveserver2.jdbc.url.principal']
+
+
 # detect hbase details if installed
 zookeeper_znode_parent = None
 hbase_zookeeper_quorum = None
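
Taken together, the two hunks above make Zeppelin's preconfigured %jdbc interpreter point the spark and spark2 prefixes at the corresponding Thrift Server through the Hive JDBC driver. The following is a minimal sketch of the URL that gets built; the host, port, and principal are placeholders, while in Ambari the real values come from spark-hive-site-override and spark-thrift-sparkconf via the new params.py variables.

# Minimal sketch mirroring the concatenation added to master.py above.
# 'thrift-host.example.com', '10015' and the principal are placeholder values.
def build_spark_jdbc_url(host, port, principal=None):
    url = 'jdbc:hive2://' + host + ':' + port + '/'
    if principal:
        # Kerberized clusters append the HiveServer2 principal.
        url += ';principal=' + principal
    return url

# build_spark_jdbc_url('thrift-host.example.com', '10015', 'hive/_HOST@EXAMPLE.COM')
# -> 'jdbc:hive2://thrift-host.example.com:10015/;principal=hive/_HOST@EXAMPLE.COM'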


[25/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/af1bf85c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/af1bf85c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/af1bf85c

Branch: refs/heads/branch-feature-logsearch-ui
Commit: af1bf85c6801293fce732e17c13bb4fd5377f0b1
Parents: 3acf908
Author: Dmytro Grinenko <ha...@apache.org>
Authored: Sun Jun 25 12:58:05 2017 +0300
Committer: Dmytro Grinenko <ha...@apache.org>
Committed: Sun Jun 25 12:59:06 2017 +0300

----------------------------------------------------------------------
 .../server/upgrade/SchemaUpgradeHelper.java     |   13 -
 .../server/upgrade/UpgradeCatalog200.java       |  613 ----
 .../server/upgrade/UpgradeCatalog210.java       | 1765 ----------
 .../server/upgrade/UpgradeCatalog211.java       |  295 --
 .../server/upgrade/UpgradeCatalog212.java       |  427 ---
 .../server/upgrade/UpgradeCatalog2121.java      |  206 --
 .../server/upgrade/UpgradeCatalog220.java       | 1404 --------
 .../server/upgrade/UpgradeCatalog221.java       |  456 ---
 .../server/upgrade/UpgradeCatalog222.java       |  781 -----
 .../server/upgrade/UpgradeCatalog230.java       |  402 ---
 .../server/upgrade/UpgradeCatalog240.java       | 3079 ------------------
 .../server/upgrade/UpgradeCatalog2402.java      |  121 -
 .../server/upgrade/UpgradeCatalog242.java       |  272 --
 .../server/upgrade/UpgradeCatalog250.java       | 1352 --------
 .../server/upgrade/UpgradeCatalog300.java       |    5 +-
 ambari-server/src/main/python/upgradeHelper.py  | 2338 -------------
 .../catalog/UpgradeCatalog_1.3_to_2.2.json      |  948 ------
 .../catalog/UpgradeCatalog_2.0_to_2.2.2.json    |  408 ---
 .../catalog/UpgradeCatalog_2.0_to_2.2.4.json    |  453 ---
 .../catalog/UpgradeCatalog_2.0_to_2.2.json      |  275 --
 .../catalog/UpgradeCatalog_2.1_to_2.2.2.json    |  465 ---
 .../catalog/UpgradeCatalog_2.1_to_2.2.4.json    |  499 ---
 .../catalog/UpgradeCatalog_2.1_to_2.2.json      |  292 --
 .../catalog/UpgradeCatalog_2.1_to_2.3.json      |  440 ---
 .../catalog/UpgradeCatalog_2.2_to_2.3.json      | 2234 -------------
 .../UpgradeCatalog_2.2_to_2.3_step2.json        |   81 -
 .../server/upgrade/UpgradeCatalog200Test.java   |  915 ------
 .../server/upgrade/UpgradeCatalog210Test.java   | 1360 --------
 .../server/upgrade/UpgradeCatalog211Test.java   |  446 ---
 .../server/upgrade/UpgradeCatalog2121Test.java  |  161 -
 .../server/upgrade/UpgradeCatalog212Test.java   |  694 ----
 .../server/upgrade/UpgradeCatalog220Test.java   | 1535 ---------
 .../server/upgrade/UpgradeCatalog221Test.java   |  614 ----
 .../server/upgrade/UpgradeCatalog222Test.java   | 1180 -------
 .../server/upgrade/UpgradeCatalog230Test.java   |  317 --
 .../server/upgrade/UpgradeCatalog240Test.java   | 2688 ---------------
 .../server/upgrade/UpgradeCatalog242Test.java   |  430 ---
 .../server/upgrade/UpgradeCatalog250Test.java   | 2129 ------------
 .../server/upgrade/UpgradeCatalog300Test.java   |    2 +-
 .../server/upgrade/UpgradeCatalogTest.java      |   13 +-
 40 files changed, 11 insertions(+), 32097 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
index dee05c3..bcc8328 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/SchemaUpgradeHelper.java
@@ -176,19 +176,6 @@ public class SchemaUpgradeHelper {
       // Add binding to each newly created catalog
       Multibinder<UpgradeCatalog> catalogBinder =
         Multibinder.newSetBinder(binder(), UpgradeCatalog.class);
-      catalogBinder.addBinding().to(UpgradeCatalog200.class);
-      catalogBinder.addBinding().to(UpgradeCatalog210.class);
-      catalogBinder.addBinding().to(UpgradeCatalog211.class);
-      catalogBinder.addBinding().to(UpgradeCatalog212.class);
-      catalogBinder.addBinding().to(UpgradeCatalog2121.class);
-      catalogBinder.addBinding().to(UpgradeCatalog220.class);
-      catalogBinder.addBinding().to(UpgradeCatalog221.class);
-      catalogBinder.addBinding().to(UpgradeCatalog222.class);
-      catalogBinder.addBinding().to(UpgradeCatalog230.class);
-      catalogBinder.addBinding().to(UpgradeCatalog240.class);
-      catalogBinder.addBinding().to(UpgradeCatalog2402.class);
-      catalogBinder.addBinding().to(UpgradeCatalog242.class);
-      catalogBinder.addBinding().to(UpgradeCatalog250.class);
       catalogBinder.addBinding().to(UpgradeCatalog251.class);
       catalogBinder.addBinding().to(UpgradeCatalog252.class);
       catalogBinder.addBinding().to(UpgradeCatalog300.class);
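
With every catalog older than 2.5.1 unregistered above, the schema upgrade path is computed only over the three remaining bindings. Below is a rough conceptual sketch of that selection, written in Python for brevity; the real logic is Java in SchemaUpgradeHelper and uses its own version comparison.

# Conceptual sketch only, not Ambari code. The target versions listed are
# the catalogs still registered after this patch.
REMAINING_TARGET_VERSIONS = ["2.5.1", "2.5.2", "3.0.0"]

def version_key(version):
    return tuple(int(part) for part in version.split("."))

def upgrade_path(current_schema_version, registered=REMAINING_TARGET_VERSIONS):
    # Run every catalog newer than the current schema version, oldest first.
    return sorted(
        (v for v in registered if version_key(v) > version_key(current_schema_version)),
        key=version_key,
    )

# upgrade_path("2.5.0") -> ["2.5.1", "2.5.2", "3.0.0"]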

http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog200.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog200.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog200.java
deleted file mode 100644
index a9280a4..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog200.java
+++ /dev/null
@@ -1,613 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.ambari.annotations.Experimental;
-import org.apache.ambari.annotations.ExperimentalFeature;
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
-import org.apache.ambari.server.orm.dao.ClusterDAO;
-import org.apache.ambari.server.orm.dao.ClusterServiceDAO;
-import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
-import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
-import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
-import org.apache.ambari.server.orm.dao.ServiceDesiredStateDAO;
-import org.apache.ambari.server.orm.entities.ClusterEntity;
-import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
-import org.apache.ambari.server.orm.entities.ClusterServiceEntityPK;
-import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
-import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.PropertyInfo;
-import org.apache.ambari.server.state.RepositoryInfo;
-import org.apache.ambari.server.state.SecurityState;
-import org.apache.ambari.server.state.SecurityType;
-import org.apache.ambari.server.state.UpgradeState;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-
-/**
- * Upgrade catalog for version 2.0.0.
- */
-public class UpgradeCatalog200 extends AbstractUpgradeCatalog {
-
-  private static final String ALERT_DEFINITION_TABLE = "alert_definition";
-  private static final String ALERT_TARGET_TABLE = "alert_target";
-  private static final String ALERT_TARGET_STATES_TABLE = "alert_target_states";
-  private static final String ALERT_CURRENT_TABLE = "alert_current";
-  private static final String ARTIFACT_TABLE = "artifact";
-  private static final String KERBEROS_PRINCIPAL_TABLE = "kerberos_principal";
-  private static final String KERBEROS_PRINCIPAL_HOST_TABLE = "kerberos_principal_host";
-  private static final String TEZ_USE_CLUSTER_HADOOP_LIBS_PROPERTY = "tez.use.cluster.hadoop-libs";
-  private static final String FLUME_ENV_CONFIG = "flume-env";
-  private static final String CONTENT_PROPERTY = "content";
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getTargetVersion() {
-    return "2.0.0";
-  }
-
-  /**
-   * Logger.
-   */
-  private static final Logger LOG = LoggerFactory.getLogger
-      (UpgradeCatalog200.class);
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Don't forget to register new UpgradeCatalogs in {@link org.apache.ambari.server.upgrade.SchemaUpgradeHelper.UpgradeHelperModule#configure()}
-   * @param injector Guice injector to track dependencies and uses bindings to inject them.
-   */
-  @Inject
-  public UpgradeCatalog200(Injector injector) {
-    super(injector);
-    this.injector = injector;
-  }
-
-  // ----- AbstractUpgradeCatalog --------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDDLUpdates() throws AmbariException, SQLException {
-    prepareRollingUpgradesDDL();
-    executeAlertDDLUpdates();
-    createArtifactTable();
-    createKerberosPrincipalTables();
-
-    // add viewparameter columns
-    dbAccessor.addColumn("viewparameter", new DBColumnInfo("label",
-        String.class, 255, null, true));
-
-    dbAccessor.addColumn("viewparameter", new DBColumnInfo("placeholder",
-        String.class, 255, null, true));
-
-    dbAccessor.addColumn("viewparameter", new DBColumnInfo("default_value",
-        String.class, 2000, null, true));
-
-    // add security_type to clusters
-    dbAccessor.addColumn("clusters", new DBColumnInfo(
-        "security_type", String.class, 32, SecurityType.NONE.toString(), false));
-
-    // add security_state to various tables
-    dbAccessor.addColumn("hostcomponentdesiredstate", new DBColumnInfo(
-        "security_state", String.class, 32, SecurityState.UNSECURED.toString(), false));
-    dbAccessor.addColumn("hostcomponentstate", new DBColumnInfo(
-        "security_state", String.class, 32, SecurityState.UNSECURED.toString(), false));
-    dbAccessor.addColumn("servicedesiredstate", new DBColumnInfo(
-        "security_state", String.class, 32, SecurityState.UNSECURED.toString(), false));
-
-    // Alter column : make viewinstanceproperty.value & viewinstancedata.value
-    // nullable
-    dbAccessor.alterColumn("viewinstanceproperty", new DBColumnInfo("value",
-        String.class, 2000, null, true));
-    dbAccessor.alterColumn("viewinstancedata", new DBColumnInfo("value",
-        String.class, 2000, null, true));
-  }
-
-  /**
-   * Execute all of the alert DDL updates.
-   *
-   * @throws AmbariException
-   * @throws SQLException
-   */
-  private void executeAlertDDLUpdates() throws AmbariException, SQLException {
-    // add ignore_host column to alert_definition
-    dbAccessor.addColumn(ALERT_DEFINITION_TABLE, new DBColumnInfo(
-            "ignore_host", Short.class, 1, 0, false));
-
-    dbAccessor.addColumn(ALERT_DEFINITION_TABLE, new DBColumnInfo(
-            "description", char[].class, 32672, null, true));
-
-    // update alert target
-    dbAccessor.addColumn(ALERT_TARGET_TABLE, new DBColumnInfo("is_global",
-        Short.class, 1, 0, false));
-
-    // create alert_target_states table
-    ArrayList<DBColumnInfo> columns = new ArrayList<>();
-    columns.add(new DBColumnInfo("target_id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("alert_state", String.class, 255, null, false));
-    dbAccessor.createTable(ALERT_TARGET_STATES_TABLE, columns);
-    dbAccessor.addFKConstraint(ALERT_TARGET_STATES_TABLE,
-        "fk_alert_tgt_states_tgt_id", "target_id", ALERT_TARGET_TABLE,
-        "target_id", false);
-
-    // update alert current maintenance mode
-    dbAccessor.alterColumn(ALERT_CURRENT_TABLE, new DBColumnInfo(
-        "maintenance_state", String.class, 255, null, false));
-  }
-
-  /**
-   * Add any columns, tables, and keys needed for Rolling Upgrades.
-   * @throws SQLException
-   */
-  private void prepareRollingUpgradesDDL() throws SQLException {
-    List<DBAccessor.DBColumnInfo> columns = new ArrayList<>();
-
-    columns.add(new DBColumnInfo("repo_version_id", Long.class,    null,  null, false));
-    columns.add(new DBColumnInfo("stack",           String.class,  255,   null, false));
-    columns.add(new DBColumnInfo("version",         String.class,  255,   null, false));
-    columns.add(new DBColumnInfo("display_name",    String.class,  128,   null, false));
-    columns.add(new DBColumnInfo("upgrade_package", String.class,  255,   null, false));
-    columns.add(new DBColumnInfo("repositories",    char[].class,  null,  null, false));
-    dbAccessor.createTable("repo_version", columns, "repo_version_id");
-    addSequence("repo_version_id_seq", 0L, false);
-
-
-    dbAccessor.addUniqueConstraint("repo_version", "UQ_repo_version_display_name", "display_name");
-    dbAccessor.addUniqueConstraint("repo_version", "UQ_repo_version_stack_version", "stack", "version");
-
-    // New columns
-    dbAccessor.addColumn("hostcomponentstate", new DBAccessor.DBColumnInfo("upgrade_state",
-        String.class, 32, "NONE", false));
-
-    dbAccessor.addColumn("hostcomponentstate", new DBAccessor.DBColumnInfo("version",
-        String.class, 32, "UNKNOWN", false));
-
-    dbAccessor.addColumn("host_role_command", new DBAccessor.DBColumnInfo("retry_allowed",
-        Integer.class, 1, 0, false));
-
-    dbAccessor.addColumn("stage", new DBAccessor.DBColumnInfo("skippable",
-        Integer.class, 1, 0, false));
-
-    // New tables
-    columns = new ArrayList<>();
-    columns.add(new DBAccessor.DBColumnInfo("id", Long.class, null, null, false));
-    columns.add(new DBAccessor.DBColumnInfo("repo_version_id", Long.class, null, null, false));
-    columns.add(new DBAccessor.DBColumnInfo("cluster_id", Long.class, null, null, false));
-    columns.add(new DBAccessor.DBColumnInfo("state", String.class, 32, null, false));
-    columns.add(new DBAccessor.DBColumnInfo("start_time", Long.class, null, null, false));
-    columns.add(new DBAccessor.DBColumnInfo("end_time", Long.class, null, null, true));
-    columns.add(new DBAccessor.DBColumnInfo("user_name", String.class, 32, null, true));
-    dbAccessor.createTable("cluster_version", columns, "id");
-
-    columns = new ArrayList<>();
-    columns.add(new DBAccessor.DBColumnInfo("id", Long.class, null, null, false));
-    columns.add(new DBAccessor.DBColumnInfo("repo_version_id", Long.class, null, null, false));
-    columns.add(new DBAccessor.DBColumnInfo("host_name", String.class, 255, null, false));
-    columns.add(new DBAccessor.DBColumnInfo("state", String.class, 32, null, false));
-    dbAccessor.createTable("host_version", columns, "id");
-
-    // Foreign Key Constraints
-    dbAccessor.addFKConstraint("cluster_version", "FK_cluster_version_cluster_id", "cluster_id", "clusters", "cluster_id", false);
-    dbAccessor.addFKConstraint("cluster_version", "FK_cluster_version_repovers_id", "repo_version_id", "repo_version", "repo_version_id", false);
-    if (dbAccessor.tableHasColumn("host_version", "host_name")) {
-      dbAccessor.addFKConstraint("host_version", "FK_host_version_host_name", "host_name", "hosts", "host_name", false);
-    }
-    dbAccessor.addFKConstraint("host_version", "FK_host_version_repovers_id", "repo_version_id", "repo_version", "repo_version_id", false);
-
-    // New sequences
-    addSequence("cluster_version_id_seq", 0L, false);
-    addSequence("host_version_id_seq", 0L, false);
-
-    // upgrade tables
-    columns = new ArrayList<>();
-    columns.add(new DBAccessor.DBColumnInfo("upgrade_id", Long.class, null, null, false));
-    columns.add(new DBAccessor.DBColumnInfo("cluster_id", Long.class, null, null, false));
-    columns.add(new DBAccessor.DBColumnInfo("request_id", Long.class, null, null, false));
-    columns.add(new DBAccessor.DBColumnInfo("from_version", String.class, 255, "", false));
-    columns.add(new DBAccessor.DBColumnInfo("to_version", String.class, 255, "", false));
-    columns.add(new DBAccessor.DBColumnInfo("direction", String.class, 255, "UPGRADE", false));
-    dbAccessor.createTable("upgrade", columns, "upgrade_id");
-    dbAccessor.addFKConstraint("upgrade", "fk_upgrade_cluster_id", "cluster_id", "clusters", "cluster_id", false);
-    dbAccessor.addFKConstraint("upgrade", "fk_upgrade_request_id", "request_id", "request", "request_id", false);
-    addSequence("upgrade_id_seq", 0L, false);
-
-    columns = new ArrayList<>();
-    columns.add(new DBAccessor.DBColumnInfo("upgrade_group_id", Long.class, null, null, false));
-    columns.add(new DBAccessor.DBColumnInfo("upgrade_id", Long.class, null, null, false));
-    columns.add(new DBAccessor.DBColumnInfo("group_name", String.class, 255, "", false));
-    columns.add(new DBAccessor.DBColumnInfo("group_title", String.class, 1024, "", false));
-    dbAccessor.createTable("upgrade_group", columns, "upgrade_group_id");
-    dbAccessor.addFKConstraint("upgrade_group", "fk_upgrade_group_upgrade_id", "upgrade_id", "upgrade", "upgrade_id", false);
-    addSequence("upgrade_group_id_seq", 0L, false);
-
-
-    columns = new ArrayList<>();
-    columns.add(new DBAccessor.DBColumnInfo("upgrade_item_id", Long.class, null, null, false));
-    columns.add(new DBAccessor.DBColumnInfo("upgrade_group_id", Long.class, null, null, false));
-    columns.add(new DBAccessor.DBColumnInfo("stage_id", Long.class, null, null, false));
-    columns.add(new DBAccessor.DBColumnInfo("state", String.class, 255, UpgradeState.NONE.name(), false));
-    columns.add(new DBAccessor.DBColumnInfo("hosts", char[].class, 32672, null, true));
-    columns.add(new DBAccessor.DBColumnInfo("tasks", char[].class, 32672, null, true));
-    columns.add(new DBAccessor.DBColumnInfo("item_text", String.class, 1024, null, true));
-    dbAccessor.createTable("upgrade_item", columns, "upgrade_item_id");
-    dbAccessor.addFKConstraint("upgrade_item", "fk_upg_item_upgrade_group_id", "upgrade_group_id", "upgrade_group", "upgrade_group_id", false);
-    addSequence("upgrade_item_id_seq", 0L, false);
-  }
-
-  private void createArtifactTable() throws SQLException {
-    ArrayList<DBColumnInfo> columns = new ArrayList<>();
-    columns.add(new DBColumnInfo("artifact_name", String.class, 255, null, false));
-    columns.add(new DBColumnInfo("foreign_keys", String.class, 255, null, false));
-    columns.add(new DBColumnInfo("artifact_data", char[].class, null, null, false));
-    dbAccessor.createTable(ARTIFACT_TABLE, columns, "artifact_name", "foreign_keys");
-  }
-
-  private void createKerberosPrincipalTables() throws SQLException {
-    ArrayList<DBColumnInfo> columns;
-
-    columns = new ArrayList<>();
-    columns.add(new DBColumnInfo("principal_name", String.class, 255, null, false));
-    columns.add(new DBColumnInfo("is_service", Short.class, 1, 1, false));
-    columns.add(new DBColumnInfo("cached_keytab_path", String.class, 255, null, true));
-    dbAccessor.createTable(KERBEROS_PRINCIPAL_TABLE, columns, "principal_name");
-
-    columns = new ArrayList<>();
-    columns.add(new DBColumnInfo("principal_name", String.class, 255, null, false));
-    columns.add(new DBColumnInfo("host_name", String.class, 255, null, false));
-    dbAccessor.createTable(KERBEROS_PRINCIPAL_HOST_TABLE, columns, "principal_name", "host_name");
-    if (dbAccessor.tableHasColumn(KERBEROS_PRINCIPAL_HOST_TABLE, "host_name")) {
-      dbAccessor.addFKConstraint(KERBEROS_PRINCIPAL_HOST_TABLE, "FK_krb_pr_host_hostname", "host_name", "hosts", "host_name", true, false);
-    }
-    dbAccessor.addFKConstraint(KERBEROS_PRINCIPAL_HOST_TABLE, "FK_krb_pr_host_principalname", "principal_name", KERBEROS_PRINCIPAL_TABLE, "principal_name", true, false);
-  }
-
-  // ----- UpgradeCatalog ----------------------------------------------------
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public void executePreDMLUpdates() {
-    ;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDMLUpdates() throws AmbariException, SQLException {
-    // remove NAGIOS to make way for the new embedded alert framework
-    removeNagiosService();
-    addNewConfigurationsFromXml();
-    updateHiveDatabaseType();
-    updateTezConfiguration();
-    updateFlumeEnvConfig();
-    addMissingConfigs();
-    persistHDPRepo();
-    updateClusterEnvConfiguration();
-  }
-
-  @Experimental(feature=ExperimentalFeature.PATCH_UPGRADES,
-      comment = "the metainfo table of storing the latest repo will be removed")
-  protected void persistHDPRepo() throws AmbariException{
-    /*
-    AmbariManagementController amc = injector.getInstance(
-            AmbariManagementController.class);
-    AmbariMetaInfo ambariMetaInfo = amc.getAmbariMetaInfo();
-    Map<String, Cluster> clusterMap = amc.getClusters().getClusters();
-    for (Cluster cluster : clusterMap.values()) {
-      StackId stackId = cluster.getCurrentStackVersion();
-      String stackName = stackId.getStackName();
-      String stackVersion = stackId.getStackVersion();
-      String stackRepoId = stackName + "-" + stackVersion;
-
-      for (OperatingSystemInfo osi : ambariMetaInfo.getOperatingSystems(stackName, stackVersion)) {
-        MetainfoDAO metaInfoDAO = injector.getInstance(MetainfoDAO.class);
-        String repoMetaKey = AmbariMetaInfo.generateRepoMetaKey(stackName,stackVersion,osi.getOsType(),
-                stackRepoId,AmbariMetaInfo.REPOSITORY_XML_PROPERTY_BASEURL);
-        // Check if default repo is used and not persisted
-        if (metaInfoDAO.findByKey(repoMetaKey) == null) {
-          RepositoryInfo repositoryInfo = ambariMetaInfo.getRepository(stackName, stackVersion, osi.getOsType(), stackRepoId);
-          // We save default base url which has not changed during upgrade as base url
-          String baseUrl = repositoryInfo.getDefaultBaseUrl();
-          ambariMetaInfo.updateRepo(stackName, stackVersion, osi.getOsType(),
-              stackRepoId, baseUrl, null);
-        }
-      }
-
-      // Repositories that have been autoset may be unexpected for user
-      // (especially if they are taken from online json)
-      // We have to output to stdout here, and not to log
-      // to be sure that user sees this message
-      System.out.printf("Ambari has recorded the following repository base urls for cluster %s. Please verify the " +
-              "values and ensure that these are correct. If necessary, " +
-              "after starting Ambari Server, you can edit them using Ambari UI, " +
-              "Admin -> Stacks and Versions -> Versions Tab and editing the base urls for the current Repo. " +
-              "It is critical that these repo base urls are valid for your environment as they " +
-              "will be used for Add Host/Service operations.",
-        cluster.getClusterName());
-      System.out.println(repositoryTable(ambariMetaInfo.getStack(stackName, stackVersion).getRepositories()));
-    }
-    */
-  }
-
-  /**
-   * Formats a list repositories for printing to console
-   * @param repositories list of repositories
-   * @return multi-line string
-   */
-  static String repositoryTable(List<RepositoryInfo> repositories) {
-    StringBuilder result = new StringBuilder();
-    for (RepositoryInfo repository : repositories) {
-      result.append(String.format(" %8s |", repository.getOsType()));
-      result.append(String.format(" %18s |", repository.getRepoId()));
-      result.append(String.format(" %48s ", repository.getBaseUrl()));
-      result.append("\n");
-    }
-    return result.toString();
-  }
-
-  protected void updateTezConfiguration() throws AmbariException {
-    updateConfigurationProperties("tez-site", Collections.singletonMap(TEZ_USE_CLUSTER_HADOOP_LIBS_PROPERTY, String.valueOf(false)), false, false);
-  }
-
-  protected void updateFlumeEnvConfig() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config flumeEnvConfig = cluster.getDesiredConfigByType(FLUME_ENV_CONFIG);
-      if (flumeEnvConfig != null) {
-        String content = flumeEnvConfig.getProperties().get(CONTENT_PROPERTY);
-        if (content != null && !content.contains("/usr/lib/flume/lib/ambari-metrics-flume-sink.jar")) {
-          String newPartOfContent = "\n\n" +
-            "# Note that the Flume conf directory is always included in the classpath.\n" +
-            "# Add flume sink to classpath\n" +
-            "if [ -e \"/usr/lib/flume/lib/ambari-metrics-flume-sink.jar\" ]; then\n" +
-            "  export FLUME_CLASSPATH=$FLUME_CLASSPATH:/usr/lib/flume/lib/ambari-metrics-flume-sink.jar\n" +
-            "fi\n";
-          content += newPartOfContent;
-          Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, content);
-          updateConfigurationPropertiesForCluster(cluster, FLUME_ENV_CONFIG, updates, true, false);
-        }
-      }
-    }
-  }
-
-  protected void updateHiveDatabaseType() throws AmbariException {
-    final String PROPERTY_NAME = "hive_database_type";
-    final String PROPERTY_VALUE_OLD = "postgresql";
-    final String PROPERTY_VALUE_NEW = "postgres";
-    final String PROPERTY_CONFIG_NAME = "hive-env";
-
-    AmbariManagementController ambariManagementController = injector.getInstance(
-            AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-      Map<String, String> prop = new HashMap<>();
-      String hive_database_type = null;
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          hive_database_type = null;
-
-          if (cluster.getDesiredConfigByType(PROPERTY_CONFIG_NAME) != null) {
-            hive_database_type = cluster.getDesiredConfigByType(
-                    PROPERTY_CONFIG_NAME).getProperties().get(PROPERTY_NAME);
-          }
-
-          if (hive_database_type != null && !hive_database_type.isEmpty() &&
-                  hive_database_type.equals(PROPERTY_VALUE_OLD)) {
-            prop.put(PROPERTY_NAME, PROPERTY_VALUE_NEW);
-            updateConfigurationPropertiesForCluster(cluster, PROPERTY_CONFIG_NAME, prop, true, false);
-          }
-        }
-      }
-
-    }
-  }
-
-  /**
-   * Removes Nagios and all associated components and states.
-   */
-  protected void removeNagiosService() {
-    executeInTransaction(new RemoveNagiosRunnable());
-  }
-
-  /**
-   * The RemoveNagiosRunnable is used to remove Nagios from the cluster. This
-   * runnable is expected to run inside of a transaction so that if any of the
-   * removals fails, Nagios is returned to a valid service state.
-   */
-  protected final class RemoveNagiosRunnable implements Runnable {
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void run() {
-      ClusterDAO clusterDao = injector.getInstance(ClusterDAO.class);
-      ClusterServiceDAO clusterServiceDao = injector.getInstance(ClusterServiceDAO.class);
-      ServiceComponentDesiredStateDAO componentDesiredStateDao = injector.getInstance(ServiceComponentDesiredStateDAO.class);
-      ServiceDesiredStateDAO desiredStateDao = injector.getInstance(ServiceDesiredStateDAO.class);
-      HostComponentDesiredStateDAO hostComponentDesiredStateDao = injector.getInstance(HostComponentDesiredStateDAO.class);
-      HostComponentStateDAO hostComponentStateDao = injector.getInstance(HostComponentStateDAO.class);
-
-      List<ClusterEntity> clusters = clusterDao.findAll();
-      if (null == clusters) {
-        return;
-      }
-
-      for (ClusterEntity cluster : clusters) {
-        ClusterServiceEntity nagios = clusterServiceDao.findByClusterAndServiceNames(
-            cluster.getClusterName(), "NAGIOS");
-
-        if (null == nagios) {
-          continue;
-        }
-
-        Collection<ServiceComponentDesiredStateEntity> serviceComponentDesiredStates = nagios.getServiceComponentDesiredStateEntities();
-        ServiceDesiredStateEntity serviceDesiredState = nagios.getServiceDesiredStateEntity();
-
-        // remove all component states
-        for (ServiceComponentDesiredStateEntity componentDesiredState : serviceComponentDesiredStates) {
-          Collection<HostComponentStateEntity> hostComponentStateEntities = componentDesiredState.getHostComponentStateEntities();
-          Collection<HostComponentDesiredStateEntity> hostComponentDesiredStateEntities = componentDesiredState.getHostComponentDesiredStateEntities();
-
-          // remove host states
-          for (HostComponentStateEntity hostComponentState : hostComponentStateEntities) {
-            hostComponentStateDao.remove(hostComponentState);
-          }
-
-          // remove host desired states
-          for (HostComponentDesiredStateEntity hostComponentDesiredState : hostComponentDesiredStateEntities) {
-            hostComponentDesiredStateDao.remove(hostComponentDesiredState);
-          }
-
-          // remove component state
-          componentDesiredStateDao.removeByName(nagios.getClusterId(),
-              componentDesiredState.getServiceName(), componentDesiredState.getComponentName());
-        }
-
-        // remove service state
-        desiredStateDao.remove(serviceDesiredState);
-
-        // remove service
-        cluster.getClusterServiceEntities().remove(nagios);
-        ClusterServiceEntityPK primaryKey = new ClusterServiceEntityPK();
-        primaryKey.setClusterId(nagios.getClusterId());
-        primaryKey.setServiceName(nagios.getServiceName());
-        clusterServiceDao.removeByPK(primaryKey);
-      }
-    }
-  }
-  protected void addMissingConfigs() throws AmbariException {
-    updateConfigurationProperties("hive-site", Collections.singletonMap("hive.server2.transport.mode", "binary"), false, false);
-  }
-
-  /**
-   * Update the cluster-env configuration (in all clusters) to add missing properties and remove
-   * obsolete properties.
-   *
-   * @throws org.apache.ambari.server.AmbariException
-   */
-  protected void updateClusterEnvConfiguration() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
-
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null) {
-        for (final Cluster cluster : clusterMap.values()) {
-          Config configClusterEnv = cluster.getDesiredConfigByType("cluster-env");
-
-          if (configClusterEnv != null) {
-            Map<String, String> properties = configClusterEnv.getProperties();
-
-            if (properties != null) {
-              // -----------------------------------------
-              // Add missing properties
-
-              if (!properties.containsKey("smokeuser_principal_name")) {
-                // Add smokeuser_principal_name, from cluster-env/smokeuser
-                // Ideally a realm should be added, but for now we can assume the default realm and
-                // leave it off
-                String smokeUser = properties.get("smokeuser");
-
-                if ((smokeUser == null) || smokeUser.isEmpty()) {
-                  // If the smokeuser property is not set in the current configuration set, grab
-                  // it from the stack defaults:
-                  Set<PropertyInfo> stackProperties = configHelper.getStackProperties(cluster);
-
-                  if (stackProperties != null) {
-                    for (PropertyInfo propertyInfo : stackProperties) {
-                      String filename = propertyInfo.getFilename();
-
-                      if ((filename != null) && "cluster-env".equals(ConfigHelper.fileNameToConfigType(filename))) {
-                        smokeUser = propertyInfo.getValue();
-                        break;
-                      }
-                    }
-                  }
-
-                  // If a default value for smokeuser was not found, force it to be "ambari-qa"
-                  if ((smokeUser == null) || smokeUser.isEmpty()) {
-                    smokeUser = "ambari-qa";
-                  }
-                }
-
-                properties.put("smokeuser_principal_name", smokeUser);
-              }
-
-              // Add missing properties (end)
-              // -----------------------------------------
-
-              // -----------------------------------------
-              // Remove obsolete properties
-
-              // Remove obsolete properties (end)
-              // -----------------------------------------
-
-              // -----------------------------------------
-              // Set the updated configuration
-
-              configHelper.createConfigType(cluster, cluster.getDesiredStackVersion(),
-                  ambariManagementController, "cluster-env", properties,
-                  AUTHENTICATED_USER_NAME, "Upgrading to Ambari 2.0");
-
-              // Set configuration (end)
-              // -----------------------------------------
-
-            }
-          }
-        }
-      }
-    }
-  }
-}
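
Although the whole 2.0.0 catalog above is being deleted, its updateClusterEnvConfiguration() is a readable example of a layered-fallback pattern: derive a missing property from an existing one, then from the stack default, then from a hard-coded last resort. A compact Python restatement of that precedence follows; the names are illustrative only and the original is Java.

# Illustrative pseudocode for the removed updateClusterEnvConfiguration().
def fill_smokeuser_principal(cluster_env, stack_defaults):
    if "smokeuser_principal_name" not in cluster_env:
        smoke_user = (cluster_env.get("smokeuser")
                      or stack_defaults.get("smokeuser")
                      or "ambari-qa")  # last-resort default, as in the Java code
        cluster_env["smokeuser_principal_name"] = smoke_user
    return cluster_env

# fill_smokeuser_principal({"smokeuser": "ambari-qa"}, {}) adds
# smokeuser_principal_name="ambari-qa" and leaves other keys untouched.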


[39/63] [abbrv] ambari git commit: Updated team page. (yusaku)

Posted by ab...@apache.org.
Updated team page. (yusaku)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3529d053
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3529d053
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3529d053

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 3529d053dae7381f098d6998280ab7f152e86843
Parents: c1eeafb
Author: Yusaku Sako <yu...@hortonworks.com>
Authored: Tue Jun 27 10:19:26 2017 -0700
Committer: Yusaku Sako <yu...@hortonworks.com>
Committed: Tue Jun 27 10:19:26 2017 -0700

----------------------------------------------------------------------
 docs/pom.xml | 12 ++++++++++++
 1 file changed, 12 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/3529d053/docs/pom.xml
----------------------------------------------------------------------
diff --git a/docs/pom.xml b/docs/pom.xml
index a862796..390ade4 100644
--- a/docs/pom.xml
+++ b/docs/pom.xml
@@ -234,6 +234,18 @@
             </organization>
         </developer>
         <developer>
+            <id>amagyar</id>
+            <name>Attila Magyar</name>
+            <email>amagyar@apache.org</email>
+            <timezone>+1</timezone>
+            <roles>
+                <role>Committer</role>
+            </roles>
+            <organization>
+                Hortonworks
+            </organization>
+        </developer>
+        <developer>
             <id>avijayan</id>
             <name>Aravindan Vijayan</name>
             <email>avijayan@apache.org</email>


[29/63] [abbrv] ambari git commit: AMBARI-21206 - Remove Zookeeper as a required service from YARN

Posted by ab...@apache.org.
AMBARI-21206 - Remove Zookeeper as a required service from YARN


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a2464b90
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a2464b90
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a2464b90

Branch: refs/heads/branch-feature-logsearch-ui
Commit: a2464b9045637c1d5014db4aff7d83a0bc573fc0
Parents: 23cc628
Author: Tim Thorpe <tt...@apache.org>
Authored: Mon Jun 26 07:58:15 2017 -0700
Committer: Tim Thorpe <tt...@apache.org>
Committed: Mon Jun 26 07:58:15 2017 -0700

----------------------------------------------------------------------
 .../YARN/3.0.0.3.0/configuration/yarn-site.xml  |  10 +-
 .../common-services/YARN/3.0.0.3.0/metainfo.xml |  46 ++++++-
 .../YARN/3.0.0.3.0/service_advisor.py           |  53 +++++++-
 .../stacks/HDP/2.2/services/stack_advisor.py    |  53 +++++++-
 .../stacks/2.2/common/test_stack_advisor.py     | 132 ++++++++++++++++++-
 .../stacks/2.6/common/test_stack_advisor.py     |   9 ++
 6 files changed, 289 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a2464b90/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
index 64e0bcb..c77aa2a 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
@@ -486,7 +486,10 @@
   </property>
   <property>
     <name>hadoop.registry.zk.quorum</name>
-    <value>localhost:2181</value>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
     <description>
       List of hostname:port pairs defining the zookeeper quorum binding for the registry
     </description>
@@ -553,7 +556,10 @@
   </property>
   <property>
     <name>yarn.resourcemanager.zk-address</name>
-    <value>localhost:2181</value>
+    <value></value>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
     <description>
       List Host:Port of the ZooKeeper servers to be used by the RM. comma separated host:port pairs, each corresponding to a zk server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" If the optional chroot suffix is used the example would look like: "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a" where the client would be rooted at "/app/a" and all paths would be relative to this root - ie getting/setting/etc...  "/foo/bar" would result in operations being run on "/app/a/foo/bar" (from the server perspective).
     </description>
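
Both properties now default to an empty, explicitly valid value instead of localhost:2181. When ZooKeeper is present, the expected value is the usual comma-separated host:port quorum string; the hypothetical helper below, which is not part of this patch, shows the shape.

# Hypothetical helper, not part of the patch: builds the quorum string that
# hadoop.registry.zk.quorum and yarn.resourcemanager.zk-address expect when
# ZooKeeper is installed.
def zk_quorum(zookeeper_hosts, client_port="2181"):
    return ",".join("%s:%s" % (host, client_port) for host in zookeeper_hosts)

# zk_quorum(["zk1.example.com", "zk2.example.com", "zk3.example.com"])
# -> "zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181"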

http://git-wip-us.apache.org/repos/asf/ambari/blob/a2464b90/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
index 061587d..90f4a92 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
@@ -73,17 +73,41 @@
             <timeout>1200</timeout>
           </commandScript>
 
-          <!-- TODO HDP 3.0, add later after UI is fixed,
           <dependencies>
             <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>false</enabled>
+              </auto-deploy>
+              <conditions>
+                <condition xsi:type="propertyExists">
+                  <configType>yarn-site</configType>
+                  <property>yarn.resourcemanager.recovery.enabled</property>
+                  <value>true</value>
+                </condition>
+                <condition xsi:type="propertyExists">
+                  <configType>yarn-site</configType>
+                  <property>yarn.resourcemanager.ha.enabled</property>
+                  <value>true</value>
+                </condition>
+                <condition xsi:type="propertyExists">
+                  <configType>yarn-site</configType>
+                  <property>hadoop.registry.rm.enabled</property>
+                  <value>true</value>
+                </condition>
+              </conditions>
+            </dependency>
+            <!-- TODO HDP 3.0, add later after UI is fixed,
+            <dependency>
               <name>TEZ/TEZ_CLIENT</name>
               <scope>host</scope>
               <auto-deploy>
                 <enabled>true</enabled>
               </auto-deploy>
             </dependency>
+            -->
           </dependencies>
-          -->
 
           <logs>
             <log>
@@ -145,6 +169,23 @@
               <logId>yarn_nodemanager</logId>
             </log>
           </logs>
+
+          <dependencies>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>false</enabled>
+              </auto-deploy>
+              <conditions>
+                <condition xsi:type="propertyExists">
+                  <configType>yarn-site</configType>
+                  <property>yarn.nodemanager.recovery.enabled</property>
+                  <value>true</value>
+                </condition>
+              </conditions>
+            </dependency>
+          </dependencies>
         </component>
 
         <component>
@@ -214,7 +255,6 @@
       <requiredServices>
         <service>HDFS</service>
         <service>MAPREDUCE2</service>
-        <service>ZOOKEEPER</service>
       </requiredServices>
 
       <themes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a2464b90/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
index 0fb538d..1af9821 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
@@ -351,12 +351,21 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.hierarchy', 'delete', 'true')
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.mount', 'delete', 'true')
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.mount-path', 'delete', 'true')
-    # recommend hadoop.registry.rm.enabled based on SLIDER in services
+    # recommend hadoop.registry.rm.enabled based on SLIDER and ZOOKEEPER in services
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-    if "SLIDER" in servicesList:
+    if "SLIDER" in servicesList and "ZOOKEEPER" in servicesList:
       putYarnProperty('hadoop.registry.rm.enabled', 'true')
     else:
       putYarnProperty('hadoop.registry.rm.enabled', 'false')
+    # recommend enabling RM and NM recovery if ZOOKEEPER in services
+    if "ZOOKEEPER" in servicesList:
+      putYarnProperty('yarn.resourcemanager.recovery.enabled', 'true')
+      putYarnProperty('yarn.nodemanager.recovery.enabled', 'true')
+    else:
+      putYarnProperty('yarn.resourcemanager.recovery.enabled', 'false')
+      putYarnProperty('yarn.nodemanager.recovery.enabled', 'false')
+      # recommend disabling RM HA if ZOOKEEPER is not in services
+      putYarnProperty('yarn.resourcemanager.ha.enabled', 'false')
 
   def recommendYARNConfigurationsFromHDP23(self, configurations, clusterData, services, hosts):
     putYarnSiteProperty = self.putProperty(configurations, "yarn-site", services)
@@ -1795,6 +1804,7 @@ class YARNValidator(service_advisor.ServiceAdvisor):
     self.as_super.__init__(*args, **kwargs)
 
     self.validators = [("yarn-site", self.validateYARNSiteConfigurationsFromHDP206),
+                       ("yarn-site", self.validateYARNSiteConfigurationsFromHDP22),
                        ("yarn-site", self.validateYARNSiteConfigurationsFromHDP25),
                        ("yarn-site" , self.validateYARNSiteConfigurationsFromHDP26),
                        ("yarn-env", self.validateYARNEnvConfigurationsFromHDP206),
@@ -1837,6 +1847,45 @@ class YARNValidator(service_advisor.ServiceAdvisor):
                         {"config-name": 'yarn.scheduler.maximum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.maximum-allocation-mb')} ]
     return self.toConfigurationValidationProblems(validationItems, "yarn-site")
 
+  def validateYARNSiteConfigurationsFromHDP22(self, properties, recommendedDefaults, configurations, services, hosts):
+    """
+    This was copied from HDP 2.2; validate yarn-site
+    :return: A list of configuration validation problems.
+    """
+    yarn_site = properties
+    validationItems = []
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+
+    zk_hosts = self.getHostsForComponent(services, "ZOOKEEPER", "ZOOKEEPER_SERVER")
+    if len(zk_hosts) == 0:
+      # ZOOKEEPER_SERVER isn't assigned to at least one host
+      if 'yarn.resourcemanager.recovery.enabled' in yarn_site and \
+              'true' == yarn_site['yarn.resourcemanager.recovery.enabled']:
+        validationItems.append({"config-name": "yarn.resourcemanager.recovery.enabled",
+                                "item": self.getWarnItem(
+                                  "YARN resource manager recovery can only be enabled if ZOOKEEPER is installed.")})
+      if 'yarn.nodemanager.recovery.enabled' in yarn_site and \
+              'true' == yarn_site['yarn.nodemanager.recovery.enabled']:
+        validationItems.append({"config-name": "yarn.nodemanager.recovery.enabled",
+                                "item": self.getWarnItem(
+                                  "YARN node manager recovery can only be enabled if ZOOKEEPER is installed.")})
+
+    if len(zk_hosts) < 3:
+      if 'yarn.resourcemanager.ha.enabled' in yarn_site and \
+              'true' == yarn_site['yarn.resourcemanager.ha.enabled']:
+        validationItems.append({"config-name": "yarn.resourcemanager.ha.enabled",
+                                "item": self.getWarnItem(
+                                  "You must have at least 3 ZooKeeper Servers in your cluster to enable ResourceManager HA.")})
+
+    if 'ZOOKEEPER' not in servicesList or 'SLIDER' not in servicesList:
+      if 'hadoop.registry.rm.enabled' in yarn_site and \
+              'true' == yarn_site['hadoop.registry.rm.enabled']:
+        validationItems.append({"config-name": "hadoop.registry.rm.enabled",
+                                "item": self.getWarnItem(
+                                  "HADOOP resource manager registry can only be enabled if ZOOKEEPER and SLIDER are installed.")})
+
+    return self.toConfigurationValidationProblems(validationItems, "yarn-site")
+
   def validateYARNSiteConfigurationsFromHDP25(self, properties, recommendedDefaults, configurations, services, hosts):
     yarn_site_properties = self.getSiteProperties(configurations, "yarn-site")
     validationItems = []

http://git-wip-us.apache.org/repos/asf/ambari/blob/a2464b90/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index 726514b..54ddd89 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -174,12 +174,23 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.hierarchy', 'delete', 'true')
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.mount', 'delete', 'true')
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.mount-path', 'delete', 'true')
-    # recommend hadoop.registry.rm.enabled based on SLIDER in services
+    # recommend hadoop.registry.rm.enabled based on SLIDER and ZOOKEEPER in services
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-    if "SLIDER" in servicesList:
+    if "SLIDER" in servicesList and "ZOOKEEPER" in servicesList:
       putYarnProperty('hadoop.registry.rm.enabled', 'true')
     else:
       putYarnProperty('hadoop.registry.rm.enabled', 'false')
+    # recommend enabling RM and NM recovery if ZOOKEEPER in services
+    if "ZOOKEEPER" in servicesList:
+      putYarnProperty('yarn.resourcemanager.recovery.enabled', 'true')
+      putYarnProperty('yarn.nodemanager.recovery.enabled', 'true')
+    else:
+      putYarnProperty('yarn.resourcemanager.recovery.enabled', 'false')
+      putYarnProperty('yarn.nodemanager.recovery.enabled', 'false')
+      # recommend disabling RM HA if ZOOKEEPER is not in services
+      putYarnProperty('yarn.resourcemanager.ha.enabled', 'false')
+
+
 
   def recommendHDFSConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP22StackAdvisor, self).recommendHDFSConfigurations(configurations, clusterData, services, hosts)
@@ -1034,6 +1045,7 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
                "hadoop-env": self.validateHDFSConfigurationsEnv,
                "ranger-hdfs-plugin-properties": self.validateHDFSRangerPluginConfigurations},
       "YARN": {"yarn-env": self.validateYARNEnvConfigurations,
+               "yarn-site": self.validateYARNConfigurations,
                "ranger-yarn-plugin-properties": self.validateYARNRangerPluginConfigurations},
       "HIVE": {"hiveserver2-site": self.validateHiveServer2Configurations,
                "hive-site": self.validateHiveConfigurations,
@@ -1714,6 +1726,43 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
 
     return self.toConfigurationValidationProblems(validationItems, "ranger-storm-plugin-properties")
 
+  def validateYARNConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    parentValidationProblems = super(HDP22StackAdvisor, self).validateYARNConfigurations(properties, recommendedDefaults, configurations, services, hosts)
+    yarn_site = properties
+    validationItems = []
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    zk_hosts = self.getHostsForComponent(services, "ZOOKEEPER", "ZOOKEEPER_SERVER")
+    if len(zk_hosts) == 0:
+      # ZOOKEEPER_SERVER isn't assigned to at least one host
+      if 'yarn.resourcemanager.recovery.enabled' in yarn_site and \
+              'true' == yarn_site['yarn.resourcemanager.recovery.enabled']:
+        validationItems.append({"config-name": "yarn.resourcemanager.recovery.enabled",
+                                "item": self.getWarnItem(
+                                  "YARN resource manager recovery can only be enabled if ZOOKEEPER is installed.")})
+      if 'yarn.nodemanager.recovery.enabled' in yarn_site and \
+              'true' == yarn_site['yarn.nodemanager.recovery.enabled']:
+        validationItems.append({"config-name": "yarn.nodemanager.recovery.enabled",
+                                "item": self.getWarnItem(
+                                  "YARN node manager recovery can only be enabled if ZOOKEEPER is installed.")})
+
+    if len(zk_hosts) < 3:
+      if 'yarn.resourcemanager.ha.enabled' in yarn_site and \
+              'true' == yarn_site['yarn.resourcemanager.ha.enabled']:
+        validationItems.append({"config-name": "yarn.resourcemanager.ha.enabled",
+                                "item": self.getWarnItem(
+                                  "You must have at least 3 ZooKeeper Servers in your cluster to enable ResourceManager HA.")})
+
+    if 'ZOOKEEPER' not in servicesList or 'SLIDER' not in servicesList:
+      if 'hadoop.registry.rm.enabled' in yarn_site and \
+              'true' == yarn_site['hadoop.registry.rm.enabled']:
+        validationItems.append({"config-name": "hadoop.registry.rm.enabled",
+                                "item": self.getWarnItem(
+                                  "HADOOP resource manager registry can only be enabled if ZOOKEEPER and SLIDER are installed.")})
+
+    validationProblems = self.toConfigurationValidationProblems(validationItems, "yarn-site")
+    validationProblems.extend(parentValidationProblems)
+    return validationProblems
+
   def validateYARNEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     parentValidationProblems = super(HDP22StackAdvisor, self).validateYARNEnvConfigurations(properties, recommendedDefaults, configurations, services, hosts)
     validationItems = []
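
Note on the hunk above: the new validateYARNConfigurations checks reduce to a few membership tests on the ZooKeeper host count and the installed service list. A minimal, self-contained Python sketch of that decision logic (a plain function for illustration, not the stack-advisor API; names are ours):

def yarn_site_warnings(yarn_site, zk_host_count, services_list):
    # Returns the yarn-site config names that would receive a warn item.
    def enabled(key):
        return yarn_site.get(key) == 'true'
    warned = []
    if zk_host_count == 0:
        # recovery needs a ZooKeeper-backed state store
        if enabled('yarn.resourcemanager.recovery.enabled'):
            warned.append('yarn.resourcemanager.recovery.enabled')
        if enabled('yarn.nodemanager.recovery.enabled'):
            warned.append('yarn.nodemanager.recovery.enabled')
    if zk_host_count < 3 and enabled('yarn.resourcemanager.ha.enabled'):
        # ResourceManager HA wants at least three ZooKeeper servers
        warned.append('yarn.resourcemanager.ha.enabled')
    if ('ZOOKEEPER' not in services_list or 'SLIDER' not in services_list) \
            and enabled('hadoop.registry.rm.enabled'):
        warned.append('hadoop.registry.rm.enabled')
    return warned

# A single-ZooKeeper cluster with RM HA switched on trips only the HA warning:
print(yarn_site_warnings({'yarn.resourcemanager.ha.enabled': 'true'}, 1,
                         ['ZOOKEEPER', 'YARN']))
# -> ['yarn.resourcemanager.ha.enabled']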

http://git-wip-us.apache.org/repos/asf/ambari/blob/a2464b90/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index 571ff26..ee620b5 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -906,7 +906,62 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.maximum-allocation-vcores": "4",
           "yarn.scheduler.minimum-allocation-vcores": "1",
           "yarn.nodemanager.resource.cpu-vcores": "4",
-          "hadoop.registry.rm.enabled": "true"
+          "hadoop.registry.rm.enabled": "false",
+          "yarn.resourcemanager.recovery.enabled": "false",
+          "yarn.nodemanager.recovery.enabled": "false",
+          "yarn.resourcemanager.ha.enabled": "false"
+        }
+      }
+    }
+
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations, expected)
+
+  def test_recommendYARNConfigurationsWithZKAndSlider(self):
+    configurations = {}
+    services = {"configurations": configurations}
+    services['services'] = [
+      {
+        "StackServices": {
+          "service_name": "ZOOKEEPER"
+        },
+        },
+      {
+        "StackServices": {
+          "service_name": "YARN"
+        },
+        },
+      {
+        "StackServices": {
+          "service_name": "SLIDER"
+        },
+        }
+    ]
+    clusterData = {
+      "cpu": 4,
+      "containers" : 5,
+      "ramPerContainer": 256,
+      "yarnMinContainerSize": 256
+    }
+    expected = {
+      "yarn-env": {
+        "properties": {
+          "min_user_id": "500",
+          'service_check.queue.name': 'default'
+        }
+      },
+      "yarn-site": {
+        "properties": {
+          "yarn.nodemanager.linux-container-executor.group": "hadoop",
+          "yarn.nodemanager.resource.memory-mb": "1280",
+          "yarn.scheduler.minimum-allocation-mb": "256",
+          "yarn.scheduler.maximum-allocation-mb": "1280",
+          "yarn.scheduler.maximum-allocation-vcores": "4",
+          "yarn.scheduler.minimum-allocation-vcores": "1",
+          "yarn.nodemanager.resource.cpu-vcores": "4",
+          "hadoop.registry.rm.enabled": "true",
+          "yarn.resourcemanager.recovery.enabled": "true",
+          "yarn.nodemanager.recovery.enabled": "true"
         }
       }
     }
@@ -914,6 +969,55 @@ class TestHDP22StackAdvisor(TestCase):
     self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
     self.assertEquals(configurations, expected)
 
+  def test_recommendYARNConfigurationsWithZK(self):
+    configurations = {}
+    services = {"configurations": configurations}
+    services['services'] = [
+      {
+        "StackServices": {
+          "service_name": "ZOOKEEPER"
+        },
+        },
+      {
+        "StackServices": {
+          "service_name": "YARN"
+        },
+        }
+    ]
+    clusterData = {
+      "cpu": 4,
+      "containers" : 5,
+      "ramPerContainer": 256,
+      "yarnMinContainerSize": 256
+    }
+    expected = {
+      "yarn-env": {
+        "properties": {
+          "min_user_id": "500",
+          'service_check.queue.name': 'default'
+        }
+      },
+      "yarn-site": {
+        "properties": {
+          "yarn.nodemanager.linux-container-executor.group": "hadoop",
+          "yarn.nodemanager.resource.memory-mb": "1280",
+          "yarn.scheduler.minimum-allocation-mb": "256",
+          "yarn.scheduler.maximum-allocation-mb": "1280",
+          "yarn.scheduler.maximum-allocation-vcores": "4",
+          "yarn.scheduler.minimum-allocation-vcores": "1",
+          "yarn.nodemanager.resource.cpu-vcores": "4",
+          "hadoop.registry.rm.enabled": "false",
+          "yarn.resourcemanager.recovery.enabled": "true",
+          "yarn.nodemanager.recovery.enabled": "true"
+        }
+      }
+    }
+
+    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
+    self.assertEquals(configurations, expected)
+
+
+
   def test_recommendSPARKConfigurations(self):
     configurations = {}
     services = {"configurations": configurations}
@@ -979,7 +1083,10 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.minimum-allocation-vcores": "1",
           "yarn.scheduler.maximum-allocation-mb": "1280",
           "yarn.nodemanager.resource.cpu-vcores": "2",
-          "hadoop.registry.rm.enabled": "false"
+          "hadoop.registry.rm.enabled": "false",
+          "yarn.resourcemanager.recovery.enabled": "false",
+          "yarn.nodemanager.recovery.enabled": "false",
+          "yarn.resourcemanager.ha.enabled": "false"
         },
         "property_attributes": {
           'yarn.nodemanager.resource.memory-mb': {'maximum': '1877'},
@@ -1806,7 +1913,10 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.minimum-allocation-vcores": "1",
           "yarn.scheduler.maximum-allocation-mb": "1792",
           "yarn.nodemanager.resource.cpu-vcores": "1",
-          "hadoop.registry.rm.enabled": "false"
+          "hadoop.registry.rm.enabled": "false",
+          "yarn.resourcemanager.recovery.enabled": "false",
+          "yarn.nodemanager.recovery.enabled": "false",
+          "yarn.resourcemanager.ha.enabled": "false"
         },
         "property_attributes": {
           'yarn.nodemanager.resource.memory-mb': {'maximum': '1877'},
@@ -2070,7 +2180,10 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.minimum-allocation-vcores": "1",
           "yarn.scheduler.maximum-allocation-mb": "1280",
           "yarn.nodemanager.resource.cpu-vcores": "1",
-          "hadoop.registry.rm.enabled": "false"
+          "hadoop.registry.rm.enabled": "false",
+          "yarn.resourcemanager.recovery.enabled": "false",
+          "yarn.nodemanager.recovery.enabled": "false",
+          "yarn.resourcemanager.ha.enabled": "false"
         },
         "property_attributes": {
           'yarn.nodemanager.resource.memory-mb': {'maximum': '1877'},
@@ -2285,7 +2398,10 @@ class TestHDP22StackAdvisor(TestCase):
                 "yarn.scheduler.minimum-allocation-vcores": "1",
                 "yarn.scheduler.maximum-allocation-mb": "1280",
                 "yarn.nodemanager.resource.cpu-vcores": "1",
-                "hadoop.registry.rm.enabled": "false"
+                "hadoop.registry.rm.enabled": "false",
+                "yarn.resourcemanager.recovery.enabled": "false",
+                "yarn.nodemanager.recovery.enabled": "false",
+                "yarn.resourcemanager.ha.enabled": "false"
             },
             "property_attributes": {
                 'yarn.nodemanager.resource.memory-mb': {'maximum': '1877'},
@@ -3844,6 +3960,9 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.maximum-allocation-mb": "33792",
           "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler",
           "hadoop.registry.rm.enabled": "false",
+          "yarn.resourcemanager.recovery.enabled": "false",
+          "yarn.nodemanager.recovery.enabled": "false",
+          "yarn.resourcemanager.ha.enabled": "false",
           "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline",
           "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
           "yarn.nodemanager.local-dirs": "/hadoop/yarn/local,/dev/shm/hadoop/yarn/local,/vagrant/hadoop/yarn/local",
@@ -3903,6 +4022,9 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.maximum-allocation-mb": "33792",
           "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler",
           "hadoop.registry.rm.enabled": "false",
+          "yarn.resourcemanager.recovery.enabled": "false",
+          "yarn.nodemanager.recovery.enabled": "false",
+          "yarn.resourcemanager.ha.enabled": "false",
           "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline",
           "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
           "yarn.nodemanager.local-dirs": "/hadoop/yarn/local,/dev/shm/hadoop/yarn/local,/vagrant/hadoop/yarn/local",

http://git-wip-us.apache.org/repos/asf/ambari/blob/a2464b90/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
index d4d28c9..96a595f 100644
--- a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
@@ -1153,6 +1153,9 @@ class TestHDP26StackAdvisor(TestCase):
       'yarn-site': {
         'properties': {
           'hadoop.registry.rm.enabled': 'false',
+          'yarn.resourcemanager.recovery.enabled': 'false',
+          'yarn.nodemanager.recovery.enabled': 'false',
+          'yarn.resourcemanager.ha.enabled': 'false',
           'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes': '',
           'yarn.scheduler.minimum-allocation-vcores': '1',
           'yarn.scheduler.maximum-allocation-vcores': '4',
@@ -1329,6 +1332,9 @@ class TestHDP26StackAdvisor(TestCase):
       'yarn-site': {
         'properties': {
           'hadoop.registry.rm.enabled': 'false',
+          'yarn.resourcemanager.recovery.enabled': 'false',
+          'yarn.nodemanager.recovery.enabled': 'false',
+          'yarn.resourcemanager.ha.enabled': 'false',
           'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes': '',
           'yarn.authorization-provider': 'org.apache.ranger.authorization.yarn.authorizer.RangerYarnAuthorizer',
           'yarn.acl.enable': 'true',
@@ -1436,6 +1442,9 @@ class TestHDP26StackAdvisor(TestCase):
       'yarn-site': {
         'properties': {
           'hadoop.registry.rm.enabled': 'false',
+          'yarn.resourcemanager.recovery.enabled': 'false',
+          'yarn.nodemanager.recovery.enabled': 'false',
+          'yarn.resourcemanager.ha.enabled': 'false',
           'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes': '',
           'yarn.authorization-provider': 'org.apache.ranger.authorization.yarn.authorizer.RangerYarnAuthorizer',
           'yarn.acl.enable': 'true',
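
The expected dictionaries in the 2.2 and 2.6 stack-advisor tests above encode the recommendation side of the same rule: recovery is recommended on only when ZOOKEEPER is installed, the registry flag additionally requires SLIDER, and RM HA is only forced off when ZOOKEEPER is absent. A small Python distillation of that mapping, inferred from the test expectations rather than copied from the stack-advisor source:

def recommended_yarn_flags(installed_services):
    has_zk = 'ZOOKEEPER' in installed_services
    flags = {'hadoop.registry.rm.enabled':
             'true' if has_zk and 'SLIDER' in installed_services else 'false'}
    if has_zk:
        flags['yarn.resourcemanager.recovery.enabled'] = 'true'
        flags['yarn.nodemanager.recovery.enabled'] = 'true'
    else:
        flags['yarn.resourcemanager.recovery.enabled'] = 'false'
        flags['yarn.nodemanager.recovery.enabled'] = 'false'
        # RM HA is only forced off when ZooKeeper is absent
        flags['yarn.resourcemanager.ha.enabled'] = 'false'
    return flags

for installed in (['YARN'], ['ZOOKEEPER', 'YARN'], ['ZOOKEEPER', 'YARN', 'SLIDER']):
    print(installed, recommended_yarn_flags(installed))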


[26/63] [abbrv] ambari git commit: AMBARI-21332. Wizard's Minimized State - Show Who Initiated (alexantonenko)

Posted by ab...@apache.org.
AMBARI-21332. Wizard's Minimized State - Show Who Initiated (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/16dc4056
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/16dc4056
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/16dc4056

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 16dc4056296615a7e6ecb5509a31ab11c95ac4ac
Parents: af1bf85
Author: Alex Antonenko <hi...@gmail.com>
Authored: Fri Jun 23 16:35:16 2017 +0300
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Mon Jun 26 02:14:16 2017 +0300

----------------------------------------------------------------------
 ambari-web/app/controllers/global/wizard_watcher_controller.js | 6 ++----
 ambari-web/app/messages.js                                     | 2 +-
 .../test/controllers/global/wizard_watcher_controller_test.js  | 3 ++-
 3 files changed, 5 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/16dc4056/ambari-web/app/controllers/global/wizard_watcher_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/global/wizard_watcher_controller.js b/ambari-web/app/controllers/global/wizard_watcher_controller.js
index 1562d7c..4ae000d 100644
--- a/ambari-web/app/controllers/global/wizard_watcher_controller.js
+++ b/ambari-web/app/controllers/global/wizard_watcher_controller.js
@@ -47,10 +47,8 @@ App.WizardWatcherController = Em.Controller.extend(App.Persist, {
    * @type {string}
    */
   wizardDisplayName: function() {
-    if (this.get('controllerName')) {
-      return Em.I18n.t('wizard.inProgress').format(App.router.get(this.get('controllerName')).get('displayName'));
-    }
-    return "";
+    const controllerName = this.get('controllerName');
+    return controllerName ? Em.I18n.t('wizard.inProgress').format(App.router.get(controllerName).get('displayName'), this.get('wizardUser')) : '';
   }.property('controllerName'),
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/16dc4056/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 6feebb1..e88ec42 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -3270,7 +3270,7 @@ Em.I18n.translations = {
   'utils.ajax.defaultErrorPopupBody.message': 'received on {0} method for API: {1}',
   'utils.ajax.defaultErrorPopupBody.statusCode': '{0} status code',
 
-  'wizard.inProgress': '{0} in Progress',
+  'wizard.inProgress': '{0} in Progress | Initiated by {1}',
 
   'alerts.instance.fullLogPopup.header': 'Instance Response',
   'admin.addHawqStandby.button.enable': 'Add HAWQ Standby Master',
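
The net effect of this commit is that the computed property now feeds two arguments into the i18n template: the wizard's display name and the user who started it. A rough Python equivalent of the Ember logic, with hypothetical controller and user names (the real code is in wizard_watcher_controller.js above):

WIZARD_IN_PROGRESS = '{0} in Progress | Initiated by {1}'

def wizard_display_name(controller_name, display_names, wizard_user):
    # Empty when no wizard is running; otherwise "<Wizard> in Progress | Initiated by <user>".
    if not controller_name:
        return ''
    return WIZARD_IN_PROGRESS.format(display_names[controller_name], wizard_user)

print(wizard_display_name('installerController',
                          {'installerController': 'Install Wizard'}, 'admin'))
# -> Install Wizard in Progress | Initiated by admin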

http://git-wip-us.apache.org/repos/asf/ambari/blob/16dc4056/ambari-web/test/controllers/global/wizard_watcher_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/global/wizard_watcher_controller_test.js b/ambari-web/test/controllers/global/wizard_watcher_controller_test.js
index 7c72977..969a652 100644
--- a/ambari-web/test/controllers/global/wizard_watcher_controller_test.js
+++ b/ambari-web/test/controllers/global/wizard_watcher_controller_test.js
@@ -42,6 +42,7 @@ describe('App.wizardWatcherController', function () {
 
   describe("#wizardDisplayName", function() {
     beforeEach(function () {
+      controller.set('wizardUser', 'tdk');
       sinon.stub(App.router, 'get').returns(Em.Object.create({displayName: 'Wizard'}));
     });
     afterEach(function () {
@@ -55,7 +56,7 @@ describe('App.wizardWatcherController', function () {
     it("controllerName is correct", function() {
       controller.set('controllerName', 'ctrl1');
       controller.propertyDidChange('wizardDisplayName');
-      expect(controller.get('wizardDisplayName')).to.equal(Em.I18n.t('wizard.inProgress').format('Wizard'));
+      expect(controller.get('wizardDisplayName')).to.equal(Em.I18n.t('wizard.inProgress').format('Wizard', 'tdk'));
     });
   });
 


[10/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog200Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog200Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog200Test.java
deleted file mode 100644
index e82097b..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog200Test.java
+++ /dev/null
@@ -1,915 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertFalse;
-import static junit.framework.Assert.assertNotNull;
-import static junit.framework.Assert.assertNull;
-import static junit.framework.Assert.assertTrue;
-import static junit.framework.Assert.fail;
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.createMockBuilder;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.createStrictMock;
-import static org.easymock.EasyMock.eq;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-import static org.easymock.EasyMock.verify;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-
-import javax.persistence.EntityManager;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.H2DatabaseCleaner;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.dao.ClusterServiceDAO;
-import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
-import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
-import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
-import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
-import org.apache.ambari.server.orm.dao.StackDAO;
-import org.apache.ambari.server.orm.entities.ClusterEntity;
-import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
-import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
-import org.apache.ambari.server.orm.entities.HostEntity;
-import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
-import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.StackEntity;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.OperatingSystemInfo;
-import org.apache.ambari.server.state.PropertyInfo;
-import org.apache.ambari.server.state.RepositoryInfo;
-import org.apache.ambari.server.state.SecurityState;
-import org.apache.ambari.server.state.SecurityType;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.stack.OsFamily;
-import org.easymock.Capture;
-import org.easymock.EasyMock;
-import org.easymock.EasyMockSupport;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.Binder;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.Module;
-import com.google.inject.Provider;
-
-/**
- * {@link UpgradeCatalog200} unit tests.
- */
-public class UpgradeCatalog200Test {
-  private final String CLUSTER_NAME = "c1";
-  private final String HOST_NAME = "h1";
-
-  private final StackId DESIRED_STACK = new StackId("HDP", "2.0.6");
-  private final String DESIRED_REPO_VERSION = "2.0.6-1234";
-
-  private Injector injector;
-  private Provider<EntityManager> entityManagerProvider = createStrictMock(Provider.class);
-  private EntityManager entityManager = createNiceMock(EntityManager.class);
-  private UpgradeCatalogHelper upgradeCatalogHelper;
-
-  @Before
-  public void init() {
-    reset(entityManagerProvider);
-    expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
-    replay(entityManagerProvider);
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-
-    upgradeCatalogHelper = injector.getInstance(UpgradeCatalogHelper.class);
-  }
-
-  @After
-  public void tearDown() throws AmbariException, SQLException {
-    H2DatabaseCleaner.clearDatabaseAndStopPersistenceService(injector);
-  }
-
-  @Test
-  public void testExecuteDDLUpdates() throws Exception {
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    Configuration configuration = createNiceMock(Configuration.class);
-    Connection connection = createNiceMock(Connection.class);
-    Statement statement = createNiceMock(Statement.class);
-    ResultSet resultSet = createNiceMock(ResultSet.class);
-
-    expect(configuration.getDatabaseUrl()).andReturn(Configuration.JDBC_IN_MEMORY_URL).anyTimes();
-
-    Capture<DBAccessor.DBColumnInfo> alertDefinitionIgnoreColumnCapture = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> alertDefinitionDescriptionColumnCapture = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> alertTargetGlobalColumnCapture = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> hostComponentStateColumnCapture = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> hostComponentVersionColumnCapture = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> clustersSecurityTypeColumnCapture = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> hostComponentStateSecurityStateColumnCapture = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> hostComponentDesiredStateSecurityStateColumnCapture = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> hostRoleCommandRetryColumnCapture = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> stageSkippableColumnCapture = EasyMock.newCapture();
-
-    Capture<DBAccessor.DBColumnInfo> viewparameterLabelColumnCapture = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> viewparameterPlaceholderColumnCapture = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> viewparameterDefaultValueColumnCapture = EasyMock.newCapture();
-
-    Capture<DBAccessor.DBColumnInfo> serviceDesiredStateSecurityStateColumnCapture = EasyMock.newCapture();
-    Capture<List<DBAccessor.DBColumnInfo>> clusterVersionCapture = EasyMock.newCapture();
-    Capture<List<DBAccessor.DBColumnInfo>> hostVersionCapture = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> valueColumnCapture = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> dataValueColumnCapture = EasyMock.newCapture();
-    Capture<List<DBAccessor.DBColumnInfo>> alertTargetStatesCapture = EasyMock.newCapture();
-    Capture<List<DBAccessor.DBColumnInfo>> artifactCapture = EasyMock.newCapture();
-    Capture<List<DBAccessor.DBColumnInfo>> kerberosPrincipalCapture = EasyMock.newCapture();
-    Capture<List<DBAccessor.DBColumnInfo>> kerberosPrincipalHostCapture = EasyMock.newCapture();
-
-    Capture<List<DBAccessor.DBColumnInfo>> upgradeCapture = EasyMock.newCapture();
-    Capture<List<DBAccessor.DBColumnInfo>> upgradeGroupCapture = EasyMock.newCapture();
-    Capture<List<DBAccessor.DBColumnInfo>> upgradeItemCapture = EasyMock.newCapture();
-
-    // Alert Definition
-    dbAccessor.addColumn(eq("alert_definition"),
-                          capture(alertDefinitionIgnoreColumnCapture));
-
-    dbAccessor.addColumn(eq("alert_definition"),
-        capture(alertDefinitionDescriptionColumnCapture));
-
-    dbAccessor.createTable(eq("alert_target_states"),
-        capture(alertTargetStatesCapture));
-
-    // alert target
-    dbAccessor.addColumn(eq("alert_target"),
-        capture(alertTargetGlobalColumnCapture));
-
-    // Host Component State
-    dbAccessor.addColumn(eq("hostcomponentstate"),
-        capture(hostComponentStateColumnCapture));
-
-    // Host Component Version
-    dbAccessor.addColumn(eq("hostcomponentstate"),
-        capture(hostComponentVersionColumnCapture));
-
-    // Host Role Command retry allowed
-    dbAccessor.addColumn(eq("host_role_command"),
-        capture(hostRoleCommandRetryColumnCapture));
-
-    // Stage skippable
-    dbAccessor.addColumn(eq("stage"),
-        capture(stageSkippableColumnCapture));
-
-    // Clusters: security type
-    dbAccessor.addColumn(eq("clusters"),
-        capture(clustersSecurityTypeColumnCapture));
-
-    // Host Component State: security State
-    dbAccessor.addColumn(eq("hostcomponentstate"),
-        capture(hostComponentStateSecurityStateColumnCapture));
-
-    // Host Component Desired State: security State
-    dbAccessor.addColumn(eq("hostcomponentdesiredstate"),
-        capture(hostComponentDesiredStateSecurityStateColumnCapture));
-
-    dbAccessor.addColumn(eq("viewparameter"), capture(viewparameterLabelColumnCapture));
-    dbAccessor.addColumn(eq("viewparameter"), capture(viewparameterPlaceholderColumnCapture));
-    dbAccessor.addColumn(eq("viewparameter"), capture(viewparameterDefaultValueColumnCapture));
-
-    // Service Desired State: security State
-    dbAccessor.addColumn(eq("servicedesiredstate"),
-        capture(serviceDesiredStateSecurityStateColumnCapture));
-
-    // Cluster Version
-    dbAccessor.createTable(eq("cluster_version"),
-        capture(clusterVersionCapture), eq("id"));
-
-    // Host Version
-    dbAccessor.createTable(eq("host_version"),
-        capture(hostVersionCapture), eq("id"));
-
-    // Upgrade
-    dbAccessor.createTable(eq("upgrade"), capture(upgradeCapture), eq("upgrade_id"));
-
-    // Upgrade Group item
-    dbAccessor.createTable(eq("upgrade_group"), capture(upgradeGroupCapture), eq("upgrade_group_id"));
-
-    // Upgrade item
-    dbAccessor.createTable(eq("upgrade_item"), capture(upgradeItemCapture), eq("upgrade_item_id"));
-
-    // artifact
-    dbAccessor.createTable(eq("artifact"), capture(artifactCapture),
-        eq("artifact_name"), eq("foreign_keys"));
-
-    // kerberos_principal
-    dbAccessor.createTable(eq("kerberos_principal"), capture(kerberosPrincipalCapture),
-        eq("principal_name"));
-
-    // kerberos_principal_host
-    dbAccessor.createTable(eq("kerberos_principal_host"), capture(kerberosPrincipalHostCapture),
-        eq("principal_name"), eq("host_name"));
-
-    expect(dbAccessor.tableHasColumn("kerberos_principal_host", "host_name")).andReturn(true).atLeastOnce();
-
-    dbAccessor.addFKConstraint(eq("kerberos_principal_host"), eq("FK_krb_pr_host_hostname"),
-        eq("host_name"), eq("hosts"), eq("host_name"), eq(true), eq(false));
-
-    dbAccessor.addFKConstraint(eq("kerberos_principal_host"), eq("FK_krb_pr_host_principalname"),
-        eq("principal_name"), eq("kerberos_principal"), eq("principal_name"), eq(true), eq(false));
-
-    setViewInstancePropertyExpectations(dbAccessor, valueColumnCapture);
-    setViewInstanceDataExpectations(dbAccessor, dataValueColumnCapture);
-
-    // AbstractUpgradeCatalog.addSequence()
-    dbAccessor.getConnection();
-    expectLastCall().andReturn(connection).anyTimes();
-    connection.createStatement();
-    expectLastCall().andReturn(statement).anyTimes();
-    statement.executeQuery(anyObject(String.class));
-    expectLastCall().andReturn(resultSet).anyTimes();
-
-    replay(dbAccessor, configuration, resultSet, statement, connection);
-
-    AbstractUpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor);
-    Class<?> c = AbstractUpgradeCatalog.class;
-    Field f = c.getDeclaredField("configuration");
-    f.setAccessible(true);
-    f.set(upgradeCatalog, configuration);
-
-    upgradeCatalog.executeDDLUpdates();
-    verify(dbAccessor, configuration, resultSet, statement, connection);
-
-    // verify columns for alert_definition
-    verifyAlertDefinitionIgnoreColumn(alertDefinitionIgnoreColumnCapture);
-    verifyAlertDefinitionDescriptionColumn(alertDefinitionDescriptionColumnCapture);
-
-    // verify alert target column for is_global
-    verifyAlertTargetGlobal(alertTargetGlobalColumnCapture);
-
-    // verify new table for alert target states
-    verifyAlertTargetStatesTable(alertTargetStatesCapture);
-
-    // Verify added column in hostcomponentstate table
-    DBAccessor.DBColumnInfo upgradeStateColumn = hostComponentStateColumnCapture.getValue();
-    assertEquals("upgrade_state", upgradeStateColumn.getName());
-    assertEquals(32, (int) upgradeStateColumn.getLength());
-    assertEquals(String.class, upgradeStateColumn.getType());
-    assertEquals("NONE", upgradeStateColumn.getDefaultValue());
-    assertFalse(upgradeStateColumn.isNullable());
-
-    // Verify added column in hostcomponentstate table
-    DBAccessor.DBColumnInfo upgradeVersionColumn = hostComponentVersionColumnCapture.getValue();
-    assertEquals("version", upgradeVersionColumn.getName());
-    assertEquals(32, (int) upgradeVersionColumn.getLength());
-    assertEquals(String.class, upgradeVersionColumn.getType());
-    assertEquals("UNKNOWN", upgradeVersionColumn.getDefaultValue());
-    assertFalse(upgradeVersionColumn.isNullable());
-
-    // Verify added column in host_role_command table
-    DBAccessor.DBColumnInfo upgradeRetryColumn = hostRoleCommandRetryColumnCapture.getValue();
-    assertEquals("retry_allowed", upgradeRetryColumn.getName());
-    assertEquals(1, (int) upgradeRetryColumn.getLength());
-    assertEquals(Integer.class, upgradeRetryColumn.getType());
-    assertEquals(0, upgradeRetryColumn.getDefaultValue());
-    assertFalse(upgradeRetryColumn.isNullable());
-
-    // Verify added column in host_role_command table
-    DBAccessor.DBColumnInfo upgradeSkippableColumn = stageSkippableColumnCapture.getValue();
-    assertEquals("skippable", upgradeSkippableColumn.getName());
-    assertEquals(1, (int) upgradeSkippableColumn.getLength());
-    assertEquals(Integer.class, upgradeSkippableColumn.getType());
-    assertEquals(0, upgradeSkippableColumn.getDefaultValue());
-    assertFalse(upgradeSkippableColumn.isNullable());
-
-    // verify security_type column
-    verifyClustersSecurityType(clustersSecurityTypeColumnCapture);
-
-    // verify security_state columns
-    verifyComponentSecurityStateColumn(hostComponentStateSecurityStateColumnCapture);
-    verifyComponentSecurityStateColumn(hostComponentDesiredStateSecurityStateColumnCapture);
-    verifyServiceSecurityStateColumn(serviceDesiredStateSecurityStateColumnCapture);
-
-    verifyViewParameterColumns(viewparameterLabelColumnCapture, viewparameterPlaceholderColumnCapture,
-        viewparameterDefaultValueColumnCapture);
-
-    // verify artifact columns
-    List<DBAccessor.DBColumnInfo> artifactColumns = artifactCapture.getValue();
-    testCreateArtifactTable(artifactColumns);
-
-    // verify kerberos_principal columns
-    testCreateKerberosPrincipalTable(kerberosPrincipalCapture.getValue());
-
-    // verify kerberos_principal_host columns
-    testCreateKerberosPrincipalHostTable(kerberosPrincipalHostCapture.getValue());
-
-    // Verify capture group sizes
-    assertEquals(7, clusterVersionCapture.getValue().size());
-    assertEquals(4, hostVersionCapture.getValue().size());
-
-    assertViewInstancePropertyColumns(valueColumnCapture);
-    assertViewInstanceDataColumns(dataValueColumnCapture);
-
-    assertEquals(6, upgradeCapture.getValue().size());
-    assertEquals(4, upgradeGroupCapture.getValue().size());
-    assertEquals(7, upgradeItemCapture.getValue().size());
-  }
-
-  /**
-   * Tests that each DML method is invoked.
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testExecuteDMLUpdates() throws Exception {
-    Method removeNagiosService = UpgradeCatalog200.class.getDeclaredMethod("removeNagiosService");
-    Method updateHiveDatabaseType = UpgradeCatalog200.class.getDeclaredMethod("updateHiveDatabaseType");
-    Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod
-        ("addNewConfigurationsFromXml");
-    Method updateTezConfiguration = UpgradeCatalog200.class.getDeclaredMethod("updateTezConfiguration");
-    Method updateFlumeEnvConfig = UpgradeCatalog200.class.getDeclaredMethod("updateFlumeEnvConfig");
-    Method updateClusterEnvConfiguration = UpgradeCatalog200.class.getDeclaredMethod("updateClusterEnvConfiguration");
-    Method updateConfigurationProperties = AbstractUpgradeCatalog.class.getDeclaredMethod
-            ("updateConfigurationProperties", String.class, Map.class, boolean.class, boolean.class);
-    Method persistHDPRepo = UpgradeCatalog200.class.getDeclaredMethod("persistHDPRepo");
-
-    UpgradeCatalog200 upgradeCatalog = createMockBuilder(UpgradeCatalog200.class)
-        .addMockedMethod(removeNagiosService)
-        .addMockedMethod(updateHiveDatabaseType)
-        .addMockedMethod(addNewConfigurationsFromXml)
-        .addMockedMethod(updateTezConfiguration)
-        .addMockedMethod(updateFlumeEnvConfig)
-        .addMockedMethod(updateConfigurationProperties)
-        .addMockedMethod(updateClusterEnvConfiguration)
-        .addMockedMethod(persistHDPRepo)
-        .createMock();
-
-    upgradeCatalog.removeNagiosService();
-    expectLastCall().once();
-    upgradeCatalog.addNewConfigurationsFromXml();
-    expectLastCall();
-
-    upgradeCatalog.updateHiveDatabaseType();
-    expectLastCall().once();
-
-    upgradeCatalog.updateTezConfiguration();
-    expectLastCall().once();
-
-    upgradeCatalog.updateFlumeEnvConfig();
-    expectLastCall().once();
-
-    upgradeCatalog.updateConfigurationProperties("hive-site",
-            Collections.singletonMap("hive.server2.transport.mode", "binary"), false, false);
-    expectLastCall();
-
-    upgradeCatalog.persistHDPRepo();
-    expectLastCall().once();
-
-    upgradeCatalog.updateClusterEnvConfiguration();
-    expectLastCall();
-
-    replay(upgradeCatalog);
-
-    upgradeCatalog.executeDMLUpdates();
-
-    verify(upgradeCatalog);
-  }
-
-  @Test
-  public void testUpdateFlumeEnvConfig() throws AmbariException {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Map<String, String> propertiesFlumeEnv = new HashMap<String, String>() {
-      {
-        put("content", "test");
-      }
-    };
-
-    final Config mockFlumeEnv = easyMockSupport.createNiceMock(Config.class);
-    expect(mockFlumeEnv.getProperties()).andReturn(propertiesFlumeEnv).once();
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-
-    expect(mockClusterExpected.getDesiredConfigByType("flume-env")).andReturn(mockFlumeEnv).atLeastOnce();
-    expect(mockFlumeEnv.getProperties()).andReturn(propertiesFlumeEnv).atLeastOnce();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog200.class).updateFlumeEnvConfig();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testPersistHDPRepo() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController  mockAmbariManagementController = easyMockSupport.createStrictMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockCluster = easyMockSupport.createStrictMock(Cluster.class);
-    final Map<String, Cluster> clusterMap = new HashMap<>();
-    clusterMap.put("c1",mockCluster);
-    OperatingSystemInfo osi = new OperatingSystemInfo("redhat6");
-    HashSet<OperatingSystemInfo> osiSet = new HashSet<>();
-    osiSet.add(osi);
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(EntityManager.class).toInstance(createNiceMock(EntityManager.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockCluster.getClusterName()).andReturn("cc").anyTimes();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog200.class).persistHDPRepo();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testRepositoryTable() {
-    final RepositoryInfo repositoryInfo1 = new RepositoryInfo();
-    repositoryInfo1.setOsType("redhat6");
-    repositoryInfo1.setRepoId("HDP-2.2");
-    repositoryInfo1.setBaseUrl("http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.2.6.0");
-
-    final RepositoryInfo repositoryInfo2 = new RepositoryInfo();
-    repositoryInfo2.setOsType("suse11");
-    repositoryInfo2.setRepoId("HDP-UTILS-1.1.0.20");
-    repositoryInfo2.setBaseUrl("http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/suse11sp3");
-
-    List<RepositoryInfo> repos = new ArrayList<RepositoryInfo>() {{
-      add(repositoryInfo1);
-      add(repositoryInfo2);
-    }};
-    String output = UpgradeCatalog200.repositoryTable(repos);
-    assertEquals("  redhat6 |            HDP-2.2 | http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.2.6.0 \n" +
-                 "   suse11 | HDP-UTILS-1.1.0.20 | http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/suse11sp3 \n",
-      output);
-  }
-
-  @Test
-  public void testUpdateClusterEnvConfiguration() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController  mockAmbariManagementController = easyMockSupport.createStrictMock(AmbariManagementController.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createStrictMock(Cluster.class);
-    final Cluster mockClusterMissingSmokeUser = easyMockSupport.createStrictMock(Cluster.class);
-    final Cluster mockClusterMissingConfig = easyMockSupport.createStrictMock(Cluster.class);
-    final StackId mockStackId = easyMockSupport.createNiceMock(StackId.class);   
-
-    final Config mockClusterEnvExpected = easyMockSupport.createStrictMock(Config.class);
-    final Config mockClusterEnvMissingSmokeUser = easyMockSupport.createStrictMock(Config.class);
-
-    final Map<String, String> propertiesExpectedT0 = new HashMap<>();
-    propertiesExpectedT0.put("kerberos_domain", "EXAMPLE.COM");
-    propertiesExpectedT0.put("user_group", "hadoop");
-    propertiesExpectedT0.put("kinit_path_local", "/usr/bin");
-    propertiesExpectedT0.put("security_enabled", "true");
-    propertiesExpectedT0.put("smokeuser", "ambari-qa");
-    propertiesExpectedT0.put("smokeuser_keytab", "/etc/security/keytabs/smokeuser.headless.keytab");
-    propertiesExpectedT0.put("ignore_groupsusers_create", "false");
-
-    final Map<String, String> propertiesExpectedT1 = new HashMap<>(propertiesExpectedT0);
-    propertiesExpectedT1.put("smokeuser_principal_name", "ambari-qa");
-
-    final Map<String, String> propertiesMissingSmokeUserT0 = new HashMap<>(propertiesExpectedT0);
-    propertiesMissingSmokeUserT0.remove("smokeuser");
-
-    final Map<String, String> propertiesMissingSmokeUserT1 = new HashMap<>(propertiesMissingSmokeUserT0);
-    propertiesMissingSmokeUserT1.put("smokeuser_principal_name", "ambari-qa");
-
-    final PropertyInfo mockSmokeUserPropertyInfo = easyMockSupport.createStrictMock(PropertyInfo.class);
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        bind(Clusters.class).toInstance(mockClusters);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-      put("missing_smokeuser", mockClusterMissingSmokeUser);
-      put("missing_cluster-env", mockClusterMissingConfig);
-
-    }}).once();
-
-      // Expected operation
-    expect(mockClusterExpected.getDesiredConfigByType("cluster-env")).andReturn(mockClusterEnvExpected).once();
-    expect(mockClusterExpected.getDesiredStackVersion()).andReturn(mockStackId).atLeastOnce();
-    expect(mockClusterEnvExpected.getProperties()).andReturn(propertiesExpectedT0).once();
-
-    mockConfigHelper.createConfigType(mockClusterExpected, mockStackId, mockAmbariManagementController,
-        "cluster-env", propertiesExpectedT1, UpgradeCatalog200.AUTHENTICATED_USER_NAME, "Upgrading to Ambari 2.0");
-    expectLastCall().once();
-
-    // Missing smokeuser
-    expect(mockClusterMissingSmokeUser.getDesiredConfigByType("cluster-env")).andReturn(mockClusterEnvMissingSmokeUser).once();
-    expect(mockClusterMissingSmokeUser.getDesiredStackVersion()).andReturn(mockStackId).atLeastOnce();
-    expect(mockClusterEnvMissingSmokeUser.getProperties()).andReturn(propertiesMissingSmokeUserT0).once();
-
-    expect(mockConfigHelper.getStackProperties(mockClusterMissingSmokeUser)).andReturn(Collections.singleton(mockSmokeUserPropertyInfo)).once();
-
-    expect(mockSmokeUserPropertyInfo.getFilename()).andReturn("cluster-env.xml").once();
-    expect(mockSmokeUserPropertyInfo.getValue()).andReturn("ambari-qa").once();
-
-    mockConfigHelper.createConfigType(mockClusterMissingSmokeUser, mockStackId, mockAmbariManagementController,
-        "cluster-env", propertiesMissingSmokeUserT1, UpgradeCatalog200.AUTHENTICATED_USER_NAME, "Upgrading to Ambari 2.0");
-    expectLastCall().once();
-
-    // Missing cluster-env config
-    expect(mockClusterMissingConfig.getDesiredConfigByType("cluster-env")).andReturn(null).once();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog200.class).updateClusterEnvConfiguration();
-    easyMockSupport.verifyAll();
-  }
-
-  /**
-   * Tests that Nagios is correctly removed.
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testDeleteNagiosService() throws Exception {
-    UpgradeCatalog200 upgradeCatalog200 = injector.getInstance(UpgradeCatalog200.class);
-    ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO = injector.getInstance(ServiceComponentDesiredStateDAO.class);
-    HostComponentDesiredStateDAO hostComponentDesiredStateDAO = injector.getInstance(HostComponentDesiredStateDAO.class);
-    HostComponentStateDAO hostComponentStateDAO = injector.getInstance(HostComponentStateDAO.class);
-    ClusterServiceDAO clusterServiceDao = injector.getInstance(ClusterServiceDAO.class);
-    StackDAO stackDAO = injector.getInstance(StackDAO.class);
-
-    // inject AmbariMetaInfo to ensure that stacks get populated in the DB
-    injector.getInstance(AmbariMetaInfo.class);
-
-    StackEntity stackEntity = stackDAO.find(DESIRED_STACK.getStackName(),
-        DESIRED_STACK.getStackVersion());
-
-    assertNotNull(stackEntity);
-
-    final ClusterEntity clusterEntity = upgradeCatalogHelper.createCluster(
-        injector, CLUSTER_NAME, stackEntity, DESIRED_REPO_VERSION);
-
-    RepositoryVersionDAO repositoryVersionDAO = injector.getInstance(RepositoryVersionDAO.class);
-    RepositoryVersionEntity repositoryVersion = repositoryVersionDAO.findByStackAndVersion(
-        stackEntity, DESIRED_REPO_VERSION);
-
-    final ClusterServiceEntity clusterServiceEntityNagios = upgradeCatalogHelper.addService(
-        injector, clusterEntity, "NAGIOS", repositoryVersion);
-
-    final HostEntity hostEntity = upgradeCatalogHelper.createHost(injector,
-        clusterEntity, HOST_NAME);
-
-    upgradeCatalogHelper.addComponent(injector, clusterEntity,
-        clusterServiceEntityNagios, hostEntity, "NAGIOS_SERVER", stackEntity, repositoryVersion);
-
-    ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = serviceComponentDesiredStateDAO.findByName(
-        clusterEntity.getClusterId(), "NAGIOS", "NAGIOS_SERVER");
-
-    assertNotNull(serviceComponentDesiredStateEntity);
-
-    HostComponentDesiredStateEntity hcDesiredStateEntity = hostComponentDesiredStateDAO.findByIndex(
-      clusterEntity.getClusterId(),
-      "NAGIOS",
-      "NAGIOS_SERVER",
-      hostEntity.getHostId()
-    );
-    assertNotNull(hcDesiredStateEntity);
-
-    HostComponentStateEntity hcStateEntity = hostComponentStateDAO.findByIndex(
-        clusterEntity.getClusterId(), "NAGIOS", "NAGIOS_SERVER", hostEntity.getHostId());
-
-    assertNotNull(hcStateEntity);
-
-    ClusterServiceEntity clusterService = clusterServiceDao.findByClusterAndServiceNames(
-        CLUSTER_NAME, "NAGIOS");
-
-    upgradeCatalog200.removeNagiosService();
-
-    clusterService = clusterServiceDao.findByClusterAndServiceNames(
-        CLUSTER_NAME, "NAGIOS");
-
-    assertNull(clusterService);
-  }
-
-  /**
-   * @param dbAccessor
-   * @return
-   */
-  private AbstractUpgradeCatalog getUpgradeCatalog(final DBAccessor dbAccessor) {
-    Module module = new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(DBAccessor.class).toInstance(dbAccessor);
-        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    };
-
-    Injector injector = Guice.createInjector(module);
-    return injector.getInstance(UpgradeCatalog200.class);
-  }
-
-  /**
-   * Verifies new ignore column for alert definition.
-   *
-   * @param alertDefinitionIgnoreColumnCapture
-   */
-  private void verifyAlertDefinitionIgnoreColumn(
-      Capture<DBAccessor.DBColumnInfo> alertDefinitionIgnoreColumnCapture) {
-    DBColumnInfo column = alertDefinitionIgnoreColumnCapture.getValue();
-    Assert.assertEquals(Integer.valueOf(0), column.getDefaultValue());
-    Assert.assertEquals(Integer.valueOf(1), column.getLength());
-    Assert.assertEquals(Short.class, column.getType());
-    Assert.assertEquals("ignore_host", column.getName());
-  }
-
-  /**
-   * Verifies new description column for alert definition.
-   *
-   * @param alertDefinitionDescriptionColumnCapture
-   */
-  private void verifyAlertDefinitionDescriptionColumn(
-      Capture<DBAccessor.DBColumnInfo> alertDefinitionDescriptionColumnCapture) {
-    DBColumnInfo column = alertDefinitionDescriptionColumnCapture.getValue();
-    Assert.assertEquals(null, column.getDefaultValue());
-    Assert.assertEquals(char[].class, column.getType());
-    Assert.assertEquals("description", column.getName());
-  }
-
-  /**
-   * Verifies alert_target_states table.
-   *
-   * @param alertTargetStatesCapture
-   */
-  private void verifyAlertTargetStatesTable(
-      Capture<List<DBAccessor.DBColumnInfo>> alertTargetStatesCapture) {
-    Assert.assertEquals(2, alertTargetStatesCapture.getValue().size());
-  }
-
-  /**
-   * Verifies is_global added to alert target table.
-   *
-   * @param alertTargetGlobalCapture
-   */
-  private void verifyAlertTargetGlobal(
-      Capture<DBAccessor.DBColumnInfo> alertTargetGlobalCapture) {
-    DBColumnInfo column = alertTargetGlobalCapture.getValue();
-    Assert.assertEquals(0, column.getDefaultValue());
-    Assert.assertEquals(Short.class, column.getType());
-    Assert.assertEquals("is_global", column.getName());
-  }
-
-  /**
-   * Verifies new security_state column in servicedesiredsstate table.
-   *
-   * @param securityStateColumnCapture
-   */
-  private void verifyServiceSecurityStateColumn(
-      Capture<DBAccessor.DBColumnInfo> securityStateColumnCapture) {
-    DBColumnInfo column = securityStateColumnCapture.getValue();
-    Assert.assertEquals(SecurityState.UNSECURED.toString(), column.getDefaultValue());
-    Assert.assertEquals(Integer.valueOf(32), column.getLength());
-    Assert.assertEquals(String.class, column.getType());
-    Assert.assertEquals("security_state", column.getName());
-  }
-
-  /**
-   * Verifies new security_type column in clusters table
-   *
-   * @param securityTypeColumnCapture
-   */
-  private void verifyClustersSecurityType(
-      Capture<DBAccessor.DBColumnInfo> securityTypeColumnCapture) {
-    DBColumnInfo column = securityTypeColumnCapture.getValue();
-    Assert.assertEquals(SecurityType.NONE.toString(), column.getDefaultValue());
-    Assert.assertEquals(Integer.valueOf(32), column.getLength());
-    Assert.assertEquals(String.class, column.getType());
-    Assert.assertEquals("security_type", column.getName());
-  }
-
-  /**
-   * Verifies new security_state column in hostcomponentdesiredstate and hostcomponentstate tables
-   *
-   * @param securityStateColumnCapture
-   */
-  private void verifyComponentSecurityStateColumn(
-      Capture<DBAccessor.DBColumnInfo> securityStateColumnCapture) {
-    DBColumnInfo column = securityStateColumnCapture.getValue();
-    Assert.assertEquals(SecurityState.UNSECURED.toString(), column.getDefaultValue());
-    Assert.assertEquals(Integer.valueOf(32), column.getLength());
-    Assert.assertEquals(String.class, column.getType());
-    Assert.assertEquals("security_state", column.getName());
-  }
-
-  private void verifyViewParameterColumns(
-      Capture<DBAccessor.DBColumnInfo> labelColumnCapture,
-      Capture<DBAccessor.DBColumnInfo> placeholderColumnCapture,
-      Capture<DBAccessor.DBColumnInfo> defaultValueColumnCapture) {
-
-
-    DBColumnInfo column = labelColumnCapture.getValue();
-    assertNull(column.getDefaultValue());
-    Assert.assertEquals(Integer.valueOf(255), column.getLength());
-    Assert.assertEquals(String.class, column.getType());
-    Assert.assertEquals("label", column.getName());
-
-    column = placeholderColumnCapture.getValue();
-    assertNull(column.getDefaultValue());
-    Assert.assertEquals(Integer.valueOf(255), column.getLength());
-    Assert.assertEquals(String.class, column.getType());
-    Assert.assertEquals("placeholder", column.getName());
-
-    column = defaultValueColumnCapture.getValue();
-    assertNull(column.getDefaultValue());
-    Assert.assertEquals(Integer.valueOf(2000), column.getLength());
-    Assert.assertEquals(String.class, column.getType());
-    Assert.assertEquals("default_value", column.getName());
-  }
-
-  @Test
-  public void testGetSourceVersion() {
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    UpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor);
-    Assert.assertEquals(null, upgradeCatalog.getSourceVersion());
-  }
-
-  @Test
-  public void testGetTargetVersion() throws Exception {
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    UpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor);
-
-    Assert.assertEquals("2.0.0", upgradeCatalog.getTargetVersion());
-  }
-
-  private void setViewInstancePropertyExpectations(DBAccessor dbAccessor,
-                                                   Capture<DBAccessor.DBColumnInfo> valueColumnCapture)
-      throws SQLException {
-
-    dbAccessor.alterColumn(eq("viewinstanceproperty"), capture(valueColumnCapture));
-  }
-
-  private void setViewInstanceDataExpectations(DBAccessor dbAccessor,
-                                               Capture<DBAccessor.DBColumnInfo> dataValueColumnCapture)
-      throws SQLException {
-
-    dbAccessor.alterColumn(eq("viewinstancedata"), capture(dataValueColumnCapture));
-  }
-
-  private void assertViewInstancePropertyColumns(
-      Capture<DBAccessor.DBColumnInfo> valueColumnCapture) {
-    DBAccessor.DBColumnInfo column = valueColumnCapture.getValue();
-    assertEquals("value", column.getName());
-    assertEquals(2000, (int) column.getLength());
-    assertEquals(String.class, column.getType());
-    assertNull(column.getDefaultValue());
-    assertTrue(column.isNullable());
-  }
-
-  private void assertViewInstanceDataColumns(
-      Capture<DBAccessor.DBColumnInfo> dataValueColumnCapture) {
-    DBAccessor.DBColumnInfo column = dataValueColumnCapture.getValue();
-    assertEquals("value", column.getName());
-    assertEquals(2000, (int) column.getLength());
-    assertEquals(String.class, column.getType());
-    assertNull(column.getDefaultValue());
-    assertTrue(column.isNullable());
-  }
-
-  /**
-   * assert artifact table creation
-   *
-   * @param artifactColumns artifact table columns
-   */
-  private void testCreateArtifactTable(List<DBColumnInfo> artifactColumns) {
-    assertEquals(3, artifactColumns.size());
-    for (DBColumnInfo column : artifactColumns) {
-      if (column.getName().equals("artifact_name")) {
-        assertNull(column.getDefaultValue());
-        assertEquals(String.class, column.getType());
-        assertEquals(255, (int) column.getLength());
-        assertEquals(false, column.isNullable());
-      } else if (column.getName().equals("foreign_keys")) {
-        assertNull(column.getDefaultValue());
-        assertEquals(String.class, column.getType());
-        assertEquals(255, (int) column.getLength());
-        assertEquals(false, column.isNullable());
-      } else if (column.getName().equals("artifact_data")) {
-        assertNull(column.getDefaultValue());
-        assertEquals(char[].class, column.getType());
-        assertEquals(false, column.isNullable());
-      } else {
-        fail("unexpected column name");
-      }
-    }
-  }
-
-  private void testCreateKerberosPrincipalTable(List<DBColumnInfo> columns) {
-    assertEquals(3, columns.size());
-    for (DBColumnInfo column : columns) {
-      if (column.getName().equals("principal_name")) {
-        assertNull(column.getDefaultValue());
-        assertEquals(String.class, column.getType());
-        assertEquals(255, (int) column.getLength());
-        assertEquals(false, column.isNullable());
-      } else if (column.getName().equals("is_service")) {
-        assertEquals(1, column.getDefaultValue());
-        assertEquals(Short.class, column.getType());
-        assertEquals(1, (int) column.getLength());
-        assertEquals(false, column.isNullable());
-      } else if (column.getName().equals("cached_keytab_path")) {
-        assertNull(column.getDefaultValue());
-        assertEquals(String.class, column.getType());
-        assertEquals(255, (int) column.getLength());
-        assertEquals(true, column.isNullable());
-      } else {
-        fail("unexpected column name");
-      }
-    }
-  }
-
-  private void testCreateKerberosPrincipalHostTable(List<DBColumnInfo> columns) {
-    assertEquals(2, columns.size());
-    for (DBColumnInfo column : columns) {
-      if (column.getName().equals("principal_name")) {
-        assertNull(column.getDefaultValue());
-        assertEquals(String.class, column.getType());
-        assertEquals(255, (int) column.getLength());
-        assertEquals(false, column.isNullable());
-      } else if (column.getName().equals("host_name")) {
-        assertNull(column.getDefaultValue());
-        assertEquals(String.class, column.getType());
-        assertEquals(255, (int) column.getLength());
-        assertEquals(false, column.isNullable());
-      } else {
-        fail("unexpected column name");
-      }
-    }
-  }
-}


[50/63] [abbrv] ambari git commit: AMBARI-21362. Ambari upgrade not idempotent due to column move

Posted by ab...@apache.org.
AMBARI-21362. Ambari upgrade not idempotent due to column move


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5c874ccb
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5c874ccb
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5c874ccb

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 5c874ccb40b282e5074588906cb7de1f7eeae614
Parents: a3681c0
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Tue Jun 27 14:33:27 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Wed Jun 28 18:31:49 2017 +0200

----------------------------------------------------------------------
 .../ambari/server/orm/DBAccessorImpl.java       |  5 +++-
 .../ambari/server/orm/DBAccessorImplTest.java   | 29 ++++++++++++++++++++
 2 files changed, 33 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5c874ccb/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
index 13e7d7d..83ea8e1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
@@ -1411,7 +1411,10 @@ public class DBAccessorImpl implements DBAccessor {
   public void moveColumnToAnotherTable(String sourceTableName, DBColumnInfo sourceColumn, String sourceIDFieldName,
               String targetTableName, DBColumnInfo targetColumn, String targetIDFieldName, Object initialValue) throws SQLException {
 
-    if (this.tableHasColumn(sourceTableName, sourceIDFieldName)) {
+    if (tableHasColumn(sourceTableName, sourceIDFieldName) &&
+      tableHasColumn(sourceTableName, sourceColumn.getName()) &&
+      tableHasColumn(targetTableName, targetIDFieldName)
+    ) {
 
       final String moveSQL = dbmsHelper.getCopyColumnToAnotherTableStatement(sourceTableName, sourceColumn.getName(),
         sourceIDFieldName, targetTableName, targetColumn.getName(),targetIDFieldName);

http://git-wip-us.apache.org/repos/asf/ambari/blob/5c874ccb/ambari-server/src/test/java/org/apache/ambari/server/orm/DBAccessorImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/DBAccessorImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/DBAccessorImplTest.java
index ca2674c..b4ffbf1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/DBAccessorImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/DBAccessorImplTest.java
@@ -638,4 +638,33 @@ public class DBAccessorImplTest {
     }
 
    }
+
+  @Test
+  public void testMoveNonexistentColumnIsNoop() throws Exception {
+    DBAccessorImpl dbAccessor = injector.getInstance(DBAccessorImpl.class);
+    String sourceTableName = getFreeTableName();
+    String targetTableName = getFreeTableName();
+    int testRowAmount = 10;
+
+    createMyTable(sourceTableName, "col1");
+    createMyTable(targetTableName, "col1", "col2");
+
+    for (Integer i=0; i < testRowAmount; i++){
+      dbAccessor.insertRow(sourceTableName,
+        new String[] {"id", "col1"},
+        new String[]{i.toString(), String.format("'source,1,%s'", i)}, false);
+
+      dbAccessor.insertRow(targetTableName,
+        new String[] {"id", "col1", "col2"},
+        new String[]{i.toString(), String.format("'target,1,%s'", i), String.format("'target,2,%s'", i)}, false);
+    }
+
+    DBColumnInfo sourceColumn = new DBColumnInfo("col2", String.class, null, null, false);
+    DBColumnInfo targetColumn = new DBColumnInfo("col2", String.class, null, null, false);
+
+    dbAccessor.moveColumnToAnotherTable(sourceTableName, sourceColumn, "id",
+      targetTableName, targetColumn, "id", "initial");
+
+    // should not result in exception due to unknown column in source table
+  }
 }


[60/63] [abbrv] ambari git commit: AMBARI-21369. Use JDK 8 compiler maven plugin for Log Search and Infra projects (oleewere)

Posted by ab...@apache.org.
AMBARI-21369. Use JDK 8 compiler maven plugin for Log Search and Infra projects (oleewere)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d7c59fca
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d7c59fca
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d7c59fca

Branch: refs/heads/branch-feature-logsearch-ui
Commit: d7c59fca19770a8fb0e488371d2f460673f7e3d4
Parents: 7554509
Author: oleewere <ol...@gmail.com>
Authored: Fri Jun 30 11:33:21 2017 +0200
Committer: oleewere <ol...@gmail.com>
Committed: Fri Jun 30 11:35:08 2017 +0200

----------------------------------------------------------------------
 ambari-infra/ambari-infra-manager/pom.xml           |  1 -
 ambari-infra/ambari-infra-solr-plugin/pom.xml       |  4 ++--
 ambari-infra/pom.xml                                |  2 +-
 ambari-logsearch/ambari-logsearch-logfeeder/pom.xml |  4 ++--
 ambari-logsearch/ambari-logsearch-server/pom.xml    | 12 ++++++------
 ambari-logsearch/pom.xml                            |  5 +++--
 6 files changed, 14 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d7c59fca/ambari-infra/ambari-infra-manager/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-manager/pom.xml b/ambari-infra/ambari-infra-manager/pom.xml
index b7708c2..b9f2a77 100644
--- a/ambari-infra/ambari-infra-manager/pom.xml
+++ b/ambari-infra/ambari-infra-manager/pom.xml
@@ -38,7 +38,6 @@
     <spring-data-solr.version>2.0.2.RELEASE</spring-data-solr.version>
     <jjwt.version>0.6.0</jjwt.version>
     <spring-batch.version>3.0.7.RELEASE</spring-batch.version>
-    <jdk.version>1.7</jdk.version>
     <sqlite.version>3.8.11.2</sqlite.version>
   </properties>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7c59fca/ambari-infra/ambari-infra-solr-plugin/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-infra/ambari-infra-solr-plugin/pom.xml b/ambari-infra/ambari-infra-solr-plugin/pom.xml
index c890cec..3337d99 100644
--- a/ambari-infra/ambari-infra-solr-plugin/pom.xml
+++ b/ambari-infra/ambari-infra-solr-plugin/pom.xml
@@ -47,8 +47,8 @@
         <artifactId>maven-compiler-plugin</artifactId>
         <version>3.3</version>
         <configuration>
-          <source>1.7</source>
-          <target>1.7</target>
+          <source>${jdk.version}</source>
+          <target>${jdk.version}</target>
         </configuration>
       </plugin>
     </plugins>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7c59fca/ambari-infra/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-infra/pom.xml b/ambari-infra/pom.xml
index 07adb3e..4f5c29c 100644
--- a/ambari-infra/pom.xml
+++ b/ambari-infra/pom.xml
@@ -30,7 +30,7 @@
   <packaging>pom</packaging>
 
   <properties>
-    <jdk.version>1.7</jdk.version>
+    <jdk.version>1.8</jdk.version>
     <solr.version>5.5.2</solr.version>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <python.ver>python &gt;= 2.6</python.ver>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7c59fca/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml b/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml
index ae2150e..091f957 100644
--- a/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-logfeeder/pom.xml
@@ -197,8 +197,8 @@
         <artifactId>maven-compiler-plugin</artifactId>
         <version>3.3</version>
         <configuration>
-          <source>1.7</source>
-          <target>1.7</target>
+          <source>${jdk.version}</source>
+          <target>${jdk.version}</target>
         </configuration>
       </plugin>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7c59fca/ambari-logsearch/ambari-logsearch-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-server/pom.xml b/ambari-logsearch/ambari-logsearch-server/pom.xml
index fc4029b..ebca2d5 100755
--- a/ambari-logsearch/ambari-logsearch-server/pom.xml
+++ b/ambari-logsearch/ambari-logsearch-server/pom.xml
@@ -63,8 +63,8 @@
             <artifactId>maven-compiler-plugin</artifactId>
             <version>3.0</version>
             <configuration>
-              <source>1.7</source>
-              <target>1.7</target>
+              <source>${jdk.version}</source>
+              <target>${jdk.version}</target>
             </configuration>
           </plugin>
           <!-- Exec main class plugin -->
@@ -196,8 +196,8 @@
             <artifactId>maven-compiler-plugin</artifactId>
             <version>3.0</version>
             <configuration>
-              <source>1.7</source>
-              <target>1.7</target>
+              <source>${jdk.version}</source>
+              <target>${jdk.version}</target>
             </configuration>
           </plugin>
           <!-- Exec main class plugin -->
@@ -337,8 +337,8 @@
             <artifactId>maven-compiler-plugin</artifactId>
             <version>3.0</version>
             <configuration>
-              <source>1.7</source>
-              <target>1.7</target>
+              <source>${jdk.version}</source>
+              <target>${jdk.version}</target>
             </configuration>
           </plugin>
           <!-- Exec main class plugin -->

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7c59fca/ambari-logsearch/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-logsearch/pom.xml b/ambari-logsearch/pom.xml
index af1dc6a..82943e4 100644
--- a/ambari-logsearch/pom.xml
+++ b/ambari-logsearch/pom.xml
@@ -39,6 +39,7 @@
     <module>ambari-logsearch-it</module>
   </modules>
   <properties>
+    <jdk.version>1.8</jdk.version>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <python.ver>python &gt;= 2.6</python.ver>
     <deb.python.ver>python (&gt;= 2.6)</deb.python.ver>
@@ -82,8 +83,8 @@
         <artifactId>maven-compiler-plugin</artifactId>
         <version>3.2</version>
         <configuration>
-          <source>1.7</source>
-          <target>1.7</target>
+          <source>${jdk.version}</source>
+          <target>${jdk.version}</target>
         </configuration>
       </plugin>
       <plugin>


[62/63] [abbrv] ambari git commit: AMBARI-21382 Log Search UI: implement timezone customization. (ababiichuk)

Posted by ab...@apache.org.
AMBARI-21382 Log Search UI: implement timezone customization. (ababiichuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b7edc6cf
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b7edc6cf
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b7edc6cf

Branch: refs/heads/branch-feature-logsearch-ui
Commit: b7edc6cf8db108aa5b703817870a8d077c2e7180
Parents: ed66094
Author: ababiichuk <ab...@hortonworks.com>
Authored: Fri Jun 30 16:29:49 2017 +0300
Committer: ababiichuk <ab...@hortonworks.com>
Committed: Fri Jun 30 16:29:49 2017 +0300

----------------------------------------------------------------------
 .../ambari-logsearch-web-new/package.json       |  2 +
 .../src/app/app.module.ts                       | 14 ++-
 .../dropdown-list/dropdown-list.component.less  |  2 +-
 .../filter-button.component.spec.ts             | 75 ++++++++++++++++
 .../filter-button/filter-button.component.ts    | 91 ++++++++++++++++++++
 .../filter-dropdown.component.html              |  2 +-
 .../filter-dropdown.component.spec.ts           | 84 +++++++++---------
 .../filter-dropdown.component.ts                | 65 +++++++++++---
 .../filter-text-field.component.html            |  3 +-
 .../filter-text-field.component.spec.ts         | 25 +++++-
 .../filter-text-field.component.ts              | 61 +++++++++++--
 .../filters-panel/filters-panel.component.html  | 51 ++++++-----
 .../filters-panel.component.spec.ts             | 26 ++++--
 .../filters-panel/filters-panel.component.ts    | 16 +++-
 .../logs-list/logs-list.component.html          |  2 +-
 .../logs-list/logs-list.component.spec.ts       | 11 ++-
 .../components/logs-list/logs-list.component.ts |  9 +-
 .../menu-button/menu-button.component.html      |  4 +-
 .../menu-button/menu-button.component.spec.ts   |  8 +-
 .../menu-button/menu-button.component.ts        | 23 ++---
 .../src/app/models/app-settings.model.ts        | 27 ++++++
 .../src/app/models/store.model.ts               | 53 ++++++++++--
 .../src/app/services/filtering.service.spec.ts  | 12 ++-
 .../src/app/services/filtering.service.ts       | 51 +++++++----
 .../services/storage/app-settings.service.ts    | 33 +++++++
 .../app/services/storage/audit-logs.service.ts  |  7 +-
 .../app/services/storage/bar-graphs.service.ts  |  7 +-
 .../src/app/services/storage/filters.service.ts |  6 +-
 .../src/app/services/storage/graphs.service.ts  |  6 +-
 .../src/app/services/storage/nodes.service.ts   |  6 +-
 .../services/storage/service-logs.service.ts    |  6 +-
 .../services/storage/user-configs.service.ts    |  6 +-
 .../src/assets/mock-data.ts                     | 30 ++++---
 .../ambari-logsearch-web-new/yarn.lock          | 28 +++++-
 34 files changed, 658 insertions(+), 194 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/package.json
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/package.json b/ambari-logsearch/ambari-logsearch-web-new/package.json
index da44d30..847787b 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/package.json
+++ b/ambari-logsearch/ambari-logsearch-web-new/package.json
@@ -23,6 +23,8 @@
     "@ngrx/store": "^2.2.2",
     "@ngx-translate/core": "^6.0.1",
     "@ngx-translate/http-loader": "^0.0.3",
+    "angular-moment-timezone": "^0.2.1",
+    "angular2-moment": "^1.4.0",
     "bootstrap": "^3.3.7",
     "core-js": "^2.4.1",
     "font-awesome": "^4.7.0",

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/app.module.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/app.module.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/app.module.ts
index a095a97..1e0ebb0 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/app.module.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/app.module.ts
@@ -18,19 +18,22 @@
 
 import {BrowserModule} from '@angular/platform-browser';
 import {NgModule, CUSTOM_ELEMENTS_SCHEMA, Injector} from '@angular/core';
-import {FormsModule} from '@angular/forms';
+import {FormsModule, ReactiveFormsModule} from '@angular/forms';
 import {HttpModule, Http, XHRBackend, BrowserXhr, ResponseOptions, XSRFStrategy} from '@angular/http';
 import {InMemoryBackendService} from 'angular-in-memory-web-api';
 import {AlertModule} from 'ngx-bootstrap';
 import {TranslateModule, TranslateLoader} from '@ngx-translate/core';
 import {TranslateHttpLoader} from '@ngx-translate/http-loader';
 import {StoreModule} from '@ngrx/store';
+import {MomentModule} from 'angular2-moment';
+import {MomentTimezoneModule} from 'angular-moment-timezone';
 import {environment} from '../environments/environment';
 import {mockApiDataService} from '@app/services/mock-api-data.service'
 import {HttpClientService} from '@app/services/http-client.service';
 import {ComponentActionsService} from '@app/services/component-actions.service';
 import {FilteringService} from '@app/services/filtering.service';
 
+import {AppSettingsService, appSettings} from '@app/services/storage/app-settings.service';
 import {AuditLogsService, auditLogs} from '@app/services/storage/audit-logs.service';
 import {ServiceLogsService, serviceLogs} from '@app/services/storage/service-logs.service';
 import {BarGraphsService, barGraphs} from '@app/services/storage/bar-graphs.service';
@@ -48,6 +51,7 @@ import {FiltersPanelComponent} from '@app/components/filters-panel/filters-panel
 import {FilterDropdownComponent} from '@app/components/filter-dropdown/filter-dropdown.component';
 import {DropdownListComponent} from '@app/components/dropdown-list/dropdown-list.component';
 import {FilterTextFieldComponent} from '@app/components/filter-text-field/filter-text-field.component';
+import {FilterButtonComponent} from '@app/components/filter-button/filter-button.component';
 import {AccordionPanelComponent} from '@app/components/accordion-panel/accordion-panel.component';
 import {LogsListComponent} from '@app/components/logs-list/logs-list.component';
 
@@ -82,12 +86,14 @@ export function getXHRBackend(injector: Injector, browser: BrowserXhr, xsrf: XSR
     DropdownListComponent,
     FilterDropdownComponent,
     FilterTextFieldComponent,
+    FilterButtonComponent,
     AccordionPanelComponent,
     LogsListComponent
   ],
   imports: [
     BrowserModule,
     FormsModule,
+    ReactiveFormsModule,
     HttpModule,
     AlertModule.forRoot(),
     TranslateModule.forRoot({
@@ -98,6 +104,7 @@ export function getXHRBackend(injector: Injector, browser: BrowserXhr, xsrf: XSR
       }
     }),
     StoreModule.provideStore({
+      appSettings,
       auditLogs,
       serviceLogs,
       barGraphs,
@@ -105,12 +112,15 @@ export function getXHRBackend(injector: Injector, browser: BrowserXhr, xsrf: XSR
       nodes,
       userConfigs,
       filters
-    })
+    }),
+    MomentModule,
+    MomentTimezoneModule
   ],
   providers: [
     HttpClientService,
     ComponentActionsService,
     FilteringService,
+    AppSettingsService,
     AuditLogsService,
     ServiceLogsService,
     BarGraphsService,
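
Note that src/app/services/storage/app-settings.service.ts, registered in the providers above, is added by this commit but its hunks are not included in this message. A minimal sketch of what an @ngrx/store 2.x settings service of that shape could look like (the action type string, default state and getParameter helper are assumptions; only setParameter('timeZone', ...) is actually exercised in the diffs below):

import {Injectable} from '@angular/core';
import {Store} from '@ngrx/store';
import 'rxjs/add/operator/map';

// Hypothetical action type; the real constant name is not visible in this diff.
const SET_PARAMETER = 'SET_PARAMETER';

// Reducer registered under the 'appSettings' key in StoreModule.provideStore().
export function appSettings(state: any = {}, action: any): any {
  switch (action.type) {
    case SET_PARAMETER:
      return Object.assign({}, state, action.payload);
    default:
      return state;
  }
}

@Injectable()
export class AppSettingsService {

  constructor(private store: Store<any>) {
  }

  // Called from FiltersPanelComponent as setParameter('timeZone', timeZone).
  setParameter(key: string, value: any): void {
    this.store.dispatch({type: SET_PARAMETER, payload: {[key]: value}});
  }

  // Convenience accessor; an assumption, not taken from the commit.
  getParameter(key: string) {
    return this.store.select('appSettings').map(settings => settings && settings[key]);
  }
}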

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/components/dropdown-list/dropdown-list.component.less
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/dropdown-list/dropdown-list.component.less b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/dropdown-list/dropdown-list.component.less
index d0f079a..0853883 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/dropdown-list/dropdown-list.component.less
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/dropdown-list/dropdown-list.component.less
@@ -19,4 +19,4 @@
 :host {
   max-height: 500px; // TODO get rid of magic number, base on actual design
   overflow-y: auto;
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-button/filter-button.component.spec.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-button/filter-button.component.spec.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-button/filter-button.component.spec.ts
new file mode 100644
index 0000000..8828390
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-button/filter-button.component.spec.ts
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {NO_ERRORS_SCHEMA} from '@angular/core';
+import {async, ComponentFixture, TestBed} from '@angular/core/testing';
+import {Http} from '@angular/http';
+import {FormControl, FormGroup} from '@angular/forms';
+import {TranslateModule, TranslateLoader} from '@ngx-translate/core';
+import {TranslateHttpLoader} from '@ngx-translate/http-loader';
+import {StoreModule} from '@ngrx/store';
+import {AppSettingsService, appSettings} from '@app/services/storage/app-settings.service';
+import {ComponentActionsService} from '@app/services/component-actions.service';
+import {FilteringService} from '@app/services/filtering.service';
+
+import {FilterButtonComponent} from './filter-button.component';
+
+export function HttpLoaderFactory(http: Http) {
+  return new TranslateHttpLoader(http, 'assets/i18n/', '.json');
+}
+
+describe('FilterButtonComponent', () => {
+  let component: FilterButtonComponent;
+  let fixture: ComponentFixture<FilterButtonComponent>;
+
+  beforeEach(async(() => {
+    TestBed.configureTestingModule({
+      declarations: [FilterButtonComponent],
+      imports: [
+        StoreModule.provideStore({
+          appSettings
+        }),
+        TranslateModule.forRoot({
+          provide: TranslateLoader,
+          useFactory: HttpLoaderFactory,
+          deps: [Http]
+        })],
+      providers: [
+        AppSettingsService,
+        ComponentActionsService,
+        FilteringService
+      ],
+      schemas: [NO_ERRORS_SCHEMA]
+    })
+    .compileComponents();
+  }));
+
+  beforeEach(() => {
+    fixture = TestBed.createComponent(FilterButtonComponent);
+    component = fixture.componentInstance;
+    component.filterName = 'f';
+    component.form = new FormGroup({
+      f: new FormControl()
+    });
+    fixture.detectChanges();
+  });
+
+  it('should create component', () => {
+    expect(component).toBeTruthy();
+  });
+});

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-button/filter-button.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-button/filter-button.component.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-button/filter-button.component.ts
new file mode 100644
index 0000000..3da53ca
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-button/filter-button.component.ts
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {Component, Input, forwardRef} from '@angular/core';
+import {ControlValueAccessor, NG_VALUE_ACCESSOR, FormGroup} from '@angular/forms';
+import {ComponentActionsService} from '@app/services/component-actions.service';
+import {FilteringService} from '@app/services/filtering.service';
+import {MenuButtonComponent, menuButtonComponentOptions} from '@app/components/menu-button/menu-button.component';
+
+@Component(Object.assign({
+  providers: [
+    {
+      provide: NG_VALUE_ACCESSOR,
+      useExisting: forwardRef(() => FilterButtonComponent),
+      multi: true
+    }
+  ]
+}, menuButtonComponentOptions, {
+  selector: 'filter-button',
+}))
+export class FilterButtonComponent extends MenuButtonComponent implements ControlValueAccessor {
+
+  constructor(protected actions: ComponentActionsService, private filtering: FilteringService) {
+    super(actions);
+  }
+
+  ngAfterViewInit() {
+    const callback = this.customOnChange ?
+      (value => this.customOnChange(value)) : (() => this.filtering.filteringSubject.next(null));
+    this.form.controls[this.filterName].valueChanges.subscribe(callback);
+  }
+
+  @Input()
+  filterName: string;
+
+  @Input()
+  customOnChange: (value: any) => void;
+
+  @Input()
+  form: FormGroup;
+
+  private onChange: (fn: any) => void;
+
+  readonly isFilter = true;
+
+  get filterInstance(): any {
+    return this.filtering.filters[this.filterName];
+  }
+
+  get value(): any {
+    return this.filterInstance.selectedValue;
+  }
+
+  set value(newValue: any) {
+    if (this.filtering.valueHasChanged(this.filterInstance.selectedValue, newValue)) {
+      this.filterInstance.selectedValue = newValue;
+      this.onChange(newValue);
+    }
+  }
+
+  writeValue(options: any) {
+    const value = options && options.value;
+    if (this.filtering.valueHasChanged(this.filterInstance.selectedValue, value)) {
+      this.filterInstance.selectedValue = value;
+      this.filterInstance.selectedLabel = options.label;
+    }
+  }
+
+  registerOnChange(callback: any): void {
+    this.onChange = callback;
+  }
+
+  registerOnTouched() {
+  }
+
+}
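
The Object.assign() call above assembles the @Component metadata from three plain objects: a providers block, the shared menuButtonComponentOptions (exported from menu-button.component.ts later in this commit), and a new selector. Later sources win, so only the selector is overridden while the menu-button template and styles are reused. The same pattern in isolation, with placeholder components rather than code from this commit:

import {Component} from '@angular/core';

// Shared metadata exported by a base component, analogous to menuButtonComponentOptions.
export const baseButtonOptions = {
  selector: 'base-button',
  template: '<button>{{label}}</button>'
};

@Component(baseButtonOptions)
export class BaseButtonComponent {
  label = 'base';
}

// Decorators are not inherited, so the derived component declares its own
// @Component and reuses the base metadata, overriding only the selector,
// which is exactly what FilterButtonComponent does with menuButtonComponentOptions.
@Component(Object.assign({}, baseButtonOptions, {selector: 'derived-button'}))
export class DerivedButtonComponent extends BaseButtonComponent {
  label = 'derived';
}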

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.html
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.html b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.html
index 1ac663e..bb7a206 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.html
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.html
@@ -20,4 +20,4 @@
   {{filterInstance.selectedLabel | translate}} <span class="caret"></span>
 </button>
 <ul class="dropdown-menu" [items]="filterInstance.options" [isFilter]="true"
-    (selectedItemChange)="setSelectedValue($event)"></ul>
+    (selectedItemChange)="writeValue($event)"></ul>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.spec.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.spec.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.spec.ts
index e0414f3..9f4522b 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.spec.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.spec.ts
@@ -18,8 +18,11 @@
 import {NO_ERRORS_SCHEMA} from '@angular/core';
 import {async, ComponentFixture, TestBed} from '@angular/core/testing';
 import {Http} from '@angular/http';
+import {FormControl, FormGroup} from '@angular/forms';
 import {TranslateModule, TranslateLoader} from '@ngx-translate/core';
 import {TranslateHttpLoader} from '@ngx-translate/http-loader';
+import {StoreModule} from '@ngrx/store';
+import {AppSettingsService, appSettings} from '@app/services/storage/app-settings.service';
 import {FilteringService} from '@app/services/filtering.service';
 
 import {FilterDropdownComponent} from './filter-dropdown.component';
@@ -31,16 +34,43 @@ export function HttpLoaderFactory(http: Http) {
 describe('FilterDropdownComponent', () => {
   let component: FilterDropdownComponent;
   let fixture: ComponentFixture<FilterDropdownComponent>;
+  const filtering = {
+    filters: {
+      f: {
+        options: [
+          {
+            value: 'v0',
+            label: 'l0'
+          },
+          {
+            value: 'v1',
+            label: 'l1'
+          }
+        ]
+      }
+    }
+  };
 
   beforeEach(async(() => {
     TestBed.configureTestingModule({
       declarations: [FilterDropdownComponent],
-      imports: [TranslateModule.forRoot({
-        provide: TranslateLoader,
-        useFactory: HttpLoaderFactory,
-        deps: [Http]
-      })],
-      providers: [FilteringService],
+      imports: [
+        StoreModule.provideStore({
+          appSettings
+        }),
+        TranslateModule.forRoot({
+          provide: TranslateLoader,
+          useFactory: HttpLoaderFactory,
+          deps: [Http]
+        })
+      ],
+      providers: [
+        AppSettingsService,
+        {
+          provide: FilteringService,
+          useValue: filtering
+        }
+      ],
       schemas: [NO_ERRORS_SCHEMA]
     })
     .compileComponents();
@@ -49,18 +79,10 @@ describe('FilterDropdownComponent', () => {
   beforeEach(() => {
     fixture = TestBed.createComponent(FilterDropdownComponent);
     component = fixture.componentInstance;
-    component.filterInstance = {
-      options: [
-        {
-          value: 'v0',
-          label: 'l0'
-        },
-        {
-          value: 'v1',
-          label: 'l1'
-        }
-      ]
-    };
+    component.filterName = 'f';
+    component.form = new FormGroup({
+      f: new FormControl()
+    });
     fixture.detectChanges();
   });
 
@@ -68,30 +90,4 @@ describe('FilterDropdownComponent', () => {
     expect(component).toBeTruthy();
   });
 
-  describe('should take initial filter values from 1st item', () => {
-    it('selectedValue', () => {
-      expect(component.filterInstance.selectedValue).toEqual('v0');
-    });
-
-    it('selectedLabel', () => {
-      expect(component.filterInstance.selectedLabel).toEqual('l0');
-    });
-  });
-
-  describe('#setSelectedValue()', () => {
-    beforeEach(() => {
-      component.setSelectedValue({
-        value: 'v2',
-        label: 'l2'
-      });
-    });
-
-    it('selectedValue', () => {
-      expect(component.filterInstance.selectedValue).toEqual('v2');
-    });
-
-    it('selectedLabel', () => {
-      expect(component.filterInstance.selectedLabel).toEqual('l2');
-    });
-  });
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.ts
index faaafcb..84210dc 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-dropdown/filter-dropdown.component.ts
@@ -15,36 +15,75 @@
  * limitations under the License.
  */
 
-import {Component, OnInit, Input} from '@angular/core';
+import {Component, AfterViewInit, Input, forwardRef} from '@angular/core';
+import {ControlValueAccessor, NG_VALUE_ACCESSOR, FormGroup} from '@angular/forms';
 import {FilteringService} from '@app/services/filtering.service';
 
 @Component({
   selector: 'filter-dropdown',
   templateUrl: './filter-dropdown.component.html',
-  styleUrls: ['./filter-dropdown.component.less']
+  styleUrls: ['./filter-dropdown.component.less'],
+  providers: [
+    {
+      provide: NG_VALUE_ACCESSOR,
+      useExisting: forwardRef(() => FilterDropdownComponent),
+      multi: true
+    }
+  ]
 })
-export class FilterDropdownComponent implements OnInit {
+export class FilterDropdownComponent implements AfterViewInit, ControlValueAccessor {
 
   constructor(private filtering: FilteringService) {
   }
 
-  ngOnInit() {
-    this.filterInstance.selectedValue = this.filterInstance.options[0].value;
-    this.filterInstance.selectedLabel = this.filterInstance.options[0].label;
+  ngAfterViewInit() {
+    const callback = this.customOnChange ?
+      (value => this.customOnChange(value)) : (() => this.filtering.filteringSubject.next(null));
+    this.form.controls[this.filterName].valueChanges.subscribe(callback);
   }
 
   @Input()
-  filterInstance: any;
+  options: any[];
 
   @Input()
-  options: any[];
+  customOnChange: (value: any) => void;
+
+  @Input()
+  form: FormGroup;
+
+  @Input()
+  filterName: string;
+
+  private onChange: (fn: any) => void;
+
+  get filterInstance(): any {
+    return this.filtering.filters[this.filterName];
+  }
 
-  setSelectedValue(options: any): void {
-    if (this.filterInstance.selectedValue !== options.value) {
-      this.filterInstance.selectedValue = options.value;
+  get value(): any {
+    return this.filterInstance.selectedValue;
+  }
+
+  set value(newValue: any) {
+    if (this.filtering.valueHasChanged(this.filterInstance.selectedValue, newValue)) {
+      this.filterInstance.selectedValue = newValue;
+      this.onChange(newValue);
+    }
+  }
+
+  writeValue(options: any) {
+    const value = options && options.value;
+    if (this.filtering.valueHasChanged(this.filterInstance.selectedValue, value)) {
+      this.filterInstance.selectedValue = value;
       this.filterInstance.selectedLabel = options.label;
-      this.filtering.filteringSubject.next(null);
     }
-  };
+  }
+
+  registerOnChange(callback: any): void {
+    this.onChange = callback;
+  }
+
+  registerOnTouched() {
+  }
 
 }
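
A usage sketch follows (component and option values are placeholders, and the component also expects FilteringService to hold a filter definition under the given filterName, as FiltersPanelComponent arranges). The host binds the dropdown to a reactive form control; a programmatic setValue() reaches the component through writeValue(), while user selections come back through the registered onChange callback and the valueChanges subscription created in ngAfterViewInit().

import {Component} from '@angular/core';
import {FormControl, FormGroup} from '@angular/forms';

@Component({
  selector: 'filter-dropdown-demo',
  template: `
    <form [formGroup]="form">
      <filter-dropdown formControlName="timeRange" [filterName]="'timeRange'"
                       [form]="form"></filter-dropdown>
    </form>
  `
})
export class FilterDropdownDemoComponent {

  form = new FormGroup({
    timeRange: new FormControl()
  });

  preselectOption(): void {
    // setValue() makes Angular call writeValue() on FilterDropdownComponent;
    // the object shape matches what dropdown-list emits via selectedItemChange.
    this.form.controls['timeRange'].setValue({value: 'LAST_1_HOUR', label: 'Last 1 hour'});
  }

}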

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.html
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.html b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.html
index ed3c4ba..8fb7659 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.html
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.html
@@ -17,5 +17,6 @@
 
 <div class="input-group">
   <span class="input-group-addon">{{filterInstance.label | translate}}</span>
-  <input type="text" class="form-control" [(ngModel)]="filterInstance.selectedValue" (change)="onValueChange()"> <!-- TODO use ngModelChange with debounce -->
+  <input type="text" class="form-control" [(ngModel)]="filterInstance.selectedValue"
+         (ngModelChange)="writeValue($event)">
 </div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.spec.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.spec.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.spec.ts
index e4f026c..740593f 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.spec.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.spec.ts
@@ -18,9 +18,11 @@
 import {CUSTOM_ELEMENTS_SCHEMA} from '@angular/core';
 import {async, ComponentFixture, TestBed} from '@angular/core/testing';
 import {Http} from '@angular/http';
-import {FormsModule} from '@angular/forms';
+import {FormsModule, FormControl, FormGroup} from '@angular/forms';
 import {TranslateModule, TranslateLoader} from '@ngx-translate/core';
 import {TranslateHttpLoader} from '@ngx-translate/http-loader';
+import {StoreModule} from '@ngrx/store';
+import {AppSettingsService, appSettings} from '@app/services/storage/app-settings.service';
 import {FilteringService} from '@app/services/filtering.service';
 
 import {FilterTextFieldComponent} from './filter-text-field.component';
@@ -32,6 +34,11 @@ export function HttpLoaderFactory(http: Http) {
 describe('FilterTextFieldComponent', () => {
   let component: FilterTextFieldComponent;
   let fixture: ComponentFixture<FilterTextFieldComponent>;
+  const filtering = {
+    filters: {
+      f: {}
+    }
+  };
 
   beforeEach(async(() => {
     TestBed.configureTestingModule({
@@ -42,9 +49,18 @@ describe('FilterTextFieldComponent', () => {
           provide: TranslateLoader,
           useFactory: HttpLoaderFactory,
           deps: [Http]
+        }),
+        StoreModule.provideStore({
+          appSettings
         })
       ],
-      providers: [FilteringService],
+      providers: [
+        AppSettingsService,
+        {
+          provide: FilteringService,
+          useValue: filtering
+        }
+      ],
       schemas: [CUSTOM_ELEMENTS_SCHEMA]
     })
     .compileComponents();
@@ -53,7 +69,10 @@ describe('FilterTextFieldComponent', () => {
   beforeEach(() => {
     fixture = TestBed.createComponent(FilterTextFieldComponent);
     component = fixture.componentInstance;
-    component.filterInstance = {};
+    component.filterName = 'f';
+    component.form = new FormGroup({
+      f: new FormControl()
+    });
     fixture.detectChanges();
   });
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.ts
index 3f23ffd..857b511 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filter-text-field/filter-text-field.component.ts
@@ -15,27 +15,74 @@
  * limitations under the License.
  */
 
-import {Component, OnInit, Input} from '@angular/core';
+import {Component, AfterViewInit, Input, forwardRef} from '@angular/core';
+import {ControlValueAccessor, NG_VALUE_ACCESSOR, FormGroup} from '@angular/forms';
+import 'rxjs/add/operator/debounceTime';
 import {FilteringService} from '@app/services/filtering.service';
 
 @Component({
   selector: 'filter-text-field',
   templateUrl: './filter-text-field.component.html',
-  styleUrls: ['./filter-text-field.component.less']
+  styleUrls: ['./filter-text-field.component.less'],
+  providers: [
+    {
+      provide: NG_VALUE_ACCESSOR,
+      useExisting: forwardRef(() => FilterTextFieldComponent),
+      multi: true
+    }
+  ]
 })
-export class FilterTextFieldComponent implements OnInit {
+export class FilterTextFieldComponent implements AfterViewInit, ControlValueAccessor {
 
   constructor(private filtering: FilteringService) {
   }
 
-  ngOnInit() {
+  ngAfterViewInit() {
+    const callback = this.customOnChange ?
+      (value => this.customOnChange(value)) : (() => this.filtering.filteringSubject.next(null));
+    this.form.controls[this.filterName].valueChanges.debounceTime(this.debounceInterval).subscribe(callback);
   }
 
   @Input()
-  filterInstance: any;
+  filterName: string;
 
-  onValueChange() {
-    this.filtering.filteringSubject.next(null);
+  @Input()
+  customOnChange: (value: any) => void;
+
+  @Input()
+  form: FormGroup;
+
+  private onChange: (fn: any) => void;
+
+  private readonly debounceInterval = 1500;
+
+  get filterInstance(): any {
+    return this.filtering.filters[this.filterName];
+  }
+
+  get value(): any {
+    return this.filterInstance.selectedValue;
+  }
+
+  set value(newValue: any) {
+    if (this.filtering.valueHasChanged(this.filterInstance.selectedValue, newValue)) {
+      this.filterInstance.selectedValue = newValue;
+      this.onChange(newValue);
+    }
+  }
+
+  writeValue(options: any) {
+    const value = options && options.value;
+    if (this.filtering.valueHasChanged(this.filterInstance.selectedValue, value)) {
+      this.filterInstance.selectedValue = value;
+    }
+  }
+
+  registerOnChange(callback: any): void {
+    this.onChange = callback;
+  }
+
+  registerOnTouched() {
   }
 
 }
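
The text filter now debounces through the control's valueChanges stream instead of reacting to the (change) event, using the RxJS 5 patch-style import seen above. The same pattern in isolation (1500 ms mirrors debounceInterval; console.log stands in for filteringSubject.next(null)):

import {FormControl} from '@angular/forms';
import 'rxjs/add/operator/debounceTime';

const text = new FormControl();

// Only the last value within a quiet 1500 ms window reaches the subscriber,
// so rapid keystrokes cause a single reload instead of one per character.
text.valueChanges.debounceTime(1500).subscribe(value => {
  console.log('apply text filter:', value);
});

// Simulated typing: only 'err' is delivered, 1.5 s after the last change.
text.setValue('e');
text.setValue('er');
text.setValue('err');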

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.html
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.html b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.html
index e278a09..256b547 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.html
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.html
@@ -15,24 +15,33 @@
   limitations under the License.
 -->
 
-<div class="form-inline filter-input-container col-md-8">
-  <filter-dropdown [filterInstance]="filters.clusters"></filter-dropdown>
-  <filter-text-field [filterInstance]="filters.text"></filter-text-field>
-  <filter-dropdown [filterInstance]="filters.timeRange"></filter-dropdown>
-  <filter-dropdown [filterInstance]="filters.timeZone"></filter-dropdown>
-  <!--button class="btn btn-success" type="button">
-    <span class="fa fa-search"></span>
-  </button-->
-</div>
-<div class="default-flex col-md-4">
-  <a href="#">
-    <span class="fa fa-search-minus"></span> {{'filter.excluded' | translate}}
-  </a>
-  <menu-button [label]="filters.components.label" [iconClass]="filters.components.iconClass"
-               [subItems]="filters.components.options" [isFilter]="true"
-               [filterInstance]="filters.components"></menu-button>
-  <menu-button [label]="filters.levels.label" [iconClass]="filters.levels.iconClass"
-               [subItems]="filters.levels.options" [isFilter]="true"
-               [filterInstance]="filters.levels"></menu-button>
-  <menu-button label="filter.capture" iconClass="fa fa-caret-right"></menu-button>
-</div>
+<form [formGroup]="filtersForm">
+  <div class="form-inline filter-input-container col-md-8">
+    <filter-dropdown [(ngModel)]="filters.clusters.selectedValue" [filterName]="'clusters'"
+                     formControlName="clusters" [form]="filtersForm"></filter-dropdown>
+    <filter-text-field [(ngModel)]="filters.text.selectedValue" [filterName]="'text'" formControlName="text"
+                       [form]="filtersForm"></filter-text-field>
+    <filter-dropdown [(ngModel)]="filters.timeRange.selectedValue" [filterName]="'timeRange'"
+                     formControlName="timeRange" [form]="filtersForm"></filter-dropdown>
+    <filter-dropdown [(ngModel)]="filters.timeZone.selectedValue" [filterName]="'timeZone'"
+                     formControlName="timeZone" [form]="filtersForm"
+                     [customOnChange]="setTimeZone"></filter-dropdown>
+    <!--button class="btn btn-success" type="button">
+      <span class="fa fa-search"></span>
+    </button-->
+  </div>
+  <div class="default-flex col-md-4">
+    <a href="#">
+      <span class="fa fa-search-minus"></span> {{'filter.excluded' | translate}}
+    </a>
+    <filter-button [(ngModel)]="filters.components.selectedValue" formControlName="components"
+                   [label]="filters.components.label" [iconClass]="filters.components.iconClass"
+                   [subItems]="filters.components.options" [filterName]="'components'"
+                   [form]="filtersForm"></filter-button>
+    <filter-button [(ngModel)]="filters.levels.selectedValue" formControlName="levels"
+                   [label]="filters.levels.label" [iconClass]="filters.levels.iconClass"
+                   [subItems]="filters.levels.options" [filterName]="'levels'"
+                   [form]="filtersForm"></filter-button>
+    <menu-button label="filter.capture" iconClass="fa fa-caret-right"></menu-button>
+  </div>
+</form>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.spec.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.spec.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.spec.ts
index 934f37a..8291572 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.spec.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.spec.ts
@@ -16,11 +16,13 @@
  * limitations under the License.
  */
 
-import {CUSTOM_ELEMENTS_SCHEMA} from '@angular/core';
+import {NO_ERRORS_SCHEMA} from '@angular/core';
 import {async, ComponentFixture, TestBed} from '@angular/core/testing';
 import {Http} from '@angular/http';
 import {TranslateModule, TranslateLoader} from '@ngx-translate/core';
 import {TranslateHttpLoader} from '@ngx-translate/http-loader';
+import {StoreModule} from '@ngrx/store';
+import {AppSettingsService, appSettings} from '@app/services/storage/app-settings.service';
 import {FilteringService} from '@app/services/filtering.service';
 
 import {FiltersPanelComponent} from './filters-panel.component';
@@ -36,13 +38,21 @@ describe('FiltersPanelComponent', () => {
   beforeEach(async(() => {
     TestBed.configureTestingModule({
       declarations: [FiltersPanelComponent],
-      imports: [TranslateModule.forRoot({
-        provide: TranslateLoader,
-        useFactory: HttpLoaderFactory,
-        deps: [Http]
-      })],
-      providers: [FilteringService],
-      schemas: [CUSTOM_ELEMENTS_SCHEMA]
+      imports: [
+        StoreModule.provideStore({
+          appSettings
+        }),
+        TranslateModule.forRoot({
+          provide: TranslateLoader,
+          useFactory: HttpLoaderFactory,
+          deps: [Http]
+        })
+      ],
+      providers: [
+        AppSettingsService,
+        FilteringService
+      ],
+      schemas: [NO_ERRORS_SCHEMA]
     })
     .compileComponents();
   }));

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.ts
index f7b8429..b2ae9b0 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/filters-panel/filters-panel.component.ts
@@ -17,7 +17,9 @@
  */
 
 import {Component, OnInit} from '@angular/core';
+import {FormControl, FormGroup} from '@angular/forms';
 import {FilteringService} from '@app/services/filtering.service';
+import {AppSettingsService} from '@app/services/storage/app-settings.service';
 
 @Component({
   selector: 'filters-panel',
@@ -26,7 +28,7 @@ import {FilteringService} from '@app/services/filtering.service';
 })
 export class FiltersPanelComponent implements OnInit {
 
-  constructor(private filtering: FilteringService) {
+  constructor(private filtering: FilteringService, private appSettings: AppSettingsService) {
   }
 
   ngOnInit() {
@@ -36,4 +38,16 @@ export class FiltersPanelComponent implements OnInit {
     return this.filtering.filters;
   }
 
+  private filtersFormItems = Object.keys(this.filters).reduce((currentObject, key) => {
+    let item = {};
+    item[key] = new FormControl();
+    return Object.assign(currentObject, item);
+  }, {});
+
+  filtersForm = new FormGroup(this.filtersFormItems);
+
+  setTimeZone(timeZone: string): void {
+    this.appSettings.setParameter('timeZone', timeZone);
+  }
+
 }
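
filtersFormItems above derives one FormControl per filter key from FilteringService.filters. The same construction in isolation (the key list is a placeholder mirroring the controls bound in filters-panel.component.html):

import {FormControl, FormGroup} from '@angular/forms';

// Keys mirror the controls bound in the filters-panel template.
const filterKeys = ['clusters', 'text', 'timeRange', 'timeZone', 'components', 'levels'];

// One FormControl per key, collected into a single map, then wrapped in a FormGroup.
const controls = filterKeys.reduce(
  (acc, key) => Object.assign(acc, {[key]: new FormControl()}),
  {} as {[key: string]: FormControl}
);

const filtersForm = new FormGroup(controls);
console.log(Object.keys(filtersForm.controls)); // ['clusters', 'text', 'timeRange', ...]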

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.html
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.html b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.html
index 827f63c..df72502 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.html
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.html
@@ -21,7 +21,7 @@
     <div [ngClass]="'col-md-1 log-status ' + log.className">{{log.level}}</div>
     <div class="col-md-3">
       <div class="log-type">{{log.type}}</div>
-      <time class="log-time">{{log.time}}</time>
+      <time class="log-time">{{log.time | amTz: filtering.timeZone | amDateFormat: timeFormat}}</time>
     </div>
     <div class="col-md-6 log-content-wrapper">
       <div class="collapse log-actions" [attr.id]="'details-' + i">

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.spec.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.spec.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.spec.ts
index 072bfcd..ea3d780 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.spec.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.spec.ts
@@ -18,8 +18,11 @@
 import {CUSTOM_ELEMENTS_SCHEMA} from '@angular/core';
 import {async, ComponentFixture, TestBed} from '@angular/core/testing';
 import {StoreModule} from '@ngrx/store';
+import {MomentModule} from 'angular2-moment';
+import {MomentTimezoneModule} from 'angular-moment-timezone';
 import {AuditLogsService, auditLogs} from '@app/services/storage/audit-logs.service';
 import {ServiceLogsService, serviceLogs} from '@app/services/storage/service-logs.service';
+import {AppSettingsService, appSettings} from '@app/services/storage/app-settings.service';
 import {HttpClientService} from '@app/services/http-client.service';
 import {FilteringService} from '@app/services/filtering.service';
 
@@ -43,8 +46,11 @@ describe('LogsListComponent', () => {
       imports: [
         StoreModule.provideStore({
           auditLogs,
-          serviceLogs
-        })
+          serviceLogs,
+          appSettings
+        }),
+        MomentModule,
+        MomentTimezoneModule
       ],
       providers: [
         {
@@ -53,6 +59,7 @@ describe('LogsListComponent', () => {
         },
         AuditLogsService,
         ServiceLogsService,
+        AppSettingsService,
         FilteringService
       ],
       schemas: [CUSTOM_ELEMENTS_SCHEMA]

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.ts
index cc77784..efa05e4 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/logs-list/logs-list.component.ts
@@ -19,6 +19,7 @@ import {Component, OnInit, Input} from '@angular/core';
 import 'rxjs/add/operator/map';
 import {HttpClientService} from '@app/services/http-client.service';
 import {ServiceLogsService} from '@app/services/storage/service-logs.service';
+import {AppSettingsService} from '@app/services/storage/app-settings.service';
 import {FilteringService} from '@app/services/filtering.service';
 
 @Component({
@@ -28,7 +29,7 @@ import {FilteringService} from '@app/services/filtering.service';
 })
 export class LogsListComponent implements OnInit {
 
-  constructor(private httpClient: HttpClientService, private serviceLogsStorage: ServiceLogsService, private filtering: FilteringService) {
+  constructor(private httpClient: HttpClientService, private serviceLogsStorage: ServiceLogsService, private appSettings: AppSettingsService, private filtering: FilteringService) {
     this.filtering.filteringSubject.subscribe(this.loadLogs.bind(this));
   }
 
@@ -39,6 +40,8 @@ export class LogsListComponent implements OnInit {
   @Input()
   private logsArrayId: string;
 
+  timeFormat: string = 'DD/MM/YYYY HH:mm:ss';
+
   private readonly usedFilters = {
     clusters: ['clusters'],
     text: ['iMessage'],
@@ -47,14 +50,14 @@ export class LogsListComponent implements OnInit {
     levels: ['level']
   };
 
-  logs = this.serviceLogsStorage.getInstances().map(logs => {
+  logs = this.serviceLogsStorage.getAll().map(logs => {
     return logs.map(log => {
       return {
         type: log.type,
         level: log.level,
         className: log.level.toLowerCase(),
         message: log.log_message,
-        time: new Date(log.logtime).toLocaleDateString() + ' ' + new Date(log.logtime).toLocaleTimeString() // TODO use moment with custom time zone
+        time: log.logtime
       }
     });
   });
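
In the template, log.time is now rendered as {{log.time | amTz: filtering.timeZone | amDateFormat: timeFormat}}, which replaces the old toLocaleDateString()/toLocaleTimeString() concatenation with a conversion to the user-selected zone plus explicit formatting. The equivalent with moment-timezone directly (the epoch value and zone are just example inputs):

import * as moment from 'moment-timezone';

const logtime = 1498837071000;           // epoch millis, as stored in log.logtime
const timeZone = 'Europe/Kiev';          // the zone persisted under the 'timeZone' app setting
const timeFormat = 'DD/MM/YYYY HH:mm:ss';

// What the amTz and amDateFormat pipes compute for each row.
const rendered = moment(logtime).tz(timeZone).format(timeFormat);
console.log(rendered); // "30/06/2017 18:37:51" for this example input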

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.html
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.html b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.html
index 132d717..5111197 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.html
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.html
@@ -22,6 +22,6 @@
   <br>
   <a *ngIf="label" (mousedown)="onMouseDown($event)" [ngClass]="labelClass" (mouseup)="onMouseUp($event)"
      (click)="$event.stopPropagation()">{{label | translate}}</a>
-  <ul class="dropdown-menu" [isFilter]="isFilter" *ngIf="hasSubItems" [items]="subItems"
-      (selectedItemChange)="setSelectedValue($event)"></ul>
+  <ul class="dropdown-menu" *ngIf="hasSubItems" [items]="subItems" [isFilter]="isFilter"
+      (selectedItemChange)="isFilter && writeValue($event)"></ul>
 </div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.spec.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.spec.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.spec.ts
index 424d322..d53677d 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.spec.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.spec.ts
@@ -39,10 +39,10 @@ describe('MenuButtonComponent', () => {
       declarations: [MenuButtonComponent],
       imports: [
         TranslateModule.forRoot({
-        provide: TranslateLoader,
-        useFactory: HttpLoaderFactory,
-        deps: [Http]
-      })],
+          provide: TranslateLoader,
+          useFactory: HttpLoaderFactory,
+          deps: [Http]
+        })],
       providers: [
         ComponentActionsService,
         FilteringService

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.ts
index ded01b7..08b555c 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/components/menu-button/menu-button.component.ts
@@ -18,17 +18,18 @@
 
 import {Component, AfterViewInit, Input, ViewChild, ElementRef} from '@angular/core';
 import {ComponentActionsService} from '@app/services/component-actions.service';
-import {FilteringService} from '@app/services/filtering.service';
 import * as $ from 'jquery';
 
-@Component({
+export const menuButtonComponentOptions = {
   selector: 'menu-button',
   templateUrl: './menu-button.component.html',
   styleUrls: ['./menu-button.component.less']
-})
+}
+
+@Component(menuButtonComponentOptions)
 export class MenuButtonComponent implements AfterViewInit {
 
-  constructor(private actions: ComponentActionsService, private filtering: FilteringService) {
+  constructor(protected actions: ComponentActionsService) {
   }
 
   ngAfterViewInit() {
@@ -43,11 +44,7 @@ export class MenuButtonComponent implements AfterViewInit {
   @Input()
   action: string;
 
-  @Input()
-  isFilter: boolean;
-
-  @Input()
-  filterInstance?: any;
+  isFilter: boolean = false;
 
   @Input()
   iconClass: string;
@@ -91,12 +88,4 @@ export class MenuButtonComponent implements AfterViewInit {
     }
   }
 
-  setSelectedValue(options: any): void {
-    if (this.filterInstance.selectedValue !== options.value) {
-      this.filterInstance.selectedValue = options.value;
-      this.filterInstance.selectedLabel = options.label;
-      this.filtering.filteringSubject.next(null);
-    }
-  };
-
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/models/app-settings.model.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/models/app-settings.model.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/models/app-settings.model.ts
new file mode 100644
index 0000000..30bf0c8
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/models/app-settings.model.ts
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import * as moment from 'moment-timezone';
+
+export interface AppSettings {
+  timeZone: string;
+}
+
+export const defaultSettings: AppSettings = {
+  timeZone: moment.tz.guess()
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/models/store.model.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/models/store.model.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/models/store.model.ts
index 1f3a89c..c169c1b 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/models/store.model.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/models/store.model.ts
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+import {AppSettings} from '@app/models/app-settings.model';
 import {Observable} from 'rxjs/Observable';
 import {Store, Action} from '@ngrx/store';
 import {AuditLog} from '@app/models/audit-log.model';
@@ -29,10 +30,12 @@ import {Filter} from '@app/models/filter.model';
 export const storeActions = {
   ADD: 'ADD',
   DELETE: 'DELETE',
-  CLEAR: 'CLEAR'
+  CLEAR: 'CLEAR',
+  SET: 'SET'
 };
 
 export interface AppStore {
+  appSettings: AppSettings;
   auditLogs: AuditLog[];
   serviceLogs: ServiceLog[];
   barGraphs: BarGraph[];
@@ -44,17 +47,25 @@ export interface AppStore {
 
 export class ModelService {
 
-  constructor(private modelName: string, private store: Store<AppStore>) {}
+  constructor(modelName: string, store: Store<AppStore>) {
+    this.modelName = modelName;
+    this.store = store;
+  }
+
+  protected modelName: string;
+
+  protected store: Store<AppStore>;
 
-  getInstances(): Observable<any> {
+  getAll(): Observable<any> {
     return this.store.select(this.modelName);
   }
 
+}
+
+export class CollectionModelService extends ModelService {
+
   addInstance(instance: any): void {
-    this.store.dispatch({
-      type: storeActions.ADD,
-      payload: [instance]
-    });
+    this.addInstances([instance]);
   }
 
   addInstances(instances: any[]): void {
@@ -79,7 +90,24 @@ export class ModelService {
 
 }
 
-export function reducer(state: any, action: Action): any {
+export class ObjectModelService extends ModelService {
+
+  setParameter(key: string, value: any): void {
+    let payload = {};
+    payload[key] = value;
+    this.setParameters(payload);
+  }
+
+  setParameters(params: any): void {
+    this.store.dispatch({
+      type: storeActions.SET,
+      payload: params
+    });
+  }
+
+}
+
+export function collectionReducer(state: any, action: Action): any {
   switch (action.type) {
     case storeActions.ADD:
       return [...state, ...action.payload];
@@ -93,3 +121,12 @@ export function reducer(state: any, action: Action): any {
       return state;
   }
 }
+
+export function objectReducer(state: any, action: Action): any {
+  switch (action.type) {
+    case storeActions.SET:
+      return Object.assign({}, state, action.payload);
+    default:
+      return state;
+  }
+}
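
For reference, a minimal sketch of how a further collection-backed service could follow the CollectionModelService/collectionReducer pattern introduced above. ClustersService and the Cluster interface are hypothetical names used only for illustration; they are not part of this commit, and the 'clusters' key would still have to be added to AppStore and registered with StoreModule.provideStore().

    import {Injectable} from '@angular/core';
    import {Action, ActionReducer, Store} from '@ngrx/store';
    import {AppStore, CollectionModelService, collectionReducer} from '@app/models/store.model';

    // Hypothetical model, shown only to illustrate the pattern.
    export interface Cluster {
      name: string;
    }

    @Injectable()
    export class ClustersService extends CollectionModelService {
      constructor(store: Store<AppStore>) {
        super('clusters', store); // the store key selects the state slice handled by the reducer below
      }
    }

    export const clusters: ActionReducer<Cluster[]> = (state: Cluster[] = [], action: Action) => {
      return collectionReducer(state, action);
    };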

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/services/filtering.service.spec.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/filtering.service.spec.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/filtering.service.spec.ts
index d471e00..a8dc017 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/filtering.service.spec.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/filtering.service.spec.ts
@@ -17,13 +17,23 @@
  */
 
 import {TestBed, inject} from '@angular/core/testing';
+import {StoreModule} from '@ngrx/store';
+import {AppSettingsService, appSettings} from '@app/services/storage/app-settings.service';
 
 import {FilteringService} from './filtering.service';
 
 describe('FilteringService', () => {
   beforeEach(() => {
     TestBed.configureTestingModule({
-      providers: [FilteringService]
+      imports: [
+        StoreModule.provideStore({
+          appSettings
+        })
+      ],
+      providers: [
+        FilteringService,
+        AppSettingsService
+      ]
     });
   });
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/services/filtering.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/filtering.service.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/filtering.service.ts
index c4d2bdf..9f6b7dc 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/filtering.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/filtering.service.ts
@@ -19,13 +19,17 @@
 import {Injectable} from '@angular/core';
 import {Subject} from 'rxjs/Subject';
 import * as moment from 'moment-timezone';
+import {AppSettingsService} from '@app/services/storage/app-settings.service';
 
 @Injectable()
 export class FilteringService {
 
-  constructor() {
+  constructor(private appSettings: AppSettingsService) {
+    this.appSettings.getAll().subscribe(settings => this.timeZone = settings.timeZone);
   }
 
+  timeZone: string;
+
   // TODO implement loading of real options data
   filters = {
     clusters: {
@@ -57,7 +61,7 @@ export class FilteringService {
         }
       ],
       selectedValue: '',
-      selectedLabel: '',
+      selectedLabel: 'filter.all',
       paramName: 'clusters',
     },
     text: {
@@ -133,19 +137,23 @@ export class FilteringService {
           }
         }
       ],
-      selectedValue: '',
-      selectedLabel: ''
+      selectedValue: {
+        type: 'LAST',
+        unit: 'h',
+        interval: 1
+      },
+      selectedLabel: 'filter.timeRange.1hr'
     },
     timeZone: {
       options: moment.tz.names().map(zone => {
         // TODO map labels according to actual design requirements
         return {
-          label: `${zone} (${moment.tz(zone).format('Z')})`,
+          label: this.getTimeZoneLabel(zone),
           value: zone
         };
       }),
-      selectedValue: '',
-      selectedLabel: ''
+      selectedValue: moment.tz.guess(),
+      selectedLabel: this.getTimeZoneLabel(moment.tz.guess())
     },
     components: {
       label: 'filter.components',
@@ -173,7 +181,7 @@ export class FilteringService {
         }
       ],
       selectedValue: '',
-      selectedLabel: ''
+      selectedLabel: 'filter.all'
     },
     levels: {
       label: 'filter.levels',
@@ -213,7 +221,7 @@ export class FilteringService {
         }
       ],
       selectedValue: '',
-      selectedLabel: ''
+      selectedLabel: 'filter.all'
     }
   };
 
@@ -226,12 +234,10 @@ export class FilteringService {
             time = moment();
             break;
           case 'CURRENT':
-            // TODO consider user's timezone
-            time = moment().endOf(value.unit);
+            time = moment().tz(this.timeZone).endOf(value.unit);
             break;
           case 'PAST':
-            // TODO consider user's timezone
-            time = moment().startOf(value.unit).millisecond(-1);
+            time = moment().tz(this.timeZone).startOf(value.unit).millisecond(-1);
             break;
           default:
             break;
@@ -248,11 +254,9 @@ export class FilteringService {
             time = endTime.subtract(value.interval, value.unit);
             break;
           case 'CURRENT':
-            // TODO consider user's timezone
-            time = moment().startOf(value.unit);
+            time = moment().tz(this.timeZone).startOf(value.unit);
             break;
           case 'PAST':
-            // TODO consider user's timezone
             time = endTime.startOf(value.unit);
             break;
           default:
@@ -263,6 +267,21 @@ export class FilteringService {
     }
   };
 
+  getTimeZoneLabel(timeZone) {
+    return `${timeZone} (${moment.tz(timeZone).format('Z')})`;
+  }
+
+  valueHasChanged(currentValue: any, newValue: any): boolean {
+    if (newValue == null) {
+      return false;
+    }
+    if (typeof newValue === 'object') {
+      return JSON.stringify(currentValue) !== JSON.stringify(newValue);
+    } else {
+      return currentValue !== newValue;
+    }
+  }
+
   filteringSubject = new Subject();
 
 }
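
A short sketch of the time-range arithmetic used above, based only on the moment-timezone API. The guessed zone and the hard-coded "last 1 hour" range are assumptions for illustration; in the service the zone comes from AppSettingsService and the range from the selected filter value.

    import * as moment from 'moment-timezone';

    const timeZone = moment.tz.guess(); // in the service this is taken from appSettings.getAll()

    // 'LAST 1h' range: the end time is "now", the start time is one hour earlier.
    const endTime = moment();
    const startTime = endTime.clone().subtract(1, 'h');

    // 'CURRENT day' range: calendar-aligned boundaries evaluated in the user's time zone.
    const startOfToday = moment().tz(timeZone).startOf('d');
    const endOfToday = moment().tz(timeZone).endOf('d');

    console.log(startTime.valueOf(), endTime.valueOf(), startOfToday.format(), endOfToday.format());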

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/app-settings.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/app-settings.service.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/app-settings.service.ts
new file mode 100644
index 0000000..1c87a3c
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/app-settings.service.ts
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {Injectable} from '@angular/core';
+import {Action, ActionReducer, Store} from '@ngrx/store';
+import {AppSettings, defaultSettings} from '@app/models/app-settings.model';
+import {AppStore, ObjectModelService, objectReducer} from '@app/models/store.model';
+
+@Injectable()
+export class AppSettingsService extends ObjectModelService {
+  constructor(store: Store<AppStore>) {
+    super('appSettings', store);
+  }
+}
+
+export const appSettings: ActionReducer<AppSettings> = (state: AppSettings = defaultSettings, action: Action) => {
+  return objectReducer(state, action);
+}
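
A minimal usage sketch for the service above. TimeZoneExampleComponent is a hypothetical component, not part of the commit; it only demonstrates the getAll()/setParameter() calls that FilteringService and a settings UI would be expected to use.

    import {Component} from '@angular/core';
    import {AppSettingsService} from '@app/services/storage/app-settings.service';

    @Component({
      selector: 'time-zone-example', // hypothetical component, for illustration only
      template: '<span>{{currentTimeZone}}</span>'
    })
    export class TimeZoneExampleComponent {
      currentTimeZone: string;

      constructor(private appSettings: AppSettingsService) {
        // getAll() exposes the single settings object as an observable
        this.appSettings.getAll().subscribe(settings => this.currentTimeZone = settings.timeZone);
      }

      changeTimeZone(zone: string): void {
        // dispatches a SET action; objectReducer merges {timeZone: zone} into the current settings
        this.appSettings.setParameter('timeZone', zone);
      }
    }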

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/audit-logs.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/audit-logs.service.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/audit-logs.service.ts
index 706d2f3..7c322ed 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/audit-logs.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/audit-logs.service.ts
@@ -16,19 +16,18 @@
  * limitations under the License.
  */
 
-
 import {Injectable} from '@angular/core';
 import {Action, ActionReducer, Store} from '@ngrx/store';
 import {AuditLog} from '@app/models/audit-log.model';
-import {AppStore, ModelService, reducer} from '@app/models/store.model';
+import {AppStore, CollectionModelService, collectionReducer} from '@app/models/store.model';
 
 @Injectable()
-export class AuditLogsService extends ModelService {
+export class AuditLogsService extends CollectionModelService {
   constructor(store: Store<AppStore>) {
     super('auditLogs', store);
   }
 }
 
 export const auditLogs: ActionReducer<AuditLog[]> = (state: AuditLog[] = [], action: Action) => {
-  return reducer(state, action);
+  return collectionReducer(state, action);
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/bar-graphs.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/bar-graphs.service.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/bar-graphs.service.ts
index 2e2db5b..0109409 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/bar-graphs.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/bar-graphs.service.ts
@@ -16,19 +16,18 @@
  * limitations under the License.
  */
 
-
 import {Injectable} from '@angular/core';
 import {Action, ActionReducer, Store} from '@ngrx/store';
 import {BarGraph} from '@app/models/bar-graph.model';
-import {AppStore, ModelService, reducer} from '@app/models/store.model';
+import {AppStore, CollectionModelService, collectionReducer} from '@app/models/store.model';
 
 @Injectable()
-export class BarGraphsService extends ModelService {
+export class BarGraphsService extends CollectionModelService {
   constructor(store: Store<AppStore>) {
     super('barGraphs', store);
   }
 }
 
 export const barGraphs: ActionReducer<BarGraph[]> = (state: BarGraph[] = [], action: Action) => {
-  return reducer(state, action);
+  return collectionReducer(state, action);
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/filters.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/filters.service.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/filters.service.ts
index 99d2140..b8748e9 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/filters.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/filters.service.ts
@@ -20,15 +20,15 @@
 import {Injectable} from '@angular/core';
 import {Action, ActionReducer, Store} from '@ngrx/store';
 import {Filter} from '@app/models/filter.model';
-import {AppStore, ModelService, reducer} from '@app/models/store.model';
+import {AppStore, CollectionModelService, collectionReducer} from '@app/models/store.model';
 
 @Injectable()
-export class FiltersService extends ModelService {
+export class FiltersService extends CollectionModelService {
   constructor(store: Store<AppStore>) {
     super('filters', store);
   }
 }
 
 export const filters: ActionReducer<Filter[]> = (state: Filter[] = [], action: Action) => {
-  return reducer(state, action);
+  return collectionReducer(state, action);
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/graphs.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/graphs.service.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/graphs.service.ts
index 1c32d34..eda04ee 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/graphs.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/graphs.service.ts
@@ -20,15 +20,15 @@
 import {Injectable} from '@angular/core';
 import {Action, ActionReducer, Store} from '@ngrx/store';
 import {Graph} from '@app/models/graph.model';
-import {AppStore, ModelService, reducer} from '@app/models/store.model';
+import {AppStore, CollectionModelService, collectionReducer} from '@app/models/store.model';
 
 @Injectable()
-export class GraphsService extends ModelService {
+export class GraphsService extends CollectionModelService {
   constructor(store: Store<AppStore>) {
     super('graphs', store);
   }
 }
 
 export const graphs: ActionReducer<Graph[]> = (state: Graph[] = [], action: Action) => {
-  return reducer(state, action);
+  return collectionReducer(state, action);
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/nodes.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/nodes.service.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/nodes.service.ts
index b194f94..7b2e6e9 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/nodes.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/nodes.service.ts
@@ -20,15 +20,15 @@
 import {Injectable} from '@angular/core';
 import {Action, ActionReducer, Store} from '@ngrx/store';
 import {Node} from '@app/models/node.model';
-import {AppStore, ModelService, reducer} from '@app/models/store.model';
+import {AppStore, CollectionModelService, collectionReducer} from '@app/models/store.model';
 
 @Injectable()
-export class NodesService extends ModelService {
+export class NodesService extends CollectionModelService {
   constructor(store: Store<AppStore>) {
     super('nodes', store);
   }
 }
 
 export const nodes: ActionReducer<Node[]> = (state: Node[] = [], action: Action) => {
-  return reducer(state, action);
+  return collectionReducer(state, action);
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/service-logs.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/service-logs.service.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/service-logs.service.ts
index 6859653..ba277c4 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/service-logs.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/service-logs.service.ts
@@ -20,15 +20,15 @@
 import {Injectable} from '@angular/core';
 import {Action, ActionReducer, Store} from '@ngrx/store';
 import {ServiceLog} from '@app/models/service-log.model';
-import {AppStore, ModelService, reducer} from '@app/models/store.model';
+import {AppStore, CollectionModelService, collectionReducer} from '@app/models/store.model';
 
 @Injectable()
-export class ServiceLogsService extends ModelService {
+export class ServiceLogsService extends CollectionModelService {
   constructor(store: Store<AppStore>) {
     super('serviceLogs', store);
   }
 }
 
 export const serviceLogs: ActionReducer<ServiceLog[]> = (state: ServiceLog[] = [], action: Action) => {
-  return reducer(state, action);
+  return collectionReducer(state, action);
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/user-configs.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/user-configs.service.ts b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/user-configs.service.ts
index 2c770f0..b26485d 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/user-configs.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/app/services/storage/user-configs.service.ts
@@ -20,15 +20,15 @@
 import {Injectable} from '@angular/core';
 import {Action, ActionReducer, Store} from '@ngrx/store';
 import {UserConfig} from '@app/models/user-config.model';
-import {AppStore, ModelService, reducer} from '@app/models/store.model';
+import {AppStore, CollectionModelService, collectionReducer} from '@app/models/store.model';
 
 @Injectable()
-export class UserConfigsService extends ModelService {
+export class UserConfigsService extends CollectionModelService {
   constructor(store: Store<AppStore>) {
     super('userConfigs', store);
   }
 }
 
 export const userConfigs: ActionReducer<UserConfig[]> = (state: UserConfig[] = [], action: Action) => {
-  return reducer(state, action);
+  return collectionReducer(state, action);
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/src/assets/mock-data.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/src/assets/mock-data.ts b/ambari-logsearch/ambari-logsearch-web-new/src/assets/mock-data.ts
index d269bc3..732e8d3 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/src/assets/mock-data.ts
+++ b/ambari-logsearch/ambari-logsearch-web-new/src/assets/mock-data.ts
@@ -16,6 +16,8 @@
  * limitations under the License.
  */
 
+import * as moment from 'moment-timezone';
+
 export const mockData = {
   login: {},
   api: {
@@ -224,7 +226,7 @@ export const mockData = {
               path: '/var/log/ambari-metrics-collector/ambari-metrics-collector.log',
               host: 'h0',
               level: 'WARN',
-              logtime: 1497474000000,
+              logtime: moment().valueOf(),
               ip: '192.168.0.1',
               logfile_line_number: 8,
               type: 'ams_collector',
@@ -241,14 +243,14 @@ export const mockData = {
               event_md5: '1908755391',
               event_dur_ms: 200,
               _ttl_: "+5DAYS",
-              _expire_at_: 1497906000000,
+              _expire_at_: moment().add(5, 'd').valueOf(),
               _router_field_: 20
             },
             {
               path: '/var/log/ambari-metrics-collector/ambari-metrics-collector.log',
               host: 'h1',
               level: 'ERROR',
-              logtime: 1497387600000,
+              logtime: moment().subtract(2, 'd'),
               ip: '192.168.0.2',
               type: 'ams_collector',
               _version_: 14,
@@ -265,14 +267,14 @@ export const mockData = {
               event_md5: '1029384756',
               event_dur_ms: 700,
               _ttl_: "+5DAYS",
-              _expire_at_: 1497819600000,
+              _expire_at_: moment().add(3, 'd').valueOf(),
               _router_field_: 5
             },
             {
               path: '/var/log/ambari-metrics-collector/ambari-metrics-collector.log',
               host: 'h1',
               level: 'FATAL',
-              logtime: 1497042000000,
+              logtime: moment().subtract(10, 'd').valueOf(),
               ip: '192.168.0.3',
               type: 'ambari_agent',
               _version_: 14,
@@ -289,14 +291,14 @@ export const mockData = {
               event_md5: '67589403',
               event_dur_ms: 100,
               _ttl_: "+5DAYS",
-              _expire_at_: 1497474000000,
+              _expire_at_: moment().subtract(5, 'd').valueOf(),
               _router_field_: 45
             },
             {
               path: '/var/log/ambari-metrics-collector/zookeeper-server.log',
               host: 'h1',
               level: 'INFO',
-              logtime: 1497956919700,
+              logtime: moment().subtract(25, 'h').valueOf(),
               ip: '192.168.0.4',
               type: 'zookeeper_server',
               _version_: 14,
@@ -313,14 +315,14 @@ export const mockData = {
               event_md5: '67589403',
               event_dur_ms: 1000,
               _ttl_: "+5DAYS",
-              _expire_at_: 1497956939700,
+              _expire_at_: moment().subtract(25, 'h').add(5, 'd').valueOf(),
               _router_field_: 55
             },
             {
               path: '/var/log/ambari-metrics-collector/zookeeper-server.log',
               host: 'h1',
               level: 'DEBUG',
-              logtime: 1497956919700,
+              logtime: moment().subtract(25, 'd').valueOf(),
               ip: '192.168.0.4',
               type: 'zookeeper_server',
               _version_: 14,
@@ -337,14 +339,14 @@ export const mockData = {
               event_md5: '67589403',
               event_dur_ms: 1000,
               _ttl_: "+5DAYS",
-              _expire_at_: 1497956939700,
+              _expire_at_: moment().subtract(20, 'd').valueOf(),
               _router_field_: 55
             },
             {
               path: '/var/log/ambari-metrics-collector/zookeeper-client.log',
               host: 'h1',
               level: 'TRACE',
-              logtime: 1497956919700,
+              logtime: moment().subtract(2, 'h').valueOf(),
               ip: '192.168.0.4',
               type: 'zookeeper_client',
               _version_: 14,
@@ -361,14 +363,14 @@ export const mockData = {
               event_md5: '67589403',
               event_dur_ms: 1000,
               _ttl_: "+5DAYS",
-              _expire_at_: 1497956939700,
+              _expire_at_: moment().subtract(2, 'h').add(5, 'd').valueOf(),
               _router_field_: 55
             },
             {
               path: '/var/log/ambari-metrics-collector/zookeeper-client.log',
               host: 'h1',
               level: 'UNKNOWN',
-              logtime: 1497956919700,
+              logtime: moment().subtract(31, 'd').valueOf(),
               ip: '192.168.0.4',
               type: 'zookeeper_client',
               _version_: 14,
@@ -385,7 +387,7 @@ export const mockData = {
               event_md5: '67589403',
               event_dur_ms: 1000,
               _ttl_: "+5DAYS",
-              _expire_at_: 1497956939700,
+              _expire_at_: moment().subtract(26, 'd').valueOf(),
               _router_field_: 55
             }
           ],

http://git-wip-us.apache.org/repos/asf/ambari/blob/b7edc6cf/ambari-logsearch/ambari-logsearch-web-new/yarn.lock
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web-new/yarn.lock b/ambari-logsearch/ambari-logsearch-web-new/yarn.lock
index ff37374..6e592a7 100644
--- a/ambari-logsearch/ambari-logsearch-web-new/yarn.lock
+++ b/ambari-logsearch/ambari-logsearch-web-new/yarn.lock
@@ -240,6 +240,32 @@ angular-in-memory-web-api@^0.3.1:
   version "0.3.2"
   resolved "https://registry.yarnpkg.com/angular-in-memory-web-api/-/angular-in-memory-web-api-0.3.2.tgz#8836d9e2534d37b728f3cb5a1caf6fe1e7fbbecd"
 
+angular-moment-timezone@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/angular-moment-timezone/-/angular-moment-timezone-0.2.1.tgz#b2c1d9dd0e90558483b4da8db277bee4dbdbf6d1"
+  dependencies:
+    "@angular/common" "^4.0.0"
+    "@angular/compiler" "^4.0.0"
+    "@angular/core" "^4.0.0"
+    "@angular/forms" "^4.0.0"
+    "@angular/http" "^4.0.0"
+    "@angular/platform-browser" "^4.0.0"
+    "@angular/platform-browser-dynamic" "^4.0.0"
+    "@angular/router" "^4.0.0"
+    "@types/moment-timezone" "^0.2.34"
+    angular2-moment "^1.3.3"
+    core-js "^2.4.1"
+    moment "^2.18.1"
+    moment-timezone "^0.5.13"
+    rxjs "^5.1.0"
+    zone.js "^0.8.4"
+
+angular2-moment@^1.3.3, angular2-moment@^1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/angular2-moment/-/angular2-moment-1.4.0.tgz#3d59c1ebc28934fcfe9b888ab461e261724987e8"
+  dependencies:
+    moment "^2.16.0"
+
 ansi-align@^2.0.0:
   version "2.0.0"
   resolved "https://registry.yarnpkg.com/ansi-align/-/ansi-align-2.0.0.tgz#c36aeccba563b89ceb556f3690f0b1d9e3547f7f"
@@ -3045,7 +3071,7 @@ moment-timezone@^0.5.13:
   dependencies:
     moment ">= 2.9.0"
 
-moment@*, moment@2.18.1, "moment@>= 2.9.0", moment@>=2.14.0, moment@^2.18.1:
+moment@*, moment@2.18.1, "moment@>= 2.9.0", moment@>=2.14.0, moment@^2.16.0, moment@^2.18.1:
   version "2.18.1"
   resolved "https://registry.yarnpkg.com/moment/-/moment-2.18.1.tgz#c36193dd3ce1c2eed2adb7c802dbbc77a81b1c0f"
 


[56/63] [abbrv] ambari git commit: AMBARI-21363 ORA-00911 error during Ambari server schema upgrade due to incorrect syntax of Update statement (dgrinenko)

Posted by ab...@apache.org.
AMBARI-21363 ORA-00911 error during Ambari server schema upgrade due to incorrect syntax of Update statement (dgrinenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4cd31501
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4cd31501
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4cd31501

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 4cd3150111560a43dd8028e6f14b2abf753e3d8b
Parents: 09e5d41
Author: Dmytro Grinenko <ha...@apache.org>
Authored: Thu Jun 29 15:17:29 2017 +0300
Committer: Dmytro Grinenko <ha...@apache.org>
Committed: Thu Jun 29 15:17:29 2017 +0300

----------------------------------------------------------------------
 .../ambari/server/orm/DBAccessorImpl.java       | 43 +++++++++++---------
 .../orm/helpers/dbms/GenericDbmsHelper.java     |  2 +-
 2 files changed, 25 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4cd31501/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
index 83ea8e1..a88430b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/DBAccessorImpl.java
@@ -204,6 +204,27 @@ public class DBAccessorImpl implements DBAccessor {
     return objectName;
   }
 
+  /**
+   * Setting arguments for prepared statement
+   *
+   * @param preparedStatement {@link PreparedStatement} object
+   * @param arguments array of arguments
+   *
+   * @throws SQLException
+   */
+  private void setArgumentsForPreparedStatement(PreparedStatement preparedStatement, Object[] arguments) throws SQLException{
+    for (int i = 0; i < arguments.length; i++) {
+      if (arguments[i] instanceof byte[]) {
+        byte[] binaryData = (byte[]) arguments[i];
+
+        // JDBC drivers supports only this function signature
+        preparedStatement.setBinaryStream(i+1, new ByteArrayInputStream(binaryData), binaryData.length);
+      } else {
+        preparedStatement.setObject(i+1, arguments[i]);
+      }
+    }
+  }
+
   @Override
   public boolean tableExists(String tableName) throws SQLException {
     boolean result = false;
@@ -878,16 +899,8 @@ public class DBAccessorImpl implements DBAccessor {
     LOG.info("Executing prepared query: {}", query);
 
     PreparedStatement preparedStatement = getConnection().prepareStatement(query);
+    setArgumentsForPreparedStatement(preparedStatement, arguments);
 
-      for (int i = 0; i < arguments.length; i++) {
-        if (arguments[i] instanceof byte[]) {
-          byte[] binaryData = (byte[]) arguments[i];
-          // JDBC drivers supports only this function signature
-          preparedStatement.setBinaryStream(i+1, new ByteArrayInputStream(binaryData), binaryData.length);
-        } else {
-          preparedStatement.setObject(i+1, arguments[i]);
-        }
-      }
     try {
         preparedStatement.execute();
     } catch (SQLException e) {
@@ -908,7 +921,7 @@ public class DBAccessorImpl implements DBAccessor {
    {@inheritDoc}
    */
   public void executePreparedUpdate(String query, Object...arguments) throws SQLException {
-    executePreparedQuery(query, false, arguments);
+    executePreparedUpdate(query, false, arguments);
   }
 
   /**
@@ -918,16 +931,8 @@ public class DBAccessorImpl implements DBAccessor {
     LOG.info("Executing prepared query: {}", query);
 
     PreparedStatement preparedStatement = getConnection().prepareStatement(query);
+    setArgumentsForPreparedStatement(preparedStatement, arguments);
 
-    for (int i = 0; i <= arguments.length; i++) {
-      if (arguments[i] instanceof byte[]) {
-        byte[] binaryData = (byte[]) arguments[i];
-        // JDBC drivers supports only this function signature
-        preparedStatement.setBinaryStream(i+1, new ByteArrayInputStream(binaryData), binaryData.length);
-      } else {
-        preparedStatement.setObject(i+1, arguments[i]);
-      }
-    }
     try {
       preparedStatement.executeUpdate();
     } catch (SQLException e) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/4cd31501/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java
index 56274c5..e2a1f38 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/helpers/dbms/GenericDbmsHelper.java
@@ -280,7 +280,7 @@ public class GenericDbmsHelper implements DbmsHelper {
    */
   @Override
   public String getColumnUpdateStatementWhereColumnIsNull(String tableName, String setColumnName, String conditionColumnName){
-    return "UPDATE " + tableName + " SET " + setColumnName + "=? WHERE " + conditionColumnName + " IS NULL;";
+    return "UPDATE " + tableName + " SET " + setColumnName + "=? WHERE " + conditionColumnName + " IS NULL";
   }
 
   /**


[05/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog222Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog222Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog222Test.java
deleted file mode 100644
index 9611334..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog222Test.java
+++ /dev/null
@@ -1,1180 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.anyString;
-import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.createMockBuilder;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.createStrictMock;
-import static org.easymock.EasyMock.eq;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-import static org.easymock.EasyMock.verify;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.lang.reflect.Method;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-import javax.persistence.EntityManager;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.actionmanager.ActionManager;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.AmbariManagementControllerImpl;
-import org.apache.ambari.server.controller.KerberosHelper;
-import org.apache.ambari.server.controller.MaintenanceStateHelper;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
-import org.apache.ambari.server.orm.dao.StackDAO;
-import org.apache.ambari.server.orm.dao.WidgetDAO;
-import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
-import org.apache.ambari.server.orm.entities.StackEntity;
-import org.apache.ambari.server.orm.entities.WidgetEntity;
-import org.apache.ambari.server.stack.StackManagerFactory;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.ServiceComponentHost;
-import org.apache.ambari.server.state.ServiceInfo;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.StackInfo;
-import org.apache.ambari.server.state.stack.OsFamily;
-import org.apache.commons.io.FileUtils;
-import org.easymock.Capture;
-import org.easymock.CaptureType;
-import org.easymock.EasyMock;
-import org.easymock.EasyMockSupport;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Maps;
-import com.google.gson.Gson;
-import com.google.inject.AbstractModule;
-import com.google.inject.Binder;
-import com.google.inject.Guice;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.Module;
-import com.google.inject.Provider;
-import com.google.inject.persist.PersistService;
-
-public class UpgradeCatalog222Test {
-  private Injector injector;
-  private Provider<EntityManager> entityManagerProvider = createStrictMock(Provider.class);
-  private EntityManager entityManager = createNiceMock(EntityManager.class);
-
-  @Inject
-  private UpgradeCatalogHelper upgradeCatalogHelper;
-
-  private StackEntity desiredStackEntity;
-
-  @Rule
-  public TemporaryFolder temporaryFolder = new TemporaryFolder();
-
-  public UpgradeCatalog222Test(){
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-
-  }
-
-  @Before
-  public void init() {
-    reset(entityManagerProvider);
-    expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
-    replay(entityManagerProvider);
-
-    injector.getInstance(GuiceJpaInitializer.class);
-
-    // inject AmbariMetaInfo to ensure that stacks get populated in the DB
-    injector.getInstance(AmbariMetaInfo.class);
-
-    injector.injectMembers(this);
-
-    // load the stack entity
-    StackDAO stackDAO = injector.getInstance(StackDAO.class);
-    desiredStackEntity = stackDAO.find("HDP", "2.2.0");
-  }
-
-  @After
-  public void tearDown() {
-    injector.getInstance(PersistService.class).stop();
-  }
-
-  @Test
-  public void testExecuteDMLUpdates() throws Exception {
-    Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
-    Method updateAlerts = UpgradeCatalog222.class.getDeclaredMethod("updateAlerts");
-    Method updateStormConfigs = UpgradeCatalog222.class.getDeclaredMethod("updateStormConfigs");
-    Method updateAMSConfigs = UpgradeCatalog222.class.getDeclaredMethod("updateAMSConfigs");
-    Method updateHiveConfigs = UpgradeCatalog222.class.getDeclaredMethod("updateHiveConfig");
-    Method updateHostRoleCommands = UpgradeCatalog222.class.getDeclaredMethod("updateHostRoleCommands");
-    Method updateHDFSWidget = UpgradeCatalog222.class.getDeclaredMethod("updateHDFSWidgetDefinition");
-    Method updateYARNWidget = UpgradeCatalog222.class.getDeclaredMethod("updateYARNWidgetDefinition");
-    Method updateHBASEWidget = UpgradeCatalog222.class.getDeclaredMethod("updateHBASEWidgetDefinition");
-    Method updateHbaseEnvConfig = UpgradeCatalog222.class.getDeclaredMethod("updateHbaseEnvConfig");
-    Method updateCorruptedReplicaWidget = UpgradeCatalog222.class.getDeclaredMethod("updateCorruptedReplicaWidget");
-    Method createNewSliderConfigVersion = UpgradeCatalog222.class.getDeclaredMethod("createNewSliderConfigVersion");
-    Method updateZookeeperConfigs = UpgradeCatalog222.class.getDeclaredMethod("updateZookeeperConfigs");
-    Method updateHBASEConfigs = UpgradeCatalog222.class.getDeclaredMethod("updateHBASEConfigs");
-    Method initializeStromAnsKafkaWidgets = UpgradeCatalog222.class.getDeclaredMethod("initializeStromAndKafkaWidgets");
-
-    UpgradeCatalog222 upgradeCatalog222 = createMockBuilder(UpgradeCatalog222.class)
-      .addMockedMethod(addNewConfigurationsFromXml)
-      .addMockedMethod(updateAlerts)
-      .addMockedMethod(updateStormConfigs)
-      .addMockedMethod(updateAMSConfigs)
-      .addMockedMethod(updateHiveConfigs)
-      .addMockedMethod(updateHostRoleCommands)
-      .addMockedMethod(updateHDFSWidget)
-      .addMockedMethod(updateYARNWidget)
-      .addMockedMethod(updateHBASEWidget)
-      .addMockedMethod(updateHbaseEnvConfig)
-      .addMockedMethod(updateCorruptedReplicaWidget)
-      .addMockedMethod(createNewSliderConfigVersion)
-      .addMockedMethod(updateZookeeperConfigs)
-      .addMockedMethod(updateHBASEConfigs)
-      .addMockedMethod(initializeStromAnsKafkaWidgets)
-      .createMock();
-
-    upgradeCatalog222.addNewConfigurationsFromXml();
-    upgradeCatalog222.updateAlerts();
-    upgradeCatalog222.updateStormConfigs();
-    upgradeCatalog222.updateAMSConfigs();
-    upgradeCatalog222.updateHostRoleCommands();
-    upgradeCatalog222.updateHiveConfig();
-    upgradeCatalog222.updateHDFSWidgetDefinition();
-    upgradeCatalog222.updateHbaseEnvConfig();
-    upgradeCatalog222.updateYARNWidgetDefinition();
-    upgradeCatalog222.updateHBASEWidgetDefinition();
-    upgradeCatalog222.updateCorruptedReplicaWidget();
-    upgradeCatalog222.updateZookeeperConfigs();
-    upgradeCatalog222.updateHBASEConfigs();
-    upgradeCatalog222.createNewSliderConfigVersion();
-    upgradeCatalog222.initializeStromAndKafkaWidgets();
-
-    replay(upgradeCatalog222);
-
-    upgradeCatalog222.executeDMLUpdates();
-
-    verify(upgradeCatalog222);
-  }
-
-  @Test
-  public void testUpdateAlerts_ATSAlert() {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final AlertDefinitionDAO mockAlertDefinitionDAO = easyMockSupport.createNiceMock(AlertDefinitionDAO.class);
-    final AlertDefinitionEntity mockATSWebAlert = easyMockSupport.createNiceMock(AlertDefinitionEntity.class);
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-        bind(AlertDefinitionDAO.class).toInstance(mockAlertDefinitionDAO);
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    long clusterId = 1;
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getClusterId()).andReturn(clusterId).anyTimes();
-    expect(mockAlertDefinitionDAO.findByName(eq(clusterId), eq("yarn_app_timeline_server_webui")))
-            .andReturn(mockATSWebAlert).atLeastOnce();
-    expect(mockATSWebAlert.getSource()).andReturn("{\"uri\": {\n" +
-      "            \"http\": \"{{yarn-site/yarn.timeline-service.webapp.address}}/ws/v1/timeline\",\n" +
-      "            \"https\": \"{{yarn-site/yarn.timeline-service.webapp.https.address}}/ws/v1/timeline\" } }");
-
-    mockATSWebAlert.setSource("{\"uri\":{\"http\":\"{{yarn-site/yarn.timeline-service.webapp.address}}/ws/v1/timeline\",\"https\":\"{{yarn-site/yarn.timeline-service.webapp.https.address}}/ws/v1/timeline\"}}");
-    expectLastCall().once();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog222.class).updateAlerts();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testHiveSiteUpdateConfigs() throws AmbariException {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-
-    final Config hiveSiteConfigs = easyMockSupport.createNiceMock(Config.class);
-    final Config AtlasSiteConfigs = easyMockSupport.createNiceMock(Config.class);
-
-    final ServiceComponentHost atlasHost = easyMockSupport.createNiceMock(ServiceComponentHost.class);
-    final List<ServiceComponentHost> atlasHosts = new ArrayList<>();
-    atlasHosts.add(atlasHost);
-
-    StackId stackId = new StackId("HDP","2.3");
-
-    final Map<String, String> propertiesAtlasSiteConfigs = new HashMap<String, String>() {{
-      put("atlas.enableTLS", "true");
-      put("atlas.server.https.port", "21443");
-    }};
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-        bind(ServiceComponentHost.class).toInstance(atlasHost);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-//    expect(mockClusterExpected.getCurrentStackVersion()).andReturn(stackId).once();
-    expect(mockClusterExpected.getServiceComponentHosts("ATLAS", "ATLAS_SERVER")).andReturn(atlasHosts).once();
-//    expect(atlasHost.getHostName()).andReturn("c6401").once();
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("hive-site")).andReturn(hiveSiteConfigs).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("application-properties")).andReturn(AtlasSiteConfigs).anyTimes();
-    expect(mockClusterExpected.getServices()).andReturn(ImmutableMap.<String, Service>builder()
-        .put("ATLAS", easyMockSupport.createNiceMock(Service.class))
-        .build());
-
-    expect(AtlasSiteConfigs.getProperties()).andReturn(propertiesAtlasSiteConfigs).anyTimes();
-
-    UpgradeCatalog222 upgradeCatalog222 = createMockBuilder(UpgradeCatalog222.class)
-      .withConstructor(Injector.class)
-      .withArgs(mockInjector)
-      .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
-        Map.class, boolean.class, boolean.class)
-      .createMock();
-
-    Map<String, String> expectedUpdates = new HashMap<>();
-    expectedUpdates.put("atlas.hook.hive.minThreads", "1");
-    expectedUpdates.put("atlas.hook.hive.maxThreads", "1");
-    expectedUpdates.put("atlas.cluster.name", "primary");
-    expectedUpdates.put("atlas.rest.address", "https://c6401:21443");
-
-    upgradeCatalog222.updateConfigurationPropertiesForCluster(mockClusterExpected, "hive-site", expectedUpdates,
-      false, false);
-    expectLastCall().once();
-
-    easyMockSupport.replayAll();
-    replay(upgradeCatalog222);
-    upgradeCatalog222.updateHiveConfig();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testUpdateZookeeperConfigs() throws Exception{
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-
-    final Config zookeeperEnv = easyMockSupport.createNiceMock(Config.class);
-    expect(zookeeperEnv.getProperties()).andReturn(new HashMap<String, String>(){{
-      put("zk_server_heapsize", "1024");
-    }}
-    ).anyTimes();
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("zookeeper-env")).andReturn(zookeeperEnv).atLeastOnce();
-
-    UpgradeCatalog222 upgradeCatalog222 = createMockBuilder(UpgradeCatalog222.class)
-        .withConstructor(Injector.class)
-        .withArgs(mockInjector)
-        .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
-            Map.class, boolean.class, boolean.class)
-        .createMock();
-
-    Map<String, String> expectedUpdates = new HashMap<>();
-    expectedUpdates.put("zk_server_heapsize", "1024m");
-
-    upgradeCatalog222.updateConfigurationPropertiesForCluster(mockClusterExpected, "zookeeper-env", expectedUpdates,
-        true, false);
-    expectLastCall().once();
-
-    easyMockSupport.replayAll();
-    replay(upgradeCatalog222);
-    upgradeCatalog222.updateZookeeperConfigs();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testUpdateHBASEConfigs() throws Exception{
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-
-    final Config hbaseSite = easyMockSupport.createNiceMock(Config.class);
-    expect(hbaseSite.getProperties()).andReturn(new HashMap<String, String>(){{
-                                                     put(UpgradeCatalog222.HBASE_SITE_HBASE_COPROCESSOR_MASTER_CLASSES, "test1");
-                                                     put(UpgradeCatalog222.HBASE_SITE_HBASE_COPROCESSOR_REGION_CLASSES, "test2");
-                                                     put(UpgradeCatalog222.HBASE_SITE_HBASE_COPROCESSOR_REGIONSERVER_CLASSES, "test3");
-                                                   }}
-    ).anyTimes();
-
-    final Config rangerHbasePluginProperties = easyMockSupport.createNiceMock(Config.class);
-    expect(rangerHbasePluginProperties.getProperties()).andReturn(new HashMap<String, String>(){{
-                                                  put(AbstractUpgradeCatalog.PROPERTY_RANGER_HBASE_PLUGIN_ENABLED, "yes");
-                                                }}
-    ).anyTimes();
-
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).anyTimes();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-
-    UpgradeCatalog222 upgradeCatalog222 = createMockBuilder(UpgradeCatalog222.class)
-      .withConstructor(Injector.class)
-      .withArgs(mockInjector)
-      .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
-        Map.class, boolean.class, boolean.class)
-      .createStrictMock();
-
-    // CASE 1 - Ranger enabled, Cluster version is 2.2
-    Service hbaseService = easyMockSupport.createNiceMock(Service.class);
-    expect(hbaseService.getDesiredStackId()).andReturn(new StackId("HDP", "2.2")).anyTimes();
-
-//    expect(mockClusterExpected.getCurrentStackVersion()).andReturn(new StackId("HDP", "2.2")).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("hbase-site")).andReturn(hbaseSite).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType(AbstractUpgradeCatalog.CONFIGURATION_TYPE_RANGER_HBASE_PLUGIN_PROPERTIES)).
-      andReturn(rangerHbasePluginProperties).once();
-    expect(mockClusterExpected.getServices()).andReturn(ImmutableMap.<String, Service>builder()
-        .put("HBASE", hbaseService)
-        .build());
-
-    Map<String, String> expectedUpdates = new HashMap<>();
-    expectedUpdates.put(UpgradeCatalog222.HBASE_SITE_HBASE_COPROCESSOR_MASTER_CLASSES, "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor");
-    expectedUpdates.put(UpgradeCatalog222.HBASE_SITE_HBASE_COPROCESSOR_REGIONSERVER_CLASSES, "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor");
-    expectedUpdates.put(UpgradeCatalog222.HBASE_SITE_HBASE_COPROCESSOR_REGION_CLASSES,
-      "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint," +
-        "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor");
-
-    upgradeCatalog222.updateConfigurationPropertiesForCluster(mockClusterExpected, "hbase-site", expectedUpdates,
-      true, false);
-    expectLastCall().once();
-
-    easyMockSupport.replayAll();
-    upgradeCatalog222.updateHBASEConfigs();
-    easyMockSupport.verifyAll();
-
-    // CASE 2 - Ranger enabled, Cluster version is 2.3
-    reset(mockClusterExpected, upgradeCatalog222, hbaseService);
-
-
-    expect(hbaseService.getDesiredStackId()).andReturn(new StackId("HDP-2.3"));
-//    expect(mockClusterExpected.getCurrentStackVersion()).andReturn(new StackId("HDP", "2.3")).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("hbase-site")).andReturn(hbaseSite).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType(AbstractUpgradeCatalog.CONFIGURATION_TYPE_RANGER_HBASE_PLUGIN_PROPERTIES)).
-      andReturn(rangerHbasePluginProperties).once();
-    expect(mockClusterExpected.getServices()).andReturn(ImmutableMap.<String, Service>builder()
-        .put("HBASE", hbaseService)
-        .build());
-
-    expectedUpdates = new HashMap<>();
-    expectedUpdates.put(UpgradeCatalog222.HBASE_SITE_HBASE_COPROCESSOR_MASTER_CLASSES, "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor ");
-    expectedUpdates.put(UpgradeCatalog222.HBASE_SITE_HBASE_COPROCESSOR_REGIONSERVER_CLASSES, "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor");
-    expectedUpdates.put(UpgradeCatalog222.HBASE_SITE_HBASE_COPROCESSOR_REGION_CLASSES,
-      "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint," +
-        "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor");
-
-    upgradeCatalog222.updateConfigurationPropertiesForCluster(mockClusterExpected, "hbase-site", expectedUpdates,
-      true, false);
-    expectLastCall().once();
-
-    replay(mockClusterExpected, upgradeCatalog222, hbaseService);
-    upgradeCatalog222.updateHBASEConfigs();
-    easyMockSupport.verifyAll();
-
-    // CASE 3 - Ranger enabled, Cluster version is 2.1
-    reset(mockClusterExpected, upgradeCatalog222, hbaseService);
-    expect(hbaseService.getDesiredStackId()).andReturn(new StackId("HDP-2.1"));
-//    expect(mockClusterExpected.getCurrentStackVersion()).andReturn(new StackId("HDP", "2.1")).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("hbase-site")).andReturn(hbaseSite).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType(AbstractUpgradeCatalog.CONFIGURATION_TYPE_RANGER_HBASE_PLUGIN_PROPERTIES)).
-      andReturn(rangerHbasePluginProperties).once();
-    expect(mockClusterExpected.getServices()).andReturn(ImmutableMap.<String, Service>builder()
-        .put("HBASE", hbaseService)
-        .build());
-
-
-    replay(mockClusterExpected, upgradeCatalog222, hbaseService);
-    upgradeCatalog222.updateHBASEConfigs();
-    easyMockSupport.verifyAll();
-
-    // CASE 4 - Ranger disabled
-    reset(mockClusterExpected, upgradeCatalog222);
-    expect(mockClusterExpected.getServices()).andReturn(ImmutableMap.<String, Service>builder()
-        .put("HBASE", hbaseService)
-        .build());
-    expect(mockClusterExpected.getDesiredConfigByType("hbase-site")).andReturn(hbaseSite).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType(AbstractUpgradeCatalog.CONFIGURATION_TYPE_RANGER_HBASE_PLUGIN_PROPERTIES)).
-      andReturn(null).once();
-
-    replay(mockClusterExpected, upgradeCatalog222);
-    upgradeCatalog222.updateHBASEConfigs();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testAmsSiteUpdateConfigs() throws Exception{
-
-    Map<String, String> oldPropertiesAmsSite = new HashMap<String, String>() {
-      {
-        put("timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier", String.valueOf(1));
-        put("timeline.metrics.cluster.aggregator.daily.checkpointCutOffMultiplier", String.valueOf(1));
-        put("timeline.metrics.service.operation.mode", "distributed");
-        put("timeline.metrics.host.aggregator.ttl", String.valueOf(86400));
-        put("timeline.metrics.cluster.aggregator.second.ttl", String.valueOf(21600)); //Less than 1 day
-        put("timeline.metrics.cluster.aggregator.minute.ttl", String.valueOf(7776000));
-        put("timeline.metrics.service.webapp.address", "0.0.0.0:6188");
-        put("timeline.metrics.sink.collection.period", "60");
-      }
-    };
-    Map<String, String> newPropertiesAmsSite = new HashMap<String, String>() {
-      {
-        put("timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier", String.valueOf(2));
-        put("timeline.metrics.cluster.aggregator.daily.checkpointCutOffMultiplier", String.valueOf(2));
-        put("timeline.metrics.service.watcher.disabled", String.valueOf(false));
-        put("timeline.metrics.host.aggregator.ttl", String.valueOf(3 * 86400));
-        put("timeline.metrics.cluster.aggregator.second.ttl", String.valueOf(21600));
-        put("timeline.metrics.cluster.aggregator.minute.ttl", String.valueOf(30 * 86400));
-        put("timeline.metrics.service.operation.mode", "distributed");
-        put("timeline.metrics.service.webapp.address", "host1:6188");
-        put("timeline.metrics.cluster.aggregator.interpolation.enabled", String.valueOf(true));
-        put("timeline.metrics.sink.collection.period", "10");
-      }
-    };
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
-    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
-    Config mockAmsSite = easyMockSupport.createNiceMock(Config.class);
-
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).once();
-    expect(cluster.getDesiredConfigByType("ams-site")).andReturn(mockAmsSite).atLeastOnce();
-    expect(mockAmsSite.getProperties()).andReturn(oldPropertiesAmsSite).anyTimes();
-
-    Injector injector = easyMockSupport.createNiceMock(Injector.class);
-    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
-    expect(cluster.getHosts("AMBARI_METRICS", "METRICS_COLLECTOR")).andReturn( new HashSet<String>() {{
-      add("host1");
-    }}).atLeastOnce();
-
-    replay(injector, clusters, mockAmsSite, cluster);
-
-    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
-      .addMockedMethod("createConfiguration")
-      .addMockedMethod("getClusters", new Class[] { })
-      .addMockedMethod("createConfig")
-      .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
-      .createNiceMock();
-
-    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
-    Capture<Map<String, String>> propertiesCapture = EasyMock.newCapture();
-
-    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
-      EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
-
-    replay(controller, injector2);
-    new UpgradeCatalog222(injector2).updateAMSConfigs();
-    easyMockSupport.verifyAll();
-
-    Map<String, String> updatedProperties = propertiesCapture.getValue();
-    assertTrue(Maps.difference(newPropertiesAmsSite, updatedProperties).areEqual());
-  }
-
-  @Test
-  public void testAmsHbaseSiteUpdateConfigs() throws Exception{
-
-    Map<String, String> oldPropertiesAmsHbaseSite = new HashMap<String, String>() {
-      {
-        put("hbase.client.scanner.timeout.period", String.valueOf(900000));
-        put("phoenix.query.timeoutMs", String.valueOf(1200000));
-      }
-    };
-    Map<String, String> newPropertiesAmsHbaseSite = new HashMap<String, String>() {
-      {
-        put("hbase.client.scanner.timeout.period", String.valueOf(300000));
-        put("hbase.rpc.timeout", String.valueOf(300000));
-        put("phoenix.query.timeoutMs", String.valueOf(300000));
-        put("phoenix.query.keepAliveMs", String.valueOf(300000));
-      }
-    };
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
-    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
-    Config mockAmsHbaseSite = easyMockSupport.createNiceMock(Config.class);
-
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).once();
-    expect(cluster.getDesiredConfigByType("ams-hbase-site")).andReturn(mockAmsHbaseSite).atLeastOnce();
-    expect(mockAmsHbaseSite.getProperties()).andReturn(oldPropertiesAmsHbaseSite).anyTimes();
-
-    Injector injector = easyMockSupport.createNiceMock(Injector.class);
-    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
-
-    replay(injector, clusters, mockAmsHbaseSite, cluster);
-
-    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
-      .addMockedMethod("createConfiguration")
-      .addMockedMethod("getClusters", new Class[] { })
-      .addMockedMethod("createConfig")
-      .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
-      .createNiceMock();
-
-    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
-    Capture<Map<String, String>> propertiesCapture = EasyMock.newCapture();
-
-    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
-      EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
-
-    replay(controller, injector2);
-    new UpgradeCatalog222(injector2).updateAMSConfigs();
-    easyMockSupport.verifyAll();
-
-    Map<String, String> updatedProperties = propertiesCapture.getValue();
-    assertTrue(Maps.difference(newPropertiesAmsHbaseSite, updatedProperties).areEqual());
-  }
-
-  @Test
-  public void testHDFSWidgetUpdateWithOnlyZkService() throws Exception {
-    final Clusters clusters = createNiceMock(Clusters.class);
-    final Cluster cluster = createNiceMock(Cluster.class);
-    final AmbariManagementController controller = createNiceMock(AmbariManagementController.class);
-    final Gson gson = new Gson();
-    final WidgetDAO widgetDAO = createNiceMock(WidgetDAO.class);
-    final AmbariMetaInfo metaInfo = createNiceMock(AmbariMetaInfo.class);
-    StackInfo stackInfo = createNiceMock(StackInfo.class);
-    StackId stackId = new StackId("HDP", "2.0.0");
-
-    String widgetStr = "{\"layouts\":[{\"layout_name\":\"default_hdfs_dashboard\",\"display_name\":\"Standard HDFS Dashboard\",\"section_name\":\"HDFS_SUMMARY\",\"widgetLayoutInfo\":[{\"widget_name\":\"NameNode RPC\",\"metrics\":[],\"values\":[]}]}]}";
-
-    File dataDirectory = temporaryFolder.newFolder();
-    File file = new File(dataDirectory, "hdfs_widget.json");
-    FileUtils.writeStringToFile(file, widgetStr);
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(EntityManager.class).toInstance(createNiceMock(EntityManager.class));
-        bind(AmbariManagementController.class).toInstance(controller);
-        bind(Clusters.class).toInstance(clusters);
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        bind(Gson.class).toInstance(gson);
-        bind(WidgetDAO.class).toInstance(widgetDAO);
-        bind(StackManagerFactory.class).toInstance(createNiceMock(StackManagerFactory.class));
-        bind(AmbariMetaInfo.class).toInstance(metaInfo);
-      }
-    });
-
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).anyTimes();
-
-    Service hdfsService = createNiceMock(Service.class);
-    expect(hdfsService.getDesiredStackId()).andReturn(stackId).anyTimes();
-
-    expect(cluster.getServices()).andReturn(ImmutableMap.<String, Service> builder()
-        .put("HDFS", hdfsService)
-        .build()).anyTimes();
-    expect(cluster.getClusterId()).andReturn(1L).anyTimes();
-    expect(stackInfo.getService("HDFS")).andReturn(null);
-    expect(cluster.getDesiredStackVersion()).andReturn(stackId);
-    expect(metaInfo.getStack("HDP", "2.0.0")).andReturn(stackInfo);
-
-    replay(clusters, cluster, hdfsService, controller, widgetDAO, metaInfo, stackInfo);
-
-    UpgradeCatalog222 upgradeCatalog222 = createMockBuilder(UpgradeCatalog222.class)
-            .withConstructor(Injector.class)
-            .withArgs(mockInjector)
-            .createMock();
-    upgradeCatalog222.updateHDFSWidgetDefinition();
-
-  }
-
-  @Test
-  public void testHDFSWidgetUpdate() throws Exception {
-    final Clusters clusters = createNiceMock(Clusters.class);
-    final Cluster cluster = createNiceMock(Cluster.class);
-    final AmbariManagementController controller = createNiceMock(AmbariManagementController.class);
-    final Gson gson = new Gson();
-    final WidgetDAO widgetDAO = createNiceMock(WidgetDAO.class);
-    final AmbariMetaInfo metaInfo = createNiceMock(AmbariMetaInfo.class);
-    WidgetEntity widgetEntity = createNiceMock(WidgetEntity.class);
-    WidgetEntity widgetEntity2 = createNiceMock(WidgetEntity.class);
-    StackId stackId = new StackId("HDP", "2.0.0");
-    StackInfo stackInfo = createNiceMock(StackInfo.class);
-    ServiceInfo serviceInfo = createNiceMock(ServiceInfo.class);
-
-    String widgetStr = "{\n" +
-      "  \"layouts\": [\n" +
-      "    {\n" +
-      "      \"layout_name\": \"default_hdfs_dashboard\",\n" +
-      "      \"display_name\": \"Standard HDFS Dashboard\",\n" +
-      "      \"section_name\": \"HDFS_SUMMARY\",\n" +
-      "      \"widgetLayoutInfo\": [\n" +
-      "        {\n" +
-      "          \"widget_name\": \"NameNode RPC\",\n" +
-      "          \"metrics\": [],\n" +
-      "          \"values\": []\n" +
-      "        }\n" +
-      "      ]\n" +
-      "    },\n" +
-      "        {\n" +
-      "      \"layout_name\": \"default_hdfs_heatmap\",\n" +
-      "      \"display_name\": \"Standard HDFS HeatMaps\",\n" +
-      "      \"section_name\": \"HDFS_HEATMAPS\",\n" +
-      "      \"widgetLayoutInfo\": [\n" +
-      "        {\n" +
-      "          \"widget_name\": \"HDFS Bytes Read\",\n" +
-      "          \"metrics\": [],\n" +
-      "          \"values\": []\n" +
-      "        }\n" +
-      "      ]\n" +
-      "    }\n" +
-      "  ]\n" +
-      "}";
-
-    File dataDirectory = temporaryFolder.newFolder();
-    File file = new File(dataDirectory, "hdfs_widget.json");
-    FileUtils.writeStringToFile(file, widgetStr);
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(EntityManager.class).toInstance(createNiceMock(EntityManager.class));
-        bind(AmbariManagementController.class).toInstance(controller);
-        bind(Clusters.class).toInstance(clusters);
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        bind(Gson.class).toInstance(gson);
-        bind(WidgetDAO.class).toInstance(widgetDAO);
-        bind(StackManagerFactory.class).toInstance(createNiceMock(StackManagerFactory.class));
-        bind(AmbariMetaInfo.class).toInstance(metaInfo);
-      }
-    });
-
-    Service hdfsService = createNiceMock(Service.class);
-    expect(hdfsService.getDesiredStackId()).andReturn(stackId).anyTimes();
-    expect(cluster.getServices()).andReturn(ImmutableMap.<String, Service>builder()
-        .put("HDFS", hdfsService)
-        .build()).anyTimes();
-
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).anyTimes();
-    expect(cluster.getClusterId()).andReturn(1L).anyTimes();
-    expect(stackInfo.getService("HDFS")).andReturn(serviceInfo);
-    expect(cluster.getDesiredStackVersion()).andReturn(stackId).anyTimes();
-    expect(metaInfo.getStack("HDP", "2.0.0")).andReturn(stackInfo).anyTimes();
-    expect(serviceInfo.getWidgetsDescriptorFile()).andReturn(file).anyTimes();
-
-    expect(widgetDAO.findByName(1L, "NameNode RPC", "ambari", "HDFS_SUMMARY"))
-      .andReturn(Collections.singletonList(widgetEntity));
-    expect(widgetDAO.merge(widgetEntity)).andReturn(null);
-    expect(widgetEntity.getWidgetName()).andReturn("Namenode RPC").anyTimes();
-
-    expect(widgetDAO.findByName(1L, "HDFS Bytes Read", "ambari", "HDFS_HEATMAPS"))
-      .andReturn(Collections.singletonList(widgetEntity2));
-    expect(widgetDAO.merge(widgetEntity2)).andReturn(null);
-    expect(widgetEntity2.getWidgetName()).andReturn("HDFS Bytes Read").anyTimes();
-
-    replay(clusters, cluster, hdfsService, controller, widgetDAO, metaInfo, widgetEntity, widgetEntity2, stackInfo, serviceInfo);
-
-    mockInjector.getInstance(UpgradeCatalog222.class).updateHDFSWidgetDefinition();
-
-    verify(clusters, cluster, controller, widgetDAO, widgetEntity, widgetEntity2, stackInfo, serviceInfo);
-  }
-
-  @Test
-  public void testYARNWidgetUpdate() throws Exception {
-    final Clusters clusters = createNiceMock(Clusters.class);
-    final Cluster cluster = createNiceMock(Cluster.class);
-    final AmbariManagementController controller = createNiceMock(AmbariManagementController.class);
-    final Gson gson = new Gson();
-    final WidgetDAO widgetDAO = createNiceMock(WidgetDAO.class);
-    final AmbariMetaInfo metaInfo = createNiceMock(AmbariMetaInfo.class);
-    WidgetEntity widgetEntity = createNiceMock(WidgetEntity.class);
-    WidgetEntity widgetEntity2 = createNiceMock(WidgetEntity.class);
-    StackId stackId = new StackId("HDP", "2.0.0");
-    StackInfo stackInfo = createNiceMock(StackInfo.class);
-    ServiceInfo serviceInfo = createNiceMock(ServiceInfo.class);
-
-    String widgetStr = "{\n" +
-      "  \"layouts\": [\n" +
-      "    {\n" +
-      "      \"layout_name\": \"default_yarn_dashboard\",\n" +
-      "      \"display_name\": \"Standard YARN Dashboard\",\n" +
-      "      \"section_name\": \"YARN_SUMMARY\",\n" +
-      "      \"widgetLayoutInfo\": [\n" +
-      "        {\n" +
-      "          \"widget_name\": \"Container Failures\",\n" +
-      "          \"metrics\": [],\n" +
-      "          \"values\": []\n" +
-      "        }\n" +
-      "      ]\n" +
-      "    },\n" +
-      "        {\n" +
-      "      \"layout_name\": \"default_yarn_heatmap\",\n" +
-      "      \"display_name\": \"Standard YARN HeatMaps\",\n" +
-      "      \"section_name\": \"YARN_HEATMAPS\",\n" +
-      "      \"widgetLayoutInfo\": [\n" +
-      "        {\n" +
-      "          \"widget_name\": \"Container Failures\",\n" +
-      "          \"metrics\": [],\n" +
-      "          \"values\": []\n" +
-      "        }\n" +
-      "      ]\n" +
-      "    }\n" +
-      "  ]\n" +
-      "}";
-
-    File dataDirectory = temporaryFolder.newFolder();
-    File file = new File(dataDirectory, "yarn_widget.json");
-    FileUtils.writeStringToFile(file, widgetStr);
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(EntityManager.class).toInstance(createNiceMock(EntityManager.class));
-        bind(AmbariManagementController.class).toInstance(controller);
-        bind(Clusters.class).toInstance(clusters);
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        bind(Gson.class).toInstance(gson);
-        bind(WidgetDAO.class).toInstance(widgetDAO);
-        bind(StackManagerFactory.class).toInstance(createNiceMock(StackManagerFactory.class));
-        bind(AmbariMetaInfo.class).toInstance(metaInfo);
-      }
-    });
-
-    Service yarnService = createNiceMock(Service.class);
-    expect(yarnService.getDesiredStackId()).andReturn(stackId);
-    expect(cluster.getServices()).andReturn(ImmutableMap.<String, Service>builder()
-        .put("YARN", yarnService)
-        .build());
-
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).anyTimes();
-    expect(cluster.getClusterId()).andReturn(1L).anyTimes();
-    expect(stackInfo.getService("YARN")).andReturn(serviceInfo);
-    expect(cluster.getDesiredStackVersion()).andReturn(stackId).anyTimes();
-    expect(metaInfo.getStack("HDP", "2.0.0")).andReturn(stackInfo).anyTimes();
-    expect(serviceInfo.getWidgetsDescriptorFile()).andReturn(file).anyTimes();
-
-    expect(widgetDAO.findByName(1L, "Container Failures", "ambari", "YARN_SUMMARY"))
-      .andReturn(Collections.singletonList(widgetEntity));
-    expect(widgetDAO.merge(widgetEntity)).andReturn(null);
-    expect(widgetEntity.getWidgetName()).andReturn("Container Failures").anyTimes();
-
-    expect(widgetDAO.findByName(1L, "Container Failures", "ambari", "YARN_HEATMAPS"))
-      .andReturn(Collections.singletonList(widgetEntity2));
-    expect(widgetDAO.merge(widgetEntity2)).andReturn(null);
-    expect(widgetEntity2.getWidgetName()).andReturn("Container Failures").anyTimes();
-
-    replay(clusters, cluster, yarnService, controller, widgetDAO, metaInfo, widgetEntity, widgetEntity2, stackInfo, serviceInfo);
-
-    mockInjector.getInstance(UpgradeCatalog222.class).updateYARNWidgetDefinition();
-
-    verify(clusters, cluster, controller, widgetDAO, widgetEntity, widgetEntity2, stackInfo, serviceInfo);
-  }
-
-
-  @Test
-  public void testHBASEWidgetUpdate() throws Exception {
-    final Clusters clusters = createNiceMock(Clusters.class);
-    final Cluster cluster = createNiceMock(Cluster.class);
-    final AmbariManagementController controller = createNiceMock(AmbariManagementController.class);
-    final Gson gson = new Gson();
-    final WidgetDAO widgetDAO = createNiceMock(WidgetDAO.class);
-    final AmbariMetaInfo metaInfo = createNiceMock(AmbariMetaInfo.class);
-    WidgetEntity widgetEntity = createNiceMock(WidgetEntity.class);
-    StackId stackId = new StackId("HDP", "2.0.0");
-    StackInfo stackInfo = createNiceMock(StackInfo.class);
-    ServiceInfo serviceInfo = createNiceMock(ServiceInfo.class);
-
-    String widgetStr = "{\n" +
-      "  \"layouts\": [\n" +
-      "    {\n" +
-      "      \"layout_name\": \"default_hbase_dashboard\",\n" +
-      "      \"display_name\": \"Standard HBASE Dashboard\",\n" +
-      "      \"section_name\": \"HBASE_SUMMARY\",\n" +
-      "      \"widgetLayoutInfo\": [\n" +
-      "        {\n" +
-      "          \"widget_name\": \"Blocked Updates\",\n" +
-      "          \"metrics\": [],\n" +
-      "          \"values\": []\n" +
-      "        }\n" +
-      "      ]\n" +
-      "    } " +
-      "]\n" +
-      "}";
-
-    File dataDirectory = temporaryFolder.newFolder();
-    File file = new File(dataDirectory, "hbase_widget.json");
-    FileUtils.writeStringToFile(file, widgetStr);
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(EntityManager.class).toInstance(createNiceMock(EntityManager.class));
-        bind(AmbariManagementController.class).toInstance(controller);
-        bind(Clusters.class).toInstance(clusters);
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        bind(Gson.class).toInstance(gson);
-        bind(WidgetDAO.class).toInstance(widgetDAO);
-        bind(StackManagerFactory.class).toInstance(createNiceMock(StackManagerFactory.class));
-        bind(AmbariMetaInfo.class).toInstance(metaInfo);
-      }
-    });
-
-    Service hbaseService = createNiceMock(Service.class);
-    expect(hbaseService.getDesiredStackId()).andReturn(stackId);
-    expect(cluster.getServices()).andReturn(ImmutableMap.<String, Service>builder()
-        .put("HBASE", hbaseService)
-        .build());
-
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).anyTimes();
-    expect(cluster.getClusterId()).andReturn(1L).anyTimes();
-    expect(stackInfo.getService("HBASE")).andReturn(serviceInfo);
-    expect(cluster.getDesiredStackVersion()).andReturn(stackId).anyTimes();
-    expect(metaInfo.getStack("HDP", "2.0.0")).andReturn(stackInfo).anyTimes();
-    expect(serviceInfo.getWidgetsDescriptorFile()).andReturn(file).anyTimes();
-
-    expect(widgetDAO.findByName(1L, "Blocked Updates", "ambari", "HBASE_SUMMARY"))
-      .andReturn(Collections.singletonList(widgetEntity));
-    expect(widgetDAO.merge(widgetEntity)).andReturn(null);
-    expect(widgetEntity.getWidgetName()).andReturn("Blocked Updates").anyTimes();
-
-    replay(clusters, cluster, hbaseService, controller, widgetDAO, metaInfo, widgetEntity, stackInfo, serviceInfo);
-
-    mockInjector.getInstance(UpgradeCatalog222.class).updateHBASEWidgetDefinition();
-
-    verify(clusters, cluster, controller, widgetDAO, widgetEntity, stackInfo, serviceInfo);
-  }
-
-  @Test
-  public void testGetUpdatedHbaseEnvProperties_BadConfig() {
-    String badContent = "export HBASE_HEAPSIZE=1000;\n\n" +
-            "export HBASE_OPTS=\"-Djava.io.tmpdir={{java_io_tmpdir}}\"\n\n" +
-            "export HBASE_LOG_DIR={{log_dir}}";
-    String expectedContent = "export HBASE_HEAPSIZE=1000;\n\n" +
-            "export HBASE_OPTS=\"${HBASE_OPTS} -Djava.io.tmpdir={{java_io_tmpdir}}\"\n\n" +
-            "export HBASE_LOG_DIR={{log_dir}}";
-    testGetUpdatedHbaseEnvProperties(badContent, expectedContent);
-  }
-
-  @Test
-  public void testGetUpdatedHbaseEnvProperties_GoodConfig() {
-
-    String goodContent = "export HBASE_HEAPSIZE=1000;\n\n" +
-            "export HBASE_OPTS=\"${HBASE_OPTS} -Djava.io.tmpdir={{java_io_tmpdir}}\"\n\n" +
-            "export HBASE_LOG_DIR={{log_dir}}";
-    testGetUpdatedHbaseEnvProperties(goodContent, null);
-  }
-
-  @Test
-  public void testGetUpdatedHbaseEnvProperties_NoConfig() {
-    String content = "export HBASE_HEAPSIZE=1000;\n\n" +
-            "export HBASE_LOG_DIR={{log_dir}}";
-    testGetUpdatedHbaseEnvProperties(content, null);
-  }
-
-  private void testGetUpdatedHbaseEnvProperties(String content, String expectedContent) {
-    Module module = new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    };
-
-    Injector injector = Guice.createInjector(module);
-    UpgradeCatalog222 upgradeCatalog222 = injector.getInstance(UpgradeCatalog222.class);
-    Map<String, String> update = upgradeCatalog222.getUpdatedHbaseEnvProperties(content);
-    assertEquals(expectedContent, update.get("content"));
-  }
-
-  @Test
-  public void testUpdateHostRoleCommands() throws Exception {
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    dbAccessor.createIndex(eq("idx_hrc_status_role"), eq("host_role_command"), eq("status"), eq("role"));
-    expectLastCall().once();
-
-    replay(dbAccessor);
-
-    Module module = new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(DBAccessor.class).toInstance(dbAccessor);
-        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    };
-
-    Injector injector = Guice.createInjector(module);
-    UpgradeCatalog222 upgradeCatalog222 = injector.getInstance(UpgradeCatalog222.class);
-    upgradeCatalog222.updateHostRoleCommands();
-
-
-    verify(dbAccessor);
-  }
-
-  @Test
-  public void testUpdateAlerts_AtlasAlert() {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final AlertDefinitionDAO mockAlertDefinitionDAO = easyMockSupport.createNiceMock(AlertDefinitionDAO.class);
-    final AlertDefinitionEntity atlasMetadataServerWebUIMock = easyMockSupport.createNiceMock(AlertDefinitionEntity.class);
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-        bind(AlertDefinitionDAO.class).toInstance(mockAlertDefinitionDAO);
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    long clusterId = 1;
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getClusterId()).andReturn(clusterId).anyTimes();
-    expect(mockAlertDefinitionDAO.findByName(eq(clusterId), eq("metadata_server_webui")))
-            .andReturn(atlasMetadataServerWebUIMock).atLeastOnce();
-    expect(atlasMetadataServerWebUIMock.getSource()).andReturn("{\"uri\": {\n" +
-            "            \"http\": \"{{hostname}}:{{application-properties/atlas.server.http.port}}\",\n" +
-            "            \"https\": \"{{hostname}}:{{application-properties/atlas.server.https.port}}\" } }");
-
-    atlasMetadataServerWebUIMock.setSource("{\"uri\":{\"http\":\"{{application-properties/atlas.server.http.port}}\",\"https\":\"{{application-properties/atlas.server.https.port}}\"}}");
-    expectLastCall().once();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog222.class).updateAlerts();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testUpdateCorruptedReplicaWidget() throws SQLException{
-    final DBAccessor dbAccessor = createStrictMock(DBAccessor.class);
-    Module module = new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(DBAccessor.class).toInstance(dbAccessor);
-        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        binder.bind(EntityManager.class).toInstance(entityManager);
-      }
-    };
-
-    Injector injector = Guice.createInjector(module);
-
-    String expectedWidgetUpdate = "UPDATE widget SET widget_name='%s', description='%s', " +
-      "widget_values='[{\"name\": \"%s\", \"value\": \"%s\"}]' WHERE widget_name='%s'";
-    Capture<String> capturedStatements = Capture.newInstance(CaptureType.ALL);
-
-    expect(dbAccessor.executeUpdate(capture(capturedStatements))).andReturn(1);
-
-    UpgradeCatalog222 upgradeCatalog222 = injector.getInstance(UpgradeCatalog222.class);
-    replay(dbAccessor);
-
-    upgradeCatalog222.updateCorruptedReplicaWidget();
-
-    List<String> statements = capturedStatements.getValues();
-
-    assertTrue(statements.contains(String.format(expectedWidgetUpdate,
-      UpgradeCatalog222.WIDGET_CORRUPT_REPLICAS,
-      UpgradeCatalog222.WIDGET_CORRUPT_REPLICAS_DESCRIPTION,
-      UpgradeCatalog222.WIDGET_CORRUPT_REPLICAS,
-      UpgradeCatalog222.WIDGET_VALUES_VALUE,
-      UpgradeCatalog222.WIDGET_CORRUPT_BLOCKS)));
-
-  }
-
-  @Test
-  public void testCreateNewSliderConfigVersion() throws AmbariException {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Service mockSliderService = easyMockSupport.createNiceMock(Service.class);
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getService("SLIDER")).andReturn(mockSliderService);
-    expect(mockClusterExpected.createServiceConfigVersion("SLIDER", "ambari-upgrade", "Creating new service config version for SLIDER service.", null)).andReturn(null).once();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog222.class).createNewSliderConfigVersion();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testInitializeStromAndKafkaWidgets() throws AmbariException {
-
-    String stormServiceName = "STORM";
-    String kafkaServiceName = "KAFKA";
-    String hbaseServiceName = "HBASE";
-
-    final AmbariManagementController controller = createStrictMock(AmbariManagementController.class);
-    final Clusters clusters = createStrictMock(Clusters.class);
-    final Cluster cluster = createStrictMock(Cluster.class);
-    final Service stormService = createStrictMock(Service.class);
-    final Service kafkaService = createStrictMock(Service.class);
-    final Service hbaseService = createStrictMock(Service.class);
-    final Map<String, Cluster> clusterMap = Collections.singletonMap("c1", cluster);
-    // Use a TreeMap so we can assume a particular order when iterating over the services.
-    final Map<String, Service> services = new TreeMap<>();
-    services.put(stormServiceName, stormService);
-    services.put(kafkaServiceName, kafkaService);
-    services.put(hbaseServiceName, hbaseService);
-
-
-    Module module = new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(AmbariManagementController.class).toInstance(controller);
-        binder.bind(Clusters.class).toInstance(clusters);
-        binder.bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    };
-
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(clusters.getClusters()).andReturn(clusterMap).anyTimes();
-
-    expect(cluster.getServices()).andReturn(services).once();
-    expect(stormService.getName()).andReturn(stormServiceName).atLeastOnce();
-    expect(kafkaService.getName()).andReturn(kafkaServiceName).atLeastOnce();
-    expect(hbaseService.getName()).andReturn(hbaseServiceName).atLeastOnce();
-
-    controller.initializeWidgetsAndLayouts(cluster, kafkaService);
-    expectLastCall().once();
-    controller.initializeWidgetsAndLayouts(cluster, stormService);
-    expectLastCall().once();
-    // but no controller call for HBase
-
-    replay(controller, clusters, cluster, stormService, kafkaService, hbaseService);
-
-    Injector injector = Guice.createInjector(module);
-    injector.getInstance(UpgradeCatalog222.class).initializeStromAndKafkaWidgets();
-
-    verify(controller, clusters, cluster, stormService, kafkaService, hbaseService);
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog230Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog230Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog230Test.java
deleted file mode 100644
index 66eb0ed..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog230Test.java
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.upgrade;
-
-import static org.easymock.EasyMock.anyString;
-import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.eq;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.lang.reflect.Field;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
-import javax.persistence.EntityManager;
-
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.dao.DaoUtils;
-import org.apache.ambari.server.orm.dao.PermissionDAO;
-import org.apache.ambari.server.orm.dao.ResourceTypeDAO;
-import org.apache.ambari.server.orm.dao.RoleAuthorizationDAO;
-import org.apache.ambari.server.orm.entities.PermissionEntity;
-import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
-import org.apache.ambari.server.orm.entities.RoleAuthorizationEntity;
-import org.apache.ambari.server.state.stack.OsFamily;
-import org.easymock.Capture;
-import org.easymock.EasyMock;
-import org.easymock.EasyMockSupport;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.google.inject.Binder;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.Module;
-
-/**
- * UpgradeCatalog230 tests.
- */
-public class UpgradeCatalog230Test extends EasyMockSupport {
-
-  private Injector injector;
-
-  @Before
-  public void setup() {
-    resetAll();
-
-    Module module = new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(DBAccessor.class).toInstance(createMock(DBAccessor.class));
-        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        binder.bind(EntityManager.class).toInstance(createNiceMock(EntityManager.class));
-        binder.bind(DaoUtils.class).toInstance(createNiceMock(DaoUtils.class));
-        binder.bind(PermissionDAO.class).toInstance(createMock(PermissionDAO.class));
-        binder.bind(ResourceTypeDAO.class).toInstance(createMock(ResourceTypeDAO.class));
-        binder.bind(RoleAuthorizationDAO.class).toInstance(createMock(RoleAuthorizationDAO.class));
-      }
-    };
-
-    injector = Guice.createInjector(module);
-  }
-
-  @Test
-  public void testExecuteDDLUpdates() throws Exception {
-
-    final DBAccessor dbAccessor = injector.getInstance(DBAccessor.class);
-
-    Configuration configuration = createNiceMock(Configuration.class);
-    expect(configuration.getDatabaseUrl()).andReturn(Configuration.JDBC_IN_MEMORY_URL).anyTimes();
-
-    Capture<DBAccessor.DBColumnInfo> columnCapture = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> columnCaptureUserType = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> columnCapturePermissionLabel = EasyMock.newCapture();
-    Capture<List<DBAccessor.DBColumnInfo>> columnsCaptureRoleAuthorization = EasyMock.newCapture();
-    Capture<List<DBAccessor.DBColumnInfo>> columnsCapturePermissionRoleAuthorization = EasyMock.newCapture();
-
-    dbAccessor.alterColumn(eq("host_role_command"), capture(columnCapture));
-    expectLastCall();
-
-    dbAccessor.executeQuery("UPDATE users SET user_type='LDAP' WHERE ldap_user=1");
-    expectLastCall();
-
-    dbAccessor.addUniqueConstraint("users", "UNQ_users_0", "user_name", "user_type");
-    expectLastCall();
-
-    dbAccessor.addColumn(eq("users"), capture(columnCaptureUserType));
-    expectLastCall();
-
-    dbAccessor.addColumn(eq("adminpermission"), capture(columnCapturePermissionLabel));
-    expectLastCall();
-
-    dbAccessor.createTable(eq("roleauthorization"), capture(columnsCaptureRoleAuthorization), eq("authorization_id"));
-    expectLastCall();
-
-    dbAccessor.createTable(eq("permission_roleauthorization"), capture(columnsCapturePermissionRoleAuthorization), eq("permission_id"), eq("authorization_id"));
-    expectLastCall();
-
-    dbAccessor.addFKConstraint("permission_roleauthorization", "FK_permission_roleauth_pid",
-        "permission_id", "adminpermission", "permission_id", false);
-    expectLastCall();
-
-    dbAccessor.addFKConstraint("permission_roleauthorization", "FK_permission_roleauth_aid",
-        "authorization_id", "roleauthorization", "authorization_id", false);
-    expectLastCall();
-
-    replayAll();
-    AbstractUpgradeCatalog upgradeCatalog = injector.getInstance(UpgradeCatalog230.class);
-    Class<?> c = AbstractUpgradeCatalog.class;
-    Field f = c.getDeclaredField("configuration");
-    f.setAccessible(true);
-    f.set(upgradeCatalog, configuration);
-
-    upgradeCatalog.executeDDLUpdates();
-    verifyAll();
-
-    assertTrue(columnCapture.getValue().isNullable());
-
-    assertEquals(columnCaptureUserType.getValue().getName(), "user_type");
-    assertEquals(columnCaptureUserType.getValue().getType(), String.class);
-    assertEquals(columnCaptureUserType.getValue().getLength(), null);
-    assertEquals(columnCaptureUserType.getValue().getDefaultValue(), "LOCAL");
-    assertEquals(columnCaptureUserType.getValue().isNullable(), true);
-
-    assertEquals(columnCapturePermissionLabel.getValue().getName(), "permission_label");
-    assertEquals(columnCapturePermissionLabel.getValue().getType(), String.class);
-    assertEquals(columnCapturePermissionLabel.getValue().getLength(), Integer.valueOf(255));
-    assertEquals(columnCapturePermissionLabel.getValue().isNullable(), true);
-
-    List<DBAccessor.DBColumnInfo> columnInfos;
-    DBAccessor.DBColumnInfo columnInfo;
-
-    // Verify roleauthorization table
-    columnInfos = columnsCaptureRoleAuthorization.getValue();
-    assertEquals(2, columnInfos.size());
-
-    columnInfo = columnInfos.get(0);
-    assertEquals("authorization_id", columnInfo.getName());
-    assertEquals(String.class, columnInfo.getType());
-    assertEquals(Integer.valueOf(100), columnInfo.getLength());
-
-    columnInfo = columnInfos.get(1);
-    assertEquals("authorization_name", columnInfo.getName());
-    assertEquals(String.class, columnInfo.getType());
-    assertEquals(Integer.valueOf(255), columnInfo.getLength());
-
-    // Verify permission_roleauthorization table
-    columnInfos = columnsCapturePermissionRoleAuthorization.getValue();
-    assertEquals(2, columnInfos.size());
-
-    columnInfo = columnInfos.get(0);
-    assertEquals("permission_id", columnInfo.getName());
-    assertEquals(Long.class, columnInfo.getType());
-    assertEquals(null, columnInfo.getLength());
-
-    columnInfo = columnInfos.get(1);
-    assertEquals("authorization_id", columnInfo.getName());
-    assertEquals(String.class, columnInfo.getType());
-    assertEquals(Integer.valueOf(100), columnInfo.getLength());
-  }
-
-  @Test
-  public void testExecuteDMLUpdates() throws Exception {
-    final DBAccessor dbAccessor = injector.getInstance(DBAccessor.class);
-    UpgradeCatalog230 upgradeCatalog = injector.getInstance(UpgradeCatalog230.class);
-
-    final ResourceTypeEntity ambariResourceTypeEntity = createMock(ResourceTypeEntity.class);
-    expect(ambariResourceTypeEntity.getId()).andReturn(1).anyTimes();
-
-    final ResourceTypeEntity clusterResourceTypeEntity = createMock(ResourceTypeEntity.class);
-    expect(clusterResourceTypeEntity.getId()).andReturn(2).anyTimes();
-
-    final ResourceTypeEntity viewResourceTypeEntity = createMock(ResourceTypeEntity.class);
-    expect(viewResourceTypeEntity.getId()).andReturn(3).anyTimes();
-
-    final ResourceTypeDAO resourceTypeDAO = injector.getInstance(ResourceTypeDAO.class);
-    expect(resourceTypeDAO.findByName("AMBARI")).andReturn(ambariResourceTypeEntity).anyTimes();
-    expect(resourceTypeDAO.findByName("CLUSTER")).andReturn(clusterResourceTypeEntity).anyTimes();
-    expect(resourceTypeDAO.findByName("VIEW")).andReturn(viewResourceTypeEntity).anyTimes();
-
-    final PermissionEntity viewUserPermissionEntity = createMock(PermissionEntity.class);
-    expect(viewUserPermissionEntity.getId()).andReturn(1).anyTimes();
-
-    final PermissionEntity ambariAdministratorPermissionEntity = createMock(PermissionEntity.class);
-    expect(ambariAdministratorPermissionEntity.getId()).andReturn(2).anyTimes();
-
-    final PermissionEntity clusterUserPermissionEntity = createMock(PermissionEntity.class);
-    expect(clusterUserPermissionEntity.getId()).andReturn(3).anyTimes();
-
-    final PermissionEntity clusterOperatorPermissionEntity = createMock(PermissionEntity.class);
-    expect(clusterOperatorPermissionEntity.getId()).andReturn(4).anyTimes();
-
-    final PermissionEntity clusterAdministratorPermissionEntity = createMock(PermissionEntity.class);
-    expect(clusterAdministratorPermissionEntity.getId()).andReturn(5).anyTimes();
-
-    final PermissionEntity serviceAdministratorPermissionEntity = createMock(PermissionEntity.class);
-    expect(serviceAdministratorPermissionEntity.getId()).andReturn(6).anyTimes();
-
-    final PermissionEntity serviceOperatorPermissionEntity = createMock(PermissionEntity.class);
-    expect(serviceOperatorPermissionEntity.getId()).andReturn(7).anyTimes();
-
-    final PermissionDAO permissionDAO = injector.getInstance(PermissionDAO.class);
-    expect(permissionDAO.findPermissionByNameAndType("VIEW.USER", viewResourceTypeEntity))
-        .andReturn(viewUserPermissionEntity)
-        .anyTimes();
-    expect(permissionDAO.findPermissionByNameAndType("AMBARI.ADMINISTRATOR", ambariResourceTypeEntity))
-        .andReturn(ambariAdministratorPermissionEntity)
-        .anyTimes();
-    expect(permissionDAO.findPermissionByNameAndType("CLUSTER.USER", clusterResourceTypeEntity))
-        .andReturn(clusterUserPermissionEntity)
-        .anyTimes();
-    expect(permissionDAO.findPermissionByNameAndType("CLUSTER.OPERATOR", clusterResourceTypeEntity))
-        .andReturn(clusterOperatorPermissionEntity)
-        .anyTimes();
-    expect(permissionDAO.findPermissionByNameAndType("CLUSTER.ADMINISTRATOR", clusterResourceTypeEntity))
-        .andReturn(clusterAdministratorPermissionEntity)
-        .anyTimes();
-    expect(permissionDAO.findPermissionByNameAndType("SERVICE.ADMINISTRATOR", clusterResourceTypeEntity))
-        .andReturn(serviceAdministratorPermissionEntity)
-        .anyTimes();
-    expect(permissionDAO.findPermissionByNameAndType("SERVICE.OPERATOR", clusterResourceTypeEntity))
-        .andReturn(serviceOperatorPermissionEntity)
-        .anyTimes();
-
-    String updateQueryPattern;
-
-    // Set permission labels
-    updateQueryPattern = "UPDATE adminpermission SET permission_label='%s' WHERE permission_id=%d";
-    expect(dbAccessor.executeUpdate(String.format(updateQueryPattern,
-        "Ambari Administrator", PermissionEntity.AMBARI_ADMINISTRATOR_PERMISSION)))
-        .andReturn(1).once();
-    expect(dbAccessor.executeUpdate(String.format(updateQueryPattern,
-        "Cluster User", PermissionEntity.CLUSTER_USER_PERMISSION)))
-        .andReturn(1).once();
-    expect(dbAccessor.executeUpdate(String.format(updateQueryPattern,
-        "Cluster Administrator", PermissionEntity.CLUSTER_ADMINISTRATOR_PERMISSION)))
-        .andReturn(1).once();
-    expect(dbAccessor.executeUpdate(String.format(updateQueryPattern,
-        "View User", PermissionEntity.VIEW_USER_PERMISSION)))
-        .andReturn(1).once();
-
-    // Update permissions names
-    updateQueryPattern = "UPDATE adminpermission SET permission_name='%s' WHERE permission_id=%d";
-    expect(dbAccessor.executeUpdate(String.format(updateQueryPattern,
-        PermissionEntity.AMBARI_ADMINISTRATOR_PERMISSION_NAME, PermissionEntity.AMBARI_ADMINISTRATOR_PERMISSION)))
-        .andReturn(1).once();
-    expect(dbAccessor.executeUpdate(String.format(updateQueryPattern,
-        PermissionEntity.CLUSTER_USER_PERMISSION_NAME, PermissionEntity.CLUSTER_USER_PERMISSION)))
-        .andReturn(1).once();
-    expect(dbAccessor.executeUpdate(String.format(updateQueryPattern,
-        PermissionEntity.CLUSTER_ADMINISTRATOR_PERMISSION_NAME, PermissionEntity.CLUSTER_ADMINISTRATOR_PERMISSION)))
-        .andReturn(1).once();
-    expect(dbAccessor.executeUpdate(String.format(updateQueryPattern,
-        PermissionEntity.VIEW_USER_PERMISSION_NAME, PermissionEntity.VIEW_USER_PERMISSION)))
-        .andReturn(1).once();
-
-    RoleAuthorizationEntity roleAuthorization = createMock(RoleAuthorizationEntity.class);
-
-    RoleAuthorizationDAO roleAuthorizationDAO = injector.getInstance(RoleAuthorizationDAO.class);
-    expect(roleAuthorizationDAO.findById(anyString())).andReturn(roleAuthorization).anyTimes();
-
-    Collection<RoleAuthorizationEntity> authorizations = new ArrayList<>();
-
-    expect(ambariAdministratorPermissionEntity.getAuthorizations()).andReturn(authorizations).atLeastOnce();
-    expect(clusterAdministratorPermissionEntity.getAuthorizations()).andReturn(authorizations).atLeastOnce();
-    expect(clusterOperatorPermissionEntity.getAuthorizations()).andReturn(authorizations).atLeastOnce();
-    expect(serviceAdministratorPermissionEntity.getAuthorizations()).andReturn(authorizations).atLeastOnce();
-    expect(serviceOperatorPermissionEntity.getAuthorizations()).andReturn(authorizations).atLeastOnce();
-    expect(clusterUserPermissionEntity.getAuthorizations()).andReturn(authorizations).atLeastOnce();
-    expect(viewUserPermissionEntity.getAuthorizations()).andReturn(authorizations).atLeastOnce();
-
-    expect(permissionDAO.merge(ambariAdministratorPermissionEntity)).andReturn(ambariAdministratorPermissionEntity).atLeastOnce();
-    expect(permissionDAO.merge(clusterAdministratorPermissionEntity)).andReturn(clusterAdministratorPermissionEntity).atLeastOnce();
-    expect(permissionDAO.merge(clusterOperatorPermissionEntity)).andReturn(clusterOperatorPermissionEntity).atLeastOnce();
-    expect(permissionDAO.merge(serviceAdministratorPermissionEntity)).andReturn(serviceAdministratorPermissionEntity).atLeastOnce();
-    expect(permissionDAO.merge(serviceOperatorPermissionEntity)).andReturn(serviceOperatorPermissionEntity).atLeastOnce();
-    expect(permissionDAO.merge(clusterUserPermissionEntity)).andReturn(clusterUserPermissionEntity).atLeastOnce();
-    expect(permissionDAO.merge(viewUserPermissionEntity)).andReturn(viewUserPermissionEntity).atLeastOnce();
-
-    replayAll();
-    upgradeCatalog.executeDMLUpdates();
-    verifyAll();
-  }
-
-  @Test
-  public void testGetTargetVersion() throws Exception {
-    UpgradeCatalog upgradeCatalog = injector.getInstance(UpgradeCatalog230.class);
-    Assert.assertEquals("2.3.0", upgradeCatalog.getTargetVersion());
-  }
-
-  @Test
-  public void testGetSourceVersion() {
-    UpgradeCatalog upgradeCatalog = injector.getInstance(UpgradeCatalog230.class);
-    Assert.assertEquals("2.2.1", upgradeCatalog.getSourceVersion());
-  }
-
-}


[42/63] [abbrv] ambari git commit: AMBARI-21327 Ambari server to print error messages if NN HA NameNode service properties use a different FQDN (dual network cards) than the FQDN in the HostComponentState table (dili)

Posted by ab...@apache.org.
AMBARI-21327 Ambari server to print error messages if NN HA namenode services properties use diff FQDN (dual network cards) than FQDN in the HostComponentState table (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2f402505
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2f402505
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2f402505

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 2f402505060354e8c71d24879c47a3850cc04009
Parents: 40e6352
Author: Di Li <di...@apache.org>
Authored: Tue Jun 27 15:56:53 2017 -0400
Committer: Di Li <di...@apache.org>
Committed: Tue Jun 27 15:56:53 2017 -0400

----------------------------------------------------------------------
 .../apache/ambari/server/stack/MasterHostResolver.java   | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2f402505/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
index 427a5f5..fc657c1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/MasterHostResolver.java
@@ -132,7 +132,7 @@ public class MasterHostResolver {
               return filterHosts(hostsType, serviceName, componentName);
             }
 
-            Map<Status, String> pair = getNameNodePair();
+            Map<Status, String> pair = getNameNodePair(componentHosts);
             if (pair != null) {
               hostsType.master = pair.containsKey(Status.ACTIVE) ? pair.get(Status.ACTIVE) :  null;
               hostsType.secondary = pair.containsKey(Status.STANDBY) ? pair.get(Status.STANDBY) :  null;
@@ -273,7 +273,7 @@ public class MasterHostResolver {
    * one active and one standby host were found, otherwise, return null.
    * The hostnames are returned in lowercase.
    */
-  private Map<Status, String> getNameNodePair() {
+  private Map<Status, String> getNameNodePair(Set<String> componentHosts) throws AmbariException {
     Map<Status, String> stateToHost = new HashMap<>();
     Cluster cluster = getCluster();
 
@@ -307,6 +307,13 @@ public class MasterHostResolver {
           throw new MalformedURLException("Could not parse host and port from " + value);
         }
 
+        if (!componentHosts.contains(hp.host)){
+          //This may happen when NN HA is configured on dual network card machines with public/private FQDNs.
+          LOG.error(
+              String.format(
+                  "Hadoop NameNode HA configuration %s contains host %s that does not exist in the NameNode hosts list %s",
+                  key, hp.host, componentHosts.toString()));
+        }
         String state = queryJmxBeanValue(hp.host, hp.port, "Hadoop:service=NameNode,name=NameNodeStatus", "State", true, encrypted);
 
         if (null != state && (state.equalsIgnoreCase(Status.ACTIVE.toString()) || state.equalsIgnoreCase(Status.STANDBY.toString()))) {
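
To make the dual-homed failure mode concrete, the sketch below shows the kind of membership check the patch introduces: parse the host out of a "host:port" property value and test it against the known component hosts. The class, method, and host names are illustrative assumptions, not Ambari's MasterHostResolver, and the patch above logs an error on a mismatch instead of returning a boolean.

import java.net.URI;
import java.util.Locale;
import java.util.Set;

// Hedged sketch: HostPortCheck is a stand-alone illustration, not part of Ambari.
// It only mirrors the idea of comparing the FQDN taken from an HA address property
// against the NameNode hosts known to the server.
public class HostPortCheck {

  /** Returns true when the host portion of "host:port" is one of the known component hosts. */
  static boolean isKnownHost(String hostPortValue, Set<String> componentHosts) {
    // Reuse URI parsing to split host and port; a value is expected to look like "fqdn:port".
    URI uri = URI.create("dummy://" + hostPortValue.trim());
    String host = uri.getHost();
    if (host == null) {
      throw new IllegalArgumentException("Could not parse host and port from " + hostPortValue);
    }
    return componentHosts.contains(host.toLowerCase(Locale.ENGLISH));
  }

  public static void main(String[] args) {
    // Hypothetical hosts: the internal FQDNs stand in for what the server knows about.
    Set<String> componentHosts = Set.of("nn1.internal.example.com", "nn2.internal.example.com");

    // A public FQDN from a dual-homed machine does not match the internal list,
    // which is exactly the situation the new error message reports.
    System.out.println(isKnownHost("nn1.public.example.com:50070", componentHosts));   // false
    System.out.println(isKnownHost("nn2.internal.example.com:50070", componentHosts)); // true
  }
}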


[61/63] [abbrv] ambari git commit: AMBARI-21379. Ambari Agent doesn't start as non-root user with "ambari-agent start" logged in as root (aonishuk)

Posted by ab...@apache.org.
AMBARI-21379. Ambari Agent doesn't start as non-root user with "ambari-agent start" logged in as root (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ae6b74f3
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ae6b74f3
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ae6b74f3

Branch: refs/heads/branch-feature-logsearch-ui
Commit: ae6b74f38885967bde2ec3c4eca911cadcdcb295
Parents: d7c59fc
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Fri Jun 30 14:51:42 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Fri Jun 30 14:51:42 2017 +0300

----------------------------------------------------------------------
 ambari-agent/conf/unix/install-helper.sh  |  8 ++++++++
 ambari-agent/etc/init.d/ambari-agent      | 22 +---------------------
 ambari-agent/pom.xml                      | 11 -----------
 ambari-agent/src/packages/tarball/all.xml |  2 +-
 4 files changed, 10 insertions(+), 33 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ae6b74f3/ambari-agent/conf/unix/install-helper.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/conf/unix/install-helper.sh b/ambari-agent/conf/unix/install-helper.sh
index c30aab1..37bbe4a 100644
--- a/ambari-agent/conf/unix/install-helper.sh
+++ b/ambari-agent/conf/unix/install-helper.sh
@@ -31,6 +31,8 @@ SIMPLEJSON_AGENT_DIR="/usr/lib/ambari-agent/lib/ambari_simplejson"
 AMBARI_AGENT="/usr/lib/python2.6/site-packages/ambari_agent"
 PYTHON_WRAPER_TARGET="/usr/bin/ambari-python-wrap"
 AMBARI_AGENT_VAR="/var/lib/ambari-agent"
+AMBARI_AGENT_BINARY="/etc/init.d/ambari-agent"
+AMBARI_AGENT_BINARY_SYMLINK="/usr/sbin/ambari-agent"
 
 clean_pyc_files(){
   # cleaning old *.pyc files
@@ -46,6 +48,10 @@ do_install(){
     cp -f /etc/ambari-agent/conf.save/* /etc/ambari-agent/conf
     mv /etc/ambari-agent/conf.save /etc/ambari-agent/conf_$(date '+%d_%m_%y_%H_%M').save
   fi
+
+  # setting up /usr/sbin/ambari-agent symlink
+  rm -f "$AMBARI_AGENT_BINARY_SYMLINK"
+  ln -s "$AMBARI_AGENT_BINARY" "$AMBARI_AGENT_BINARY_SYMLINK"
     
   # setting ambari_commons shared resource
   rm -rf "$OLD_COMMON_DIR"
@@ -125,6 +131,8 @@ do_remove(){
 
   clean_pyc_files
 
+  rm -f "$AMBARI_AGENT_BINARY_SYMLINK"
+
   if [ -d "/etc/ambari-agent/conf.save" ]; then
     mv /etc/ambari-agent/conf.save /etc/ambari-agent/conf_$(date '+%d_%m_%y_%H_%M').save
   fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae6b74f3/ambari-agent/etc/init.d/ambari-agent
----------------------------------------------------------------------
diff --git a/ambari-agent/etc/init.d/ambari-agent b/ambari-agent/etc/init.d/ambari-agent
index 044f86d..b70de85 100644
--- a/ambari-agent/etc/init.d/ambari-agent
+++ b/ambari-agent/etc/init.d/ambari-agent
@@ -36,26 +36,6 @@ else
   command_prefx="bash -c"
 fi
 
-case "$1" in
-  start)
-        $command_prefx "/usr/sbin/ambari-agent $@"
-        ;;
-  stop)
-        $command_prefx "/usr/sbin/ambari-agent $@"
-        ;;
-  status)
-        $command_prefx "/usr/sbin/ambari-agent $@"
-        ;;
-  restart)
-        $command_prefx "$0 stop"
-        $command_prefx "$0 start"
-        ;;
-  reset)
-        /usr/sbin/ambari-agent $@
-        ;;
-  *)
-        echo "Usage: $0 {start|stop|status|restart|reset <server_hostname>}"
-        exit 1
-esac
+$command_prefx "/var/lib/ambari-agent/bin/ambari-agent $@"
 
 exit $?

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae6b74f3/ambari-agent/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-agent/pom.xml b/ambari-agent/pom.xml
index f2add9a..8673f2e 100644
--- a/ambari-agent/pom.xml
+++ b/ambari-agent/pom.xml
@@ -274,17 +274,6 @@
               </sources>
             </mapping>
            <mapping>
-              <directory>/usr/sbin</directory>
-              <username>root</username>
-              <groupname>root</groupname>
-              <directoryIncluded>false</directoryIncluded> <!-- avoid managing /usr/sbin -->
-              <sources>
-                <source>
-                  <location>${project.build.directory}${dirsep}${project.artifactId}-${project.version}/usr/sbin</location>
-                </source>
-              </sources>
-            </mapping>
-           <mapping>
               <directory>/usr/lib/ambari-agent</directory>
               <filemode>755</filemode>
               <username>root</username>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae6b74f3/ambari-agent/src/packages/tarball/all.xml
----------------------------------------------------------------------
diff --git a/ambari-agent/src/packages/tarball/all.xml b/ambari-agent/src/packages/tarball/all.xml
index c71ffe9..b0c2f1c 100644
--- a/ambari-agent/src/packages/tarball/all.xml
+++ b/ambari-agent/src/packages/tarball/all.xml
@@ -176,7 +176,7 @@
     <file>
       <fileMode>755</fileMode>
       <source>${basedir}/target/src/ambari-agent</source>
-      <outputDirectory>/usr/sbin</outputDirectory>
+      <outputDirectory>/var/lib/ambari-agent/bin</outputDirectory>
     </file>
     <file>
       <fileMode>700</fileMode>


[20/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
deleted file mode 100644
index f413c69..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog240.java
+++ /dev/null
@@ -1,3079 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import java.sql.Clob;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicLong;
-
-import javax.persistence.EntityManager;
-import javax.persistence.Query;
-import javax.persistence.TypedQuery;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.agent.RecoveryConfigHelper;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
-import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
-import org.apache.ambari.server.orm.dao.ArtifactDAO;
-import org.apache.ambari.server.orm.dao.ClusterDAO;
-import org.apache.ambari.server.orm.dao.PermissionDAO;
-import org.apache.ambari.server.orm.dao.PrincipalDAO;
-import org.apache.ambari.server.orm.dao.PrincipalTypeDAO;
-import org.apache.ambari.server.orm.dao.PrivilegeDAO;
-import org.apache.ambari.server.orm.dao.RemoteAmbariClusterDAO;
-import org.apache.ambari.server.orm.dao.RequestScheduleDAO;
-import org.apache.ambari.server.orm.dao.ResourceTypeDAO;
-import org.apache.ambari.server.orm.dao.RoleAuthorizationDAO;
-import org.apache.ambari.server.orm.dao.UserDAO;
-import org.apache.ambari.server.orm.dao.ViewInstanceDAO;
-import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
-import org.apache.ambari.server.orm.entities.ArtifactEntity;
-import org.apache.ambari.server.orm.entities.ClusterEntity;
-import org.apache.ambari.server.orm.entities.PermissionEntity;
-import org.apache.ambari.server.orm.entities.PrincipalEntity;
-import org.apache.ambari.server.orm.entities.PrincipalTypeEntity;
-import org.apache.ambari.server.orm.entities.PrivilegeEntity;
-import org.apache.ambari.server.orm.entities.RemoteAmbariClusterEntity;
-import org.apache.ambari.server.orm.entities.RequestScheduleEntity;
-import org.apache.ambari.server.orm.entities.ResourceEntity;
-import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
-import org.apache.ambari.server.orm.entities.RoleAuthorizationEntity;
-import org.apache.ambari.server.orm.entities.UserEntity;
-import org.apache.ambari.server.orm.entities.ViewEntityEntity;
-import org.apache.ambari.server.orm.entities.ViewInstanceEntity;
-import org.apache.ambari.server.security.authorization.ResourceType;
-import org.apache.ambari.server.security.authorization.User;
-import org.apache.ambari.server.security.authorization.Users;
-import org.apache.ambari.server.state.AlertFirmness;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.PropertyInfo;
-import org.apache.ambari.server.state.RepositoryType;
-import org.apache.ambari.server.state.SecurityType;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.ServiceInfo;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.StackInfo;
-import org.apache.ambari.server.state.State;
-import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
-import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosKeytabDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosPrincipalDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
-import org.apache.ambari.server.view.DefaultMasker;
-import org.apache.ambari.view.ClusterType;
-import org.apache.ambari.view.MaskException;
-import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.jdbc.support.JdbcUtils;
-
-import com.google.common.collect.Lists;
-import com.google.gson.JsonArray;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParser;
-import com.google.gson.JsonPrimitive;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.persist.Transactional;
-
-/**
- * Upgrade catalog for version 2.4.0.
- */
-public class UpgradeCatalog240 extends AbstractUpgradeCatalog {
-
-  protected static final String ADMIN_PERMISSION_TABLE = "adminpermission";
-  protected static final String PRINCIPAL_ID_COL = "principal_id";
-  protected static final String ALERT_DEFINITION_TABLE = "alert_definition";
-  protected static final String ALERT_TARGET_TABLE = "alert_target";
-  protected static final String ALERT_TARGET_ENABLED_COLUMN = "is_enabled";
-  protected static final String ALERT_CURRENT_TABLE = "alert_current";
-  protected static final String ALERT_CURRENT_OCCURRENCES_COLUMN = "occurrences";
-  protected static final String ALERT_CURRENT_FIRMNESS_COLUMN = "firmness";
-  protected static final String HELP_URL_COLUMN = "help_url";
-  protected static final String REPEAT_TOLERANCE_COLUMN = "repeat_tolerance";
-  protected static final String REPEAT_TOLERANCE_ENABLED_COLUMN = "repeat_tolerance_enabled";
-  protected static final String PERMISSION_ID_COL = "permission_name";
-  protected static final String SORT_ORDER_COL = "sort_order";
-  protected static final String REPO_VERSION_TABLE = "repo_version";
-  protected static final String HOST_ROLE_COMMAND_TABLE = "host_role_command";
-  protected static final String SERVICE_COMPONENT_DS_TABLE = "servicecomponentdesiredstate";
-  protected static final String HOST_COMPONENT_DS_TABLE = "hostcomponentdesiredstate";
-  protected static final String HOST_COMPONENT_STATE_TABLE = "hostcomponentstate";
-  protected static final String SERVICE_COMPONENT_HISTORY_TABLE = "servicecomponent_history";
-  protected static final String UPGRADE_TABLE = "upgrade";
-  protected static final String STACK_TABLE = "stack";
-  protected static final String CLUSTER_TABLE = "clusters";
-  protected static final String CLUSTER_UPGRADE_ID_COLUMN = "upgrade_id";
-  protected static final String YARN_ENV_CONFIG = "yarn-env";
-  protected static final String CAPACITY_SCHEDULER_CONFIG = "capacity-scheduler";
-  protected static final String WEBHCAT_SITE_CONFIG = "webhcat-site";
-  protected static final String TEZ_SITE_CONFIG = "tez-site";
-  protected static final String MAPRED_SITE_CONFIG = "mapred-site";
-  public static final String DESIRED_VERSION_COLUMN_NAME = "desired_version";
-  public static final String BLUEPRINT_SETTING_TABLE = "blueprint_setting";
-  public static final String BLUEPRINT_NAME_COL = "blueprint_name";
-  public static final String SETTING_NAME_COL = "setting_name";
-  public static final String SETTING_DATA_COL = "setting_data";
-  public static final String ID = "id";
-  public static final String BLUEPRINT_TABLE = "blueprint";
-  public static final String VIEWINSTANCE_TABLE = "viewinstance";
-  public static final String SHORT_URL_COLUMN = "short_url";
-  public static final String CLUSTER_HANDLE_COLUMN = "cluster_handle";
-  public static final String REQUESTSCHEDULE_TABLE = "requestschedule";
-  public static final String AUTHENTICATED_USER_ID_COLUMN = "authenticated_user_id";
-  protected static final String CLUSTER_VERSION_TABLE = "cluster_version";
-  protected static final String HOST_VERSION_TABLE = "host_version";
-  protected static final String TOPOLOGY_REQUEST_TABLE = "topology_request";
-  protected static final String PROVISION_ACTION_COL = "provision_action";
-  protected static final String PHOENIX_QUERY_SERVER_PRINCIPAL_KEY = "phoenix.queryserver.kerberos.principal";
-  protected static final String PHOENIX_QUERY_SERVER_KEYTAB_KEY = "phoenix.queryserver.keytab.file";
-  protected static final String DEFAULT_CONFIG_VERSION = "version1";
-  protected static final String SLIDER_SERVICE_NAME = "SLIDER";
-
-  private static final String OOZIE_ENV_CONFIG = "oozie-env";
-  private static final String SLIDER_CLIENT_CONFIG = "slider-client";
-  private static final String HIVE_ENV_CONFIG = "hive-env";
-  private static final String AMS_SITE = "ams-site";
-  public static final String TIMELINE_METRICS_SINK_COLLECTION_PERIOD = "timeline.metrics.sink.collection.period";
-  public static final String ONE_DIR_PER_PARITION_PROPERTY = "one_dir_per_partition";
-  public static final String VIEWURL_TABLE = "viewurl";
-  public static final String URL_ID_COLUMN = "url_id";
-  private static final String PRINCIPAL_TYPE_TABLE = "adminprincipaltype";
-  private static final String PRINCIPAL_TABLE = "adminprincipal";
-  protected static final String HBASE_SITE_CONFIG = "hbase-site";
-  protected static final String HBASE_SPNEGO_PRINCIPAL_KEY = "hbase.security.authentication.spnego.kerberos.principal";
-  protected static final String HBASE_SPNEGO_KEYTAB_KEY = "hbase.security.authentication.spnego.kerberos.keytab";
-  protected static final String EXTENSION_TABLE = "extension";
-  protected static final String EXTENSION_ID_COLUMN = "extension_id";
-  protected static final String EXTENSION_LINK_TABLE = "extensionlink";
-  protected static final String EXTENSION_LINK_ID_COLUMN = "link_id";
-  protected static final String KAFKA_BROKER_CONFIG = "kafka-broker";
-
-  private static final Map<String, Integer> ROLE_ORDER;
-  private static final String AMS_HBASE_SITE = "ams-hbase-site";
-  private static final String HBASE_RPC_TIMEOUT_PROPERTY = "hbase.rpc.timeout";
-  private static final String AMS_HBASE_SITE_NORMALIZER_ENABLED_PROPERTY = "hbase.normalizer.enabled";
-  public static final String PRECISION_TABLE_TTL_PROPERTY = "timeline.metrics.host.aggregator.ttl";
-  public static final String CLUSTER_SECOND_TABLE_TTL_PROPERTY = "timeline.metrics.cluster.aggregator.second.ttl";
-
-  static {
-    // Manually create role order since there really isn't any mechanism for this
-    ROLE_ORDER = new HashMap<>();
-    ROLE_ORDER.put("AMBARI.ADMINISTRATOR", 1);
-    ROLE_ORDER.put("CLUSTER.ADMINISTRATOR", 2);
-    ROLE_ORDER.put("CLUSTER.OPERATOR", 3);
-    ROLE_ORDER.put("SERVICE.ADMINISTRATOR", 4);
-    ROLE_ORDER.put("SERVICE.OPERATOR", 5);
-    ROLE_ORDER.put("CLUSTER.USER", 6);
-  }
-
-  @Inject
-  UserDAO userDAO;
-
-  @Inject
-  PermissionDAO permissionDAO;
-
-  @Inject
-  PrivilegeDAO privilegeDAO;
-
-  @Inject
-  ResourceTypeDAO resourceTypeDAO;
-
-  @Inject
-  ClusterDAO clusterDAO;
-
-  @Inject
-  PrincipalTypeDAO principalTypeDAO;
-
-  @Inject
-  PrincipalDAO principalDAO;
-
-  @Inject
-  RequestScheduleDAO requestScheduleDAO;
-
-  @Inject
-  Users users;
-
-  @Inject
-  Configuration config;
-
-  /**
-   * Logger.
-   */
-  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog240.class);
-
-  private static final String SETTING_TABLE = "setting";
-
-  protected static final String SERVICE_COMPONENT_DESIRED_STATE_TABLE = "servicecomponentdesiredstate";
-  protected static final String RECOVERY_ENABLED_COL = "recovery_enabled";
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Don't forget to register new UpgradeCatalogs in {@link org.apache.ambari.server.upgrade.SchemaUpgradeHelper.UpgradeHelperModule#configure()}
-   *
-   * @param injector Guice injector to track dependencies and uses bindings to inject them.
-   */
-  @Inject
-  public UpgradeCatalog240(Injector injector) {
-    super(injector);
-    injector.injectMembers(this);
-  }
-
-  // ----- UpgradeCatalog ----------------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getTargetVersion() {
-    return "2.4.0";
-  }
-
-  // ----- AbstractUpgradeCatalog --------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getSourceVersion() {
-    return "2.3.0";
-  }
-
-  public static final String CLUSTER_TYPE_COLUMN = "cluster_type";
-  public static final String REMOTE_AMBARI_CLUSTER_TABLE = "remoteambaricluster";
-  public static final String REMOTE_AMBARI_CLUSTER_SERVICE_TABLE = "remoteambariclusterservice";
-
-  public static final String CLUSTER_ID = "cluster_id";
-  public static final String SERVICE_NAME = "service_name";
-  public static final String CLUSTER_NAME = "name";
-
-
-  @Override
-  protected void executeDDLUpdates() throws AmbariException, SQLException {
-    updateAdminPermissionTable();
-    updateServiceComponentDesiredStateTable();
-    createExtensionTable();
-    createExtensionLinkTable();
-    createSettingTable();
-    updateRepoVersionTableDDL();
-    updateServiceComponentDesiredStateTableDDL();
-    createServiceComponentHistoryTable();
-    updateClusterTableDDL();
-    updateAlertDefinitionTable();
-    updateAlertCurrentTable();
-    updateAlertTargetTable();
-    createBlueprintSettingTable();
-    updateHostRoleCommandTableDDL();
-    createViewUrlTableDDL();
-    updateViewInstanceEntityTable();
-    createRemoteClusterTable();
-    updateViewInstanceTable();
-    updateRequestScheduleEntityTable();
-    updateTopologyRequestTable();
-  }
-
-  private void createRemoteClusterTable() throws SQLException {
-
-    List<DBColumnInfo> columns = new ArrayList<>();
-    LOG.info("Creating {} table", REMOTE_AMBARI_CLUSTER_TABLE);
-    columns.add(new DBColumnInfo(CLUSTER_ID, Long.class, null, null, false));
-    columns.add(new DBColumnInfo(CLUSTER_NAME, String.class, 255, null, false));
-    columns.add(new DBColumnInfo("url", String.class, 255, null, false));
-    columns.add(new DBColumnInfo("username", String.class, 255, null, false));
-    columns.add(new DBColumnInfo("password", String.class, 255, null, false));
-    dbAccessor.createTable(REMOTE_AMBARI_CLUSTER_TABLE, columns, CLUSTER_ID);
-    dbAccessor.addUniqueConstraint(REMOTE_AMBARI_CLUSTER_TABLE , "UQ_remote_ambari_cluster" , CLUSTER_NAME);
-    addSequence("remote_cluster_id_seq", 1L, false);
-
-    List<DBColumnInfo> remoteClusterServiceColumns = new ArrayList<>();
-    LOG.info("Creating {} table", REMOTE_AMBARI_CLUSTER_SERVICE_TABLE);
-    remoteClusterServiceColumns.add(new DBColumnInfo(ID, Long.class, null, null, false));
-    remoteClusterServiceColumns.add(new DBColumnInfo(SERVICE_NAME, String.class, 255, null, false));
-    remoteClusterServiceColumns.add(new DBColumnInfo(CLUSTER_ID, Long.class, null, null, false));
-    dbAccessor.createTable(REMOTE_AMBARI_CLUSTER_SERVICE_TABLE, remoteClusterServiceColumns, ID);
-    dbAccessor.addFKConstraint(REMOTE_AMBARI_CLUSTER_SERVICE_TABLE, "FK_remote_ambari_cluster_id",
-      CLUSTER_ID, REMOTE_AMBARI_CLUSTER_TABLE, CLUSTER_ID, false);
-    addSequence("remote_cluster_service_id_seq", 1L, false);
-
-  }
-
-  private void createViewUrlTableDDL() throws SQLException {
-    List<DBColumnInfo> columns = new ArrayList<>();
-
-    //  Add setting table
-    LOG.info("Creating " + VIEWURL_TABLE + " table");
-
-    columns.add(new DBColumnInfo(URL_ID_COLUMN, Long.class, null, null, false));
-    columns.add(new DBColumnInfo("url_name", String.class, 255, null, false));
-    columns.add(new DBColumnInfo("url_suffix", String.class, 255, null, false));
-    dbAccessor.createTable(VIEWURL_TABLE, columns, URL_ID_COLUMN);
-    addSequence("viewurl_id_seq", 1L, false);
-  }
-
-  private void updateViewInstanceEntityTable() throws SQLException {
-    dbAccessor.addColumn(VIEWINSTANCE_TABLE,
-      new DBColumnInfo(SHORT_URL_COLUMN, Long.class, null, null, true));
-    dbAccessor.addFKConstraint(VIEWINSTANCE_TABLE, "FK_instance_url_id",
-      SHORT_URL_COLUMN, VIEWURL_TABLE, URL_ID_COLUMN, false);
-    dbAccessor.addColumn(VIEWINSTANCE_TABLE,
-      new DBColumnInfo(CLUSTER_TYPE_COLUMN, String.class, 100, ClusterType.LOCAL_AMBARI.name(), false));
-  }
-
-  private void updateRequestScheduleEntityTable() throws SQLException {
-    dbAccessor.addColumn(REQUESTSCHEDULE_TABLE,
-      new DBColumnInfo(AUTHENTICATED_USER_ID_COLUMN, Integer.class, null, null, true));
-  }
-
-  private void updateClusterTableDDL() throws SQLException {
-    dbAccessor.addColumn(CLUSTER_TABLE, new DBColumnInfo(CLUSTER_UPGRADE_ID_COLUMN, Long.class, null, null, true));
-
-    dbAccessor.addFKConstraint(CLUSTER_TABLE, "FK_clusters_upgrade_id",
-      CLUSTER_UPGRADE_ID_COLUMN, UPGRADE_TABLE, "upgrade_id", false);
-  }
-
-  @Override
-  protected void executePreDMLUpdates() throws AmbariException, SQLException {
-    //To change body of implemented methods use File | Settings | File Templates.
-  }
-
-  @Override
-  protected void executeDMLUpdates() throws AmbariException, SQLException {
-    addNewConfigurationsFromXml();
-    updateAlerts();
-    setRoleSortOrder();
-    addSettingPermission();
-    addViewOperationalLogsPermission();
-    addManageUserPersistedDataPermission();
-    allowClusterOperatorToManageCredentials();
-    updateHDFSConfigs();
-    updateKAFKAConfigs();
-    updateHIVEConfigs();
-    updateAMSConfigs();
-    updateClusterEnv();
-    updateSequenceForView();
-    updateHostRoleCommandTableDML();
-    updateKerberosConfigs();
-    updateYarnEnv();
-    updatePhoenixConfigs();
-    updateSparkConfigs();
-    updateHBaseConfigs();
-    updateFalconConfigs();
-    updateKerberosDescriptorArtifacts();
-    removeHiveOozieDBConnectionConfigs();
-    updateClustersAndHostsVersionStateTableDML();
-    removeStandardDeviationAlerts();
-    removeAtlasMetaserverAlert();
-    updateClusterInheritedPermissionsConfig();
-    consolidateUserRoles();
-    createRolePrincipals();
-    updateHDFSWidgetDefinition();
-    updateTezViewProperty();
-    upgradeCapSchedulerView();
-    fixAuthorizationDescriptions();
-    removeAuthorizations();
-    addConnectionTimeoutParamForWebAndMetricAlerts();
-    addSliderClientConfig();
-    updateRequestScheduleEntityUserIds();
-    updateRecoveryConfigurationDML();
-    updatePigSmokeTestEntityClass();
-    updateRangerHbasePluginProperties();
-    adjustHiveJobTimestamps();
-  }
-
-  /**
-   * Populates authenticated_user_id field by correct user id calculated from user name
-   * @throws SQLException
-   */
-  protected void updateRequestScheduleEntityUserIds() throws SQLException {
-    List<RequestScheduleEntity> requestScheduleEntities = requestScheduleDAO.findAll();
-    for (RequestScheduleEntity requestScheduleEntity : requestScheduleEntities) {
-      String createdUserName = requestScheduleEntity.getCreateUser();
-
-      if (createdUserName != null) {
-        User user = users.getUserIfUnique(createdUserName);
-
-        if (user != null && StringUtils.equals(user.getUserName(), createdUserName)) {
-          requestScheduleEntity.setAuthenticatedUserId(user.getUserId());
-          requestScheduleDAO.merge(requestScheduleEntity);
-        }
-      }
-    }
-  }
-
-  protected void updateClusterInheritedPermissionsConfig() throws SQLException {
-    insertClusterInheritedPrincipal("ALL.CLUSTER.ADMINISTRATOR");
-    insertClusterInheritedPrincipal("ALL.CLUSTER.OPERATOR");
-    insertClusterInheritedPrincipal("ALL.CLUSTER.USER");
-    insertClusterInheritedPrincipal("ALL.SERVICE.ADMINISTRATOR");
-    insertClusterInheritedPrincipal("ALL.SERVICE.OPERATIOR");
-  }
-
-  private void insertClusterInheritedPrincipal(String name) {
-    PrincipalTypeEntity principalTypeEntity = new PrincipalTypeEntity();
-    principalTypeEntity.setName(name);
-    principalTypeEntity = principalTypeDAO.merge(principalTypeEntity);
-
-    PrincipalEntity principalEntity = new PrincipalEntity();
-    principalEntity.setPrincipalType(principalTypeEntity);
-    principalDAO.create(principalEntity);
-  }
-  private static final String NAME_PREFIX = "DS_";
-
-  private String getEntityName(ViewEntityEntity entity) {
-    String className = entity.getClassName();
-    String[] parts = className.split("\\.");
-    String simpleClassName = parts[parts.length - 1];
-
-    if (entity.getViewInstance().alterNames()) {
-      return NAME_PREFIX + simpleClassName + "_" + entity.getId();
-    }
-    return simpleClassName + entity.getId();
-  }
-
-  /**
-   * get all entries of viewentity
-   * find all the table names by parsing class_name
-   * create all the sequence names by appending _id_seq
-   * query each dynamic table to find the max of id
-   * insert into ambari_sequence name and counter for each item
-   */
-  protected void updateSequenceForView() {
-    LOG.info("updateSequenceForView called.");
-    EntityManager entityManager = getEntityManagerProvider().get();
-    TypedQuery<ViewEntityEntity> viewEntityQuery = entityManager.createQuery("SELECT vee FROM ViewEntityEntity vee", ViewEntityEntity.class);
-    List<ViewEntityEntity> viewEntities = viewEntityQuery.getResultList();
-    LOG.info("Received view Entities : {}, length : {}", viewEntities, viewEntities.size());
-
-    // as the id fields are string in these entities we will have to get all ids and convert to int and find max.
-    String selectIdsFormat = "select %s from %s";
-    String insertQuery = "insert into ambari_sequences values ('%s',%d)";
-    for (ViewEntityEntity viewEntity : viewEntities) {
-      LOG.info("Working with viewEntity : {} : {} ", viewEntity, viewEntity.getViewName() + viewEntity.getViewInstance());
-      String tableName = getEntityName(viewEntity);
-      String seqName = tableName.toLowerCase() + "_id_seq";
-      try {
-        entityManager.getTransaction().begin();
-        String selectIdsQueryString = String.format(selectIdsFormat, NAME_PREFIX + viewEntity.getIdProperty(), tableName).toLowerCase();
-        LOG.info("executing max query string {}", selectIdsQueryString);
-        Query selectIdsQuery = entityManager.createNativeQuery(selectIdsQueryString);
-        List<String> ids = selectIdsQuery.getResultList();
-        LOG.info("Received ids : {}", ids);
-        int maxId = 0;
-        if (null != ids && ids.size() != 0) {
-          for (String id : ids) {
-            try {
-              Integer intId = Integer.parseInt(id);
-              maxId = Math.max(intId, maxId);
-            } catch (NumberFormatException e) {
-              LOG.error("the id was non integer : id : {}. So ignoring.", id);
-            }
-          }
-        }
-
-        String insertQueryString = String.format(insertQuery, seqName, maxId).toLowerCase();
-        LOG.info("Executing insert query : {}", insertQueryString);
-        Query insertQ = entityManager.createNativeQuery(insertQueryString);
-        int rowsChanged = insertQ.executeUpdate();
-        entityManager.getTransaction().commit();
-        LOG.info("executing insert resulted in {} row changes.", rowsChanged);
-      } catch (Exception e) { // when the entity table is not yet created or other exception.
-        entityManager.getTransaction().rollback();
-        LOG.info("Error (can be ignored) {}", e.getMessage());
-        LOG.debug("Exception occured while updating : {}",viewEntity.getViewName() + viewEntity.getViewInstance(), e);
-      }
-    }
-  }
-
-
-  /**
-   * get all entries of viewentity
-   * find all the table names by parsing class_name
-   * update jobimpls creation timestamp * 1000
-   */
-  protected void adjustHiveJobTimestamps() {
-    LOG.info("updateSequenceForView called.");
-    EntityManager entityManager = getEntityManagerProvider().get();
-    TypedQuery<ViewEntityEntity> viewEntityQuery = entityManager.createQuery("SELECT vee FROM ViewEntityEntity vee where vee.className = 'org.apache.ambari.view.hive.resources.jobs.viewJobs.JobImpl'", ViewEntityEntity.class);
-    List<ViewEntityEntity> viewEntities = viewEntityQuery.getResultList();
-    LOG.info("Received JobImpl view Entities : {}, length : {}", viewEntities, viewEntities.size());
-
-    String selectIdsFormat = "update %s set ds_datesubmitted = ds_datesubmitted * 1000";
-    for (ViewEntityEntity viewEntity : viewEntities) {
-      LOG.info("Working with JobImpl viewEntity : {} : {} ", viewEntity, viewEntity.getViewName() + ":" + viewEntity.getViewInstanceName() + ":" + viewEntity.getClassName());
-      String tableName = getEntityName(viewEntity);
-      try {
-        entityManager.getTransaction().begin();
-        String updatesQueryString = String.format(selectIdsFormat, tableName).toLowerCase();
-        LOG.info("executing update query string for jobimpl {}", updatesQueryString);
-        Query updateQuery = entityManager.createNativeQuery(updatesQueryString);
-        int rowsChanged = updateQuery.executeUpdate();
-        entityManager.getTransaction().commit();
-        LOG.info("executing update on jobimpl resulted in {} row changes.", rowsChanged);
-      } catch (Exception e) { // when the entity table is not yet created or other exception.
-        entityManager.getTransaction().rollback();
-        LOG.info("Error (can be ignored) {}", e.getMessage());
-        LOG.debug("Exception occured while updating : {}",viewEntity.getViewName() + viewEntity.getViewInstance(), e);
-      }
-    }
-  }
-
-  private void createExtensionTable() throws SQLException {
-    List<DBColumnInfo> columns = new ArrayList<>();
-
-    // Add extension table
-    LOG.info("Creating " + EXTENSION_TABLE + " table");
-
-    columns.add(new DBColumnInfo(EXTENSION_ID_COLUMN, Long.class, null, null, false));
-    columns.add(new DBColumnInfo("extension_name", String.class, 255, null, false));
-    columns.add(new DBColumnInfo("extension_version", String.class, 255, null, false));
-    dbAccessor.createTable(EXTENSION_TABLE, columns, EXTENSION_ID_COLUMN);
-
-    // create UNIQUE constraint, ensuring column order matches SQL files
-    String[] uniqueColumns = new String[] { "extension_name", "extension_version" };
-    dbAccessor.addUniqueConstraint(EXTENSION_TABLE, "UQ_extension", uniqueColumns);
-
-    addSequence("extension_id_seq", 0L, false);
-  }
-
-  private void createExtensionLinkTable() throws SQLException {
-    List<DBColumnInfo> columns = new ArrayList<>();
-
-    // Add extension link table
-    LOG.info("Creating " + EXTENSION_LINK_TABLE + " table");
-
-    columns.add(new DBColumnInfo(EXTENSION_LINK_ID_COLUMN, Long.class, null, null, false));
-    columns.add(new DBColumnInfo("stack_id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo(EXTENSION_ID_COLUMN, Long.class, null, null, false));
-    dbAccessor.createTable(EXTENSION_LINK_TABLE, columns, EXTENSION_LINK_ID_COLUMN);
-
-    // create UNIQUE constraint, ensuring column order matches SQL files
-    String[] uniqueColumns = new String[] { "stack_id", EXTENSION_ID_COLUMN };
-    dbAccessor.addUniqueConstraint(EXTENSION_LINK_TABLE, "UQ_extension_link", uniqueColumns);
-
-    dbAccessor.addFKConstraint(EXTENSION_LINK_TABLE, "FK_extensionlink_extension_id",
-      EXTENSION_ID_COLUMN, EXTENSION_TABLE, EXTENSION_ID_COLUMN, false);
-
-    dbAccessor.addFKConstraint(EXTENSION_LINK_TABLE, "FK_extensionlink_stack_id",
-      "stack_id", STACK_TABLE, "stack_id", false);
-
-    addSequence("link_id_seq", 0L, false);
-  }
-
-  private void createSettingTable() throws SQLException {
-    List<DBColumnInfo> columns = new ArrayList<>();
-
-    //  Add setting table
-    LOG.info("Creating " + SETTING_TABLE + " table");
-
-    columns.add(new DBColumnInfo(ID, Long.class, null, null, false));
-    columns.add(new DBColumnInfo("name", String.class, 255, null, false));
-    columns.add(new DBColumnInfo("setting_type", String.class, 255, null, false));
-    columns.add(new DBColumnInfo("content", String.class, 3000, null, false));
-    columns.add(new DBColumnInfo("updated_by", String.class, 255, "_db", false));
-    columns.add(new DBColumnInfo("update_timestamp", Long.class, null, null, false));
-    dbAccessor.createTable(SETTING_TABLE, columns, ID);
-    addSequence("setting_id_seq", 0L, false);
-  }
-
-  protected void addSettingPermission() throws SQLException {
-    addRoleAuthorization("AMBARI.MANAGE_SETTINGS", "Manage settings", Collections.singleton("AMBARI.ADMINISTRATOR:AMBARI"));
-  }
-
-  protected void addViewOperationalLogsPermission() throws SQLException {
-    Collection<String> roles = Arrays.asList(
-        "AMBARI.ADMINISTRATOR:AMBARI",
-        "CLUSTER.ADMINISTRATOR:CLUSTER",
-        "CLUSTER.OPERATOR:CLUSTER",
-        "SERVICE.ADMINISTRATOR:CLUSTER");
-
-    addRoleAuthorization("SERVICE.VIEW_OPERATIONAL_LOGS", "View service operational logs", roles);
-  }
-
-  /**
-   * Add 'MANAGE_USER_PERSISTED_DATA' permissions for CLUSTER.ADMINISTRATOR, SERVICE.OPERATOR, SERVICE.ADMINISTRATOR,
-   * CLUSTER.OPERATOR, AMBARI.ADMINISTRATOR.
-   *
-   */
-  protected void addManageUserPersistedDataPermission() throws SQLException {
-    Collection<String> roles = Arrays.asList(
-        "AMBARI.ADMINISTRATOR:AMBARI",
-        "CLUSTER.ADMINISTRATOR:CLUSTER",
-        "CLUSTER.OPERATOR:CLUSTER",
-        "SERVICE.ADMINISTRATOR:CLUSTER",
-        "SERVICE.OPERATOR:CLUSTER",
-        "CLUSTER.USER:CLUSTER");
-
-    addRoleAuthorization("CLUSTER.MANAGE_USER_PERSISTED_DATA", "Manage cluster-level user persisted data", roles);
-  }
-
-  /**
-   * Adds <code>CLUSTER.MANAGE_CREDENTIALS</code> to the set of authorizations a <code>CLUSTER.OPERATOR</code> can perform.
-   *
-   * @throws SQLException
-   */
-  protected void allowClusterOperatorToManageCredentials() throws SQLException {
-    addAuthorizationToRole("CLUSTER.OPERATOR", "CLUSTER", "CLUSTER.MANAGE_CREDENTIAL");
-  }
-
-  protected void removeHiveOozieDBConnectionConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(ambariManagementController.getClusters());
-
-    for (final Cluster cluster : clusterMap.values()) {
-      Config oozieEnv = cluster.getDesiredConfigByType(OOZIE_ENV_CONFIG);
-      if(oozieEnv != null) {
-        Map<String, String> oozieEnvProperties = oozieEnv.getProperties();
-        Set<String> removePropertiesSet = new HashSet<>();
-        if (oozieEnvProperties.containsKey("oozie_derby_database")) {
-          LOG.info("Removing property oozie_derby_database from " + OOZIE_ENV_CONFIG);
-          removePropertiesSet.add("oozie_derby_database");
-        }
-        if (oozieEnvProperties.containsKey("oozie_hostname")) {
-          LOG.info("Removing property oozie_hostname from " + OOZIE_ENV_CONFIG);
-          removePropertiesSet.add("oozie_hostname");
-        }
-        if (!removePropertiesSet.isEmpty()) {
-          removeConfigurationPropertiesFromCluster(cluster, OOZIE_ENV_CONFIG, removePropertiesSet);
-        }
-      }
-
-      Config hiveEnv = cluster.getDesiredConfigByType(HIVE_ENV_CONFIG);
-      if(hiveEnv != null) {
-        Map<String, String> hiveEnvProperties = hiveEnv.getProperties();
-        if (hiveEnvProperties.containsKey("hive_hostname")) {
-          LOG.info("Removing property hive_hostname from " + HIVE_ENV_CONFIG);
-          removeConfigurationPropertiesFromCluster(cluster, HIVE_ENV_CONFIG, Collections.singleton("hive_hostname"));
-        }
-      }
-    }
-  }
-
-  protected void addSliderClientConfig() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-    ConfigHelper configHelper = ambariManagementController.getConfigHelper();
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
-
-    for (final Cluster cluster : clusterMap.values()) {
-      Set<String> installedServices = cluster.getServices().keySet();
-      if (installedServices.contains(SLIDER_SERVICE_NAME)) {
-        Config sliderClientConfig = cluster.getDesiredConfigByType(SLIDER_CLIENT_CONFIG);
-        if (sliderClientConfig == null) {
-          configHelper.createConfigType(cluster, cluster.getDesiredStackVersion(),
-              ambariManagementController, SLIDER_CLIENT_CONFIG, new HashMap<String, String>(),
-              AUTHENTICATED_USER_NAME, "");
-        }
-      }
-    }
-  }
-
-  protected void updateAlerts() {
-    // map of alert_name -> property_name -> visibility_value
-    final Map<String, String> hdfsVisibilityMap = new HashMap<String, String>(){{
-      put("mergeHaMetrics", "HIDDEN");
-      put("appId", "HIDDEN");
-      put("metricName", "HIDDEN");
-    }};
-    final Map<String, String> defaultKeytabVisibilityMap = new HashMap<String, String>(){{
-      put("default.smoke.principal", "HIDDEN");
-      put("default.smoke.keytab", "HIDDEN");
-    }};
-
-    final Map<String, String> percentParameterMap = new HashMap<String, String>(){{
-      put("units", "%");
-      put("type", "PERCENT");
-    }};
-
-    Map<String, Map<String, String>> visibilityMap = new HashMap<String, Map<String, String>>(){{
-      put("hive_webhcat_server_status", new HashMap<String, String>(){{
-        put("default.smoke.user", "HIDDEN");
-      }});
-      put("hive_metastore_process", defaultKeytabVisibilityMap);
-      put("hive_server_process", defaultKeytabVisibilityMap);
-      put("zookeeper_server_process", new HashMap<String, String>(){{
-        put("socket.command", "HIDDEN");
-        put("socket.command.response", "HIDDEN");
-      }});
-    }};
-
-    Map<String, Map<String, String>> reportingPercentMap = new HashMap<String, Map<String, String>>(){{
-      put("hawq_segment_process_percent", percentParameterMap);
-      put("mapreduce_history_server_cpu", percentParameterMap);
-      put("yarn_nodemanager_webui_percent", percentParameterMap);
-      put("yarn_resourcemanager_cpu", percentParameterMap);
-      put("datanode_process_percent", percentParameterMap);
-      put("datanode_storage_percent", percentParameterMap);
-      put("journalnode_process_percent", percentParameterMap);
-      put("namenode_cpu", percentParameterMap);
-      put("namenode_hdfs_capacity_utilization", percentParameterMap);
-      put("datanode_storage", percentParameterMap);
-      put("datanode_heap_usage", percentParameterMap);
-      put("storm_supervisor_process_percent", percentParameterMap);
-      put("hbase_regionserver_process_percent", percentParameterMap);
-      put("hbase_master_cpu", percentParameterMap);
-      put("zookeeper_server_process_percent", percentParameterMap);
-      put("metrics_monitor_process_percent", percentParameterMap);
-      put("ams_metrics_collector_hbase_master_cpu", percentParameterMap);
-    }};
-
-    Map<String, Map<String, Integer>> reportingMultiplierMap = new HashMap<String, Map<String, Integer>>(){{
-      put("hawq_segment_process_percent", new HashMap<String, Integer>() {{
-        put("warning", 100);
-        put("critical", 100);
-      }});
-      put("yarn_nodemanager_webui_percent", new HashMap<String, Integer>() {{
-        put("warning", 100);
-        put("critical", 100);
-      }});
-      put("datanode_process_percent", new HashMap<String, Integer>() {{
-        put("warning", 100);
-        put("critical", 100);
-      }});
-      put("datanode_storage_percent", new HashMap<String, Integer>() {{
-        put("warning", 100);
-        put("critical", 100);
-      }});
-      put("journalnode_process_percent", new HashMap<String, Integer>() {{
-        put("warning", 100);
-        put("critical", 100);
-      }});
-      put("storm_supervisor_process_percent", new HashMap<String, Integer>() {{
-        put("warning", 100);
-        put("critical", 100);
-      }});
-      put("hbase_regionserver_process_percent", new HashMap<String, Integer>() {{
-        put("warning", 100);
-        put("critical", 100);
-      }});
-      put("zookeeper_server_process_percent", new HashMap<String, Integer>() {{
-        put("warning", 100);
-        put("critical", 100);
-      }});
-      put("metrics_monitor_process_percent", new HashMap<String, Integer>() {{
-        put("warning", 100);
-        put("critical", 100);
-      }});
-    }};
-
-    Map<String, Map<String, Integer>> scriptAlertMultiplierMap = new HashMap<String, Map<String, Integer>>(){{
-      put("ambari_agent_disk_usage", new HashMap<String, Integer>() {{
-        put("percent.used.space.warning.threshold", 100);
-        put("percent.free.space.critical.threshold", 100);
-      }});
-      put("namenode_last_checkpoint", new HashMap<String, Integer>() {{
-        put("checkpoint.time.warning.threshold", 100);
-        put("checkpoint.time.critical.threshold", 100);
-      }});
-    }};
-
-    String newNameservicePropertyValue = "{{hdfs-site/dfs.internal.nameservices}}";
-    final Set<String> alertNamesForNameserviceUpdate = new HashSet<String>() {{
-      add("namenode_webui");
-      add("namenode_hdfs_blocks_health");
-      add("namenode_hdfs_pending_deletion_blocks");
-      add("namenode_rpc_latency");
-      add("namenode_directory_status");
-      add("datanode_health_summary");
-      add("namenode_cpu");
-      add("namenode_hdfs_capacity_utilization");
-    }};
-
-    // list of alerts that need to get property updates
-    Set<String> alertNamesForPropertyUpdates = new HashSet<String>() {{
-      add("hawq_segment_process_percent");
-      add("mapreduce_history_server_cpu");
-      add("yarn_nodemanager_webui_percent");
-      add("yarn_resourcemanager_cpu");
-      add("datanode_process_percent");
-      add("datanode_storage_percent");
-      add("journalnode_process_percent");
-      add("namenode_cpu");
-      add("namenode_hdfs_capacity_utilization");
-      add("datanode_storage");
-      add("datanode_heap_usage");
-      add("storm_supervisor_process_percent");
-      add("hbase_regionserver_process_percent");
-      add("hbase_master_cpu");
-      add("zookeeper_server_process_percent");
-      add("metrics_monitor_process_percent");
-      add("ams_metrics_collector_hbase_master_cpu");
-      add("ambari_agent_disk_usage");
-      add("namenode_last_checkpoint");
-      addAll(alertNamesForNameserviceUpdate);
-    }};
-
-    // list of alerts to be removed
-    Set<String> alertForRemoval = new HashSet<String>() {{
-      add("storm_rest_api");
-      add("mapreduce_history_server_process");
-    }};
-
-    LOG.info("Updating alert definitions.");
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    AlertDefinitionDAO alertDefinitionDAO = injector.getInstance(AlertDefinitionDAO.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
-    for (final Cluster cluster : clusterMap.values()) {
-      long clusterID = cluster.getClusterId();
-
-      // here goes alerts that need get new properties
-      final AlertDefinitionEntity namenodeLastCheckpointAlertDefinitionEntity = alertDefinitionDAO.findByName(
-              clusterID, "namenode_last_checkpoint");
-      final AlertDefinitionEntity namenodeHAHealthAlertDefinitionEntity = alertDefinitionDAO.findByName(
-              clusterID, "namenode_ha_health");
-      final AlertDefinitionEntity nodemanagerHealthAlertDefinitionEntity = alertDefinitionDAO.findByName(
-              clusterID, "yarn_nodemanager_health");
-      final AlertDefinitionEntity nodemanagerHealthSummaryAlertDefinitionEntity = alertDefinitionDAO.findByName(
-              clusterID, "nodemanager_health_summary");
-      final AlertDefinitionEntity hiveMetastoreProcessAlertDefinitionEntity = alertDefinitionDAO.findByName(
-              clusterID, "hive_metastore_process");
-      final AlertDefinitionEntity hiveServerProcessAlertDefinitionEntity = alertDefinitionDAO.findByName(
-              clusterID, "hive_server_process");
-      final AlertDefinitionEntity hiveWebhcatServerStatusAlertDefinitionEntity = alertDefinitionDAO.findByName(
-              clusterID, "hive_webhcat_server_status");
-      final AlertDefinitionEntity flumeAgentStatusAlertDefinitionEntity = alertDefinitionDAO.findByName(
-              clusterID, "flume_agent_status");
-      final AlertDefinitionEntity zookeeperServerProcessAlertDefinitionEntity = alertDefinitionDAO.findByName(
-              clusterID, "zookeeper_server_process");
-
-      Map<AlertDefinitionEntity, List<String>> alertDefinitionParams = new HashMap<>();
-      checkedPutToMap(alertDefinitionParams, namenodeLastCheckpointAlertDefinitionEntity,
-              Lists.newArrayList("connection.timeout", "checkpoint.time.warning.threshold",
-                "checkpoint.time.critical.threshold", "checkpoint.txns.multiplier.warning.threshold",
-                "checkpoint.txns.multiplier.critical.threshold"));
-      checkedPutToMap(alertDefinitionParams, namenodeHAHealthAlertDefinitionEntity,
-        Lists.newArrayList("connection.timeout"));
-      checkedPutToMap(alertDefinitionParams, nodemanagerHealthAlertDefinitionEntity,
-              Lists.newArrayList("connection.timeout"));
-      checkedPutToMap(alertDefinitionParams, nodemanagerHealthSummaryAlertDefinitionEntity,
-              Lists.newArrayList("connection.timeout"));
-      checkedPutToMap(alertDefinitionParams, hiveMetastoreProcessAlertDefinitionEntity,
-              Lists.newArrayList("default.smoke.user", "default.smoke.principal", "default.smoke.keytab"));
-      checkedPutToMap(alertDefinitionParams, hiveServerProcessAlertDefinitionEntity,
-              Lists.newArrayList("default.smoke.user", "default.smoke.principal", "default.smoke.keytab"));
-      checkedPutToMap(alertDefinitionParams, hiveWebhcatServerStatusAlertDefinitionEntity,
-              Lists.newArrayList("default.smoke.user", "connection.timeout"));
-      checkedPutToMap(alertDefinitionParams, flumeAgentStatusAlertDefinitionEntity,
-              Lists.newArrayList("run.directory"));
-      checkedPutToMap(alertDefinitionParams, zookeeperServerProcessAlertDefinitionEntity,
-              Lists.newArrayList("socket.command", "socket.command.response"));
-
-
-      Map<Long, AlertDefinitionEntity> definitionsForPropertyUpdates = new HashMap<>();
-
-      // adding new properties
-      for (Map.Entry<AlertDefinitionEntity, List<String>> entry : alertDefinitionParams.entrySet()){
-        AlertDefinitionEntity alertDefinition = entry.getKey();
-        String source = alertDefinition.getSource();
-        alertDefinition.setSource(addParam(source, entry.getValue()));
-        definitionsForPropertyUpdates.put(alertDefinition.getDefinitionId(), alertDefinition);
-      }
-
-      // here goes alerts that need update for existing properties
-      for (String name : alertNamesForPropertyUpdates) {
-        AlertDefinitionEntity alertDefinition = alertDefinitionDAO.findByName(clusterID, name);
-        if (alertDefinition != null && !definitionsForPropertyUpdates.containsKey(alertDefinition.getDefinitionId())) {
-          definitionsForPropertyUpdates.put(alertDefinition.getDefinitionId(), alertDefinition);
-        }
-      }
-
-      // updating old and new properties, best way to use map like visibilityMap.
-      for (AlertDefinitionEntity alertDefinition : definitionsForPropertyUpdates.values()) {
-        // here goes property updates
-        if (visibilityMap.containsKey(alertDefinition.getDefinitionName())) {
-          for (Map.Entry<String, String> entry : visibilityMap.get(alertDefinition.getDefinitionName()).entrySet()){
-            String paramName = entry.getKey();
-            String visibilityValue = entry.getValue();
-            String source = alertDefinition.getSource();
-            alertDefinition.setSource(addParamOption(source, paramName, "visibility", visibilityValue));
-          }
-        }
-        // update percent script alerts param values from 0.x to 0.x * 100 values
-        if (scriptAlertMultiplierMap.containsKey(alertDefinition.getDefinitionName())) {
-          for (Map.Entry<String, Integer> entry : scriptAlertMultiplierMap.get(alertDefinition.getDefinitionName()).entrySet()){
-            String paramName = entry.getKey();
-            Integer multiplier = entry.getValue();
-            String source = alertDefinition.getSource();
-            Float oldValue = getParamFloatValue(source, paramName);
-            if (oldValue == null) {
-              alertDefinition.setSource(addParam(source, Arrays.asList(paramName)));
-            } else {
-              Integer newValue = Math.round(oldValue * multiplier);
-              alertDefinition.setSource(setParamIntegerValue(source, paramName, newValue));
-            }
-          }
-        }
-
-        // update reporting alerts(aggregate and metrics) values from 0.x to 0.x * 100 values
-        if (reportingMultiplierMap.containsKey(alertDefinition.getDefinitionName())) {
-          for (Map.Entry<String, Integer> entry : reportingMultiplierMap.get(alertDefinition.getDefinitionName()).entrySet()){
-            String reportingName = entry.getKey();
-            Integer multiplier = entry.getValue();
-            String source = alertDefinition.getSource();
-            Float oldValue = getReportingFloatValue(source, reportingName);
-            Integer newValue = Math.round(oldValue * multiplier);
-            alertDefinition.setSource(setReportingIntegerValue(source, reportingName, newValue));
-          }
-        }
-
-        if (reportingPercentMap.containsKey(alertDefinition.getDefinitionName())) {
-          for (Map.Entry<String, String> entry : reportingPercentMap.get(alertDefinition.getDefinitionName()).entrySet()){
-            String paramName = entry.getKey();
-            String paramValue = entry.getValue();
-            String source = alertDefinition.getSource();
-            alertDefinition.setSource(addReportingOption(source, paramName, paramValue));
-          }
-        }
-
-        if (alertNamesForNameserviceUpdate.contains(alertDefinition.getDefinitionName())) {
-          String source = alertDefinition.getSource();
-          alertDefinition.setSource(setNameservice(source, newNameservicePropertyValue));
-        }
-        // regeneration of hash and writing modified alerts to database, must go after all modifications finished
-        alertDefinition.setHash(UUID.randomUUID().toString());
-        alertDefinitionDAO.merge(alertDefinition);
-      }
-      // update the Atlas alert
-      final AlertDefinitionEntity atlasMetadataServerWebUI = alertDefinitionDAO.findByName(
-              clusterID, "metadata_server_webui");
-      if (atlasMetadataServerWebUI != null) {
-        String source = atlasMetadataServerWebUI.getSource();
-        JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
-
-        JsonObject uriJson = sourceJson.get("uri").getAsJsonObject();
-        uriJson.remove("kerberos_keytab");
-        uriJson.remove("kerberos_principal");
-        uriJson.addProperty("kerberos_keytab", "{{cluster-env/smokeuser_keytab}}");
-        uriJson.addProperty("kerberos_principal", "{{cluster-env/smokeuser_principal_name}}");
-
-        atlasMetadataServerWebUI.setSource(sourceJson.toString());
-
-        atlasMetadataServerWebUI.setHash(UUID.randomUUID().toString());
-        alertDefinitionDAO.merge(atlasMetadataServerWebUI);
-      }
-
-      for (String alertName: alertForRemoval) {
-        AlertDefinitionEntity alertDefinition = alertDefinitionDAO.findByName(clusterID, alertName);
-        if (alertDefinition != null) {
-          LOG.info("Removing alert : " + alertName);
-          alertDefinitionDAO.remove(alertDefinition);
-        }
-      }
-    }
-  }
-
-  protected String setNameservice(String source, String paramValue) {
-    final String nameservicePropertyName = "nameservice";
-    JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
-    JsonObject highAvailability = sourceJson.getAsJsonObject("uri").getAsJsonObject("high_availability");
-    if (highAvailability.has(nameservicePropertyName)) {
-      highAvailability.addProperty(nameservicePropertyName, paramValue);
-    }
-    return sourceJson.toString();
-  }
-
-  /*
-   * Simple put helper that only adds the entry when the key entity is not null.
-   */
-  private void checkedPutToMap(Map<AlertDefinitionEntity, List<String>> alertDefinitionParams, AlertDefinitionEntity alertDefinitionEntity,
-                               List<String> params) {
-    if (alertDefinitionEntity != null) {
-      alertDefinitionParams.put(alertDefinitionEntity, params);
-    }
-  }
-
-  /**
-   * Add option to script parameter.
-   * @param source json string of script source
-   * @param paramName parameter name
-   * @param optionName option name
-   * @param optionValue option value
-   * @return modified source
-   */
-  protected String addParamOption(String source, String paramName, String optionName, String optionValue){
-    JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
-    JsonArray parametersJson = sourceJson.getAsJsonArray("parameters");
-    if(parametersJson != null && !parametersJson.isJsonNull()) {
-      for(JsonElement param : parametersJson) {
-        if(param.isJsonObject()) {
-          JsonObject paramObject = param.getAsJsonObject();
-          if(paramObject.has("name") && paramObject.get("name").getAsString().equals(paramName)){
-            paramObject.add(optionName, new JsonPrimitive(optionValue));
-          }
-        }
-      }
-    }
-    return sourceJson.toString();
-  }
-
-  /**
-   * Returns param value as float.
-   * @param source source of script alert
-   * @param paramName param name
-   * @return param value as float
-   */
-  protected Float getParamFloatValue(String source, String paramName){
-    JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
-    JsonArray parametersJson = sourceJson.getAsJsonArray("parameters");
-    if(parametersJson != null && !parametersJson.isJsonNull()) {
-      for(JsonElement param : parametersJson) {
-        if(param.isJsonObject()) {
-          JsonObject paramObject = param.getAsJsonObject();
-          if(paramObject.has("name") && paramObject.get("name").getAsString().equals(paramName)){
-            if(paramObject.has("value")) {
-              return paramObject.get("value").getAsFloat();
-            }
-          }
-        }
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Set integer param value.
-   * @param source source of script alert
-   * @param paramName param name
-   * @param value new param value
-   * @return modified source
-   */
-  protected String setParamIntegerValue(String source, String paramName, Integer value){
-    JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
-    JsonArray parametersJson = sourceJson.getAsJsonArray("parameters");
-    if(parametersJson != null && !parametersJson.isJsonNull()) {
-      for(JsonElement param : parametersJson) {
-        if(param.isJsonObject()) {
-          JsonObject paramObject = param.getAsJsonObject();
-          if(paramObject.has("name") && paramObject.get("name").getAsString().equals(paramName)){
-            paramObject.add("value", new JsonPrimitive(value));
-          }
-        }
-      }
-    }
-    return sourceJson.toString();
-  }
-
-  /**
-   * Returns reporting value as float.
-   * @param source source of aggregate or metric alert
-   * @param reportingName reporting name, must be "warning" or "critical"
-   * @return reporting value as float
-   */
-  protected Float getReportingFloatValue(String source, String reportingName){
-    JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
-    return sourceJson.getAsJsonObject("reporting").getAsJsonObject(reportingName).get("value").getAsFloat();
-  }
-
-  /**
-   * Set integer value of reporting.
-   * @param source source of aggregate or metric alert
-   * @param reportingName reporting name, must be "warning" or "critical"
-   * @param value new value
-   * @return modified source
-   */
-  protected String setReportingIntegerValue(String source, String reportingName, Integer value){
-    JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
-    sourceJson.getAsJsonObject("reporting").getAsJsonObject(reportingName).add("value", new JsonPrimitive(value));
-    return sourceJson.toString();
-  }
-
-  /**
-   * Add option to reporting
-   * @param source source of aggregate or metric alert
-   * @param optionName option name
-   * @param value option value
-   * @return modified source
-   */
-  protected String addReportingOption(String source, String optionName, String value){
-    JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
-    sourceJson.getAsJsonObject("reporting").add(optionName, new JsonPrimitive(value));
-    return sourceJson.toString();
-  }
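For reference, the removed helpers above (addParamOption, getParamFloatValue, setParamIntegerValue, setReportingIntegerValue, addReportingOption) all follow the same Gson pattern: parse the alert definition's source JSON, locate the target parameter or reporting entry, mutate it in place, and serialize the object back to a string. A minimal standalone sketch of that pattern, using an illustrative source string and option values that are not taken from the patch, could look like this:

import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.google.gson.JsonPrimitive;

public class AlertSourceEditSketch {
  public static void main(String[] args) {
    // illustrative alert source; real sources carry many more fields
    String source = "{\"parameters\":[{\"name\":\"connection.timeout\",\"value\":5.0}]}";

    JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
    JsonArray parameters = sourceJson.getAsJsonArray("parameters");
    if (parameters != null && !parameters.isJsonNull()) {
      for (JsonElement param : parameters) {
        if (param.isJsonObject()) {
          JsonObject paramObject = param.getAsJsonObject();
          if (paramObject.has("name")
              && "connection.timeout".equals(paramObject.get("name").getAsString())) {
            // attach an option to the matching parameter, as addParamOption does
            paramObject.add("visibility", new JsonPrimitive("HIDDEN"));
          }
        }
      }
    }

    // prints the source with the extra option attached to the parameter
    System.out.println(sourceJson.toString());
  }
}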
-
-  protected String addParam(String source, List<String> params) {
-    JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
-    JsonArray parametersJson = sourceJson.getAsJsonArray("parameters");
-
-    boolean parameterExists = parametersJson != null && !parametersJson.isJsonNull();
-
-    if (parameterExists) {
-      Iterator<JsonElement> jsonElementIterator = parametersJson.iterator();
-      while (jsonElementIterator.hasNext()) {
-        JsonElement element = jsonElementIterator.next();
-        JsonElement name = element.getAsJsonObject().get("name");
-        if (name != null && !name.isJsonNull() && params.contains(name.getAsString())) {
-          params.remove(name.getAsString());
-        }
-      }
-      if (params.size() == 0) {
-        return sourceJson.toString();
-      }
-    }
-
-    List<JsonObject> paramsToAdd = new ArrayList<>();
-
-    if (params.contains("connection.timeout")) {
-      JsonObject param = new JsonObject();
-      param.add("name", new JsonPrimitive("connection.timeout"));
-      param.add("display_name", new JsonPrimitive("Connection Timeout"));
-      param.add("value", new JsonPrimitive(5.0));
-      param.add("type", new JsonPrimitive("NUMERIC"));
-      param.add("description", new JsonPrimitive("The maximum time before this alert is considered to be CRITICAL"));
-      param.add("units", new JsonPrimitive("seconds"));
-      param.add("threshold", new JsonPrimitive("CRITICAL"));
-
-      paramsToAdd.add(param);
-
-    }
-    if (params.contains("checkpoint.time.warning.threshold")) {
-      JsonObject param = new JsonObject();
-      param.add("name", new JsonPrimitive("checkpoint.time.warning.threshold"));
-      param.add("display_name", new JsonPrimitive("Checkpoint Warning"));
-      param.add("value", new JsonPrimitive(2.0));
-      param.add("type", new JsonPrimitive("PERCENT"));
-      param.add("description", new JsonPrimitive("The percentage of the last checkpoint time greater than the interval in order to trigger a warning alert."));
-      param.add("units", new JsonPrimitive("%"));
-      param.add("threshold", new JsonPrimitive("WARNING"));
-
-      paramsToAdd.add(param);
-
-    }
-    if (params.contains("checkpoint.time.critical.threshold")) {
-      JsonObject param = new JsonObject();
-      param.add("name", new JsonPrimitive("checkpoint.time.critical.threshold"));
-      param.add("display_name", new JsonPrimitive("Checkpoint Critical"));
-      param.add("value", new JsonPrimitive(4.0));
-      param.add("type", new JsonPrimitive("PERCENT"));
-      param.add("description", new JsonPrimitive("The percentage of the last checkpoint time greater than the interval in order to trigger a critical alert."));
-      param.add("units", new JsonPrimitive("%"));
-      param.add("threshold", new JsonPrimitive("CRITICAL"));
-
-      paramsToAdd.add(param);
-
-    }
-    if (params.contains("checkpoint.txns.multiplier.warning.threshold")) {
-      JsonObject param = new JsonObject();
-      param.add("name", new JsonPrimitive("checkpoint.txns.multiplier.warning.threshold"));
-      param.add("display_name", new JsonPrimitive("Uncommitted transactions Warning"));
-      param.add("value", new JsonPrimitive(2.0));
-      param.add("type", new JsonPrimitive("NUMERIC"));
-      param.add("description", new JsonPrimitive("The multiplier to use against dfs.namenode.checkpoint.period compared to the difference between last transaction id and most recent transaction id beyond which to trigger a warning alert."));
-      param.add("threshold", new JsonPrimitive("WARNING"));
-
-      paramsToAdd.add(param);
-    }
-    if (params.contains("checkpoint.txns.multiplier.critical.threshold")) {
-      JsonObject param = new JsonObject();
-      param.add("name", new JsonPrimitive("checkpoint.txns.multiplier.critical.threshold"));
-      param.add("display_name", new JsonPrimitive("Uncommitted transactions Critical"));
-      param.add("value", new JsonPrimitive(4.0));
-      param.add("type", new JsonPrimitive("NUMERIC"));
-      param.add("description", new JsonPrimitive("The multiplier to use against dfs.namenode.checkpoint.period compared to the difference between last transaction id and most recent transaction id beyond which to trigger a critical alert."));
-      param.add("threshold", new JsonPrimitive("CRITICAL"));
-
-      paramsToAdd.add(param);
-    }
-    if (params.contains("default.smoke.user")) {
-      JsonObject param = new JsonObject();
-      param.add("name", new JsonPrimitive("default.smoke.user"));
-      param.add("display_name", new JsonPrimitive("Default Smoke User"));
-      param.add("value", new JsonPrimitive("ambari-qa"));
-      param.add("type", new JsonPrimitive("STRING"));
-      param.add("description", new JsonPrimitive("The user that will run the Hive commands if not specified in cluster-env/smokeuser"));
-
-      paramsToAdd.add(param);
-
-    }
-    if (params.contains("default.smoke.principal")) {
-      JsonObject param = new JsonObject();
-      param.add("name", new JsonPrimitive("default.smoke.principal"));
-      param.add("display_name", new JsonPrimitive("Default Smoke Principal"));
-      param.add("value", new JsonPrimitive("ambari-qa@EXAMPLE.COM"));
-      param.add("type", new JsonPrimitive("STRING"));
-      param.add("description", new JsonPrimitive("The principal to use when retrieving the kerberos ticket if not specified in cluster-env/smokeuser_principal_name"));
-
-      paramsToAdd.add(param);
-
-    }
-    if (params.contains("default.smoke.keytab")) {
-      JsonObject param = new JsonObject();
-      param.add("name", new JsonPrimitive("default.smoke.keytab"));
-      param.add("display_name", new JsonPrimitive("Default Smoke Keytab"));
-      param.add("value", new JsonPrimitive("/etc/security/keytabs/smokeuser.headless.keytab"));
-      param.add("type", new JsonPrimitive("STRING"));
-      param.add("description", new JsonPrimitive("The keytab to use when retrieving the kerberos ticket if not specified in cluster-env/smokeuser_keytab"));
-
-      paramsToAdd.add(param);
-
-    }
-    if (params.contains("run.directory")) {
-      JsonObject param = new JsonObject();
-      param.add("name", new JsonPrimitive("run.directory"));
-      param.add("display_name", new JsonPrimitive("Run Directory"));
-      param.add("value", new JsonPrimitive("/var/run/flume"));
-      param.add("type", new JsonPrimitive("STRING"));
-      param.add("description", new JsonPrimitive("The directory where flume agent processes will place their PID files."));
-
-      paramsToAdd.add(param);
-
-    }
-    if (params.contains("minimum.free.space")) {
-      JsonObject param = new JsonObject();
-      param.add("name", new JsonPrimitive("minimum.free.space"));
-      param.add("display_name", new JsonPrimitive("Minimum Free Space"));
-      param.add("value", new JsonPrimitive("5000000000"));
-      param.add("type", new JsonPrimitive("NUMERIC"));
-      param.add("description", new JsonPrimitive("The overall amount of free disk space left before an alert is triggered."));
-      param.add("units", new JsonPrimitive("bytes"));
-      param.add("threshold", new JsonPrimitive("WARNING"));
-      paramsToAdd.add(param);
-
-    }
-    if (params.contains("percent.used.space.warning.threshold")) {
-      JsonObject param = new JsonObject();
-      param.add("name", new JsonPrimitive("percent.used.space.warning.threshold"));
-      param.add("display_name", new JsonPrimitive("Warning"));
-      param.add("value", new JsonPrimitive("50"));
-      param.add("type", new JsonPrimitive("PERCENT"));
-      param.add("description", new JsonPrimitive("The percent of disk space consumed before a warning is triggered."));
-      param.add("units", new JsonPrimitive("%"));
-      param.add("threshold", new JsonPrimitive("WARNING"));
-      paramsToAdd.add(param);
-
-    }
-    if (params.contains("percent.free.space.critical.threshold")) {
-      JsonObject param = new JsonObject();
-      param.add("name", new JsonPrimitive("percent.free.space.critical.threshold"));
-      param.add("display_name", new JsonPrimitive("Critical"));
-      param.add("value", new JsonPrimitive("80"));
-      param.add("type", new JsonPrimitive("PERCENT"));
-      param.add("description", new JsonPrimitive("The percent of disk space consumed before a critical alert is triggered."));
-      param.add("units", new JsonPrimitive("%"));
-      param.add("threshold", new JsonPrimitive("CRITICAL"));
-      paramsToAdd.add(param);
-
-    }
-    if (params.contains("request.by.status.warning.threshold")) {
-      JsonObject param = new JsonObject();
-      param.add("name", new JsonPrimitive("request.by.status.warning.threshold"));
-      param.add("display_name", new JsonPrimitive("Warning Request Time"));
-      param.add("value", new JsonPrimitive("3000"));
-      param.add("type", new JsonPrimitive("NUMERIC"));
-      param.add("description", new JsonPrimitive("The time to find requests in progress before a warning alert is triggered."));
-      param.add("units", new JsonPrimitive("ms"));
-      param.add("threshold", new JsonPrimitive("WARNING"));
-      paramsToAdd.add(param);
-
-    }
-    if (params.contains("request.by.status.critical.threshold")) {
-      JsonObject param = new JsonObject();
-      param.add("name", new JsonPrimitive("request.by.status.critical.threshold"));
-      param.add("display_name", new JsonPrimitive("Critical Request Time"));
-      param.add("value", new JsonPrimitive("5000"));
-      param.add("type", new JsonPrimitive("NUMERIC"));
-      param.add("description", new JsonPrimitive("The time to find requests in progress before a critical alert is triggered."));
-      param.add("units", new JsonPrimitive("ms"));
-      param.add("threshold", new JsonPrimitive("CRITICAL"));
-      paramsToAdd.add(param);
-
-    }
-    if (params.contains("task.status.aggregation.warning.threshold")) {
-      JsonObject param = new JsonObject();
-      param.add("name", new JsonPrimitive("task.status.aggregation.warning.threshold"));
-      param.add("display_name", new JsonPrimitive("Warning Process Time"));
-      param.add("value", new JsonPrimitive("3000"));
-      param.add("type", new JsonPrimitive("NUMERIC"));
-      param.add("description", new JsonPrimitive("The time to calculate a request's status from its tasks before a warning alert is triggered."));
-      param.add("units", new JsonPrimitive("ms"));
-      param.add("threshold", new JsonPrimitive("WARNING"));
-      paramsToAdd.add(param);
-
-    }
-    if (params.contains("task.status.aggregation.critical.threshold")) {
-      JsonObject param = new JsonObject();
-      param.add("name", new JsonPrimitive("task.status.aggregation.critical.threshold"));
-      param.add("display_name", new JsonPrimitive("Critical Process Time"));
-      param.add("value", new JsonPrimitive("5000"));
-      param.add("type", new JsonPrimitive("NUMERIC"));
-      param.add("description", new JsonPrimitive("The time to calculate a request's status from its tasks before a critical alert is triggered."));
-      param.add("units", new JsonPrimitive("ms"));
-      param.add("threshold", new JsonPrimitive("CRITICAL"));
-      paramsToAdd.add(param);
-
-    }
-    if (params.contains("rest.api.cluster.warning.threshold")) {
-      JsonObject param = new JsonObject();
-      param.add("name", new JsonPrimitive("rest.api.cluster.warning.threshold"));
-      param.add("display_name", new JsonPrimitive("Warning Response Time"));
-      param.add("value", new JsonPrimitive("5000"));
-      param.add("type", new JsonPrimitive("NUMERIC"));
-      param.add("description", new JsonPrimitive("The time to get a cluster via the REST API before a warning alert is triggered."));
-      param.add("units", new JsonPrimitive("ms"));
-      param.add("threshold", new JsonPrimitive("WARNING"));
-      paramsToAdd.add(param);
-
-    }
-    if (params.contains("rest.api.cluster.critical.threshold")) {
-      JsonObject param = new JsonObject();
-      param.add("name", new JsonPrimitive("rest.api.cluster.critical.threshold"));
-      param.add("display_name", new JsonPrimitive("Critical Response Time"));
-      param.add("value", new JsonPrimitive("7000"));
-      param.add("type", new JsonPrimitive("NUMERIC"));
-      param.add("description", new JsonPrimitive("The time to get a cluster via the REST API before a critical alert is triggered."));
-      param.add("units", new JsonPrimitive("ms"));
-      param.add("threshold", new JsonPrimitive("CRITICAL"));
-      paramsToAdd.add(param);
-
-    }
-    if (params.contains("socket.command")) {
-      JsonObject param = new JsonObject();
-      param.add("name", new JsonPrimitive("socket.command"));
-      param.add("display_name", new JsonPrimitive("Socket Command"));
-      param.add("value", new JsonPrimitive("ruok"));
-      param.add("type", new JsonPrimitive("STRING"));
-      param.add("description", new JsonPrimitive("A socket command which queries ZooKeeper to respond with its state. The expected response is imok."));
-      paramsToAdd.add(param);
-
-    }
-    if (params.contains("socket.command.response")) {
-      JsonObject param = new JsonObject();
-      param.add("name", new JsonPrimitive("socket.command.response"));
-      param.add("display_name", new JsonPrimitive("Expected Response"));
-      param.add("value", new JsonPrimitive("imok"));
-      param.add("type", new JsonPrimitive("STRING"));
-      param.add("description", new JsonPrimitive("The expected response to the socket command."));
-      paramsToAdd.add(param);
-
-    }
-
-    if (!parameterExists) {
-      parametersJson = new JsonArray();
-      for (JsonObject param : paramsToAdd) {
-        parametersJson.add(param);
-      }
-      sourceJson.add("parameters", parametersJson);
-    } else {
-      for (JsonObject param : paramsToAdd) {
-        parametersJson.add(param);
-      }
-      sourceJson.remove("parameters");
-      sourceJson.add("parameters", parametersJson);
-    }
-
-    return sourceJson.toString();
-  }
-
-  protected void updateAdminPermissionTable() throws SQLException {
-    // Add the sort_order column to the adminpermission table
-    dbAccessor.addColumn(ADMIN_PERMISSION_TABLE,
-        new DBColumnInfo(SORT_ORDER_COL, Short.class, null, 1, false));
-
-    // Add the principal_id column to the adminpermission table
-    //   Note: This is set to nullable here, but will be altered once the column has been set
-    //         properly during the DML update phase.
-    dbAccessor.addColumn(ADMIN_PERMISSION_TABLE,
-        new DBColumnInfo(PRINCIPAL_ID_COL, Long.class, null, null, true));
-  }
-
-  protected void updateTopologyRequestTable() throws SQLException {
-    // Add the provision_action column to the topology_request table
-    dbAccessor.addColumn(TOPOLOGY_REQUEST_TABLE,
-      new DBColumnInfo(PROVISION_ACTION_COL, String.class, 255, null, true));
-  }
-
-  /**
-   * Updates the {@value #ALERT_DEFINITION_TABLE} in the following ways:
-   * <ul>
-   * <li>Creates the {@value #HELP_URL_COLUMN} column</li>
-   * <li>Creates the {@value #REPEAT_TOLERANCE_COLUMN} column</li>
-   * <li>Creates the {@value #REPEAT_TOLERANCE_ENABLED_COLUMN} column</li>
-   * </ul>
-   *
-   * @throws SQLException
-   */
-  protected void updateAlertDefinitionTable() throws SQLException {
-    dbAccessor.addColumn(ALERT_DEFINITION_TABLE,
-        new DBColumnInfo(HELP_URL_COLUMN, String.class, 512, null, true));
-
-    dbAccessor.addColumn(ALERT_DEFINITION_TABLE,
-        new DBColumnInfo(REPEAT_TOLERANCE_COLUMN, Integer.class, null, 1, false));
-
-    dbAccessor.addColumn(ALERT_DEFINITION_TABLE,
-        new DBColumnInfo(REPEAT_TOLERANCE_ENABLED_COLUMN, Short.class, null, 0, false));
-  }
-
-  /**
-   * Updates the {@value #ALERT_CURRENT_TABLE} in the following ways:
-   * <ul>
-   * <li>Creates the {@value #ALERT_CURRENT_OCCURRENCES_COLUMN} column</li>
-   * <li>Creates the {@value #ALERT_CURRENT_FIRMNESS_COLUMN} column</li>
-   * </ul>
-   *
-   * @throws SQLException
-   */
-  protected void updateAlertCurrentTable() throws SQLException {
-    dbAccessor.addColumn(ALERT_CURRENT_TABLE,
-      new DBColumnInfo(ALERT_CURRENT_OCCURRENCES_COLUMN, Long.class, null, 1, false));
-
-    dbAccessor.addColumn(ALERT_CURRENT_TABLE, new DBColumnInfo(ALERT_CURRENT_FIRMNESS_COLUMN,
-      String.class, 255, AlertFirmness.HARD.name(), false));
-  }
-
-  /**
-   * Updates the {@value #ALERT_TARGET_TABLE} in the following ways:
-   * <ul>
-   * <li>Creates the {@value #ALERT_TARGET_ENABLED_COLUMN} column</li>
-   * </ul>
-   *
-   * @throws SQLException
-   */
-  protected void updateAlertTargetTable() throws SQLException {
-    dbAccessor.addColumn(ALERT_TARGET_TABLE,
-      new DBColumnInfo(ALERT_TARGET_ENABLED_COLUMN, Short.class, null, 1, false));
-  }
-
-  protected void setRoleSortOrder() throws SQLException {
-    String updateStatement = "UPDATE " + ADMIN_PERMISSION_TABLE + " SET " + SORT_ORDER_COL + "=%d WHERE " + PERMISSION_ID_COL + "='%s'";
-
-    LOG.info("Setting permission labels");
-    dbAccessor.executeUpdate(String.format(updateStatement,
-        1, PermissionEntity.AMBARI_ADMINISTRATOR_PERMISSION_NAME));
-    dbAccessor.executeUpdate(String.format(updateStatement,
-        2, PermissionEntity.CLUSTER_ADMINISTRATOR_PERMISSION_NAME));
-    dbAccessor.executeUpdate(String.format(updateStatement,
-      3, PermissionEntity.CLUSTER_OPERATOR_PERMISSION_NAME));
-    dbAccessor.executeUpdate(String.format(updateStatement,
-      4, PermissionEntity.SERVICE_ADMINISTRATOR_PERMISSION_NAME));
-    dbAccessor.executeUpdate(String.format(updateStatement,
-      5, PermissionEntity.SERVICE_OPERATOR_PERMISSION_NAME));
-    dbAccessor.executeUpdate(String.format(updateStatement,
-      6, PermissionEntity.CLUSTER_USER_PERMISSION_NAME));
-    dbAccessor.executeUpdate(String.format(updateStatement,
-      7, PermissionEntity.VIEW_USER_PERMISSION_NAME));
-  }
-
-  /**
-   * Create and update records to create the role-based principals.
-   * <p>
-   * This includes creating the new "ROLE" principal type, a principal for each role, and finally
-   * updating the principal_id column for the role.
-   */
-  void createRolePrincipals() throws SQLException {
-    // Create Role Principal Type
-    PrincipalTypeEntity rolePrincipalType = new PrincipalTypeEntity();
-    rolePrincipalType.setName("ROLE");
-
-    // creates the new record and returns an entity with the id set.
-    rolePrincipalType = principalTypeDAO.merge(rolePrincipalType);
-
-    // Get the roles (adminpermissions) and create a principal for each.... set the role's principal_id
-    // value as we go...
-    List<PermissionEntity> roleEntities = permissionDAO.findAll();
-
-    for (PermissionEntity roleEntity : roleEntities) {
-      PrincipalEntity principalEntity = new PrincipalEntity();
-      principalEntity.setPrincipalType(rolePrincipalType);
-
-      roleEntity.setPrincipal(principalDAO.merge(principalEntity));
-
-      permissionDAO.merge(roleEntity);
-    }
-
-    // Fix the adminpermission.principal_id column to be non-nullable:
-    dbAccessor.alterColumn(ADMIN_PERMISSION_TABLE,
-        new DBColumnInfo(PRINCIPAL_ID_COL, Long.class, null, null, false));
-  }
-
-  /**
-   * Makes the following changes to the {@value #REPO_VERSION_TABLE} table:
-   * <ul>
-   * <li>repo_type VARCHAR(255) DEFAULT 'STANDARD' NOT NULL</li>
-   * <li>version_url VARCHAR(1024)</li>
-   * <li>version_xml MEDIUMTEXT</li>
-   * <li>version_xsd VARCHAR(512)</li>
-   * <li>parent_id BIGINT</li>
-   * </ul>
-   *
-   * @throws SQLException
-   */
-  private void updateRepoVersionTableDDL() throws SQLException {
-    DBColumnInfo repoTypeColumn = new DBColumnInfo("repo_type", String.class, 255, RepositoryType.STANDARD.name(), false);
-    DBColumnInfo versionUrlColumn = new DBColumnInfo("version_url", String.class, 1024, null, true);
-    DBColumnInfo versionXmlColumn = new DBColumnInfo("version_xml", Clob.class, null, null, true);
-    DBColumnInfo versionXsdColumn = new DBColumnInfo("version_xsd", String.class, 512, null, true);
-    DBColumnInfo parentIdColumn = new DBColumnInfo("parent_id", Long.class, null, null, true);
-
-    dbAccessor.addColumn(REPO_VERSION_TABLE, repoTypeColumn);
-    dbAccessor.addColumn(REPO_VERSION_TABLE, versionUrlColumn);
-    dbAccessor.addColumn(REPO_VERSION_TABLE, versionXmlColumn);
-    dbAccessor.addColumn(REPO_VERSION_TABLE, versionXsdColumn);
-    dbAccessor.addColumn(REPO_VERSION_TABLE, parentIdColumn);
-  }
-
-  /**
-   * Makes the following changes to the {@value #SERVICE_COMPONENT_DS_TABLE} table,
-   * but only if the table doesn't have its new PK set.
-   * <ul>
-   * <li>id BIGINT NOT NULL</li>
-   * <li>Drops FKs on {@value #HOST_COMPONENT_DS_TABLE} and {@value #HOST_COMPONENT_STATE_TABLE}</li>
-   * <li>Populates ID in {@value #SERVICE_COMPONENT_DS_TABLE}</li>
-   * <li>Creates {@code UNIQUE} constraint on {@value #HOST_COMPONENT_DS_TABLE}</li>
-   * <li>Adds FKs on {@value #HOST_COMPONENT_DS_TABLE} and {@value #HOST_COMPONENT_STATE_TABLE}</li>
-   * <li>Adds new sequence value of {@code servicecomponentdesiredstate_id_seq}</li>
-   * </ul>
-   *
-   * @throws SQLException
-   */
-  void updateServiceComponentDesiredStateTableDDL() throws SQLException {
-    if (dbAccessor.tableHasPrimaryKey(SERVICE_COMPONENT_DS_TABLE, ID)) {
-      LOG.info("Skipping {} table Primary Key modifications since the new {} column already exists",
-          SERVICE_COMPONENT_DS_TABLE, ID);
-
-      return;
-    }
-
-    // drop FKs to SCDS in both HCDS and HCS tables
-    // These are the expected constraint names
-    dbAccessor.dropFKConstraint(HOST_COMPONENT_DS_TABLE, "hstcmpnntdesiredstatecmpnntnme");
-    dbAccessor.dropFKConstraint(HOST_COMPONENT_STATE_TABLE, "hstcomponentstatecomponentname");
-    // These are the old (pre Ambari 1.5) constraint names, however still found on some installations
-    dbAccessor.dropFKConstraint(HOST_COMPONENT_DS_TABLE, "FK_hostcomponentdesiredstate_component_name");
-    dbAccessor.dropFKConstraint(HOST_COMPONENT_STATE_TABLE, "FK_hostcomponentstate_component_name");
-
-    // remove existing compound PK
-    dbAccessor.dropPKConstraint(SERVICE_COMPONENT_DS_TABLE, "servicecomponentdesiredstate_pkey");
-
-    // add new PK column to SCDS, making it nullable for now
-    DBColumnInfo idColumn = new DBColumnInfo(ID, Long.class, null, null, true);
-    dbAccessor.addColumn(SERVICE_COMPONENT_DS_TABLE, idColumn);
-
-    // populate SCDS id column
-    AtomicLong scdsIdCounter = new AtomicLong(1);
-    Statement statement = null;
-    ResultSet resultSet = null;
-    try {
-      statement = dbAccessor.getConnection().createStatement();
-      if (statement != null) {
-        String selectSQL = String.format("SELECT cluster_id, service_name, component_name FROM %s",
-            SERVICE_COMPONENT_DS_TABLE);
-
-        resultSet = statement.executeQuery(selectSQL);
-        while (null != resultSet && resultSet.next()) {
-          final Long clusterId = resultSet.getLong("cluster_id");
-          final String serviceName = resultSet.getString("service_name");
-          final String componentName = resultSet.getString("component_name");
-
-          String updateSQL = String.format(
-              "UPDATE %s SET %s = %d WHERE cluster_id = %d AND service_name = '%s' AND component_name = '%s'",
-              SERVICE_COMPONENT_DS_TABLE, ID, scdsIdCounter.getAndIncrement(), clusterId,
-              serviceName, componentName);
-
-          dbAccessor.executeQuery(updateSQL);
-        }
-      }
-    } finally {
-      JdbcUtils.closeResultSet(resultSet);
-      JdbcUtils.closeStatement(statement);
-    }
-
-    // make the column NON NULL now
-    dbAccessor.alterColumn(SERVICE_COMPONENT_DS_TABLE,
-        new DBColumnInfo(ID, Long.class, null, null, false));
-
-    // create a new PK, matching the name of the constraint found in SQL
-    dbAccessor.addPKConstraint(SERVICE_COMPONENT_DS_TABLE, "pk_sc_desiredstate", ID);
-
-    // create UNIQUE constraint, ensuring column order matches SQL files
-    String[] uniqueColumns = new String[] { "component_name", "service_name", "cluster_id" };
-    dbAccessor.addUniqueConstraint(SERVICE_COMPONENT_DS_TABLE, "UQ_scdesiredstate_name",
-        uniqueColumns);
-
-    // add FKs back to SCDS in both HCDS and HCS tables
-    dbAccessor.addFKConstraint(HOST_COMPONENT_DS_TABLE, "hstcmpnntdesiredstatecmpnntnme",
-        uniqueColumns, SERVICE_COMPONENT_DS_TABLE, uniqueColumns, false);
-
-    dbAccessor.addFKConstraint(HOST_COMPONENT_STATE_TABLE, "hstcomponentstatecomponentname",
-        uniqueColumns, SERVICE_COMPONENT_DS_TABLE, uniqueColumns, false);
-
-    // Add sequence for SCDS id
-    addSequence("servicecomponentdesiredstate_id_seq", scdsIdCounter.get(), false);
-  }
-
-  /**
-   * Makes the following changes to the {@value #SERVICE_COMPONENT_HISTORY_TABLE} table:
-   * <ul>
-   * <li>id BIGINT NOT NULL</li>
-   * <li>component_id BIGINT NOT NULL</li>
-   * <li>upgrade_id BIGINT NOT NULL</li>
-   * <li>from_stack_id BIGINT NOT NULL</li>
-   * <li>to_stack_id BIGINT NOT NULL</li>
-   * <li>CONSTRAINT PK_sc_history PRIMARY KEY (id)</li>
-   * <li>CONSTRAINT FK_sc_history_component_id FOREIGN KEY (component_id) REFERENCES servicecomponentdesiredstate (id)</li>
-   * <li>CONSTRAINT FK_sc_history_upgrade_id FOREIGN KEY (upgrade_id) REFERENCES upgrade (upgrade_id)</li>
-   * <li>CONSTRAINT FK_sc_history_from_stack_id FOREIGN KEY (from_stack_id) REFERENCES stack (stack_id)</li>
-   * <li>CONSTRAINT FK_sc_history_to_stack_id FOREIGN KEY (to_stack_id) REFERENCES stack (stack_id)</li>
-   * <li>Creates the {@code servicecomponent_history_id_seq}</li>
-   * </ul>
-   *
-   * @throws SQLException
-   */
-  private void createServiceComponentHistoryTable() throws SQLException {
-    List<DBColumnInfo> columns = new ArrayList<>();
-    columns.add(new DBColumnInfo(ID, Long.class, null, null, false));
-    columns.add(new DBColumnInfo("component_id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("upgrade_id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("from_stack_id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("to_stack_id", Long.class, null, null, false));
-    dbAccessor.createTable(SERVICE_COMPONENT_HISTORY_TABLE, columns, (String[]) null);
-
-    dbAccessor.addPKConstraint(SERVICE_COMPONENT_HISTORY_TABLE, "PK_sc_history", ID);
-
-    dbAccessor.addFKConstraint(SERVICE_COMPONENT_HISTORY_TABLE, "FK_sc_history_component_id",
-        "component_id", SERVICE_COMPONENT_DS_TABLE, "id", false);
-
-    dbAccessor.addFKConstraint(SERVICE_COMPONENT_HISTORY_TABLE, "FK_sc_history_upgrade_id",
-        "upgrade_id", UPGRADE_TABLE, "upgrade_id", false);
-
-    dbAccessor.addFKConstraint(SERVICE_COMPONENT_HISTORY_TABLE, "FK_sc_history_from_stack_id",
-        "from_stack_id", STACK_TABLE, "stack_id", false);
-
-    dbAccessor.addFKConstraint(SERVICE_COMPONENT_HISTORY_TABLE, "FK_sc_history_to_stack_id",
-      "to_stack_id", STACK_TABLE, "stack_id", false);
-
-    addSequence("servicecomponent_history_id_seq", 0L, false);
-  }
-
-  /**
-   * Alter servicecomponentdesiredstate table to add recovery_enabled column.
-   * @throws SQLException
-   */
-  private void updateServiceComponentDesiredStateTable() throws SQLException {
-    // ALTER TABLE servicecomponentdesiredstate ADD COLUMN
-    // recovery_enabled SMALLINT DEFAULT 0 NOT NULL
-    dbAccessor.addColumn(SERVICE_COMPONENT_DESIRED_STATE_TABLE,
-      new DBColumnInfo(RECOVERY_ENABLED_COL, Short.class, null, 0, false));
-
-    dbAccessor.addColumn(SERVICE_COMPONENT_DESIRED_STATE_TABLE,
-      new DBColumnInfo(DESIRED_VERSION_COLUMN_NAME, String.class, 255, State.UNKNOWN.toString(), false));
-  }
-
-  /**
-   * Alter host_role_command table to add original_start_time, which is needed because ActionScheduler.java
-   * can now override the value in the start_time column.
-   * @throws SQLException
-   */
-  private void updateHostRoleCommandTableDDL() throws SQLException {
-    final String columnName = "original_start_time";
-    DBColumnInfo originalStartTimeColumn = new DBColumnInfo(columnName, Long.class, null, -1L, true);
-    dbAccessor.addColumn(HOST_ROLE_COMMAND_TABLE, originalStartTimeColumn);
-  }
-
-  /**
-   * Alter host_role_command table to populate original_start_time from start_time and make the column non-nullable
-   * @throws SQLException
-   */
-  protected void updateHostRoleCommandTableDML() throws SQLException {
-    final String columnName = "original_start_time";
-    dbAccessor.executeQuery("UPDATE " + HOST_ROLE_COMMAND_TABLE + " SET original_start_time = start_time", false);
-    dbAccessor.executeQuery("UPDATE " + HOST_ROLE_COMMAND_TABLE + " SET original_start_time=-1 WHERE original_start_time IS NULL");
-    dbAccessor.setColumnNullable(HOST_ROLE_COMMAND_TABLE, columnName, false);
-  }
-
-  /**
-   * Puts each item in the specified list inside single quotes and
-   * returns a comma separated value for use in a SQL query.
-   * @param list the values to quote
-   * @return a comma separated string of single-quoted values
-   */
-  private String sqlStringListFromArrayList(List<String> list) {
-    List<String> sqlList = new ArrayList<>(list.size());
-
-    for (String item : list) {
-      sqlList.add(String.format("'%s'", item.trim()));
-    }
-
-    return StringUtils.join(sqlList, ',');
-  }
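As a quick illustration of the quoting helper above, the following self-contained sketch reproduces its behavior with hypothetical component names; the helper itself is private, so its logic is inlined here:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.lang.StringUtils;

public class SqlInListSketch {
  public static void main(String[] args) {
    // hypothetical component names, including stray whitespace to show the trim
    List<String> components = Arrays.asList("NAMENODE", " DATANODE ");

    List<String> quoted = new ArrayList<>(components.size());
    for (String item : components) {
      quoted.add(String.format("'%s'", item.trim()));
    }

    // prints 'NAMENODE','DATANODE', suitable for a SQL IN (...) clause
    System.out.println(StringUtils.join(quoted, ','));
  }
}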
-
-  /**
-   * Update clusterconfig table for config type 'cluster-env' with the
-   * recovery attributes.
-   *
-   * @throws AmbariException
-   */
-  private void updateRecoveryClusterEnvConfig() throws AmbariException {
-    Map<String, String> propertyMap = new HashMap<>();
-
-    if (StringUtils.isNotEmpty(config.getNodeRecoveryType())) {
-      propertyMap.put(RecoveryConfigHelper.RECOVERY_ENABLED_KEY, "true");
-      propertyMap.put(RecoveryConfigHelper.RECOVERY_TYPE_KEY, config.getNodeRecoveryType());
-    }
-    else {
-      propertyMap.put(RecoveryConfigHelper.RECOVERY_ENABLED_KEY, "false");
-    }
-
-    if (StringUtils.isNotEmpty(config.getNodeRecoveryLifetimeMaxCount())) {
-      propertyMap.put(RecoveryConfigHelper.RECOVERY_LIFETIME_MAX_COUNT_KEY, config.getNodeRecoveryLifetimeMaxCount());
-    }
-
-    if (StringUtils.isNotEmpty(config.getNodeRecoveryMaxCount())) {
-      propertyMap.put(RecoveryConfigHelper.RECOVERY_MAX_COUNT_KEY, config.getNodeRecoveryMaxCount());
-    }
-
-    if (StringUtils.isNotEmpty(config.getNodeRecoveryRetryGap())) {
-      propertyMap.put(RecoveryConfigHelper.RECOVERY_RETRY_GAP_KEY, config.getNodeRecoveryRetryGap());
-    }
-
-    if (StringUtils.isNotEmpty(config.getNodeRecoveryWindowInMin())) {
-      propertyMap.put(RecoveryConfigHelper.RECOVERY_WINDOW_IN_MIN_KEY, config.getNodeRecoveryWindowInMin());
-    }
-
-    AmbariManagementController ambariManagementController = injector.getInstance(
-            AmbariManagementController.class);
-
-    Clusters clusters = ambariManagementController.getClusters();
-
-    // for each cluster, update/create the cluster-env config type in clusterconfig
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
-    for (final Cluster cluster : clusterMap.values()) {
-      updateConfigurationPropertiesForCluster(cluster, ConfigHelper.CLUSTER_ENV, propertyMap,
-              true /* update if exists */, true /* create new config type */);
-    }
-  }
-
-  /**
-   * Alter servicecomponentdesiredstate table to update recovery_enabled to 1
-   * for the components that have been marked for auto start in ambari.properties
-   * @throws SQLException
-   */
-  private void updateRecoveryComponents() throws SQLException {
-
-    /*
-     * Whether specific components are enabled/disabled for recovery. Being enabled takes
-     * precedence over being disabled. When specific components are enabled then only
-     * those components are enabled. When specific components are disabled then all of
-     * the other components are enabled.
-     */
-    String enabledComponents = config.getRecoveryEnabledComponents();
-    String disabledComponents = config.getRecoveryDisabledComponents();
-    String query;
-
-    if (StringUtils.isEmpty(enabledComponents)) {
-      if (StringUtils.isEmpty(disabledComponents)) {
-        // disable all components
-        query = String.format("UPDATE %s SET recovery_enabled = 0", SERVICE_COMPONENT_DESIRED_STATE_TABLE);
-      }
-      else {
-        // enable (1 - disabledComponents)
-        List<String> disabledComponentsList = Arrays.asList(disabledComponents.split(","));
-        String components = sqlStringListFromArrayList(disabledComponentsList);
-        query = String.format("UPDATE %s SET recovery_enabled = 1 WHERE component_name NOT IN (%s)",
-                SERVICE_COMPONENT_DESIRED_STATE_TABLE, components);
-      }
-    }
-    else {
-      // enable the specified components
-      List<String> enabledComponentsList = Arrays.asList(enabledComponents.split(","));
-      String components = sqlStringListFromArrayList(enabledComponentsList);
-      query = String.format("UPDATE %s SET recovery_enabled = 1 WHERE component_name IN (%s)",
-              SERVICE_COMPONENT_DESIRED_STATE_TABLE, components);
-    }
-
-    dbAccessor.executeQuery(query);
-  }
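The precedence rules described in the comment above produce three different UPDATE shapes. The sketch below rebuilds them outside the upgrade catalog, with hypothetical component lists rather than real ambari.properties values, purely to show which WHERE clause each case yields:

import java.util.ArrayList;
import java.util.List;

public class RecoveryQuerySketch {

  // mirrors the precedence above: an enabled list wins, otherwise a disabled
  // list inverts the match, otherwise recovery is turned off for everything
  static String buildQuery(String enabledComponents, String disabledComponents) {
    String table = "servicecomponentdesiredstate";
    if (enabledComponents == null || enabledComponents.isEmpty()) {
      if (disabledComponents == null || disabledComponents.isEmpty()) {
        return "UPDATE " + table + " SET recovery_enabled = 0";
      }
      return "UPDATE " + table + " SET recovery_enabled = 1 WHERE component_name NOT IN ("
          + quote(disabledComponents) + ")";
    }
    return "UPDATE " + table + " SET recovery_enabled = 1 WHERE component_name IN ("
        + quote(enabledComponents) + ")";
  }

  // same single-quote-and-trim treatment as sqlStringListFromArrayList
  static String quote(String csv) {
    List<String> quoted = new ArrayList<>();
    for (String item : csv.split(",")) {
      quoted.add("'" + item.trim() + "'");
    }
    return String.join(",", quoted);
  }

  public static void main(String[] args) {
    // hypothetical component lists
    System.out.println(buildQuery("NAMENODE,DATANODE", null));
    System.out.println(buildQuery(null, "ZKFC"));
    System.out.println(buildQuery(null, null));
  }
}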
-
-
-  /**
-   * Update clusterconfig table and servicecomponentdesiredstate table with the
-   * recovery attributes and components to be recovered.
-   *
-   * @throws SQLException
-   */
-  @Transactional
-  protected void updateRecoveryConfigurationDML() throws SQLException, AmbariException {
-    updateRecoveryClusterEnvConfig();
-    updateRecoveryComponents();
-  }
-
-  /**
-   * Update cluster and host version states from UPGRADING or UPGRADE_FAILED to INSTALLED,
-   * and from UPGRADED to CURRENT when the repo_version_id from cluster_version matches the repo_version_id of the cluster/host version record
-   *
-   * @throws SQLException
-   */
-
-  @Transactional
-  protected void updateClustersAndHostsVersionStateTableDML() throws SQLException, AmbariException {
-
-    dbAccessor.executeQuery("UPDATE " + HOST_VERSION_TABLE + " SET state = 'INSTALLED' WHERE state IN ('UPGRADING', 'UPGRADE_FAILED', 'UPGRADED')");
-    dbAccessor.executeQuery("UPDATE " + CLUSTER_VERSION_TABLE + " SET state = 'INSTALLED' WHERE state IN ('UPGRADING', 'UPGRADE_FAILED', 'UPGRADED')");
-
-    Statement statement = null;
-    ResultSet resultSet = null;
-    try {
-      statement = dbAccessor.getConnection().createStatement();
-      if (statement != null) {
-        String selectSQL = String.format("SELECT repo_version_id, cluster_id FROM %s WHERE state = 'CURRENT'",
-                CLUSTER_VERSION_TABLE);
-
-        resultSet = statement.executeQuery(selectSQL);
-        Set<Long> clusterIds = new HashSet<>();
-        while (null != resultSet && resultSet.next()) {
-          Long clusterId = resultSet.getLong("cluster_id");
-          if (clusterIds.contains(clusterId)) {
-            throw new AmbariExcept

<TRUNCATED>

[16/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.2.json b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.2.json
deleted file mode 100644
index bc167ba..0000000
--- a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.2.json
+++ /dev/null
@@ -1,408 +0,0 @@
-{
-    "version": "1.0",
-    "stacks": [
-        {
-            "name": "HDP",
-            "old-version": "2.0",
-            "target-version": "2.2.2",
-            "options": {
-                "config-types": {
-                    "capacity-scheduler": {
-                        "merged-copy": "yes"
-                    },
-                    "cluster-env": {
-                        "merged-copy": "yes"
-                    },
-                    "core-site": {
-                        "merged-copy": "yes"
-                    },
-                    "hadoop-env": {
-                        "merged-copy": "yes"
-                    },
-                    "hbase-env": {
-                        "merged-copy": "yes"
-                    },
-                    "hbase-site": {
-                        "merged-copy": "yes"
-                    },
-                    "hdfs-log4j": {
-                        "merged-copy": "yes"
-                    },
-                    "hdfs-site": {
-                        "merged-copy": "yes"
-                    },
-                    "hive-env": {
-                        "merged-copy": "yes"
-                    },
-                    "hive-site": {
-                        "merged-copy": "yes"
-                    },
-                    "mapred-env": {
-                        "merged-copy": "yes"
-                    },
-                    "mapred-site": {
-                        "merged-copy": "yes"
-                    },
-                    "oozie-env": {
-                        "merged-copy": "yes"
-                    },
-                    "oozie-site": {
-                        "merged-copy": "yes"
-                    },
-                    "webhcat-site": {
-                        "merged-copy": "yes"
-                    },
-                    "yarn-site": {
-                        "merged-copy": "yes"
-                    }
-                }
-            },
-            "properties": {
-                "capacity-scheduler": {
-                    "yarn.scheduler.capacity.resource-calculator": "org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator",
-                    "yarn.scheduler.capacity.root.accessible-node-labels": "*",
-                    "yarn.scheduler.capacity.root.accessible-node-labels.default.capacity": {"remove": "yes"},
-                    "yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity": {"remove": "yes"},
-                    "yarn.scheduler.capacity.root.default-node-label-expression": " "
-                },
-                "core-site": {
-                    "hadoop.http.authentication.simple.anonymous.allowed": "true"
-                },
-                "hadoop-env": {
-                    "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options
  appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGC
 DateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The f
 ollowing applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# ex
 port HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /us
 r/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION 
 $HADOOP_OPTS\""
-                },
-                "hbase-env": {
-                    "content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to
  enable JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n# If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG
 _DIR={{log_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_RE
 GIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% else %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n{% endif %}"
-                },
-                "hbase-site": {
-                    "hbase.hregion.majorcompaction": "604800000",
-                    "hbase.hregion.majorcompaction.jitter": "0.50",
-                    "hbase.hregion.memstore.block.multiplier": "4",
-                    "hbase.hstore.flush.retries.number": {
-                        "remove": "yes"
-                    }
-                },
-                "hdfs-log4j": {
-                    "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop
 .root.logger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.
 appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.secur
 ity.logger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesyste
 m.audit=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoo
 p.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metri
 cs.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateChange=WARN"
-                },
-                "hdfs-site": {
-                    "dfs.datanode.max.transfer.threads": "16384",
-                    "dfs.namenode.startup.delay.block.deletion.sec": "3600"
-                },
-                "hive-env": {
-                    "content": "\n if [ \"$SERVICE\" = \"cli\" ]; then\n   if [ -z \"$DEBUG\" ]; then\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit\"\n   else\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n   fi\n fi\n\n# The heap size of the jvm stared by hive shell script can be controlled via:\n\n# Larger heap size may be required when running queries over large number of files or partitions.\n# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be\n# appropriate for hive server (hwi etc).\n\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hive Configuration Directory can be controlled by:\nexport HIVE_CONF_DIR={{hive_config_dir}}\n\n# Folder containing extra libraries required f
 or hive compilation/execution can be controlled by:\nif [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\nelif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog\nfi\n\nexport METASTORE_PORT={{hive_metastore_port}}"
-                },
-                "hive-site": {
-                    "datanucleus.cache.level2.type": "none",
-                    "hive.auto.convert.sortmerge.join.noconditionaltask": {
-                      "remove": "yes"
-                    },
-                    "fs.file.impl.disable.cache": {
-                        "remove": "yes"
-                    },
-                    "fs.hdfs.impl.disable.cache": {
-                        "remove": "yes"
-                    },
-                    "hive.auto.convert.join.noconditionaltask.size": "238026752",
-                    "hive.auto.convert.sortmerge.join.to.mapjoin": "false",
-                    "hive.cbo.enable": "true",
-                    "hive.cli.print.header": "false",
-                    "hive.cluster.delegation.token.store.class": "org.apache.hadoop.hive.thrift.ZooKeeperTokenStore",
-                    "hive.cluster.delegation.token.store.zookeeper.znode": "/hive/cluster/delegation",
-                    "hive.compactor.abortedtxn.threshold": "1000",
-                    "hive.compactor.check.interval": "300L",
-                    "hive.compactor.delta.num.threshold": "10",
-                    "hive.compactor.delta.pct.threshold": "0.1f",
-                    "hive.compactor.initiator.on": "false",
-                    "hive.compactor.worker.threads": "0",
-                    "hive.compactor.worker.timeout": "86400L",
-                    "hive.compute.query.using.stats": "true",
-                    "hive.conf.restricted.list": "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role",
-                    "hive.convert.join.bucket.mapjoin.tez": "false",
-                    "hive.heapsize": {
-                        "remove": "yes"
-                    },
-                    "hive.enforce.sortmergebucketmapjoin": "true",
-                    "hive.exec.compress.intermediate": "false",
-                    "hive.exec.compress.output": "false",
-                    "hive.exec.dynamic.partition": "true",
-                    "hive.exec.dynamic.partition.mode": "nonstrict",
-                    "hive.exec.failure.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook",
-                    "hive.exec.max.created.files": "100000",
-                    "hive.exec.max.dynamic.partitions": "5000",
-                    "hive.exec.max.dynamic.partitions.pernode": "2000",
-                    "hive.exec.orc.compression.strategy": "SPEED",
-                    "hive.exec.orc.default.compress": "ZLIB",
-                    "hive.exec.orc.default.stripe.size": "67108864",
-                    "hive.exec.parallel": "false",
-                    "hive.exec.parallel.thread.number": "8",
-                    "hive.exec.post.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook",
-                    "hive.exec.pre.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook",
-                    "hive.exec.reducers.bytes.per.reducer": "67108864",
-                    "hive.exec.reducers.max": "1009",
-                    "hive.exec.scratchdir": "/tmp/hive",
-                    "hive.exec.submit.local.task.via.child": "true",
-                    "hive.exec.submitviachild": "false",
-                    "hive.execution.engine": "mr",
-                    "hive.fetch.task.aggr": "false",
-                    "hive.fetch.task.conversion": "more",
-                    "hive.fetch.task.conversion.threshold": "1073741824",
-                    "hive.limit.optimize.enable": "true",
-                    "hive.limit.pushdown.memory.usage": "0.04",
-                    "hive.map.aggr.hash.force.flush.memory.threshold": "0.9",
-                    "hive.map.aggr.hash.min.reduction": "0.5",
-                    "hive.map.aggr.hash.percentmemory": "0.5",
-                    "hive.mapjoin.optimized.hashtable": "true",
-                    "hive.merge.mapfiles": "true",
-                    "hive.merge.mapredfiles": "false",
-                    "hive.merge.orcfile.stripe.level": "true",
-                    "hive.merge.rcfile.block.level": "true",
-                    "hive.merge.size.per.task": "256000000",
-                    "hive.merge.smallfiles.avgsize": "16000000",
-                    "hive.merge.tezfiles": "false",
-                    "hive.metastore.authorization.storage.checks": "false",
-                    "hive.metastore.client.connect.retry.delay": "5s",
-                    "hive.metastore.client.socket.timeout": "1800s",
-                    "hive.metastore.connect.retries": "24",
-                    "hive.metastore.failure.retries": "24",
-                    "hive.metastore.kerberos.keytab.file": "/etc/security/keytabs/hive.service.keytab",
-                    "hive.metastore.kerberos.principal": "hive/_HOST@EXAMPLE.COM",
-                    "hive.metastore.server.max.threads": "100000",
-                    "hive.optimize.constant.propagation": "true",
-                    "hive.optimize.metadataonly": "true",
-                    "hive.optimize.null.scan": "true",
-                    "hive.optimize.reducededuplication.min.reducer": "4",
-                    "hive.optimize.sort.dynamic.partition": "false",
-                    "hive.orc.compute.splits.num.threads": "10",
-                    "hive.orc.splits.include.file.footer": "false",
-                    "hive.prewarm.enabled": "false",
-                    "hive.prewarm.numcontainers": "10",
-                    "hive.security.metastore.authenticator.manager": "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator",
-                    "hive.security.metastore.authorization.auth.reads": "true",
-                    "hive.server2.allow.user.substitution": "true",
-                    "hive.server2.authentication.spnego.keytab": "HTTP/_HOST@EXAMPLE.COM",
-                    "hive.server2.authentication.spnego.principal": "/etc/security/keytabs/spnego.service.keytab",
-                    "hive.server2.logging.operation.enabled": "true",
-                    "hive.server2.logging.operation.log.location": "/tmp/hive/operation_logs",
-                    "hive.server2.table.type.mapping": "CLASSIC",
-                    "hive.server2.tez.default.queues": "default",
-                    "hive.server2.tez.initialize.default.sessions": "false",
-                    "hive.server2.tez.sessions.per.default.queue": "1",
-                    "hive.server2.thrift.http.path": "cliservice",
-                    "hive.server2.thrift.http.port": "10001",
-                    "hive.server2.thrift.max.worker.threads": "500",
-                    "hive.server2.thrift.sasl.qop": "auth",
-                    "hive.server2.transport.mode": "binary",
-                    "hive.server2.use.SSL": "false",
-                    "hive.smbjoin.cache.rows": "10000",
-                    "hive.stats.autogather": "true",
-                    "hive.stats.dbclass": "fs",
-                    "hive.stats.fetch.column.stats": "false",
-                    "hive.stats.fetch.partition.stats": "true",
-                    "hive.support.concurrency": "false",
-                    "hive.tez.auto.reducer.parallelism": "false",
-                    "hive.tez.container.size": "682",
-                    "hive.tez.cpu.vcores": "-1",
-                    "hive.tez.dynamic.partition.pruning": "true",
-                    "hive.tez.dynamic.partition.pruning.max.data.size": "104857600",
-                    "hive.tez.dynamic.partition.pruning.max.event.size": "1048576",
-                    "hive.tez.input.format": "org.apache.hadoop.hive.ql.io.HiveInputFormat",
-                    "hive.tez.java.opts": "-server -Xmx546m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps",
-                    "hive.tez.log.level": "INFO",
-                    "hive.tez.max.partition.factor": "2.0",
-                    "hive.tez.min.partition.factor": "0.25",
-                    "hive.tez.smb.number.waves": "0.5",
-                    "hive.txn.manager": "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager",
-                    "hive.txn.max.open.batch": "1000",
-                    "hive.txn.timeout": "300",
-                    "hive.user.install.directory": "/user/",
-                    "hive.vectorized.execution.enabled": "true",
-                    "hive.vectorized.execution.reduce.enabled": "false",
-                    "hive.vectorized.groupby.checkinterval": "4096",
-                    "hive.vectorized.groupby.flush.percent": "0.1",
-                    "hive.vectorized.groupby.maxentries": "100000",
-                    "hive.zookeeper.client.port": "2181",
-                    "hive.zookeeper.namespace": "hive_zookeeper_namespace",
-                    "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory",
-                    "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider,org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly",
-                    "hive.server2.enable.impersonation": {
-                      "remove": "yes"
-                    },
-                    "hive.optimize.mapjoin.mapreduce": {
-                      "remove": "yes"
-                    }
-                },
-                "hiveserver2-site": {
-                    "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator",
-                    "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"
-                },
-                "mapred-env": {
-                    "content": "\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n\nexport HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n\nexport HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n\n#export HADOOP_JOB_HISTORYSERVER_OPTS=\n#export HADOOP_MAPRED_LOG_DIR=\"\" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.\n#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.\n#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.\n#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default\n#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\""
-                },
-                "mapred-site": {
-                    "mapreduce.admin.map.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
-                    "mapreduce.admin.reduce.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
-                    "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
-                    "mapreduce.application.classpath": "$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
-                    "mapreduce.application.framework.path": "/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework",
-                    "mapreduce.job.emit-timeline-data": "false",
-                    "mapreduce.jobhistory.bind-host": "0.0.0.0",
-                    "mapreduce.reduce.shuffle.fetch.retry.enabled": "1",
-                    "mapreduce.reduce.shuffle.fetch.retry.interval-ms": "1000",
-                    "mapreduce.reduce.shuffle.fetch.retry.timeout-ms": "30000",
-                    "yarn.app.mapreduce.am.admin-command-opts": "-Dhdp.version=${hdp.version}",
-                    "yarn.app.mapreduce.am.command-opts": "-Xmx546m -Dhdp.version=${hdp.version}"
-                },
-                "oozie-env": {
-                    "content": "\n#!/bin/bash\n\nif [ -d \"/usr/lib/bigtop-tomcat\" ]; then\n  export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}\n  export CATALINA_BASE=${CATALINA_BASE:-{{oozie_server_dir}}}\n  export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}\n  export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat\nfi\n\n#Set JAVA HOME\nexport JAVA_HOME={{java_home}}\n\nexport JRE_HOME=${JAVA_HOME}\n\n# Set Oozie specific environment variables here.\n\n# Settings for the Embedded Tomcat that runs Oozie\n# Java System properties for Oozie should be specified in this variable\n#\n# export CATALINA_OPTS=\n\n# Oozie configuration file to load from Oozie configuration directory\n#\n# export OOZIE_CONFIG_FILE=oozie-site.xml\n\n# Oozie logs directory\n#\nexport OOZIE_LOG={{oozie_log_dir}}\n\n# Oozie pid directory\n#\nexport CATALINA_PID={{pid_file}}\n\n#Location of the data for oozie\nexport OOZIE_DATA={{oozie_data_dir}}\n\n# Oozie Log4J configuration file to load from Oozie confi
 guration directory\n#\n# export OOZIE_LOG4J_FILE=oozie-log4j.properties\n\n# Reload interval of the Log4J configuration file, in seconds\n#\n# export OOZIE_LOG4J_RELOAD=10\n\n# The port Oozie server runs\n#\nexport OOZIE_HTTP_PORT={{oozie_server_port}}\n\n# The admin port Oozie server runs\n#\nexport OOZIE_ADMIN_PORT={{oozie_server_admin_port}}\n\n# The host name Oozie server runs on\n#\n# export OOZIE_HTTP_HOSTNAME=`hostname -f`\n\n# The base URL for callback URLs to Oozie\n#\n# export OOZIE_BASE_URL=\"http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie\"\nexport JAVA_LIBRARY_PATH={{hadoop_lib_home}}/native/Linux-amd64-64\n\n# At least 1 minute of retry time to account for server downtime during\n# upgrade/downgrade\nexport OOZIE_CLIENT_OPTS=\"${OOZIE_CLIENT_OPTS} -Doozie.connection.retry.count=5 \"\n\n# This is needed so that Oozie does not run into OOM or GC Overhead limit\n# exceeded exceptions. If the oozie server is handling large number of\n# workflows/coordinator jobs, th
 e memory settings may need to be revised\nexport CATALINA_OPTS=\"${CATALINA_OPTS} -Xmx2048m -XX:MaxPermSize=256m \""
-                },
-                "oozie-site": {
-                    "oozie.authentication.simple.anonymous.allowed": "true",
-                    "oozie.service.HadoopAccessorService.kerberos.enabled": "false",
-                    "oozie.service.coord.check.maximum.frequency": "false",
-                    "oozie.services": "\n      org.apache.oozie.service.SchedulerService,\n      org.apache.oozie.service.InstrumentationService,\n      org.apache.oozie.service.MemoryLocksService,\n      org.apache.oozie.service.UUIDService,\n      org.apache.oozie.service.ELService,\n      org.apache.oozie.service.AuthorizationService,\n      org.apache.oozie.service.UserGroupInformationService,\n      org.apache.oozie.service.HadoopAccessorService,\n      org.apache.oozie.service.JobsConcurrencyService,\n      org.apache.oozie.service.URIHandlerService,\n      org.apache.oozie.service.DagXLogInfoService,\n      org.apache.oozie.service.SchemaService,\n      org.apache.oozie.service.LiteWorkflowAppService,\n      org.apache.oozie.service.JPAService,\n      org.apache.oozie.service.StoreService,\n      org.apache.oozie.service.CoordinatorStoreService,\n      org.apache.oozie.service.SLAStoreService,\n      org.apache.oozie.service.DBLiteWorkflowStoreService,\n      org.apache.oozie
 .service.CallbackService,\n      org.apache.oozie.service.ShareLibService,\n      org.apache.oozie.service.CallableQueueService,\n      org.apache.oozie.service.ActionService,\n      org.apache.oozie.service.ActionCheckerService,\n      org.apache.oozie.service.RecoveryService,\n      org.apache.oozie.service.PurgeService,\n      org.apache.oozie.service.CoordinatorEngineService,\n      org.apache.oozie.service.BundleEngineService,\n      org.apache.oozie.service.DagEngineService,\n      org.apache.oozie.service.CoordMaterializeTriggerService,\n      org.apache.oozie.service.StatusTransitService,\n      org.apache.oozie.service.PauseTransitService,\n      org.apache.oozie.service.GroupsService,\n      org.apache.oozie.service.ProxyUserService,\n      org.apache.oozie.service.XLogStreamingService,\n      org.apache.oozie.service.JvmPauseMonitorService",
-                    "oozie.services.ext": "org.apache.oozie.service.JMSAccessorService,org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService"
-                },
-                "ranger-hbase-plugin-properties": {
-                    "REPOSITORY_CONFIG_PASSWORD": "hbase",
-                    "REPOSITORY_CONFIG_USERNAME": "hbase",
-                    "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
-                    "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
-                    "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
-                    "SSL_TRUSTSTORE_PASSWORD": "changeit",
-                    "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true",
-                    "XAAUDIT.DB.IS_ENABLED": "true",
-                    "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
-                    "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
-                    "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
-                    "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
-                    "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
-                    "XAAUDIT.HDFS.IS_ENABLED": "false",
-                    "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
-                    "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
-                    "ranger-hbase-plugin-enabled": "No"
-                },
-                "ranger-hdfs-plugin-properties": {
-                    "REPOSITORY_CONFIG_PASSWORD": "hadoop",
-                    "REPOSITORY_CONFIG_USERNAME": "hadoop",
-                    "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
-                    "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
-                    "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
-                    "SSL_TRUSTSTORE_PASSWORD": "changeit",
-                    "XAAUDIT.DB.IS_ENABLED": "true",
-                    "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
-                    "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
-                    "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
-                    "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
-                    "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
-                    "XAAUDIT.HDFS.IS_ENABLED": "false",
-                    "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
-                    "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
-                    "common.name.for.certificate": "-",
-                    "hadoop.rpc.protection": "-",
-                    "ranger-hdfs-plugin-enabled": "No"
-                },
-                "ranger-hive-plugin-properties": {
-                    "REPOSITORY_CONFIG_PASSWORD": "hive",
-                    "REPOSITORY_CONFIG_USERNAME": "hive",
-                    "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
-                    "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
-                    "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
-                    "SSL_TRUSTSTORE_PASSWORD": "changeit",
-                    "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true",
-                    "XAAUDIT.DB.IS_ENABLED": "true",
-                    "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
-                    "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
-                    "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
-                    "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
-                    "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
-                    "XAAUDIT.HDFS.IS_ENABLED": "false",
-                    "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
-                    "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
-                    "common.name.for.certificate": "-",
-                    "jdbc.driverClassName": "org.apache.hive.jdbc.HiveDriver",
-                    "ranger-hive-plugin-enabled": "No"
-                },
-                "webhcat-site": {
-                    "templeton.hadoop": "/usr/hdp/current/hadoop-client/bin/hadoop",
-                    "templeton.hcat": "/usr/hdp/current/hive-client/bin/hcat",
-                    "templeton.hive.archive": "hdfs:///hdp/apps/${hdp.version}/hive/hive.tar.gz",
-                    "templeton.jar": "/usr/hdp/current/hive-webhcat/share/webhcat/svr/lib/hive-webhcat-*.jar",
-                    "templeton.libjars": "/usr/hdp/current/zookeeper-client/zookeeper.jar",
-                    "templeton.pig.archive": "hdfs:///hdp/apps/${hdp.version}/pig/pig.tar.gz",
-                    "templeton.sqoop.archive": "hdfs:///hdp/apps/${hdp.version}/sqoop/sqoop.tar.gz",
-                    "templeton.sqoop.home": "sqoop.tar.gz/sqoop",
-                    "templeton.sqoop.path": "sqoop.tar.gz/sqoop/bin/sqoop",
-                    "templeton.streaming.jar": "hdfs:///hdp/apps/${hdp.version}/mapreduce/hadoop-streaming.jar"
-                },
-                "yarn-site": {
-                    "hadoop.registry.rm.enabled": "false",
-                    "yarn.application.classpath": "$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*",
-                    "yarn.client.nodemanager-connect.max-wait-ms": "60000",
-                    "yarn.client.nodemanager-connect.retry-interval-ms": "10000",
-                    "yarn.node-labels.fs-store.retry-policy-spec": "2000, 500",
-                    "yarn.node-labels.fs-store.root-dir": "/system/yarn/node-labels",
-                    "yarn.node-labels.manager-class": "org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager",
-                    "yarn.nodemanager.bind-host": "0.0.0.0",
-                    "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage": "90",
-                    "yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb": "1000",
-                    "yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "hadoop-yarn",
-                    "yarn.nodemanager.linux-container-executor.cgroups.mount": "false",
-                    "yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage": "false",
-                    "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler",
-                    "yarn.nodemanager.log-aggregation.debug-enabled": "false",
-                    "yarn.nodemanager.log-aggregation.num-log-files-per-app": "30",
-                    "yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds": "-1",
-                    "yarn.nodemanager.recovery.dir": "{{yarn_log_dir_prefix}}/nodemanager/recovery-state",
-                    "yarn.nodemanager.recovery.enabled": "true",
-                    "yarn.nodemanager.resource.cpu-vcores": "1",
-                    "yarn.nodemanager.resource.percentage-physical-cpu-limit": "100",
-                    "yarn.resourcemanager.bind-host": "0.0.0.0",
-                    "yarn.resourcemanager.connect.max-wait.ms": "900000",
-                    "yarn.resourcemanager.connect.retry-interval.ms": "30000",
-                    "yarn.resourcemanager.fs.state-store.retry-policy-spec": "2000, 500",
-                    "yarn.resourcemanager.fs.state-store.uri": " ",
-                    "yarn.resourcemanager.ha.enabled": "false",
-                    "yarn.resourcemanager.recovery.enabled": "true",
-                    "yarn.resourcemanager.state-store.max-completed-applications": "${yarn.resourcemanager.max-completed-applications}",
-                    "yarn.resourcemanager.store.class": "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore",
-                    "yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size": "10",
-                    "yarn.resourcemanager.system-metrics-publisher.enabled": "true",
-                    "yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled": "false",
-                    "yarn.resourcemanager.work-preserving-recovery.enabled": "true",
-                    "yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms": "10000",
-                    "yarn.resourcemanager.zk-acl": "world:anyone:rwcda",
-                    "yarn.resourcemanager.zk-num-retries": "1000",
-                    "yarn.resourcemanager.zk-retry-interval-ms": "1000",
-                    "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore",
-                    "yarn.resourcemanager.zk-timeout-ms": "10000",
-                    "yarn.timeline-service.bind-host": "0.0.0.0",
-                    "yarn.timeline-service.client.max-retries": "30",
-                    "yarn.timeline-service.client.retry-interval-ms": "1000",
-                    "yarn.timeline-service.generic-application-history.store-class": "org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore",
-                    "yarn.timeline-service.http-authentication.simple.anonymous.allowed": "true",
-                    "yarn.timeline-service.http-authentication.type": "simple",
-                    "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
-                    "yarn.timeline-service.leveldb-timeline-store.read-cache-size": "104857600",
-                    "yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size": "10000",
-                    "yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size": "10000",
-                    "yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms": "300000",
-                    "yarn.timeline-service.store-class": "org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore",
-                    "yarn.timeline-service.ttl-ms": "2678400000"
-                }
-            }
-        }
-    ]
-}
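
(For orientation when reading the removed fixture above: an entry whose value is the object {"remove": "yes"} marks a property to be deleted during the upgrade, while a plain string value is the new setting. Below is a minimal, hypothetical Java sketch -- not Ambari's actual loader, and all class and method names here are invented for illustration -- of how such a mapping can be split into the updates/removes pair that the upgrade catalogs removed elsewhere in this series pass to updateConfigurationPropertiesForCluster.)

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Hypothetical illustration only; structure and names are assumptions, not Ambari code.
public class ConfigMappingSketch {

  // Split a per-config-type mapping into plain value updates and keys flagged for removal.
  public static void split(Map<String, Object> mapping,
                           Map<String, String> updates,
                           Set<String> removes) {
    for (Map.Entry<String, Object> entry : mapping.entrySet()) {
      Object value = entry.getValue();
      if (value instanceof Map && "yes".equals(((Map<?, ?>) value).get("remove"))) {
        removes.add(entry.getKey());                         // e.g. "hive.heapsize"
      } else {
        updates.put(entry.getKey(), String.valueOf(value));  // e.g. "hive.execution.engine" -> "mr"
      }
    }
  }

  public static void main(String[] args) {
    Map<String, Object> hiveSite = new HashMap<>();
    hiveSite.put("hive.execution.engine", "mr");
    Map<String, String> removeMarker = new HashMap<>();
    removeMarker.put("remove", "yes");
    hiveSite.put("hive.heapsize", removeMarker);

    Map<String, String> updates = new HashMap<>();
    Set<String> removes = new HashSet<>();
    split(hiveSite, updates, removes);
    System.out.println("updates=" + updates + ", removes=" + removes);
  }
}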


[23/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog211.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog211.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog211.java
deleted file mode 100644
index eb835ef..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog211.java
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.text.MessageFormat;
-import java.util.Collections;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.configuration.Configuration.DatabaseType;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
-import org.apache.ambari.server.orm.dao.DaoUtils;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.jdbc.support.JdbcUtils;
-
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-
-/**
- * Upgrade catalog for version 2.1.1.
- */
-public class UpgradeCatalog211 extends AbstractUpgradeCatalog {
-  private static final String HOST_COMPONENT_STATE_TABLE = "hostcomponentstate";
-  private static final String HOST_COMPONENT_STATE_ID_COLUMN = "id";
-  private static final String HOST_COMPONENT_STATE_INDEX = "idx_host_component_state";
-
-  /**
-   * Logger.
-   */
-  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog211.class);
-
-  // this "id holder" is a field only for a test that verifies "big" 4 digit+
-  // numbers are formatted correctly
-  private AtomicLong m_hcsId = new AtomicLong(1);
-
-
-  @Inject
-  DaoUtils daoUtils;
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Don't forget to register new UpgradeCatalogs in {@link SchemaUpgradeHelper.UpgradeHelperModule#configure()}
-   *
-   * @param injector Guice injector to track dependencies and uses bindings to inject them.
-   */
-  @Inject
-  public UpgradeCatalog211(Injector injector) {
-    super(injector);
-
-    daoUtils = injector.getInstance(DaoUtils.class);
-  }
-
-  // ----- UpgradeCatalog ----------------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getTargetVersion() {
-    return "2.1.1";
-  }
-
-  // ----- AbstractUpgradeCatalog --------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getSourceVersion() {
-    return "2.1.0";
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDDLUpdates() throws AmbariException, SQLException {
-    // change out the PK on hostcomponentstate
-    executeHostComponentStateDDLUpdates();
-
-    // make viewinstanceproperty.value & viewinstancedata.value nullable
-    dbAccessor.setColumnNullable("viewinstanceproperty", "value", true);
-    dbAccessor.setColumnNullable("viewinstancedata", "value", true);
-
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executePreDMLUpdates() throws AmbariException, SQLException {
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDMLUpdates() throws AmbariException, SQLException {
-    addNewConfigurationsFromXml();
-    updateExistingConfigurations();
-  }
-
-  // ----- UpgradeCatalog211 --------------------------------------------
-
-  /**
-   * Iterates over the set of clusters to call service-specific configuration
-   * update routines.
-   *
-   * @throws AmbariException
-   *           if an error occurs while updating the configurations
-   */
-  protected void updateExistingConfigurations() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if ((clusterMap != null) && !clusterMap.isEmpty()) {
-        // Iterate through the clusters and perform any configuration updates
-        for (final Cluster cluster : clusterMap.values()) {
-          updateKerberosConfigurations(cluster);
-
-          /* *********************************************************
-           * Add additional configuration update methods here
-           * ********************************************************* */
-        }
-      }
-    }
-  }
-
-  /**
-   * Updates the Kerberos configurations for the given cluster
-   * <p/>
-   * Performs the following updates:
-   * <ul>
-   * <li>Rename <code>create_attributes_template</code> to
-   * <code>ad_create_attributes_template</code></li>
-   * </ul>
-   *
-   * @param cluster
-   *          the cluster
-   * @throws AmbariException
-   *           if an error occurs while updating the configurations
-   */
-  protected void updateKerberosConfigurations(Cluster cluster) throws AmbariException {
-    Config config = cluster.getDesiredConfigByType("kerberos-env");
-
-    if (config != null) {
-      // Rename create_attributes_template to ad_create_attributes_template
-      String value = config.getProperties().get("create_attributes_template");
-      Map<String, String> updates = Collections.singletonMap("ad_create_attributes_template", value);
-      Set<String> removes = Collections.singleton("create_attributes_template");
-
-      updateConfigurationPropertiesForCluster(cluster, "kerberos-env", updates, removes, true, false);
-    }
-  }
-
-  /**
-   * Perform the DDL updates required to add a new Primary Key ID column to the
-   * {@code hostcomponentstate} table. This will perform the following actions:
-   * <ul>
-   * <li>Add a new column to hostcomponentstate named id</li>
-   * <li>Populated id with an incrementing long, then make it non-NULL</li>
-   * <li>Drop the existing PK on hostcomponentstate</li>
-   * <li>Add a new surrogate PK on hostcomponentstate on the id column</li>
-   * <li>Add an index on hostcomponentstate for host_id, component_name,
-   * service_name, cluster_id</li>
-   * </ul>
-   *
-   * @throws AmbariException
-   * @throws SQLException
-   */
-  private void executeHostComponentStateDDLUpdates() throws AmbariException, SQLException {
-    if (!dbAccessor.tableHasPrimaryKey(HOST_COMPONENT_STATE_TABLE, HOST_COMPONENT_STATE_ID_COLUMN)) {
-      // add the new column, nullable for now until we insert unique IDs
-      dbAccessor.addColumn(HOST_COMPONENT_STATE_TABLE,
-          new DBColumnInfo(HOST_COMPONENT_STATE_ID_COLUMN, Long.class, null, null, true));
-
-      Statement statement = null;
-      ResultSet resultSet = null;
-      try {
-        statement = dbAccessor.getConnection().createStatement();
-        if (statement != null) {
-          String selectSQL = MessageFormat.format(
-              "SELECT id, cluster_id, service_name, component_name, host_id FROM {0} ORDER BY {1} {2}",
-              HOST_COMPONENT_STATE_TABLE, "id", "DESC");
-
-          resultSet = statement.executeQuery(selectSQL);
-          while (resultSet.next()) {
-            final Long clusterId = resultSet.getLong("cluster_id");
-            final String serviceName = resultSet.getString("service_name");
-            final String componentName = resultSet.getString("component_name");
-            final Long hostId = resultSet.getLong("host_id");
-            final Long idKey = resultSet.getLong("id");
-
-            if (idKey != 0 && m_hcsId.get() == 1) {
-              m_hcsId.set(idKey);
-              m_hcsId.getAndIncrement();
-            } else if(idKey == 0) {
-              String updateSQL = MessageFormat.format(
-                  "UPDATE {0} SET {1} = {2,number,#} WHERE cluster_id = {3} AND service_name = ''{4}'' AND component_name = ''{5}'' and host_id = {6,number,#}",
-                  HOST_COMPONENT_STATE_TABLE, HOST_COMPONENT_STATE_ID_COLUMN, m_hcsId.getAndIncrement(),
-                  clusterId, serviceName, componentName, hostId);
-
-              dbAccessor.executeQuery(updateSQL);
-            }
-          }
-        }
-      } finally {
-        JdbcUtils.closeResultSet(resultSet);
-        JdbcUtils.closeStatement(statement);
-      }
-
-      // make the column NON NULL now
-      dbAccessor.alterColumn(HOST_COMPONENT_STATE_TABLE,
-          new DBColumnInfo(HOST_COMPONENT_STATE_ID_COLUMN, Long.class, null, null, false));
-
-      // Add sequence for hostcomponentstate id
-      addSequence("hostcomponentstate_id_seq", m_hcsId.get(), false);
-
-      // drop the current PK
-      String primaryKeyConstraintName = null;
-      Configuration.DatabaseType databaseType = configuration.getDatabaseType();
-      switch (databaseType) {
-        case POSTGRES: {
-          primaryKeyConstraintName = "hostcomponentstate_pkey";
-          break;
-        }
-        case ORACLE:
-        case SQL_SERVER: {
-          // Oracle and SQL Server require us to lookup the PK name
-          primaryKeyConstraintName = dbAccessor.getPrimaryKeyConstraintName(
-              HOST_COMPONENT_STATE_TABLE);
-
-          break;
-        }
-        default:
-          break;
-      }
-
-      if (databaseType == DatabaseType.MYSQL) {
-        String mysqlDropQuery = MessageFormat.format("ALTER TABLE {0} DROP PRIMARY KEY",
-            HOST_COMPONENT_STATE_TABLE);
-
-        dbAccessor.executeQuery(mysqlDropQuery, true);
-      } else {
-        // warn if we can't find it
-        if (null == primaryKeyConstraintName) {
-          LOG.warn("Unable to determine the primary key constraint name for {}",
-              HOST_COMPONENT_STATE_TABLE);
-        } else {
-          dbAccessor.dropPKConstraint(HOST_COMPONENT_STATE_TABLE, primaryKeyConstraintName, true);
-        }
-      }
-
-      // create a new PK, matching the name of the constraint found in the SQL
-      // files
-      dbAccessor.addPKConstraint(HOST_COMPONENT_STATE_TABLE, "pk_hostcomponentstate", "id");
-
-      // create index, ensuring column order matches that of the SQL files
-      dbAccessor.createIndex(HOST_COMPONENT_STATE_INDEX, HOST_COMPONENT_STATE_TABLE, "host_id",
-          "component_name", "service_name", "cluster_id");
-    }
-  }
-}
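
(Side note on the removal above: the Javadoc on executeHostComponentStateDDLUpdates describes the surrogate-key migration that UpgradeCatalog211 performed on hostcomponentstate. Below is a condensed sketch of the equivalent DDL, written as plain JDBC and assuming PostgreSQL syntax; the removed code issued these operations through DBAccessor, handled the other database types, and did the row-by-row id backfill itself, so this is an illustration of the net schema change, not the original implementation.)

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

// Illustrative only; the dialect and connection handling are assumptions.
public class HostComponentStatePkSketch {
  public static void main(String[] args) throws Exception {
    String jdbcUrl = args[0]; // a PostgreSQL JDBC URL supplied by the caller
    try (Connection conn = DriverManager.getConnection(jdbcUrl);
         Statement stmt = conn.createStatement()) {
      // 1. add the surrogate key column, nullable until every row has an id
      stmt.executeUpdate("ALTER TABLE hostcomponentstate ADD COLUMN id BIGINT");
      // 2. (backfill omitted: the catalog assigned incrementing ids keyed on
      //     cluster_id, service_name, component_name and host_id, then seeded
      //     the hostcomponentstate_id_seq sequence)
      // 3. make the column mandatory
      stmt.executeUpdate("ALTER TABLE hostcomponentstate ALTER COLUMN id SET NOT NULL");
      // 4. drop the old composite primary key (PostgreSQL default constraint name)
      stmt.executeUpdate("ALTER TABLE hostcomponentstate DROP CONSTRAINT hostcomponentstate_pkey");
      // 5. add the surrogate primary key and the covering index from the 2.1.1 schema files
      stmt.executeUpdate("ALTER TABLE hostcomponentstate "
          + "ADD CONSTRAINT pk_hostcomponentstate PRIMARY KEY (id)");
      stmt.executeUpdate("CREATE INDEX idx_host_component_state ON hostcomponentstate "
          + "(host_id, component_name, service_name, cluster_id)");
    }
  }
}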

http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
deleted file mode 100644
index 8eb2654..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog212.java
+++ /dev/null
@@ -1,427 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.regex.Matcher;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
-import org.apache.ambari.server.orm.dao.DaoUtils;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.utils.VersionUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-
-/**
- * Upgrade catalog for version 2.1.2.
- */
-public class UpgradeCatalog212 extends AbstractUpgradeCatalog {
-  private static final String HIVE_SITE = "hive-site";
-  private static final String HIVE_ENV = "hive-env";
-  private static final String HBASE_ENV = "hbase-env";
-  private static final String HBASE_SITE = "hbase-site";
-  private static final String CLUSTER_ENV = "cluster-env";
-  private static final String OOZIE_ENV = "oozie-env";
-
-  private static final String TOPOLOGY_REQUEST_TABLE = "topology_request";
-  private static final String CLUSTERS_TABLE = "clusters";
-  private static final String CLUSTERS_TABLE_CLUSTER_ID_COLUMN = "cluster_id";
-  private static final String TOPOLOGY_REQUEST_CLUSTER_NAME_COLUMN = "cluster_name";
-  private static final String TOPOLOGY_REQUEST_CLUSTER_ID_COLUMN = "cluster_id";
-  private static final String TOPOLOGY_REQUEST_CLUSTER_ID_FK_CONSTRAINT_NAME = "FK_topology_request_cluster_id";
-
-  private static final String HOST_ROLE_COMMAND_TABLE = "host_role_command";
-  private static final String HOST_ROLE_COMMAND_SKIP_COLUMN = "auto_skip_on_failure";
-
-  /**
-   * Logger.
-   */
-  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog212.class);
-
-  @Inject
-  DaoUtils daoUtils;
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Don't forget to register new UpgradeCatalogs in {@link org.apache.ambari.server.upgrade.SchemaUpgradeHelper.UpgradeHelperModule#configure()}
-   *
-   * @param injector Guice injector to track dependencies and uses bindings to inject them.
-   */
-  @Inject
-  public UpgradeCatalog212(Injector injector) {
-    super(injector);
-
-    daoUtils = injector.getInstance(DaoUtils.class);
-  }
-
-  // ----- UpgradeCatalog ----------------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getTargetVersion() {
-    return "2.1.2";
-  }
-
-  // ----- AbstractUpgradeCatalog --------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getSourceVersion() {
-    return "2.1.1";
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDDLUpdates() throws AmbariException, SQLException {
-    executeTopologyDDLUpdates();
-    executeHostRoleCommandDDLUpdates();
-  }
-
-  private void executeTopologyDDLUpdates() throws AmbariException, SQLException {
-    dbAccessor.addColumn(TOPOLOGY_REQUEST_TABLE, new DBColumnInfo(TOPOLOGY_REQUEST_CLUSTER_ID_COLUMN,
-      Long.class, null, null, true));
-    // TOPOLOGY_REQUEST_CLUSTER_NAME_COLUMN will be deleted in PreDML. We need a cluster name to set cluster id.
-    // dbAccessor.dropColumn(TOPOLOGY_REQUEST_TABLE, TOPOLOGY_REQUEST_CLUSTER_NAME_COLUMN);
-    // dbAccessor.setColumnNullable(TOPOLOGY_REQUEST_TABLE, TOPOLOGY_REQUEST_CLUSTER_ID_COLUMN, false);
-    // dbAccessor.addFKConstraint(TOPOLOGY_REQUEST_TABLE, TOPOLOGY_REQUEST_CLUSTER_ID_FK_CONSTRAINT_NAME,
-    //     TOPOLOGY_REQUEST_CLUSTER_ID_COLUMN, CLUSTERS_TABLE, CLUSTERS_TABLE_CLUSTER_ID_COLUMN, false);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executePreDMLUpdates() throws AmbariException, SQLException {
-    if (dbAccessor.tableHasColumn(TOPOLOGY_REQUEST_TABLE, TOPOLOGY_REQUEST_CLUSTER_NAME_COLUMN)) {
-      addClusterIdToTopology();
-      finilizeTopologyDDL();
-    } else {
-      LOG.debug("The column: [ {} ] has already been dropped from table: [ {} ]. Skipping preDMLUpdate logic.",
-          TOPOLOGY_REQUEST_CLUSTER_NAME_COLUMN, TOPOLOGY_REQUEST_TABLE);
-    }
-  }
-
-  protected void finilizeTopologyDDL() throws AmbariException, SQLException {
-    dbAccessor.dropColumn(TOPOLOGY_REQUEST_TABLE, TOPOLOGY_REQUEST_CLUSTER_NAME_COLUMN);
-    dbAccessor.setColumnNullable(TOPOLOGY_REQUEST_TABLE, TOPOLOGY_REQUEST_CLUSTER_ID_COLUMN, false);
-    dbAccessor.addFKConstraint(TOPOLOGY_REQUEST_TABLE, TOPOLOGY_REQUEST_CLUSTER_ID_FK_CONSTRAINT_NAME,
-      TOPOLOGY_REQUEST_CLUSTER_ID_COLUMN, CLUSTERS_TABLE, CLUSTERS_TABLE_CLUSTER_ID_COLUMN, false);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDMLUpdates() throws AmbariException, SQLException {
-    addNewConfigurationsFromXml();
-    addMissingConfigs();
-  }
-
-  protected void addClusterIdToTopology() throws AmbariException, SQLException {
-    Map<String, Long> clusterNameIdMap = new HashMap<>();
-    try (Statement statement = dbAccessor.getConnection().createStatement();
-         ResultSet rs = statement.executeQuery("SELECT DISTINCT cluster_name, cluster_id FROM clusters");
-    ) {
-      while (rs.next()) {
-        long clusterId = rs.getLong("cluster_id");
-        String clusterName = rs.getString("cluster_name");
-        clusterNameIdMap.put(clusterName, clusterId);
-      }
-    }
-
-    for (String clusterName : clusterNameIdMap.keySet()) {
-      try (PreparedStatement preparedStatement = dbAccessor.getConnection().prepareStatement("UPDATE topology_request " +
-          "SET cluster_id=? WHERE cluster_name=?");
-      ) {
-        preparedStatement.setLong(1, clusterNameIdMap.get(clusterName));
-        preparedStatement.setString(2, clusterName);
-        preparedStatement.executeUpdate();
-      }
-    }
-
-    // Set cluster id for all null values.
-    // Useful if cluster was renamed and cluster name does not match.
-    if (clusterNameIdMap.entrySet().size() >= 1) {
-      try (PreparedStatement preparedStatement = dbAccessor.getConnection().prepareStatement("UPDATE topology_request " +
-          "SET cluster_id=? WHERE cluster_id IS NULL");
-      ) {
-        preparedStatement.setLong(1, clusterNameIdMap.entrySet().iterator().next().getValue());
-        preparedStatement.executeUpdate();
-      }
-    }
-    if (clusterNameIdMap.entrySet().size() == 0) {
-      LOG.warn("Cluster not found. topology_request.cluster_id is not set");
-    }
-    if (clusterNameIdMap.entrySet().size() > 1) {
-      LOG.warn("Found more than one cluster. topology_request.cluster_id can be incorrect if you have renamed the cluster.");
-    }
-  }
-
-  protected void addMissingConfigs() throws AmbariException {
-    updateHiveConfigs();
-    updateOozieConfigs();
-    updateHbaseAndClusterConfigurations();
-    updateKafkaConfigurations();
-    updateStormConfigs();
-    removeDataDirMountConfig();
-  }
-
-  protected void updateStormConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if ((clusterMap != null) && !clusterMap.isEmpty()) {
-        // Iterate through the clusters and perform any configuration updates
-        for (final Cluster cluster : clusterMap.values()) {
-          Set<String> removes = new HashSet<>();
-          removes.add("topology.metrics.consumer.register");
-          updateConfigurationPropertiesForCluster(cluster, "storm-site",
-            new HashMap<String, String>(), removes, false, false);
-        }
-      }
-    }
-  }
-
-  protected void updateKafkaConfigurations() throws AmbariException {
-    Map<String, String> properties = new HashMap<>();
-    properties.put("external.kafka.metrics.exclude.prefix",
-      "kafka.network.RequestMetrics,kafka.server.DelayedOperationPurgatory," +
-        "kafka.server.BrokerTopicMetrics.BytesRejectedPerSec");
-    properties.put("external.kafka.metrics.include.prefix",
-      "kafka.network.RequestMetrics.ResponseQueueTimeMs.request.OffsetCommit.98percentile," +
-        "kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Offsets.95percentile," +
-        "kafka.network.RequestMetrics.ResponseSendTimeMs.request.Fetch.95percentile," +
-        "kafka.network.RequestMetrics.RequestsPerSec.request");
-
-    updateConfigurationProperties("kafka-broker", properties, false, false);
-  }
-
-  protected void updateHbaseAndClusterConfigurations() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if ((clusterMap != null) && !clusterMap.isEmpty()) {
-        // Iterate through the clusters and perform any configuration updates
-        for (final Cluster cluster : clusterMap.values()) {
-          Config hbaseEnvProps = cluster.getDesiredConfigByType(HBASE_ENV);
-          Config hbaseSiteProps = cluster.getDesiredConfigByType(HBASE_SITE);
-
-          if (hbaseEnvProps != null) {
-            // Remove override_hbase_uid from hbase-env and add override_uid to cluster-env
-            String value = hbaseEnvProps.getProperties().get("override_hbase_uid");
-            if (value != null) {
-              Map<String, String> updates = new HashMap<>();
-              Set<String> removes = new HashSet<>();
-              updates.put("override_uid", value);
-              removes.add("override_hbase_uid");
-              updateConfigurationPropertiesForCluster(cluster, HBASE_ENV, new HashMap<String, String>(), removes, false, true);
-              updateConfigurationPropertiesForCluster(cluster, CLUSTER_ENV, updates, true, false);
-            } else {
-              updateOverrideUIDClusterConfig("false", cluster);
-            }
-          } else {
-            updateOverrideUIDClusterConfig("false", cluster);
-          }
-
-          if (hbaseSiteProps != null) {
-            String value = hbaseSiteProps.getProperties().get("hbase.bucketcache.size");
-            if (value != null) {
-              if (value.endsWith("m")) {
-                value = value.substring(0, value.length() - 1);
-                Map<String, String> updates = new HashMap<>();
-                updates.put("hbase.bucketcache.size", value);
-                updateConfigurationPropertiesForCluster(cluster, HBASE_SITE, updates, true, false);
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Set override_uid to false during the upgrade to retain UIDs already set on the cluster
-   * This is necessary for upgrading a third party Ambari/stack distribution from
-   * Ambari version 2.1.0 where HBase does not have override_hbase_uid.
-   * */
-  private void updateOverrideUIDClusterConfig(String toOverride, Cluster cluster) throws AmbariException{
-    Map<String, String> updates = new HashMap<>();
-    updates.put("override_uid", toOverride);
-    updateConfigurationPropertiesForCluster(cluster, CLUSTER_ENV, updates, true, false);
-  }
-
-  protected void updateHiveConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(
-            AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          Service service = cluster.getServices().get("HIVE");
-
-          if (null == service) {
-            continue;
-          }
-
-          StackId stackId = service.getDesiredStackId();
-
-          String content = null;
-          Boolean isHiveSitePresent = cluster.getDesiredConfigByType(HIVE_SITE) != null;
-          Boolean isStackNotLess22 = (stackId != null && stackId.getStackName().equals("HDP") &&
-                  VersionUtils.compareVersions(stackId.getStackVersion(), "2.2") >= 0);
-
-          if (cluster.getDesiredConfigByType(HIVE_ENV) != null && isStackNotLess22) {
-            Map<String, String> hiveEnvProps = new HashMap<>();
-            content = cluster.getDesiredConfigByType(HIVE_ENV).getProperties().get("content");
-            if(content != null) {
-              content = updateHiveEnvContent(content);
-              hiveEnvProps.put("content", content);
-            }
-            updateConfigurationPropertiesForCluster(cluster, HIVE_ENV, hiveEnvProps, true, true);
-          }
-
-          if (isHiveSitePresent && isStackNotLess22) {
-            Set<String> hiveSiteRemoveProps = new HashSet<>();
-            hiveSiteRemoveProps.add("hive.heapsize");
-            hiveSiteRemoveProps.add("hive.optimize.mapjoin.mapreduce");
-            hiveSiteRemoveProps.add("hive.server2.enable.impersonation");
-            hiveSiteRemoveProps.add("hive.auto.convert.sortmerge.join.noconditionaltask");
-
-            updateConfigurationPropertiesForCluster(cluster, HIVE_SITE, new HashMap<String, String>(), hiveSiteRemoveProps, false, true);
-          }
-        }
-      }
-    }
-  }
-
-  protected void updateOozieConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          Config oozieEnv = cluster.getDesiredConfigByType(OOZIE_ENV);
-          if (oozieEnv != null) {
-            Map<String, String> oozieEnvProperties = oozieEnv.getProperties();
-
-            String hostname = oozieEnvProperties.get("oozie_hostname");
-            String db_type = oozieEnvProperties.get("oozie_database");
-            String final_db_host = null;
-            // fix for empty hostname after 1.7 -> 2.1.x+ upgrade
-            if (hostname != null && db_type != null && hostname.equals("")) {
-              switch (db_type.toUpperCase()) {
-                case "EXISTING MYSQL DATABASE":
-                  final_db_host = oozieEnvProperties.get("oozie_existing_mysql_host");
-                  break;
-                case "EXISTING POSTGRESQL DATABASE":
-                  final_db_host = oozieEnvProperties.get("oozie_existing_postgresql_host");
-                  break;
-                case "EXISTING ORACLE DATABASE":
-                  final_db_host = oozieEnvProperties.get("oozie_existing_oracle_host");
-                  break;
-                default:
-                  final_db_host = null;
-                  break;
-              }
-              if (final_db_host != null) {
-                Map<String, String> newProperties = new HashMap<>();
-                newProperties.put("oozie_hostname", final_db_host);
-                updateConfigurationPropertiesForCluster(cluster, OOZIE_ENV, newProperties, true, true);
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-
-  protected String updateHiveEnvContent(String hiveEnvContent) {
-    if(hiveEnvContent == null) {
-      return null;
-    }
-    String oldHeapSizeRegex = "export HADOOP_HEAPSIZE=\"\\{\\{hive_heapsize\\}\\}\"\\s*\\n" +
-            "export HADOOP_CLIENT_OPTS=\"-Xmx\\$\\{HADOOP_HEAPSIZE\\}m \\$HADOOP_CLIENT_OPTS\"";
-    String newAuxJarPath = "";
-    return hiveEnvContent.replaceAll(oldHeapSizeRegex, Matcher.quoteReplacement(newAuxJarPath));
-  }
-
-  /**
-   * DDL changes for {@link #HOST_ROLE_COMMAND_TABLE}.
-   *
-   * @throws AmbariException
-   * @throws SQLException
-   */
-  private void executeHostRoleCommandDDLUpdates() throws AmbariException, SQLException {
-    dbAccessor.addColumn(HOST_ROLE_COMMAND_TABLE,
-        new DBColumnInfo(HOST_ROLE_COMMAND_SKIP_COLUMN, Integer.class, 1, 0, false));
-  }
-
-  protected void removeDataDirMountConfig() throws AmbariException {
-    Set<String> properties = new HashSet<>();
-    properties.add("dfs.datanode.data.dir.mount.file");
-
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          removeConfigurationPropertiesFromCluster(cluster, "hadoop-env", properties);
-        }
-      }
-    }
-  }
-}
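
For reference, the topology_request backfill deleted above reduces to a parameterized UPDATE run inside try-with-resources. Below is a stripped-down sketch of that pattern in plain JDBC; in the real catalog the Connection comes from dbAccessor.getConnection(), and the class and method names here are illustrative only, not part of Ambari.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class TopologyRequestBackfillSketch {

  /**
   * Sets topology_request.cluster_id for rows matching a cluster name,
   * mirroring the UPDATE statement removed in this commit.
   */
  public static int backfill(Connection connection, long clusterId, String clusterName)
      throws SQLException {
    try (PreparedStatement ps = connection.prepareStatement(
        "UPDATE topology_request SET cluster_id = ? WHERE cluster_name = ?")) {
      ps.setLong(1, clusterId);
      ps.setString(2, clusterName);
      return ps.executeUpdate(); // number of rows updated
    }
  }
}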

http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog2121.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog2121.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog2121.java
deleted file mode 100644
index ab41b99..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog2121.java
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.upgrade;
-
-import java.sql.SQLException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.orm.dao.DaoUtils;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.DesiredConfig;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.StackId;
-import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-/**
- * Upgrade catalog for version 2.1.2.1
- */
-public class UpgradeCatalog2121 extends AbstractUpgradeCatalog {
-
-  /**
-   * Logger.
-   */
-  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog2121.class);
-
-  @Inject
-  DaoUtils daoUtils;
-
-  private static final String OOZIE_SITE_CONFIG = "oozie-site";
-  private static final String OOZIE_AUTHENTICATION_KERBEROS_NAME_RULES = "oozie.authentication.kerberos.name.rules";
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Don't forget to register new UpgradeCatalogs in {@link org.apache.ambari.server.upgrade.SchemaUpgradeHelper.UpgradeHelperModule#configure()}
-   *
-   * @param injector Guice injector to track dependencies and uses bindings to inject them.
-   */
-  @Inject
-  public UpgradeCatalog2121(Injector injector) {
-    super(injector);
-
-    daoUtils = injector.getInstance(DaoUtils.class);
-  }
-
-  // ----- UpgradeCatalog ----------------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getTargetVersion() {
-    return "2.1.2.1";
-  }
-
-  // ----- AbstractUpgradeCatalog --------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getSourceVersion() {
-    return "2.1.2";
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDDLUpdates() throws AmbariException, SQLException {
-
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executePreDMLUpdates() throws AmbariException, SQLException {
-
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDMLUpdates() throws AmbariException, SQLException {
-    updatePHDConfigs();
-    updateOozieConfigs();
-  }
-
-  /**
-   * Update PHD stack configs
-   * @throws AmbariException
-   */
-  protected void updatePHDConfigs() throws AmbariException {
-
-    Map<String, String> replacements = new LinkedHashMap<>();
-    replacements.put("-Dstack.name=\\{\\{\\s*stack_name\\s*\\}\\}\\s*", "");
-    replacements.put("-Dstack.name=\\$\\{stack.name\\}\\s*", "");
-    replacements.put("-Dstack.version=\\{\\{\\s*stack_version_buildnum\\s*\\}\\}", "-Dhdp.version=\\$HDP_VERSION");
-    replacements.put("-Dstack.version=\\$\\{stack.version\\}", "-Dhdp.version=\\$\\{hdp.version\\}");
-    replacements.put("\\{\\{\\s*stack_name\\s*\\}\\}", "phd");
-    replacements.put("\\$\\{stack.name\\}", "phd");
-    replacements.put("\\$\\{stack.version\\}", "\\$\\{hdp.version\\}");
-
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-      if ((clusterMap != null) && !clusterMap.isEmpty()) {
-        // Iterate through the clusters and perform any configuration updates
-        Set<StackId> stackIds = new HashSet<>();
-
-        for (final Cluster cluster : clusterMap.values()) {
-          for (Service service : cluster.getServices().values()) {
-            StackId currentStackVersion = service.getDesiredStackId();
-
-            if (stackIds.contains(currentStackVersion)) {
-              continue;
-            } else {
-              stackIds.add(currentStackVersion);
-            }
-
-            String currentStackName = currentStackVersion != null? currentStackVersion.getStackName() : null;
-            if (currentStackName != null && currentStackName.equalsIgnoreCase("PHD")) {
-              // Update configs only if PHD stack is deployed
-              Map<String, DesiredConfig> desiredConfigs = cluster.getDesiredConfigs();
-              if(desiredConfigs != null && !desiredConfigs.isEmpty()) {
-                for (Map.Entry<String, DesiredConfig> dc : desiredConfigs.entrySet()) {
-                  String configType = dc.getKey();
-                  DesiredConfig desiredConfig = dc.getValue();
-                  String configTag = desiredConfig.getTag();
-                  Config config = cluster.getConfig(configType, configTag);
-
-                  Map<String, String> properties = config.getProperties();
-                  if(properties != null && !properties.isEmpty()) {
-                    Map<String, String> updates = new HashMap<>();
-                    for (Map.Entry<String, String> property : properties.entrySet()) {
-                      String propertyKey = property.getKey();
-                      String propertyValue = property.getValue();
-                      String modifiedPropertyValue = propertyValue;
-                      for (String regex : replacements.keySet()) {
-                        modifiedPropertyValue = modifiedPropertyValue.replaceAll(regex, replacements.get(regex));
-                      }
-                      if (!modifiedPropertyValue.equals(propertyValue)) {
-                        updates.put(propertyKey, modifiedPropertyValue);
-                      }
-                    }
-                    if (!updates.isEmpty()) {
-                      updateConfigurationPropertiesForCluster(cluster, configType, updates, true, false);
-                    }
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-
-  protected void updateOozieConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config oozieSiteProps = cluster.getDesiredConfigByType(OOZIE_SITE_CONFIG);
-      if (oozieSiteProps != null) {
-        // Remove oozie.authentication.kerberos.name.rules if empty
-        String oozieAuthKerbRules = oozieSiteProps.getProperties().get(OOZIE_AUTHENTICATION_KERBEROS_NAME_RULES);
-        if (StringUtils.isBlank(oozieAuthKerbRules)) {
-          Set<String> removeProperties = new HashSet<>();
-          removeProperties.add(OOZIE_AUTHENTICATION_KERBEROS_NAME_RULES);
-          updateConfigurationPropertiesForCluster(cluster, OOZIE_SITE_CONFIG, new HashMap<String, String>(), removeProperties, true, false);
-        }
-      }
-    }
-
-  }
-}
-
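
The catalogs being removed in this commit all share one shape: subclass AbstractUpgradeCatalog, declare source and target versions, and apply per-cluster configuration changes in executeDMLUpdates(). The following is a minimal sketch of that pattern, assuming the helper methods visible in the diffs above (getCheckedClusterMap, updateConfigurationPropertiesForCluster, the injected Guice Injector); the catalog name, config type, and property key are placeholders, not a real Ambari catalog.

package org.apache.ambari.server.upgrade;

import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;

import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.controller.AmbariManagementController;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Config;

import com.google.inject.Inject;
import com.google.inject.Injector;

public class UpgradeCatalogExample extends AbstractUpgradeCatalog {

  @Inject
  public UpgradeCatalogExample(Injector injector) {
    super(injector);
  }

  @Override
  public String getSourceVersion() {
    return "X.Y.0"; // placeholder versions
  }

  @Override
  public String getTargetVersion() {
    return "X.Y.1";
  }

  @Override
  protected void executeDDLUpdates() throws AmbariException, SQLException {
    // schema changes (dbAccessor.addColumn, createIndex, ...) would go here
  }

  @Override
  protected void executePreDMLUpdates() throws AmbariException, SQLException {
  }

  @Override
  protected void executeDMLUpdates() throws AmbariException, SQLException {
    // Walk every cluster, read one desired config type, merge the changed keys back.
    AmbariManagementController controller = injector.getInstance(AmbariManagementController.class);
    for (Cluster cluster : getCheckedClusterMap(controller.getClusters()).values()) {
      Config site = cluster.getDesiredConfigByType("example-site"); // placeholder type
      if (site != null && "old".equals(site.getProperties().get("example.property"))) {
        Map<String, String> updates = new HashMap<>();
        updates.put("example.property", "new");
        updateConfigurationPropertiesForCluster(cluster, "example-site", updates, true, false);
      }
    }
  }
}

As the javadoc on the removed constructors notes, real catalogs are additionally registered in SchemaUpgradeHelper.UpgradeHelperModule#configure().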


[35/63] [abbrv] ambari git commit: AMBARI-21352. Workflow Manager view build failure (Venkata Sairam)

Posted by ab...@apache.org.
AMBARI-21352. Workflow Manager view build failure (Venkata Sairam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1e295908
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1e295908
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1e295908

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 1e2959086fa2151888f7c1cfddaf3c6a2cedb25f
Parents: b1a1543
Author: Venkata Sairam <ve...@gmail.com>
Authored: Tue Jun 27 15:53:12 2017 +0530
Committer: Venkata Sairam <ve...@gmail.com>
Committed: Tue Jun 27 15:53:12 2017 +0530

----------------------------------------------------------------------
 contrib/views/wfmanager/src/main/resources/ui/bower.json | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1e295908/contrib/views/wfmanager/src/main/resources/ui/bower.json
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/bower.json b/contrib/views/wfmanager/src/main/resources/ui/bower.json
index 06fc3e3..3f9de44 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/bower.json
+++ b/contrib/views/wfmanager/src/main/resources/ui/bower.json
@@ -21,11 +21,11 @@
     "abdmob/x2js": "~1.2.0",
     "datatables": "~1.10.11",
     "vkBeautify": "https://github.com/vkiryukhin/vkBeautify.git",
-    "cytoscape": "~2.7.7",
+    "cytoscape": "2.7.20",
     "cytoscape-dagre": "~1.3.0",
     "cytoscape-panzoom": "~2.4.0",
     "codemirror": "~5.15.0",
     "fuse.js": "~2.5.0",
-    "jsog":"1.0.7"
+    "jsog": "1.0.7"
   }
 }


[34/63] [abbrv] ambari git commit: AMBARI-21388. Styling Issues with newly implemented workflow manager file browser (Venkata Sairam)

Posted by ab...@apache.org.
AMBARI-21388. Styling Issues with newly implemented workflow manager file browser (Venkata Sairam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b1a15435
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b1a15435
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b1a15435

Branch: refs/heads/branch-feature-logsearch-ui
Commit: b1a154358078b02a4d84bbf865ff0209c1912e87
Parents: 9833bc1
Author: Venkata Sairam <ve...@gmail.com>
Authored: Tue Jun 27 14:58:59 2017 +0530
Committer: Venkata Sairam <ve...@gmail.com>
Committed: Tue Jun 27 14:59:25 2017 +0530

----------------------------------------------------------------------
 .../src/main/resources/ui/app/styles/app.less   |  4 +-
 .../hdfs-directory-viewer/addon/styles/app.css  |  1 +
 .../wfmanager/src/main/resources/ui/yarn.lock   | 68 +++++++++++++++-----
 3 files changed, 54 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b1a15435/contrib/views/wfmanager/src/main/resources/ui/app/styles/app.less
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/styles/app.less b/contrib/views/wfmanager/src/main/resources/ui/app/styles/app.less
index 597e2e8..9a35aca 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/styles/app.less
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/styles/app.less
@@ -1060,8 +1060,8 @@ input:invalid {
   width: 100%;
 }
 .hdfs-browse{
-  height: 500px;
-  max-height: 500px;
+  height: 350px;
+  max-height: 350px;
   overflow: scroll;
 }
 #wf_title{

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1a15435/contrib/views/wfmanager/src/main/resources/ui/externaladdons/hdfs-directory-viewer/addon/styles/app.css
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/externaladdons/hdfs-directory-viewer/addon/styles/app.css b/contrib/views/wfmanager/src/main/resources/ui/externaladdons/hdfs-directory-viewer/addon/styles/app.css
index b46fa34..e0b4463 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/externaladdons/hdfs-directory-viewer/addon/styles/app.css
+++ b/contrib/views/wfmanager/src/main/resources/ui/externaladdons/hdfs-directory-viewer/addon/styles/app.css
@@ -83,6 +83,7 @@
 	width:500px;
 	position:relative;
 	overflow:auto;
+	float:left;
 }
 .directory-viewer .padding-left-10px {
 	padding-left: 10px;

http://git-wip-us.apache.org/repos/asf/ambari/blob/b1a15435/contrib/views/wfmanager/src/main/resources/ui/yarn.lock
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/yarn.lock b/contrib/views/wfmanager/src/main/resources/ui/yarn.lock
index e9ad6cc..f3602c9 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/yarn.lock
+++ b/contrib/views/wfmanager/src/main/resources/ui/yarn.lock
@@ -66,10 +66,6 @@ amdefine@>=0.0.4:
   version "1.0.1"
   resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5"
 
-ansi-regex@*, ansi-regex@^2.0.0:
-  version "2.1.1"
-  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df"
-
 ansi-regex@^0.2.0, ansi-regex@^0.2.1:
   version "0.2.1"
   resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-0.2.1.tgz#0d8e946967a3d8143f93e24e298525fc1b2235f9"
@@ -78,6 +74,10 @@ ansi-regex@^1.0.0:
   version "1.1.1"
   resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-1.1.1.tgz#41c847194646375e6a1a5d10c3ca054ef9fc980d"
 
+ansi-regex@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df"
+
 ansi-styles@^1.1.0:
   version "1.1.0"
   resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.1.0.tgz#eaecbf66cd706882760b2f4691582b8f55d7a7de"
@@ -1111,6 +1111,19 @@ broccoli-file-creator@^1.0.0, broccoli-file-creator@^1.0.1:
     rsvp "~3.0.6"
     symlink-or-copy "^1.0.1"
 
+broccoli-filter@^0.1.6:
+  version "0.1.14"
+  resolved "https://registry.yarnpkg.com/broccoli-filter/-/broccoli-filter-0.1.14.tgz#23cae3891ff9ebb7b4d7db00c6dcf03535daf7ad"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.6"
+    broccoli-writer "^0.1.1"
+    mkdirp "^0.3.5"
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.2"
+    rsvp "^3.0.16"
+    symlink-or-copy "^1.0.1"
+    walk-sync "^0.1.3"
+
 broccoli-filter@^1.2.2, broccoli-filter@^1.2.3:
   version "1.2.4"
   resolved "https://registry.yarnpkg.com/broccoli-filter/-/broccoli-filter-1.2.4.tgz#409afb94b9a3a6da9fac8134e91e205f40cc7330"
@@ -1159,7 +1172,7 @@ broccoli-jshint@^1.0.0:
     json-stable-stringify "^1.0.0"
     mkdirp "~0.4.0"
 
-broccoli-kitchen-sink-helpers@^0.2.5, broccoli-kitchen-sink-helpers@~0.2.0:
+broccoli-kitchen-sink-helpers@^0.2.5, broccoli-kitchen-sink-helpers@^0.2.6, broccoli-kitchen-sink-helpers@~0.2.0:
   version "0.2.9"
   resolved "https://registry.yarnpkg.com/broccoli-kitchen-sink-helpers/-/broccoli-kitchen-sink-helpers-0.2.9.tgz#a5e0986ed8d76fb5984b68c3f0450d3a96e36ecc"
   dependencies:
@@ -1296,7 +1309,7 @@ broccoli-viz@^2.0.1:
   version "2.0.1"
   resolved "https://registry.yarnpkg.com/broccoli-viz/-/broccoli-viz-2.0.1.tgz#3f3ed2fb83e368aa5306fae460801dea552e40db"
 
-broccoli-writer@~0.1.1:
+broccoli-writer@^0.1.1, broccoli-writer@~0.1.1:
   version "0.1.1"
   resolved "https://registry.yarnpkg.com/broccoli-writer/-/broccoli-writer-0.1.1.tgz#d4d71aa8f2afbc67a3866b91a2da79084b96ab2d"
   dependencies:
@@ -1935,7 +1948,7 @@ ember-cli-app-version@^1.0.0:
     ember-cli-htmlbars "^1.0.0"
     git-repo-version "0.3.0"
 
-ember-cli-babel@^5.0.0, ember-cli-babel@^5.1.10, ember-cli-babel@^5.1.3, ember-cli-babel@^5.1.5, ember-cli-babel@^5.1.6, ember-cli-babel@^5.1.7:
+ember-cli-babel@^5.0.0, ember-cli-babel@^5.1.10, ember-cli-babel@^5.1.3, ember-cli-babel@^5.1.5, ember-cli-babel@^5.1.6, ember-cli-babel@^5.1.7, ember-cli-babel@^5.2.4:
   version "5.2.4"
   resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-5.2.4.tgz#5ce4f46b08ed6f6d21e878619fb689719d6e8e13"
   dependencies:
@@ -1983,6 +1996,13 @@ ember-cli-htmlbars-inline-precompile@^0.3.1:
     ember-cli-htmlbars "^1.0.0"
     hash-for-dep "^1.0.2"
 
+ember-cli-htmlbars@0.7.9:
+  version "0.7.9"
+  resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-0.7.9.tgz#142cd4325ab3f48c76cf8dc4d3a3800f38e721be"
+  dependencies:
+    broccoli-filter "^0.1.6"
+    ember-cli-version-checker "^1.0.2"
+
 ember-cli-htmlbars@^1.0.0, ember-cli-htmlbars@^1.0.1:
   version "1.3.0"
   resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-1.3.0.tgz#e090f011239153bf45dab29625f94a46fce205af"
@@ -2188,6 +2208,14 @@ ember-cli@2.3.0:
     walk-sync "^0.2.6"
     yam "0.0.18"
 
+"ember-collection@git://github.com/emberjs/ember-collection.git#4dbe10b7498886e277fc21b28139924f908d1926":
+  version "1.0.0-alpha.4"
+  resolved "git://github.com/emberjs/ember-collection.git#4dbe10b7498886e277fc21b28139924f908d1926"
+  dependencies:
+    ember-cli-babel "^5.1.3"
+    ember-cli-htmlbars "0.7.9"
+    layout-bin-packer "^1.2.0"
+
 ember-cp-validations@2.9.5:
   version "2.9.5"
   resolved "https://registry.yarnpkg.com/ember-cp-validations/-/ember-cp-validations-2.9.5.tgz#d3e81f6c6365f87e833af9c1f6fc8f35974f68d2"
@@ -3502,6 +3530,12 @@ klaw@^1.0.0:
   optionalDependencies:
     graceful-fs "^4.1.9"
 
+layout-bin-packer@^1.2.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/layout-bin-packer/-/layout-bin-packer-1.3.0.tgz#6f232f67db7606b2a405f39ae7197f2931a26c0c"
+  dependencies:
+    ember-cli-babel "^5.2.4"
+
 lazy-cache@^1.0.3:
   version "1.0.4"
   resolved "https://registry.yarnpkg.com/lazy-cache/-/lazy-cache-1.0.4.tgz#a1d78fc3a50474cb80845d3b3b6e1da49a446e8e"
@@ -3948,6 +3982,10 @@ mkdirp@0.5.x, "mkdirp@>=0.5 0", mkdirp@^0.5.0, mkdirp@^0.5.1, mkdirp@~0.5.0, mkd
   dependencies:
     minimist "0.0.8"
 
+mkdirp@^0.3.5:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.3.5.tgz#de3e5f8961c88c787ee1368df849ac4413eca8d7"
+
 mkdirp@~0.4.0:
   version "0.4.2"
   resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.4.2.tgz#427c8c18ece398b932f6f666f4e1e5b7740e78c8"
@@ -5066,10 +5104,6 @@ spdx-expression-parse@~1.0.0:
   version "1.0.4"
   resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-1.0.4.tgz#9bdf2f20e1f40ed447fbe273266191fced51626c"
 
-spdx-license-ids@*:
-  version "2.0.1"
-  resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-2.0.1.tgz#02017bcc3534ee4ffef6d58d20e7d3e9a1c3c8ec"
-
 spdx-license-ids@^1.0.0, spdx-license-ids@^1.0.2:
   version "1.2.2"
   resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-1.2.2.tgz#c9df7a3424594ade6bd11900d596696dc06bac57"
@@ -5141,12 +5175,6 @@ stringstream@~0.0.4:
   version "0.0.5"
   resolved "https://registry.yarnpkg.com/stringstream/-/stringstream-0.0.5.tgz#4e484cd4de5a0bbbee18e46307710a8a81621878"
 
-strip-ansi@*, strip-ansi@^3.0.0, strip-ansi@^3.0.1:
-  version "3.0.1"
-  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf"
-  dependencies:
-    ansi-regex "^2.0.0"
-
 strip-ansi@^0.3.0:
   version "0.3.0"
   resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.3.0.tgz#25f48ea22ca79187f3174a4db8759347bb126220"
@@ -5159,6 +5187,12 @@ strip-ansi@^2.0.1:
   dependencies:
     ansi-regex "^1.0.0"
 
+strip-ansi@^3.0.0, strip-ansi@^3.0.1:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf"
+  dependencies:
+    ansi-regex "^2.0.0"
+
 strip-ansi@~0.1.0:
   version "0.1.1"
   resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.1.1.tgz#39e8a98d044d150660abe4a6808acf70bb7bc991"


[21/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog221.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog221.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog221.java
deleted file mode 100644
index d9afec8..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog221.java
+++ /dev/null
@@ -1,456 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
-import org.apache.ambari.server.orm.dao.DaoUtils;
-import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.utils.VersionUtils;
-import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.gson.JsonArray;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParser;
-import com.google.gson.JsonPrimitive;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-/**
- * Upgrade catalog for version 2.2.1.
- */
-public class UpgradeCatalog221 extends AbstractUpgradeCatalog {
-
-  private static final String AMS_HBASE_SITE = "ams-hbase-site";
-  private static final String AMS_SITE = "ams-site";
-  private static final String AMS_HBASE_SECURITY_SITE = "ams-hbase-security-site";
-  private static final String AMS_ENV = "ams-env";
-  private static final String AMS_HBASE_ENV = "ams-hbase-env";
-  private static final String AMS_MODE = "timeline.metrics.service.operation.mode";
-  private static final String ZK_ZNODE_PARENT = "zookeeper.znode.parent";
-  private static final String ZK_CLIENT_PORT = "hbase.zookeeper.property.clientPort";
-  private static final String ZK_TICK_TIME = "hbase.zookeeper.property.tickTime";
-  private static final String CLUSTER_ENV = "cluster-env";
-  private static final String SECURITY_ENABLED = "security_enabled";
-  private static final String TOPOLOGY_HOST_INFO_TABLE = "topology_host_info";
-  private static final String TOPOLOGY_HOST_INFO_RACK_INFO_COLUMN = "rack_info";
-  private static final String TEZ_SITE = "tez-site";
-
-  @Inject
-  DaoUtils daoUtils;
-
-  /**
-   * Logger.
-   */
-  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog221.class);
-
-  private static final String OOZIE_SITE_CONFIG = "oozie-site";
-  private static final String OOZIE_SERVICE_HADOOP_CONFIGURATIONS_PROPERTY_NAME = "oozie.service.HadoopAccessorService.hadoop.configurations";
-  private static final String OLD_DEFAULT_HADOOP_CONFIG_PATH = "/etc/hadoop/conf";
-  private static final String NEW_DEFAULT_HADOOP_CONFIG_PATH = "{{hadoop_conf_dir}}";
-
-  private static final String BLUEPRINT_HOSTGROUP_COMPONENT_TABLE_NAME = "hostgroup_component";
-  private static final String BLUEPRINT_PROVISION_ACTION_COLUMN_NAME = "provision_action";
-
-  private static final String RANGER_KMS_DBKS_CONFIG = "dbks-site";
-  private static final String RANGER_KMS_DB_FLAVOR = "DB_FLAVOR";
-  private static final String RANGER_KMS_DB_HOST = "db_host";
-  private static final String RANGER_KMS_DB_NAME = "db_name";
-  private static final String RANGER_KMS_JDBC_URL = "ranger.ks.jpa.jdbc.url";
-  private static final String RANGER_KMS_JDBC_DRIVER = "ranger.ks.jpa.jdbc.driver";
-  private static final String RANGER_KMS_PROPERTIES = "kms-properties";
-
-  private static final String TEZ_COUNTERS_MAX = "tez.counters.max";
-  private static final String TEZ_COUNTERS_MAX_GROUPS = "tez.counters.max.groups";
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Don't forget to register new UpgradeCatalogs in {@link org.apache.ambari.server.upgrade.SchemaUpgradeHelper.UpgradeHelperModule#configure()}
-   *
-   * @param injector Guice injector to track dependencies and uses bindings to inject them.
-   */
-  @Inject
-  public UpgradeCatalog221(Injector injector) {
-    super(injector);
-    this.injector = injector;
-  }
-
-  // ----- UpgradeCatalog ----------------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getTargetVersion() {
-    return "2.2.1";
-  }
-
-  // ----- AbstractUpgradeCatalog --------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getSourceVersion() {
-    return "2.2.0";
-  }
-
-
-  @Override
-  protected void executeDDLUpdates() throws AmbariException, SQLException {
-    // indices to improve request status calc performance
-    dbAccessor.createIndex("idx_stage_request_id", "stage", "request_id");
-    dbAccessor.createIndex("idx_hrc_request_id", "host_role_command", "request_id");
-    dbAccessor.createIndex("idx_rsc_request_id", "role_success_criteria", "request_id");
-
-    executeBlueprintProvisionActionDDLUpdates();
-
-    dbAccessor.addColumn(TOPOLOGY_HOST_INFO_TABLE,
-        new DBAccessor.DBColumnInfo(TOPOLOGY_HOST_INFO_RACK_INFO_COLUMN, String.class, 255));
-
-  }
-
-  private void executeBlueprintProvisionActionDDLUpdates() throws AmbariException, SQLException {
-    // add provision_action column to the hostgroup_component table for Blueprints
-    dbAccessor.addColumn(BLUEPRINT_HOSTGROUP_COMPONENT_TABLE_NAME, new DBAccessor.DBColumnInfo(BLUEPRINT_PROVISION_ACTION_COLUMN_NAME,
-      String.class, 255, null, true));
-  }
-
-  @Override
-  protected void executePreDMLUpdates() throws AmbariException, SQLException {
-    //To change body of implemented methods use File | Settings | File Templates.
-  }
-
-  @Override
-  protected void executeDMLUpdates() throws AmbariException, SQLException {
-    addNewConfigurationsFromXml();
-    updateAlerts();
-    updateOozieConfigs();
-    updateTezConfigs();
-    updateRangerKmsDbksConfigs();
-    updateAMSConfigs();
-  }
-
-  protected void updateAlerts() {
-    LOG.info("Updating alert definitions.");
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    AlertDefinitionDAO alertDefinitionDAO = injector.getInstance(AlertDefinitionDAO.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
-    for (final Cluster cluster : clusterMap.values()) {
-      long clusterID = cluster.getClusterId();
-      final AlertDefinitionEntity hiveMetastoreProcessAlertDefinitionEntity = alertDefinitionDAO.findByName(
-              clusterID, "hive_metastore_process");
-      final AlertDefinitionEntity hiveServerProcessAlertDefinitionEntity = alertDefinitionDAO.findByName(
-              clusterID, "hive_server_process");
-
-      List<AlertDefinitionEntity> hiveAlertDefinitions = new ArrayList<>();
-      if(hiveMetastoreProcessAlertDefinitionEntity != null) {
-        hiveAlertDefinitions.add(hiveMetastoreProcessAlertDefinitionEntity);
-      }
-      if(hiveServerProcessAlertDefinitionEntity != null) {
-        hiveAlertDefinitions.add(hiveServerProcessAlertDefinitionEntity);
-      }
-
-      for(AlertDefinitionEntity alertDefinition : hiveAlertDefinitions){
-        String source = alertDefinition.getSource();
-
-        alertDefinition.setScheduleInterval(3);
-        alertDefinition.setSource(addCheckCommandTimeoutParam(source));
-        alertDefinition.setHash(UUID.randomUUID().toString());
-
-        alertDefinitionDAO.merge(alertDefinition);
-      }
-
-      final AlertDefinitionEntity amsZookeeperProcessAlertDefinitionEntity = alertDefinitionDAO.findByName(
-        clusterID, "ams_metrics_collector_zookeeper_server_process");
-
-      if (amsZookeeperProcessAlertDefinitionEntity != null) {
-        LOG.info("Removing alert : ams_metrics_collector_zookeeper_server_process");
-        alertDefinitionDAO.remove(amsZookeeperProcessAlertDefinitionEntity);
-      }
-    }
-  }
-
-  protected String addCheckCommandTimeoutParam(String source) {
-    JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
-    JsonArray parametersJson = sourceJson.getAsJsonArray("parameters");
-
-    boolean parameterExists = parametersJson != null && !parametersJson.isJsonNull();
-
-    if (parameterExists) {
-      Iterator<JsonElement> jsonElementIterator = parametersJson.iterator();
-      while(jsonElementIterator.hasNext()) {
-        JsonElement element = jsonElementIterator.next();
-        JsonElement name = element.getAsJsonObject().get("name");
-        if (name != null && !name.isJsonNull() && name.getAsString().equals("check.command.timeout")) {
-          return sourceJson.toString();
-        }
-      }
-    }
-
-    JsonObject checkCommandTimeoutParamJson = new JsonObject();
-    checkCommandTimeoutParamJson.add("name", new JsonPrimitive("check.command.timeout"));
-    checkCommandTimeoutParamJson.add("display_name", new JsonPrimitive("Check command timeout"));
-    checkCommandTimeoutParamJson.add("value", new JsonPrimitive(60.0));
-    checkCommandTimeoutParamJson.add("type", new JsonPrimitive("NUMERIC"));
-    checkCommandTimeoutParamJson.add("description", new JsonPrimitive("The maximum time before check command will be killed by timeout"));
-    checkCommandTimeoutParamJson.add("units", new JsonPrimitive("seconds"));
-
-    if (!parameterExists) {
-      parametersJson = new JsonArray();
-      parametersJson.add(checkCommandTimeoutParamJson);
-      sourceJson.add("parameters", parametersJson);
-    } else {
-      parametersJson.add(checkCommandTimeoutParamJson);
-      sourceJson.remove("parameters");
-      sourceJson.add("parameters", parametersJson);
-    }
-
-    return sourceJson.toString();
-  }
-
-  protected void updateAMSConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-
-          Config amsEnv = cluster.getDesiredConfigByType(AMS_ENV);
-          if (amsEnv != null) {
-            Map<String, String> amsEnvProperties = amsEnv.getProperties();
-            String content = amsEnvProperties.get("content");
-            Map<String, String> newProperties = new HashMap<>();
-            newProperties.put("content", updateAmsEnvContent(content));
-            updateConfigurationPropertiesForCluster(cluster, AMS_ENV, newProperties, true, true);
-          }
-
-          String znodeParent = null;
-          Config amsHbaseSecuritySite = cluster.getDesiredConfigByType(AMS_HBASE_SECURITY_SITE);
-          if (amsHbaseSecuritySite != null) {
-            Map<String, String> amsHbaseSecuritySiteProperties = amsHbaseSecuritySite.getProperties();
-            znodeParent = amsHbaseSecuritySiteProperties.get(ZK_ZNODE_PARENT);
-            LOG.info("Removing config zookeeper.znode.parent from ams-hbase-security-site");
-            removeConfigurationPropertiesFromCluster(cluster, AMS_HBASE_SECURITY_SITE, Collections.singleton(ZK_ZNODE_PARENT));
-          }
-
-          Config amsHbaseSite = cluster.getDesiredConfigByType(AMS_HBASE_SITE);
-          if (amsHbaseSite != null) {
-            Map<String, String> amsHbaseSiteProperties = amsHbaseSite.getProperties();
-            Map<String, String> newProperties = new HashMap<>();
-
-            if (!amsHbaseSiteProperties.containsKey(ZK_ZNODE_PARENT)) {
-              if (StringUtils.isEmpty(znodeParent) || "/hbase".equals(znodeParent)) {
-                boolean isSecurityEnabled = false;
-                Config clusterEnv = cluster.getDesiredConfigByType(CLUSTER_ENV);
-                if (clusterEnv != null) {
-                  Map<String,String> clusterEnvProperties = clusterEnv.getProperties();
-                  if (clusterEnvProperties.containsKey(SECURITY_ENABLED)) {
-                    isSecurityEnabled = Boolean.valueOf(clusterEnvProperties.get(SECURITY_ENABLED));
-                  }
-                }
-                znodeParent = "/ams-hbase-" + (isSecurityEnabled ? "secure" : "unsecure");
-              }
-
-              LOG.info("Adding config zookeeper.znode.parent=" + znodeParent + " to ams-hbase-site");
-              newProperties.put(ZK_ZNODE_PARENT, znodeParent);
-
-            }
-
-            boolean isDistributed = false;
-            Config amsSite = cluster.getDesiredConfigByType(AMS_SITE);
-            if (amsSite != null) {
-              if ("distributed".equals(amsSite.getProperties().get(AMS_MODE))) {
-                isDistributed = true;
-              }
-            }
-
-            // Skip override if custom port found in embedded mode.
-            if (amsHbaseSiteProperties.containsKey(ZK_CLIENT_PORT) &&
-               (isDistributed || amsHbaseSiteProperties.get(ZK_CLIENT_PORT).equals("61181"))) {
-              String newValue = "{{zookeeper_clientPort}}";
-              LOG.info("Replacing value of " + ZK_CLIENT_PORT + " from " +
-                amsHbaseSiteProperties.get(ZK_CLIENT_PORT) + " to " +
-                newValue + " in ams-hbase-site");
-
-              newProperties.put(ZK_CLIENT_PORT, newValue);
-            }
-
-            if (!amsHbaseSiteProperties.containsKey(ZK_TICK_TIME)) {
-              LOG.info("Adding config " + ZK_TICK_TIME + " to ams-hbase-site");
-              newProperties.put(ZK_TICK_TIME, "6000");
-            }
-
-            updateConfigurationPropertiesForCluster(cluster, AMS_HBASE_SITE, newProperties, true, true);
-          }
-
-          Config amsHbaseEnv = cluster.getDesiredConfigByType(AMS_HBASE_ENV);
-          if (amsHbaseEnv != null) {
-            Map<String, String> amsHbaseEnvProperties = amsHbaseEnv.getProperties();
-            String content = amsHbaseEnvProperties.get("content");
-            Map<String, String> newProperties = new HashMap<>();
-            newProperties.put("content", updateAmsHbaseEnvContent(content));
-            updateConfigurationPropertiesForCluster(cluster, AMS_HBASE_ENV, newProperties, true, true);
-          }
-        }
-      }
-    }
-  }
-
-  protected String updateAmsHbaseEnvContent(String content) {
-    if (content == null) {
-      return null;
-    }
-    String regSearch = "_jaas_config_file\\}\\} -Dzookeeper.sasl.client.username=\\{\\{zk_servicename\\}\\}";
-    String replacement = "_jaas_config_file}}";
-    content = content.replaceAll(regSearch, replacement);
-    return content;
-  }
-
-  protected String updateAmsEnvContent(String content) {
-
-    if (content == null) {
-      return null;
-    }
-    String regSearch = "-Djava.security.auth.login.config=\\{\\{ams_collector_jaas_config_file\\}\\} " +
-      "-Dzookeeper.sasl.client.username=\\{\\{zk_servicename\\}\\}";
-    String replacement = "-Djava.security.auth.login.config={{ams_collector_jaas_config_file}}";
-    content = content.replaceAll(regSearch, replacement);
-
-    return content;
-  }
-
-  protected void updateOozieConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config oozieSiteProps = cluster.getDesiredConfigByType(OOZIE_SITE_CONFIG);
-      if (oozieSiteProps != null) {
-        // Update oozie.service.HadoopAccessorService.hadoop.configurations
-        Map<String, String> updateProperties = new HashMap<>();
-        String oozieHadoopConfigProperty = oozieSiteProps.getProperties().get(OOZIE_SERVICE_HADOOP_CONFIGURATIONS_PROPERTY_NAME);
-        if(oozieHadoopConfigProperty != null && oozieHadoopConfigProperty.contains(OLD_DEFAULT_HADOOP_CONFIG_PATH)) {
-          String updatedOozieHadoopConfigProperty = oozieHadoopConfigProperty.replaceAll(
-              OLD_DEFAULT_HADOOP_CONFIG_PATH, NEW_DEFAULT_HADOOP_CONFIG_PATH);
-          updateProperties.put(OOZIE_SERVICE_HADOOP_CONFIGURATIONS_PROPERTY_NAME, updatedOozieHadoopConfigProperty);
-          updateConfigurationPropertiesForCluster(cluster, OOZIE_SITE_CONFIG, updateProperties, true, false);
-        }
-      }
-    }
-  }
-
-  protected void updateTezConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Service service = cluster.getServices().get("TEZ");
-
-      if (null == service) {
-        continue;
-      }
-
-      Config tezSiteProps = cluster.getDesiredConfigByType(TEZ_SITE);
-      if (tezSiteProps != null) {
-
-        // Update tez.counters.max and tez.counters.max.groups configurations
-        String tezCountersMaxProperty = tezSiteProps.getProperties().get(TEZ_COUNTERS_MAX);
-        String tezCountersMaxGroupesProperty = tezSiteProps.getProperties().get(TEZ_COUNTERS_MAX_GROUPS);
-
-        StackId stackId = service.getDesiredStackId();
-        boolean isStackNotLess23 = (stackId.getStackName().equals("HDP") &&
-            VersionUtils.compareVersions(stackId.getStackVersion(), "2.3") >= 0);
-
-        if (isStackNotLess23) {
-          Map<String, String> updates = new HashMap<>();
-          if (tezCountersMaxProperty != null && tezCountersMaxProperty.equals("2000")) {
-            updates.put(TEZ_COUNTERS_MAX, "10000");
-          }
-          if (tezCountersMaxGroupesProperty != null && tezCountersMaxGroupesProperty.equals("1000")) {
-            updates.put(TEZ_COUNTERS_MAX_GROUPS, "3000");
-          }
-          if (!updates.isEmpty()) {
-            updateConfigurationPropertiesForCluster(cluster, TEZ_SITE, updates, true, false);
-          }
-        }
-      }
-    }
-  }
-
-  protected void updateRangerKmsDbksConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Map<String, String> newRangerKmsProps = new HashMap<>();
-      Config rangerKmsDbConfigs = cluster.getDesiredConfigByType(RANGER_KMS_PROPERTIES);
-      if (rangerKmsDbConfigs != null) {
-        String dbFlavor = rangerKmsDbConfigs.getProperties().get(RANGER_KMS_DB_FLAVOR);
-        String dbHost = rangerKmsDbConfigs.getProperties().get(RANGER_KMS_DB_HOST);
-        String dbName = rangerKmsDbConfigs.getProperties().get(RANGER_KMS_DB_NAME);
-        String dbConnectionString = null;
-        String dbDriver = null;
-
-        if (dbFlavor != null && dbHost != null && dbName != null) {
-          if ("MYSQL".equalsIgnoreCase(dbFlavor)) {
-            dbConnectionString = "jdbc:mysql://"+dbHost+"/"+dbName;
-            dbDriver = "com.mysql.jdbc.Driver";
-          } else if ("ORACLE".equalsIgnoreCase(dbFlavor)) {
-            dbConnectionString = "jdbc:oracle:thin:@//"+dbHost;
-            dbDriver = "oracle.jdbc.driver.OracleDriver";
-          } else if ("POSTGRES".equalsIgnoreCase(dbFlavor)) {
-            dbConnectionString = "jdbc:postgresql://"+dbHost+"/"+dbName;
-            dbDriver = "org.postgresql.Driver";
-          } else if ("MSSQL".equalsIgnoreCase(dbFlavor)) {
-            dbConnectionString = "jdbc:sqlserver://"+dbHost+";databaseName="+dbName;
-            dbDriver = "com.microsoft.sqlserver.jdbc.SQLServerDriver";
-          } else if ("SQLA".equalsIgnoreCase(dbFlavor)) {
-            dbConnectionString = "jdbc:sqlanywhere:database="+dbName+";host="+dbHost;
-            dbDriver = "sap.jdbc4.sqlanywhere.IDriver";
-          }
-          newRangerKmsProps.put(RANGER_KMS_JDBC_URL, dbConnectionString);
-          newRangerKmsProps.put(RANGER_KMS_JDBC_DRIVER, dbDriver);
-          updateConfigurationPropertiesForCluster(cluster, RANGER_KMS_DBKS_CONFIG, newRangerKmsProps, true, false);
-        }
-      }
-    }
-  }
-
-}
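
The updateAlerts() logic deleted above rewrites each alert definition's JSON source with Gson, appending a check.command.timeout parameter when it is missing. A self-contained sketch of just that JSON manipulation follows; the sample source string is made up for illustration, while real sources come from AlertDefinitionEntity.getSource().

import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.google.gson.JsonPrimitive;

public class CheckCommandTimeoutDemo {
  public static void main(String[] args) {
    // Made-up alert definition source; real ones are stored per alert definition.
    String source = "{\"type\":\"SCRIPT\",\"path\":\"alert_hive_metastore.py\"}";

    JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();

    // Create the parameters array if the source does not have one yet.
    JsonArray parameters = sourceJson.getAsJsonArray("parameters");
    if (parameters == null) {
      parameters = new JsonArray();
      sourceJson.add("parameters", parameters);
    }

    // Append the timeout parameter, using the same fields as the removed catalog.
    JsonObject timeoutParam = new JsonObject();
    timeoutParam.add("name", new JsonPrimitive("check.command.timeout"));
    timeoutParam.add("display_name", new JsonPrimitive("Check command timeout"));
    timeoutParam.add("value", new JsonPrimitive(60.0));
    timeoutParam.add("type", new JsonPrimitive("NUMERIC"));
    timeoutParam.add("units", new JsonPrimitive("seconds"));
    parameters.add(timeoutParam);

    System.out.println(sourceJson);
  }
}

Running it prints the source with the parameters array appended, which is what the removed catalog then persisted back through alertDefinitionDAO.merge().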

http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog222.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog222.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog222.java
deleted file mode 100644
index cc7dcb8..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog222.java
+++ /dev/null
@@ -1,781 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import java.io.File;
-import java.io.FileReader;
-import java.lang.reflect.Type;
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
-import org.apache.ambari.server.orm.dao.DaoUtils;
-import org.apache.ambari.server.orm.dao.WidgetDAO;
-import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
-import org.apache.ambari.server.orm.entities.WidgetEntity;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.ServiceComponentHost;
-import org.apache.ambari.server.state.ServiceInfo;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.StackInfo;
-import org.apache.ambari.server.state.stack.WidgetLayout;
-import org.apache.ambari.server.state.stack.WidgetLayoutInfo;
-import org.apache.ambari.server.utils.VersionUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.reflect.TypeToken;
-import com.google.gson.Gson;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParser;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-/**
- * Upgrade catalog for version 2.2.2.
- */
-public class UpgradeCatalog222 extends AbstractUpgradeCatalog {
-
-  @Inject
-  DaoUtils daoUtils;
-
-  /**
-   * Logger.
-   */
-  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog222.class);
-  private static final String AMS_SITE = "ams-site";
-  private static final String AMS_HBASE_SITE = "ams-hbase-site";
-  private static final String HIVE_SITE_CONFIG = "hive-site";
-  private static final String ATLAS_APPLICATION_PROPERTIES_CONFIG = "application-properties";
-  private static final String ATLAS_HOOK_HIVE_MINTHREADS_PROPERTY = "atlas.hook.hive.minThreads";
-  private static final String ATLAS_HOOK_HIVE_MAXTHREADS_PROPERTY = "atlas.hook.hive.maxThreads";
-  private static final String ATLAS_CLUSTER_NAME_PROPERTY = "atlas.cluster.name";
-  private static final String ATLAS_ENABLETLS_PROPERTY = "atlas.enableTLS";
-  private static final String ATLAS_SERVER_HTTP_PORT_PROPERTY = "atlas.server.http.port";
-  private static final String ATLAS_SERVER_HTTPS_PORT_PROPERTY = "atlas.server.https.port";
-  private static final String ATLAS_REST_ADDRESS_PROPERTY = "atlas.rest.address";
-  private static final String HBASE_ENV_CONFIG = "hbase-env";
-  private static final String CONTENT_PROPERTY = "content";
-
-  private static final String UPGRADE_TABLE = "upgrade";
-  private static final String UPGRADE_SUSPENDED_COLUMN = "suspended";
-
-  private static final String HOST_AGGREGATOR_DAILY_CHECKPOINTCUTOFFMULTIPIER_PROPERTY =
-    "timeline.metrics.host.aggregator.daily.checkpointCutOffMultiplier";
-  private static final String CLUSTER_AGGREGATOR_DAILY_CHECKPOINTCUTOFFMULTIPIER_PROPERTY =
-    "timeline.metrics.cluster.aggregator.daily.checkpointCutOffMultiplier";
-  private static final String TIMELINE_METRICS_SERVICE_WATCHER_DISBALED_PROPERTY = "timeline.metrics.service.watcher.disabled";
-  private static final String AMS_MODE_PROPERTY = "timeline.metrics.service.operation.mode";
-  public static final String PRECISION_TABLE_TTL_PROPERTY = "timeline.metrics.host.aggregator.ttl";
-  public static final String CLUSTER_SECOND_TABLE_TTL_PROPERTY = "timeline.metrics.cluster.aggregator.second.ttl";
-  public static final String CLUSTER_MINUTE_TABLE_TTL_PROPERTY = "timeline.metrics.cluster.aggregator.minute.ttl";
-  public static final String AMS_WEBAPP_ADDRESS_PROPERTY = "timeline.metrics.service.webapp.address";
-  public static final String HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD_PROPERTY = "hbase.client.scanner.timeout.period";
-  public static final String HBASE_RPC_TIMEOUT_PROPERTY = "hbase.rpc.timeout";
-
-  public static final String PHOENIX_QUERY_TIMEOUT_PROPERTY = "phoenix.query.timeoutMs";
-  public static final String PHOENIX_QUERY_KEEPALIVE_PROPERTY = "phoenix.query.keepAliveMs";
-  public static final String TIMELINE_METRICS_CLUSTER_AGGREGATOR_INTERPOLATION_ENABLED
-    = "timeline.metrics.cluster.aggregator.interpolation.enabled";
-  public static final String TIMELINE_METRICS_SINK_COLLECTION_PERIOD = "timeline.metrics.sink.collection.period";
-
-  public static final String AMS_SERVICE_NAME = "AMBARI_METRICS";
-  public static final String AMS_COLLECTOR_COMPONENT_NAME = "METRICS_COLLECTOR";
-
-  protected static final String WIDGET_TABLE = "widget";
-  protected static final String WIDGET_DESCRIPTION = "description";
-  protected static final String WIDGET_NAME = "widget_name";
-  protected static final String WIDGET_CORRUPT_BLOCKS = "Corrupted Blocks";
-  protected static final String WIDGET_CORRUPT_REPLICAS = "Blocks With Corrupted Replicas";
-  protected static final String WIDGET_CORRUPT_REPLICAS_DESCRIPTION = "Number represents data blocks with at least one " +
-    "corrupted replica (but not all of them). It is indicative of bad HDFS health.";
-  protected static final String WIDGET_VALUES = "widget_values";
-  protected static final String WIDGET_VALUES_VALUE =
-    "${Hadoop:service\\" +
-    "\\u003dNameNode,name\\" +
-    "\\u003dFSNamesystem.CorruptBlocks}";
-
-  public final static String HBASE_SITE_HBASE_COPROCESSOR_MASTER_CLASSES = "hbase.coprocessor.master.classes";
-  public final static String HBASE_SITE_HBASE_COPROCESSOR_REGION_CLASSES = "hbase.coprocessor.region.classes";
-  public final static String HBASE_SITE_HBASE_COPROCESSOR_REGIONSERVER_CLASSES = "hbase.coprocessor.regionserver.classes";
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Don't forget to register new UpgradeCatalogs in {@link org.apache.ambari.server.upgrade.SchemaUpgradeHelper.UpgradeHelperModule#configure()}
-   *
-   * @param injector Guice injector to track dependencies and uses bindings to inject them.
-   */
-  @Inject
-  public UpgradeCatalog222(Injector injector) {
-    super(injector);
-    this.injector = injector;
-  }
-
-  // ----- UpgradeCatalog ----------------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getTargetVersion() {
-    return "2.2.2";
-  }
-
-  // ----- AbstractUpgradeCatalog --------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getSourceVersion() {
-    return "2.2.1";
-  }
-
-
-  @Override
-  protected void executeDDLUpdates() throws AmbariException, SQLException {
-    DBAccessor.DBColumnInfo columnInfo = new DBAccessor.DBColumnInfo("host_id", Long.class);
-    dbAccessor.addColumn("topology_host_info", columnInfo);
-    dbAccessor.addFKConstraint("topology_host_info", "FK_hostinfo_host_id", "host_id", "hosts", "host_id", true);
-    dbAccessor.executeUpdate("update topology_host_info set host_id = (select hosts.host_id from hosts where hosts.host_name = topology_host_info.fqdn)");
-
-    updateUpgradeTable();
-  }
-
-  @Override
-  protected void executePreDMLUpdates() throws AmbariException, SQLException {
-    // No pre-DML updates are needed in this catalog.
-  }
-
-  @Override
-  protected void executeDMLUpdates() throws AmbariException, SQLException {
-    addNewConfigurationsFromXml();
-    updateAlerts();
-    updateStormConfigs();
-    updateAMSConfigs();
-    updateHiveConfig();
-    updateHostRoleCommands();
-    updateHDFSWidgetDefinition();
-    updateYARNWidgetDefinition();
-    updateHBASEWidgetDefinition();
-    updateHbaseEnvConfig();
-    updateCorruptedReplicaWidget();
-    updateZookeeperConfigs();
-    updateHBASEConfigs();
-    createNewSliderConfigVersion();
-    initializeStromAndKafkaWidgets();
-  }
-
-  protected void createNewSliderConfigVersion() {
-    // Here we create a new service config version for SLIDER, linking the slider-client
-    // config to the SLIDER service in the serviceconfigmapping table. It might not be
-    // mapped because of a bug we had a long time ago.
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(ambariManagementController.getClusters());
-
-    for (final Cluster cluster : clusterMap.values()) {
-      Service sliderService = null;
-      try {
-        sliderService = cluster.getService("SLIDER");
-      } catch(AmbariException ambariException) {
-        LOG.info("SLIDER service not found in cluster while creating new serviceconfig version for SLIDER service.");
-      }
-      if (sliderService != null) {
-        cluster.createServiceConfigVersion("SLIDER", AUTHENTICATED_USER_NAME, "Creating new service config version for SLIDER service.", null);
-      }
-    }
-  }
-
-  protected void updateZookeeperConfigs() throws  AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(ambariManagementController.getClusters());
-
-    for (final Cluster cluster : clusterMap.values()) {
-      Config zooEnv = cluster.getDesiredConfigByType("zookeeper-env");
-      if (zooEnv != null && zooEnv.getProperties().containsKey("zk_server_heapsize")) {
-        String heapSizeValue = zooEnv.getProperties().get("zk_server_heapsize");
-        if(!heapSizeValue.endsWith("m")) {
-          Map<String, String> updates = new HashMap<>();
-          updates.put("zk_server_heapsize", heapSizeValue+"m");
-          updateConfigurationPropertiesForCluster(cluster, "zookeeper-env", updates, true, false);
-        }
-
-      }
-    }
-  }
-
-  protected void updateHBASEConfigs() throws  AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(ambariManagementController.getClusters());
-
-    for (final Cluster cluster : clusterMap.values()) {
-
-      Service service = cluster.getServices().get("HBASE");
-
-      if (null == service) {
-        continue;
-      }
-
-      StackId stackId = service.getDesiredStackId();
-
-      Config hbaseSite = cluster.getDesiredConfigByType("hbase-site");
-      boolean rangerHbasePluginEnabled = isConfigEnabled(cluster,
-        AbstractUpgradeCatalog.CONFIGURATION_TYPE_RANGER_HBASE_PLUGIN_PROPERTIES,
-        AbstractUpgradeCatalog.PROPERTY_RANGER_HBASE_PLUGIN_ENABLED);
-      if (hbaseSite != null && rangerHbasePluginEnabled) {
-        Map<String, String> updates = new HashMap<>();
-        String stackVersion = stackId.getStackVersion();
-        if (VersionUtils.compareVersions(stackVersion, "2.2") == 0) {
-          if (hbaseSite.getProperties().containsKey(HBASE_SITE_HBASE_COPROCESSOR_MASTER_CLASSES)) {
-            updates.put(HBASE_SITE_HBASE_COPROCESSOR_MASTER_CLASSES,
-              "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor");
-          }
-          if (hbaseSite.getProperties().containsKey(HBASE_SITE_HBASE_COPROCESSOR_REGIONSERVER_CLASSES)) {
-            updates.put(HBASE_SITE_HBASE_COPROCESSOR_REGIONSERVER_CLASSES,
-              "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor");
-          }
-          if (hbaseSite.getProperties().containsKey(HBASE_SITE_HBASE_COPROCESSOR_REGION_CLASSES)) {
-            updates.put(HBASE_SITE_HBASE_COPROCESSOR_REGION_CLASSES,
-                "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint," +
-                    "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor");
-          }
-        } else if (VersionUtils.compareVersions(stackVersion, "2.3") == 0) {
-          if (hbaseSite.getProperties().containsKey(HBASE_SITE_HBASE_COPROCESSOR_MASTER_CLASSES)) {
-            updates.put(HBASE_SITE_HBASE_COPROCESSOR_MASTER_CLASSES,
-              "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor ");
-          }
-          if (hbaseSite.getProperties().containsKey(HBASE_SITE_HBASE_COPROCESSOR_REGIONSERVER_CLASSES)) {
-            updates.put(HBASE_SITE_HBASE_COPROCESSOR_REGIONSERVER_CLASSES,
-              "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor");
-          }
-          if (hbaseSite.getProperties().containsKey(HBASE_SITE_HBASE_COPROCESSOR_REGION_CLASSES)) {
-            updates.put(HBASE_SITE_HBASE_COPROCESSOR_REGION_CLASSES,
-              "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint," +
-                "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor");
-          }
-        }
-        if (! updates.isEmpty()) {
-          updateConfigurationPropertiesForCluster(cluster, "hbase-site", updates, true, false);
-        }
-      }
-    }
-  }
-
-  protected void updateStormConfigs() throws  AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(ambariManagementController.getClusters());
-
-    for (final Cluster cluster : clusterMap.values()) {
-      if (cluster.getDesiredConfigByType("storm-site") != null && cluster.getDesiredConfigByType("storm-site").getProperties().containsKey("storm.zookeeper.superACL")
-              && cluster.getDesiredConfigByType("storm-site").getProperties().get("storm.zookeeper.superACL").equals("sasl:{{storm_base_jaas_principal}}")) {
-        Map<String, String> newStormProps = new HashMap<>();
-        newStormProps.put("storm.zookeeper.superACL", "sasl:{{storm_bare_jaas_principal}}");
-        updateConfigurationPropertiesForCluster(cluster, "storm-site", newStormProps, true, false);
-      }
-    }
-  }
-
-  protected void updateAlerts() {
-    LOG.info("Updating alert definitions.");
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    AlertDefinitionDAO alertDefinitionDAO = injector.getInstance(AlertDefinitionDAO.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
-    for (final Cluster cluster : clusterMap.values()) {
-      long clusterID = cluster.getClusterId();
-
-      final AlertDefinitionEntity regionserverHealthSummaryDefinitionEntity = alertDefinitionDAO.findByName(
-              clusterID, "regionservers_health_summary");
-
-      final AlertDefinitionEntity atsWebAlert = alertDefinitionDAO.findByName(
-              clusterID, "yarn_app_timeline_server_webui");
-
-      if (regionserverHealthSummaryDefinitionEntity != null) {
-        alertDefinitionDAO.remove(regionserverHealthSummaryDefinitionEntity);
-      }
-
-      if (atsWebAlert != null) {
-        String source = atsWebAlert.getSource();
-        JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
-
-        JsonObject uriJson = sourceJson.get("uri").getAsJsonObject();
-        uriJson.remove("http");
-        uriJson.remove("https");
-        uriJson.addProperty("http", "{{yarn-site/yarn.timeline-service.webapp.address}}/ws/v1/timeline");
-        uriJson.addProperty("https", "{{yarn-site/yarn.timeline-service.webapp.https.address}}/ws/v1/timeline");
-
-        atsWebAlert.setSource(sourceJson.toString());
-        alertDefinitionDAO.merge(atsWebAlert);
-      }
-
-      //update Atlas alert
-      final AlertDefinitionEntity atlasMetadataServerWebUI = alertDefinitionDAO.findByName(
-              clusterID, "metadata_server_webui");
-      if (atlasMetadataServerWebUI != null) {
-        String source = atlasMetadataServerWebUI.getSource();
-        JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
-
-        JsonObject uriJson = sourceJson.get("uri").getAsJsonObject();
-        uriJson.remove("http");
-        uriJson.remove("https");
-        uriJson.addProperty("http", "{{application-properties/atlas.server.http.port}}");
-        uriJson.addProperty("https", "{{application-properties/atlas.server.https.port}}");
-
-        atlasMetadataServerWebUI.setSource(sourceJson.toString());
-        alertDefinitionDAO.merge(atlasMetadataServerWebUI);
-      }
-
-    }
-
-
-  }
-
-  protected void updateHostRoleCommands() throws SQLException {
-    dbAccessor.createIndex("idx_hrc_status_role", "host_role_command", "status", "role");
-  }
-
-  protected void updateAMSConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-
-          Config amsSite = cluster.getDesiredConfigByType(AMS_SITE);
-          if (amsSite != null) {
-            Map<String, String> amsSiteProperties = amsSite.getProperties();
-            Map<String, String> newProperties = new HashMap<>();
-
-            if (amsSiteProperties.containsKey(AMS_WEBAPP_ADDRESS_PROPERTY)) {
-              Set<String> collectorHostNames = cluster.getHosts(AMS_SERVICE_NAME, AMS_COLLECTOR_COMPONENT_NAME);
-              for (String collector: collectorHostNames) {
-                String currentValue = amsSiteProperties.get(AMS_WEBAPP_ADDRESS_PROPERTY);
-
-                if (currentValue.startsWith("0.0.0.0")) {
-                  newProperties.put(AMS_WEBAPP_ADDRESS_PROPERTY, currentValue.replace("0.0.0.0", collector));
-                } else if (currentValue.startsWith("localhost")) {
-                  newProperties.put(AMS_WEBAPP_ADDRESS_PROPERTY, currentValue.replace("localhost", collector));
-                }
-              }
-            }
-
-            if (amsSiteProperties.containsKey(HOST_AGGREGATOR_DAILY_CHECKPOINTCUTOFFMULTIPIER_PROPERTY) &&
-              amsSiteProperties.get(HOST_AGGREGATOR_DAILY_CHECKPOINTCUTOFFMULTIPIER_PROPERTY).equals("1")) {
-
-              LOG.info("Setting value of " + HOST_AGGREGATOR_DAILY_CHECKPOINTCUTOFFMULTIPIER_PROPERTY + " : 2");
-              newProperties.put(HOST_AGGREGATOR_DAILY_CHECKPOINTCUTOFFMULTIPIER_PROPERTY, String.valueOf(2));
-
-            }
-
-            if (amsSiteProperties.containsKey(CLUSTER_AGGREGATOR_DAILY_CHECKPOINTCUTOFFMULTIPIER_PROPERTY) &&
-              amsSiteProperties.get(CLUSTER_AGGREGATOR_DAILY_CHECKPOINTCUTOFFMULTIPIER_PROPERTY).equals("1")) {
-
-              LOG.info("Setting value of " + CLUSTER_AGGREGATOR_DAILY_CHECKPOINTCUTOFFMULTIPIER_PROPERTY + " : 2");
-              newProperties.put(CLUSTER_AGGREGATOR_DAILY_CHECKPOINTCUTOFFMULTIPIER_PROPERTY, String.valueOf(2));
-
-            }
-
-            if (!amsSiteProperties.containsKey(TIMELINE_METRICS_SERVICE_WATCHER_DISBALED_PROPERTY)) {
-              LOG.info("Add config  " + TIMELINE_METRICS_SERVICE_WATCHER_DISBALED_PROPERTY + " = false");
-              newProperties.put(TIMELINE_METRICS_SERVICE_WATCHER_DISBALED_PROPERTY, String.valueOf(false));
-            }
-
-            boolean isDistributed = false;
-            if ("distributed".equals(amsSite.getProperties().get(AMS_MODE_PROPERTY))) {
-              isDistributed = true;
-            }
-
-            if (amsSiteProperties.containsKey(PRECISION_TABLE_TTL_PROPERTY)) {
-              String oldTtl = amsSiteProperties.get(PRECISION_TABLE_TTL_PROPERTY);
-              String newTtl = oldTtl;
-              if (isDistributed) {
-                if ("86400".equals(oldTtl)) {
-                  newTtl = String.valueOf(3 * 86400); // 3 days
-                }
-              }
-              newProperties.put(PRECISION_TABLE_TTL_PROPERTY, newTtl);
-              LOG.info("Setting value of " + PRECISION_TABLE_TTL_PROPERTY + " : " + newTtl);
-            }
-
-            if (amsSiteProperties.containsKey(CLUSTER_SECOND_TABLE_TTL_PROPERTY)) {
-              String oldTtl = amsSiteProperties.get(CLUSTER_SECOND_TABLE_TTL_PROPERTY);
-              String newTtl = oldTtl;
-
-              if ("2592000".equals(oldTtl)) {
-                newTtl = String.valueOf(7 * 86400); // 7 days
-              }
-
-              newProperties.put(CLUSTER_SECOND_TABLE_TTL_PROPERTY, newTtl);
-              LOG.info("Setting value of " + CLUSTER_SECOND_TABLE_TTL_PROPERTY + " : " + newTtl);
-            }
-
-            if (amsSiteProperties.containsKey(CLUSTER_MINUTE_TABLE_TTL_PROPERTY)) {
-              String oldTtl = amsSiteProperties.get(CLUSTER_MINUTE_TABLE_TTL_PROPERTY);
-              String newTtl = oldTtl;
-
-              if ("7776000".equals(oldTtl)) {
-                newTtl = String.valueOf(30 * 86400); // 30 days
-              }
-
-              newProperties.put(CLUSTER_MINUTE_TABLE_TTL_PROPERTY, newTtl);
-              LOG.info("Setting value of " + CLUSTER_MINUTE_TABLE_TTL_PROPERTY + " : " + newTtl);
-            }
-
-            if (!amsSiteProperties.containsKey(TIMELINE_METRICS_CLUSTER_AGGREGATOR_INTERPOLATION_ENABLED)) {
-              LOG.info("Add config  " + TIMELINE_METRICS_CLUSTER_AGGREGATOR_INTERPOLATION_ENABLED + " = true");
-              newProperties.put(TIMELINE_METRICS_CLUSTER_AGGREGATOR_INTERPOLATION_ENABLED, String.valueOf(true));
-            }
-
-            if (!amsSiteProperties.containsKey(TIMELINE_METRICS_SINK_COLLECTION_PERIOD) ||
-              "60".equals(amsSiteProperties.get(TIMELINE_METRICS_SINK_COLLECTION_PERIOD))) {
-
-              newProperties.put(TIMELINE_METRICS_SINK_COLLECTION_PERIOD, "10");
-              LOG.info("Setting value of " + TIMELINE_METRICS_SINK_COLLECTION_PERIOD + " : 10");
-            }
-
-            updateConfigurationPropertiesForCluster(cluster, AMS_SITE, newProperties, true, true);
-          }
-
-          Config amsHbaseSite = cluster.getDesiredConfigByType(AMS_HBASE_SITE);
-          if (amsHbaseSite != null) {
-            Map<String, String> amsHbaseSiteProperties = amsHbaseSite.getProperties();
-            Map<String, String> newProperties = new HashMap<>();
-
-            if (!amsHbaseSiteProperties.containsKey(HBASE_RPC_TIMEOUT_PROPERTY)) {
-              newProperties.put(HBASE_RPC_TIMEOUT_PROPERTY, String.valueOf(300000));
-            }
-
-            if (!amsHbaseSiteProperties.containsKey(PHOENIX_QUERY_KEEPALIVE_PROPERTY)) {
-              newProperties.put(PHOENIX_QUERY_KEEPALIVE_PROPERTY, String.valueOf(300000));
-            }
-
-            if (!amsHbaseSiteProperties.containsKey(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD_PROPERTY) ||
-              amsHbaseSiteProperties.get(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD_PROPERTY).equals("900000")) {
-              amsHbaseSiteProperties.put(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD_PROPERTY, String.valueOf(300000));
-            }
-
-            if (!amsHbaseSiteProperties.containsKey(PHOENIX_QUERY_TIMEOUT_PROPERTY) ||
-              amsHbaseSiteProperties.get(PHOENIX_QUERY_TIMEOUT_PROPERTY).equals("1200000")) {
-              amsHbaseSiteProperties.put(PHOENIX_QUERY_TIMEOUT_PROPERTY, String.valueOf(300000));
-            }
-            updateConfigurationPropertiesForCluster(cluster, AMS_HBASE_SITE, newProperties, true, true);
-          }
-
-        }
-      }
-    }
-  }
-
-  protected void updateHDFSWidgetDefinition() throws AmbariException {
-    LOG.info("Updating HDFS widget definition.");
-
-    Map<String, List<String>> widgetMap = new HashMap<>();
-    Map<String, String> sectionLayoutMap = new HashMap<>();
-
-    List<String> hdfsSummaryWidgets = new ArrayList<>(Arrays.asList("NameNode RPC", "NN Connection Load",
-      "NameNode GC count", "NameNode GC time", "NameNode Host Load"));
-    widgetMap.put("HDFS_SUMMARY", hdfsSummaryWidgets);
-    sectionLayoutMap.put("HDFS_SUMMARY", "default_hdfs_dashboard");
-
-    List<String> hdfsHeatmapWidgets = new ArrayList<>(Arrays.asList("HDFS Bytes Read", "HDFS Bytes Written",
-      "DataNode Process Disk I/O Utilization", "DataNode Process Network I/O Utilization"));
-    widgetMap.put("HDFS_HEATMAPS", hdfsHeatmapWidgets);
-    sectionLayoutMap.put("HDFS_HEATMAPS", "default_hdfs_heatmap");
-
-    updateWidgetDefinitionsForService("HDFS", widgetMap, sectionLayoutMap);
-  }
-
-  protected void updateYARNWidgetDefinition() throws AmbariException {
-    LOG.info("Updating YARN widget definition.");
-
-    Map<String, List<String>> widgetMap = new HashMap<>();
-    Map<String, String> sectionLayoutMap = new HashMap<>();
-
-    List<String> yarnSummaryWidgets = new ArrayList<>(Arrays.asList("Container Failures", "App Failures", "Cluster Memory"));
-    widgetMap.put("YARN_SUMMARY", yarnSummaryWidgets);
-    sectionLayoutMap.put("YARN_SUMMARY", "default_yarn_dashboard");
-
-    List<String> yarnHeatmapWidgets = new ArrayList<>(Arrays.asList("Container Failures"));
-    widgetMap.put("YARN_HEATMAPS", yarnHeatmapWidgets);
-    sectionLayoutMap.put("YARN_HEATMAPS", "default_yarn_heatmap");
-
-    updateWidgetDefinitionsForService("YARN", widgetMap, sectionLayoutMap);
-
-  }
-
-  protected void updateHBASEWidgetDefinition() throws AmbariException {
-
-    LOG.info("Updating HBASE widget definition.");
-
-    Map<String, List<String>> widgetMap = new HashMap<>();
-    Map<String, String> sectionLayoutMap = new HashMap<>();
-
-    List<String> hbaseSummaryWidgets = new ArrayList<>(Arrays.asList("Reads and Writes", "Blocked Updates"));
-    widgetMap.put("HBASE_SUMMARY", hbaseSummaryWidgets);
-    sectionLayoutMap.put("HBASE_SUMMARY", "default_hbase_dashboard");
-
-    updateWidgetDefinitionsForService("HBASE", widgetMap, sectionLayoutMap);
-  }
-
-
-  protected void updateHbaseEnvConfig() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config hbaseEnvConfig = cluster.getDesiredConfigByType(HBASE_ENV_CONFIG);
-      if (hbaseEnvConfig != null) {
-        Map<String, String> updates = getUpdatedHbaseEnvProperties(hbaseEnvConfig.getProperties().get(CONTENT_PROPERTY));
-        if (!updates.isEmpty()) {
-          updateConfigurationPropertiesForCluster(cluster, HBASE_ENV_CONFIG, updates, true, false);
-        }
-
-      }
-    }
-  }
-
-  protected Map<String, String> getUpdatedHbaseEnvProperties(String content) {
-    if (content != null) {
-      //Fix bad config added in Upgrade 2.2.0.
-      String badConfig = "export HBASE_OPTS=\"-Djava.io.tmpdir={{java_io_tmpdir}}\"";
-      String correctConfig = "export HBASE_OPTS=\"${HBASE_OPTS} -Djava.io.tmpdir={{java_io_tmpdir}}\"";
-
-      if (content.contains(badConfig)) {
-        content = content.replace(badConfig, correctConfig);
-        return Collections.singletonMap(CONTENT_PROPERTY, content);
-      }
-    }
-    return Collections.emptyMap();
-  }
-
-  @Override
-  protected void updateWidgetDefinitionsForService(String serviceName, Map<String, List<String>> widgetMap,
-                                                 Map<String, String> sectionLayoutMap) throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    AmbariMetaInfo ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
-    Type widgetLayoutType = new TypeToken<Map<String, List<WidgetLayout>>>(){}.getType();
-    Gson gson = injector.getInstance(Gson.class);
-    WidgetDAO widgetDAO = injector.getInstance(WidgetDAO.class);
-
-    Clusters clusters = ambariManagementController.getClusters();
-
-
-
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
-    for (final Cluster cluster : clusterMap.values()) {
-      long clusterID = cluster.getClusterId();
-
-
-      Set<StackId> stackIds = new HashSet<>();
-      for (Service service : cluster.getServices().values()) {
-        StackId stackId = service.getDesiredStackId();
-        if (stackIds.contains(stackId)) {
-          continue;
-        } else {
-          stackIds.add(stackId);
-        }
-
-        Map<String, Object> widgetDescriptor = null;
-        StackInfo stackInfo = ambariMetaInfo.getStack(stackId.getStackName(), stackId.getStackVersion());
-        ServiceInfo serviceInfo = stackInfo.getService(serviceName);
-        if (serviceInfo == null) {
-          LOG.info("Skipping updating widget definition, because " + serviceName +  " service is not present in cluster " +
-            "cluster_name= " + cluster.getClusterName());
-          continue;
-        }
-
-        for (String section : widgetMap.keySet()) {
-          List<String> widgets = widgetMap.get(section);
-          for (String widgetName : widgets) {
-            List<WidgetEntity> widgetEntities = widgetDAO.findByName(clusterID,
-              widgetName, "ambari", section);
-
-            if (widgetEntities != null && widgetEntities.size() > 0) {
-              WidgetEntity entityToUpdate = null;
-              if (widgetEntities.size() > 1) {
-                LOG.info("Found more that 1 entity with name = "+ widgetName +
-                  " for cluster = " + cluster.getClusterName() + ", skipping update.");
-              } else {
-                entityToUpdate = widgetEntities.iterator().next();
-              }
-              if (entityToUpdate != null) {
-                LOG.info("Updating widget: " + entityToUpdate.getWidgetName());
-                // Get the definition from widgets.json file
-                WidgetLayoutInfo targetWidgetLayoutInfo = null;
-                File widgetDescriptorFile = serviceInfo.getWidgetsDescriptorFile();
-                if (widgetDescriptorFile != null && widgetDescriptorFile.exists()) {
-                  try {
-                    widgetDescriptor = gson.fromJson(new FileReader(widgetDescriptorFile), widgetLayoutType);
-                  } catch (Exception ex) {
-                    String msg = "Error loading widgets from file: " + widgetDescriptorFile;
-                    LOG.error(msg, ex);
-                    widgetDescriptor = null;
-                  }
-                }
-                if (widgetDescriptor != null) {
-                  LOG.debug("Loaded widget descriptor: {}", widgetDescriptor);
-                  for (Object artifact : widgetDescriptor.values()) {
-                    List<WidgetLayout> widgetLayouts = (List<WidgetLayout>) artifact;
-                    for (WidgetLayout widgetLayout : widgetLayouts) {
-                      if (widgetLayout.getLayoutName().equals(sectionLayoutMap.get(section))) {
-                        for (WidgetLayoutInfo layoutInfo : widgetLayout.getWidgetLayoutInfoList()) {
-                          if (layoutInfo.getWidgetName().equals(widgetName)) {
-                            targetWidgetLayoutInfo = layoutInfo;
-                          }
-                        }
-                      }
-                    }
-                  }
-                }
-                if (targetWidgetLayoutInfo != null) {
-                  entityToUpdate.setMetrics(gson.toJson(targetWidgetLayoutInfo.getMetricsInfo()));
-                  entityToUpdate.setWidgetValues(gson.toJson(targetWidgetLayoutInfo.getValues()));
-                  if ("HBASE".equals(serviceName) && "Reads and Writes".equals(widgetName)) {
-                    entityToUpdate.setDescription(targetWidgetLayoutInfo.getDescription());
-                    LOG.info("Update description for HBase Reads and Writes widget");
-                  }
-                  widgetDAO.merge(entityToUpdate);
-                } else {
-                  LOG.warn("Unable to find widget layout info for " + widgetName +
-                    " in the stack: " + stackId);
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-
-  protected void updateHiveConfig() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config hiveSiteConfig = cluster.getDesiredConfigByType(HIVE_SITE_CONFIG);
-      Config atlasConfig = cluster.getDesiredConfigByType(ATLAS_APPLICATION_PROPERTIES_CONFIG);
-
-      Service service = cluster.getServices().get("ATLAS");
-
-      if (null == service) {
-        continue;
-      }
-
-      StackId stackId = service.getDesiredStackId();
-
-      boolean isStackNotLess23 = (stackId != null && stackId.getStackName().equals("HDP") &&
-        VersionUtils.compareVersions(stackId.getStackVersion(), "2.3") >= 0);
-
-      List<ServiceComponentHost> atlasHost = cluster.getServiceComponentHosts("ATLAS", "ATLAS_SERVER");
-      Map<String, String> updates = new HashMap<>();
-
-      if (isStackNotLess23 && atlasHost.size() != 0 && hiveSiteConfig != null) {
-
-        updates.put(ATLAS_HOOK_HIVE_MINTHREADS_PROPERTY, "1");
-        updates.put(ATLAS_HOOK_HIVE_MAXTHREADS_PROPERTY, "1");
-        updates.put(ATLAS_CLUSTER_NAME_PROPERTY, "primary");
-
-        if (atlasConfig != null && atlasConfig.getProperties().containsKey(ATLAS_ENABLETLS_PROPERTY)) {
-          String atlasEnableTLSProperty = atlasConfig.getProperties().get(ATLAS_ENABLETLS_PROPERTY);
-          String atlasScheme = "http";
-          String atlasServerHttpPortProperty = atlasConfig.getProperties().get(ATLAS_SERVER_HTTP_PORT_PROPERTY);
-          if (atlasEnableTLSProperty.toLowerCase().equals("true")) {
-            atlasServerHttpPortProperty = atlasConfig.getProperties().get(ATLAS_SERVER_HTTPS_PORT_PROPERTY);
-            atlasScheme = "https";
-          }
-          updates.put(ATLAS_REST_ADDRESS_PROPERTY, String.format("%s://%s:%s", atlasScheme, atlasHost.get(0).getHostName(), atlasServerHttpPortProperty));
-        }
-        updateConfigurationPropertiesForCluster(cluster, HIVE_SITE_CONFIG, updates, false, false);
-      }
-    }
-  }
-
-  protected void updateCorruptedReplicaWidget() throws SQLException {
-    String widgetValues = String.format("[{\"name\": \"%s\", \"value\": \"%s\"}]",
-      WIDGET_CORRUPT_REPLICAS, WIDGET_VALUES_VALUE);
-    String updateStatement = "UPDATE %s SET %s='%s', %s='%s', %s='%s' WHERE %s='%s'";
-
-    LOG.info("Update widget definition for HDFS corrupted blocks metric");
-    dbAccessor.executeUpdate(String.format(updateStatement,
-      WIDGET_TABLE,
-      WIDGET_NAME, WIDGET_CORRUPT_REPLICAS,
-      WIDGET_DESCRIPTION, WIDGET_CORRUPT_REPLICAS_DESCRIPTION,
-      WIDGET_VALUES, widgetValues,
-      WIDGET_NAME, WIDGET_CORRUPT_BLOCKS
-    ));
-  }
-
-  /**
-   * Updates the {@value #UPGRADE_TABLE} in the following ways:
-   * <ul>
-   * <li>{@link #UPGRADE_SUSPENDED_COLUMN} is added</li>
-   * </ul>
-   *
-   * @throws AmbariException
-   * @throws SQLException
-   */
-  protected void updateUpgradeTable() throws AmbariException, SQLException {
-    dbAccessor.addColumn(UPGRADE_TABLE,
-      new DBAccessor.DBColumnInfo(UPGRADE_SUSPENDED_COLUMN, Short.class, 1, 0, false));
-  }
-
-  /**
-   * Copy cluster & service widgets for Storm and Kafka from stack to DB.
-   */
-  protected void initializeStromAndKafkaWidgets() throws AmbariException {
-    AmbariManagementController controller = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = controller.getClusters();
-    if (clusters == null) {
-      return;
-    }
-
-    Map<String, Cluster> clusterMap = clusters.getClusters();
-
-    if (clusterMap != null && !clusterMap.isEmpty()) {
-      for (Cluster cluster : clusterMap.values()) {
-
-        Map<String, Service> serviceMap = cluster.getServices();
-        if (serviceMap != null && !serviceMap.isEmpty()) {
-          for (Service service : serviceMap.values()) {
-            if ("STORM".equals(service.getName()) || "KAFKA".equals(service.getName())) {
-              controller.initializeWidgetsAndLayouts(cluster, service);
-            }
-          }
-        }
-      }
-    }
-  }
-
-}
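
For orientation, every catalog removed in this commit follows the same AbstractUpgradeCatalog contract seen above: declare a source and target version, then split the work across DDL, pre-DML and DML phases. The following is a minimal sketch of that shape; the class name and version strings are illustrative only and are not part of this commit or of the Ambari source tree.

    package org.apache.ambari.server.upgrade;

    import java.sql.SQLException;

    import org.apache.ambari.server.AmbariException;

    import com.google.inject.Inject;
    import com.google.inject.Injector;

    /**
     * Hypothetical catalog used only to illustrate the contract shared by the
     * classes removed in this commit.
     */
    public class UpgradeCatalogExample extends AbstractUpgradeCatalog {

      @Inject
      public UpgradeCatalogExample(Injector injector) {
        super(injector);
      }

      @Override
      public String getSourceVersion() {
        return "2.5.2";   // illustrative version only
      }

      @Override
      public String getTargetVersion() {
        return "3.0.0";   // illustrative version only
      }

      @Override
      protected void executeDDLUpdates() throws AmbariException, SQLException {
        // schema changes, e.g. dbAccessor.addColumn(...) or createIndex(...)
      }

      @Override
      protected void executePreDMLUpdates() throws AmbariException, SQLException {
        // data fixes that must run before the DML phase
      }

      @Override
      protected void executeDMLUpdates() throws AmbariException, SQLException {
        // configuration and metadata updates, e.g. addNewConfigurationsFromXml()
      }
    }

Real catalogs such as the removed UpgradeCatalog222 fill these methods with dbAccessor schema changes and updateConfigurationPropertiesForCluster calls, as the hunks above show.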

http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog230.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog230.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog230.java
deleted file mode 100644
index a53ac95..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog230.java
+++ /dev/null
@@ -1,402 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
-import org.apache.ambari.server.orm.dao.PermissionDAO;
-import org.apache.ambari.server.orm.dao.ResourceTypeDAO;
-import org.apache.ambari.server.orm.dao.RoleAuthorizationDAO;
-import org.apache.ambari.server.orm.entities.PermissionEntity;
-import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
-import org.apache.ambari.server.orm.entities.RoleAuthorizationEntity;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-
-/**
- * Upgrade catalog for version 2.3.0.
- */
-public class UpgradeCatalog230 extends AbstractUpgradeCatalog {
-  private static final String HOST_ROLE_COMMAND_TABLE = "host_role_command";
-  private static final String USERS_TABLE = "users";
-
-  private static final String HOST_ID_COL = "host_id";
-  private static final String USER_TYPE_COL = "user_type";
-
-  private static final String ADMIN_PERMISSION_TABLE = "adminpermission";
-  private static final String PERMISSION_ID_COL = "permission_id";
-  private static final String PERMISSION_NAME_COL = "permission_name";
-  private static final String PERMISSION_LABEL_COL = "permission_label";
-
-  private static final String ROLE_AUTHORIZATION_TABLE = "roleauthorization";
-  private static final String PERMISSION_ROLE_AUTHORIZATION_TABLE = "permission_roleauthorization";
-  private static final String ROLE_AUTHORIZATION_ID_COL = "authorization_id";
-  private static final String ROLE_AUTHORIZATION_NAME_COL = "authorization_name";
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getSourceVersion() {
-    return "2.2.1";
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getTargetVersion() {
-    return "2.3.0";
-  }
-
-  /**
-   * Logger.
-   */
-  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog230.class);
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Don't forget to register new UpgradeCatalogs in {@link org.apache.ambari.server.upgrade.SchemaUpgradeHelper.UpgradeHelperModule#configure()}
-   * @param injector Guice injector to track dependencies and uses bindings to inject them.
-   */
-  @Inject
-  public UpgradeCatalog230(Injector injector) {
-    super(injector);
-    this.injector = injector;
-  }
-
-  // ----- AbstractUpgradeCatalog --------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDDLUpdates() throws AmbariException, SQLException {
-
-    dbAccessor.alterColumn(HOST_ROLE_COMMAND_TABLE, new DBColumnInfo(HOST_ID_COL, Long.class, null, null, true));
-    dbAccessor.addColumn(USERS_TABLE, new DBColumnInfo(USER_TYPE_COL, String.class, null, "LOCAL", true));
-
-    dbAccessor.executeQuery("UPDATE users SET user_type='LDAP' WHERE ldap_user=1");
-
-    dbAccessor.addUniqueConstraint(USERS_TABLE, "UNQ_users_0", "user_name", "user_type");
-
-    updateAdminPermissionTable();
-    createRoleAuthorizationTables();
-  }
-
-  @Override
-  protected void executePreDMLUpdates() throws AmbariException, SQLException {
-  }
-
-  @Override
-  protected void executeDMLUpdates() throws AmbariException, SQLException {
-    setPermissionLabels();
-    updatePermissionNames();
-    addNewPermissions();
-    createRoleAuthorizations();
-    createPermissionRoleAuthorizationMap();
-  }
-
-  private void addNewPermissions() throws SQLException {
-    LOG.info("Adding new permissions: CLUSTER.OPERATOR, SERVICE.ADMINISTRATOR, SERVICE.OPERATOR");
-
-    PermissionDAO permissionDAO = injector.getInstance(PermissionDAO.class);
-    ResourceTypeDAO resourceTypeDAO = injector.getInstance(ResourceTypeDAO.class);
-    ResourceTypeEntity clusterResourceTypeEntity = resourceTypeDAO.findByName("CLUSTER");
-
-    // CLUSTER.OPERATOR: Cluster Operator
-    if(permissionDAO.findPermissionByNameAndType("CLUSTER.OPERATOR", clusterResourceTypeEntity) == null) {
-      PermissionEntity permissionEntity = new PermissionEntity();
-      permissionEntity.setId(null);
-      permissionEntity.setPermissionName("CLUSTER.OPERATOR");
-      permissionEntity.setPermissionLabel("Cluster Operator");
-      permissionEntity.setResourceType(clusterResourceTypeEntity);
-      permissionDAO.create(permissionEntity);
-    }
-
-    // SERVICE.ADMINISTRATOR: Service Administrator
-    if(permissionDAO.findPermissionByNameAndType("SERVICE.ADMINISTRATOR", clusterResourceTypeEntity) == null) {
-      PermissionEntity permissionEntity = new PermissionEntity();
-      permissionEntity.setId(null);
-      permissionEntity.setPermissionName("SERVICE.ADMINISTRATOR");
-      permissionEntity.setPermissionLabel("Service Administrator");
-      permissionEntity.setResourceType(clusterResourceTypeEntity);
-      permissionDAO.create(permissionEntity);
-    }
-
-    // SERVICE.OPERATOR: Service Operator
-    if(permissionDAO.findPermissionByNameAndType("SERVICE.OPERATOR", clusterResourceTypeEntity) == null) {
-      PermissionEntity permissionEntity = new PermissionEntity();
-      permissionEntity.setId(null);
-      permissionEntity.setPermissionName("SERVICE.OPERATOR");
-      permissionEntity.setPermissionLabel("Service Operator");
-      permissionEntity.setResourceType(clusterResourceTypeEntity);
-      permissionDAO.create(permissionEntity);
-    }
-  }
-
-
-  private void createRoleAuthorizations() throws SQLException {
-    LOG.info("Adding authorizations");
-
-    RoleAuthorizationDAO roleAuthorizationDAO = injector.getInstance(RoleAuthorizationDAO.class);
-
-    createRoleAuthorization(roleAuthorizationDAO, "VIEW.USE", "Use View");
-
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.VIEW_METRICS", "View metrics");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.VIEW_STATUS_INFO", "View status information");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.VIEW_CONFIGS", "View configurations");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.COMPARE_CONFIGS", "Compare configurations");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.VIEW_ALERTS", "View service-level alerts");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.START_STOP", "Start/Stop/Restart Service");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.DECOMMISSION_RECOMMISSION", "Decommission/recommission");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.RUN_SERVICE_CHECK", "Run service checks");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.TOGGLE_MAINTENANCE", "Turn on/off maintenance mode");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.RUN_CUSTOM_COMMAND", "Perform service-specific tasks");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.MODIFY_CONFIGS", "Modify configurations");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.MANAGE_CONFIG_GROUPS", "Manage configuration groups");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.MANAGE_ALERTS", "Manage service-level alerts");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.MOVE", "Move to another host");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.ENABLE_HA", "Enable HA");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.TOGGLE_ALERTS", "Enable/disable service-level alerts");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.ADD_DELETE_SERVICES", "Add/delete services");
-    createRoleAuthorization(roleAuthorizationDAO, "SERVICE.SET_SERVICE_USERS_GROUPS", "Set service users and groups");
-
-    createRoleAuthorization(roleAuthorizationDAO, "HOST.VIEW_METRICS", "View metrics");
-    createRoleAuthorization(roleAuthorizationDAO, "HOST.VIEW_STATUS_INFO", "View status information");
-    createRoleAuthorization(roleAuthorizationDAO, "HOST.VIEW_CONFIGS", "View configuration");
-    createRoleAuthorization(roleAuthorizationDAO, "HOST.TOGGLE_MAINTENANCE", "Turn on/off maintenance mode");
-    createRoleAuthorization(roleAuthorizationDAO, "HOST.ADD_DELETE_COMPONENTS", "Install components");
-    createRoleAuthorization(roleAuthorizationDAO, "HOST.ADD_DELETE_HOSTS", "Add/Delete hosts");
-
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.VIEW_METRICS", "View metrics");
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.VIEW_STATUS_INFO", "View status information");
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.VIEW_CONFIGS", "View configuration");
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.VIEW_STACK_DETAILS", "View stack version details");
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.VIEW_ALERTS", "View cluster-level alerts");
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.MANAGE_CREDENTIALS", "Manage external credentials");
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.MODIFY_CONFIGS", "Modify cluster configurations");
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.MANAGE_CONFIG_GROUPS", "Manage cluster configuration groups");
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.MANAGE_ALERTS", "Manage cluster-level alerts");
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.TOGGLE_ALERTS", "Enable/disable cluster-level alerts");
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.TOGGLE_KERBEROS", "Enable/disable Kerberos");
-    createRoleAuthorization(roleAuthorizationDAO, "CLUSTER.UPGRADE_DOWNGRADE_STACK", "Upgrade/downgrade stack");
-
-    createRoleAuthorization(roleAuthorizationDAO, "AMBARI.ADD_DELETE_CLUSTERS", "Create new clusters");
-    createRoleAuthorization(roleAuthorizationDAO, "AMBARI.RENAME_CLUSTER", "Rename clusters");
-    createRoleAuthorization(roleAuthorizationDAO, "AMBARI.MANAGE_USERS", "Manage users");
-    createRoleAuthorization(roleAuthorizationDAO, "AMBARI.MANAGE_GROUPS", "Manage groups");
-    createRoleAuthorization(roleAuthorizationDAO, "AMBARI.MANAGE_VIEWS", "Manage Ambari Views");
-    createRoleAuthorization(roleAuthorizationDAO, "AMBARI.ASSIGN_ROLES", "Assign roles");
-    createRoleAuthorization(roleAuthorizationDAO, "AMBARI.MANAGE_STACK_VERSIONS", "Manage stack versions");
-    createRoleAuthorization(roleAuthorizationDAO, "AMBARI.EDIT_STACK_REPOS", "Edit stack repository URLs");
-  }
-
-  private void createRoleAuthorization(RoleAuthorizationDAO roleAuthorizationDAO, String id, String name) {
-    if(roleAuthorizationDAO.findById(id) == null) {
-      RoleAuthorizationEntity roleAuthorizationEntity = new RoleAuthorizationEntity();
-      roleAuthorizationEntity.setAuthorizationId(id);
-      roleAuthorizationEntity.setAuthorizationName(name);
-      roleAuthorizationDAO.create(roleAuthorizationEntity);
-    }
-  }
-
-  private void createPermissionRoleAuthorizationMap() throws SQLException {
-    LOG.info("Creating permission to authorizations map");
-
-    // Determine the role entities
-    PermissionDAO permissionDAO = injector.getInstance(PermissionDAO.class);
-    ResourceTypeDAO resourceTypeDAO = injector.getInstance(ResourceTypeDAO.class);
-
-    ResourceTypeEntity ambariResource = resourceTypeDAO.findByName("AMBARI");
-    ResourceTypeEntity clusterResource = resourceTypeDAO.findByName("CLUSTER");
-    ResourceTypeEntity viewResource = resourceTypeDAO.findByName("VIEW");
-
-    PermissionEntity viewPermission = permissionDAO.findPermissionByNameAndType("VIEW.USER", viewResource);
-    PermissionEntity administratorPermission = permissionDAO.findPermissionByNameAndType("AMBARI.ADMINISTRATOR", ambariResource);
-    PermissionEntity clusterUserPermission = permissionDAO.findPermissionByNameAndType("CLUSTER.USER", clusterResource);
-    PermissionEntity clusterOperatorPermission = permissionDAO.findPermissionByNameAndType("CLUSTER.OPERATOR", clusterResource);
-    PermissionEntity clusterAdministratorPermission = permissionDAO.findPermissionByNameAndType("CLUSTER.ADMINISTRATOR", clusterResource);
-    PermissionEntity serviceAdministratorPermission = permissionDAO.findPermissionByNameAndType("SERVICE.ADMINISTRATOR", clusterResource);
-    PermissionEntity serviceOperatorPermission = permissionDAO.findPermissionByNameAndType("SERVICE.OPERATOR", clusterResource);
-
-    // Create role groups
-    Collection<PermissionEntity> viewUserAndAdministrator = Arrays.asList(viewPermission, administratorPermission);
-    Collection<PermissionEntity> clusterUserAndUp = Arrays.asList(
-        clusterUserPermission,
-        serviceOperatorPermission,
-        serviceAdministratorPermission,
-        clusterOperatorPermission,
-        clusterAdministratorPermission,
-        administratorPermission);
-    Collection<PermissionEntity> serviceOperatorAndUp = Arrays.asList(
-        serviceOperatorPermission,
-        serviceAdministratorPermission,
-        clusterOperatorPermission,
-        clusterAdministratorPermission,
-        administratorPermission);
-    Collection<PermissionEntity> serviceAdministratorAndUp = Arrays.asList(
-        serviceAdministratorPermission,
-        clusterOperatorPermission,
-        clusterAdministratorPermission,
-        administratorPermission);
-    Collection<PermissionEntity> clusterOperatorAndUp = Arrays.asList(
-        clusterOperatorPermission,
-        clusterAdministratorPermission,
-        administratorPermission);
-    Collection<PermissionEntity> clusterAdministratorAndUp = Arrays.asList(
-        clusterAdministratorPermission,
-        administratorPermission);
-    Collection<PermissionEntity> administratorOnly = Collections.singleton(administratorPermission);
-
-    // A map of the authorizations to the relevant roles
-    Map<String, Collection<PermissionEntity>> map = new HashMap<>();
-    map.put("VIEW.USE", viewUserAndAdministrator);
-    map.put("SERVICE.VIEW_METRICS", clusterUserAndUp);
-    map.put("SERVICE.VIEW_STATUS_INFO", clusterUserAndUp);
-    map.put("SERVICE.VIEW_CONFIGS", clusterUserAndUp);
-    map.put("SERVICE.COMPARE_CONFIGS", clusterUserAndUp);
-    map.put("SERVICE.VIEW_ALERTS", clusterUserAndUp);
-    map.put("SERVICE.START_STOP", serviceOperatorAndUp);
-    map.put("SERVICE.DECOMMISSION_RECOMMISSION", serviceOperatorAndUp);
-    map.put("SERVICE.RUN_SERVICE_CHECK", serviceOperatorAndUp);
-    map.put("SERVICE.TOGGLE_MAINTENANCE", serviceOperatorAndUp);
-    map.put("SERVICE.RUN_CUSTOM_COMMAND", serviceOperatorAndUp);
-    map.put("SERVICE.MODIFY_CONFIGS", serviceAdministratorAndUp);
-    map.put("SERVICE.MANAGE_CONFIG_GROUPS", serviceAdministratorAndUp);
-    map.put("CLUSTER.MANAGE_CONFIG_GROUPS", serviceAdministratorAndUp);
-    map.put("SERVICE.MANAGE_ALERTS", serviceAdministratorAndUp);
-    map.put("SERVICE.MOVE", serviceAdministratorAndUp);
-    map.put("SERVICE.ENABLE_HA", serviceAdministratorAndUp);
-    map.put("SERVICE.TOGGLE_ALERTS", serviceAdministratorAndUp);
-    map.put("SERVICE.ADD_DELETE_SERVICES", clusterAdministratorAndUp);
-    map.put("SERVICE.SET_SERVICE_USERS_GROUPS", clusterAdministratorAndUp);
-    map.put("HOST.VIEW_METRICS", clusterUserAndUp);
-    map.put("HOST.VIEW_STATUS_INFO", clusterUserAndUp);
-    map.put("HOST.VIEW_CONFIGS", clusterUserAndUp);
-    map.put("HOST.TOGGLE_MAINTENANCE", clusterOperatorAndUp);
-    map.put("HOST.ADD_DELETE_COMPONENTS", clusterOperatorAndUp);
-    map.put("HOST.ADD_DELETE_HOSTS", clusterOperatorAndUp);
-    map.put("CLUSTER.VIEW_METRICS", clusterUserAndUp);
-    map.put("CLUSTER.VIEW_STATUS_INFO", clusterUserAndUp);
-    map.put("CLUSTER.VIEW_CONFIGS", clusterUserAndUp);
-    map.put("CLUSTER.VIEW_STACK_DETAILS", clusterUserAndUp);
-    map.put("CLUSTER.VIEW_ALERTS", clusterUserAndUp);
-    map.put("CLUSTER.MANAGE_CREDENTIALS", clusterAdministratorAndUp);
-    map.put("CLUSTER.MODIFY_CONFIGS", clusterAdministratorAndUp);
-    map.put("CLUSTER.MANAGE_ALERTS", clusterAdministratorAndUp);
-    map.put("CLUSTER.TOGGLE_ALERTS", clusterAdministratorAndUp);
-    map.put("CLUSTER.TOGGLE_KERBEROS", clusterAdministratorAndUp);
-    map.put("CLUSTER.UPGRADE_DOWNGRADE_STACK", clusterAdministratorAndUp);
-    map.put("AMBARI.ADD_DELETE_CLUSTERS", administratorOnly);
-    map.put("AMBARI.RENAME_CLUSTER", administratorOnly);
-    map.put("AMBARI.MANAGE_USERS", administratorOnly);
-    map.put("AMBARI.MANAGE_GROUPS", administratorOnly);
-    map.put("AMBARI.MANAGE_VIEWS", administratorOnly);
-    map.put("AMBARI.ASSIGN_ROLES", administratorOnly);
-    map.put("AMBARI.MANAGE_STACK_VERSIONS", administratorOnly);
-    map.put("AMBARI.EDIT_STACK_REPOS", administratorOnly);
-
-    // Iterate over the map of authorizations to role to find the set of roles to map to each
-    // authorization and then add the relevant record
-    for (Map.Entry<String, Collection<PermissionEntity>> entry : map.entrySet()) {
-      String authorizationId = entry.getKey();
-
-      for (PermissionEntity permission : entry.getValue()) {
-        addAuthorizationToRole(permission, authorizationId);
-      }
-    }
-  }
-
-
-  // ----- UpgradeCatalog ----------------------------------------------------
-
-  private void updateAdminPermissionTable() throws SQLException {
-    // Add the permission_label column to the adminpermission table
-    dbAccessor.addColumn(ADMIN_PERMISSION_TABLE, new DBColumnInfo(PERMISSION_LABEL_COL, String.class, 255, null, true));
-  }
-
-  private void createRoleAuthorizationTables() throws SQLException {
-
-    ArrayList<DBColumnInfo> columns;
-
-    //  Add roleauthorization table
-    LOG.info("Creating " + ROLE_AUTHORIZATION_TABLE + " table");
-    columns = new ArrayList<>();
-    columns.add(new DBColumnInfo(ROLE_AUTHORIZATION_ID_COL, String.class, 100, null, false));
-    columns.add(new DBColumnInfo(ROLE_AUTHORIZATION_NAME_COL, String.class, 255, null, false));
-    dbAccessor.createTable(ROLE_AUTHORIZATION_TABLE, columns, ROLE_AUTHORIZATION_ID_COL);
-
-    //  Add permission_roleauthorization table to map roleauthorizations to permissions (aka roles)
-    LOG.info("Creating " + PERMISSION_ROLE_AUTHORIZATION_TABLE + " table");
-    columns = new ArrayList<>();
-    columns.add(new DBColumnInfo(PERMISSION_ID_COL, Long.class, null, null, false));
-    columns.add(new DBColumnInfo(ROLE_AUTHORIZATION_ID_COL, String.class, 100, null, false));
-    dbAccessor.createTable(PERMISSION_ROLE_AUTHORIZATION_TABLE, columns, PERMISSION_ID_COL, ROLE_AUTHORIZATION_ID_COL);
-
-    dbAccessor.addFKConstraint(PERMISSION_ROLE_AUTHORIZATION_TABLE, "FK_permission_roleauth_pid",
-        PERMISSION_ID_COL, ADMIN_PERMISSION_TABLE, PERMISSION_ID_COL, false);
-
-    dbAccessor.addFKConstraint(PERMISSION_ROLE_AUTHORIZATION_TABLE, "FK_permission_roleauth_aid",
-        ROLE_AUTHORIZATION_ID_COL, ROLE_AUTHORIZATION_TABLE, ROLE_AUTHORIZATION_ID_COL, false);
-  }
-
-  private void setPermissionLabels() throws SQLException {
-    String updateStatement = "UPDATE " + ADMIN_PERMISSION_TABLE + " SET " + PERMISSION_LABEL_COL + "='%s' WHERE " + PERMISSION_ID_COL + "=%d";
-
-    LOG.info("Setting permission labels");
-    dbAccessor.executeUpdate(String.format(updateStatement,
-        "Ambari Administrator", PermissionEntity.AMBARI_ADMINISTRATOR_PERMISSION));
-    dbAccessor.executeUpdate(String.format(updateStatement,
-        "Cluster User", PermissionEntity.CLUSTER_USER_PERMISSION));
-    dbAccessor.executeUpdate(String.format(updateStatement,
-        "Cluster Administrator", PermissionEntity.CLUSTER_ADMINISTRATOR_PERMISSION));
-    dbAccessor.executeUpdate(String.format(updateStatement,
-        "View User", PermissionEntity.VIEW_USER_PERMISSION));
-  }
-
-  private void updatePermissionNames() throws SQLException {
-    String updateStatement = "UPDATE " + ADMIN_PERMISSION_TABLE + " SET " + PERMISSION_NAME_COL + "='%s' WHERE " + PERMISSION_ID_COL + "=%d";
-
-    // Update permissions names
-    LOG.info("Updating permission names");
-    dbAccessor.executeUpdate(String.format(updateStatement,
-        PermissionEntity.AMBARI_ADMINISTRATOR_PERMISSION_NAME, PermissionEntity.AMBARI_ADMINISTRATOR_PERMISSION));
-    dbAccessor.executeUpdate(String.format(updateStatement,
-        PermissionEntity.CLUSTER_USER_PERMISSION_NAME, PermissionEntity.CLUSTER_USER_PERMISSION));
-    dbAccessor.executeUpdate(String.format(updateStatement,
-        PermissionEntity.CLUSTER_ADMINISTRATOR_PERMISSION_NAME, PermissionEntity.CLUSTER_ADMINISTRATOR_PERMISSION));
-    dbAccessor.executeUpdate(String.format(updateStatement,
-        PermissionEntity.VIEW_USER_PERMISSION_NAME, PermissionEntity.VIEW_USER_PERMISSION));
-  }
-
-}
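
Both removed classes carry the reminder to register new catalogs in SchemaUpgradeHelper.UpgradeHelperModule#configure(). Registration goes through a Guice Multibinder; the sketch below is a hypothetical, stripped-down module that shows only the mechanism (it binds the illustrative UpgradeCatalogExample from the earlier sketch and does not reflect the real module's contents after this commit).

    package org.apache.ambari.server.upgrade;

    import com.google.inject.AbstractModule;
    import com.google.inject.multibindings.Multibinder;

    /** Hypothetical module illustrating how upgrade catalogs are registered. */
    public class ExampleUpgradeHelperModule extends AbstractModule {

      @Override
      protected void configure() {
        // Every binding added here ends up in the injected Set<UpgradeCatalog>;
        // SchemaUpgradeHelper orders that set by version to build the upgrade path.
        Multibinder<UpgradeCatalog> catalogBinder =
            Multibinder.newSetBinder(binder(), UpgradeCatalog.class);
        catalogBinder.addBinding().to(UpgradeCatalogExample.class);
        // ...one addBinding().to(...) line per remaining catalog
      }
    }
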


[07/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
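
The test class removed below exercises catalogs with EasyMock: a mocked DBAccessor records the expected schema calls, and Capture objects pull out the column definitions for assertions. As a quick orientation, here is a hypothetical, self-contained sketch of that pattern; the helper method is invented for illustration, the table and column names mirror the suspended column handled by the removed UpgradeCatalog222, and it assumes DBColumnInfo exposes getName().

    import static org.easymock.EasyMock.capture;
    import static org.easymock.EasyMock.createNiceMock;
    import static org.easymock.EasyMock.eq;
    import static org.easymock.EasyMock.newCapture;
    import static org.easymock.EasyMock.replay;
    import static org.easymock.EasyMock.verify;

    import java.sql.SQLException;

    import org.apache.ambari.server.orm.DBAccessor;
    import org.easymock.Capture;
    import org.junit.Assert;
    import org.junit.Test;

    public class ExampleDdlCaptureTest {

      /** Hypothetical helper standing in for a catalog's executeDDLUpdates(). */
      private void addSuspendedColumn(DBAccessor dbAccessor) throws SQLException {
        dbAccessor.addColumn("upgrade",
            new DBAccessor.DBColumnInfo("suspended", Short.class, 1, 0, false));
      }

      @Test
      public void testSuspendedColumnIsAdded() throws Exception {
        DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
        Capture<DBAccessor.DBColumnInfo> captured = newCapture();

        // Record the expected call and capture the column definition.
        dbAccessor.addColumn(eq("upgrade"), capture(captured));

        replay(dbAccessor);
        addSuspendedColumn(dbAccessor);
        verify(dbAccessor);

        // Assumes DBColumnInfo provides getName(); adjust if the accessor differs.
        Assert.assertEquals("suspended", captured.getValue().getName());
      }
    }
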
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java
deleted file mode 100644
index 1c3d34b..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog220Test.java
+++ /dev/null
@@ -1,1535 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import static junit.framework.Assert.assertNotNull;
-import static junit.framework.Assert.assertNull;
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.anyString;
-import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.createMockBuilder;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.createStrictMock;
-import static org.easymock.EasyMock.eq;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-import static org.easymock.EasyMock.verify;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.net.URL;
-import java.sql.SQLException;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import javax.persistence.EntityManager;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.H2DatabaseCleaner;
-import org.apache.ambari.server.actionmanager.ActionManager;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.AmbariManagementControllerImpl;
-import org.apache.ambari.server.controller.ConfigurationRequest;
-import org.apache.ambari.server.controller.ConfigurationResponse;
-import org.apache.ambari.server.controller.KerberosHelper;
-import org.apache.ambari.server.controller.MaintenanceStateHelper;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
-import org.apache.ambari.server.orm.dao.ArtifactDAO;
-import org.apache.ambari.server.orm.dao.ClusterDAO;
-import org.apache.ambari.server.orm.dao.DaoUtils;
-import org.apache.ambari.server.orm.dao.HostVersionDAO;
-import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
-import org.apache.ambari.server.orm.dao.StackDAO;
-import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
-import org.apache.ambari.server.orm.entities.ArtifactEntity;
-import org.apache.ambari.server.stack.StackManagerFactory;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.SecurityType;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
-import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
-import org.apache.ambari.server.state.stack.OsFamily;
-import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
-import org.easymock.Capture;
-import org.easymock.EasyMock;
-import org.easymock.EasyMockSupport;
-import org.easymock.IMocksControl;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Maps;
-import com.google.gson.Gson;
-import com.google.inject.AbstractModule;
-import com.google.inject.Binder;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.Module;
-import com.google.inject.Provider;
-/**
- * {@link org.apache.ambari.server.upgrade.UpgradeCatalog220} unit tests.
- */
-public class UpgradeCatalog220Test {
-  private static Injector injector;
-  private static Provider<EntityManager> entityManagerProvider = createStrictMock(Provider.class);
-  private static EntityManager entityManager = createNiceMock(EntityManager.class);
-  private AmbariManagementController amc = createNiceMock(AmbariManagementController.class);
-  private AmbariMetaInfo metaInfo = createNiceMock(AmbariMetaInfo.class);
-  private StackDAO stackDAO = createNiceMock(StackDAO.class);
-  private RepositoryVersionDAO repositoryVersionDAO = createNiceMock(RepositoryVersionDAO.class);
-  private HostVersionDAO hostVersionDAO = createNiceMock(HostVersionDAO.class);
-  private ClusterDAO clusterDAO = createNiceMock(ClusterDAO.class);
-
-  private IMocksControl mocksControl = EasyMock.createControl();
-
-  @BeforeClass
-  public static void init() {
-    reset(entityManagerProvider);
-    expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
-    replay(entityManagerProvider);
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-
-
-    // inject AmbariMetaInfo to ensure that stacks get populated in the DB
-    injector.getInstance(AmbariMetaInfo.class);
-    // load the stack entity
-
-  }
-
-  @AfterClass
-  public static void tearDown() throws AmbariException, SQLException {
-    H2DatabaseCleaner.clearDatabaseAndStopPersistenceService(injector);
-  }
-
-  @Test
-  public void testExecuteUpgradeDDLUpdates() throws Exception{
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-
-    dbAccessor.addColumn(eq("upgrade"), anyObject(DBAccessor.DBColumnInfo.class));
-    expectLastCall().times(3);
-
-    replay(dbAccessor);
-    Module module = new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(DBAccessor.class).toInstance(dbAccessor);
-        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        binder.bind(EntityManager.class).toInstance(entityManager);
-      }
-    };
-
-    Injector injector = Guice.createInjector(module);
-    UpgradeCatalog220 upgradeCatalog220 = injector.getInstance(UpgradeCatalog220.class);
-    upgradeCatalog220.executeUpgradeDDLUpdates();
-    verify(dbAccessor);
-  }
-
-  @Test
-  public void testExecuteStageDDLUpdates() throws Exception {
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-
-    dbAccessor.addColumn(eq("stage"), anyObject(DBAccessor.DBColumnInfo.class));
-    expectLastCall().times(1);
-
-    replay(dbAccessor);
-    Module module = new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(DBAccessor.class).toInstance(dbAccessor);
-        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        binder.bind(EntityManager.class).toInstance(entityManager);
-      }
-    };
-
-    Injector injector = Guice.createInjector(module);
-    UpgradeCatalog220 upgradeCatalog220 = injector.getInstance(UpgradeCatalog220.class);
-    upgradeCatalog220.executeStageDDLUpdates();
-    verify(dbAccessor);
-  }
-
-  @Test
-  public void testExecuteDMLUpdates() throws Exception {
-    // TODO AMBARI-13001, re-add unit test section.
-    /*
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    Configuration configuration = createNiceMock(Configuration.class);
-    Connection connection = createNiceMock(Connection.class);
-    Statement statement = createNiceMock(Statement.class);
-    ResultSet resultSet = createNiceMock(ResultSet.class);
-    expect(configuration.getDatabaseUrl()).andReturn(Configuration.JDBC_IN_MEMORY_URL).anyTimes();
-    dbAccessor.getConnection();
-    expectLastCall().andReturn(connection).anyTimes();
-    connection.createStatement();
-    expectLastCall().andReturn(statement).anyTimes();
-    statement.executeQuery(anyObject(String.class));
-    expectLastCall().andReturn(resultSet).anyTimes();
-
-    // Technically, this is a DDL, but it has to be run during the DML portion
-    // because it requires the persistence layer to be started.
-    UpgradeSectionDDL upgradeSectionDDL = new UpgradeSectionDDL();
-
-    // Execute any DDL schema changes
-    upgradeSectionDDL.execute(dbAccessor);
-
-    // Begin DML verifications
-    verifyBootstrapHDP21();
-
-    // Replay main sections
-    replay(dbAccessor, configuration, resultSet, connection, statement);
-
-
-    AbstractUpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor);
-    Class<?> c = AbstractUpgradeCatalog.class;
-    Field f = c.getDeclaredField("configuration");
-    f.setAccessible(true);
-    f.set(upgradeCatalog, configuration);
-    */
-
-    Method updateStormConfigs = UpgradeCatalog220.class.getDeclaredMethod("updateStormConfigs");
-    Method updateAMSConfigs = UpgradeCatalog220.class.getDeclaredMethod("updateAMSConfigs");
-    Method updateHDFSConfigs = UpgradeCatalog220.class.getDeclaredMethod("updateHDFSConfigs");
-    Method updateKafkaConfigs = UpgradeCatalog220.class.getDeclaredMethod("updateKafkaConfigs");
-    Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
-    Method updateHbaseEnvConfig = UpgradeCatalog220.class.getDeclaredMethod("updateHbaseEnvConfig");
-    Method updateFlumeEnvConfig = UpgradeCatalog220.class.getDeclaredMethod("updateFlumeEnvConfig");
-    Method updateZookeeperLog4j = UpgradeCatalog220.class.getDeclaredMethod("updateZookeeperLog4j");
-    Method updateHadoopEnvConfig = UpgradeCatalog220.class.getDeclaredMethod("updateHadoopEnv");
-    Method updateAlertDefinitions = UpgradeCatalog220.class.getDeclaredMethod("updateAlertDefinitions");
-    Method updateRangerEnvConfig = UpgradeCatalog220.class.getDeclaredMethod("updateRangerEnvConfig");
-    Method updateRangerUgsyncSiteConfig = UpgradeCatalog220.class.getDeclaredMethod("updateRangerUgsyncSiteConfig");
-    Method updateHiveConfig = UpgradeCatalog220.class.getDeclaredMethod("updateHiveConfig");
-    Method updateAccumuloConfigs = UpgradeCatalog220.class.getDeclaredMethod("updateAccumuloConfigs");
-    Method updateKerberosDescriptorArtifacts = AbstractUpgradeCatalog.class.getDeclaredMethod("updateKerberosDescriptorArtifacts");
-    Method updateKnoxTopology = UpgradeCatalog220.class.getDeclaredMethod("updateKnoxTopology");
-
-    UpgradeCatalog220 upgradeCatalog220 = createMockBuilder(UpgradeCatalog220.class)
-      .addMockedMethod(updateAMSConfigs)
-      .addMockedMethod(updateHDFSConfigs)
-      .addMockedMethod(updateStormConfigs)
-      .addMockedMethod(addNewConfigurationsFromXml)
-      .addMockedMethod(updateHbaseEnvConfig)
-      .addMockedMethod(updateFlumeEnvConfig)
-      .addMockedMethod(updateAlertDefinitions)
-      .addMockedMethod(updateKafkaConfigs)
-      .addMockedMethod(updateZookeeperLog4j)
-      .addMockedMethod(updateHadoopEnvConfig)
-      .addMockedMethod(updateRangerEnvConfig)
-      .addMockedMethod(updateRangerUgsyncSiteConfig)
-      .addMockedMethod(updateHiveConfig)
-      .addMockedMethod(updateAccumuloConfigs)
-      .addMockedMethod(updateKerberosDescriptorArtifacts)
-      .addMockedMethod(updateKnoxTopology)
-      .createMock();
-
-    upgradeCatalog220.updateHbaseEnvConfig();
-    expectLastCall().once();
-    upgradeCatalog220.updateFlumeEnvConfig();
-    upgradeCatalog220.addNewConfigurationsFromXml();
-    expectLastCall().once();
-    upgradeCatalog220.updateStormConfigs();
-    expectLastCall().once();
-    upgradeCatalog220.updateHadoopEnv();
-    expectLastCall().once();
-    upgradeCatalog220.updateAMSConfigs();
-    expectLastCall().once();
-    upgradeCatalog220.updateAlertDefinitions();
-    expectLastCall().once();
-    upgradeCatalog220.updateKafkaConfigs();
-    expectLastCall().once();
-    upgradeCatalog220.updateHDFSConfigs();
-    expectLastCall().once();
-    upgradeCatalog220.updateZookeeperLog4j();
-    expectLastCall().once();
-    upgradeCatalog220.updateRangerEnvConfig();
-    expectLastCall().once();
-    upgradeCatalog220.updateRangerUgsyncSiteConfig();
-    expectLastCall().once();
-    upgradeCatalog220.updateHiveConfig();
-    expectLastCall().once();
-    upgradeCatalog220.updateAccumuloConfigs();
-    expectLastCall().once();
-    upgradeCatalog220.updateKnoxTopology();
-    expectLastCall().once();
-    upgradeCatalog220.updateKerberosDescriptorArtifacts();
-    expectLastCall().once();
-
-    replay(upgradeCatalog220);
-
-    upgradeCatalog220.executeDMLUpdates();
-
-    verify(upgradeCatalog220);
-  }
-
-  @Test
-  public void testExecuteUpgradePreDMLUpdates() throws Exception {
-    Method executeStackPreDMLUpdates = UpgradeCatalog220.class.getDeclaredMethod("executeUpgradePreDMLUpdates");
-    Method executeStackUpgradeDDLUpdates = UpgradeCatalog220.class.getDeclaredMethod("executeStackUpgradeDDLUpdates");
-    Method bootstrapRepoVersionForHDP21 = UpgradeCatalog220.class.getDeclaredMethod("bootstrapRepoVersionForHDP21");
-
-    final UpgradeCatalog220 upgradeCatalog220 = createMockBuilder(UpgradeCatalog220.class)
-      .addMockedMethod(executeStackUpgradeDDLUpdates)
-      .addMockedMethod(bootstrapRepoVersionForHDP21)
-      .addMockedMethod(executeStackPreDMLUpdates).createMock();
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(UpgradeCatalog220.class).toInstance(upgradeCatalog220);
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        bind(EntityManager.class).toInstance(entityManager);
-      }
-    });
-
-    upgradeCatalog220.executeUpgradePreDMLUpdates();
-    expectLastCall().once();
-
-    upgradeCatalog220.executeStackUpgradeDDLUpdates();
-    expectLastCall().once();
-
-    upgradeCatalog220.bootstrapRepoVersionForHDP21();
-    expectLastCall().once();
-
-    replay(upgradeCatalog220);
-    mockInjector.getInstance(UpgradeCatalog220.class).executePreDMLUpdates();
-
-    verify(upgradeCatalog220);
-  }
-
-  @Test
-  public void testUpdateStormSiteConfigs() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Map<String, String> propertiesStormSite = new HashMap<String, String>() {
-      {
-        put("nimbus.monitor.freq.secs", "10");
-        put("metrics.reporter.register", "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter");
-      }
-    };
-
-    final Config mockStormSite = easyMockSupport.createNiceMock(Config.class);
-    expect(mockStormSite.getProperties()).andReturn(propertiesStormSite).once();
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    expect(mockClusterExpected.getDesiredConfigByType("storm-site")).andReturn(mockStormSite).atLeastOnce();
-    expect(mockStormSite.getProperties()).andReturn(propertiesStormSite).atLeastOnce();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog220.class).updateStormConfigs();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testUpdateKerberosDescriptorArtifact() throws Exception {
-    final KerberosDescriptorFactory kerberosDescriptorFactory = new KerberosDescriptorFactory();
-
-    KerberosServiceDescriptor serviceDescriptor;
-
-    URL systemResourceURL = ClassLoader.getSystemResource("kerberos/test_kerberos_descriptor_2_1_3.json");
-    assertNotNull(systemResourceURL);
-
-    final KerberosDescriptor kerberosDescriptorOrig = kerberosDescriptorFactory.createInstance(new File(systemResourceURL.getFile()));
-    assertNotNull(kerberosDescriptorOrig);
-
-    serviceDescriptor = kerberosDescriptorOrig.getService("HDFS");
-    assertNotNull(serviceDescriptor);
-    assertNotNull(serviceDescriptor.getIdentity("hdfs"));
-
-    serviceDescriptor = kerberosDescriptorOrig.getService("OOZIE");
-    assertNotNull(serviceDescriptor);
-    assertNotNull(serviceDescriptor.getIdentity("/HDFS/hdfs"));
-
-    UpgradeCatalog220 upgradeMock = createMockBuilder(UpgradeCatalog220.class).createMock();
-
-    Capture<Map<String, Object>> updatedData = EasyMock.newCapture();
-
-    ArtifactEntity artifactEntity = createNiceMock(ArtifactEntity.class);
-    expect(artifactEntity.getArtifactData())
-        .andReturn(kerberosDescriptorOrig.toMap())
-        .once();
-
-    artifactEntity.setArtifactData(capture(updatedData));
-    expectLastCall().once();
-
-    replay(artifactEntity, upgradeMock);
-    upgradeMock.updateKerberosDescriptorArtifact(createNiceMock(ArtifactDAO.class), artifactEntity);
-    verify(artifactEntity, upgradeMock);
-
-    KerberosDescriptor kerberosDescriptorUpdated = new KerberosDescriptorFactory().createInstance(updatedData.getValue());
-    assertNotNull(kerberosDescriptorUpdated);
-
-    serviceDescriptor = kerberosDescriptorUpdated.getService("HDFS");
-    assertNotNull(serviceDescriptor);
-    assertNull(serviceDescriptor.getIdentity("hdfs"));
-
-    KerberosComponentDescriptor namenodeComponent = serviceDescriptor.getComponent("NAMENODE");
-    assertNotNull(namenodeComponent.getIdentity("hdfs"));
-
-    serviceDescriptor = kerberosDescriptorUpdated.getService("OOZIE");
-    assertNotNull(serviceDescriptor);
-    assertNull(serviceDescriptor.getIdentity("/HDFS/hdfs"));
-    assertNotNull(serviceDescriptor.getIdentity("/HDFS/NAMENODE/hdfs"));
-
-    // check execution with empty kerberos descriptor
-    KerberosDescriptor kerberosDescriptor = new KerberosDescriptorFactory().createInstance(kerberosDescriptorOrig.toMap());
-    ArtifactEntity artifactEntityOrig = createNiceMock(ArtifactEntity.class);
-
-    kerberosDescriptor.getService("HDFS").removeIdentity("hdfs");
-
-    expect(artifactEntityOrig.getArtifactData()).andReturn(kerberosDescriptor.toMap()).once();
-   //expect(artifactDAO.merge((ArtifactEntity) anyObject())).andReturn(null).atLeastOnce();
-    replay(artifactEntityOrig);
-
-    upgradeMock.updateKerberosDescriptorArtifact(createNiceMock(ArtifactDAO.class), artifactEntityOrig);
-    verify(artifactEntityOrig);
-  }
-
-
-
-  @Test
-  public void testUpdateHbaseEnvConfig() throws AmbariException {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Map<String, String> propertiesHbaseEnv = new HashMap<String, String>() {
-      {
-        put("content", "test");
-      }
-    };
-
-    final Config mockHbaseEnv = easyMockSupport.createNiceMock(Config.class);
-    expect(mockHbaseEnv.getProperties()).andReturn(propertiesHbaseEnv).once();
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getServices()).andReturn(ImmutableMap.<String, Service>builder()
-        .put("HBASE", easyMockSupport.createNiceMock(Service.class))
-        .build());
-    expect(mockClusterExpected.getDesiredConfigByType("hbase-env")).andReturn(mockHbaseEnv).atLeastOnce();
-    expect(mockHbaseEnv.getProperties()).andReturn(propertiesHbaseEnv).anyTimes();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog220.class).updateHbaseEnvConfig();
-    easyMockSupport.verifyAll();
-
-  }
-
-  @Test
-  public void testUpdateHDFSConfiguration() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-
-    final Config mockHdfsSite = easyMockSupport.createNiceMock(Config.class);
-
-    final Map<String, String> propertiesExpectedHdfs = new HashMap<>();
-    propertiesExpectedHdfs.put("dfs.namenode.rpc-address", "nn.rpc.address");
-    propertiesExpectedHdfs.put("dfs.nameservices", "nn1");
-    propertiesExpectedHdfs.put("dfs.ha.namenodes.nn1", "value");
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    // Expected operation
-    expect(mockClusterExpected.getDesiredConfigByType("hdfs-site")).andReturn(mockHdfsSite).atLeastOnce();
-    expect(mockHdfsSite.getProperties()).andReturn(propertiesExpectedHdfs).anyTimes();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog220.class).updateHDFSConfigs();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testUpdateAmsHbaseEnvContent() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
-    Method updateAmsHbaseEnvContent = UpgradeCatalog220.class.getDeclaredMethod("updateAmsHbaseEnvContent", String.class);
-    UpgradeCatalog220 upgradeCatalog220 = new UpgradeCatalog220(injector);
-    String oldContent = "export HBASE_CLASSPATH=${HBASE_CLASSPATH}\n" +
-      "\n" +
-      "# The maximum amount of heap to use, in MB. Default is 1000.\n" +
-      "export HBASE_HEAPSIZE={{hbase_heapsize}}\n";
-
-    String expectedContent = "export HBASE_CLASSPATH=${HBASE_CLASSPATH}\n" +
-      "\n" +
-      "# The maximum amount of heap to use, in MB. Default is 1000.\n" +
-      "#export HBASE_HEAPSIZE={{hbase_heapsize}}\n" +
-      "\n" +
-      "# The maximum amount of heap to use for hbase shell.\n" +
-      "export HBASE_SHELL_OPTS=\"-Xmx256m\"\n";
-    String result = (String) updateAmsHbaseEnvContent.invoke(upgradeCatalog220, oldContent);
-    Assert.assertEquals(expectedContent, result);
-  }
-
-  @Test
-  public void testAmsSiteUpdateConfigs() throws Exception{
-
-    Map<String, String> oldPropertiesAmsSite = new HashMap<String, String>() {
-      {
-        //Including only those properties that might be present in an older version.
-        put("timeline.metrics.service.default.result.limit", String.valueOf(5760));
-        put("timeline.metrics.cluster.aggregator.minute.interval", String.valueOf(1000));
-        put("timeline.metrics.host.aggregator.minute.interval", String.valueOf(1000));
-        put("timeline.metrics.cluster.aggregator.minute.ttl", String.valueOf(1000));
-      }
-    };
-    Map<String, String> newPropertiesAmsSite = new HashMap<String, String>() {
-      {
-        put("timeline.metrics.service.default.result.limit", String.valueOf(15840));
-        put("timeline.metrics.cluster.aggregator.second.interval", String.valueOf(120));
-        put("timeline.metrics.cluster.aggregator.minute.interval", String.valueOf(300));
-        put("timeline.metrics.host.aggregator.minute.interval", String.valueOf(300));
-        put("timeline.metrics.cluster.aggregator.second.ttl", String.valueOf(2592000));
-        put("timeline.metrics.cluster.aggregator.minute.ttl", String.valueOf(7776000));
-        put("timeline.metrics.cluster.aggregator.second.checkpointCutOffMultiplier", String.valueOf(2));
-        put("timeline.metrics.cluster.aggregator.second.disabled", String.valueOf(false));
-        put("timeline.metrics.hbase.fifo.compaction.enabled", String.valueOf(true));
-      }
-    };
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
-    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
-    Config mockAmsSite = easyMockSupport.createNiceMock(Config.class);
-
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).once();
-    expect(cluster.getDesiredConfigByType("ams-site")).andReturn(mockAmsSite).atLeastOnce();
-    expect(mockAmsSite.getProperties()).andReturn(oldPropertiesAmsSite).times(2);
-
-    Injector injector = easyMockSupport.createNiceMock(Injector.class);
-    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
-
-    replay(injector, clusters, mockAmsSite, cluster);
-
-    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
-      .addMockedMethod("createConfiguration")
-      .addMockedMethod("getClusters", new Class[] { })
-      .addMockedMethod("createConfig")
-      .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
-      .createNiceMock();
-
-    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
-    Capture<Map<String, String>> propertiesCapture = EasyMock.newCapture();
-
-    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
-        EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
-
-    replay(controller, injector2);
-    new UpgradeCatalog220(injector2).updateAMSConfigs();
-    easyMockSupport.verifyAll();
-
-    Map<String, String> updatedProperties = propertiesCapture.getValue();
-    assertTrue(Maps.difference(newPropertiesAmsSite, updatedProperties).areEqual());
-
-  }
-
-  @Test
-  public void testAmsHbaseSiteUpdateConfigs() throws Exception{
-
-    Map<String, String> oldPropertiesAmsHbaseSite = new HashMap<String, String>() {
-      {
-        //Including only those properties that might be present in an older version.
-        put("zookeeper.session.timeout.localHBaseCluster", String.valueOf(20000));
-      }
-    };
-    Map<String, String> newPropertiesAmsSite = new HashMap<String, String>() {
-      {
-        put("zookeeper.session.timeout.localHBaseCluster", String.valueOf(120000));
-        put("hbase.normalizer.enabled", String.valueOf(true));
-        put("hbase.normalizer.period", String.valueOf(600000));
-        put("hbase.master.normalizer.class", "org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer");
-
-      }
-    };
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
-    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
-    Config mockAmsHbaseSite = easyMockSupport.createNiceMock(Config.class);
-
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).once();
-    expect(cluster.getDesiredConfigByType("ams-hbase-site")).andReturn(mockAmsHbaseSite).atLeastOnce();
-    expect(mockAmsHbaseSite.getProperties()).andReturn(oldPropertiesAmsHbaseSite).atLeastOnce();
-
-    Injector injector = easyMockSupport.createNiceMock(Injector.class);
-    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
-
-    replay(injector, clusters, mockAmsHbaseSite, cluster);
-
-    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
-      .addMockedMethod("createConfiguration")
-      .addMockedMethod("getClusters", new Class[] { })
-      .addMockedMethod("createConfig")
-      .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
-      .createNiceMock();
-
-    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
-    Capture<Map<String, String>> propertiesCapture = EasyMock.newCapture();
-
-    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
-        EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
-
-    replay(controller, injector2);
-    new UpgradeCatalog220(injector2).updateAMSConfigs();
-    easyMockSupport.verifyAll();
-
-    Map<String, String> updatedProperties = propertiesCapture.getValue();
-    assertTrue(Maps.difference(newPropertiesAmsSite, updatedProperties).areEqual());
-  }
-
-  @Test
-  public void testUpdateAlertDefinitions() {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    UpgradeCatalog220 upgradeCatalog220 = new UpgradeCatalog220(injector);
-    long clusterId = 1;
-
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final AlertDefinitionDAO mockAlertDefinitionDAO = easyMockSupport.createNiceMock(AlertDefinitionDAO.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final AlertDefinitionEntity mockJournalNodeProcessAlertDefinitionEntity = easyMockSupport.createNiceMock(AlertDefinitionEntity.class);
-    final AlertDefinitionEntity mockHostDiskUsageAlertDefinitionEntity = easyMockSupport.createNiceMock(AlertDefinitionEntity.class);
-
-    final String journalNodeProcessAlertSource = "{\"uri\":\"{{hdfs-site/dfs.journalnode.http-address}}\",\"default_port\":8480," +
-        "\"type\":\"PORT\",\"reporting\":{\"ok\":{\"text\":\"TCP OK - {0:.3f}s response on port {1}\"}," +
-        "\"warning\":{\"text\":\"TCP OK - {0:.3f}s response on port {1}\",\"value\":1.5}," +
-        "\"critical\":{\"text\":\"Connection failed: {0} to {1}:{2}\",\"value\":5.0}}}";
-    final String journalNodeProcessAlertSourceExpected = "{\"reporting\":{\"ok\":{\"text\":\"HTTP {0} response in {2:.3f}s\"}," +
-        "\"warning\":{\"text\":\"HTTP {0} response from {1} in {2:.3f}s ({3})\"}," +
-        "\"critical\":{\"text\":\"Connection failed to {1} ({3})\"}},\"type\":\"WEB\"," +
-        "\"uri\":{\"http\":\"{{hdfs-site/dfs.journalnode.http-address}}\"," +
-        "\"https\":\"{{hdfs-site/dfs.journalnode.https-address}}\"," +
-        "\"kerberos_keytab\":\"{{hdfs-site/dfs.web.authentication.kerberos.keytab}}\","+
-        "\"kerberos_principal\":\"{{hdfs-site/dfs.web.authentication.kerberos.principal}}\"," +
-        "\"https_property\":\"{{hdfs-site/dfs.http.policy}}\"," +
-        "\"https_property_value\":\"HTTPS_ONLY\",\"connection_timeout\":5.0}}";
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-        bind(AlertDefinitionDAO.class).toInstance(mockAlertDefinitionDAO);
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-
-    expect(mockClusterExpected.getClusterId()).andReturn(clusterId).anyTimes();
-
-    expect(mockAlertDefinitionDAO.findByName(eq(clusterId), eq("journalnode_process"))).andReturn(mockJournalNodeProcessAlertDefinitionEntity).atLeastOnce();
-    expect(mockAlertDefinitionDAO.findByName(eq(clusterId), eq("ambari_agent_disk_usage"))).andReturn(mockHostDiskUsageAlertDefinitionEntity).atLeastOnce();
-
-    expect(mockJournalNodeProcessAlertDefinitionEntity.getSource()).andReturn(journalNodeProcessAlertSource).atLeastOnce();
-    Assert.assertEquals(journalNodeProcessAlertSourceExpected, upgradeCatalog220.modifyJournalnodeProcessAlertSource(journalNodeProcessAlertSource));
-
-    mockHostDiskUsageAlertDefinitionEntity.setDescription(eq("This host-level alert is triggered if the amount of disk space " +
-        "used goes above specific thresholds. The default threshold values are 50% for WARNING and 80% for CRITICAL."));
-    expectLastCall().atLeastOnce();
-    mockHostDiskUsageAlertDefinitionEntity.setLabel(eq("Host Disk Usage"));
-    expectLastCall().atLeastOnce();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog220.class).updateAlertDefinitions();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testUpdateAmsEnvContent() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
-    Method updateAmsEnvContent = UpgradeCatalog220.class.getDeclaredMethod("updateAmsEnvContent", String.class);
-    UpgradeCatalog220 upgradeCatalog220 = new UpgradeCatalog220(injector);
-    String oldContent = "some_content";
-
-    String expectedContent = "some_content" + "\n" +
-      "# AMS Collector GC options\n" +
-      "export AMS_COLLECTOR_GC_OPTS=\"-XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 " +
-      "-XX:+UseCMSInitiatingOccupancyOnly -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps " +
-      "-XX:+UseGCLogFileRotation -XX:GCLogFileSize=10M " +
-      "-Xloggc:{{ams_collector_log_dir}}/collector-gc.log-`date +'%Y%m%d%H%M'`\"\n" +
-      "export AMS_COLLECTOR_OPTS=\"$AMS_COLLECTOR_OPTS $AMS_COLLECTOR_GC_OPTS\"\n"+
-      "\n" +
-      "# HBase normalizer enabled\n" +
-      "export AMS_HBASE_NORMALIZER_ENABLED={{ams_hbase_normalizer_enabled}}\n" +
-      "\n" +
-      "# HBase compaction policy enabled\n" +
-      "export AMS_HBASE_FIFO_COMPACTION_ENABLED={{ams_hbase_fifo_compaction_enabled}}\n";
-
-    String result = (String) updateAmsEnvContent.invoke(upgradeCatalog220, oldContent);
-    Assert.assertEquals(expectedContent, result);
-  }
-
-  public void testUpdateKafkaConfigs() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final ConfigurationResponse mockConfigurationResponse = easyMockSupport.createMock(ConfigurationResponse.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-
-    final Map<String, String> propertiesKafkaEnv = new HashMap<String, String>() {
-      {
-        put("content", "test");
-      }
-    };
-    Map<String, String> updates = Collections.singletonMap("content", "test\n\nexport KAFKA_KERBEROS_PARAMS=\"$KAFKA_KERBEROS_PARAMS {{kafka_kerberos_params}}");
-
-    final Map<String, String> propertiesAmsEnv = new HashMap<String, String>() {
-      {
-        put("kafka.metrics.reporters", "{{kafka_metrics_reporters}}");
-      }
-    };
-    final Map<String, Service> installedServices = new HashMap<String, Service>() {
-      {
-        put("KAFKA", null);
-        put("AMBARI_METRICS", null);
-      }
-    };
-
-    final Config mockAmsEnv = easyMockSupport.createNiceMock(Config.class);
-    final Config mockKafkaEnv = easyMockSupport.createNiceMock(Config.class);
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    expect(mockClusterExpected.getServices()).andReturn(installedServices).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("kafka-broker")).andReturn(mockAmsEnv).atLeastOnce();
-    expect(mockAmsEnv.getProperties()).andReturn(propertiesAmsEnv).atLeastOnce();
-
-    expect(mockClusterExpected.getDesiredConfigByType("kafka-env")).andReturn(mockKafkaEnv).atLeastOnce();
-    expect(mockKafkaEnv.getProperties()).andReturn(propertiesKafkaEnv).atLeastOnce();
-
-    UpgradeCatalog220 upgradeCatalog220 = createMockBuilder(UpgradeCatalog220.class)
-        .withConstructor(Injector.class)
-        .withArgs(mockInjector)
-        .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
-            Map.class, boolean.class, boolean.class)
-        .createMock();
-    upgradeCatalog220.updateConfigurationPropertiesForCluster(mockClusterExpected,
-      "kafka-env", updates, true, false);
-    expectLastCall().once();
-
-    expect(mockAmbariManagementController.createConfiguration(EasyMock.<ConfigurationRequest>anyObject())).andReturn(mockConfigurationResponse);
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog220.class).updateKafkaConfigs();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testUpdateFlumeEnvConfig() throws AmbariException {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Map<String, String> propertiesFlumeEnv = new HashMap<String, String>() {
-      {
-        put("content", "test");
-      }
-    };
-
-    final Config mockFlumeEnv = easyMockSupport.createNiceMock(Config.class);
-    expect(mockFlumeEnv.getProperties()).andReturn(propertiesFlumeEnv).once();
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-
-    expect(mockClusterExpected.getDesiredConfigByType("flume-env")).andReturn(mockFlumeEnv).atLeastOnce();
-    expect(mockFlumeEnv.getProperties()).andReturn(propertiesFlumeEnv).atLeastOnce();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog220.class).updateFlumeEnvConfig();
-    easyMockSupport.verifyAll();
-  }
-
-  /**
-   * @param dbAccessor
-   * @return
-   */
-  private AbstractUpgradeCatalog getUpgradeCatalog(final DBAccessor dbAccessor) {
-    Module module = new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(DBAccessor.class).toInstance(dbAccessor);
-        binder.bind(EntityManager.class).toInstance(entityManager);
-        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        binder.bind(DaoUtils.class).toInstance(createNiceMock(DaoUtils.class));
-        binder.bind(ClusterDAO.class).toInstance(clusterDAO);
-        binder.bind(RepositoryVersionHelper.class).toInstance(createNiceMock(RepositoryVersionHelper.class));
-        binder.bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
-        binder.bind(AmbariManagementController.class).toInstance(amc);
-        binder.bind(AmbariMetaInfo.class).toInstance(metaInfo);
-        binder.bind(StackManagerFactory.class).toInstance(createNiceMock(StackManagerFactory.class));
-        binder.bind(StackDAO.class).toInstance(stackDAO);
-        binder.bind(RepositoryVersionDAO.class).toInstance(repositoryVersionDAO);
-        binder.bind(HostVersionDAO.class).toInstance(hostVersionDAO);
-      }
-    };
-
-    Injector injector = Guice.createInjector(module);
-    return injector.getInstance(UpgradeCatalog220.class);
-  }
-
-  @Test
-  public void testUpdateZookeeperLog4jConfig() throws AmbariException {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Map<String, String> propertiesZookeeperLog4j = new HashMap<String, String>() {
-      {
-        put("content", "log4j.rootLogger=INFO, CONSOLE");
-      }
-    };
-
-    final Config mockZookeeperLog4j = easyMockSupport.createNiceMock(Config.class);
-    expect(mockZookeeperLog4j.getProperties()).andReturn(propertiesZookeeperLog4j).once();
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-
-    expect(mockClusterExpected.getDesiredConfigByType("zookeeper-log4j")).andReturn(mockZookeeperLog4j).atLeastOnce();
-    expect(mockZookeeperLog4j.getProperties()).andReturn(propertiesZookeeperLog4j).atLeastOnce();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog220.class).updateZookeeperLog4j();
-    easyMockSupport.verifyAll();
-
-  }
-
-  @Test
-  public void testUpdateRangerEnvConfig() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Map<String, String> propertiesHiveEnv = new HashMap<String, String>() {{
-        put("hive_security_authorization", "Ranger");
-    }};
-    final Map<String, String> propertiesRangerHdfsPlugin = new HashMap<String, String>() {{
-      put("ranger-hdfs-plugin-enabled", "Yes");
-    }};
-    final Map<String, String> propertiesRangerHbasePlugin = new HashMap<String, String>() {{
-      put("ranger-hbase-plugin-enabled", "Yes");
-    }};
-    final Map<String, String> propertiesRangerKafkaPlugin = new HashMap<String, String>() {{
-      put("ranger-kafka-plugin-enabled", "Yes");
-    }};
-    final Map<String, String> propertiesRangerYarnPlugin = new HashMap<String, String>() {{
-      put("ranger-yarn-plugin-enabled", "No");
-    }};
-
-    final Config mockHiveEnvConf = easyMockSupport.createNiceMock(Config.class);
-    final Config mockRangerHdfsPluginConf = easyMockSupport.createNiceMock(Config.class);
-    final Config mockRangerHbasePluginConf = easyMockSupport.createNiceMock(Config.class);
-    final Config mockRangerKafkaPluginConf = easyMockSupport.createNiceMock(Config.class);
-    final Config mockRangerYarnPluginConf = easyMockSupport.createNiceMock(Config.class);
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("hive-env")).andReturn(mockHiveEnvConf).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("ranger-hdfs-plugin-properties")).andReturn(mockRangerHdfsPluginConf).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("ranger-hbase-plugin-properties")).andReturn(mockRangerHbasePluginConf).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("ranger-kafka-plugin-properties")).andReturn(mockRangerKafkaPluginConf).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("ranger-yarn-plugin-properties")).andReturn(mockRangerYarnPluginConf).atLeastOnce();
-
-    expect(mockHiveEnvConf.getProperties()).andReturn(propertiesHiveEnv).times(2);
-    expect(mockRangerHdfsPluginConf.getProperties()).andReturn(propertiesRangerHdfsPlugin).times(2);
-    expect(mockRangerHbasePluginConf.getProperties()).andReturn(propertiesRangerHbasePlugin).times(2);
-    expect(mockRangerKafkaPluginConf.getProperties()).andReturn(propertiesRangerKafkaPlugin).times(2);
-    expect(mockRangerYarnPluginConf.getProperties()).andReturn(propertiesRangerYarnPlugin).times(2);
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog220.class).updateRangerEnvConfig();
-    easyMockSupport.verifyAll();
-
-  }
-
-  @Test
-  public void testGetSourceVersion() {
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    UpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor);
-    Assert.assertEquals("2.1.2.1", upgradeCatalog.getSourceVersion());
-  }
-
-  @Test
-  public void testGetTargetVersion() throws Exception {
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    UpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor);
-
-    Assert.assertEquals("2.2.0", upgradeCatalog.getTargetVersion());
-  }
-
-  // *********** Inner Classes that represent sections of the DDL ***********
-  // ************************************************************************
-
-  /**
-   * Verify that the upgrade table has two columns added to it.
-   */
-  class UpgradeSectionDDL implements SectionDDL {
-
-    Capture<DBAccessor.DBColumnInfo> upgradeTablePackageNameColumnCapture = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> upgradeTableUpgradeTypeColumnCapture = EasyMock.newCapture();
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void execute(DBAccessor dbAccessor) throws SQLException {
-      // Add columns
-      dbAccessor.addColumn(eq("upgrade"), capture(upgradeTablePackageNameColumnCapture));
-      dbAccessor.addColumn(eq("upgrade"), capture(upgradeTableUpgradeTypeColumnCapture));
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void verify(DBAccessor dbAccessor) throws SQLException {
-      // Verification section
-      DBAccessor.DBColumnInfo packageNameCol = upgradeTablePackageNameColumnCapture.getValue();
-      Assert.assertEquals(String.class, packageNameCol.getType());
-      Assert.assertEquals("upgrade_package", packageNameCol.getName());
-
-      DBAccessor.DBColumnInfo upgradeTypeCol = upgradeTableUpgradeTypeColumnCapture.getValue();
-      Assert.assertEquals(String.class, upgradeTypeCol.getType());
-      Assert.assertEquals("upgrade_type", upgradeTypeCol.getName());
-    }
-  }
-
-  @Test
-  public void testUpdateRangerUgsyncSiteConfig() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Map<String, String> propertiesRangerUgsyncSite = new HashMap<String, String>() {{
-        put("ranger.usersync.source.impl.class", "ldap");
-    }};
-
-    final Config mockRangerUgsyncSite = easyMockSupport.createNiceMock(Config.class);
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("ranger-ugsync-site")).andReturn(mockRangerUgsyncSite).atLeastOnce();
-
-    expect(mockRangerUgsyncSite.getProperties()).andReturn(propertiesRangerUgsyncSite).atLeastOnce();
-
-    Map<String, String> updates = Collections.singletonMap("ranger.usersync.source.impl.class", "org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder");
-    UpgradeCatalog220 upgradeCatalog220 = createMockBuilder(UpgradeCatalog220.class)
-            .withConstructor(Injector.class)
-            .withArgs(mockInjector)
-            .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
-                    Map.class, boolean.class, boolean.class)
-            .createMock();
-    upgradeCatalog220.updateConfigurationPropertiesForCluster(mockClusterExpected,
-            "ranger-ugsync-site", updates, true, false);
-    expectLastCall().once();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog220.class).updateRangerUgsyncSiteConfig();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testShouldDDLsBeExecutedOnUpgrade() throws Exception {
-    // GIVEN
-    Injector mockedInjector = mocksControl.createMock(Injector.class);
-    DBAccessor mockedDbAccessor = mocksControl.createMock(DBAccessor.class);
-    DaoUtils mockedDaoUtils = mocksControl.createMock(DaoUtils.class);
-    Configuration mockedConfiguration = mocksControl.createMock(Configuration.class);
-
-    Capture<String> capturedTableName = EasyMock.newCapture();
-    Capture<String> capturedPKColumn = EasyMock.newCapture();
-    Capture<List<DBAccessor.DBColumnInfo>> capturedColumns = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> capturedColumn = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> capturedHostRoleCommandColumn = EasyMock.newCapture();
-
-    Capture<String> capturedBlueprintTableName = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> capturedNewBlueprintColumn1 = EasyMock.newCapture();
-    Capture<DBAccessor.DBColumnInfo> capturedNewBlueprintColumn2 = EasyMock.newCapture();
-
-    Capture<DBAccessor.DBColumnInfo> stageSkipColumnCapture = EasyMock.newCapture();
-
-    EasyMock.expect(mockedInjector.getInstance(DaoUtils.class)).andReturn(mockedDaoUtils);
-    mockedInjector.injectMembers(anyObject(UpgradeCatalog.class));
-    EasyMock.expect(mockedConfiguration.getDatabaseType()).andReturn(Configuration.DatabaseType.POSTGRES).anyTimes();
-    EasyMock.expect(mockedConfiguration.getDatabaseUser()).andReturn("ambari");
-    EasyMock.expect(mockedConfiguration.getServerJDBCPostgresSchemaName()).andReturn("fo");
-
-
-    mockedDbAccessor.executeQuery("ALTER SCHEMA fo OWNER TO \"ambari\";");
-    mockedDbAccessor.executeQuery("ALTER ROLE \"ambari\" SET search_path to 'fo';");
-
-    // executeUpgradeDDLUpdates
-    mockedDbAccessor.addColumn(eq("upgrade"), capture(capturedColumn));
-    mockedDbAccessor.addColumn(eq("upgrade"), capture(capturedColumn));
-    mockedDbAccessor.addColumn(eq("upgrade"), capture(capturedColumn));
-
-    // addKerberosDescriptorTable
-    mockedDbAccessor.createTable(capture(capturedTableName), capture(capturedColumns), capture(capturedPKColumn));
-    mockedDbAccessor.alterColumn(eq("host_role_command"), capture(capturedHostRoleCommandColumn));
-
-    mockedDbAccessor.addColumn(capture(capturedBlueprintTableName), capture(capturedNewBlueprintColumn1));
-    mockedDbAccessor.addColumn(capture(capturedBlueprintTableName), capture(capturedNewBlueprintColumn2));
-
-    mockedDbAccessor.addColumn(eq("stage"), capture(stageSkipColumnCapture));
-
-    mocksControl.replay();
-
-    UpgradeCatalog220 testSubject = new UpgradeCatalog220(mockedInjector);
-    EasyMockSupport.injectMocks(testSubject);
-
-    //todo refactor the DI approach, don't directly access these members!!!
-    testSubject.dbAccessor = mockedDbAccessor;
-    testSubject.configuration = mockedConfiguration;
-
-    // WHEN
-    testSubject.upgradeSchema();
-
-    // THEN
-    Assert.assertEquals("The table name is wrong!", "kerberos_descriptor", capturedTableName.getValue());
-    Assert.assertEquals("The primary key is wrong!", "kerberos_descriptor_name", capturedPKColumn.getValue());
-    Assert.assertTrue("The number of columns is wrong!", capturedColumns.getValue().size() == 2);
-
-    Assert.assertEquals("The table name is wrong!", "blueprint", capturedBlueprintTableName.getValue());
-
-    Assert.assertEquals("The column name is wrong!", "security_type", capturedNewBlueprintColumn1.getValue().getName());
-    Assert.assertEquals("The column name is wrong!", "security_descriptor_reference", capturedNewBlueprintColumn2
-      .getValue().getName());
-
-    Assert.assertEquals("The column name is wrong!", "supports_auto_skip_failure",
-        stageSkipColumnCapture.getValue().getName());
-  }
-
-  @Test
-  public void testUpdateHiveConfig() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Map<String, String> propertiesHiveSite = new HashMap<String, String>() {{
-      put("hive.server2.logging.operation.log.location", "${system:java.io.tmpdir}/${system:user.name}/operation_logs");
-    }};
-    final Map<String, String> propertiesHiveSiteExpected = new HashMap<String, String>() {{
-      put("hive.server2.logging.operation.log.location", "/tmp/hive/operation_logs");
-    }};
-    final Map<String, String> propertiesHiveEnv = new HashMap<String, String>() {{
-      put("content", "test content");
-    }};
-    final Config hiveSiteConf = easyMockSupport.createNiceMock(Config.class);
-    final Config hiveEnvConf = easyMockSupport.createNiceMock(Config.class);
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("hive-site")).andReturn(hiveSiteConf).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("hive-env")).andReturn(hiveEnvConf).atLeastOnce();
-    expect(mockClusterExpected.getServices()).andReturn(ImmutableMap.<String, Service>builder()
-        .put("HIVE", easyMockSupport.createNiceMock(Service.class))
-        .build());
-    expect(hiveSiteConf.getProperties()).andReturn(propertiesHiveSite).once();
-    expect(hiveEnvConf.getProperties()).andReturn(propertiesHiveEnv).once();
-
-    UpgradeCatalog220 upgradeCatalog220 = createMockBuilder(UpgradeCatalog220.class)
-            .withConstructor(Injector.class)
-            .withArgs(mockInjector)
-            .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
-                    Map.class, boolean.class, boolean.class)
-            .createMock();
-    upgradeCatalog220.updateConfigurationPropertiesForCluster(mockClusterExpected,
-            "hive-site", propertiesHiveSiteExpected, true, false);
-    expectLastCall().once();
-    upgradeCatalog220.updateConfigurationPropertiesForCluster(mockClusterExpected,
-            "hive-env", propertiesHiveEnv, true, true);
-    expectLastCall().once();
-
-    easyMockSupport.replayAll();
-    replay(upgradeCatalog220);
-    upgradeCatalog220.updateHiveConfig();
-    easyMockSupport.verifyAll();
-
-  }
-
-  @Test
-  public void testUpdateHiveEnvContentHDP23() throws Exception {
-    UpgradeCatalog220 upgradeCatalog220 = new UpgradeCatalog220(injector);
-    String testContent = "# The heap size of the jvm stared by hive shell script can be controlled via:\n" +
-            "\n" +
-            "# Larger heap size may be required when running queries over large number of files or partitions.\n";
-    String expectedResult = "# The heap size of the jvm stared by hive shell script can be controlled via:\n" +
-            "\n" +
-            "if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
-            "  export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore\n" +
-            "else\n" +
-            "  export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\n" +
-            "fi\n" +
-            "\n" +
-            "export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"\n" +
-            "\n" +
-            "# Larger heap size may be required when running queries over large number of files or partitions.\n";
-    Assert.assertEquals(expectedResult, upgradeCatalog220.updateHiveEnvContentHDP23(testContent));
-  }
-
-
-  @Test
-  public void testUpdateHiveEnvContent() throws Exception {
-    UpgradeCatalog220 upgradeCatalog220 = new UpgradeCatalog220(injector);
-    // Test first case
-    String testContent = "# The heap size of the jvm stared by hive shell script can be controlled via:\n" +
-            "\n" +
-            "if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
-            "  export HADOOP_HEAPSIZE=\"{{hive_metastore_heapsize}}\"\n" +
-            "else\n" +
-            "  export HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\n" +
-            "fi\n" +
-            "\n" +
-            "export HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n" +
-            "\n" +
-            "# Larger heap size may be required when running queries over large number of files or partitions.\n";
-    String expectedResult = "# The heap size of the jvm stared by hive shell script can be controlled via:\n" +
-            "\n" +
-            "if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
-            "  export HADOOP_HEAPSIZE=\"{{hive_metastore_heapsize}}\"\n" +
-            "else\n" +
-            "  export HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\n" +
-            "fi\n" +
-            "\n" +
-            "export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"\n" +
-            "\n" +
-            "# Larger heap size may be required when running queries over large number of files or partitions.\n";
-    Assert.assertEquals(expectedResult, upgradeCatalog220.updateHiveEnvContent(testContent));
-    // Test second case
-    testContent = "# The heap size of the jvm stared by hive shell script can be controlled via:\n" +
-            "export SERVICE=$SERVICE\n" +
-            "if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
-            "  export HADOOP_HEAPSIZE=\"{{hive_metastore_heapsize}}\"\n" +
-            "else\n" +
-            "  export HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\n" +
-            "fi\n" +
-            "\n" +
-            "# Larger heap size may be required when running queries over large number of files or partitions.\n";
-    expectedResult = "# The heap size of the jvm stared by hive shell script can be controlled via:\n" +
-            "export SERVICE=$SERVICE\n" +
-            "if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
-            "  export HADOOP_HEAPSIZE=\"{{hive_metastore_heapsize}}\"\n" +
-            "else\n" +
-            "  export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\n" +
-            "fi\n" +
-            "\n" +
-            "export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"\n" +
-            "# Larger heap size may be required when running queries over large number of files or partitions.\n";
-    Assert.assertEquals(expectedResult, upgradeCatalog220.updateHiveEnvContent(testContent));
-  }
-
-  @Test
-  public void testupdateKnoxTopology_NoRangerPlugin() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Map<String, String> propertiesTopologyWithoutAuthorizationProvider = new HashMap<String, String>() {{
-      put("content", "<topology> <gateway>  </gateway> </topology>");
-    }};
-    final Map<String, String> propertiesTopologyExpected = new HashMap<String, String>() {{
-      put("content", "<topology> <gateway>  <provider>\n" +
-              "               <role>authorization</role>\n" +
-              "               <name>AclsAuthz</name>\n" +
-              "               <enabled>true</enabled>\n" +
-              "          </provider>\n" +
-              "     </gateway> </topology>\n");
-    }};
-    final Config mockTopologyConf = easyMockSupport.createNiceMock(Config.class);
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("cl1", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("topology")).andReturn(mockTopologyConf).atLeastOnce();
-    expect(mockTopologyConf.getProperties()).andReturn(propertiesTopologyWithoutAuthorizationProvider).once();
-
-
-    UpgradeCatalog220 upgradeCatalog220 = createMockBuilder(UpgradeCatalog220.class)
-            .withConstructor(Injector.class)
-            .withArgs(mockInjector)
-            .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
-                    Map.class, boolean.class, boolean.class)
-            .createMock();
-    upgradeCatalog220.updateConfigurationPropertiesForCluster(mockClusterExpected,
-            "topology", propertiesTopologyExpected, true, false);
-    expectLastCall().once();
-
-    easyMockSupport.replayAll();
-    replay(upgradeCatalog220);
-    upgradeCatalog220.updateKnoxTopology();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testupdateKnoxTopology_ProviderAlreadyExists() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Map<String, String> propertiesTopologyWitAuthorizationProvider = new HashMap<String, String>() {{
-      put("content", "<topology> <gateway>  <provider>" +
-              "<role>authorization</role>" +
-              "<name>AclsAuthz</name>" +
-              "<enabled>true</enabled>" +
-              "</provider>" +
-              "</gateway> </topology>\n");
-    }};
-
-    final Config mockTopologyConf = easyMockSupport.createNiceMock(Config.class);
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("cl1", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("topology")).andReturn(mockTopologyConf).atLeastOnce();
-    expect(mockTopologyConf.getProperties()).andReturn(propertiesTopologyWitAuthorizationProvider).once();
-
-    // ATTENTION: this mock should not be called at all. If it is, something is wrong with the code.
-    UpgradeCatalog220 upgradeCatalog220 = createMockBuilder(UpgradeCatalog220.class)
-            .withConstructor(Injector.class)
-            .withArgs(mockInjector)
-            .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
-                    Map.class, boolean.class, boolean.class)
-            .createMock();
-
-
-
-    easyMockSupport.replayAll();
-    replay(upgradeCatalog220);
-    upgradeCatalog220.updateKnoxTopology();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testupdateKnoxTopology_RangerPluginAvailable() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Map<String, String> propertiesTopologyWithoutAuthorizationProvider = new HashMap<String, String>() {{
-      put("content", "<topology> <gateway>  </gateway> </topology>");
-    }};
-    final Map<String, String> propertiesRangerKnoxPluginProperties = new HashMap<String, String>() {{
-      put("ranger-knox-plugin-enabled", "Yes");
-    }};
-    final Map<String, String> propertiesTopologyExpected = new HashMap<String, String>() {{
-      put("content", "<topology> <gateway>  <provider>\n" +
-              "               <role>authorization</role>\n" +
-              "               <name>XASecurePDPKnox</name>\n" +
-              "               <enabled>true</enabled>\n" +
-              "          </provider>\n" +
-              "     </gateway> </topology>\n");
-    }};
-    final Config mockTopologyConf = easyMockSupport.createNiceMock(Config.class);
-    final Config mockRangerKnoxPluginConf = easyMockSupport.createNiceMock(Config.class);
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("cl1", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("topology")).andReturn(mockTopologyConf).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("ranger-knox-plugin-properties")).andReturn(mockRangerKnoxPluginConf).atLeastOnce();
-    expect(mockTopologyConf.getProperties()).andReturn(propertiesTopologyWithoutAuthorizationProvider).once();
-    expect(mockRangerKnoxPluginConf.getProperties()).andReturn(propertiesRangerKnoxPluginProperties).once();
-
-
-    UpgradeCatalog220 upgradeCatalog220 = createMockBuilder(UpgradeCatalog220.class)
-            .withConstructor(Injector.class)
-            .withArgs(mockInjector)
-            .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
-                    Map.class, boolean.class, boolean.class)
-            .createMock();
-    upgradeCatalog220.updateConfigurationPropertiesForCluster(mockClusterExpected,
-            "topology", propertiesTopologyExpected, true, false);
-    expectLastCall().once();
-
-    easyMockSupport.replayAll();
-    replay(upgradeCatalog220);
-    upgradeCatalog220.updateKnoxTopology();
-    easyMockSupport.verifyAll();
-
-  }
-
-  @Test
-  public void testUpdateAccumuloConfigs() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-
-    // We start with no client properties (< 2.2.0).
-    final Map<String, String> originalClientProperties = new HashMap<>();
-    // And should get the following property on upgrade.
-    final Map<String, String> updatedClientProperties = new HashMap<String, String>() {
-      {
-        put("kerberos.server.primary", "{{bare_accumulo_principal}}");
-      }
-    };
-
-    final Config clientConfig = easyMockSupport.createNiceMock(Config.class);
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    // Enable KERBEROS
-    expect(mockClusterExpected.getSecurityType()).andReturn(SecurityType.KERBEROS).once();
-    // Mock out our empty original properties
-    expect(mockClusterExpected.getDesiredConfigByType("client")).andReturn(clientConfig).atLeastOnce();
-    expect(clientConfig.getProperties()).andReturn(originalClientProperties).atLeastOnce();
-
-    UpgradeCatalog220 upgradeCatalog220 = createMockBuilder(UpgradeCatalog220.class)
-            .withConstructor(Injector.class)
-            .withArgs(mockInjector)
-            .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
-                    Map.class, boolean.class, boolean.class)
-            .createMock();
-    // Verify that we get this method called with the updated properties
-    upgradeCatalog220.updateConfigurationPropertiesForCluster(mockClusterExpected,
-            "client", updatedClientProperties, true, false);
-    expectLastCall().once();
-
-    // Run it
-    easyMockSupport.replayAll();
-    replay(upgradeCatalog220);
-    upgradeCatalog220.updateAccumuloConfigs();
-    easyMockSupport.verifyAll();
-  }
-}
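
The removed tests above all exercise the same EasyMock idiom: build a partial mock of the upgrade catalog with createMockBuilder, mock only updateConfigurationPropertiesForCluster, record the expected call, then run the real config-update method and verify. A minimal sketch of that idiom follows; the catalog class, its updateDemoConfig method, and the "demo-site" config type are illustrative placeholders, not code that exists in the tree.

import static org.easymock.EasyMock.createMockBuilder;
import static org.easymock.EasyMock.expectLastCall;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;

import java.util.Collections;
import java.util.Map;

import org.apache.ambari.server.state.Cluster;

import com.google.inject.Injector;

public class PartialMockSketch {

  void verifyConfigRewrite(Injector injector, Cluster cluster) throws Exception {
    Map<String, String> expected = Collections.singletonMap("some.key", "some.value");

    // Partial mock: only the config-writing method is mocked; the method under
    // test runs for real and must delegate to it with the expected arguments.
    SomeUpgradeCatalog catalog = createMockBuilder(SomeUpgradeCatalog.class) // hypothetical catalog class
        .withConstructor(Injector.class)
        .withArgs(injector)
        .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class,
            String.class, Map.class, boolean.class, boolean.class)
        .createMock();

    catalog.updateConfigurationPropertiesForCluster(cluster, "demo-site", expected, true, false);
    expectLastCall().once();

    replay(catalog);
    catalog.updateDemoConfig();   // hypothetical method under test
    verify(catalog);
  }
}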


[53/63] [abbrv] ambari git commit: AMBARI-21371 - Adding ranger failed when user has custom properties (rzang)

Posted by ab...@apache.org.
AMBARI-21371 - Adding ranger failed when user has custom properties (rzang)

Change-Id: I3b73ade85c7818939e6c384f8b6bcc9966b448cb


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9d224f73
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9d224f73
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9d224f73

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 9d224f73b68279bcec834a28c35dd76122d9e73d
Parents: 2f0de69
Author: Richard Zang <rz...@apache.org>
Authored: Wed Jun 28 13:50:38 2017 -0700
Committer: Richard Zang <rz...@apache.org>
Committed: Wed Jun 28 13:50:38 2017 -0700

----------------------------------------------------------------------
 ambari-web/app/mixins/common/configs/enhanced_configs.js | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9d224f73/ambari-web/app/mixins/common/configs/enhanced_configs.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/common/configs/enhanced_configs.js b/ambari-web/app/mixins/common/configs/enhanced_configs.js
index 4561f77..8fc7a4c 100644
--- a/ambari-web/app/mixins/common/configs/enhanced_configs.js
+++ b/ambari-web/app/mixins/common/configs/enhanced_configs.js
@@ -553,12 +553,13 @@ App.EnhancedConfigsMixin = Em.Mixin.create(App.ConfigWithOverrideRecommendationP
     if (Em.isNone(recommended)) {
       stepConfig.get('configs').removeObject(config);
     } else if (Em.isNone(initial)) {
+      var stackConfigProperty = App.configsCollection.getConfigByName(name, filename);
       stepConfig.get('configs').pushObject(this._createNewProperty(
         name,
         filename,
         Em.get(prop, 'serviceName'),
         recommended,
-        App.configsCollection.getConfigByName(name, filename).propertyDependsOn));
+        stackConfigProperty? stackConfigProperty.propertyDependsOn : []));
     } else {
       Em.set(config, 'value', recommended);
     }
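
The one-line guard above is the whole fix: App.configsCollection.getConfigByName(name, filename) returns nothing for a user-defined (custom) property that has no stack definition, so the previous unconditional .propertyDependsOn dereference failed and broke adding Ranger. The same defensive-lookup shape, sketched in Java for consistency with the rest of this digest (the types and names below are illustrative, not Ambari API):

import java.util.Collections;
import java.util.List;
import java.util.Map;

final class ConfigLookupSketch {

  // Illustrative stand-in for a stack config definition.
  static final class StackProperty {
    final List<String> propertyDependsOn;
    StackProperty(List<String> dependsOn) { this.propertyDependsOn = dependsOn; }
  }

  // Custom properties are absent from the stack catalog, so fall back to an
  // empty dependency list instead of dereferencing a missing entry.
  static List<String> dependenciesFor(Map<String, StackProperty> catalog, String name) {
    StackProperty definition = catalog.get(name);
    return definition != null ? definition.propertyDependsOn : Collections.<String>emptyList();
  }
}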


[43/63] [abbrv] ambari git commit: AMBARI-21154: Add JAAS config properties for Atlas Hive hook in HiveCli to use kerberos ticket-cache.

Posted by ab...@apache.org.
AMBARI-21154: Add JAAS config properties for Atlas Hive hook in HiveCli to use kerberos ticket-cache.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9aa786f7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9aa786f7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9aa786f7

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 9aa786f7ea4c21159e6a014b4cbb6a6de155b22c
Parents: 2f40250
Author: Vishal Suvagia <vi...@yahoo.com>
Authored: Fri Jun 23 17:41:50 2017 +0530
Committer: Mugdha Varadkar <mu...@apache.org>
Committed: Wed Jun 28 11:32:26 2017 +0530

----------------------------------------------------------------------
 .../HIVE/2.1.0.3.0/service_advisor.py           |  19 +++
 .../stacks/HDP/2.5/upgrades/config-upgrade.xml  |  10 +-
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml |   7 +-
 .../stacks/HDP/2.5/upgrades/upgrade-2.6.xml     |   5 +-
 .../stacks/HDP/2.6/services/stack_advisor.py    |  21 ++++
 .../stacks/HDP/2.6/upgrades/config-upgrade.xml  |   9 ++
 .../HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml |   6 +
 .../stacks/HDP/2.6/upgrades/upgrade-2.6.xml     |   1 +
 .../stacks/2.6/common/test_stack_advisor.py     | 123 ++++++++++++++-----
 9 files changed, 165 insertions(+), 36 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9aa786f7/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/service_advisor.py
index 6d3e13d..48058f7 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/service_advisor.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/2.1.0.3.0/service_advisor.py
@@ -683,6 +683,25 @@ class HiveRecommender(service_advisor.ServiceAdvisor):
     else:
       self.logger.info("Not setting Hive Repo user for Ranger.")
 
+    security_enabled = self.isSecurityEnabled(services)
+    enable_atlas_hook = False
+
+    if 'hive-env' in configurations and 'hive.atlas.hook' in configurations['hive-env']['properties']:
+      enable_atlas_hook = configurations['hive-env']['properties']['hive.atlas.hook'].lower() == 'true'
+    elif 'hive-env' in services['configurations'] and 'hive.atlas.hook' in services['configurations']['hive-env']['properties']:
+      enable_atlas_hook = services['configurations']['hive-env']['properties']['hive.atlas.hook'].lower() == 'true'
+
+    if 'hive-atlas-application.properties' in services['configurations']:
+      putHiveAtlasHookProperty = self.putProperty(configurations, "hive-atlas-application.properties", services)
+      putHiveAtlasHookPropertyAttribute = self.putPropertyAttribute(configurations,"hive-atlas-application.properties")
+      if security_enabled and enable_atlas_hook:
+        putHiveAtlasHookProperty('atlas.jaas.ticketBased-KafkaClient.loginModuleControlFlag', 'required')
+        putHiveAtlasHookProperty('atlas.jaas.ticketBased-KafkaClient.loginModuleName', 'com.sun.security.auth.module.Krb5LoginModule')
+        putHiveAtlasHookProperty('atlas.jaas.ticketBased-KafkaClient.option.useTicketCache', 'true')
+      else:
+        putHiveAtlasHookPropertyAttribute('atlas.jaas.ticketBased-KafkaClient.loginModuleControlFlag', 'delete', 'true')
+        putHiveAtlasHookPropertyAttribute('atlas.jaas.ticketBased-KafkaClient.loginModuleName', 'delete', 'true')
+        putHiveAtlasHookPropertyAttribute('atlas.jaas.ticketBased-KafkaClient.option.useTicketCache', 'delete', 'true')
 
   def getDBDriver(self, databaseType):
     driverDict = {
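
The three atlas.jaas.ticketBased-KafkaClient.* properties set in this hunk point the Atlas hook's Kafka client at the caller's Kerberos ticket cache, which is what a HiveCLI user has after kinit (no keytab). Roughly, they correspond to a JAAS entry like the sketch below; this is a hand-written Java equivalent for illustration, not how Atlas actually assembles its login configuration.

import java.util.Collections;
import java.util.Map;

import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;

// Illustrative only: an in-memory JAAS entry equivalent to the three properties above.
final class TicketCacheJaasSketch extends Configuration {

  @Override
  public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
    if (!"ticketBased-KafkaClient".equals(name)) {
      return null;
    }
    Map<String, String> options = Collections.singletonMap("useTicketCache", "true");
    return new AppConfigurationEntry[] {
        new AppConfigurationEntry(
            "com.sun.security.auth.module.Krb5LoginModule",           // ...loginModuleName
            AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,    // ...loginModuleControlFlag
            options)                                                  // ...option.useTicketCache
    };
  }
}

In JAAS file syntax the same entry would read: ticketBased-KafkaClient { com.sun.security.auth.module.Krb5LoginModule required useTicketCache=true; };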

http://git-wip-us.apache.org/repos/asf/ambari/blob/9aa786f7/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
index a29f74b..30796cc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/config-upgrade.xml
@@ -434,7 +434,15 @@
             <regex-replace key="content" find="property.llap.daemon.log.maxfilesize = ([0-9]+)MB" replace-with="property.llap.daemon.log.maxfilesize = {{hive_llap_log_maxfilesize}}MB"/>
             <regex-replace key="content" find="property.llap.daemon.log.maxbackupindex = ([0-9]+)" replace-with="property.llap.daemon.log.maxbackupindex = {{hive_llap_log_maxbackupindex}}"/>
           </definition>
-
+          <definition xsi:type="configure" id="hdp_2_6_maint_jaas_config_for_hive_hook" summary="Updating hive atlas application properties">
+            <type>hive-atlas-application.properties</type>
+            <set key ="atlas.jaas.ticketBased-KafkaClient.loginModuleControlFlag" value="required"
+              if-type="cluster-env" if-key="security_enabled" if-value="true"/>
+            <set key ="atlas.jaas.ticketBased-KafkaClient.loginModuleName" value="com.sun.security.auth.module.Krb5LoginModule"
+              if-type="cluster-env" if-key="security_enabled" if-value="true"/>
+            <set key ="atlas.jaas.ticketBased-KafkaClient.option.useTicketCache" value="true"
+              if-type="cluster-env" if-key="security_enabled" if-value="true"/>
+          </definition>
 
           <definition xsi:type="configure" id="hdp_2_6_0_0_hive_set_hive_enforce_bucketing_property">
             <type>hive-site</type>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9aa786f7/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
index 8c659ee..1f37389 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml
@@ -621,7 +621,12 @@
           <summary>Removing atlas.cluster.name property</summary>
         </task>
       </execute-stage>
-      
+
+      <execute-stage service="HIVE" component="HIVE_SERVER" title="Updating hive atlas application properties">
+        <task xsi:type="configure" id="hdp_2_6_maint_jaas_config_for_hive_hook">
+          <summary>Updating hive atlas application properties</summary>
+        </task>
+      </execute-stage>
       <!-- SPARK -->
       <execute-stage service="SPARK" component="LIVY_SERVER" title="Apply config changes for Livy Server">
         <task xsi:type="configure" id="hdp_2_5_0_0_rename_spark_livy_configs"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9aa786f7/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
index 3054ca3..22c9a8d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/upgrades/upgrade-2.6.xml
@@ -825,10 +825,11 @@
           <task xsi:type="configure" id="hdp_2_6_0_0_hive_set_hive_enforce_bucketing_property" />
           <task xsi:type="configure" id="hdp_2_6_0_0_copy_hive_tez_container_size_to_hiveInteractive" />
           <task xsi:type="configure" id="hdp_2_5_0_0_remove_atlas_cluster_name" />
+          <task xsi:type="configure" id="hdp_2_6_maint_jaas_config_for_hive_hook"/>
         </pre-upgrade>
-        
+
         <pre-downgrade />
-        
+
         <upgrade>
           <task xsi:type="restart-task" />
         </upgrade>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9aa786f7/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index f8bbca5..82656aa 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -547,6 +547,9 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
 
   def recommendHIVEConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP26StackAdvisor, self).recommendHIVEConfigurations(configurations, clusterData, services, hosts)
+    putHiveAtlasHookProperty = self.putProperty(configurations, "hive-atlas-application.properties", services)
+    putHiveAtlasHookPropertyAttribute = self.putPropertyAttribute(configurations,"hive-atlas-application.properties")
+
     if 'hive-env' in services['configurations'] and 'hive_user' in services['configurations']['hive-env']['properties']:
       hive_user = services['configurations']['hive-env']['properties']['hive_user']
     else:
@@ -566,6 +569,24 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
     else:
       self.logger.info("Not setting Hive Repo user for Ranger.")
 
+    security_enabled = self.isSecurityEnabled(services)
+    enable_atlas_hook = False
+
+    if 'hive-env' in configurations and 'hive.atlas.hook' in configurations['hive-env']['properties']:
+      enable_atlas_hook = configurations['hive-env']['properties']['hive.atlas.hook'].lower() == 'true'
+    elif 'hive-env' in services['configurations'] and 'hive.atlas.hook' in services['configurations']['hive-env']['properties']:
+      enable_atlas_hook = services['configurations']['hive-env']['properties']['hive.atlas.hook'].lower() == 'true'
+
+    if 'hive-atlas-application.properties' in services['configurations']:
+      if security_enabled and enable_atlas_hook:
+        putHiveAtlasHookProperty('atlas.jaas.ticketBased-KafkaClient.loginModuleControlFlag', 'required')
+        putHiveAtlasHookProperty('atlas.jaas.ticketBased-KafkaClient.loginModuleName', 'com.sun.security.auth.module.Krb5LoginModule')
+        putHiveAtlasHookProperty('atlas.jaas.ticketBased-KafkaClient.option.useTicketCache', 'true')
+      else:
+        putHiveAtlasHookPropertyAttribute('atlas.jaas.ticketBased-KafkaClient.loginModuleControlFlag', 'delete', 'true')
+        putHiveAtlasHookPropertyAttribute('atlas.jaas.ticketBased-KafkaClient.loginModuleName', 'delete', 'true')
+        putHiveAtlasHookPropertyAttribute('atlas.jaas.ticketBased-KafkaClient.option.useTicketCache', 'delete', 'true')
+
   def recommendHBASEConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP26StackAdvisor, self).recommendHBASEConfigurations(configurations, clusterData, services, hosts)
     if 'hbase-env' in services['configurations'] and 'hbase_user' in services['configurations']['hbase-env']['properties']:

http://git-wip-us.apache.org/repos/asf/ambari/blob/9aa786f7/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
index 1cbd78b..6dd2129 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/config-upgrade.xml
@@ -66,6 +66,15 @@
             <set key="ranger.plugin.hive.urlauth.filesystem.schemes" value="hdfs:,file:,wasb:,adl:"
               if-type="ranger-hive-security" if-key="ranger.plugin.hive.service.name" if-key-state="present"/>
           </definition>
+          <definition xsi:type="configure" id="hdp_2_6_maint_jaas_config_for_hive_hook" summary="Updating hive atlas application properties">
+            <type>hive-atlas-application.properties</type>
+            <set key ="atlas.jaas.ticketBased-KafkaClient.loginModuleControlFlag" value="required"
+              if-type="cluster-env" if-key="security_enabled" if-value="true"/>
+            <set key ="atlas.jaas.ticketBased-KafkaClient.loginModuleName" value="com.sun.security.auth.module.Krb5LoginModule"
+              if-type="cluster-env" if-key="security_enabled" if-value="true"/>
+            <set key ="atlas.jaas.ticketBased-KafkaClient.option.useTicketCache" value="true"
+              if-type="cluster-env" if-key="security_enabled" if-value="true"/>
+          </definition>
         </changes>
       </component>
     </service>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9aa786f7/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
index ede267a..e262971 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml
@@ -307,6 +307,12 @@
         <task xsi:type="configure" id="hdp_2_6_maint_ranger_hive_plugin_urlauth_filesystem_schemes"/>
       </execute-stage>
 
+      <execute-stage service="HIVE" component="HIVE_SERVER" title="Updating hive atlas application properties">
+        <task xsi:type="configure" id="hdp_2_6_maint_jaas_config_for_hive_hook">
+          <summary>Updating hive atlas application properties</summary>
+        </task>
+      </execute-stage>
+
       <!-- HBASE -->
       <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for Ranger Hbase plugin">
         <task xsi:type="configure" id="hdp_2_6_maint_ranger_hbase_plugin_cluster_name"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9aa786f7/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
index b70943b..6b01ce9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/upgrades/upgrade-2.6.xml
@@ -792,6 +792,7 @@
         <pre-upgrade>
           <task xsi:type="configure" id="hdp_2_6_maint_ranger_hive_plugin_cluster_name"/>
           <task xsi:type="configure" id="hdp_2_6_maint_ranger_hive_plugin_urlauth_filesystem_schemes"/>
+          <task xsi:type="configure" id="hdp_2_6_maint_jaas_config_for_hive_hook"/>
         </pre-upgrade>
         <pre-downgrade/> <!--  no-op to prevent config changes on downgrade -->
         <upgrade>

http://git-wip-us.apache.org/repos/asf/ambari/blob/9aa786f7/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
index d4d28c9..3ba18d8 100644
--- a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
@@ -966,19 +966,33 @@ class TestHDP26StackAdvisor(TestCase):
 
   def test_recommendHiveConfigurations(self):
     configurations = {
-      "ranger-hive-plugin-properties": {
-        "properties": {
-          "ranger-hive-plugin-enabled": "Yes",
-          "REPOSITORY_CONFIG_USERNAME":"hive"
+      "hive-env" : {
+        "properties" : {
+          "hive.atlas.hook" : "false",
+          "hive_user": "custom_hive",
+          "hive_security_authorization": "Ranger"
         }
       },
-      "hive-env":{
-        "properties":{
-          "hive_security_authorization":"ranger",
-          "hive_user":"custom_hive"
+      "ranger-env" : {
+        "properties" : {
+          "ranger-hive-plugin-enabled" : "Yes"
+        }
+      },
+      "cluster-env" : {
+        "properties" : {
+          "security_enabled" : "false"
+        }
+      },
+      "ranger-hive-plugin-properties" : {
+        "properties" : {
+          "REPOSITORY_CONFIG_USERNAME": "hive"
         }
+      },
+      "hive-atlas-application.properties" : {
+        "properties": {}
       }
     }
+
     clusterData = {
       "cpu": 4,
       "mapMemory": 3000,
@@ -1012,31 +1026,44 @@ class TestHDP26StackAdvisor(TestCase):
 
     services = {
       "services":
-        [{
-           "StackServices": {
-             "service_name": "YARN"
-           }, "components": []
-         },
-         {
-            "StackServices": {
+        [
+          {
+            "StackServices" : {
+             "service_name" : "YARN"
+            },
+            "components" : []
+          },
+          {
+            "StackServices" : {
               "service_name" : "HIVE",
               "service_version" : "1.2.1.2.6"
             },
-            "components": [
-            ]
+            "components": []
+          },
+          {
+            "StackServices" : {
+              "service_name" : "ATLAS",
+              "service_version": "0.8.0"
+            },
+            "components": []
+          },
+          {
+            "StackServices" : {
+              "service_name" : "RANGER",
+              "service_version": "0.7.0"
+            },
+            "components": []
           }
         ],
       "Versions": {
         "stack_name" : "HDP",
         "stack_version": "2.6"
       },
-      "changed-configurations": [
-      ],
+      "changed-configurations": [],
       "configurations": configurations,
       "ambari-server-properties": {"ambari-server.user":"ambari_user"}
     }
 
-
     expected = {
       'yarn-env': {
         'properties': {
@@ -1047,7 +1074,6 @@ class TestHDP26StackAdvisor(TestCase):
       },
       'ranger-hive-plugin-properties': {
         'properties': {
-          'ranger-hive-plugin-enabled': 'Yes',
           'REPOSITORY_CONFIG_USERNAME': 'custom_hive'
         }
       },
@@ -1068,12 +1094,11 @@ class TestHDP26StackAdvisor(TestCase):
       },
       'hive-env': {
         'properties': {
-          'hive.atlas.hook': 'false',
-          'hive_security_authorization': 'ranger',
+          'hive.atlas.hook': 'true',
+          'hive_security_authorization': 'Ranger',
           'hive_exec_orc_storage_strategy': 'SPEED',
           'hive_timeline_logging_enabled': 'true',
-          'hive_txn_acid': 'off',
-          'hive_user': 'custom_hive'
+          'hive_txn_acid': 'off'
         }
       },
       'hiveserver2-site': {
@@ -1098,7 +1123,7 @@ class TestHDP26StackAdvisor(TestCase):
           'hive.exec.orc.encoding.strategy': 'SPEED',
           'hive.server2.tez.initialize.default.sessions': 'false',
           'hive.security.authorization.enabled': 'true',
-          'hive.exec.post.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook',
+          'hive.exec.post.hooks': 'org.apache.hadoop.hive.ql.hooks.ATSHook,org.apache.atlas.hive.hook.HiveHook',
           'hive.server2.tez.default.queues': 'default',
           'hive.prewarm.enabled': 'false',
           'hive.exec.orc.compression.strategy': 'SPEED',
@@ -1144,6 +1169,21 @@ class TestHDP26StackAdvisor(TestCase):
           },
           'atlas.rest.address': {
             'delete': 'true'
+          },
+          'hive.server2.authentication.pam.services': {
+            'delete': 'true'
+          },
+          'hive.server2.custom.authentication.class': {
+            'delete': 'true'
+          },
+          'hive.server2.authentication.kerberos.principal': {
+            'delete': 'true'
+          },
+          'hive.server2.authentication.kerberos.keytab': {
+            'delete': 'true'
+          },
+          'hive.server2.authentication.ldap.url': {
+            'delete': 'true'
           }
         }
       },
@@ -1174,16 +1214,35 @@ class TestHDP26StackAdvisor(TestCase):
             'delete': 'true'
           }
         }
+      },
+      'hive-atlas-application.properties' : {
+        'properties' : {},
+        'property_attributes' : {
+            'atlas.jaas.ticketBased-KafkaClient.loginModuleControlFlag': {'delete': 'true'},
+            'atlas.jaas.ticketBased-KafkaClient.loginModuleName': {'delete': 'true'},
+            'atlas.jaas.ticketBased-KafkaClient.option.useTicketCache': {'delete': 'true'}
+        }
       }
     }
 
-    self.stackAdvisor.recommendHIVEConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations,expected)
-    configurations['hive-env']['properties']['hive_user'] = 'hive'
-    expected['hive-env']['properties']['hive_user'] = 'hive'
+    recommendedConfigurations = {}
+    self.stackAdvisor.recommendHIVEConfigurations(recommendedConfigurations, clusterData, services, hosts)
+    self.assertEquals(recommendedConfigurations, expected)
+
+    services['configurations']['hive-env']['properties']['hive_user'] = 'hive'
     expected['ranger-hive-plugin-properties']['properties']['REPOSITORY_CONFIG_USERNAME'] = 'hive'
-    self.stackAdvisor.recommendHIVEConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations,expected)
+    services['configurations']['cluster-env']['properties']['security_enabled'] = 'true'
+    expected['hive-atlas-application.properties']['properties']['atlas.jaas.ticketBased-KafkaClient.loginModuleControlFlag'] = 'required'
+    expected['hive-atlas-application.properties']['properties']['atlas.jaas.ticketBased-KafkaClient.loginModuleName'] = 'com.sun.security.auth.module.Krb5LoginModule'
+    expected['hive-atlas-application.properties']['properties']['atlas.jaas.ticketBased-KafkaClient.option.useTicketCache'] = 'true'
+    del expected['hive-atlas-application.properties']['property_attributes']
+    expected['core-site'] = {
+      'properties': {}
+    }
+
+    recommendedConfigurations = {}
+    self.stackAdvisor.recommendHIVEConfigurations(recommendedConfigurations, clusterData, services, hosts)
+    self.assertEquals(recommendedConfigurations, expected)
 
 
   def test_recommendHBASEConfigurations(self):


[19/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog2402.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog2402.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog2402.java
deleted file mode 100644
index a1ca8a5..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog2402.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.orm.dao.ClusterDAO;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-/**
- * Upgrade catalog for version 2.4.0.2.
- */
-public class UpgradeCatalog2402 extends AbstractUpgradeCatalog {
-
-  @Inject
-  ClusterDAO clusterDAO;
-
-  @Inject
-  Configuration config;
-
-  /**
-   * Logger.
-   */
-  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog2402.class);
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Don't forget to register new UpgradeCatalogs in {@link SchemaUpgradeHelper.UpgradeHelperModule#configure()}
-   *
-   * @param injector Guice injector to track dependencies and uses bindings to inject them.
-   */
-  @Inject
-  public UpgradeCatalog2402(Injector injector) {
-    super(injector);
-    injector.injectMembers(this);
-  }
-
-  // ----- UpgradeCatalog ----------------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getTargetVersion() {
-    return "2.4.0.2";
-  }
-
-  // ----- AbstractUpgradeCatalog --------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getSourceVersion() {
-    return "2.4.0";
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDDLUpdates() throws AmbariException, SQLException {
-
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executePreDMLUpdates() throws AmbariException, SQLException {
-
-  }
-
-  @Override
-  protected void executeDMLUpdates() throws AmbariException, SQLException {
-    updateKafkaWidgetDefinition();
-  }
-
-
-  protected void updateKafkaWidgetDefinition() throws AmbariException {
-    LOG.info("Updating Kafka widget definition.");
-
-    Map<String, List<String>> widgetMap = new HashMap<>();
-    Map<String, String> sectionLayoutMap = new HashMap<>();
-
-    List<String> kafkaSummaryWidgets = new ArrayList<>(Arrays.asList("Active Controller Count"));
-    widgetMap.put("KAFKA_SUMMARY", kafkaSummaryWidgets);
-    sectionLayoutMap.put("KAFKA_SUMMARY", "default_kafka_dashboard");
-
-    updateWidgetDefinitionsForService("KAFKA", widgetMap, sectionLayoutMap);
-  }
-
-}
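
Every catalog removed by this commit follows the contract visible in the file above: declare a source and a target version so SchemaUpgradeHelper can chain catalogs in order, and override the DDL/pre-DML/DML hooks it invokes. A minimal sketch of that contract, with a made-up class name and version pair purely for illustration:

package org.apache.ambari.server.upgrade;

import java.sql.SQLException;

import org.apache.ambari.server.AmbariException;

import com.google.inject.Inject;
import com.google.inject.Injector;

// Sketch only: class name and version strings are illustrative placeholders.
public class UpgradeCatalogSketch extends AbstractUpgradeCatalog {

  @Inject
  public UpgradeCatalogSketch(Injector injector) {
    super(injector);
  }

  @Override
  public String getSourceVersion() {
    return "X.Y.0";   // the catalog this one chains after
  }

  @Override
  public String getTargetVersion() {
    return "X.Y.1";   // the schema version this catalog produces
  }

  @Override
  protected void executeDDLUpdates() throws AmbariException, SQLException {
    // schema changes go through dbAccessor (add/alter columns, constraints, ...)
  }

  @Override
  protected void executePreDMLUpdates() throws AmbariException, SQLException {
    // data fixes that must run before the DML pass
  }

  @Override
  protected void executeDMLUpdates() throws AmbariException, SQLException {
    addNewConfigurationsFromXml();   // typical first step, as in the removed catalogs
  }
}

As the removed javadoc notes, any new catalog still has to be registered in SchemaUpgradeHelper.UpgradeHelperModule#configure() before the helper will pick it up.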

http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog242.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog242.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog242.java
deleted file mode 100644
index ecf64a2..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog242.java
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import java.sql.SQLException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.dao.PermissionDAO;
-import org.apache.ambari.server.orm.dao.PrincipalDAO;
-import org.apache.ambari.server.orm.dao.PrincipalTypeDAO;
-import org.apache.ambari.server.orm.dao.PrivilegeDAO;
-import org.apache.ambari.server.orm.entities.PermissionEntity;
-import org.apache.ambari.server.orm.entities.PrincipalEntity;
-import org.apache.ambari.server.orm.entities.PrincipalTypeEntity;
-import org.apache.ambari.server.orm.entities.PrivilegeEntity;
-import org.apache.ambari.server.orm.entities.ResourceEntity;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.persist.Transactional;
-
-/**
- * Upgrade catalog for version 2.4.2.
- */
-public class UpgradeCatalog242 extends AbstractUpgradeCatalog {
-
-  protected static final String EXTENSION_TABLE = "extension";
-  protected static final String USERS_TABLE = "users";
-  protected static final String HOST_ROLE_COMMAND_TABLE = "host_role_command";
-  protected static final String BLUEPRINT_TABLE = "blueprint";
-  protected static final String HOST_GROUP_TABLE = "hostgroup";
-  protected static final String BLUEPRINT_CONFIGURATION = "blueprint_configuration";
-  protected static final String BLUEPRINT_SETTING = "blueprint_setting";
-  protected static final String HOSTGROUP_COMPONENT = "hostgroup_component";
-  protected static final String HOSTGROUP_CONFIGURATION = "hostgroup_configuration";
-
-  protected static final String BLUEPRINT_NAME_COLUMN = "blueprint_name";
-  protected static final String EXTENSION_NAME_COLUMN = "extension_name";
-  protected static final String EXTENSION_VERSION_COLUMN = "extension_version";
-  protected static final String USER_TYPE_COLUMN = "user_type";
-  protected static final String USER_NAME_COLUMN = "user_name";
-  protected static final String ROLE_COLUMN = "role";
-  protected static final String STATUS_COLUMN = "status";
-  protected static final String NAME_COLUMN = "name";
-
-
-  /**
-   * Logger.
-   */
-  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog242.class);
-
-
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Don't forget to register new UpgradeCatalogs in {@link org.apache.ambari.server.upgrade.SchemaUpgradeHelper.UpgradeHelperModule#configure()}
-   *
-   * @param injector Guice injector to track dependencies and uses bindings to inject them.
-   */
-  @Inject
-  public UpgradeCatalog242(Injector injector) {
-    super(injector);
-    this.injector = injector;
-  }
-
-  // ----- UpgradeCatalog ----------------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getTargetVersion() {
-    return "2.4.2";
-  }
-
-  // ----- AbstractUpgradeCatalog --------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getSourceVersion() {
-    return "2.4.0.2";
-  }
-
-
-  @Override
-  protected void executeDDLUpdates() throws AmbariException, SQLException {
-    updateTablesForMysql();
-  }
-
-  @Override
-  protected void executePreDMLUpdates() throws AmbariException, SQLException {
-    //To change body of implemented methods use File | Settings | File Templates.
-  }
-
-  @Override
-  protected void executeDMLUpdates() throws AmbariException, SQLException {
-    addNewConfigurationsFromXml();
-    createRoleAuthorizations();
-    convertRolePrincipals();
-  }
-
-  /**
-   * Create new role authorizations: CLUSTER.RUN_CUSTOM_COMMAND and AMBARI.RUN_CUSTOM_COMMAND
-   *
-   * @throws SQLException
-   */
-  @Transactional
-  protected void createRoleAuthorizations() throws SQLException {
-    addRoleAuthorization("CLUSTER.RUN_CUSTOM_COMMAND", "Perform custom cluster-level actions",
-        Arrays.asList("AMBARI.ADMINISTRATOR:AMBARI", "CLUSTER.ADMINISTRATOR:CLUSTER"));
-
-    addRoleAuthorization("AMBARI.RUN_CUSTOM_COMMAND", "Perform custom administrative actions",
-        Collections.singletonList("AMBARI.ADMINISTRATOR:AMBARI"));
-  }
-
-  protected void updateTablesForMysql() throws SQLException {
-    final Configuration.DatabaseType databaseType = configuration.getDatabaseType();
-    if (databaseType == Configuration.DatabaseType.MYSQL) {
-      dbAccessor.alterColumn(EXTENSION_TABLE, new DBAccessor.DBColumnInfo(EXTENSION_NAME_COLUMN, String.class, 100, null, false));
-      dbAccessor.alterColumn(EXTENSION_TABLE, new DBAccessor.DBColumnInfo(EXTENSION_VERSION_COLUMN, String.class, 100, null, false));
-
-      dbAccessor.alterColumn(USERS_TABLE, new DBAccessor.DBColumnInfo(USER_TYPE_COLUMN, String.class, 100, null, false));
-      dbAccessor.alterColumn(USERS_TABLE, new DBAccessor.DBColumnInfo(USER_NAME_COLUMN, String.class, 100, null, false));
-
-      dbAccessor.alterColumn(HOST_ROLE_COMMAND_TABLE, new DBAccessor.DBColumnInfo(ROLE_COLUMN, String.class, 100, null, true));
-      dbAccessor.alterColumn(HOST_ROLE_COMMAND_TABLE, new DBAccessor.DBColumnInfo(STATUS_COLUMN, String.class, 100, null, true));
-
-      dbAccessor.dropFKConstraint(HOST_GROUP_TABLE, "FK_hg_blueprint_name");
-
-      dbAccessor.dropFKConstraint(HOST_GROUP_TABLE, "FK_hostgroup_blueprint_name");
-
-      dbAccessor.dropFKConstraint(BLUEPRINT_CONFIGURATION, "FK_cfg_blueprint_name");
-
-      dbAccessor.dropFKConstraint(BLUEPRINT_CONFIGURATION, "FK_blueprint_configuration_blueprint_name");
-
-      dbAccessor.dropFKConstraint(BLUEPRINT_SETTING, "FK_blueprint_setting_blueprint_name");
-
-      dbAccessor.dropFKConstraint(BLUEPRINT_SETTING, "FK_blueprint_setting_name");
-
-      dbAccessor.alterColumn(BLUEPRINT_TABLE, new DBAccessor.DBColumnInfo(BLUEPRINT_NAME_COLUMN, String.class, 100, null, false));
-
-      String[] uniqueColumns1 = new String[] { BLUEPRINT_NAME_COLUMN };
-
-      dbAccessor.addFKConstraint(HOST_GROUP_TABLE, "FK_hg_blueprint_name",
-              uniqueColumns1, BLUEPRINT_TABLE, uniqueColumns1, false);
-
-      dbAccessor.addFKConstraint(BLUEPRINT_CONFIGURATION, "FK_cfg_blueprint_name",
-              uniqueColumns1, BLUEPRINT_TABLE, uniqueColumns1, false);
-
-      dbAccessor.addFKConstraint(BLUEPRINT_SETTING, "FK_blueprint_setting_name",
-              uniqueColumns1, BLUEPRINT_TABLE, uniqueColumns1, false);
-    }
-  }
-
-  /**
-   * Convert the previously set inherited privileges to the more generic inherited privileges model
-   * based on role-based principals rather than specialized principal types.
-   */
-  @Transactional
-  void convertRolePrincipals() {
-    LOG.info("Converting pseudo principle types to role principals");
-
-    PermissionDAO permissionDAO = injector.getInstance(PermissionDAO.class);
-    PrivilegeDAO privilegeDAO = injector.getInstance(PrivilegeDAO.class);
-    PrincipalDAO principalDAO = injector.getInstance(PrincipalDAO.class);
-    PrincipalTypeDAO principalTypeDAO = injector.getInstance(PrincipalTypeDAO.class);
-
-    Map<String, String> principalTypeToRole = new HashMap<>();
-    principalTypeToRole.put("ALL.CLUSTER.ADMINISTRATOR", "CLUSTER.ADMINISTRATOR");
-    principalTypeToRole.put("ALL.CLUSTER.OPERATOR", "CLUSTER.OPERATOR");
-    principalTypeToRole.put("ALL.CLUSTER.USER", "CLUSTER.USER");
-    principalTypeToRole.put("ALL.SERVICE.ADMINISTRATOR", "SERVICE.ADMINISTRATOR");
-    principalTypeToRole.put("ALL.SERVICE.OPERATOR", "SERVICE.OPERATOR");
-
-    // Handle a typo introduced in org.apache.ambari.server.upgrade.UpgradeCatalog240.updateClusterInheritedPermissionsConfig
-    principalTypeToRole.put("ALL.SERVICE.OPERATIOR", "SERVICE.OPERATOR");
-
-    for (Map.Entry<String, String> entry : principalTypeToRole.entrySet()) {
-      String principalTypeName = entry.getKey();
-      String roleName = entry.getValue();
-
-      PermissionEntity role = permissionDAO.findByName(roleName);
-      PrincipalEntity rolePrincipalEntity = (role == null) ? null : role.getPrincipal();
-
-      // Convert Privilege Records
-      PrincipalTypeEntity principalTypeEntity = principalTypeDAO.findByName(principalTypeName);
-
-      if (principalTypeEntity != null) {
-        List<PrincipalEntity> principalEntities = principalDAO.findByPrincipalType(principalTypeName);
-
-        for (PrincipalEntity principalEntity : principalEntities) {
-          Set<PrivilegeEntity> privilegeEntities = principalEntity.getPrivileges();
-
-          for (PrivilegeEntity privilegeEntity : privilegeEntities) {
-            if (rolePrincipalEntity == null) {
-              LOG.info("Removing privilege (id={}) since no role principle was found for {}:\n{}",
-                  privilegeEntity.getId(), roleName, formatPrivilegeEntityDetails(privilegeEntity));
-              // Remove this privilege
-              privilegeDAO.remove(privilegeEntity);
-            } else {
-              LOG.info("Updating privilege (id={}) to use role principle for {}:\n{}",
-                  privilegeEntity.getId(), roleName, formatPrivilegeEntityDetails(privilegeEntity));
-
-              // Set the principal to the updated principal value
-              privilegeEntity.setPrincipal(rolePrincipalEntity);
-              privilegeDAO.merge(privilegeEntity);
-            }
-          }
-
-          // Remove the obsolete principal
-          principalDAO.remove(principalEntity);
-        }
-
-        // Remove the obsolete principal type
-        principalTypeDAO.remove(principalTypeEntity);
-      }
-    }
-
-    LOG.info("Converting pseudo principle types to role principals - complete.");
-  }
-
-  private String formatPrivilegeEntityDetails(PrivilegeEntity privilegeEntity) {
-    if (privilegeEntity == null) {
-      return "";
-    } else {
-      ResourceEntity resource = privilegeEntity.getResource();
-      PrincipalEntity principal = privilegeEntity.getPrincipal();
-      PermissionEntity permission = privilegeEntity.getPermission();
-
-      return String.format("" +
-              "\tPrivilege ID: %d" +
-              "\n\tResource ID: %d" +
-              "\n\tPrincipal ID: %d" +
-              "\n\tPermission ID: %d",
-          privilegeEntity.getId(),
-          resource.getId(),
-          principal.getId(),
-          permission.getId()
-      );
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
deleted file mode 100644
index aed4adf..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog250.java
+++ /dev/null
@@ -1,1352 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.upgrade;
-
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.actionmanager.CommandExecutionType;
-import org.apache.ambari.server.collections.Predicate;
-import org.apache.ambari.server.collections.functors.ContainsPredicate;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
-import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
-import org.apache.ambari.server.orm.dao.AlertsDAO;
-import org.apache.ambari.server.orm.dao.ArtifactDAO;
-import org.apache.ambari.server.orm.dao.DaoUtils;
-import org.apache.ambari.server.orm.dao.ViewInstanceDAO;
-import org.apache.ambari.server.orm.entities.AlertCurrentEntity;
-import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
-import org.apache.ambari.server.orm.entities.AlertHistoryEntity;
-import org.apache.ambari.server.orm.entities.ArtifactEntity;
-import org.apache.ambari.server.orm.entities.ViewInstanceEntity;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosConfigurationDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
-import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosKeytabDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosPrincipalDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
-import org.apache.ambari.server.view.ViewArchiveUtility;
-import org.apache.ambari.server.view.ViewInstanceOperationHandler;
-import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.jdbc.support.JdbcUtils;
-
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParser;
-import com.google.gson.JsonPrimitive;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-/**
- * Upgrade catalog for version 2.5.0.
- */
-public class UpgradeCatalog250 extends AbstractUpgradeCatalog {
-
-  protected static final String HOST_VERSION_TABLE = "host_version";
-  protected static final String GROUPS_TABLE = "groups";
-  protected static final String GROUP_TYPE_COL = "group_type";
-  private static final String AMS_ENV = "ams-env";
-  private static final String AMS_GRAFANA_INI = "ams-grafana-ini";
-  private static final String AMS_SITE = "ams-site";
-  private static final String AMS_LOG4J = "ams-log4j";
-  private static final String AMS_HBASE_LOG4J = "ams-hbase-log4j";
-  private static final String AMS_MODE = "timeline.metrics.service.operation.mode";
-  private static final String AMS_HBASE_SITE = "ams-hbase-site";
-  private static final String HBASE_ROOTDIR = "hbase.rootdir";
-  private static final String HADOOP_ENV = "hadoop-env";
-  private static final String KAFKA_BROKER = "kafka-broker";
-  private static final String YARN_SITE_CONFIG = "yarn-site";
-  private static final String YARN_ENV_CONFIG = "yarn-env";
-  private static final String YARN_LCE_CGROUPS_MOUNT_PATH = "yarn.nodemanager.linux-container-executor.cgroups.mount-path";
-  private static final String YARN_CGROUPS_ENABLED = "yarn_cgroups_enabled";
-  private static final String KAFKA_TIMELINE_METRICS_HOST = "kafka.timeline.metrics.host";
-
-  public static final String COMPONENT_TABLE = "servicecomponentdesiredstate";
-  public static final String COMPONENT_VERSION_TABLE = "servicecomponent_version";
-  public static final String COMPONENT_VERSION_PK = "PK_sc_version";
-  public static final String COMPONENT_VERSION_FK_COMPONENT = "FK_scv_component_id";
-  public static final String COMPONENT_VERSION_FK_REPO_VERSION = "FK_scv_repo_version_id";
-
-  protected static final String SERVICE_DESIRED_STATE_TABLE = "servicedesiredstate";
-  protected static final String CREDENTIAL_STORE_ENABLED_COL = "credential_store_enabled";
-
-  protected static final String HOST_COMPONENT_DESIREDSTATE_TABLE = "hostcomponentdesiredstate";
-  protected static final String HOST_COMPONENT_DESIREDSTATE_ID_COL = "id";
-  protected static final String HOST_COMPONENT_DESIREDSTATE_INDEX = "UQ_hcdesiredstate_name";
-
-  @Inject
-  ViewInstanceDAO viewInstanceDAO;
-
-  @Inject
-  ViewInstanceOperationHandler viewInstanceOperationHandler;
-
-  @Inject
-  protected ViewArchiveUtility archiveUtility;
-
-  /**
-   * Logger.
-   */
-  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog250.class);
-
-  @Inject
-  DaoUtils daoUtils;
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Don't forget to register new UpgradeCatalogs in {@link org.apache.ambari.server.upgrade.SchemaUpgradeHelper.UpgradeHelperModule#configure()}
-   *
-   * @param injector Guice injector to track dependencies and uses bindings to inject them.
-   */
-  @Inject
-  public UpgradeCatalog250(Injector injector) {
-    super(injector);
-
-    daoUtils = injector.getInstance(DaoUtils.class);
-  }
-
-  // ----- UpgradeCatalog ----------------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getTargetVersion() {
-    return "2.5.0";
-  }
-
-  // ----- AbstractUpgradeCatalog --------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getSourceVersion() {
-    return "2.4.2";
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDDLUpdates() throws AmbariException, SQLException {
-    updateHostVersionTable();
-    createComponentVersionTable();
-    updateGroupsTable();
-    dbAccessor.addColumn("stage",
-      new DBAccessor.DBColumnInfo("command_execution_type", String.class, 32, CommandExecutionType.STAGE.toString(),
-        false));
-    updateServiceDesiredStateTable();
-    updateHostComponentDesiredStateTable();
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executePreDMLUpdates() throws AmbariException, SQLException {
-
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDMLUpdates() throws AmbariException, SQLException {
-    addNewConfigurationsFromXml();
-    updateAMSConfigs();
-    updateStormAlerts();
-    updateLogSearchAlert();
-    removeAlertDuplicates();
-    updateHadoopEnvConfigs();
-    updateKafkaConfigs();
-    updateHIVEInteractiveConfigs();
-    unInstallAllZeppelinViews();
-    updateZeppelinConfigs();
-    updateAtlasConfigs();
-    updateLogSearchConfigs();
-    updateAmbariInfraConfigs();
-    updateYarnSite();
-    updateRangerUrlConfigs();
-    addManageServiceAutoStartPermissions();
-    addManageAlertNotificationsPermissions();
-    updateKerberosDescriptorArtifacts();
-    fixHBaseMasterCPUUtilizationAlertDefinition();
-    updateTezHistoryUrlBase();
-  }
-
-  /**
-   * Fix the HBase Master CPU Utilization alert definition by swapping the values for <code>kerberos_keytab</code>
-   * and <code>kerberos_principal</code>.
-   */
-  protected void fixHBaseMasterCPUUtilizationAlertDefinition() {
-    AlertDefinitionDAO alertDefinitionDAO = injector.getInstance(AlertDefinitionDAO.class);
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
-    for (final Cluster cluster : clusterMap.values()) {
-      long clusterID = cluster.getClusterId();
-      AlertDefinitionEntity alertDefinition = alertDefinitionDAO.findByName(clusterID, "hbase_master_cpu");
-      if(alertDefinition != null) {
-        LOG.info("Updating alert definition {} in cluster {}", alertDefinition.getDefinitionName(), clusterID);
-        String source = alertDefinition.getSource();
-
-        if(source != null) {
-          JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
-
-          if(sourceJson != null) {
-            boolean changesExist = false;
-            LOG.debug("Source before update : {}", sourceJson);
-
-            JsonObject uriJson = sourceJson.get("uri").getAsJsonObject();
-            JsonPrimitive primitive;
-
-            if (uriJson != null) {
-              // Replace
-              //  "kerberos_keytab": "{{hbase-site/hbase.security.authentication.spnego.kerberos.principal}}"
-              // With
-              //  "kerberos_keytab": "{{hbase-site/hbase.security.authentication.spnego.kerberos.keytab}}"
-              primitive = uriJson.getAsJsonPrimitive("kerberos_keytab");
-              if ((primitive != null) && primitive.isString() && "{{hbase-site/hbase.security.authentication.spnego.kerberos.principal}}".equals(primitive.getAsString())) {
-                uriJson.remove("kerberos_keytab");
-                uriJson.addProperty("kerberos_keytab", "{{hbase-site/hbase.security.authentication.spnego.kerberos.keytab}}");
-                changesExist = true;
-              }
-
-              // Replace
-              //  "kerberos_principal": "{{hbase-site/hbase.security.authentication.spnego.kerberos.keytab}}"
-              // With
-              //  "kerberos_principal": "{{hbase-site/hbase.security.authentication.spnego.kerberos.principal}}"
-              primitive = uriJson.getAsJsonPrimitive("kerberos_principal");
-              if ((primitive != null) && primitive.isString() && "{{hbase-site/hbase.security.authentication.spnego.kerberos.keytab}}".equals(primitive.getAsString())) {
-                uriJson.remove("kerberos_principal");
-                uriJson.addProperty("kerberos_principal", "{{hbase-site/hbase.security.authentication.spnego.kerberos.principal}}");
-                changesExist = true;
-              }
-            }
-
-            LOG.debug("Source after update : {}", sourceJson);
-            if(changesExist) {
-              alertDefinition.setSource(sourceJson.toString());
-              alertDefinition.setHash(UUID.randomUUID().toString());
-
-              alertDefinitionDAO.merge(alertDefinition);
-            }
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Removes all {@link AlertCurrentEntity} duplicates from the database.
-   * Alerts are considered duplicates if their definition, host, and alert instance are the same.
-   * Duplicates could be created by earlier versions of Ambari, up to 2.4.1.
-   */
-  protected void removeAlertDuplicates() {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    AlertsDAO alertsDao = injector.getInstance(AlertsDAO.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
-    for (final Cluster cluster : clusterMap.values()) {
-      long clusterID = cluster.getClusterId();
-      LOG.info("Removing alert duplicates on cluster {}", cluster.getClusterName());
-      List<AlertCurrentEntity> alertCurrentEntities = alertsDao.findCurrentByCluster(clusterID);
-      Set<AlertHistoryEntity> uniqueAlerts = new HashSet<>();
-      for (AlertCurrentEntity alertCurrentEntity : alertCurrentEntities) {
-
-        AlertHistoryEntity currentAlert = new AlertHistoryEntity();
-        currentAlert.setAlertDefinition(alertCurrentEntity.getAlertHistory().getAlertDefinition());
-        currentAlert.setHostName(alertCurrentEntity.getAlertHistory().getHostName());
-        currentAlert.setAlertInstance(alertCurrentEntity.getAlertHistory().getAlertInstance());
-
-        if (uniqueAlerts.contains(currentAlert)) {
-          LOG.info("Alert entity duplicate {} will be removed",alertCurrentEntity.getAlertHistory());
-          alertsDao.remove(alertCurrentEntity);
-        } else {
-          uniqueAlerts.add(currentAlert);
-        }
-      }
-    }
-  }
-
-  protected void updateStormAlerts() {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    AlertDefinitionDAO alertDefinitionDAO = injector.getInstance(AlertDefinitionDAO.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
-    for (final Cluster cluster : clusterMap.values()) {
-      long clusterID = cluster.getClusterId();
-      LOG.info("Updating storm alert definitions on cluster : " + cluster.getClusterName());
-
-      final AlertDefinitionEntity stormServerProcessDefinitionEntity = alertDefinitionDAO.findByName(
-              clusterID, "storm_server_process");
-
-      final AlertDefinitionEntity stormWebAlert = alertDefinitionDAO.findByName(
-              clusterID, "storm_webui");
-
-      if (stormServerProcessDefinitionEntity != null) {
-        LOG.info("Removing alert definition : " + stormServerProcessDefinitionEntity);
-        alertDefinitionDAO.remove(stormServerProcessDefinitionEntity);
-      }
-
-      if (stormWebAlert != null) {
-        LOG.info("Updating alert definition : " + stormWebAlert.getDefinitionName());
-        String source = stormWebAlert.getSource();
-        JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
-        LOG.debug("Source before update : {}", sourceJson);
-
-        JsonObject uriJson = sourceJson.get("uri").getAsJsonObject();
-        uriJson.remove("https");
-        uriJson.remove("https_property");
-        uriJson.remove("https_property_value");
-        uriJson.addProperty("https", "{{storm-site/ui.https.port}}");
-        uriJson.addProperty("https_property", "{{storm-site/ui.https.keystore.type}}");
-        uriJson.addProperty("https_property_value", "jks");
-
-        LOG.debug("Source after update : {}", sourceJson);
-        stormWebAlert.setSource(sourceJson.toString());
-        alertDefinitionDAO.merge(stormWebAlert);
-      }
-    }
-  }
-
-  protected void updateLogSearchAlert() {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    AlertDefinitionDAO alertDefinitionDAO = injector.getInstance(AlertDefinitionDAO.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
-    for (final Cluster cluster : clusterMap.values()) {
-      long clusterID = cluster.getClusterId();
-      LOG.info("Updating Log Search web ui alert definitions on cluster : " + cluster.getClusterName());
-
-      final AlertDefinitionEntity logSearchWebAlert = alertDefinitionDAO.findByName(
-        clusterID, "logsearch_ui");
-
-      if (logSearchWebAlert != null) {
-        LOG.info("Updating alert definition : " + logSearchWebAlert.getDefinitionName());
-        String source = logSearchWebAlert.getSource();
-        JsonObject sourceJson = new JsonParser().parse(source).getAsJsonObject();
-        LOG.debug("Source before update : {}", sourceJson);
-
-        JsonObject uriJson = sourceJson.get("uri").getAsJsonObject();
-        uriJson.remove("https_property");
-        uriJson.remove("https_property_value");
-        uriJson.addProperty("https_property", "{{logsearch-env/logsearch_ui_protocol}}");
-        uriJson.addProperty("https_property_value", "https");
-
-        LOG.debug("Source after update : {}", sourceJson);
-        logSearchWebAlert.setSource(sourceJson.toString());
-        alertDefinitionDAO.merge(logSearchWebAlert);
-      }
-    }
-  }
-
-  /**
-   * Checks whether the previous value of 'tez.tez-ui.history-url.base' contains the Tez view's URL.
-   * If so, it points the property to the fixed URL of the Tez auto-instance view introduced in Ambari 2.5.0.0;
-   * otherwise it logs an error and moves on.
-   */
-  protected void updateTezHistoryUrlBase() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          Set<String> installedServices = cluster.getServices().keySet();
-          if (installedServices.contains("TEZ")) {
-            Config tezSite = cluster.getDesiredConfigByType("tez-site");
-            if (tezSite != null) {
-              String currentTezHistoryUrlBase = tezSite.getProperties().get("tez.tez-ui.history-url.base");
-              if (!StringUtils.isEmpty(currentTezHistoryUrlBase)) {
-                LOG.info("Current Tez History URL base: {} ", currentTezHistoryUrlBase);
-                String newTezHistoryUrlBase = null;
-                try {
-                  newTezHistoryUrlBase = getUpdatedTezHistoryUrlBase(currentTezHistoryUrlBase);
-                } catch (AmbariException e) {
-                  LOG.error("Error occurred while creating updated URL of tez view using value in property tez.tez-ui.history-url.base." +
-                    "The current value {} is not of standard format expected by Ambari. Skipping the updation of tez.tez-ui.history-url.base." +
-                    "Please check validity of this property manually in tez site after upgrade.", currentTezHistoryUrlBase, e);
-                  return;
-                }
-                LOG.info("New Tez History URL base: {} ", newTezHistoryUrlBase);
-                updateConfigurationProperties("tez-site", Collections.singletonMap("tez.tez-ui.history-url.base", newTezHistoryUrlBase), true, false);
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Transforms the existing tez history url base to the fixed short url for tez auto instance
-   * @param currentTezHistoryUrlBase Existing value of the tez history url base
-   * @return the updated tez history url base
-   * @throws AmbariException if currentTezHistoryUrlBase is malformed or is not compatible with the Tez View url REGEX
-   */
-  protected String getUpdatedTezHistoryUrlBase(String currentTezHistoryUrlBase) throws AmbariException{
-    String pattern = "(.*)(\\/views\\/TEZ\\/)(.*)";
-    Pattern regex = Pattern.compile(pattern);
-    Matcher matcher = regex.matcher(currentTezHistoryUrlBase);
-    String prefix;
-    if (matcher.find()) {
-      prefix = matcher.group(1);
-    } else {
-      throw new AmbariException("Cannot prepare the new value for property: 'tez.tez-ui.history-url.base' using the old value: '" + currentTezHistoryUrlBase + "'");
-    }
-
-    // adding the auto tez instance short url name instead of the tez version and tez view instance name
-    return prefix + "/view/TEZ/tez_cluster_instance";
-  }
-
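  /*
   * A minimal worked example of the rewrite performed by getUpdatedTezHistoryUrlBase() below.
   * The host name and the old view instance segment are assumed values used only for illustration:
   *
   *   String oldValue = "http://ambari.example.com:8080/#/main/views/TEZ/0.7.0.2.5.0.0-1/TEZ_CLUSTER_INSTANCE";
   *   String newValue = getUpdatedTezHistoryUrlBase(oldValue);
   *   // newValue: "http://ambari.example.com:8080/#/main/view/TEZ/tez_cluster_instance"
   *
   * Everything before "/views/TEZ/" is kept as the prefix; the Tez view version and instance name
   * are replaced with the fixed short URL of the auto-created Tez view instance.
   */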
-  protected void updateHostVersionTable() throws SQLException {
-    LOG.info("Updating the {} table", HOST_VERSION_TABLE);
-
-    // Add the unique constraint to the host_version table
-    dbAccessor.addUniqueConstraint(HOST_VERSION_TABLE, "UQ_host_repo", "repo_version_id", "host_id");
-  }
-
-  protected void updateGroupsTable() throws SQLException {
-    LOG.info("Updating the {} table", GROUPS_TABLE);
-
-    dbAccessor.addColumn(GROUPS_TABLE, new DBColumnInfo(GROUP_TYPE_COL, String.class, null, "LOCAL", false));
-    dbAccessor.executeQuery("UPDATE groups SET group_type='LDAP' WHERE ldap_group=1");
-    dbAccessor.addUniqueConstraint(GROUPS_TABLE, "UNQ_groups_0", "group_name", "group_type");
-  }
-
-  /**
-   * Updates {@code yarn-site} in the following ways:
-   *
-   * Removes {@code YARN_LCE_CGROUPS_MOUNT_PATH} if {@code YARN_CGROUPS_ENABLED} is {@code false} and
-   * {@code YARN_LCE_CGROUPS_MOUNT_PATH} is an empty string.
-   *
-   * @throws AmbariException
-   */
-  protected void updateYarnSite() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
-
-    for (final Cluster cluster : clusterMap.values()) {
-      Config yarnEnvConfig = cluster.getDesiredConfigByType(YARN_ENV_CONFIG);
-      Config yarnSiteConfig = cluster.getDesiredConfigByType(YARN_SITE_CONFIG);
-
-      if (yarnEnvConfig != null && yarnSiteConfig != null) {
-        String cgroupEnabled = yarnEnvConfig.getProperties().get(YARN_CGROUPS_ENABLED);
-        String mountPath = yarnSiteConfig.getProperties().get(YARN_LCE_CGROUPS_MOUNT_PATH);
-
-        if (StringUtils.isEmpty(mountPath) && cgroupEnabled != null
-          && cgroupEnabled.trim().equalsIgnoreCase("false")){
-
-          removeConfigurationPropertiesFromCluster(cluster, YARN_SITE_CONFIG, new HashSet<String>(){{
-            add(YARN_LCE_CGROUPS_MOUNT_PATH);
-          }});
-
-        }
-      }
-
-    }
-  }
-
-  protected void updateAMSConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-
-          Config amsEnv = cluster.getDesiredConfigByType(AMS_ENV);
-          if (amsEnv != null) {
-            Map<String, String> amsEnvProperties = amsEnv.getProperties();
-            String content = amsEnvProperties.get("content");
-            Map<String, String> newProperties = new HashMap<>();
-            newProperties.put("content", updateAmsEnvContent(content));
-            updateConfigurationPropertiesForCluster(cluster, AMS_ENV, newProperties, true, true);
-          }
-
-
-          boolean isDistributed = false;
-          Config amsSite = cluster.getDesiredConfigByType(AMS_SITE);
-          if (amsSite != null) {
-            if ("distributed".equals(amsSite.getProperties().get(AMS_MODE))) {
-              isDistributed = true;
-            }
-
-            Map<String, String> amsSiteProperties = amsSite.getProperties();
-
-            if (amsSiteProperties != null && amsSiteProperties.containsKey("timeline.metrics.hbase.fifo.compaction.enabled")) {
-              LOG.info("Removing timeline.metrics.hbase.fifo.compaction.enabled from ams-site");
-              removeConfigurationPropertiesFromCluster(cluster, AMS_SITE, Collections.singleton("timeline.metrics.hbase.fifo.compaction.enabled"));
-            }
-          }
-
-          if (isDistributed) {
-            Config amsHbaseSite = cluster.getDesiredConfigByType(AMS_HBASE_SITE);
-            if (amsHbaseSite != null) {
-              Map<String, String> amsHbaseSiteProperties = amsHbaseSite.getProperties();
-              String rootDir = amsHbaseSiteProperties.get(HBASE_ROOTDIR);
-              if (StringUtils.isNotEmpty(rootDir) && rootDir.startsWith("hdfs://")) {
-                int indexOfSlash = rootDir.indexOf("/", 7);
-                Map<String, String> newProperties = new HashMap<>();
-                String newRootdir = rootDir.substring(indexOfSlash);
-                newProperties.put(HBASE_ROOTDIR, newRootdir);
-                LOG.info("Changing ams-hbase-site rootdir to " + newRootdir);
-                updateConfigurationPropertiesForCluster(cluster, AMS_HBASE_SITE, newProperties, true, true);
-              }
-            }
-          }
-
-          //Update AMS log4j to make rolling properties configurable as separate fields.
-          Config amsLog4jProperties = cluster.getDesiredConfigByType(AMS_LOG4J);
-          if(amsLog4jProperties != null){
-            Map<String, String> newProperties = new HashMap<>();
-
-            String content = amsLog4jProperties.getProperties().get("content");
-            content = SchemaUpgradeUtil.extractProperty(content,"ams_log_max_backup_size","ams_log_max_backup_size","log4j.appender.file.MaxFileSize=(\\w+)MB","80",newProperties);
-            content = SchemaUpgradeUtil.extractProperty(content,"ams_log_number_of_backup_files","ams_log_number_of_backup_files","log4j.appender.file.MaxBackupIndex=(\\w+)","60",newProperties);
-            newProperties.put("content",content);
-            updateConfigurationPropertiesForCluster(cluster,AMS_LOG4J,newProperties,true,true);
-          }
-
-          Config amsHbaseLog4jProperties = cluster.getDesiredConfigByType(AMS_HBASE_LOG4J);
-          if(amsHbaseLog4jProperties != null){
-            Map<String, String> newProperties = new HashMap<>();
-
-            String content = amsHbaseLog4jProperties.getProperties().get("content");
-            content = SchemaUpgradeUtil.extractProperty(content,"ams_hbase_log_maxfilesize","ams_hbase_log_maxfilesize","hbase.log.maxfilesize=(\\w+)MB","256",newProperties);
-            content = SchemaUpgradeUtil.extractProperty(content,"ams_hbase_log_maxbackupindex","ams_hbase_log_maxbackupindex","hbase.log.maxbackupindex=(\\w+)","20",newProperties);
-            content = SchemaUpgradeUtil.extractProperty(content,"ams_hbase_security_log_maxfilesize","ams_hbase_security_log_maxfilesize","hbase.security.log.maxfilesize=(\\w+)MB","256",newProperties);
-            content = SchemaUpgradeUtil.extractProperty(content,"ams_hbase_security_log_maxbackupindex","ams_hbase_security_log_maxbackupindex","hbase.security.log.maxbackupindex=(\\w+)","20",newProperties);
-            newProperties.put("content",content);
-            updateConfigurationPropertiesForCluster(cluster,AMS_HBASE_LOG4J,newProperties,true,true);
-          }
-
-          Config amsGrafanaIni = cluster.getDesiredConfigByType(AMS_GRAFANA_INI);
-          if (amsGrafanaIni != null) {
-            Map<String, String> amsGrafanaIniProperties = amsGrafanaIni.getProperties();
-            String content = amsGrafanaIniProperties.get("content");
-            Map<String, String> newProperties = new HashMap<>();
-            newProperties.put("content", updateAmsGrafanaIniContent(content));
-            updateConfigurationPropertiesForCluster(cluster, AMS_GRAFANA_INI, newProperties, true, true);
-          }
-
-        }
-      }
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void updateKerberosDescriptorArtifact(ArtifactDAO artifactDAO, ArtifactEntity artifactEntity) throws AmbariException {
-    if (artifactEntity != null) {
-      Map<String, Object> data = artifactEntity.getArtifactData();
-
-      if (data != null) {
-        final KerberosDescriptor kerberosDescriptor = new KerberosDescriptorFactory().createInstance(data);
-
-        if (kerberosDescriptor != null) {
-          KerberosServiceDescriptor logSearchKerberosDescriptor = kerberosDescriptor.getService("LOGSEARCH");
-          KerberosServiceDescriptor atlasKerberosDescriptor = kerberosDescriptor.getService("ATLAS");
-          KerberosServiceDescriptor rangerKerberosDescriptor = kerberosDescriptor.getService("RANGER");
-          addInfrSolrDescriptor(artifactDAO, artifactEntity, kerberosDescriptor, atlasKerberosDescriptor, "ATLAS_SERVER");
-          addInfrSolrDescriptor(artifactDAO, artifactEntity, kerberosDescriptor, logSearchKerberosDescriptor, "LOGSEARCH_SERVER");
-          addInfrSolrDescriptor(artifactDAO, artifactEntity, kerberosDescriptor, rangerKerberosDescriptor, "RANGER_ADMIN");
-          KerberosServiceDescriptor stormKerberosDescriptor = kerberosDescriptor.getService("STORM");
-
-          if (stormKerberosDescriptor != null) {
-            KerberosComponentDescriptor componentDescriptor = stormKerberosDescriptor.getComponent("NIMBUS");
-            if (componentDescriptor != null) {
-              KerberosIdentityDescriptor origIdentityDescriptor = componentDescriptor.getIdentity("/STORM/NIMBUS/nimbus_server");
-              if (origIdentityDescriptor != null) {
-                KerberosPrincipalDescriptor origPrincipalDescriptor = origIdentityDescriptor.getPrincipalDescriptor();
-                KerberosPrincipalDescriptor newPrincipalDescriptor = new KerberosPrincipalDescriptor(
-                    null,
-                    null,
-                    (origPrincipalDescriptor == null) ?
-                        "ranger-storm-audit/xasecure.audit.jaas.Client.option.principal" : origPrincipalDescriptor.getConfiguration(),
-                    null
-                );
-                KerberosKeytabDescriptor origKeytabDescriptor = origIdentityDescriptor.getKeytabDescriptor();
-                KerberosKeytabDescriptor newKeytabDescriptor = new KerberosKeytabDescriptor(
-                    null,
-                    null,
-                    null,
-                    null,
-                    null,
-                    (origKeytabDescriptor == null) ?
-                        "ranger-storm-audit/xasecure.audit.jaas.Client.option.keyTab" : origKeytabDescriptor.getConfiguration(),
-                    false);
-                componentDescriptor.removeIdentity("/STORM/NIMBUS/nimbus_server");
-                componentDescriptor.putIdentity(new KerberosIdentityDescriptor("/STORM/storm_components", null, newPrincipalDescriptor, newKeytabDescriptor, null));
-              }
-            }
-          }
-          KerberosServiceDescriptor yarnKerberosDescriptor = kerberosDescriptor.getService("YARN");
-          if (yarnKerberosDescriptor != null) {
-            Map<String, KerberosConfigurationDescriptor> configs = yarnKerberosDescriptor.getConfigurations();
-            KerberosConfigurationDescriptor yarnSiteConfigDescriptor = configs.get("yarn-site");
-            if (yarnSiteConfigDescriptor != null) {
-              Map<String, String> properties = yarnSiteConfigDescriptor.getProperties();
-              if (properties != null && properties.containsKey(YARN_LCE_CGROUPS_MOUNT_PATH)) {
-                properties.remove(YARN_LCE_CGROUPS_MOUNT_PATH);
-              }
-            }
-          }
-
-          // Fix HBASE_MASTER Kerberos identity for Ranger audit by clearing out any keytab file or principal name values.
-          KerberosServiceDescriptor hbaseKerberosDescriptor = kerberosDescriptor.getService("HBASE");
-          if (hbaseKerberosDescriptor != null) {
-            KerberosComponentDescriptor hbaseMasterKerberosDescriptor = hbaseKerberosDescriptor.getComponent("HBASE_MASTER");
-            if (hbaseMasterKerberosDescriptor != null) {
-              KerberosIdentityDescriptor identityDescriptor = hbaseMasterKerberosDescriptor.getIdentity("/HBASE/HBASE_MASTER/hbase_master_hbase");
-
-              if (identityDescriptor != null) {
-                KerberosPrincipalDescriptor principalDescriptor = identityDescriptor.getPrincipalDescriptor();
-                KerberosKeytabDescriptor keytabDescriptor = identityDescriptor.getKeytabDescriptor();
-
-                identityDescriptor.setReference(identityDescriptor.getName());
-                identityDescriptor.setName("ranger_hbase_audit");
-
-                principalDescriptor.setValue(null);
-                keytabDescriptor.setFile(null);
-              }
-            }
-          }
-
-          artifactEntity.setArtifactData(kerberosDescriptor.toMap());
-          artifactDAO.merge(artifactEntity);
-        }
-      }
-    }
-  }
-
-  /**
-   * Adds the /AMBARI_INFRA/INFRA_SOLR/infra-solr identity reference to a specific service component.
-   */
-  private void addInfrSolrDescriptor(ArtifactDAO artifactDAO, ArtifactEntity artifactEntity, KerberosDescriptor kerberosDescriptor,
-                                     KerberosServiceDescriptor serviceDescriptor, String componentName) {
-    if (serviceDescriptor != null) {
-      KerberosComponentDescriptor componentDescriptor = serviceDescriptor.getComponent(componentName);
-      if (componentDescriptor != null) {
-        KerberosIdentityDescriptor origIdentityDescriptor = componentDescriptor.getIdentity("/AMBARI_INFRA/INFRA_SOLR/infra-solr");
-        if (origIdentityDescriptor != null) {
-          LOG.info("/AMBARI_INFRA/INFRA_SOLR/infra-solr identity already exists in {} component", componentName);
-        } else {
-          Predicate predicate = ContainsPredicate.fromMap(Collections.<String, Object>singletonMap(ContainsPredicate.NAME, Arrays.asList("services", "AMBARI_INFRA")));
-          componentDescriptor.putIdentity(new KerberosIdentityDescriptor("/AMBARI_INFRA/INFRA_SOLR/infra-solr",null, null, null, predicate));
-        }
-      }
-    }
-  }
-
-  protected void unInstallAllZeppelinViews(){
-    LOG.info("Removing all Zeppelin views.");
-    List<ViewInstanceEntity> viewInstanceList =  viewInstanceDAO.findAll();
-    for( ViewInstanceEntity viewInstanceEntity : viewInstanceList ){
-      if(viewInstanceEntity.getViewName().equalsIgnoreCase("ZEPPELIN{1.0.0}")){
-        LOG.info("Uninstalling zeppelin view : {}", viewInstanceEntity);
-        try {
-          viewInstanceOperationHandler.uninstallViewInstance(viewInstanceEntity);
-        }catch(Exception e){
-          LOG.error("Exception occurred while uninstalling view {}. Ignored for now.", viewInstanceEntity);
-        }
-      }
-    }
-  }
-
-  /**
-   * Updates Zeppelin configs.
-   *
-   * @throws AmbariException
-   */
-  protected void updateZeppelinConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          Config zeppelinEnvProperties = cluster.getDesiredConfigByType("zeppelin-env");
-          if (zeppelinEnvProperties != null) {
-            String log4jPropertiesContent = zeppelinEnvProperties.getProperties().get("log4j_properties_content");
-            String shiroIniContent = zeppelinEnvProperties.getProperties().get("shiro_ini_content");
-
-            updateConfigurationProperties("zeppelin-log4j-properties", Collections.singletonMap("log4j_properties_content", log4jPropertiesContent), true, true);
-            updateConfigurationProperties("zeppelin-shiro-ini", Collections.singletonMap("shiro_ini_content", shiroIniContent), true, true);
-          }
-        }
-      }
-    }
-  }
-
-  protected String updateAmsGrafanaIniContent(String content) {
-    if (content == null) {
-      return null;
-    }
-
-    String toReplace = "admin_password = {{ams_grafana_admin_pwd}}";
-    String replaceWith = ";admin_password =";
-    content = content.replace(toReplace, replaceWith);
-    return content;
-  }
-
-  protected String updateAmsEnvContent(String content) {
-    if (content == null) {
-      return null;
-    }
-
-    List<String> toReplaceList = new ArrayList<>();
-    toReplaceList.add("\n# HBase normalizer enabled\n");
-    toReplaceList.add("\n# HBase compaction policy enabled\n");
-    toReplaceList.add("export AMS_HBASE_NORMALIZER_ENABLED={{ams_hbase_normalizer_enabled}}\n");
-    toReplaceList.add("export AMS_HBASE_FIFO_COMPACTION_ENABLED={{ams_hbase_fifo_compaction_enabled}}\n");
-
-    //Because of AMBARI-15331 : AMS HBase FIFO compaction policy and Normalizer settings are not handled correctly
-    toReplaceList.add("export HBASE_NORMALIZATION_ENABLED={{ams_hbase_normalizer_enabled}}\n");
-    toReplaceList.add("export HBASE_FIFO_COMPACTION_POLICY_ENABLED={{ams_hbase_fifo_compaction_policy_enabled}}\n");
-
-
-    for (String toReplace : toReplaceList) {
-      if (content.contains(toReplace)) {
-        content = content.replace(toReplace, StringUtils.EMPTY);
-      }
-    }
-
-    return content;
-  }
-
-  protected void updateKafkaConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-
-          Config kafkaBrokerConfig = cluster.getDesiredConfigByType(KAFKA_BROKER);
-          if (kafkaBrokerConfig != null) {
-            Map<String, String> kafkaBrokerProperties = kafkaBrokerConfig.getProperties();
-
-            if (kafkaBrokerProperties != null && kafkaBrokerProperties.containsKey(KAFKA_TIMELINE_METRICS_HOST)) {
-              LOG.info("Removing kafka.timeline.metrics.host from kafka-broker");
-              removeConfigurationPropertiesFromCluster(cluster, KAFKA_BROKER, Collections.singleton("kafka.timeline.metrics.host"));
-            }
-          }
-        }
-      }
-    }
-  }
-
-
-  protected void updateHadoopEnvConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(
-        AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-      Map<String, String> prop = new HashMap<>();
-
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-
-          String content = null;
-          Boolean contentUpdated = false;
-
-          if (cluster.getDesiredConfigByType(HADOOP_ENV) != null) {
-            content = cluster.getDesiredConfigByType(HADOOP_ENV).getProperties().get("content");
-          }
-
-          if (content != null) {
-            if (!content.contains("ulimit -l")) {  // Append "ulimit -l" to hadoop-env.sh
-              content += "\n" +
-                "{% if is_datanode_max_locked_memory_set %}\n" +
-                "# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n" +
-                "# Makes sense to fix only when runing DN as root \n" +
-                "if [ \"$command\" == \"datanode\" ] &amp;&amp; [ \"$EUID\" -eq 0 ] &amp;&amp; [ -n \"$HADOOP_SECURE_DN_USER\" ]; then\n" +
-                "  ulimit -l {{datanode_max_locked_memory}}\n" +
-                "fi\n" +
-                "{% endif %}";
-
-              contentUpdated = true;
-            }
-
-            if (!content.contains("ulimit -n")){  // Append "ulimit -n" to hadoop-env.sh
-              content += "\n" +
-                "if [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then \n" +
-                "  ulimit -n {{hdfs_user_nofile_limit}}\n" +
-                "fi";
-              contentUpdated = true;
-            }
-
-            if (contentUpdated){
-              prop.put("content", content);
-              updateConfigurationPropertiesForCluster(cluster, "hadoop-env",
-                prop, true, false);
-            }
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Creates the servicecomponent_version table
-   * @throws SQLException
-   */
-  private void createComponentVersionTable() throws SQLException {
-
-    List<DBColumnInfo> columns = new ArrayList<>();
-
-    // Create the servicecomponent_version table
-    LOG.info("Creating {} table", COMPONENT_VERSION_TABLE);
-
-    columns.add(new DBColumnInfo("id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("component_id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("repo_version_id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("state", String.class, 32, null, false));
-    columns.add(new DBColumnInfo("user_name", String.class, 255, null, false));
-    dbAccessor.createTable(COMPONENT_VERSION_TABLE, columns, (String[]) null);
-
-    dbAccessor.addPKConstraint(COMPONENT_VERSION_TABLE, COMPONENT_VERSION_PK, "id");
-
-    dbAccessor.addFKConstraint(COMPONENT_VERSION_TABLE, COMPONENT_VERSION_FK_COMPONENT, "component_id",
-      COMPONENT_TABLE, "id", false);
-
-    dbAccessor.addFKConstraint(COMPONENT_VERSION_TABLE, COMPONENT_VERSION_FK_REPO_VERSION, "repo_version_id",
-      "repo_version", "repo_version_id", false);
-
-    addSequence("servicecomponent_version_id_seq", 0L, false);
-  }
-
-  /**
-   * Alter servicedesiredstate table.
-   * @throws SQLException
-   */
-  private void updateServiceDesiredStateTable() throws SQLException {
-    // ALTER TABLE servicedesiredstate ADD COLUMN
-    // credential_store_enabled SMALLINT DEFAULT 0 NOT NULL
-    dbAccessor.addColumn(SERVICE_DESIRED_STATE_TABLE,
-      new DBColumnInfo(CREDENTIAL_STORE_ENABLED_COL, Short.class, null, 0, false));
-  }
-
-
-  /**
-   * Removes the compound PK from the hostcomponentdesiredstate table
-   * and replaces it with a surrogate PK, but only if the table doesn't have its new PK set.
-   * Creates an index and a unique constraint on the columns that originally formed the compound PK.
-   *
-   * @throws SQLException
-   */
-  private void updateHostComponentDesiredStateTable() throws SQLException {
-    if (dbAccessor.tableHasPrimaryKey(HOST_COMPONENT_DESIREDSTATE_TABLE, HOST_COMPONENT_DESIREDSTATE_ID_COL)) {
-      LOG.info("Skipping {} table Primary Key modifications since the new {} column already exists",
-        HOST_COMPONENT_DESIREDSTATE_TABLE, HOST_COMPONENT_DESIREDSTATE_ID_COL);
-
-      return;
-    }
-    // add the new ID column as nullable until we populate
-    dbAccessor.addColumn(HOST_COMPONENT_DESIREDSTATE_TABLE,
-      new DBColumnInfo(HOST_COMPONENT_DESIREDSTATE_ID_COL, Long.class, null, null, true));
-
-    // insert sequence values
-    AtomicLong id = new AtomicLong(1);
-    Statement statement = null;
-    ResultSet resultSet = null;
-
-    try {
-      statement = dbAccessor.getConnection().createStatement();
-
-      if (statement != null) {
-        // Select records by old PK
-        String selectSQL = String.format(
-          "SELECT cluster_id, component_name, host_id, service_name FROM %s", HOST_COMPONENT_DESIREDSTATE_TABLE);
-
-        resultSet = statement.executeQuery(selectSQL);
-
-        while (resultSet.next()) {
-          final Long clusterId = resultSet.getLong("cluster_id");
-          final String componentName = resultSet.getString("component_name");
-          final Long hostId = resultSet.getLong("host_id");
-          final String serviceName = resultSet.getString("service_name");
-
-          String updateSQL = String.format(
-            "UPDATE %s SET %s = %s WHERE cluster_id = %d AND component_name = '%s' AND service_name = '%s' AND host_id = %d",
-            HOST_COMPONENT_DESIREDSTATE_TABLE, HOST_COMPONENT_DESIREDSTATE_ID_COL, id.getAndIncrement(),
-            clusterId, componentName, serviceName, hostId);
-
-          dbAccessor.executeQuery(updateSQL);
-        }
-
-        // Add sequence for hostcomponentdesiredstate table ids
-        addSequence("hostcomponentdesiredstate_id_seq", id.get(), false);
-      }
-
-    }
-    finally {
-      JdbcUtils.closeResultSet(resultSet);
-      JdbcUtils.closeStatement(statement);
-    }
-
-    // make the ID column NON NULL now
-    dbAccessor.alterColumn(HOST_COMPONENT_DESIREDSTATE_TABLE,
-      new DBColumnInfo(HOST_COMPONENT_DESIREDSTATE_ID_COL, Long.class, null, null, false));
-
-    // drop existing PK and create new one on ID column
-    String primaryKeyConstraintName = null;
-    Configuration.DatabaseType databaseType = configuration.getDatabaseType();
-
-    switch (databaseType) {
-      case POSTGRES:
-      case MYSQL:
-      case ORACLE:
-      case SQL_SERVER:
-        primaryKeyConstraintName = dbAccessor.getPrimaryKeyConstraintName(HOST_COMPONENT_DESIREDSTATE_TABLE);
-        break;
-
-      default:
-        throw new UnsupportedOperationException(String.format("Invalid database type '%s'", databaseType));
-
-    }
-
-    // warn if we can't find it
-    if (null == primaryKeyConstraintName) {
-      LOG.warn("Unable to determine the primary key constraint name for {}", HOST_COMPONENT_DESIREDSTATE_TABLE);
-    }
-    else {
-      dbAccessor.dropPKConstraint(HOST_COMPONENT_DESIREDSTATE_TABLE, primaryKeyConstraintName, true);
-    }
-
-    // create a new PK, matching the name of the constraint found in the SQL files
-    dbAccessor.addPKConstraint(HOST_COMPONENT_DESIREDSTATE_TABLE, "PK_hostcomponentdesiredstate", "id");
-
-    // create index, ensuring column order matches that of the SQL files
-    dbAccessor.addUniqueConstraint(HOST_COMPONENT_DESIREDSTATE_TABLE, HOST_COMPONENT_DESIREDSTATE_INDEX,
-      "component_name", "service_name", "host_id", "cluster_id");
-  }
-
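  /*
   * For readability, the PK migration in updateHostComponentDesiredStateTable() above is roughly
   * equivalent to the following sequence (illustrative only; DBAccessor generates the actual,
   * dialect-specific statements, and the old compound PK constraint name is looked up at runtime):
   *
   *   ALTER TABLE hostcomponentdesiredstate ADD id BIGINT;                              -- nullable at first
   *   UPDATE hostcomponentdesiredstate SET id = <next value> WHERE <old compound key>;  -- row by row
   *   ALTER TABLE hostcomponentdesiredstate ALTER COLUMN id SET NOT NULL;
   *   ALTER TABLE hostcomponentdesiredstate DROP CONSTRAINT <old compound PK>;
   *   ALTER TABLE hostcomponentdesiredstate ADD CONSTRAINT PK_hostcomponentdesiredstate PRIMARY KEY (id);
   *   ALTER TABLE hostcomponentdesiredstate ADD CONSTRAINT UQ_hcdesiredstate_name
   *     UNIQUE (component_name, service_name, host_id, cluster_id);
   */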
-  protected void updateAtlasConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          updateAtlasHookConfig(cluster, "HIVE", "hive-env", "hive.atlas.hook");
-          updateAtlasHookConfig(cluster, "STORM", "storm-env", "storm.atlas.hook");
-          updateAtlasHookConfig(cluster, "FALCON", "falcon-env", "falcon.atlas.hook");
-          updateAtlasHookConfig(cluster, "SQOOP", "sqoop-env", "sqoop.atlas.hook");
-        }
-      }
-    }
-  }
-
-  protected void updateAtlasHookConfig(Cluster cluster, String serviceName, String configType, String propertyName) throws AmbariException {
-    Set<String> installedServices = cluster.getServices().keySet();
-    if (installedServices.contains("ATLAS") && installedServices.contains(serviceName)) {
-      Config configEnv = cluster.getDesiredConfigByType(configType);
-      if (configEnv != null) {
-        Map<String, String> newProperties = new HashMap<>();
-        newProperties.put(propertyName, "true");
-        boolean updateProperty = configEnv.getProperties().containsKey(propertyName);
-        updateConfigurationPropertiesForCluster(cluster, configType, newProperties, updateProperty, true);
-      }
-    }
-  }
-
-  /**
-   * Updates Hive Interactive's config in hive-interactive-site.
-   *
-   * @throws AmbariException
-   */
-  private static final String HIVE_INTERACTIVE_ENV = "hive-interactive-env";
-  private static final String HIVE_ENV = "hive-env";
-  protected void updateHIVEInteractiveConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          Config hiveInteractiveEnv = cluster.getDesiredConfigByType(HIVE_INTERACTIVE_ENV);
-          Config hiveEnv = cluster.getDesiredConfigByType(HIVE_ENV);
-          if (hiveInteractiveEnv != null) {
-            String hsiHeapSize = "512";
-            if (hiveEnv != null) {
-              if (hiveEnv.getProperties().containsKey("hive.heapsize")) {
-                hsiHeapSize = hiveEnv.getProperties().get("hive.heapsize");
-                LOG.info("Updating HSI heap size to: " + hsiHeapSize);
-              }
-            }
-            updateConfigurationProperties(HIVE_INTERACTIVE_ENV, Collections.singletonMap("hive_heapsize",
-                                                                                         hsiHeapSize), true, true);
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Updates Log Search configs.
-   *
-   * @throws AmbariException
-   */
-  protected void updateLogSearchConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          Config logSearchProperties = cluster.getDesiredConfigByType("logsearch-properties");
-          if (logSearchProperties != null) {
-            Map<String, String> newProperties = new HashMap<>();
-            if (!logSearchProperties.getProperties().containsKey("logsearch.auth.external_auth.enabled"))
-              newProperties.put("logsearch.auth.external_auth.enabled", logSearchProperties.getProperties().get("logsearch.external.auth.enabled"));
-            if (!logSearchProperties.getProperties().containsKey("logsearch.auth.external_auth.host_url"))
-              newProperties.put("logsearch.auth.external_auth.host_url", logSearchProperties.getProperties().get("logsearch.external.auth.host_url"));
-            if (!logSearchProperties.getProperties().containsKey("logsearch.auth.external_auth.login_url"))
-              newProperties.put("logsearch.auth.external_auth.login_url", logSearchProperties.getProperties().get("logsearch.external.auth.login_url"));
-            
-            Set<String> removeProperties = new HashSet<>();
-            removeProperties.add("logsearch.external.auth.enabled");
-            removeProperties.add("logsearch.external.auth.host_url");
-            removeProperties.add("logsearch.external.auth.login_url");
-            
-            updateConfigurationPropertiesForCluster(cluster, "logsearch-properties", newProperties, removeProperties, true, true);
-          }
-          
-          Config logfeederEnvProperties = cluster.getDesiredConfigByType("logfeeder-env");
-          if (logfeederEnvProperties != null) {
-            String content = logfeederEnvProperties.getProperties().get("content");
-            if (content.contains("infra_solr_ssl_enabled")) {
-              content = content.replace("infra_solr_ssl_enabled", "logfeeder_use_ssl");
-              updateConfigurationPropertiesForCluster(cluster, "logfeeder-env", Collections.singletonMap("content", content), true, true);
-            }
-          }
-          
-          Config logsearchEnvProperties = cluster.getDesiredConfigByType("logsearch-env");
-          if (logsearchEnvProperties != null) {
-            Map<String, String> newProperties = new HashMap<>();
-            String content = logsearchEnvProperties.getProperties().get("content");
-            if (content.contains("infra_solr_ssl_enabled or logsearch_ui_protocol == 'https'")) {
-              content = content.replace("infra_solr_ssl_enabled or logsearch_ui_protocol == 'https'", "logsearch_use_ssl");
-            }
-            if (!content.equals(logsearchEnvProperties.getProperties().get("content"))) {
-              newProperties.put("content", content);
-            }
-            
-            if ("http".equals(logsearchEnvProperties.getProperties().get("logsearch_ui_protocol")) &&
-                "/etc/security/serverKeys/logsearch.trustStore.jks".equals(logsearchEnvProperties.getProperties().get("logsearch_truststore_location")) &&
-                "/etc/security/serverKeys/logsearch.keyStore.jks".equals(logsearchEnvProperties.getProperties().get("logsearch_keystore_location"))) {
-              newProperties.put("logsearch_truststore_location", "/etc/ambari-logsearch-portal/conf/keys/logsearch.jks");
-              newProperties.put("logsearch_keystore_location", "/etc/ambari-logsearch-portal/conf/keys/logsearch.jks");
-            }
-            
-            Set<String> removeProperties = new HashSet<>();
-            removeProperties.add("logsearch_solr_audit_logs_use_ranger");
-            removeProperties.add("logsearch_solr_audit_logs_zk_node");
-            removeProperties.add("logsearch_solr_audit_logs_zk_quorum");
-            
-            updateConfigurationPropertiesForCluster(cluster, "logsearch-env", newProperties, removeProperties, true, true);
-          }
-          
-          Config logfeederLog4jProperties = cluster.getDesiredConfigByType("logfeeder-log4j");
-          if (logfeederLog4jProperties != null) {
-            Map<String, String> newProperties = new HashMap<>();
-            
-            String content = logfeederLog4jProperties.getProperties().get("content");
-            content = SchemaUpgradeUtil.extractProperty(content, "logfeeder_log_maxfilesize", "logfeeder_log_maxfilesize",
-                "    <param name=\"file\" value=\"\\{\\{logfeeder_log_dir}}/logfeeder.log\"/>\n" +
-                "    <param name=\"append\" value=\"true\"/>\n" +
-                "    <param name=\"maxFileSize\" value=\"(\\w+)MB\"/>", "10", newProperties);
-            content = SchemaUpgradeUtil.extractProperty(content, "logfeeder_log_maxbackupindex", "logfeeder_log_maxbackupindex",
-                "    <param name=\"file\" value=\"\\{\\{logfeeder_log_dir}}/logfeeder.log\"/>\n" +
-                "    <param name=\"append\" value=\"true\"/>\n" +
-                "    <param name=\"maxFileSize\" value=\"\\{\\{logfeeder_log_maxfilesize}}MB\"/>\n" +
-                "    <param name=\"maxBackupIndex\" value=\"(\\w+)\"/>", "10", newProperties);
-            content = SchemaUpgradeUtil.extractProperty(content, "logfeeder_json_log_maxfilesize", "logfeeder_json_log_maxfilesize",
-                "    <param name=\"file\" value=\"\\{\\{logfeeder_log_dir}}/logsearch-logfeeder.json\" />\n" +
-                "    <param name=\"append\" value=\"true\" />\n" +
-                "    <param name=\"maxFileSize\" value=\"(\\w+)MB\" />", "10", newProperties);
-            content = SchemaUpgradeUtil.extractProperty(content, "logfeeder_json_log_maxbackupindex", "logfeeder_json_log_maxbackupindex",
-                "    <param name=\"file\" value=\"\\{\\{logfeeder_log_dir}}/logsearch-logfeeder.json\" />\n" +
-                "    <param name=\"append\" value=\"true\" />\n" +
-                "    <param name=\"maxFileSize\" value=\"\\{\\{logfeeder_json_log_maxfilesize}}MB\" />\n" +
-                "    <param name=\"maxBackupIndex\" value=\"(\\w+)\" />", "10", newProperties);
-            
-            newProperties.put("content", content);
-            updateConfigurationPropertiesForCluster(cluster, "logfeeder-log4j", newProperties, true, true);
-          }
-          
-          Config logsearchLog4jProperties = cluster.getDesiredConfigByType("logsearch-log4j");
-          if (logsearchLog4jProperties != null) {
-            Map<String, String> newProperties = new HashMap<>();
-
-            String content = logsearchLog4jProperties.getProperties().get("content");
-            if (content.contains("{{logsearch_log_dir}}/logsearch.err")) {
-              content = content.replace("{{logsearch_log_dir}}/logsearch.err", "{{logsearch_log_dir}}/logsearch.log");
-            }
-            if (content.contains("<priority value=\"warn\"/>")) {
-              content = content.replace("<priority value=\"warn\"/>", "<priority value=\"info\"/>");
-            }
-            
-
-            content = SchemaUpgradeUtil.extractProperty(content, "logsearch_log_maxfilesize", "logsearch_log_maxfilesize",
-                "    <param name=\"file\" value=\"\\{\\{logsearch_log_dir}}/logsearch.log\" />\n" +
-                "    <param name=\"Threshold\" value=\"info\" />\n" +
-                "    <param name=\"append\" value=\"true\" />\n" +
-                "    <param name=\"maxFileSize\" value=\"(\\w+)MB\" />\n", "10", newProperties);
-            content = SchemaUpgradeUtil.extractProperty(content, "logsearch_log_maxbackupindex", "logsearch_log_maxbackupindex",
-                "    <param name=\"file\" value=\"\\{\\{logsearch_log_dir}}/logsearch.log\" />\n" +
-                "    <param name=\"Threshold\" value=\"info\" />\n" +
-                "    <param name=\"append\" value=\"true\" />\n" +
-                "    <param name=\"maxFileSize\" value=\"\\{\\{logsearch_log_maxfilesize}}MB\" />\n" +
-                "    <param name=\"maxBackupIndex\" value=\"(\\w+)\" />\n", "10", newProperties);
-            content = SchemaUpgradeUtil.extractProperty(content, "logsearch_json_log_maxfilesize", "logsearch_json_log_maxfilesize",
-                "    <param name=\"file\" value=\"\\{\\{logsearch_log_dir}}/logsearch.json\"/>\n" +
-                "    <param name=\"append\" value=\"true\"/>\n" +
-                "    <param name=\"maxFileSize\" value=\"(\\w+)MB\"/>\n", "10", newProperties);
-            content = SchemaUpgradeUtil.extractProperty(content, "logsearch_json_log_maxbackupindex", "logsearch_json_log_maxbackupindex",
-                "    <param name=\"file\" value=\"\\{\\{logsearch_log_dir}}/logsearch.json\"/>\n" +
-                "    <param name=\"append\" value=\"true\"/>\n" +
-                "    <param name=\"maxFileSize\" value=\"\\{\\{logsearch_json_log_maxfilesize}}MB\"/>\n" +
-                "    <param name=\"maxBackupIndex\" value=\"(\\w+)\"/>\n", "10", newProperties);
-            content = SchemaUpgradeUtil.extractProperty(content, "logsearch_audit_log_maxfilesize", "logsearch_audit_log_maxfilesize",
-                "    <param name=\"file\" value=\"\\{\\{logsearch_log_dir}}/logsearch-audit.json\"/>\n" +
-                "    <param name=\"append\" value=\"true\"/>\n" +
-                "    <param name=\"maxFileSize\" value=\"(\\w+)MB\"/>\n", "10", newProperties);
-            content = SchemaUpgradeUtil.extractProperty(content, "logsearch_audit_log_maxbackupindex", "logsearch_audit_log_maxbackupindex",
-                "    <param name=\"file\" value=\"\\{\\{logsearch_log_dir}}/logsearch-audit.json\"/>\n" +
-                "    <param name=\"append\" value=\"true\"/>\n" +
-                "    <param name=\"maxFileSize\" value=\"\\{\\{logsearch_audit_log_maxfilesize}}MB\"/>\n" +
-                "    <param name=\"maxBackupIndex\" value=\"(\\w+)\"/>\n", "10", newProperties);
-            content = SchemaUpgradeUtil.extractProperty(content, "logsearch_perf_log_maxfilesize", "logsearch_perf_log_maxfilesize",
-                "    <param name=\"file\" value=\"\\{\\{logsearch_log_dir}}/logsearch-performance.json\"/>\n" +
-                "    <param name=\"Threshold\" value=\"info\"/>\n" +
-                "    <param name=\"append\" value=\"true\"/>\n" +
-                "    <param name=\"maxFileSize\" value=\"(\\w+)MB\"/>\n", "10", newProperties);
-            content = SchemaUpgradeUtil.extractProperty(content, "logsearch_perf_log_maxbackupindex", "logsearch_perf_log_maxbackupindex",
-                "    <param name=\"file\" value=\"\\{\\{logsearch_log_dir}}/logsearch-performance.json\"/>\n" +
-                "    <param name=\"Threshold\" value=\"info\"/>\n" +
-                "    <param name=\"append\" value=\"true\"/>\n" +
-                "    <param name=\"maxFileSize\" value=\"\\{\\{logsearch_perf_log_maxfilesize}}MB\"/>\n" +
-                "    <param name=\"maxBackupIndex\" value=\"(\\w+)\"/>\n", "10", newProperties);
-            
-            newProperties.put("content", content);
-            if (!content.equals(logsearchLog4jProperties.getProperties().get("content"))) {
-              updateConfigurationPropertiesForCluster(cluster, "logsearch-log4j", newProperties, true, true);
-            }
-          }
-        }
-      }
-    }
-  }
-  
-  /**
-   * Updates Ambari Infra configs.
-   *
-   * @throws AmbariException
-   */
-  protected void updateAmbariInfraConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          Config infraSolrEnvProperties = cluster.getDesiredConfigByType("infra-solr-env");
-          if (infraSolrEnvProperties != null) {
-            String content = infraSolrEnvProperties.getProperties().get("content");
-            if (content.contains("SOLR_SSL_TRUST_STORE={{infra_solr_keystore_location}}")) {
-              content = content.replace("SOLR_SSL_TRUST_STORE={{infra_solr_keystore_location}}", "SOLR_SSL_TRUST_STORE={{infra_solr_truststore_location}}");
-            }
-            if (content.contains("SOLR_SSL_TRUST_STORE_PASSWORD={{infra_solr_keystore_password}}")) {
-              content = content.replace("SOLR_SSL_TRUST_STORE_PASSWORD={{infra_solr_keystore_password}}", "SOLR_SSL_TRUST_STORE_PASSWORD={{infra_solr_truststore_password}}");
-            }
-            if (content.contains("SOLR_KERB_NAME_RULES={{infra_solr_kerberos_name_rules}}")) {
-              content = content.replace("SOLR_KERB_NAME_RULES={{infra_solr_kerberos_name_rules}}", "SOLR_KERB_NAME_RULES=\"{{infra_solr_kerberos_name_rules}}\"");
-            }
-            if (content.contains(" -Dsolr.kerberos.name.rules=${SOLR_KERB_NAME_RULES}")) {
-              content = content.replace(" -Dsolr.kerberos.name.rules=${SOLR_KERB_NAME_RULES}", "");
-            }
-            if (!content.equals(infraSolrEnvProperties.getProperties().get("content"))) {
-              updateConfigurationPropertiesForCluster(cluster, "infra-solr-env", Collections.singletonMap("content", content), true, true);
-            }
-          }
-          
-          Config infraSolrLog4jProperties = cluster.getDesiredConfigByType("infra-solr-log4j");
-          if (infraSolrLog4jProperties != null) {
-            Map<String, String> newProperties = new HashMap<>();
-            
-            String content = infraSolrLog4jProperties.getProperties().get("content");
-            content = SchemaUpgradeUtil.extractProperty(content, "infra_log_maxfilesize", "infra_log_maxfilesize",
-                "log4j.appender.file.MaxFileSize=(\\w+)MB", "10", newProperties);
-            content = SchemaUpgradeUtil.extractProperty(content, "infra_log_maxbackupindex", "infra_log_maxbackupindex",
-                "log4j.appender.file.MaxBackupIndex=(\\w+)\n", "9", newProperties);
-            
-            newProperties.put("content", content);
-            updateConfigurationPropertiesForCluster(cluster, "infra-solr-log4j", newProperties, true, true);
-          }
-          
-          Config infraSolrClientLog4jProperties = cluster.getDesiredConfigByType("infra-solr-client-log4j");
-          if (infraSolrClientLog4jProperties != null) {
-            Map<String, String> newProperties = new HashMap<>();
-            
-            String content = infraSolrClientLog4jProperties.getProperties().get("content");
-            if (content.contains("infra_client_log")) {
-              content = content.replace("infra_client_log", "solr_client_log");
-            }
-            
-            content = SchemaUpgradeUtil.extractProperty(content, "infra_client_log_maxfilesize", "solr_client_log_maxfilesize",
-                "log4j.appender.file.MaxFileSize=(\\w+)MB", "80", newProperties);
-            content = SchemaUpgradeUtil.extractProperty(content, "infra_client_log_maxbackupindex", "solr_client_log_maxbackupindex",
-                "log4j.appender.file.MaxBackupIndex=(\\w+)\n", "60", newProperties);
-            
-            newProperties.put("content", content);
-            updateConfigurationPropertiesForCluster(cluster, "infra-solr-client-log4j", newProperties, true, true);
-          }
-        }
-      }
-    }
-  }
-  
-  /**
-   * Add permissions for managing service auto-start.
-   * <p>
-   * <ul>
-   * <li>SERVICE.MANAGE_AUTO_START permissions for SERVICE.ADMINISTRATOR, CLUSTER.OPERATOR, CLUSTER.ADMINISTRATOR, AMBARI.ADMINISTRATOR</li>
-   * <li>CLUSTER.MANAGE_AUTO_START permissions for CLUSTER.OPERATOR, CLUSTER.ADMINISTRATOR, AMBARI.ADMINISTRATOR</li>
-   * </ul>
-   */
-  protected void addManageServiceAutoStartPermissions() throws SQLException {
-    Collection<String> roles;
-
-    // Add service-level auto-start permission
-    roles = Arrays.asList(
-        "AMBARI.ADMINISTRATOR:AMBARI",
-        "CLUSTER.ADMINISTRATOR:CLUSTER",
-        "CLUSTER.OPERATOR:CLUSTER",
-        "SERVICE.ADMINISTRATOR:CLUSTER");
-    addRoleAuthorization("SERVICE.MANAGE_AUTO_START", "Manage service auto-start", roles);
-
-    // Add cluster-level auto-start permission
-    roles = Arrays.asList(
-        "AMBARI.ADMINISTRATOR:AMBARI",
-        "CLUSTER.ADMINISTRATOR:CLUSTER",
-        "CLUSTER.OPERATOR:CLUSTER");
-    addRoleAuthorization("CLUSTER.MANAGE_AUTO_START", "Manage service auto-start configuration", roles);
-  }
-
-  /**
-   * Add permissions for managing alert notifications configuration.
-   * <p>
-   * <ul>
-   * <li>CLUSTER.MANAGE_ALERT_NOTIFICATIONS permissions for AMBARI.ADMINISTRATOR, CLUSTER.ADMINISTRATOR</li>
-   * </ul>
-   */
-  protected void addManageAlertNotificationsPermissions() throws SQLException {
-    Collection<String> roles;
-    roles = Arrays.asList(
-        "AMBARI.ADMINISTRATOR:AMBARI",
-        "CLUSTER.ADMINISTRATOR:CLUSTER");
-    addRoleAuthorization("CLUSTER.MANAGE_ALERT_NOTIFICATIONS", "Manage alert notifications configuration", roles);
-  }
-
-  /**
-   * Updates the Ranger admin URL for the Ranger plugin supported configs.
-   *
-   * @throws AmbariException
-   */
-  protected void updateRangerUrlConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-
-      Config ranger_admin_properties = cluster.getDesiredConfigByType("admin-properties");
-      if(null != ranger_admin_properties) {
-        String policyUrl = ranger_admin_properties.getProperties().get("policymgr_external_url");
-        if (null != policyUrl) {
-          updateRangerUrl(cluster, "ranger-hdfs-security", "ranger.plugin.hdfs.policy.rest.url", policyUrl);
-          updateRangerUrl(cluster, "ranger-hive-security", "ranger.plugin.hive.policy.rest.url", policyUrl);
-          updateRangerUrl(cluster, "ranger-hbase-security", "ranger.plugin.hbase.policy.rest.url", policyUrl);
-          updateRangerUrl(cluster, "ranger-knox-security", "ranger.plugin.knox.policy.rest.url", policyUrl);
-          updateRangerUrl(cluster, "ranger-storm-security", "ranger.plugin.storm.policy.rest.url", policyUrl);
-          updateRangerUrl(cluster, "ranger-yarn-security", "ranger.plugin.yarn.policy.rest.url", policyUrl);
-          updateRangerUrl(cluster, "ranger-kafka-security", "ranger.plugin.kafka.policy.rest.url", policyUrl);
-          updateRangerUrl(cluster, "ranger-atlas-security", "ranger.plugin.atlas.policy.rest.url", policyUrl);
-          updateRangerUrl(cluster, "ranger-kms-security", "ranger.plugin.kms.policy.rest.url", policyUrl);
-        }
-      }
-    }
-  }
-
-  protected void updateRangerUrl(Cluster cluster, String configType, String configProperty, String policyUrl) throws AmbariException {
-    Config componentSecurity = cluster.getDesiredConfigByType(configType);
-    if(componentSecurity != null && componentSecurity.getProperties().containsKey(configProperty)) {
-      Map<String, String> updateProperty = new HashMap<>();
-      updateProperty.put(configProperty, policyUrl);
-      updateConfigurationPropertiesForCluster(cluster, configType, updateProperty, true, false);
-    }
-  }
-
-
-
-}
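
The removed methods above repeatedly call SchemaUpgradeUtil.extractProperty to pull a hard-coded log4j value (such as maxFileSize) out of a config's "content" blob, expose it as a separate property, and replace the literal with a {{template}} token that the later regexes in the hunk then match on. The standalone sketch below only illustrates that kind of transformation; the class name, the simplified single-name signature and the sample content string are assumptions for the example, not the real SchemaUpgradeUtil code.

import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/** Rough, self-contained illustration of the extract-and-templatize pattern; not the real SchemaUpgradeUtil. */
public class ExtractPropertySketch {

  // Captures group 1 of 'regex' inside 'content' (falling back to 'defaultValue'),
  // records it under 'propertyName', and swaps the literal for a {{propertyName}} token.
  static String extract(String content, String propertyName, String regex,
                        String defaultValue, Map<String, String> newProperties) {
    Matcher m = Pattern.compile(regex).matcher(content);
    String value = defaultValue;
    if (m.find()) {
      value = m.group(1);
      content = content.substring(0, m.start(1)) + "{{" + propertyName + "}}"
          + content.substring(m.end(1));
    }
    newProperties.put(propertyName, value);
    return content;
  }

  public static void main(String[] args) {
    Map<String, String> newProps = new HashMap<>();
    String content = "    <param name=\"maxFileSize\" value=\"25MB\"/>";
    content = extract(content, "logfeeder_log_maxfilesize",
        "<param name=\"maxFileSize\" value=\"(\\w+)MB\"/>", "10", newProps);
    // newProps now holds {logfeeder_log_maxfilesize=25} and content references
    // {{logfeeder_log_maxfilesize}}MB, which is the token the follow-up regexes expect.
    System.out.println(newProps);
    System.out.println(content);
  }
}

Replacing only the captured group keeps the surrounding XML intact, which is why the subsequent extractProperty calls can still anchor on the full <param .../> lines.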

http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
index 9b44c53..5fdc885 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog300.java
@@ -76,6 +76,7 @@ public class UpgradeCatalog300 extends AbstractUpgradeCatalog {
   protected static final String CLUSTER_CONFIG_MAPPING_TABLE = "clusterconfigmapping";
   protected static final String HOST_ROLE_COMMAND_TABLE = "host_role_command";
   protected static final String HRC_OPS_DISPLAY_NAME_COLUMN = "ops_display_name";
+  protected static final String COMPONENT_TABLE = "servicecomponentdesiredstate";
 
   @Inject
   DaoUtils daoUtils;
@@ -112,7 +113,7 @@ public class UpgradeCatalog300 extends AbstractUpgradeCatalog {
    */
   @Override
   public String getSourceVersion() {
-    return "2.5.0";
+    return "2.5.2";
   }
 
   /**
@@ -181,7 +182,7 @@ public class UpgradeCatalog300 extends AbstractUpgradeCatalog {
    * @throws SQLException
    */
   protected void addServiceComponentColumn() throws SQLException {
-    dbAccessor.addColumn(UpgradeCatalog250.COMPONENT_TABLE,
+    dbAccessor.addColumn(COMPONENT_TABLE,
         new DBColumnInfo("repo_state", String.class, 255,
             RepositoryVersionState.NOT_REQUIRED.name(), false));
   }
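
For readers less familiar with DBColumnInfo, the addServiceComponentColumn() hunk above adds a non-nullable repo_state VARCHAR(255) column, defaulted to NOT_REQUIRED, to servicecomponentdesiredstate. As a very rough sketch only (the real DDL is generated by DBAccessor and varies by database dialect; the PostgreSQL syntax, JDBC URL and credentials below are placeholder assumptions), the change amounts to something like:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

/** Sketch only: the approximate SQL effect of addServiceComponentColumn() above.
 *  Ambari builds the real, dialect-specific DDL through DBAccessor; the PostgreSQL
 *  syntax, JDBC URL and credentials here are placeholder assumptions. */
public class AddRepoStateColumnSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection(
             "jdbc:postgresql://localhost/ambari", "ambari", "password");
         Statement stmt = conn.createStatement()) {
      // repo_state: VARCHAR(255), NOT NULL, defaulted to RepositoryVersionState.NOT_REQUIRED
      stmt.executeUpdate("ALTER TABLE servicecomponentdesiredstate "
          + "ADD COLUMN repo_state VARCHAR(255) DEFAULT 'NOT_REQUIRED' NOT NULL");
    }
  }
}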


[24/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java
deleted file mode 100644
index 3461161..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog210.java
+++ /dev/null
@@ -1,1765 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.text.MessageFormat;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.UUID;
-import java.util.regex.Matcher;
-
-import javax.persistence.EntityManager;
-import javax.persistence.Query;
-import javax.persistence.criteria.CriteriaBuilder;
-import javax.persistence.criteria.CriteriaDelete;
-import javax.persistence.criteria.Root;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
-import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
-import org.apache.ambari.server.orm.dao.ArtifactDAO;
-import org.apache.ambari.server.orm.dao.DaoUtils;
-import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
-import org.apache.ambari.server.orm.dao.StackDAO;
-import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
-import org.apache.ambari.server.orm.entities.ArtifactEntity;
-import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
-import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.StackEntity;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
-import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptorFactory;
-import org.apache.ambari.server.state.stack.OsFamily;
-import org.apache.ambari.server.utils.VersionUtils;
-import org.apache.commons.lang.StringUtils;
-import org.eclipse.persistence.internal.databaseaccess.FieldTypeDefinition;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.gson.Gson;
-import com.google.gson.JsonElement;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParser;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-
-
-/**
- * Upgrade catalog for version 2.1.0.
- */
-public class UpgradeCatalog210 extends AbstractUpgradeCatalog {
-  private static final String CLUSTERS_TABLE = "clusters";
-  private static final String CLUSTER_HOST_MAPPING_TABLE = "ClusterHostMapping";
-  private static final String HOSTS_TABLE = "hosts";
-  private static final String HOST_COMPONENT_DESIRED_STATE_TABLE = "hostcomponentdesiredstate";
-  private static final String HOST_COMPONENT_STATE_TABLE = "hostcomponentstate";
-  private static final String HOST_STATE_TABLE = "hoststate";
-  private static final String HOST_VERSION_TABLE = "host_version";
-  private static final String HOST_ROLE_COMMAND_TABLE = "host_role_command";
-  private static final String HOST_CONFIG_MAPPING_TABLE = "hostconfigmapping";
-  private static final String CONFIG_GROUP_HOST_MAPPING_TABLE = "configgrouphostmapping";
-  private static final String CONFIG_GROUP_TABLE = "configgroup";
-  private static final String KERBEROS_PRINCIPAL_HOST_TABLE = "kerberos_principal_host";
-  private static final String KERBEROS_PRINCIPAL_TABLE = "kerberos_principal";
-  private static final String REQUEST_OPERATION_LEVEL_TABLE = "requestoperationlevel";
-  private static final String SERVICE_COMPONENT_DESIRED_STATE_TABLE = "servicecomponentdesiredstate";
-  private static final String SERVICE_CONFIG_TABLE = "serviceconfig";
-  private static final String SERVICE_CONFIG_HOSTS_TABLE = "serviceconfighosts";
-  private static final String WIDGET_TABLE = "widget";
-  private static final String WIDGET_LAYOUT_TABLE = "widget_layout";
-  private static final String WIDGET_LAYOUT_USER_WIDGET_TABLE = "widget_layout_user_widget";
-  private static final String VIEW_TABLE = "viewmain";
-  private static final String VIEW_INSTANCE_TABLE = "viewinstance";
-  private static final String VIEW_PARAMETER_TABLE = "viewparameter";
-  private static final String STACK_TABLE = "stack";
-  private static final String REPO_VERSION_TABLE = "repo_version";
-  private static final String ALERT_HISTORY_TABLE = "alert_history";
-  private static final String HOST_ID_COL = "host_id";
-  private static final String HOST_NAME_COL = "host_name";
-  private static final String PUBLIC_HOST_NAME_COL = "public_host_name";
-  private static final String TOPOLOGY_REQUEST_TABLE = "topology_request";
-  private static final String TOPOLOGY_HOST_GROUP_TABLE = "topology_hostgroup";
-  private static final String TOPOLOGY_HOST_INFO_TABLE = "topology_host_info";
-  private static final String TOPOLOGY_LOGICAL_REQUEST_TABLE = "topology_logical_request";
-  private static final String TOPOLOGY_HOST_REQUEST_TABLE = "topology_host_request";
-  private static final String TOPOLOGY_HOST_TASK_TABLE = "topology_host_task";
-  private static final String TOPOLOGY_LOGICAL_TASK_TABLE = "topology_logical_task";
-  private static final String HDFS_SITE_CONFIG = "hdfs-site";
-  private static final String RANGER_SITE_CONFIG = "ranger-site";
-
-  // constants for stack table changes
-  private static final String STACK_ID_COLUMN_NAME = "stack_id";
-  private static final String DESIRED_STACK_ID_COLUMN_NAME = "desired_stack_id";
-  private static final String CURRENT_STACK_ID_COLUMN_NAME = "current_stack_id";
-  private static final String DESIRED_STACK_VERSION_COLUMN_NAME = "desired_stack_version";
-  private static final String CURRENT_STACK_VERSION_COLUMN_NAME = "current_stack_version";
-  private static final DBColumnInfo DESIRED_STACK_ID_COLUMN = new DBColumnInfo(DESIRED_STACK_ID_COLUMN_NAME, Long.class, null, null, true);
-  private static final DBColumnInfo CURRENT_STACK_ID_COLUMN = new DBColumnInfo(CURRENT_STACK_ID_COLUMN_NAME, Long.class, null, null, true);
-  private static final DBColumnInfo STACK_ID_COLUMN = new DBColumnInfo(STACK_ID_COLUMN_NAME, Long.class, null, null, true);
-
-  @Inject
-  DaoUtils daoUtils;
-
-  @Inject
-  private OsFamily osFamily;
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getSourceVersion() {
-    return "2.0.0";
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getTargetVersion() {
-    return "2.1.0";
-  }
-
-  /**
-   * Logger.
-   */
-  private static final Logger LOG = LoggerFactory.getLogger
-      (UpgradeCatalog210.class);
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Don't forget to register new UpgradeCatalogs in {@link org.apache.ambari.server.upgrade.SchemaUpgradeHelper.UpgradeHelperModule#configure()}
-   * @param injector Guice injector used to track dependencies and to inject them via bindings.
-   */
-  @Inject
-  public UpgradeCatalog210(Injector injector) {
-    super(injector);
-    this.injector = injector;
-
-    daoUtils = injector.getInstance(DaoUtils.class);
-    osFamily = injector.getInstance(OsFamily.class);
-  }
-
-  // ----- AbstractUpgradeCatalog --------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDDLUpdates() throws AmbariException, SQLException {
-    executeAlertDDLUpdates();
-    executeHostsDDLUpdates();
-    executeWidgetDDLUpdates();
-    executeStackDDLUpdates();
-    executeTopologyDDLUpdates();
-    executeViewDDLUpdates();
-  }
-
-  private void executeTopologyDDLUpdates() throws AmbariException, SQLException {
-    List<DBColumnInfo> columns = new ArrayList<>();
-
-    columns.add(new DBColumnInfo("id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("action", String.class, 255, null, false));
-    columns.add(new DBColumnInfo("cluster_name", String.class, 100, null, false));
-    columns.add(new DBColumnInfo("bp_name", String.class, 100, null, false));
-    columns.add(new DBColumnInfo("cluster_properties", char[].class, null, null, true));
-    columns.add(new DBColumnInfo("cluster_attributes", char[].class, null, null, true));
-    columns.add(new DBColumnInfo("description", String.class, 1024, null, true));
-
-    dbAccessor.createTable(TOPOLOGY_REQUEST_TABLE, columns, "id");
-
-    columns.clear();
-    columns.add(new DBColumnInfo("id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("name", String.class, 255, null, false));
-    columns.add(new DBColumnInfo("group_properties", char[].class, null, null, true));
-    columns.add(new DBColumnInfo("group_attributes", char[].class, null, null, true));
-    columns.add(new DBColumnInfo("request_id", Long.class, null, null, false));
-
-    dbAccessor.createTable(TOPOLOGY_HOST_GROUP_TABLE, columns, "id");
-    dbAccessor.addFKConstraint(TOPOLOGY_HOST_GROUP_TABLE, "FK_hostgroup_req_id", "request_id", TOPOLOGY_REQUEST_TABLE, "id", false, false);
-
-    columns.clear();
-    columns.add(new DBColumnInfo("id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("group_id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("fqdn", String.class, 255, null, true));
-    columns.add(new DBColumnInfo("host_count", Integer.class, null, null, true));
-    columns.add(new DBColumnInfo("predicate", String.class, 2048, null, true));
-
-    dbAccessor.createTable(TOPOLOGY_HOST_INFO_TABLE, columns, "id");
-    dbAccessor.addFKConstraint(TOPOLOGY_HOST_INFO_TABLE, "FK_hostinfo_group_id", "group_id", TOPOLOGY_HOST_GROUP_TABLE, "id", false, false);
-
-    columns.clear();
-    columns.add(new DBColumnInfo("id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("request_id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("description", String.class, 1024, null, true));
-
-    dbAccessor.createTable(TOPOLOGY_LOGICAL_REQUEST_TABLE, columns, "id");
-    dbAccessor.addFKConstraint(TOPOLOGY_LOGICAL_REQUEST_TABLE, "FK_logicalreq_req_id", "request_id", TOPOLOGY_REQUEST_TABLE, "id", false, false);
-
-    columns.clear();
-    columns.add(new DBColumnInfo("id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("logical_request_id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("group_id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("stage_id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("host_name", String.class, 255, null, true));
-
-    dbAccessor.createTable(TOPOLOGY_HOST_REQUEST_TABLE, columns, "id");
-    dbAccessor.addFKConstraint(TOPOLOGY_HOST_REQUEST_TABLE, "FK_hostreq_logicalreq_id", "logical_request_id", TOPOLOGY_LOGICAL_REQUEST_TABLE, "id", false, false);
-    dbAccessor.addFKConstraint(TOPOLOGY_HOST_REQUEST_TABLE, "FK_hostreq_group_id", "group_id", TOPOLOGY_HOST_GROUP_TABLE, "id", false, false);
-
-    columns.clear();
-    columns.add(new DBColumnInfo("id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("host_request_id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("type", String.class, 255, null, false));
-    dbAccessor.createTable(TOPOLOGY_HOST_TASK_TABLE, columns, "id");
-    dbAccessor.addFKConstraint(TOPOLOGY_HOST_TASK_TABLE, "FK_hosttask_req_id", "host_request_id", TOPOLOGY_HOST_REQUEST_TABLE, "id", false, false);
-
-    columns.clear();
-    columns.add(new DBColumnInfo("id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("host_task_id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("physical_task_id", Long.class, null, null, true));
-    columns.add(new DBColumnInfo("component", String.class, 255, null, false));
-    dbAccessor.createTable(TOPOLOGY_LOGICAL_TASK_TABLE, columns, "id");
-    dbAccessor.addFKConstraint(TOPOLOGY_LOGICAL_TASK_TABLE, "FK_ltask_hosttask_id", "host_task_id", TOPOLOGY_HOST_TASK_TABLE, "id", false, false);
-    dbAccessor.addFKConstraint(TOPOLOGY_LOGICAL_TASK_TABLE, "FK_ltask_hrc_id", "physical_task_id", "host_role_command", "task_id", false, false);
-
-    // Sequence updates
-    addSequences(Arrays.asList(
-      "topology_host_info_id_seq",
-      "topology_host_request_id_seq",
-      "topology_host_task_id_seq",
-      "topology_logical_request_id_seq",
-      "topology_logical_task_id_seq",
-      "topology_request_id_seq",
-      "topology_host_group_id_seq"
-    ), 0L, false);
-  }
-
-
-  private void executeAlertDDLUpdates() throws AmbariException, SQLException {
-    // Fix the latest_text column type so that it matches across all DBMS types
-    Configuration.DatabaseType databaseType = configuration.getDatabaseType();
-
-    // MySQL columns are already TEXT, but we need to make sure of that, since LONGTEXT would really slow the database down when the alerts are queried frequently
-    if (Configuration.DatabaseType.MYSQL == databaseType) {
-      dbAccessor.alterColumn("alert_current", new DBColumnInfo("latest_text", new FieldTypeDefinition("TEXT"), null));
-      dbAccessor.alterColumn("alert_history", new DBColumnInfo("alert_text", new FieldTypeDefinition("TEXT"), null));
-    } else {
-      dbAccessor.changeColumnType("alert_current", "latest_text", String.class, char[].class);
-      dbAccessor.changeColumnType("alert_history", "alert_text", String.class, char[].class);
-    }
-
-  }
-
-  private void executeViewDDLUpdates() throws AmbariException, SQLException {
-    // cluster association
-    dbAccessor.addColumn(VIEW_INSTANCE_TABLE, new DBColumnInfo("cluster_handle", String.class, 255, null, true));
-    // determine whether to alter the names of the dynamic entities / attributes to
-    // avoid db reserved word conflicts.  should be false for existing instances
-    // for backward compatibility.
-    dbAccessor.addColumn(VIEW_INSTANCE_TABLE, new DBColumnInfo("alter_names", Integer.class, 0, 0, false));
-    // cluster configuration
-    dbAccessor.addColumn(VIEW_PARAMETER_TABLE, new DBColumnInfo("cluster_config", String.class, 255, null, true));
-    // view build number
-    dbAccessor.addColumn(VIEW_TABLE, new DBColumnInfo("build", String.class, 128, null, true));
-  }
-
-  /**
-   * Execute all of the hosts DDL updates.
-   *
-   * @throws org.apache.ambari.server.AmbariException
-   * @throws java.sql.SQLException
-   */
-  private void executeHostsDDLUpdates() throws AmbariException, SQLException {
-    Configuration.DatabaseType databaseType = configuration.getDatabaseType();
-
-    String randomHostName = null;
-    if (dbAccessor.tableHasData(HOST_ROLE_COMMAND_TABLE)) {
-      randomHostName = getRandomHostName();
-      if (StringUtils.isBlank(randomHostName)) {
-        throw new AmbariException("UpgradeCatalog210 could not retrieve a random host_name from the hosts table while running executeHostsDDLUpdates.");
-      }
-    }
-
-    dbAccessor.addColumn(HOSTS_TABLE, new DBColumnInfo(HOST_ID_COL, Long.class, null, null, true));
-
-    // Sequence value for the hosts table primary key. First record will be 1, so ambari_sequence value must be 0.
-    Long hostId = 0L;
-    Statement statement = null;
-    ResultSet rs = null;
-    try {
-      statement = dbAccessor.getConnection().createStatement();
-      if (statement != null) {
-        rs = statement.executeQuery("SELECT host_name, host_id FROM hosts ORDER BY host_id ASC, host_name ASC");
-        if (rs != null) {
-          hostId = populateHostsId(rs);
-        }
-      }
-    } finally {
-      if (rs != null) {
-        rs.close();
-      }
-      if (statement != null) {
-        statement.close();
-      }
-    }
-    // Insert host id number into ambari_sequences
-    addSequence("host_id_seq", hostId, false);
-
-    // Make the hosts id non-null after all the values are populated
-    if (databaseType == Configuration.DatabaseType.DERBY) {
-      // This is a workaround for UpgradeTest.java unit test
-      dbAccessor.executeQuery("ALTER TABLE " + HOSTS_TABLE + " ALTER column " + HOST_ID_COL + " NOT NULL");
-    } else {
-      dbAccessor.alterColumn(HOSTS_TABLE, new DBColumnInfo(HOST_ID_COL, Long.class, null, null, false));
-    }
-
-
-    // Drop the 8 FK constraints in the host-related tables. They will be recreated later after the PK is changed.
-    // The only host-related table not being included is alert_history.
-    if (databaseType == Configuration.DatabaseType.DERBY) {
-      dbAccessor.dropFKConstraint(HOST_COMPONENT_STATE_TABLE, "hostcomponentstate_host_name");
-      dbAccessor.dropFKConstraint(HOST_COMPONENT_DESIRED_STATE_TABLE, "hstcmponentdesiredstatehstname");
-      dbAccessor.dropFKConstraint(HOST_ROLE_COMMAND_TABLE, "FK_host_role_command_host_name");
-      dbAccessor.dropFKConstraint(HOST_STATE_TABLE, "FK_hoststate_host_name");
-      dbAccessor.dropFKConstraint(HOST_VERSION_TABLE, "FK_host_version_host_name");
-      dbAccessor.dropFKConstraint(CONFIG_GROUP_HOST_MAPPING_TABLE, "FK_cghm_hname");
-      // FK_krb_pr_host_hostname used to have a CASCADE DELETE, which is not needed.
-      dbAccessor.dropFKConstraint(KERBEROS_PRINCIPAL_HOST_TABLE, "FK_krb_pr_host_hostname");
-      // FK_krb_pr_host_principalname used to have a CASCADE DELETE, which is not needed, so it will be recreated without it.
-      dbAccessor.dropFKConstraint(KERBEROS_PRINCIPAL_HOST_TABLE, "FK_krb_pr_host_principalname");
-
-      // This FK name is actually different on Derby.
-      dbAccessor.dropFKConstraint(HOST_CONFIG_MAPPING_TABLE, "FK_hostconfigmapping_host_name");
-    } else {
-      dbAccessor.dropFKConstraint(HOST_COMPONENT_STATE_TABLE, "hostcomponentstate_host_name");
-      dbAccessor.dropFKConstraint(HOST_COMPONENT_STATE_TABLE, "fk_hostcomponentstate_host_name");
-
-      dbAccessor.dropFKConstraint(HOST_COMPONENT_DESIRED_STATE_TABLE, "hstcmponentdesiredstatehstname");
-      dbAccessor.dropFKConstraint(HOST_COMPONENT_DESIRED_STATE_TABLE, "fk_hostcomponentdesiredstate_host_name");
-
-      dbAccessor.dropFKConstraint(HOST_ROLE_COMMAND_TABLE, "FK_host_role_command_host_name");
-      dbAccessor.dropFKConstraint(HOST_STATE_TABLE, "FK_hoststate_host_name");
-      dbAccessor.dropFKConstraint(HOST_VERSION_TABLE, "FK_host_version_host_name");
-
-      dbAccessor.dropFKConstraint(CONFIG_GROUP_HOST_MAPPING_TABLE, "FK_cghm_hname");
-      dbAccessor.dropFKConstraint(CONFIG_GROUP_HOST_MAPPING_TABLE, "fk_configgrouphostmapping_host_name");
-
-      // FK_krb_pr_host_hostname used to have a CASCADE DELETE, which is not needed.
-      dbAccessor.dropFKConstraint(KERBEROS_PRINCIPAL_HOST_TABLE, "FK_krb_pr_host_hostname");
-      dbAccessor.dropFKConstraint(KERBEROS_PRINCIPAL_HOST_TABLE, "fk_kerberos_principal_host_host_name");
-
-      // FK_krb_pr_host_principalname used to have a CASCADE DELETE, which is not needed, so it will be recreated without it.
-      dbAccessor.dropFKConstraint(KERBEROS_PRINCIPAL_HOST_TABLE, "FK_krb_pr_host_principalname");
-
-      dbAccessor.dropFKConstraint(HOST_CONFIG_MAPPING_TABLE, "FK_hostconfmapping_host_name");
-    }
-
-    // In Ambari 2.0.0, there were discrepancies with the FK in the ClusterHostMapping table in the Postgres databases.
-    // They were either swapped, or pointing to the wrong table. Ignore failures for both of these.
-    try {
-      dbAccessor.dropFKConstraint(CLUSTER_HOST_MAPPING_TABLE, "ClusterHostMapping_host_name", true);
-      dbAccessor.dropFKConstraint(CLUSTER_HOST_MAPPING_TABLE, "fk_clusterhostmapping_host_name", true);
-    } catch (Exception e) {
-      LOG.warn("Performed best attempt at deleting FK ClusterHostMapping_host_name. " +
-          "It is possible it did not exist or the deletion failed. " +  e.getMessage());
-    }
-    try {
-      dbAccessor.dropFKConstraint(CLUSTER_HOST_MAPPING_TABLE, "ClusterHostMapping_cluster_id", true);
-    } catch (Exception e) {
-      LOG.warn("Performed best attempt at deleting FK ClusterHostMapping_cluster_id. " +
-          "It is possible it did not exist or the deletion failed. " +  e.getMessage());
-    }
-
-    // Re-add the FK to the cluster_id; will add the host_id at the end.
-    dbAccessor.addFKConstraint(CLUSTER_HOST_MAPPING_TABLE, "FK_clhostmapping_cluster_id",
-        "cluster_id", CLUSTERS_TABLE, "cluster_id", false);
-
-    // Drop the PK, and recreate it on the host_id instead
-    if (databaseType == Configuration.DatabaseType.DERBY) {
-      String constraintName = getDerbyTableConstraintName("p", HOSTS_TABLE);
-      if (null != constraintName) {
-        // Derby doesn't support CASCADE DELETE.
-        dbAccessor.executeQuery("ALTER TABLE " + HOSTS_TABLE + " DROP CONSTRAINT " + constraintName);
-      }
-    } else {
-      dbAccessor.dropPKConstraint(HOSTS_TABLE, "hosts_pkey", "host_name", true);
-    }
-
-    dbAccessor.addPKConstraint(HOSTS_TABLE, "PK_hosts_id", "host_id");
-    dbAccessor.addUniqueConstraint(HOSTS_TABLE, "UQ_hosts_host_name", "host_name");
-
-
-    // Add host_id to the host-related tables, and populate the host_id, one table at a time.
-    String[] tablesToAddHostID = new String[] {
-        CONFIG_GROUP_HOST_MAPPING_TABLE,
-        CLUSTER_HOST_MAPPING_TABLE,
-        HOST_CONFIG_MAPPING_TABLE,
-        HOST_COMPONENT_STATE_TABLE,
-        HOST_COMPONENT_DESIRED_STATE_TABLE,
-        HOST_ROLE_COMMAND_TABLE,
-        HOST_STATE_TABLE,
-        HOST_VERSION_TABLE,
-        KERBEROS_PRINCIPAL_HOST_TABLE,
-        REQUEST_OPERATION_LEVEL_TABLE,
-        SERVICE_CONFIG_HOSTS_TABLE
-    };
-
-    for (String tableName : tablesToAddHostID) {
-      dbAccessor.addColumn(tableName, new DBColumnInfo(HOST_ID_COL, Long.class, null, null, true));
-
-      // The column name is different for one table
-      String hostNameColumnName = tableName.equals(SERVICE_CONFIG_HOSTS_TABLE) ? "hostname" : "host_name";
-
-      if (dbAccessor.tableHasData(tableName) && dbAccessor.tableHasColumn(tableName, hostNameColumnName)) {
-        dbAccessor.executeQuery("UPDATE " + tableName + " t SET host_id = (SELECT host_id FROM hosts h WHERE h.host_name = t." + hostNameColumnName + ") WHERE t.host_id IS NULL AND t." + hostNameColumnName + " IS NOT NULL");
-
-        // For legacy reasons, the hostrolecommand table will contain "none" for some records where the host_name was not important.
-        // These records were populated during Finalize in Rolling Upgrade, so they must be updated to use a valid host_name.
-        if (tableName.equals(HOST_ROLE_COMMAND_TABLE) && StringUtils.isNotBlank(randomHostName)) {
-          dbAccessor.executeQuery("UPDATE " + tableName + " t SET host_id = (SELECT host_id FROM hosts h WHERE h.host_name = '" + randomHostName + "') WHERE t.host_id IS NULL AND t.host_name = 'none'");
-        }
-      }
-
-      // The one exception for setting NOT NULL is the requestoperationlevel table
-      if (!tableName.equals(REQUEST_OPERATION_LEVEL_TABLE)) {
-        dbAccessor.setColumnNullable(tableName, HOST_ID_COL, false);
-      }
-    }
-
-
-    // For any tables where the host_name was part of the PK, we need to drop the PK and recreate it with the host_id
-    String[] tablesWithHostNameInPK =  new String[] {
-        CONFIG_GROUP_HOST_MAPPING_TABLE,
-        CLUSTER_HOST_MAPPING_TABLE,
-        HOST_CONFIG_MAPPING_TABLE,
-        HOST_COMPONENT_STATE_TABLE,
-        HOST_COMPONENT_DESIRED_STATE_TABLE,
-        HOST_STATE_TABLE,
-        KERBEROS_PRINCIPAL_HOST_TABLE,
-        SERVICE_CONFIG_HOSTS_TABLE
-    };
-
-    // We can't drop the PK if one of its columns is part of a foreign key, so we drop the FK first and re-create it after dropping the PK
-    dbAccessor.dropFKConstraint(CONFIG_GROUP_HOST_MAPPING_TABLE, "FK_cghm_cgid");
-    dbAccessor.dropFKConstraint(CLUSTER_HOST_MAPPING_TABLE, "FK_clhostmapping_cluster_id");
-
-    dbAccessor.dropFKConstraint(HOST_CONFIG_MAPPING_TABLE, "FK_hostconfmapping_cluster_id");
-    dbAccessor.dropFKConstraint(HOST_COMPONENT_STATE_TABLE, "hstcomponentstatecomponentname");
-    dbAccessor.dropFKConstraint(HOST_COMPONENT_DESIRED_STATE_TABLE, "hstcmpnntdesiredstatecmpnntnme");
-    dbAccessor.dropFKConstraint(SERVICE_CONFIG_HOSTS_TABLE, "FK_scvhosts_scv");
-
-    // These FKs weren't deleted earlier because of MySQL case sensitivity
-    if (databaseType == Configuration.DatabaseType.MYSQL) {
-      dbAccessor.dropFKConstraint(CONFIG_GROUP_HOST_MAPPING_TABLE, "FK_configgrouphostmapping_config_group_id");
-      dbAccessor.dropFKConstraint(CLUSTER_HOST_MAPPING_TABLE, "FK_ClusterHostMapping_cluster_id");
-      dbAccessor.dropFKConstraint(KERBEROS_PRINCIPAL_HOST_TABLE, "FK_kerberos_principal_host_principal_name");
-      dbAccessor.dropFKConstraint(SERVICE_CONFIG_HOSTS_TABLE, "FK_serviceconfighosts_service_config_id");
-    }
-
-    if (databaseType == Configuration.DatabaseType.DERBY) {
-      for (String tableName : tablesWithHostNameInPK) {
-        String constraintName = getDerbyTableConstraintName("p", tableName);
-        if (null != constraintName) {
-          dbAccessor.executeQuery("ALTER TABLE " + tableName + " DROP CONSTRAINT " + constraintName);
-        }
-      }
-    } else {
-      // drop the constraint only if the existing constraint contains the required column
-      dbAccessor.dropPKConstraint(CONFIG_GROUP_HOST_MAPPING_TABLE, "configgrouphostmapping_pkey", HOST_NAME_COL, true);
-      dbAccessor.dropPKConstraint(CLUSTER_HOST_MAPPING_TABLE, "clusterhostmapping_pkey",HOST_NAME_COL, true);
-      dbAccessor.dropPKConstraint(HOST_CONFIG_MAPPING_TABLE, "hostconfigmapping_pkey", HOST_NAME_COL, true);
-      dbAccessor.dropPKConstraint(HOST_COMPONENT_STATE_TABLE, "hostcomponentstate_pkey", HOST_NAME_COL, true);
-      dbAccessor.dropPKConstraint(HOST_COMPONENT_DESIRED_STATE_TABLE, "hostcomponentdesiredstate_pkey", HOST_NAME_COL, true);
-      dbAccessor.dropPKConstraint(HOST_STATE_TABLE, "hoststate_pkey", HOST_NAME_COL, true);
-      dbAccessor.dropPKConstraint(KERBEROS_PRINCIPAL_HOST_TABLE, "kerberos_principal_host_pkey", HOST_NAME_COL, true);
-      dbAccessor.dropPKConstraint(SERVICE_CONFIG_HOSTS_TABLE, "serviceconfighosts_pkey", "hostname", true);
-    }
-
-    // Finish by deleting the unnecessary host_name columns.
-    dbAccessor.dropColumn(CONFIG_GROUP_HOST_MAPPING_TABLE, HOST_NAME_COL);
-    dbAccessor.dropColumn(CLUSTER_HOST_MAPPING_TABLE, HOST_NAME_COL);
-    dbAccessor.dropColumn(HOST_CONFIG_MAPPING_TABLE, HOST_NAME_COL);
-    dbAccessor.dropColumn(HOST_COMPONENT_STATE_TABLE, HOST_NAME_COL);
-    dbAccessor.dropColumn(HOST_COMPONENT_DESIRED_STATE_TABLE, HOST_NAME_COL);
-    dbAccessor.dropColumn(HOST_ROLE_COMMAND_TABLE, HOST_NAME_COL);
-    dbAccessor.dropColumn(HOST_STATE_TABLE, HOST_NAME_COL);
-    dbAccessor.dropColumn(HOST_VERSION_TABLE, HOST_NAME_COL);
-    dbAccessor.dropColumn(KERBEROS_PRINCIPAL_HOST_TABLE, HOST_NAME_COL);
-    dbAccessor.dropColumn(REQUEST_OPERATION_LEVEL_TABLE, HOST_NAME_COL);
-
-    // Notice that the column name doesn't have an underscore here.
-    dbAccessor.dropColumn(SERVICE_CONFIG_HOSTS_TABLE, "hostname");
-
-    dbAccessor.addPKConstraint(CONFIG_GROUP_HOST_MAPPING_TABLE, "configgrouphostmapping_pkey", "config_group_id", "host_id");
-    dbAccessor.addPKConstraint(CLUSTER_HOST_MAPPING_TABLE, "clusterhostmapping_pkey", "cluster_id", "host_id");
-    dbAccessor.addPKConstraint(HOST_CONFIG_MAPPING_TABLE, "hostconfigmapping_pkey", "create_timestamp", "host_id", "cluster_id", "type_name");
-    dbAccessor.addPKConstraint(HOST_COMPONENT_STATE_TABLE, "hostcomponentstate_pkey", "cluster_id", "component_name", "host_id", "service_name");
-    dbAccessor.addPKConstraint(HOST_COMPONENT_DESIRED_STATE_TABLE, "hostcomponentdesiredstate_pkey", "cluster_id", "component_name", "host_id", "service_name");
-    dbAccessor.addPKConstraint(HOST_STATE_TABLE, "hoststate_pkey", "host_id");
-    dbAccessor.addPKConstraint(KERBEROS_PRINCIPAL_HOST_TABLE, "kerberos_principal_host_pkey", "principal_name", "host_id");
-    dbAccessor.addPKConstraint(SERVICE_CONFIG_HOSTS_TABLE, "serviceconfighosts_pkey", "service_config_id", "host_id");
-
-    // re-create FK constraints
-    dbAccessor.addFKConstraint(CONFIG_GROUP_HOST_MAPPING_TABLE, "FK_cghm_host_id", "host_id", HOSTS_TABLE, "host_id", false);
-    dbAccessor.addFKConstraint(CLUSTER_HOST_MAPPING_TABLE, "FK_clusterhostmapping_host_id", "host_id", HOSTS_TABLE, "host_id", false);
-    dbAccessor.addFKConstraint(HOST_CONFIG_MAPPING_TABLE, "FK_hostconfmapping_host_id", "host_id", HOSTS_TABLE, "host_id", false);
-    dbAccessor.addFKConstraint(HOST_COMPONENT_STATE_TABLE, "FK_hostcomponentstate_host_id", "host_id", HOSTS_TABLE, "host_id", false);
-    dbAccessor.addFKConstraint(HOST_COMPONENT_DESIRED_STATE_TABLE, "FK_hcdesiredstate_host_id", "host_id", HOSTS_TABLE, "host_id", false);
-    dbAccessor.addFKConstraint(HOST_ROLE_COMMAND_TABLE, "FK_host_role_command_host_id", "host_id", HOSTS_TABLE, "host_id", false);
-    dbAccessor.addFKConstraint(HOST_STATE_TABLE, "FK_hoststate_host_id", "host_id", HOSTS_TABLE, "host_id", false);
-    dbAccessor.addFKConstraint(HOST_VERSION_TABLE, "FK_host_version_host_id", "host_id", HOSTS_TABLE, "host_id", false);
-    dbAccessor.addFKConstraint(KERBEROS_PRINCIPAL_HOST_TABLE, "FK_krb_pr_host_id", "host_id", HOSTS_TABLE, "host_id", false);
-    dbAccessor.addFKConstraint(KERBEROS_PRINCIPAL_HOST_TABLE, "FK_krb_pr_host_principalname", "principal_name", KERBEROS_PRINCIPAL_TABLE, "principal_name", false);
-    dbAccessor.addFKConstraint(SERVICE_CONFIG_HOSTS_TABLE, "FK_scvhosts_host_id", "host_id", HOSTS_TABLE, "host_id", false);
-    dbAccessor.addFKConstraint(CONFIG_GROUP_HOST_MAPPING_TABLE, "FK_cghm_cgid", "config_group_id", CONFIG_GROUP_TABLE, "group_id", false);
-    dbAccessor.addFKConstraint(CLUSTER_HOST_MAPPING_TABLE, "FK_clhostmapping_cluster_id", "cluster_id", CLUSTERS_TABLE, "cluster_id", false);
-    dbAccessor.addFKConstraint(HOST_CONFIG_MAPPING_TABLE, "FK_hostconfmapping_cluster_id", "cluster_id", CLUSTERS_TABLE, "cluster_id", false);
-    dbAccessor.addFKConstraint(HOST_COMPONENT_STATE_TABLE, "hstcomponentstatecomponentname",
-                                  new String[]{"component_name", "cluster_id", "service_name"}, SERVICE_COMPONENT_DESIRED_STATE_TABLE,
-                                  new String[]{"component_name", "cluster_id", "service_name"}, false);
-    dbAccessor.addFKConstraint(HOST_COMPONENT_DESIRED_STATE_TABLE, "hstcmpnntdesiredstatecmpnntnme",
-                                  new String[]{"component_name", "cluster_id", "service_name"}, SERVICE_COMPONENT_DESIRED_STATE_TABLE,
-                                  new String[]{"component_name", "cluster_id", "service_name"}, false);
-    dbAccessor.addFKConstraint(SERVICE_CONFIG_HOSTS_TABLE, "FK_scvhosts_scv", "service_config_id", SERVICE_CONFIG_TABLE, "service_config_id", false);
-
-    // Update host names to be case insensitive
-    String UPDATE_TEMPLATE = "UPDATE {0} SET {1} = lower({1})";
-    // First remove duplicate hosts
-    removeDuplicateHosts();
-    // Lowercase host name in hosts
-    String updateHostName = MessageFormat.format(UPDATE_TEMPLATE, HOSTS_TABLE, HOST_NAME_COL);
-    dbAccessor.executeQuery(updateHostName);
-    // Lowercase public host name in hosts
-    String updatePublicHostName = MessageFormat.format(UPDATE_TEMPLATE, HOSTS_TABLE, PUBLIC_HOST_NAME_COL);
-    dbAccessor.executeQuery(updatePublicHostName);
-    // Lowercase host name in alert_history
-    String updateAlertHostName = MessageFormat.format(UPDATE_TEMPLATE, ALERT_HISTORY_TABLE, HOST_NAME_COL);
-    dbAccessor.executeQuery(updateAlertHostName);
-  }
-
-  private void executeWidgetDDLUpdates() throws AmbariException, SQLException {
-    List<DBColumnInfo> columns = new ArrayList<>();
-
-    columns.add(new DBColumnInfo("id", Long.class,    null,  null, false));
-    columns.add(new DBColumnInfo("widget_name", String.class,  255,   null, false));
-    columns.add(new DBColumnInfo("widget_type", String.class,  255,   null, false));
-    columns.add(new DBColumnInfo("metrics", char[].class, null, null, true));
-    columns.add(new DBColumnInfo("time_created", Long.class,  null,   null, false));
-    columns.add(new DBColumnInfo("author", String.class, 255, null, true));
-    columns.add(new DBColumnInfo("description", String.class, 255, null, true));
-    columns.add(new DBColumnInfo("default_section_name", String.class, 255, null, true));
-    columns.add(new DBColumnInfo("scope", String.class,  255,   null, true));
-    columns.add(new DBColumnInfo("widget_values", char[].class, null, null, true));
-    columns.add(new DBColumnInfo("properties", char[].class, null, null, true));
-    columns.add(new DBColumnInfo("cluster_id", Long.class, null, null, false));
-    dbAccessor.createTable(WIDGET_TABLE, columns, "id");
-
-    columns = new ArrayList<>();
-    columns.add(new DBColumnInfo("id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("layout_name", String.class, 255, null, false));
-    columns.add(new DBColumnInfo("section_name", String.class, 255, null, false));
-    columns.add(new DBColumnInfo("cluster_id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("scope", String.class, 255, null, false));
-    columns.add(new DBColumnInfo("user_name", String.class, 255, null, false));
-    columns.add(new DBColumnInfo("display_name", String.class, 255, null, true));
-
-    dbAccessor.createTable(WIDGET_LAYOUT_TABLE, columns, "id");
-
-    columns = new ArrayList<>();
-    columns.add(new DBColumnInfo("widget_layout_id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("widget_id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("widget_order", Short.class, null, null, true));
-    dbAccessor.createTable(WIDGET_LAYOUT_USER_WIDGET_TABLE, columns, "widget_layout_id", "widget_id");
-    dbAccessor.addFKConstraint(WIDGET_LAYOUT_USER_WIDGET_TABLE, "FK_widget_layout_id", "widget_layout_id", "widget_layout", "id", false, false);
-    dbAccessor.addFKConstraint(WIDGET_LAYOUT_USER_WIDGET_TABLE, "FK_widget_id", "widget_id", "widget", "id", false, false);
-
-    //Alter users to store active widget layouts
-    dbAccessor.addColumn("users", new DBColumnInfo("active_widget_layouts", String.class, 1024, null, true));
-
-    // Sequence updates
-      addSequences(Arrays.asList("widget_id_seq", "widget_layout_id_seq"), 0L, false);
-  }
-
-  /**
-   * Adds the stack table, FKs, and constraints.
-   */
-  private void executeStackDDLUpdates() throws AmbariException, SQLException {
-    // stack table creation
-    ArrayList<DBColumnInfo> columns = new ArrayList<>();
-    columns.add(new DBColumnInfo("stack_id", Long.class, null, null, false));
-    columns.add(new DBColumnInfo("stack_name", String.class, 255, null, false));
-    columns.add(new DBColumnInfo("stack_version", String.class, 255, null,
-        false));
-
-    dbAccessor.createTable(STACK_TABLE, columns, "stack_id");
-    dbAccessor.addUniqueConstraint(STACK_TABLE, "unq_stack", "stack_name", "stack_version");
-
-    addSequence("stack_id_seq", 0L, false);
-
-    // create the new stack ID columns NULLABLE for now since we need to insert
-    // data into them later on (we'll change them to NOT NULL after that)
-    dbAccessor.addColumn(CLUSTERS_TABLE, DESIRED_STACK_ID_COLUMN);
-    dbAccessor.addColumn("hostcomponentdesiredstate", DESIRED_STACK_ID_COLUMN);
-    dbAccessor.addColumn(SERVICE_COMPONENT_DESIRED_STATE_TABLE, DESIRED_STACK_ID_COLUMN);
-    dbAccessor.addColumn("servicedesiredstate", DESIRED_STACK_ID_COLUMN);
-
-    dbAccessor.addFKConstraint(CLUSTERS_TABLE, "fk_clusters_desired_stack_id", DESIRED_STACK_ID_COLUMN_NAME, STACK_TABLE, STACK_ID_COLUMN_NAME, true);
-    dbAccessor.addFKConstraint("hostcomponentdesiredstate", "fk_hcds_desired_stack_id", DESIRED_STACK_ID_COLUMN_NAME, STACK_TABLE, STACK_ID_COLUMN_NAME, true);
-    dbAccessor.addFKConstraint(SERVICE_COMPONENT_DESIRED_STATE_TABLE, "fk_scds_desired_stack_id", DESIRED_STACK_ID_COLUMN_NAME, STACK_TABLE, STACK_ID_COLUMN_NAME, true);
-    dbAccessor.addFKConstraint("servicedesiredstate", "fk_sds_desired_stack_id", DESIRED_STACK_ID_COLUMN_NAME, STACK_TABLE, STACK_ID_COLUMN_NAME, true);
-
-    dbAccessor.addColumn("clusterstate", CURRENT_STACK_ID_COLUMN);
-    dbAccessor.addColumn("hostcomponentstate", CURRENT_STACK_ID_COLUMN);
-
-    dbAccessor.addFKConstraint("clusterstate", "fk_cs_current_stack_id", CURRENT_STACK_ID_COLUMN_NAME, STACK_TABLE, STACK_ID_COLUMN_NAME, true);
-    dbAccessor.addFKConstraint("hostcomponentstate", "fk_hcs_current_stack_id", CURRENT_STACK_ID_COLUMN_NAME, STACK_TABLE, STACK_ID_COLUMN_NAME, true);
-
-    dbAccessor.addColumn("clusterconfig", STACK_ID_COLUMN);
-    dbAccessor.addColumn("serviceconfig", STACK_ID_COLUMN);
-    dbAccessor.addColumn("blueprint", STACK_ID_COLUMN);
-    dbAccessor.addColumn(REPO_VERSION_TABLE, STACK_ID_COLUMN);
-
-    dbAccessor.addFKConstraint("clusterconfig", "fk_clusterconfig_stack_id", STACK_ID_COLUMN_NAME, STACK_TABLE, STACK_ID_COLUMN_NAME, true);
-    dbAccessor.addFKConstraint("serviceconfig", "fk_serviceconfig_stack_id", STACK_ID_COLUMN_NAME, STACK_TABLE, STACK_ID_COLUMN_NAME, true);
-    dbAccessor.addFKConstraint("blueprint", "fk_blueprint_stack_id", STACK_ID_COLUMN_NAME, STACK_TABLE, STACK_ID_COLUMN_NAME, true);
-    dbAccessor.addFKConstraint(REPO_VERSION_TABLE, "fk_repoversion_stack_id", STACK_ID_COLUMN_NAME, STACK_TABLE, STACK_ID_COLUMN_NAME, true);
-
-    // drop the unique constraint for the old column and add the new one
-    dbAccessor.dropUniqueConstraint(REPO_VERSION_TABLE, "uq_repo_version_stack_version");
-    dbAccessor.addUniqueConstraint("repo_version", "uq_repo_version_stack_id", "stack_id", "version");
-  }
-
-  /**
-   * Adds the stack table and constraints.
-   */
-  protected void executeStackPreDMLUpdates() throws AmbariException, SQLException {
-    Gson gson = new Gson();
-
-    injector.getInstance(AmbariMetaInfo.class);
-
-    StackDAO stackDAO = injector.getInstance(StackDAO.class);
-    List<StackEntity> stacks = stackDAO.findAll();
-    Map<Long,String> entityToJsonMap = new HashMap<>();
-
-    // build a mapping of stack entity to old-school JSON
-    for( StackEntity stack : stacks ){
-      StackId stackId = new StackId(stack.getStackName(),
-          stack.getStackVersion());
-      String stackJson = gson.toJson(stackId);
-      entityToJsonMap.put(stack.getStackId(), stackJson);
-    }
-
-    // use a bulk update on all tables to populate the new FK columns
-    String UPDATE_TEMPLATE = "UPDATE {0} SET {1} = {2} WHERE {3} = ''{4}''";
-    String UPDATE_BLUEPRINT_TEMPLATE = "UPDATE blueprint SET stack_id = {0} WHERE stack_name = ''{1}'' AND stack_version = ''{2}''";
-
-    Set<Long> stackEntityIds = entityToJsonMap.keySet();
-    for (Long stackEntityId : stackEntityIds) {
-      StackEntity stackEntity = stackDAO.findById(stackEntityId);
-      String outdatedJson = entityToJsonMap.get(stackEntityId);
-      String outdatedRepoStack = MessageFormat.format("{0}-{1}",stackEntity.getStackName(),stackEntity.getStackVersion());
-
-      String clustersSQL = MessageFormat.format(UPDATE_TEMPLATE, "clusters",
-          DESIRED_STACK_ID_COLUMN_NAME, stackEntityId,
-          DESIRED_STACK_VERSION_COLUMN_NAME, outdatedJson);
-
-      String hostComponentDesiredStateSQL = MessageFormat.format(
-          UPDATE_TEMPLATE, "hostcomponentdesiredstate",
-          DESIRED_STACK_ID_COLUMN_NAME, stackEntityId,
-          DESIRED_STACK_VERSION_COLUMN_NAME, outdatedJson);
-
-      String serviceComponentDesiredStateSQL = MessageFormat.format(
-          UPDATE_TEMPLATE, SERVICE_COMPONENT_DESIRED_STATE_TABLE,
-          DESIRED_STACK_ID_COLUMN_NAME, stackEntityId,
-          DESIRED_STACK_VERSION_COLUMN_NAME, outdatedJson);
-
-      String serviceDesiredStateSQL = MessageFormat.format(UPDATE_TEMPLATE,
-          "servicedesiredstate",
-          DESIRED_STACK_ID_COLUMN_NAME, stackEntityId,
-          DESIRED_STACK_VERSION_COLUMN_NAME, outdatedJson);
-
-      String clusterStateSQL = MessageFormat.format(UPDATE_TEMPLATE,
-          "clusterstate", CURRENT_STACK_ID_COLUMN_NAME, stackEntityId,
-          CURRENT_STACK_VERSION_COLUMN_NAME, outdatedJson);
-
-      String hostComponentStateSQL = MessageFormat.format(UPDATE_TEMPLATE,
-          "hostcomponentstate", CURRENT_STACK_ID_COLUMN_NAME, stackEntityId,
-          CURRENT_STACK_VERSION_COLUMN_NAME, outdatedJson);
-
-      String blueprintSQL = MessageFormat.format(UPDATE_BLUEPRINT_TEMPLATE,
-          stackEntityId, stackEntity.getStackName(),
-          stackEntity.getStackVersion());
-
-      String repoVersionSQL = MessageFormat.format(UPDATE_TEMPLATE,
-          REPO_VERSION_TABLE, STACK_ID_COLUMN_NAME, stackEntityId, "stack",
-          outdatedRepoStack);
-
-        dbAccessor.executeQuery(clustersSQL, "clusters", DESIRED_STACK_VERSION_COLUMN_NAME);
-        dbAccessor.executeQuery(hostComponentDesiredStateSQL, "hostcomponentdesiredstate", DESIRED_STACK_VERSION_COLUMN_NAME);
-        dbAccessor.executeQuery(serviceComponentDesiredStateSQL, SERVICE_COMPONENT_DESIRED_STATE_TABLE, DESIRED_STACK_VERSION_COLUMN_NAME);
-        dbAccessor.executeQuery(serviceDesiredStateSQL, "servicedesiredstate", DESIRED_STACK_VERSION_COLUMN_NAME);
-        dbAccessor.executeQuery(clusterStateSQL, "clusterstate", CURRENT_STACK_VERSION_COLUMN_NAME);
-        dbAccessor.executeQuery(hostComponentStateSQL, "hostcomponentstate", CURRENT_STACK_VERSION_COLUMN_NAME);
-        dbAccessor.executeQuery(blueprintSQL, "blueprint", "stack_name");
-
-        dbAccessor.executeQuery(repoVersionSQL, REPO_VERSION_TABLE, "stack");
-    }
-
-    // for the tables with no prior stack, set these based on the cluster's
-    // stack for each cluster defined
-    String INSERT_STACK_ID_TEMPLATE = "UPDATE {0} SET {1} = {2} WHERE cluster_id = {3}";
-    // we should do the changes only if they are required
-    if (dbAccessor.tableHasColumn(CLUSTERS_TABLE,DESIRED_STACK_VERSION_COLUMN_NAME)) {
-
-      Statement statement = null;
-      ResultSet rs = null;
-      try {
-        statement = dbAccessor.getConnection().createStatement();
-        if (statement != null) {
-          rs = statement.executeQuery("SELECT * FROM " + CLUSTERS_TABLE);
-          if (rs != null) {
-            while (rs.next()) {
-              long clusterId = rs.getLong("cluster_id");
-              String stackJson = rs.getString(DESIRED_STACK_VERSION_COLUMN_NAME);
-              StackId stackId = gson.fromJson(stackJson, StackId.class);
-
-              StackEntity stackEntity = stackDAO.find(stackId.getStackName(),
-                stackId.getStackVersion());
-
-              String clusterConfigSQL = MessageFormat.format(
-                INSERT_STACK_ID_TEMPLATE, "clusterconfig", STACK_ID_COLUMN_NAME,
-                stackEntity.getStackId(), clusterId);
-
-              String serviceConfigSQL = MessageFormat.format(
-                INSERT_STACK_ID_TEMPLATE, "serviceconfig", STACK_ID_COLUMN_NAME,
-                stackEntity.getStackId(), clusterId);
-
-              dbAccessor.executeQuery(clusterConfigSQL);
-              dbAccessor.executeQuery(serviceConfigSQL);
-            }
-          }
-        }
-        String UPDATE_CURRENT_STACK_ID_IF_NULL_TEMPLATE =
-          "UPDATE hostcomponentstate " +
-          "SET current_stack_id={0} " +
-          "WHERE current_stack_id IS NULL " +
-          "AND cluster_id={1} ";
-        rs = statement.executeQuery("SELECT cluster_id, current_stack_id FROM clusterstate");
-        if (rs != null) {
-          while (rs.next()) {
-            // if hostcomponentstate.current_stack_id is null,
-            // set to cluster's current_stack_id
-            long clusterId = rs.getLong("cluster_id");
-            long currentStackId = rs.getLong("current_stack_id");
-            String hostComponentStateSQL = MessageFormat.format(
-              UPDATE_CURRENT_STACK_ID_IF_NULL_TEMPLATE, currentStackId, clusterId);
-            dbAccessor.executeUpdate(hostComponentStateSQL, false);
-          }
-        }
-      } finally {
-        if (rs != null) {
-          rs.close();
-        }
-        if (statement != null) {
-          statement.close();
-        }
-      }
-    }
-  }
-
-  /**
-   * Copy cluster & service widgets from stack to DB.
-   */
-  protected void initializeClusterAndServiceWidgets() throws AmbariException {
-    AmbariManagementController controller = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = controller.getClusters();
-    if (clusters == null) {
-      return;
-    }
-
-    Map<String, Cluster> clusterMap = clusters.getClusters();
-
-    if (clusterMap != null && !clusterMap.isEmpty()) {
-      for (Cluster cluster : clusterMap.values()) {
-        controller.initializeWidgetsAndLayouts(cluster, null);
-
-        Map<String, Service> serviceMap = cluster.getServices();
-        if (serviceMap != null && !serviceMap.isEmpty()) {
-          for (Service service : serviceMap.values()) {
-            controller.initializeWidgetsAndLayouts(cluster, service);
-          }
-        }
-      }
-    }
-  }
-
-  // ----- UpgradeCatalog ----------------------------------------------------
-
-  /**
-   * Populate the id of the hosts table with an auto-increment int.
-   * @param resultSet Rows from the hosts table, sorted first by host_id
-   * @return Returns an integer with the id for the next host record to be inserted.
-   * @throws SQLException
-   */
-  Long populateHostsId(ResultSet resultSet) throws SQLException {
-    Long hostId = 0L;
-    if (resultSet != null) {
-      try {
-        while (resultSet.next()) {
-          hostId++;
-          final String hostName = resultSet.getString(1);
-
-          if (StringUtils.isNotBlank(hostName)) {
-            dbAccessor.executeQuery("UPDATE " + HOSTS_TABLE + " SET host_id = " + hostId +
-                " WHERE " + HOST_NAME_COL + " = '" + hostName + "'");
-          }
-        }
-      } catch (Exception e) {
-        LOG.error("Unable to populate the id of the hosts. " + e.getMessage());
-      }
-    }
-    return hostId;
-  }
-
-  private String getRandomHostName() throws SQLException {
-    String randomHostName = null;
-
-    Statement statement = null;
-    ResultSet rs = null;
-    try {
-      statement = dbAccessor.getConnection().createStatement();
-      if (statement != null) {
-        rs = statement.executeQuery("SELECT " + HOST_NAME_COL + " FROM " + HOSTS_TABLE + " ORDER BY " + HOST_NAME_COL + " ASC");
-        if (rs != null && rs.next()) {
-          randomHostName = rs.getString(1);
-        }
-      }
-    } catch (Exception e) {
-      LOG.error("Failed to retrieve random host name. Exception: " + e.getMessage());
-    } finally {
-      if (rs != null) {
-        rs.close();
-      }
-      if (statement != null) {
-        statement.close();
-      }
-    }
-    return randomHostName;
-  }
-
-  /**
-   * Remove duplicate hosts before making host name case-insensitive
-   * @throws SQLException
-   */
-  private void removeDuplicateHosts() throws SQLException {
-    // Select hosts not in the cluster
-    String hostsNotInClusterQuery = MessageFormat.format(
-        "SELECT * FROM {0} WHERE {1} NOT IN (SELECT {1} FROM {2})",
-        HOSTS_TABLE, HOST_ID_COL, CLUSTER_HOST_MAPPING_TABLE);
-    ResultSet hostsNotInCluster = null;
-    Statement statement = null;
-    Statement duplicatedHostsStatement = null;
-
-    try {
-      statement = dbAccessor.getConnection().createStatement();
-      duplicatedHostsStatement = dbAccessor.getConnection().createStatement();
-      hostsNotInCluster = statement.executeQuery(hostsNotInClusterQuery);
-      if(hostsNotInCluster != null) {
-        while (hostsNotInCluster.next()) {
-          long hostToDeleteId = hostsNotInCluster.getLong(HOST_ID_COL);
-          String hostToDeleteName = hostsNotInCluster.getString(HOST_NAME_COL);
-          String duplicateHostsQuery = "SELECT count(*) FROM hosts WHERE lower(host_name) = '" + hostToDeleteName + "' AND host_id != " + hostToDeleteId;
-          long count = 0;
-          ResultSet duplicateHosts = null;
-          try {
-            duplicateHosts = duplicatedHostsStatement.executeQuery(duplicateHostsQuery);
-            if (duplicateHosts != null && duplicateHosts.next()) {
-              count = duplicateHosts.getLong(1);
-            }
-          } finally {
-            if (null != duplicateHosts) {
-              duplicateHosts.close();
-            }
-          }
-          if (count > 0) {
-            // Delete hosts and host_state table entries for this duplicate host entry
-            dbAccessor.executeQuery(
-                MessageFormat.format("DELETE from {0} WHERE {1} = {2,number,#}", HOST_STATE_TABLE, HOST_ID_COL, hostToDeleteId));
-            dbAccessor.executeQuery(
-                MessageFormat.format("DELETE from {0} WHERE {1} = {2,number,#}", HOSTS_TABLE, HOST_ID_COL, hostToDeleteId));
-          }
-        }
-      }
-    } finally {
-      if (null != hostsNotInCluster) {
-        hostsNotInCluster.close();
-      }
-      if (statement != null) {
-        statement.close();
-      }
-      if (duplicatedHostsStatement != null) {
-        duplicatedHostsStatement.close();
-      }
-    }
-  }
-
-  /**
-   * Get the constraint name created by Derby if one was not specified for the table.
-   * @param type Constraint-type, either, "p" (Primary), "c" (Check), "f" (Foreign), "u" (Unique)
-   * @param tableName Table Name
-   * @return Return the constraint name, or null if not found.
-   * @throws SQLException
-   */
-  private String getDerbyTableConstraintName(String type, String tableName) throws SQLException {
-    boolean found = false;
-    String constraint = null;
-
-    Statement statement = null;
-    ResultSet rs = null;
-    try {
-      statement = dbAccessor.getConnection().createStatement();
-      if (statement != null) {
-        rs = statement.executeQuery("SELECT c.constraintname, c.type, t.tablename FROM sys.sysconstraints c, sys.systables t WHERE c.tableid = t.tableid");
-        if (rs != null) {
-          while(rs.next()) {
-            constraint = rs.getString(1);
-            String recordType = rs.getString(2);
-            String recordTableName = rs.getString(3);
-
-            if (recordType.equalsIgnoreCase(type) && recordTableName.equalsIgnoreCase(tableName)) {
-              found = true;
-              break;
-            }
-          }
-        }
-      }
-    } finally {
-      if (rs != null) {
-        rs.close();
-      }
-      if (statement != null) {
-        statement.close();
-      }
-    }
-    return found ? constraint : null;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executePreDMLUpdates() throws AmbariException, SQLException {
-    executeStackPreDMLUpdates();
-    cleanupStackUpdates();
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDMLUpdates() throws AmbariException, SQLException {
-    addNewConfigurationsFromXml();
-
-    // Initialize all default widgets and widget layouts
-    initializeClusterAndServiceWidgets();
-
-    addMissingConfigs();
-    updateAlertDefinitions();
-    removeStormRestApiServiceComponent();
-    updateKerberosDescriptorArtifacts();
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void updateKerberosDescriptorArtifact(ArtifactDAO artifactDAO, ArtifactEntity artifactEntity) throws AmbariException {
-    if (artifactEntity != null) {
-      Map<String, Object> data = artifactEntity.getArtifactData();
-
-      if (data != null) {
-        final KerberosDescriptor kerberosDescriptor = new KerberosDescriptorFactory().createInstance(data);
-
-        if (kerberosDescriptor != null) {
-          // Get the global "hdfs" identity (if it exists)
-          KerberosIdentityDescriptor hdfsIdentity = kerberosDescriptor.getIdentity("hdfs");
-
-          if (hdfsIdentity != null) {
-            // Move the "hdfs" global identity to under HDFS service by removing it from the
-            // collection of global identities and _merging_ it into the identities for the HDFS
-            // service - creating a sparse HDFS service structure if necessary.
-            KerberosServiceDescriptor hdfsService = kerberosDescriptor.getService("HDFS");
-
-            if (hdfsService == null) {
-              hdfsService = new KerberosServiceDescriptorFactory().createInstance("HDFS", (Map) null);
-              hdfsService.putIdentity(hdfsIdentity);
-              kerberosDescriptor.putService(hdfsService);
-            } else {
-              KerberosIdentityDescriptor hdfsReferenceIdentity = hdfsService.getIdentity("/hdfs");
-
-              if (hdfsReferenceIdentity != null) {
-                // Merge the changes from the reference identity into the global identity...
-                hdfsIdentity.update(hdfsReferenceIdentity);
-                // Make sure the identity's name didn't change.
-                hdfsIdentity.setName("hdfs");
-
-                hdfsService.removeIdentity("/hdfs");
-              }
-
-              hdfsService.putIdentity(hdfsIdentity);
-            }
-
-            kerberosDescriptor.removeIdentity("hdfs");
-          }
-
-          // Find all identities named "/hdfs" and update the name to "/HDFS/hdfs"
-          updateKerberosDescriptorIdentityReferences(kerberosDescriptor, "/hdfs", "/HDFS/hdfs");
-          updateKerberosDescriptorIdentityReferences(kerberosDescriptor.getServices(), "/hdfs", "/HDFS/hdfs");
-
-          artifactEntity.setArtifactData(kerberosDescriptor.toMap());
-          artifactDAO.merge(artifactEntity);
-        }
-      }
-    }
-  }
-
-  /**
-   * Delete STORM_REST_API component if HDP is upgraded past 2.2 and the
-   * Component still exists.
-   */
-  protected void removeStormRestApiServiceComponent() {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-      for (final Cluster cluster : clusterMap.values()) {
-
-        ServiceComponentDesiredStateDAO dao = injector.getInstance(ServiceComponentDesiredStateDAO.class);
-        ServiceComponentDesiredStateEntity entity = dao.findByName(cluster.getClusterId(),
-            "STORM", "STORM_REST_API");
-
-        if (null == entity) {
-          continue;
-        }
-
-        StackId stackId = new StackId(entity.getDesiredStack());
-
-        if (stackId.getStackName().equals("HDP") &&
-          VersionUtils.compareVersions(stackId.getStackVersion(), "2.2") >= 0) {
-
-          executeInTransaction(new Runnable() {
-            @Override
-            public void run() {
-              EntityManager em = getEntityManagerProvider().get();
-              CriteriaBuilder cb = em.getCriteriaBuilder();
-
-              try {
-                LOG.info("Deleting STORM_REST_API service component.");
-                CriteriaDelete<HostComponentStateEntity> hcsDelete = cb.createCriteriaDelete(HostComponentStateEntity.class);
-                CriteriaDelete<HostComponentDesiredStateEntity> hcdDelete = cb.createCriteriaDelete(HostComponentDesiredStateEntity.class);
-                CriteriaDelete<ServiceComponentDesiredStateEntity> scdDelete = cb.createCriteriaDelete(ServiceComponentDesiredStateEntity.class);
-
-                Root<HostComponentStateEntity> hcsRoot = hcsDelete.from(HostComponentStateEntity.class);
-                Root<HostComponentDesiredStateEntity> hcdRoot = hcdDelete.from(HostComponentDesiredStateEntity.class);
-                Root<ServiceComponentDesiredStateEntity> scdRoot = scdDelete.from(ServiceComponentDesiredStateEntity.class);
-
-                hcsDelete.where(cb.equal(hcsRoot.get("componentName"), "STORM_REST_API"));
-                hcdDelete.where(cb.equal(hcdRoot.get("componentName"), "STORM_REST_API"));
-                scdDelete.where(cb.equal(scdRoot.get("componentName"), "STORM_REST_API"));
-
-                em.createQuery(hcsDelete).executeUpdate();
-                em.createQuery(hcdDelete).executeUpdate();
-                em.createQuery(scdDelete).executeUpdate();
-              } catch (Exception e) {
-                LOG.warn("Error deleting STORM_REST_API service component. " +
-                  "This could result in issue with ambari server start. " +
-                  "Please make sure the STORM_REST_API component is deleted " +
-                  "from the database by running following commands:\n" +
-                  "delete from hostcomponentdesiredstate where component_name='STORM_REST_API';\n" +
-                  "delete from hostcomponentstate where component_name='STORM_REST_API';\n" +
-                  "delete from servicecomponentdesiredstate where component_name='STORM_REST_API';\n", e);
-              }
-            }
-          });
-        }
-      }
-    }
-  }
-
-  /**
-   * Modifies the JSON of some of the alert definitions which have changed
-   * between Ambari versions.
-   */
-  protected void updateAlertDefinitions() {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    AlertDefinitionDAO alertDefinitionDAO = injector.getInstance(AlertDefinitionDAO.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    List<String> metricAlerts = Arrays.asList("namenode_cpu", "namenode_hdfs_blocks_health",
-            "namenode_hdfs_capacity_utilization", "namenode_rpc_latency",
-            "namenode_directory_status", "datanode_health_summary", "datanode_storage");
-
-    List<String> mapredAlerts = Arrays.asList("mapreduce_history_server_cpu", "mapreduce_history_server_rpc_latency");
-    List<String> rmAlerts = Arrays.asList("yarn_resourcemanager_cpu", "yarn_resourcemanager_rpc_latency");
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          // HDFS metric alerts
-          for (String alertName : metricAlerts) {
-            AlertDefinitionEntity alertDefinitionEntity = alertDefinitionDAO.findByName(
-                cluster.getClusterId(), alertName);
-
-            if (alertDefinitionEntity != null) {
-              String source = alertDefinitionEntity.getSource();
-              JsonObject rootJson = new JsonParser().parse(source).getAsJsonObject();
-
-              rootJson.get("uri").getAsJsonObject().addProperty("kerberos_keytab",
-                    "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}");
-
-              rootJson.get("uri").getAsJsonObject().addProperty("kerberos_principal",
-                    "{{hdfs-site/dfs.web.authentication.kerberos.principal}}");
-
-              updateAlertDefinitionEntitySource(alertName, rootJson.toString(), UUID.randomUUID().toString());
-            }
-          }
-
-          // MapReduce alerts update for kerberos
-          for (String alertName : mapredAlerts) {
-            AlertDefinitionEntity alertDefinitionEntity = alertDefinitionDAO.findByName(
-                cluster.getClusterId(), alertName);
-
-            if (alertDefinitionEntity != null) {
-              String source = alertDefinitionEntity.getSource();
-              JsonObject rootJson = new JsonParser().parse(source).getAsJsonObject();
-              rootJson.get("uri").getAsJsonObject().addProperty("kerberos_keytab",
-                    "{{mapred-site/mapreduce.jobhistory.webapp.spnego-keytab-file}}");
-
-              rootJson.get("uri").getAsJsonObject().addProperty("kerberos_principal",
-                    "{{mapred-site/mapreduce.jobhistory.webapp.spnego-principal}}");
-
-              updateAlertDefinitionEntitySource(alertName, rootJson.toString(), UUID.randomUUID().toString());
-            }
-          }
-
-          // YARN alerts
-          for (String alertName : rmAlerts) {
-            AlertDefinitionEntity alertDefinitionEntity = alertDefinitionDAO.findByName(
-                cluster.getClusterId(), alertName);
-
-            if (alertDefinitionEntity != null) {
-              String source = alertDefinitionEntity.getSource();
-              JsonObject rootJson = new JsonParser().parse(source).getAsJsonObject();
-
-              rootJson.get("uri").getAsJsonObject().addProperty("kerberos_keytab",
-                    "{{yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file}}");
-
-              rootJson.get("uri").getAsJsonObject().addProperty("kerberos_principal",
-                    "{{yarn-site/yarn.resourcemanager.webapp.spnego-principal}}");
-
-              updateAlertDefinitionEntitySource(alertName, rootJson.toString(), UUID.randomUUID().toString());
-            }
-          }
-
-          // zookeeper failover controller alert update for default port and uri
-          // to 8019 and dfs.ha.zkfc.port
-          AlertDefinitionEntity zkFailoverDefinitionEntity = alertDefinitionDAO.findByName(
-              cluster.getClusterId(), "hdfs_zookeeper_failover_controller_process");
-
-          if (zkFailoverDefinitionEntity != null) {
-            String source = zkFailoverDefinitionEntity.getSource();
-            JsonObject rootJson = new JsonParser().parse(source).getAsJsonObject();
-            rootJson.remove("uri");
-            rootJson.remove("default_port");
-            rootJson.addProperty("uri", "{{hdfs-site/dfs.ha.zkfc.port}}");
-            rootJson.addProperty("default_port", new Integer(8019));
-
-            // save the changes
-            updateAlertDefinitionEntitySource("hdfs_zookeeper_failover_controller_process",
-                rootJson.toString(), UUID.randomUUID().toString());
-          }
-
-          // update ranger admin alerts from type port(2.2) to web(2.3)
-          AlertDefinitionEntity rangerAdminDefinitionEntity = alertDefinitionDAO.findByName(
-            cluster.getClusterId(), "ranger_admin_process");
-
-          if (rangerAdminDefinitionEntity != null) {
-            String source = rangerAdminDefinitionEntity.getSource();
-            JsonObject rootJson = new JsonParser().parse(source).getAsJsonObject();
-            JsonObject uriJson = new JsonObject();
-            JsonObject reporting = rootJson.getAsJsonObject("reporting");
-            JsonObject ok = reporting.getAsJsonObject("ok");
-            JsonObject warning = reporting.getAsJsonObject("warning");
-            JsonObject critical = reporting.getAsJsonObject("critical");
-
-            rootJson.remove("type");
-            rootJson.remove("default_port");
-            rootJson.addProperty("type", "WEB");
-
-            uriJson.addProperty("http", "{{admin-properties/policymgr_external_url}}");
-            uriJson.addProperty("https", "{{admin-properties/policymgr_external_url}}");
-            uriJson.addProperty("https_property", "{{ranger-site/http.enabled}}");
-            uriJson.addProperty("https_property_value", "false");
-            uriJson.addProperty("connection_timeout", 5.0f);
-
-            rootJson.remove("uri");
-            rootJson.add("uri", uriJson);
-
-            ok.remove("text");
-            ok.addProperty("text", "HTTP {0} response in {2:.3f}s");
-
-            warning.remove("text");
-            warning.remove("value");
-            warning.addProperty("text", "HTTP {0} response from {1} in {2:.3f}s ({3})");
-
-            critical.remove("text");
-            critical.remove("value");
-            critical.addProperty("text", "Connection failed to {1} ({3})");
-
-            // save the changes
-            updateAlertDefinitionEntitySource("ranger_admin_process",
-              rootJson.toString(), UUID.randomUUID().toString());
-          }
-
-          // update oozie web ui alert
-          AlertDefinitionEntity oozieWebUIAlertDefinitionEntity = alertDefinitionDAO.findByName(
-              cluster.getClusterId(), "oozie_server_webui");
-
-          if (oozieWebUIAlertDefinitionEntity != null) {
-            String source = oozieWebUIAlertDefinitionEntity.getSource();
-            JsonObject rootJson = new JsonParser().parse(source).getAsJsonObject();
-            rootJson.get("uri").getAsJsonObject().remove("http");
-            rootJson.get("uri").getAsJsonObject().remove("kerberos_keytab");
-            rootJson.get("uri").getAsJsonObject().remove("kerberos_principal");
-            rootJson.get("uri").getAsJsonObject().addProperty("http",
-                    "{{oozie-site/oozie.base.url}}/?user.name={{oozie-env/oozie_user}}");
-            rootJson.get("uri").getAsJsonObject().addProperty("kerberos_keytab",
-                    "{{cluster-env/smokeuser_keytab}}");
-            rootJson.get("uri").getAsJsonObject().addProperty("kerberos_principal",
-                    "{{cluster-env/smokeuser_principal_name}}");
-
-            // save the changes
-            updateAlertDefinitionEntitySource("oozie_server_webui", rootJson.toString(),
-                UUID.randomUUID().toString());
-          }
-
-          // update HDFS metric alerts that had changes to their text
-          List<String> hdfsMetricAlertsFloatDivision = Arrays.asList(
-              "namenode_hdfs_capacity_utilization", "datanode_storage");
-
-          for (String metricAlertName : hdfsMetricAlertsFloatDivision) {
-            AlertDefinitionEntity entity = alertDefinitionDAO.findByName(cluster.getClusterId(),
-                metricAlertName);
-
-            if (null == entity) {
-              continue;
-            }
-
-            String source = entity.getSource();
-            JsonObject rootJson = new JsonParser().parse(source).getAsJsonObject();
-            JsonObject reporting = rootJson.getAsJsonObject("reporting");
-            JsonObject ok = reporting.getAsJsonObject("ok");
-            JsonObject warning = reporting.getAsJsonObject("warning");
-            JsonObject critical = reporting.getAsJsonObject("critical");
-
-            JsonElement okText = ok.remove("text");
-            ok.addProperty("text", okText.getAsString().replace("{2:d}", "{2:.0f}"));
-
-            JsonElement warningText = warning.remove("text");
-            warning.addProperty("text", warningText.getAsString().replace("{2:d}", "{2:.0f}"));
-
-            JsonElement criticalText = critical.remove("text");
-            critical.addProperty("text", criticalText.getAsString().replace("{2:d}", "{2:.0f}"));
-
-            // save the changes
-            updateAlertDefinitionEntitySource(metricAlertName, rootJson.toString(),
-                UUID.randomUUID().toString());
-          }
-        }
-      }
-    }
-  }
-
-  private void updateAlertDefinitionEntitySource(final String alertName, final String source, final String newHash) {
-    executeInTransaction(new Runnable() {
-      @Override
-      public void run() {
-        EntityManager em = getEntityManagerProvider().get();
-        Query nativeQuery = em.createNativeQuery("UPDATE alert_definition SET alert_source=?1, hash=?2 WHERE " +
-          "definition_name=?3");
-        nativeQuery.setParameter(1, source);
-        nativeQuery.setParameter(2, newHash);
-        nativeQuery.setParameter(3, alertName);
-        nativeQuery.executeUpdate();
-      }
-    });
-  }
-
-  protected void addMissingConfigs() throws AmbariException {
-    updateHiveConfigs();
-    updateHdfsConfigs();
-    updateStormConfigs();
-    updateRangerHiveConfigs();
-    updateRangerHBaseConfigs();
-    updateRangerSiteConfigs();
-    updateHBaseConfigs();
-  }
-
-  protected void updateRangerSiteConfigs() throws AmbariException{
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    Map<String, String> rangerPropertyMap = new HashMap<String, String>() {{
-      put("HTTPS_CLIENT_AUTH", "https.attrib.clientAuth");
-      put("HTTPS_KEYSTORE_FILE", "https.attrib.keystoreFile");
-      put("HTTPS_KEYSTORE_PASS", "https.attrib.keystorePass");
-      put("HTTPS_KEY_ALIAS", "https.attrib.keyAlias");
-      put("HTTPS_SERVICE_PORT", "https.service.port");
-      put("HTTP_ENABLED", "http.enabled");
-      put("HTTP_SERVICE_PORT", "http.service.port");
-    }};
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-
-        for (final Cluster cluster : clusterMap.values()) {
-         Config rangerSite = cluster.getDesiredConfigByType(RANGER_SITE_CONFIG);
-         Map<String, String> rangerSiteProperties;
-         Map<String, String> convertedRangerSiteProperties = new HashMap<>();
-         Set<String> oldPropertiesList = new HashSet<>();
-
-          if (rangerSite != null) {
-            rangerSiteProperties = rangerSite.getProperties();
-            for (Map.Entry<String, String> mapEntry: rangerPropertyMap.entrySet()) {
-              String oldKey = mapEntry.getKey();
-              String newKey = mapEntry.getValue();
-              if (rangerSiteProperties.containsKey(oldKey)) {
-                convertedRangerSiteProperties.put(newKey, rangerSiteProperties.get(oldKey));
-                oldPropertiesList.add(oldKey);
-              }
-            }
-          }
-
-          updateConfigurationPropertiesForCluster(cluster, RANGER_SITE_CONFIG, convertedRangerSiteProperties, true, false);
-          removeConfigurationPropertiesFromCluster(cluster, RANGER_SITE_CONFIG, oldPropertiesList);
-        }
-
-      }
-    }
-  }
-
-  protected void updateRangerHiveConfigs() throws AmbariException{
-    AmbariManagementController ambariManagementController = injector.getInstance(
-            AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          Config RangerHiveConfig = cluster.getDesiredConfigByType("ranger-hive-plugin-properties");
-          if (RangerHiveConfig != null
-                  && RangerHiveConfig.getProperties().containsKey("ranger-hive-plugin-enabled")
-                  && cluster.getDesiredConfigByType("hive-env") != null) {
-            Map<String, String> newHiveEnvProperties = new HashMap<>();
-            Map<String, String> newHiveServerProperties = new HashMap<>();
-            Set<String> removeRangerHiveProperties = new HashSet<>();
-            removeRangerHiveProperties.add("ranger-hive-plugin-enabled");
-
-            if (RangerHiveConfig.getProperties().get("ranger-hive-plugin-enabled") != null
-                    && RangerHiveConfig.getProperties().get("ranger-hive-plugin-enabled").equalsIgnoreCase("yes")) {
-              newHiveEnvProperties.put("hive_security_authorization", "Ranger");
-              newHiveServerProperties.put("hive.security.authorization.enabled", "true");
-            }
-            boolean updateProperty = cluster.getDesiredConfigByType("hive-env").getProperties().containsKey("hive_security_authorization");
-            updateConfigurationPropertiesForCluster(cluster, "hive-env", newHiveEnvProperties, updateProperty, true);
-            updateConfigurationPropertiesForCluster(cluster, "hiveserver2-site", newHiveServerProperties, updateProperty, true);
-            removeConfigurationPropertiesFromCluster(cluster, "ranger-hive-plugin-properties", removeRangerHiveProperties);
-          }
-        }
-      }
-    }
-  }
-
-  protected void updateRangerHBaseConfigs() throws AmbariException{
-    AmbariManagementController ambariManagementController = injector.getInstance(
-                                                                                  AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          Config RangerHBaseConfig = cluster.getDesiredConfigByType("ranger-hbase-plugin-properties");
-          if (RangerHBaseConfig != null
-                && RangerHBaseConfig.getProperties().containsKey("ranger-hbase-plugin-enabled")
-                && cluster.getDesiredConfigByType("hbase-site") != null) {
-            Map<String, String> newHBaseSiteProperties = new HashMap<>();
-
-            if (RangerHBaseConfig.getProperties().get("ranger-hbase-plugin-enabled") != null
-                  && RangerHBaseConfig.getProperties().get("ranger-hbase-plugin-enabled").equalsIgnoreCase("yes")) {
-
-              newHBaseSiteProperties.put("hbase.security.authorization", "true");
-            }
-            boolean updateProperty = cluster.getDesiredConfigByType("hbase-site").getProperties().containsKey("hbase.security.authorization");
-            updateConfigurationPropertiesForCluster(cluster, "hbase-site", newHBaseSiteProperties, updateProperty, true);
-          }
-        }
-      }
-    }
-  }
-
-  protected void updateHdfsConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(
-        AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-      Map<String, String> prop = new HashMap<>();
-      String content = null;
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          /*
-           * Append -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 to HADOOP_NAMENODE_OPTS from hadoop-env.sh
-           */
-          content = null;
-          if (cluster.getDesiredConfigByType("hadoop-env") != null) {
-            content = cluster.getDesiredConfigByType(
-                "hadoop-env").getProperties().get("content");
-          }
-
-          if (content != null) {
-            content += "\nexport HADOOP_NAMENODE_OPTS=\"${HADOOP_NAMENODE_OPTS} -Dorg.mortbay.jetty.Request.maxFormContentSize=-1\"";
-
-            prop.put("content", content);
-            updateConfigurationPropertiesForCluster(cluster, "hadoop-env",
-                prop, true, false);
-          }
-          /*
-           * Update dfs.namenode.rpc-address set hostname instead of localhost
-           */
-          if (cluster.getDesiredConfigByType(HDFS_SITE_CONFIG) != null && !cluster.getHosts("HDFS","NAMENODE").isEmpty()) {
-
-            URI nameNodeRpc = null;
-            String hostName = cluster.getHosts("HDFS","NAMENODE").iterator().next();
-            // Try to generate dfs.namenode.rpc-address
-            if (cluster.getDesiredConfigByType("core-site").getProperties() != null &&
-                      cluster.getDesiredConfigByType("core-site").getProperties().get("fs.defaultFS") != null) {
-              try {
-                if (isNNHAEnabled(cluster)) {
-                  // NN HA enabled
-                  // Remove dfs.namenode.rpc-address property
-                  Set<String> removePropertiesSet = new HashSet<>();
-                  removePropertiesSet.add("dfs.namenode.rpc-address");
-                  removeConfigurationPropertiesFromCluster(cluster, HDFS_SITE_CONFIG, removePropertiesSet);
-                } else {
-                  // NN HA disabled
-                  nameNodeRpc = new URI(cluster.getDesiredConfigByType("core-site").getProperties().get("fs.defaultFS"));
-                  Map<String, String> hdfsProp = new HashMap<>();
-                  hdfsProp.put("dfs.namenode.rpc-address", hostName + ":" + nameNodeRpc.getPort());
-                  updateConfigurationPropertiesForCluster(cluster, HDFS_SITE_CONFIG,
-                          hdfsProp, false, false);
-                }
-              } catch (URISyntaxException e) {
-                e.printStackTrace();
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-
-  protected void updateHiveConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(
-            AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          String content = null;
-          String hive_server2_auth = "";
-          if (cluster.getDesiredConfigByType("hive-site") != null &&
-              cluster.getDesiredConfigByType("hive-site").getProperties().containsKey("hive.server2.authentication")) {
-
-            hive_server2_auth = cluster.getDesiredConfigByType("hive-site").getProperties().get("hive.server2.authentication");
-          }
-
-          if(cluster.getDesiredConfigByType("hive-env") != null) {
-            Map<String, String> hiveEnvProps = new HashMap<>();
-            Set<String> hiveServerSiteRemoveProps = new HashSet<>();
-            // Update logic for setting HIVE_AUX_JARS_PATH in hive-env.sh
-            content = cluster.getDesiredConfigByType("hive-env").getProperties().get("content");
-            if(content != null) {
-              content = updateHiveEnvContent(content);
-              hiveEnvProps.put("content", content);
-            }
-            //hive metastore and client_heapsize are added for HDP2, we should check if it exists and not add it for HDP1
-            if (!cluster.getDesiredConfigByType("hive-env").getProperties().containsKey("hive.client.heapsize")) {
-              hiveEnvProps.put("hive.client.heapsize", "512");
-            }
-            if (!cluster.getDesiredConfigByType("hive-env").getProperties().containsKey("hive.metastore.heapsize")) {
-              hiveEnvProps.put("hive.metastore.heapsize", "1024");
-            }
-
-            boolean isHiveSecurityAuthPresent = cluster.getDesiredConfigByType("hive-env").getProperties().containsKey("hive_security_authorization");
-            String hiveSecurityAuth="";
-
-            if ("kerberos".equalsIgnoreCase(hive_server2_auth) && cluster.getServices().containsKey("KERBEROS")){
-              hiveSecurityAuth = "SQLStdAuth";
-              isHiveSecurityAuthPresent = true;
-              hiveEnvProps.put("hive_security_authorization", hiveSecurityAuth);
-            } else {
-              if (isHiveSecurityAuthPresent) {
-                hiveSecurityAuth = cluster.getDesiredConfigByType("hive-env").getProperties().get("hive_security_authorization");
-              }
-            }
-
-            if (isHiveSecurityAuthPresent && "none".equalsIgnoreCase(hiveSecurityAuth) &&
-                !isConfigEnabled(cluster,
-                    AbstractUpgradeCatalog.CONFIGURATION_TYPE_RANGER_HIVE_PLUGIN_PROPERTIES,
-                    AbstractUpgradeCatalog.PROPERTY_RANGER_HIVE_PLUGIN_ENABLED)) {
-              hiveServerSiteRemoveProps.add("hive.security.authorization.manager");
-              hiveServerSiteRemoveProps.add("hive.security.authenticator.manager");
-            }
-            updateConfigurationPropertiesForCluster(cluster, "hive-env", hiveEnvProps, true, true);
-            removeConfigurationPropertiesFromCluster(cluster, "hiveserver2-site", hiveServerSiteRemoveProps);
-          }
-
-          if(cluster.getDesiredConfigByType("hive-site") != null) {
-            Set<String> hiveSiteRemoveProps = new HashSet<>();
-            Map<String, String> hiveSiteAddProps = new HashMap<>();
-
-            if (!"pam".equalsIgnoreCase(hive_server2_auth)) {
-              hiveSiteRemoveProps.add("hive.server2.authentication.pam.services");
-            } else {
-              hiveSiteAddProps.put("hive.server2.authentication.pam.services", "");
-            }
-            if (!"custom".equalsIgnoreCase(hive_server2_auth)) {
-              hiveSiteRemoveProps.add("hive.server2.custom.authentication.class");
-            } else {
-              hiveSiteAddProps.put("hive.server2.custom.authentication.class", "");
-            }
-            if (!"ldap".equalsIgnoreCase(hive_server2_auth)) {
-              hiveSiteRemoveProps.add("hive.server2.authentication.ldap.url");
-            } else {
-              hiveSiteAddProps.put("hive.server2.authentication.ldap.url", "");
-            }
-            if (!"kerberos".equalsIgnoreCase(hive_server2_auth) && !cluster.getServices().containsKey("KERBEROS")) {
-              hiveSiteRemoveProps.add("hive.server2.authentication.kerberos.keytab");
-              hiveSiteRemoveProps.add("hive.server2.authentication.kerberos.principal");
-            } else {
-              hiveSiteAddProps.put("hive.server2.authentication.kerberos.keytab", "");
-              hiveSiteAddProps.put("hive.server2.authentication.kerberos.principal", "");
-            }
-
-
-            updateConfigurationPropertiesForCluster(cluster, "hive-site", hiveSiteAddProps, hiveSiteRemoveProps, false, true);
-          }
-        }
-      }
-    }
-  }
-
-  protected void updateHBaseConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(
-        AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          if (cluster.getDesiredConfigByType("hbase-site") != null && cluster.getDesiredConfigByType("hbase-env") != null) {
-            Map<String, String> hbaseEnvProps = new HashMap<>();
-            Map<String, String> hbaseSiteProps = new HashMap<>();
-            Set<String> hbaseEnvRemoveProps = new HashSet<>();
-            Set<String> hbaseSiteRemoveProps = new HashSet<>();
-
-            if (cluster.getDesiredConfigByType("hbase-site").getProperties().containsKey("hbase.region.server.rpc.scheduler.factory.class") &&
-                "org.apache.phoenix.hbase.index.ipc.PhoenixIndexRpcSchedulerFactory".equals(cluster.getDesiredConfigByType("hbase-site").getProperties().get(
-                        "hbase.region.server.rpc.scheduler.factory.class"))) {
-              hbaseEnvProps.put("phoenix_sql_enabled", "true");
-            }
-
-            if (cluster.getDesiredConfigByType("hbase-env").getProperties().containsKey("phoenix_sql_enabled") &&
-            "true".equalsIgnoreCase(cluster.getDesiredConfigByType("hbase-env").getProperties().get("phoenix_sql_enabled"))) {
-              hbaseSiteProps.put("hbase.regionserver.wal.codec", "org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec");
-              hbaseSiteProps.put("phoenix.functions.allowUserDefinedFunctions", "true");
-            }
-            else {
-              hbaseSiteProps.put("hbase.regionserver.wal.codec", "org.apache.hadoop.hbase.regionserver.wal.WALCellCodec");
-              hbaseSiteRemoveProps.add("hbase.rpc.controllerfactory.class");
-              hbaseSiteRemoveProps.add("phoenix.functions.allowUserDefinedFunctions");
-            }
-
-            if (cluster.getDesiredConfigByType("hbase-site").getProperties().containsKey("hbase.security.authorization")) {
-              if("true".equalsIgnoreCase(cluster.getDesiredConfigByType("hbase-site").getProperties().get("hbase.security.authorization"))) {
-                hbaseSiteProps.put("hbase.coprocessor.master.classes", "org.apache.hadoop.hbase.security.access.AccessController");
-                hbaseSiteProps.put("hbase.coprocessor.regionserver.classes", "org.apache.hadoop.hbase.security.access.AccessController");
-              }
-              else {
-                hbaseSiteProps.put("hbase.coprocessor.master.classes", "");
-                hbaseSiteRemoveProps.add("hbase.coprocessor.regionserver.classes");
-              }
-            }
-            else {
-              hbaseSiteRemoveProps.add("hbase.coprocessor.regionserver.classes");
-            }
-
-            updateConfigurationPropertiesForCluster(cluster, "hbase-site", hbaseSiteProps, true, false);
-            updateConfigurationPropertiesForCluster(cluster, "hbase-env", hbaseEnvProps, true, false);
-            updateConfigurationPropertiesForCluster(cluster, "hbase-site", new HashMap<String, String>(), hbaseSiteRemoveProps, false, true);
-            updateConfigurationPropertiesForCluster(cluster, "hbase-env", new HashMap<String, String>(), hbaseEnvRemoveProps, false, true);
-          }
-        }
-      }
-    }
-  }
-
-  protected String updateHiveEnvContent(String hiveEnvContent) {
-    if(hiveEnvContent == null) {
-      return null;
-    }
-
-    String oldAuxJarRegex = "if\\s*\\[\\s*\"\\$\\{HIVE_AUX_JARS_PATH\\}\"\\s*!=\\s*\"\"\\s*];\\s*then\\s*\\n" +
-        "\\s*export\\s+HIVE_AUX_JARS_PATH\\s*=\\s*\\$\\{HIVE_AUX_JARS_PATH\\}\\s*\\n" +
-        "\\s*elif\\s*\\[\\s*-d\\s*\"/usr/hdp/current/hive-webhcat/share/hcatalog\"\\s*\\];\\s*then\\s*\\n" +
-        "\\s*export\\s+HIVE_AUX_JARS_PATH\\s*=\\s*/usr/hdp/current/hive-webhcat/share/hcatalog\\s*\n" +
-        "\\s*fi";
-    String newAuxJarPath = "if [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n" +
-        "  if [ -f \"${HIVE_AUX_JARS_PATH}\" ]; then    \n" +
-        "    export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\n" +
-        "  elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n" +
-        "    export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n" +
-        "  fi\n" +
-        "elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n" +
-        "  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n" +
-        "fi";
-    return hiveEnvContent.replaceAll(oldAuxJarRegex, Matcher.quoteReplacement(newAuxJarPath));
-  }
-
-  protected  void updateStormConfigs() throws  AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(
-            AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          //if cluster is secured we should set additional properties
-          if(cluster.getDesiredConfigByType("cluster-env") != null
-                  && cluster.getDesiredConfigByType("cluster-env").getProperties().get("security_enabled").equals("true")
-                  && cluster.getDesiredConfigByType("storm-site") != null ) {
-            Map<String, String> newStormProps = new HashMap<>();
-            if (!cluster.getDesiredConfigByType("storm-site").getProperties().containsKey("java.security.auth.login.config")) {
-              newStormProps.put("java.security.auth.login.config", "{{conf_dir}}/storm_jaas.conf");
-            }
-            if (!cluster.getDesiredConfigByType("storm-site").getProperties().containsKey("nimbus.admins")) {
-              newStormProps.put("nimbus.admins", "['{{storm_user}}']");
-            }
-            if (!cluster.getDesiredConfigByType("storm-site").getProperties().containsKey("nimb

<TRUNCATED>

[58/63] [abbrv] ambari git commit: AMBARI-21370: Support VIPs instead of Host Names -- fix imports

Posted by ab...@apache.org.
AMBARI-21370: Support VIPs instead of Host Names -- fix imports


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/aa7a8c65
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/aa7a8c65
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/aa7a8c65

Branch: refs/heads/branch-feature-logsearch-ui
Commit: aa7a8c657f07caa0be3db89d1e8146978d7d438c
Parents: 4d7cc7f
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Thu Jun 29 19:02:45 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Thu Jun 29 19:02:45 2017 +0200

----------------------------------------------------------------------
 .../org/apache/ambari/server/controller/jmx/JMXHostProvider.java | 4 +---
 .../apache/ambari/server/controller/jmx/JMXPropertyProvider.java | 1 -
 2 files changed, 1 insertion(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/aa7a8c65/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java
index dbf8eb7..4e48b53 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java
@@ -17,12 +17,10 @@
  */
 package org.apache.ambari.server.controller.jmx;
 
-import org.apache.ambari.server.controller.spi.SystemException;
-import org.apache.ambari.server.state.Host;
-
 import java.util.Set;
 
 import org.apache.ambari.server.controller.spi.SystemException;
+import org.apache.ambari.server.state.Host;
 
 /**
  * Provider of JMX host information.

http://git-wip-us.apache.org/repos/asf/ambari/blob/aa7a8c65/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java
index e4de377..832d9ae 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java
@@ -40,7 +40,6 @@ import org.apache.ambari.server.controller.spi.Request;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.SystemException;
 import org.apache.ambari.server.controller.utilities.StreamProvider;
-import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.services.MetricsRetrievalService;
 import org.apache.ambari.server.state.services.MetricsRetrievalService.MetricSourceType;
 import org.slf4j.Logger;
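
For quick reference, the top of JMXHostProvider.java after this patch can be reconstructed directly from the hunk above (only the lines shown there; the rest of the file is untouched) and should read:

  package org.apache.ambari.server.controller.jmx;

  import java.util.Set;

  import org.apache.ambari.server.controller.spi.SystemException;
  import org.apache.ambari.server.state.Host;

  /**
   * Provider of JMX host information.
   */

JMXPropertyProvider.java, per the second hunk, simply drops its now-unused org.apache.ambari.server.state.Host import; no other lines change.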


[55/63] [abbrv] ambari git commit: AMBARI-21343. Cleanup relevant Kerberos identities when a component is removed - addendum: fix missing copyrights (amagyar)

Posted by ab...@apache.org.
AMBARI-21343. Cleanup relevant Kerberos identities when a component is removed - addendum: fix missing copyrights (amagyar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/09e5d41c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/09e5d41c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/09e5d41c

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 09e5d41c5d52339a0edea97886acf0fd38ee8a91
Parents: 8b5c7db
Author: Attila Magyar <am...@hortonworks.com>
Authored: Thu Jun 29 12:14:54 2017 +0200
Committer: Attila Magyar <am...@hortonworks.com>
Committed: Thu Jun 29 12:14:54 2017 +0200

----------------------------------------------------------------------
 .../controller/OrderedRequestStageContainer.java   | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/09e5d41c/ambari-server/src/main/java/org/apache/ambari/server/controller/OrderedRequestStageContainer.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/OrderedRequestStageContainer.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/OrderedRequestStageContainer.java
index 6d8b5a3..4ac6896 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/OrderedRequestStageContainer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/OrderedRequestStageContainer.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.ambari.server.controller;
 
 import org.apache.ambari.server.AmbariException;
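
As a side note, a minimal, hypothetical sketch of how one might verify that a Java source file starts with the ASF header this commit adds is shown below. The class name is illustrative and not part of the Ambari build; the file path is taken from this commit, and only standard java.nio calls are used.

  import java.io.IOException;
  import java.nio.charset.StandardCharsets;
  import java.nio.file.Files;
  import java.nio.file.Paths;

  public class LicenseHeaderCheck {

    // Returns true when the file begins with a block comment and contains the ASF license notice.
    static boolean hasAsfHeader(String path) throws IOException {
      String text = new String(Files.readAllBytes(Paths.get(path)), StandardCharsets.UTF_8);
      return text.startsWith("/*")
          && text.contains("Licensed to the Apache Software Foundation (ASF)");
    }

    public static void main(String[] args) throws IOException {
      // Illustrative path; point it at any source file to check.
      System.out.println(hasAsfHeader(
          "ambari-server/src/main/java/org/apache/ambari/server/controller/OrderedRequestStageContainer.java"));
    }
  }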


[22/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java
deleted file mode 100644
index f171086..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog220.java
+++ /dev/null
@@ -1,1404 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import java.io.StringWriter;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.text.MessageFormat;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.UUID;
-import java.util.regex.Matcher;
-
-import javax.xml.transform.OutputKeys;
-import javax.xml.transform.Transformer;
-import javax.xml.transform.TransformerConfigurationException;
-import javax.xml.transform.TransformerException;
-import javax.xml.transform.TransformerFactory;
-import javax.xml.transform.dom.DOMSource;
-import javax.xml.transform.stream.StreamResult;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
-import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
-import org.apache.ambari.server.orm.dao.ArtifactDAO;
-import org.apache.ambari.server.orm.dao.ClusterDAO;
-import org.apache.ambari.server.orm.dao.DaoUtils;
-import org.apache.ambari.server.orm.dao.HostVersionDAO;
-import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
-import org.apache.ambari.server.orm.dao.StackDAO;
-import org.apache.ambari.server.orm.dao.UpgradeDAO;
-import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
-import org.apache.ambari.server.orm.entities.ArtifactEntity;
-import org.apache.ambari.server.orm.entities.ClusterEntity;
-import org.apache.ambari.server.orm.entities.HostEntity;
-import org.apache.ambari.server.orm.entities.HostVersionEntity;
-import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
-import org.apache.ambari.server.orm.entities.StackEntity;
-import org.apache.ambari.server.orm.entities.UpgradeEntity;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.RepositoryVersionState;
-import org.apache.ambari.server.state.SecurityType;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.StackInfo;
-import org.apache.ambari.server.state.alert.SourceType;
-import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
-import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
-import org.apache.ambari.server.state.stack.upgrade.Direction;
-import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
-import org.apache.ambari.server.state.stack.upgrade.UpgradeType;
-import org.apache.ambari.server.utils.VersionUtils;
-import org.apache.commons.lang.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParser;
-import com.google.inject.Inject;
-import com.google.inject.Injector;
-import com.google.inject.persist.Transactional;
-
-/**
- * Upgrade catalog for version 2.2.0.
- */
-public class UpgradeCatalog220 extends AbstractUpgradeCatalog {
-
-  private static final String UPGRADE_TABLE = "upgrade";
-  private static final String STORM_SITE = "storm-site";
-  private static final String HDFS_SITE_CONFIG = "hdfs-site";
-  private static final String TOPOLOGY_CONFIG = "topology";
-  private static final String KAFKA_BROKER = "kafka-broker";
-  private static final String KAFKA_ENV_CONFIG = "kafka-env";
-  private static final String KAFKA_ENV_CONTENT_KERBEROS_PARAMS =
-    "export KAFKA_KERBEROS_PARAMS={{kafka_kerberos_params}}";
-  private static final String AMS_ENV = "ams-env";
-  private static final String AMS_HBASE_ENV = "ams-hbase-env";
-  private static final String AMS_SITE = "ams-site";
-  private static final String AMS_HBASE_SITE = "ams-hbase-site";
-  private static final String AMS_HBASE_SITE_ZK_TIMEOUT_PROPERTY =
-    "zookeeper.session.timeout.localHBaseCluster";
-  private static final String AMS_HBASE_SITE_NORMALIZER_ENABLED_PROPERTY = "hbase.normalizer.enabled";
-  private static final String AMS_HBASE_SITE_NORMALIZER_PERIOD_PROPERTY = "hbase.normalizer.period";
-  private static final String AMS_HBASE_SITE_NORMALIZER_CLASS_PROPERTY = "hbase.master.normalizer.class";
-  private static final String TIMELINE_METRICS_HBASE_FIFO_COMPACTION_ENABLED = "timeline.metrics.hbase.fifo.compaction.enabled";
-  private static final String HBASE_ENV_CONFIG = "hbase-env";
-  private static final String FLUME_ENV_CONFIG = "flume-env";
-  private static final String HIVE_SITE_CONFIG = "hive-site";
-  private static final String HIVE_ENV_CONFIG = "hive-env";
-  private static final String RANGER_ENV_CONFIG = "ranger-env";
-  private static final String RANGER_UGSYNC_SITE_CONFIG = "ranger-ugsync-site";
-  private static final String ZOOKEEPER_LOG4J_CONFIG = "zookeeper-log4j";
-  private static final String NIMBS_MONITOR_FREQ_SECS_PROPERTY = "nimbus.monitor.freq.secs";
-  private static final String STORM_METRICS_REPORTER = "metrics.reporter.register";
-  private static final String HIVE_SERVER2_OPERATION_LOG_LOCATION_PROPERTY = "hive.server2.logging.operation.log.location";
-  private static final String HADOOP_ENV_CONFIG = "hadoop-env";
-  private static final String CONTENT_PROPERTY = "content";
-  private static final String HADOOP_ENV_CONTENT_TO_APPEND = "\n{% if is_datanode_max_locked_memory_set %}\n" +
-    "# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n" +
-    "# Makes sense to fix only when runing DN as root \n" +
-    "if [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then\n" +
-    "  ulimit -l {{datanode_max_locked_memory}}\n" +
-    "fi\n" +
-    "{% endif %}\n";
-
-  private static final String DOWNGRADE_ALLOWED_COLUMN = "downgrade_allowed";
-  private static final String UPGRADE_SKIP_FAILURE_COLUMN = "skip_failures";
-  private static final String UPGRADE_SKIP_SC_FAILURE_COLUMN = "skip_sc_failures";
-  public static final String UPGRADE_PACKAGE_COL = "upgrade_package";
-  public static final String UPGRADE_TYPE_COL = "upgrade_type";
-  public static final String REPO_VERSION_TABLE = "repo_version";
-
-  private static final String HOST_ROLE_COMMAND_TABLE = "host_role_command";
-  private static final String HOST_ID_COL = "host_id";
-
-  private static final String KERBEROS_DESCRIPTOR_TABLE = "kerberos_descriptor";
-  private static final String KERBEROS_DESCRIPTOR_NAME_COLUMN = "kerberos_descriptor_name";
-  private static final String KERBEROS_DESCRIPTOR_COLUMN = "kerberos_descriptor";
-  private static final String RANGER_HDFS_PLUGIN_ENABLED_PROPERTY = "ranger-hdfs-plugin-enabled";
-  private static final String RANGER_HIVE_PLUGIN_ENABLED_PROPERTY = "ranger-hive-plugin-enabled";
-  private static final String RANGER_HBASE_PLUGIN_ENABLED_PROPERTY = "ranger-hbase-plugin-enabled";
-  private static final String RANGER_STORM_PLUGIN_ENABLED_PROPERTY = "ranger-storm-plugin-enabled";
-  private static final String RANGER_KNOX_PLUGIN_ENABLED_PROPERTY = "ranger-knox-plugin-enabled";
-  private static final String RANGER_YARN_PLUGIN_ENABLED_PROPERTY = "ranger-yarn-plugin-enabled";
-  private static final String RANGER_KAFKA_PLUGIN_ENABLED_PROPERTY = "ranger-kafka-plugin-enabled";
-
-  private static final String RANGER_USERSYNC_SOURCE_IMPL_CLASS_PROPERTY = "ranger.usersync.source.impl.class";
-
-  private static final String BLUEPRINT_TABLE = "blueprint";
-  private static final String SECURITY_TYPE_COLUMN = "security_type";
-  private static final String SECURITY_DESCRIPTOR_REF_COLUMN = "security_descriptor_reference";
-
-  private static final String STAGE_TABLE = "stage";
-
-  private static final String KNOX_SERVICE = "KNOX";
-
-  /**
-   * Logger.
-   */
-  private static final Logger LOG = LoggerFactory.getLogger(UpgradeCatalog220.class);
-
-  private static final String OOZIE_SITE_CONFIG = "oozie-site";
-  private static final String OOZIE_SERVICE_HADOOP_CONFIGURATIONS_PROPERTY_NAME = "oozie.service.HadoopAccessorService.hadoop.configurations";
-  private static final String OLD_DEFAULT_HADOOP_CONFIG_PATH = "/etc/hadoop/conf";
-  private static final String NEW_DEFAULT_HADOOP_CONFIG_PATH = "{{hadoop_conf_dir}}";
-
-  @Inject
-  DaoUtils daoUtils;
-
-  @Inject
-  private RepositoryVersionDAO repositoryVersionDAO;
-
-  @Inject
-  private ClusterDAO clusterDAO;
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Don't forget to register new UpgradeCatalogs in {@link org.apache.ambari.server.upgrade.SchemaUpgradeHelper.UpgradeHelperModule#configure()}
-   *
-   * @param injector Guice injector to track dependencies and uses bindings to inject them.
-   */
-  @Inject
-  public UpgradeCatalog220(Injector injector) {
-    super(injector);
-    this.injector = injector;
-  }
-
-  // ----- UpgradeCatalog ----------------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getTargetVersion() {
-    return "2.2.0";
-  }
-
-  // ----- AbstractUpgradeCatalog --------------------------------------------
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public String getSourceVersion() {
-    return "2.1.2.1";
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executeDDLUpdates() throws AmbariException, SQLException {
-    executeUpgradeDDLUpdates();
-
-    // Alter the host_role_command table to allow host_id to be nullable
-    dbAccessor.alterColumn(HOST_ROLE_COMMAND_TABLE, new DBColumnInfo(HOST_ID_COL, Long.class, null, null, true));
-
-    addKerberosDescriptorTable();
-    executeBlueprintDDLUpdates();
-    executeStageDDLUpdates();
-  }
-
-  protected void executeUpgradeDDLUpdates() throws AmbariException, SQLException {
-    updateUpgradesDDL();
-  }
-
-  private void addKerberosDescriptorTable() throws SQLException {
-    List<DBAccessor.DBColumnInfo> columns = new ArrayList<>();
-    columns.add(new DBAccessor.DBColumnInfo(KERBEROS_DESCRIPTOR_NAME_COLUMN, String.class, 255, null, false));
-    columns.add(new DBAccessor.DBColumnInfo(KERBEROS_DESCRIPTOR_COLUMN, char[].class, null, null, false));
-
-    LOG.debug("Creating table [ {} ] with columns [ {} ] and primary key: [ {} ]", KERBEROS_DESCRIPTOR_TABLE, columns, KERBEROS_DESCRIPTOR_NAME_COLUMN);
-    dbAccessor.createTable(KERBEROS_DESCRIPTOR_TABLE, columns, KERBEROS_DESCRIPTOR_NAME_COLUMN);
-  }
-
-  private void executeBlueprintDDLUpdates() throws AmbariException, SQLException {
-    dbAccessor.addColumn(BLUEPRINT_TABLE, new DBAccessor.DBColumnInfo(SECURITY_TYPE_COLUMN,
-      String.class, 32, "NONE", false));
-    dbAccessor.addColumn(BLUEPRINT_TABLE, new DBAccessor.DBColumnInfo(SECURITY_DESCRIPTOR_REF_COLUMN,
-      String.class, null, null, true));
-  }
-
-  /**
-   * Updates the {@code stage} table by:
-   * <ul>
-   * <li>Adding the {@code supports_auto_skip_failure} column</li>
-   * </ul>
-   *
-   * @throws SQLException
-   */
-  protected void executeStageDDLUpdates() throws SQLException {
-    dbAccessor.addColumn(STAGE_TABLE,
-      new DBAccessor.DBColumnInfo("supports_auto_skip_failure", Integer.class, 1, 0, false));
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void executePreDMLUpdates() throws AmbariException, SQLException {
-    // execute DDL updates
-    executeStackUpgradeDDLUpdates();
-
-    // DDL and DML mixed code, double check here
-    bootstrapRepoVersionForHDP21();
-
-    // execute DML updates, no DDL things after this line
-    executeUpgradePreDMLUpdates();
-  }
-
-  /**
-   * Updates the following columns on the {@value #UPGRADE_TABLE} table to
-   * default values:
-   * <ul>
-   * <li>{@value #DOWNGRADE_ALLOWED_COLUMN}</li>
-   * <li>{@value #UPGRADE_SKIP_FAILURE_COLUMN}</li>
-   * <li>{@value #UPGRADE_SKIP_SC_FAILURE_COLUMN}</li>
-   * </ul>
-   *
-   * @throws AmbariException
-   * @throws SQLException
-   */
-  protected void executeUpgradePreDMLUpdates() throws AmbariException, SQLException {
-    UpgradeDAO upgradeDAO = injector.getInstance(UpgradeDAO.class);
-    List<UpgradeEntity> upgrades = upgradeDAO.findAll();
-    for (UpgradeEntity upgrade: upgrades){
-      if (upgrade.isDowngradeAllowed() == null) {
-        upgrade.setDowngradeAllowed(true);
-      }
-
-      // ensure that these are set to false for existing upgrades
-      upgrade.setAutoSkipComponentFailures(false);
-      upgrade.setAutoSkipServiceCheckFailures(false);
-
-      // apply changes
-      upgradeDAO.merge(upgrade);
-
-      LOG.info(String.format("Updated upgrade id %s", upgrade.getId()));
-    }
-
-    // make the columns nullable now that they have defaults
-    dbAccessor.setColumnNullable(UPGRADE_TABLE, DOWNGRADE_ALLOWED_COLUMN, false);
-    dbAccessor.setColumnNullable(UPGRADE_TABLE, UPGRADE_SKIP_FAILURE_COLUMN, false);
-    dbAccessor.setColumnNullable(UPGRADE_TABLE, UPGRADE_SKIP_SC_FAILURE_COLUMN, false);
-  }
-
-  @Override
-  protected void executeDMLUpdates() throws AmbariException, SQLException {
-    addNewConfigurationsFromXml();
-    updateAlertDefinitions();
-    updateStormConfigs();
-    updateAMSConfigs();
-    updateHDFSConfigs();
-    updateHbaseEnvConfig();
-    updateFlumeEnvConfig();
-    updateHadoopEnv();
-    updateKafkaConfigs();
-    updateRangerEnvConfig();
-    updateRangerUgsyncSiteConfig();
-    updateZookeeperLog4j();
-    updateHiveConfig();
-    updateAccumuloConfigs();
-    updateKerberosDescriptorArtifacts();
-    updateKnoxTopology();
-  }
-
-  protected void updateKnoxTopology() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config topology = cluster.getDesiredConfigByType(TOPOLOGY_CONFIG);
-      if (topology != null) {
-        String content = topology.getProperties().get(CONTENT_PROPERTY);
-        if (content != null) {
-          Document topologyXml = convertStringToDocument(content);
-          if (topologyXml != null) {
-            Element root = topologyXml.getDocumentElement();
-            if (root != null)  {
-              NodeList providerNodes = root.getElementsByTagName("provider");
-              boolean authorizationProviderExists = false;
-              try {
-                for (int i = 0; i < providerNodes.getLength(); i++) {
-                  Node providerNode = providerNodes.item(i);
-                  NodeList childNodes = providerNode.getChildNodes();
-                  for (int k = 0; k < childNodes.getLength(); k++) {
-                    Node child = childNodes.item(k);
-                    child.normalize();
-                    String childTextContent = child.getTextContent();
-                    if (childTextContent != null && childTextContent.toLowerCase().equals("authorization")) {
-                      authorizationProviderExists = true;
-                      break;
-                    }
-                  }
-                  if (authorizationProviderExists) {
-                    break;
-                  }
-                }
-              } catch(Exception e) {
-                e.printStackTrace();
-                LOG.error("Error occurred during check 'authorization' provider already exists in topology." + e);
-                return;
-              }
-              if (!authorizationProviderExists) {
-                NodeList nodeList = root.getElementsByTagName("gateway");
-                if (nodeList != null && nodeList.getLength() > 0) {
-                  boolean rangerPluginEnabled = isConfigEnabled(cluster,
-                    AbstractUpgradeCatalog.CONFIGURATION_TYPE_RANGER_KNOX_PLUGIN_PROPERTIES,
-                    AbstractUpgradeCatalog.PROPERTY_RANGER_KNOX_PLUGIN_ENABLED);
-
-                  Node gatewayNode = nodeList.item(0);
-                  Element newProvider = topologyXml.createElement("provider");
-
-                  Element role = topologyXml.createElement("role");
-                  role.appendChild(topologyXml.createTextNode("authorization"));
-                  newProvider.appendChild(role);
-
-                  Element name = topologyXml.createElement("name");
-                  if (rangerPluginEnabled) {
-                    name.appendChild(topologyXml.createTextNode("XASecurePDPKnox"));
-                  } else {
-                    name.appendChild(topologyXml.createTextNode("AclsAuthz"));
-                  }
-                  newProvider.appendChild(name);
-
-                  Element enabled = topologyXml.createElement("enabled");
-                  enabled.appendChild(topologyXml.createTextNode("true"));
-                  newProvider.appendChild(enabled);
-
-
-                  gatewayNode.appendChild(newProvider);
-
-                  DOMSource topologyDomSource = new DOMSource(root);
-                  StringWriter writer = new StringWriter();
-                  try {
-                    Transformer transformer = TransformerFactory.newInstance().newTransformer();
-                    transformer.setOutputProperty(OutputKeys.ENCODING, "UTF-8");
-                    transformer.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes");
-                    transformer.setOutputProperty(OutputKeys.METHOD, "xml");
-                    transformer.setOutputProperty(OutputKeys.INDENT, "yes");
-                    transformer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "5");
-                    transformer.transform(topologyDomSource, new StreamResult(writer));
-                  } catch (TransformerConfigurationException e) {
-                    e.printStackTrace();
-                    LOG.error("Unable to create transformer instance, to convert Document(XML) to String. " + e);
-                    return;
-                  } catch (TransformerException e) {
-                    e.printStackTrace();
-                    LOG.error("Unable to transform Document(XML) to StringWriter. " + e);
-                    return;
-                  }
-
-                  content = writer.toString();
-                  Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, content);
-                  updateConfigurationPropertiesForCluster(cluster, TOPOLOGY_CONFIG, updates, true, false);
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Move the upgrade_package column from the repo_version table to the upgrade table as follows,
-   * add column upgrade_package to upgrade table as String 255 and nullable
-   * populate column in the upgrade table
-   * drop the column in the repo_version table
-   * make the column in the upgrade table non-nullable.
-   * This has to be called as part of DML and not DDL since the persistence service has to be started.
-   * @throws AmbariException
-   * @throws SQLException
-   */
-  @Transactional
-  protected void executeStackUpgradeDDLUpdates() throws SQLException, AmbariException {
-    final Configuration.DatabaseType databaseType = configuration.getDatabaseType();
-
-    // Add columns
-    if (!dbAccessor.tableHasColumn(UPGRADE_TABLE, UPGRADE_PACKAGE_COL)) {
-      LOG.info("Adding upgrade_package column to upgrade table.");
-      dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_PACKAGE_COL, String.class, 255, null, true));
-    }
-    if (!dbAccessor.tableHasColumn(UPGRADE_TABLE, UPGRADE_TYPE_COL)) {
-      LOG.info("Adding upgrade_type column to upgrade table.");
-      dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_TYPE_COL, String.class, 32, null, true));
-    }
-
-    // Populate values in upgrade table.
-    boolean success = populateUpgradeTable();
-
-    if (!success) {
-      throw new AmbariException("Errors found while populating the upgrade table with values for columns upgrade_type and upgrade_package.");
-    }
-
-    if (dbAccessor.tableHasColumn(REPO_VERSION_TABLE, UPGRADE_PACKAGE_COL)) {
-      LOG.info("Dropping upgrade_package column from repo_version table.");
-      dbAccessor.dropColumn(REPO_VERSION_TABLE, UPGRADE_PACKAGE_COL);
-
-      // Now, make the added column non-nullable
-      // Make the hosts id non-null after all the values are populated
-      LOG.info("Making upgrade_package column in the upgrade table non-nullable.");
-      if (databaseType == Configuration.DatabaseType.DERBY) {
-        // This is a workaround for UpgradeTest.java unit test
-        dbAccessor.executeQuery("ALTER TABLE " + UPGRADE_TABLE + " ALTER column " + UPGRADE_PACKAGE_COL + " NOT NULL");
-      } else {
-        dbAccessor.alterColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_PACKAGE_COL, String.class, 255, null, false));
-      }
-    }
-
-    if (dbAccessor.tableHasColumn(REPO_VERSION_TABLE, UPGRADE_TYPE_COL)) {
-      // Now, make the added column non-nullable
-      // Make the hosts id non-null after all the values are populated
-      LOG.info("Making upgrade_type column in the upgrade table non-nullable.");
-      if (databaseType == Configuration.DatabaseType.DERBY) {
-        // This is a workaround for UpgradeTest.java unit test
-        dbAccessor.executeQuery("ALTER TABLE " + UPGRADE_TABLE + " ALTER column " + UPGRADE_TYPE_COL + " NOT NULL");
-      } else {
-        dbAccessor.alterColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_TYPE_COL, String.class, 32, null, false));
-      }
-    }
-  }
-
-  /**
-   * Populate the upgrade table with values for the columns upgrade_type and upgrade_package.
-   * The upgrade_type will default to {@code org.apache.ambari.server.state.stack.upgrade.UpgradeType.ROLLING}
-   * whereas the upgrade_package will be calculated.
-   * @return {@code true} on success, and {@code false} otherwise.
-   */
-  private boolean populateUpgradeTable() {
-    boolean success = true;
-    Statement statement = null;
-    ResultSet rs = null;
-    try {
-      statement = dbAccessor.getConnection().createStatement();
-      if (statement != null) {
-        // Need to use SQL since the schema is changing and some of the columns have not yet been added..
-        rs = statement.executeQuery("SELECT upgrade_id, cluster_id, from_version, to_version, direction, upgrade_package, upgrade_type FROM upgrade");
-        if (rs != null) {
-          try {
-            while (rs.next()) {
-              final long upgradeId = rs.getLong("upgrade_id");
-              final long clusterId = rs.getLong("cluster_id");
-              final String fromVersion = rs.getString("from_version");
-              final String toVersion = rs.getString("to_version");
-              final Direction direction = Direction.valueOf(rs.getString("direction"));
-              // These two values are likely null.
-              String upgradePackage = rs.getString("upgrade_package");
-              String upgradeType = rs.getString("upgrade_type");
-
-              LOG.info(MessageFormat.format("Populating rows for the upgrade table record with " +
-                  "upgrade_id: {0,number,#}, cluster_id: {1,number,#}, from_version: {2}, to_version: {3}, direction: {4}",
-                upgradeId, clusterId, fromVersion, toVersion, direction));
-
-              // Set all upgrades that have been done so far to type "rolling"
-              if (StringUtils.isEmpty(upgradeType)) {
-                LOG.info("Updating the record's upgrade_type to " + UpgradeType.ROLLING);
-                dbAccessor.executeQuery("UPDATE upgrade SET upgrade_type = '" + UpgradeType.ROLLING + "' WHERE upgrade_id = " + upgradeId);
-              }
-
-              if (StringUtils.isEmpty(upgradePackage)) {
-                String version = null;
-                StackEntity stack = null;
-
-                if (direction == Direction.UPGRADE) {
-                  version = toVersion;
-                } else if (direction == Direction.DOWNGRADE) {
-                  // TODO AMBARI-12698, this is going to be a problem.
-                  // During a downgrade, the "to_version" is overwritten to the source version, but the "from_version"
-                  // doesn't swap. E.g.,
-                  //  upgrade_id | from_version |  to_version  | direction
-                  // ------------+--------------+--------------+----------
-                  //           1 | 2.2.6.0-2800 | 2.3.0.0-2557 | UPGRADE
-                  //           2 | 2.2.6.0-2800 | 2.2.6.0-2800 | DOWNGRADE
-                  version = fromVersion;
-                }
-
-                ClusterEntity cluster = clusterDAO.findById(clusterId);
-
-                if (null != cluster) {
-                  stack = cluster.getDesiredStack();
-                  upgradePackage = calculateUpgradePackage(stack, version);
-                } else {
-                  LOG.error("Could not find a cluster with cluster_id " + clusterId);
-                }
-
-                if (!StringUtils.isEmpty(upgradePackage)) {
-                  LOG.info("Updating the record's upgrade_package to " + upgradePackage);
-                  dbAccessor.executeQuery("UPDATE upgrade SET upgrade_package = '" + upgradePackage + "' WHERE upgrade_id = " + upgradeId);
-                } else {
-                  success = false;
-                  LOG.error("Unable to populate column upgrade_package for record in table upgrade with id " + upgradeId);
-                }
-              }
-            }
-          } catch (Exception e) {
-            success = false;
-            e.printStackTrace();
-            LOG.error("Unable to populate the upgrade_type and upgrade_package columns of the upgrade table. " + e);
-          }
-        }
-      }
-    } catch (Exception e) {
-      success = false;
-      e.printStackTrace();
-      LOG.error("Failed to retrieve records from the upgrade table to populate the upgrade_type and upgrade_package columns. Exception: " + e);
-    } finally {
-      try {
-        if (rs != null) {
-          rs.close();
-        }
-        if (statement != null) {
-          statement.close();
-        }
-      } catch (SQLException e) {
-        ;
-      }
-    }
-    return success;
-  }
-
-  /**
-   * Find the single Repo Version for the given stack and version, and return
-   * its upgrade_package column. Because the upgrade_package column is going to
-   * be removed from this entity, must use raw SQL instead of the entity class.
-   * <p/>
-   * It's possible that there is an invalid version listed in the upgrade table.
-   * For example:
-   *
-   * <pre>
-   * upgrade
-   * 1 2 1295  2.2.0.0-2041  2.2.4.2-2     UPGRADE
-   * 2 2 1296  2.2.0.0-2041  2.2.0.0-2041  DOWNGRADE
-   * 3 2 1299  2.2.0.0-2041  2.2.4.2       UPGRADE
-   *
-   * repo_version
-   * 1  2.2.0.0-2041  HDP-2.2.0.0-2041  upgrade-2.2
-   * 2  2.2.4.2-2     HDP-2.2.4.2-2     upgrade-2.2
-   * </pre>
-   *
-   * Notice that it's possible for the {@code upgrade} table to include entries
-   * for a repo version which does not exist; {@code 2.2.4.2}. In these cases,
-   * this method will attempt a "best match".
-   *
-   * @param stack
-   *          Stack
-   * @param version
-   *          Stack version
-   * @return The value of the upgrade_package column, or null if not found.
-   */
-
-  private String calculateUpgradePackage(StackEntity stack, String version) {
-    String upgradePackage = null;
-    // Find the corresponding repo_version, and extract its upgrade_package
-    if (null != version && null != stack) {
-      RepositoryVersionEntity repoVersion = repositoryVersionDAO.findByStackNameAndVersion(stack.getStackName(), version);
-
-      // a null repoVersion means there's mismatch between the upgrade and repo_version table;
-      // use a best-guess approach based on the Stack
-      if( null == repoVersion ){
-        List<RepositoryVersionEntity> bestMatches = repositoryVersionDAO.findByStack(new StackId(stack));
-        if (!bestMatches.isEmpty()) {
-          repoVersion = bestMatches.get(0);
-        }
-      }
-
-      // our efforts have failed; we have no idea what to use; return null as per the contract of the method
-      if( null == repoVersion ) {
-        return null;
-      }
-
-      Statement statement = null;
-      ResultSet rs = null;
-      try {
-        statement = dbAccessor.getConnection().createStatement();
-        if (statement != null) {
-          // Need to use SQL since the schema is changing and the entity will no longer have the upgrade_package column.
-          rs = statement.executeQuery("SELECT upgrade_package FROM repo_version WHERE repo_version_id = " + repoVersion.getId());
-          if (rs != null && rs.next()) {
-            upgradePackage = rs.getString("upgrade_package");
-          }
-        }
-      } catch (Exception e) {
-        LOG.error("Failed to retrieve upgrade_package for repo_version record with id " + repoVersion.getId() + ". Exception: " + e.getMessage());
-      } finally {
-        try {
-          if (rs != null) {
-            rs.close();
-          }
-          if (statement != null) {
-            statement.close();
-          }
-        } catch (SQLException e) {
-          ;
-        }
-      }
-    }
-    return upgradePackage;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected void updateKerberosDescriptorArtifact(ArtifactDAO artifactDAO, ArtifactEntity artifactEntity) throws AmbariException {
-    if (artifactEntity != null) {
-      Map<String, Object> data = artifactEntity.getArtifactData();
-
-      if (data != null) {
-        final KerberosDescriptor kerberosDescriptor = new KerberosDescriptorFactory().createInstance(data);
-
-        if (kerberosDescriptor != null) {
-          KerberosServiceDescriptor hdfsService = kerberosDescriptor.getService("HDFS");
-          if(hdfsService != null) {
-            // before 2.2.0 the hdfs identity was expected to be in the HDFS service
-            KerberosIdentityDescriptor hdfsIdentity = hdfsService.getIdentity("hdfs");
-            if (hdfsIdentity != null) {
-              KerberosComponentDescriptor namenodeComponent = hdfsService.getComponent("NAMENODE");
-              hdfsIdentity.setName("hdfs");
-              hdfsService.removeIdentity("hdfs");
-              namenodeComponent.putIdentity(hdfsIdentity);
-            }
-          }
-          updateKerberosDescriptorIdentityReferences(kerberosDescriptor, "/HDFS/hdfs", "/HDFS/NAMENODE/hdfs");
-          updateKerberosDescriptorIdentityReferences(kerberosDescriptor.getServices(), "/HDFS/hdfs", "/HDFS/NAMENODE/hdfs");
-
-          artifactEntity.setArtifactData(kerberosDescriptor.toMap());
-          artifactDAO.merge(artifactEntity);
-        }
-      }
-    }
-  }
-
-  /**
-   * If still on HDP 2.1, then no repo versions exist, so need to bootstrap the HDP 2.1 repo version,
-   * and mark it as CURRENT in the cluster_version table for the cluster, as well as the host_version table
-   * for all hosts.
-   */
-  @Transactional
-  public void bootstrapRepoVersionForHDP21() throws AmbariException, SQLException {
-    final String hardcodedInitialVersion = "2.1.0.0-0001";
-    AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
-    AmbariMetaInfo ambariMetaInfo = amc.getAmbariMetaInfo();
-    StackDAO stackDAO = injector.getInstance(StackDAO.class);
-    RepositoryVersionHelper repositoryVersionHelper = injector.getInstance(RepositoryVersionHelper.class);
-    RepositoryVersionDAO repositoryVersionDAO = injector.getInstance(RepositoryVersionDAO.class);
-    HostVersionDAO hostVersionDAO = injector.getInstance(HostVersionDAO.class);
-
-    Clusters clusters = amc.getClusters();
-    if (clusters == null) {
-      LOG.error("Unable to get Clusters entity.");
-      return;
-    }
-
-    for (Cluster cluster : clusters.getClusters().values()) {
-      ClusterEntity clusterEntity = clusterDAO.findByName(cluster.getClusterName());
-
-      Set<StackId> stackIds = new HashSet<>();
-
-      for (Service service : cluster.getServices().values()) {
-        StackId stackId = service.getDesiredStackId();
-
-        if (stackIds.contains(stackId)) {
-          continue;
-        } else {
-          stackIds.add(stackId);
-        }
-
-
-
-        LOG.info(MessageFormat.format("Analyzing cluster {0}, currently at stack {1} and version {2}",
-          cluster.getClusterName(), stackId.getStackName(), stackId.getStackVersion()));
-
-        if (stackId.getStackName().equalsIgnoreCase("HDP") && stackId.getStackVersion().equalsIgnoreCase("2.1")) {
-          final StackInfo stackInfo = ambariMetaInfo.getStack(stackId.getStackName(), stackId.getStackVersion());
-          StackEntity stackEntity = stackDAO.find(stackId.getStackName(), stackId.getStackVersion());
-
-          LOG.info("Bootstrapping the versions since using HDP-2.1");
-
-          // The actual value is not known, so use this.
-          String displayName = stackId.getStackName() + "-" + hardcodedInitialVersion;
-
-          // However, the Repo URLs should be correct.
-          String operatingSystems = repositoryVersionHelper.serializeOperatingSystems(stackInfo.getRepositories());
-
-          // Create the Repo Version if it doesn't already exist.
-          RepositoryVersionEntity repoVersionEntity = repositoryVersionDAO.findByDisplayName(displayName);
-          if (null != repoVersionEntity) {
-            LOG.info(MessageFormat.format("A Repo Version already exists with Display Name: {0}", displayName));
-          } else {
-            final long repoVersionIdSeq = repositoryVersionDAO.findMaxId("id");
-            // Safe to attempt to add the sequence if it doesn't exist already.
-            addSequence("repo_version_id_seq", repoVersionIdSeq, false);
-
-            repoVersionEntity = repositoryVersionDAO.create(
-              stackEntity, hardcodedInitialVersion, displayName, operatingSystems);
-            LOG.info(MessageFormat.format("Created Repo Version with ID: {0,number,#}\n, Display Name: {1}, Repo URLs: {2}\n",
-              repoVersionEntity.getId(), displayName, operatingSystems));
-          }
-
-          // Create the Host Versions if they don't already exist.
-          Collection<HostEntity> hosts = clusterEntity.getHostEntities();
-          boolean addedAtLeastOneHost = false;
-          if (null != hosts && !hosts.isEmpty()) {
-            for (HostEntity hostEntity : hosts) {
-              HostVersionEntity hostVersionEntity = hostVersionDAO.findByClusterStackVersionAndHost(cluster.getClusterName(),
-                stackId, hardcodedInitialVersion, hostEntity.getHostName());
-
-              if (null != hostVersionEntity) {
-                LOG.info(MessageFormat.format("A Host Version version for cluster: {0}, version: {1}, host: {2}, already exists; its state is {3}.",
-                  cluster.getClusterName(), hostVersionEntity.getRepositoryVersion().getVersion(),
-                  hostEntity.getHostName(), hostVersionEntity.getState()));
-
-                if (hostVersionEntity.getState() != RepositoryVersionState.CURRENT &&
-                  hostVersionDAO.findByClusterHostAndState(cluster.getClusterName(), hostEntity.getHostName(),
-                    RepositoryVersionState.CURRENT).isEmpty()) {
-                  hostVersionEntity.setState(RepositoryVersionState.CURRENT);
-                  hostVersionDAO.merge(hostVersionEntity);
-                }
-              } else {
-                // This should only be done the first time.
-                if (!addedAtLeastOneHost) {
-                  final long hostVersionIdSeq = hostVersionDAO.findMaxId("id");
-                  // Safe to attempt to add the sequence if it doesn't exist already.
-                  addSequence("host_version_id_seq", hostVersionIdSeq, false);
-                  addedAtLeastOneHost = true;
-                }
-
-                hostVersionEntity = new HostVersionEntity(hostEntity, repoVersionEntity, RepositoryVersionState.CURRENT);
-                hostVersionDAO.create(hostVersionEntity);
-                LOG.info(MessageFormat.format("Created Host Version with ID: {0,number,#}, cluster: {1}, version: {2}, host: {3}, state: {4}.",
-                  hostVersionEntity.getId(), cluster.getClusterName(), hostVersionEntity.getRepositoryVersion().getVersion(),
-                  hostEntity.getHostName(), hostVersionEntity.getState()));
-              }
-            }
-          } else {
-            LOG.info(MessageFormat.format("Not inserting any Host Version records since cluster {0} does not have any hosts.",
-              cluster.getClusterName()));
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Adds the following columns to the {@value #UPGRADE_TABLE} table:
-   * <ul>
-   * <li>{@value #DOWNGRADE_ALLOWED_COLUMN}</li>
-   * <li>{@value #UPGRADE_SKIP_FAILURE_COLUMN}</li>
-   * <li>{@value #UPGRADE_SKIP_SC_FAILURE_COLUMN}</li>
-   * </ul>
-   *
-   * @throws SQLException
-   */
-  protected void updateUpgradesDDL() throws SQLException{
-    dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(DOWNGRADE_ALLOWED_COLUMN, Short.class, 1, null, true));
-    dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_SKIP_FAILURE_COLUMN, Short.class, 1, null, true));
-    dbAccessor.addColumn(UPGRADE_TABLE, new DBColumnInfo(UPGRADE_SKIP_SC_FAILURE_COLUMN, Short.class, 1, null, true));
-  }
-
-  /**
-   * Modifies the JSON of some of the alert definitions which have changed
-   * between Ambari versions.
-   */
-  protected void updateAlertDefinitions() {
-    LOG.info("Updating alert definitions.");
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    AlertDefinitionDAO alertDefinitionDAO = injector.getInstance(AlertDefinitionDAO.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
-    for (final Cluster cluster : clusterMap.values()) {
-      long clusterID = cluster.getClusterId();
-
-      final AlertDefinitionEntity journalNodeProcessAlertDefinitionEntity = alertDefinitionDAO.findByName(
-        clusterID, "journalnode_process");
-      final AlertDefinitionEntity hostDiskUsageAlertDefinitionEntity = alertDefinitionDAO.findByName(
-          clusterID, "ambari_agent_disk_usage");
-
-      if (journalNodeProcessAlertDefinitionEntity != null) {
-        String source = journalNodeProcessAlertDefinitionEntity.getSource();
-
-        journalNodeProcessAlertDefinitionEntity.setSource(modifyJournalnodeProcessAlertSource(source));
-        journalNodeProcessAlertDefinitionEntity.setSourceType(SourceType.WEB);
-        journalNodeProcessAlertDefinitionEntity.setHash(UUID.randomUUID().toString());
-
-        alertDefinitionDAO.merge(journalNodeProcessAlertDefinitionEntity);
-        LOG.info("journalnode_process alert definition was updated.");
-      }
-
-      if (hostDiskUsageAlertDefinitionEntity != null) {
-        hostDiskUsageAlertDefinitionEntity.setDescription("This host-level alert is triggered if the amount of disk space " +
-            "used goes above specific thresholds. The default threshold values are 50% for WARNING and 80% for CRITICAL.");
-        hostDiskUsageAlertDefinitionEntity.setLabel("Host Disk Usage");
-
-        alertDefinitionDAO.merge(hostDiskUsageAlertDefinitionEntity);
-        LOG.info("ambari_agent_disk_usage alert definition was updated.");
-      }
-
-    }
-  }
-
-  /**
-   * Modifies type of the journalnode_process alert to WEB.
-   * Changes reporting text and uri according to the WEB type.
-   * Removes default_port property.
-   */
-  String modifyJournalnodeProcessAlertSource(String source) {
-    JsonObject rootJson = new JsonParser().parse(source).getAsJsonObject();
-
-    rootJson.remove("type");
-    rootJson.addProperty("type", "WEB");
-
-    rootJson.remove("default_port");
-
-    rootJson.remove("uri");
-    JsonObject uriJson = new JsonObject();
-    uriJson.addProperty("http", "{{hdfs-site/dfs.journalnode.http-address}}");
-    uriJson.addProperty("https", "{{hdfs-site/dfs.journalnode.https-address}}");
-    uriJson.addProperty("kerberos_keytab", "{{hdfs-site/dfs.web.authentication.kerberos.keytab}}");
-    uriJson.addProperty("kerberos_principal", "{{hdfs-site/dfs.web.authentication.kerberos.principal}}");
-    uriJson.addProperty("https_property", "{{hdfs-site/dfs.http.policy}}");
-    uriJson.addProperty("https_property_value", "HTTPS_ONLY");
-    uriJson.addProperty("connection_timeout", 5.0);
-    rootJson.add("uri", uriJson);
-
-    rootJson.getAsJsonObject("reporting").getAsJsonObject("ok").remove("text");
-    rootJson.getAsJsonObject("reporting").getAsJsonObject("ok").addProperty(
-      "text", "HTTP {0} response in {2:.3f}s");
-
-    rootJson.getAsJsonObject("reporting").getAsJsonObject("warning").remove("text");
-    rootJson.getAsJsonObject("reporting").getAsJsonObject("warning").addProperty(
-      "text", "HTTP {0} response from {1} in {2:.3f}s ({3})");
-    rootJson.getAsJsonObject("reporting").getAsJsonObject("warning").remove("value");
-
-    rootJson.getAsJsonObject("reporting").getAsJsonObject("critical").remove("text");
-    rootJson.getAsJsonObject("reporting").getAsJsonObject("critical").addProperty("text",
-      "Connection failed to {1} ({3})");
-    rootJson.getAsJsonObject("reporting").getAsJsonObject("critical").remove("value");
-
-    return rootJson.toString();
-  }
-
-  protected void updateHadoopEnv() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config hadoopEnvConfig = cluster.getDesiredConfigByType(HADOOP_ENV_CONFIG);
-      if (hadoopEnvConfig != null) {
-        String content = hadoopEnvConfig.getProperties().get(CONTENT_PROPERTY);
-        if (content != null) {
-          content += HADOOP_ENV_CONTENT_TO_APPEND;
-          Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, content);
-          updateConfigurationPropertiesForCluster(cluster, HADOOP_ENV_CONFIG, updates, true, false);
-        }
-      }
-    }
-  }
-
-  protected void updateHDFSConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(
-      AmbariManagementController.class);
-    Map<String, Cluster> clusterMap = getCheckedClusterMap(ambariManagementController.getClusters());
-
-    for (final Cluster cluster : clusterMap.values()) {
-      // Remove dfs.namenode.rpc-address property when NN HA is enabled
-      if (cluster.getDesiredConfigByType(HDFS_SITE_CONFIG) != null && isNNHAEnabled(cluster)) {
-        Set<String> removePropertiesSet = new HashSet<>();
-        removePropertiesSet.add("dfs.namenode.rpc-address");
-        removeConfigurationPropertiesFromCluster(cluster, HDFS_SITE_CONFIG, removePropertiesSet);
-      }
-    }
-  }
-
-  protected void updateZookeeperLog4j() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config zookeeperLog4jConfig = cluster.getDesiredConfigByType(ZOOKEEPER_LOG4J_CONFIG);
-      if (zookeeperLog4jConfig != null) {
-        String content = zookeeperLog4jConfig.getProperties().get(CONTENT_PROPERTY);
-        if (content != null) {
-          content = content.replaceAll("[\n^]\\s*log4j\\.rootLogger\\s*=\\s*INFO\\s*,\\s*CONSOLE", "\nlog4j.rootLogger=INFO, ROLLINGFILE");
-          Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, content);
-          updateConfigurationPropertiesForCluster(cluster, ZOOKEEPER_LOG4J_CONFIG, updates, true, false);
-        }
-      }
-    }
-  }
-
-  protected void updateStormConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config stormSiteProps = cluster.getDesiredConfigByType(STORM_SITE);
-      if (stormSiteProps != null) {
-        Map<String, String> updates = new HashMap<>();
-
-        String nimbusMonitorFreqSecs = stormSiteProps.getProperties().get(NIMBS_MONITOR_FREQ_SECS_PROPERTY);
-        if (nimbusMonitorFreqSecs != null && nimbusMonitorFreqSecs.equals("10")) {
-          updates.put(NIMBS_MONITOR_FREQ_SECS_PROPERTY, "120");
-        }
-
-        Service amsService = null;
-        try {
-          amsService = cluster.getService("AMBARI_METRICS");
-        } catch(AmbariException ambariException) {
-          LOG.info("AMBARI_METRICS service not found in cluster while updating storm-site properties");
-        }
-        String metricsReporter = stormSiteProps.getProperties().get(STORM_METRICS_REPORTER);
-        if (amsService != null && StringUtils.isEmpty(metricsReporter)) {
-          updates.put(STORM_METRICS_REPORTER, "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter");
-        }
-
-        updateConfigurationPropertiesForCluster(cluster, STORM_SITE, updates, true, false);
-      }
-    }
-  }
-
-  protected void updateHiveConfig() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config hiveSiteConfig = cluster.getDesiredConfigByType(HIVE_SITE_CONFIG);
-      if (hiveSiteConfig != null) {
-        String hiveServer2OperationLogLocation = hiveSiteConfig.getProperties().get(HIVE_SERVER2_OPERATION_LOG_LOCATION_PROPERTY);
-        if (hiveServer2OperationLogLocation != null && hiveServer2OperationLogLocation.equals("${system:java.io.tmpdir}/${system:user.name}/operation_logs")) {
-          Map<String, String> updates = Collections.singletonMap(HIVE_SERVER2_OPERATION_LOG_LOCATION_PROPERTY, "/tmp/hive/operation_logs");
-          updateConfigurationPropertiesForCluster(cluster, HIVE_SITE_CONFIG, updates, true, false);
-        }
-      }
-
-      Service service = cluster.getServices().get("HIVE");
-
-      if (null == service) {
-        continue;
-      }
-
-      StackId stackId = service.getDesiredStackId();
-      boolean isStackNotLess23 = (stackId != null && stackId.getStackName().equals("HDP") &&
-              VersionUtils.compareVersions(stackId.getStackVersion(), "2.3") >= 0);
-
-      Config hiveEnvConfig = cluster.getDesiredConfigByType(HIVE_ENV_CONFIG);
-      if (hiveEnvConfig != null) {
-        Map<String, String> hiveEnvProps = new HashMap<>();
-        String content = hiveEnvConfig.getProperties().get(CONTENT_PROPERTY);
-        // For HDP-2.3 we need to add hive heap size management to content,
-        // for others we need to update content
-        if(content != null) {
-          if(isStackNotLess23) {
-            content = updateHiveEnvContentHDP23(content);
-          } else {
-            content = updateHiveEnvContent(content);
-          }
-          hiveEnvProps.put(CONTENT_PROPERTY, content);
-          updateConfigurationPropertiesForCluster(cluster, HIVE_ENV_CONFIG, hiveEnvProps, true, true);
-        }
-      }
-    }
-  }
-
-  protected void updateHbaseEnvConfig() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    boolean updateConfig = false;
-
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Service service = cluster.getServices().get("HBASE");
-
-      if (null == service) {
-        continue;
-      }
-
-      StackId stackId = service.getDesiredStackId();
-      Config hbaseEnvConfig = cluster.getDesiredConfigByType(HBASE_ENV_CONFIG);
-      if (hbaseEnvConfig != null) {
-        String content = hbaseEnvConfig.getProperties().get(CONTENT_PROPERTY);
-        if (content != null) {
-          if (!content.contains("-Djava.io.tmpdir")) {
-            content += "\n\nexport HBASE_OPTS=\"${HBASE_OPTS} -Djava.io.tmpdir={{java_io_tmpdir}}\"";
-            updateConfig = true;
-          }
-          if (stackId != null && stackId.getStackName().equals("HDP") &&
-              VersionUtils.compareVersions(stackId.getStackVersion(), "2.2") >= 0) {
-            if (!content.contains("MaxDirectMemorySize={{hbase_max_direct_memory_size}}m")) {
-              String newPartOfContent = "\n\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %}\"\n\n";
-              content += newPartOfContent;
-              updateConfig = true;
-            }
-            if (updateConfig) {
-              Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, content);
-              updateConfigurationPropertiesForCluster(cluster, HBASE_ENV_CONFIG, updates, true, false);
-            }
-          }
-        }
-      }
-    }
-  }
-
-  protected void updateFlumeEnvConfig() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config flumeEnvConfig = cluster.getDesiredConfigByType(FLUME_ENV_CONFIG);
-      if (flumeEnvConfig != null) {
-        String content = flumeEnvConfig.getProperties().get(CONTENT_PROPERTY);
-        if (content != null && !content.contains("/usr/lib/flume/lib/ambari-metrics-flume-sink.jar")) {
-          String newPartOfContent = "\n\n" +
-            "# Note that the Flume conf directory is always included in the classpath.\n" +
-            "# Add flume sink to classpath\n" +
-            "if [ -e \"/usr/lib/flume/lib/ambari-metrics-flume-sink.jar\" ]; then\n" +
-            "  export FLUME_CLASSPATH=$FLUME_CLASSPATH:/usr/lib/flume/lib/ambari-metrics-flume-sink.jar\n" +
-            "fi\n";
-          content += newPartOfContent;
-          Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, content);
-          updateConfigurationPropertiesForCluster(cluster, FLUME_ENV_CONFIG, updates, true, false);
-        }
-      }
-    }
-  }
-
-  protected void updateAMSConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-
-          Config amsHbaseEnv = cluster.getDesiredConfigByType(AMS_HBASE_ENV);
-          if (amsHbaseEnv != null) {
-            Map<String, String> amsHbaseEnvProperties = amsHbaseEnv.getProperties();
-            String content = amsHbaseEnvProperties.get("content");
-            Map<String, String> newProperties = new HashMap<>();
-            newProperties.put("content", updateAmsHbaseEnvContent(content));
-            updateConfigurationPropertiesForCluster(cluster, AMS_HBASE_ENV, newProperties, true, true);
-          }
-
-          Config amsEnv = cluster.getDesiredConfigByType(AMS_ENV);
-          if (amsEnv != null) {
-            Map<String, String> amsEnvProperties = amsEnv.getProperties();
-            String content = amsEnvProperties.get("content");
-            Map<String, String> newProperties = new HashMap<>();
-            newProperties.put("content", updateAmsEnvContent(content));
-            updateConfigurationPropertiesForCluster(cluster, AMS_ENV, newProperties, true, true);
-          }
-
-          Config amsSite = cluster.getDesiredConfigByType(AMS_SITE);
-          if (amsSite != null) {
-            Map<String, String> currentAmsSiteProperties = amsSite.getProperties();
-            Map<String, String> newProperties = new HashMap<>();
-
-            //Changed AMS result set limit from 5760 to 15840.
-            if(currentAmsSiteProperties.containsKey("timeline.metrics.service.default.result.limit") &&
-              currentAmsSiteProperties.get("timeline.metrics.service.default.result.limit").equals(String.valueOf(5760))) {
-              LOG.info("Updating timeline.metrics.service.default.result.limit to 15840");
-              newProperties.put("timeline.metrics.service.default.result.limit", String.valueOf(15840));
-            }
-
-            //Interval
-            newProperties.put("timeline.metrics.cluster.aggregator.second.interval", String.valueOf(120));
-            newProperties.put("timeline.metrics.cluster.aggregator.minute.interval", String.valueOf(300));
-            newProperties.put("timeline.metrics.host.aggregator.minute.interval", String.valueOf(300));
-
-            //ttl
-            newProperties.put("timeline.metrics.cluster.aggregator.second.ttl", String.valueOf(2592000));
-            newProperties.put("timeline.metrics.cluster.aggregator.minute.ttl", String.valueOf(7776000));
-
-            //checkpoint
-            newProperties.put("timeline.metrics.cluster.aggregator.second.checkpointCutOffMultiplier", String.valueOf(2));
-
-            //disabled
-            newProperties.put("timeline.metrics.cluster.aggregator.second.disabled", String.valueOf(false));
-
-            //Add compaction policy property
-            newProperties.put(TIMELINE_METRICS_HBASE_FIFO_COMPACTION_ENABLED, String.valueOf(true));
-
-            updateConfigurationPropertiesForCluster(cluster, AMS_SITE, newProperties, true, true);
-          }
-
-          Config amsHbaseSite = cluster.getDesiredConfigByType(AMS_HBASE_SITE);
-          if (amsHbaseSite != null) {
-            Map<String, String> amsHbaseSiteProperties = amsHbaseSite.getProperties();
-            Map<String, String> newProperties = new HashMap<>();
-
-            String zkTimeout = amsHbaseSiteProperties.get(AMS_HBASE_SITE_ZK_TIMEOUT_PROPERTY);
-            // if old default, set new default
-            if ("20000".equals(zkTimeout)) {
-              newProperties.put(AMS_HBASE_SITE_ZK_TIMEOUT_PROPERTY, "120000");
-            }
-
-            //Adding hbase.normalizer.period to upgrade
-            if(!amsHbaseSiteProperties.containsKey(AMS_HBASE_SITE_NORMALIZER_ENABLED_PROPERTY)) {
-              LOG.info("Enabling " + AMS_HBASE_SITE_NORMALIZER_ENABLED_PROPERTY);
-              newProperties.put(AMS_HBASE_SITE_NORMALIZER_ENABLED_PROPERTY, String.valueOf(true));
-            }
-
-            if(!amsHbaseSiteProperties.containsKey(AMS_HBASE_SITE_NORMALIZER_PERIOD_PROPERTY)) {
-              LOG.info("Updating " + AMS_HBASE_SITE_NORMALIZER_PERIOD_PROPERTY);
-              newProperties.put(AMS_HBASE_SITE_NORMALIZER_PERIOD_PROPERTY, String.valueOf(600000));
-            }
-
-            if(!amsHbaseSiteProperties.containsKey(AMS_HBASE_SITE_NORMALIZER_CLASS_PROPERTY)) {
-              LOG.info("Updating " + AMS_HBASE_SITE_NORMALIZER_CLASS_PROPERTY);
-              newProperties.put(AMS_HBASE_SITE_NORMALIZER_CLASS_PROPERTY,
-                "org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer");
-            }
-            updateConfigurationPropertiesForCluster(cluster, AMS_HBASE_SITE, newProperties, true, true);
-          }
-        }
-      }
-    }
-
-  }
-
-  protected String updateAmsHbaseEnvContent(String content) {
-    if (content == null) {
-      return null;
-    }
-    String regSearch = "export HBASE_HEAPSIZE=";
-    String replacement = "#export HBASE_HEAPSIZE=";
-    content = content.replaceAll(regSearch, replacement);
-    content += "\n" +
-      "# The maximum amount of heap to use for hbase shell.\n" +
-      "export HBASE_SHELL_OPTS=\"-Xmx256m\"\n";
-    return content;
-  }
-
-  protected String updateAmsEnvContent(String content) {
-    if (content == null) {
-      return null;
-    }
-    if (!content.contains("AMS_COLLECTOR_GC_OPTS")) {
-      content += "\n" +
-        "# AMS Collector GC options\n" +
-        "export AMS_COLLECTOR_GC_OPTS=\"-XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 " +
-        "-XX:+UseCMSInitiatingOccupancyOnly -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps " +
-        "-XX:+UseGCLogFileRotation -XX:GCLogFileSize=10M " +
-        "-Xloggc:{{ams_collector_log_dir}}/collector-gc.log-`date +'%Y%m%d%H%M'`\"\n" +
-        "export AMS_COLLECTOR_OPTS=\"$AMS_COLLECTOR_OPTS $AMS_COLLECTOR_GC_OPTS\"\n";
-    }
-
-    if (!content.contains("AMS_HBASE_NORMALIZER_ENABLED")) {
-      content += "\n" +
-        "# HBase normalizer enabled\n" +
-        "export AMS_HBASE_NORMALIZER_ENABLED={{ams_hbase_normalizer_enabled}}\n";
-    }
-
-    if (!content.contains("AMS_HBASE_FIFO_COMPACTION_ENABLED")) {
-      content += "\n" +
-        "# HBase compaction policy enabled\n" +
-        "export AMS_HBASE_FIFO_COMPACTION_ENABLED={{ams_hbase_fifo_compaction_enabled}}\n";
-    }
-
-    return content;
-  }
-
-  protected void updateKafkaConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    Clusters clusters = ambariManagementController.getClusters();
-
-    if (clusters != null) {
-      Map<String, Cluster> clusterMap = clusters.getClusters();
-      if (clusterMap != null && !clusterMap.isEmpty()) {
-        for (final Cluster cluster : clusterMap.values()) {
-          Set<String> installedServices = cluster.getServices().keySet();
-          Config kafkaBroker = cluster.getDesiredConfigByType(KAFKA_BROKER);
-          if (kafkaBroker != null) {
-            Map<String, String> newProperties = new HashMap<>();
-            Map<String, String> kafkaBrokerProperties = kafkaBroker.getProperties();
-            String kafkaMetricsReporters = kafkaBrokerProperties.get("kafka.metrics.reporters");
-            if (kafkaMetricsReporters == null ||
-              "{{kafka_metrics_reporters}}".equals(kafkaMetricsReporters)) {
-
-              if (installedServices.contains("AMBARI_METRICS")) {
-                newProperties.put("kafka.metrics.reporters", "org.apache.hadoop.metrics2.sink.kafka.KafkaTimelineMetricsReporter");
-              } else if (installedServices.contains("GANGLIA")) {
-                newProperties.put("kafka.metrics.reporters", "kafka.ganglia.KafkaGangliaMetricsReporter");
-              } else {
-                newProperties.put("kafka.metrics.reporters", " ");
-              }
-
-            }
-            if (!newProperties.isEmpty()) {
-              updateConfigurationPropertiesForCluster(cluster, KAFKA_BROKER, newProperties, true, true);
-            }
-          }
-
-          Config kafkaEnv = cluster.getDesiredConfigByType(KAFKA_ENV_CONFIG);
-          if (kafkaEnv != null) {
-            String kafkaEnvContent = kafkaEnv.getProperties().get(CONTENT_PROPERTY);
-            if (kafkaEnvContent != null && !kafkaEnvContent.contains(KAFKA_ENV_CONTENT_KERBEROS_PARAMS)) {
-              kafkaEnvContent += "\n\nexport KAFKA_KERBEROS_PARAMS=\"$KAFKA_KERBEROS_PARAMS {{kafka_kerberos_params}}\"";
-              Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, kafkaEnvContent);
-              updateConfigurationPropertiesForCluster(cluster, KAFKA_ENV_CONFIG, updates, true, false);
-            }
-          }
-        }
-      }
-    }
-  }
-
-  protected void updateRangerEnvConfig() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Map<String, String> newRangerEnvProps = new HashMap<>();
-      Config rangerHdfsPluginProperties = cluster.getDesiredConfigByType("ranger-hdfs-plugin-properties");
-      if (rangerHdfsPluginProperties != null && rangerHdfsPluginProperties.getProperties().containsKey(RANGER_HDFS_PLUGIN_ENABLED_PROPERTY)) {
-        newRangerEnvProps.put(RANGER_HDFS_PLUGIN_ENABLED_PROPERTY, rangerHdfsPluginProperties.getProperties().get(RANGER_HDFS_PLUGIN_ENABLED_PROPERTY));
-      }
-      Config hiveEnvProperties = cluster.getDesiredConfigByType("hive-env");
-      if (hiveEnvProperties != null && hiveEnvProperties.getProperties().containsKey("hive_security_authorization")
-              && hiveEnvProperties.getProperties().get("hive_security_authorization").toLowerCase().equals("ranger")) {
-        newRangerEnvProps.put(RANGER_HIVE_PLUGIN_ENABLED_PROPERTY, "Yes");
-      }
-      Config rangerHbasePluginProperties = cluster.getDesiredConfigByType("ranger-hbase-plugin-properties");
-      if (rangerHbasePluginProperties != null && rangerHbasePluginProperties.getProperties().containsKey(RANGER_HBASE_PLUGIN_ENABLED_PROPERTY)) {
-        newRangerEnvProps.put(RANGER_HBASE_PLUGIN_ENABLED_PROPERTY, rangerHbasePluginProperties.getProperties().get(RANGER_HBASE_PLUGIN_ENABLED_PROPERTY));
-      }
-
-      Config rangerStormPluginProperties = cluster.getDesiredConfigByType("ranger-storm-plugin-properties");
-      if (rangerStormPluginProperties != null && rangerStormPluginProperties.getProperties().containsKey(RANGER_STORM_PLUGIN_ENABLED_PROPERTY)) {
-        newRangerEnvProps.put(RANGER_STORM_PLUGIN_ENABLED_PROPERTY, rangerStormPluginProperties.getProperties().get(RANGER_STORM_PLUGIN_ENABLED_PROPERTY));
-      }
-      Config rangerKnoxPluginProperties = cluster.getDesiredConfigByType("ranger-knox-plugin-properties");
-      if (rangerKnoxPluginProperties != null && rangerKnoxPluginProperties.getProperties().containsKey(RANGER_KNOX_PLUGIN_ENABLED_PROPERTY)) {
-        newRangerEnvProps.put(RANGER_KNOX_PLUGIN_ENABLED_PROPERTY, rangerKnoxPluginProperties.getProperties().get(RANGER_KNOX_PLUGIN_ENABLED_PROPERTY));
-      }
-      Config rangerYarnPluginProperties = cluster.getDesiredConfigByType("ranger-yarn-plugin-properties");
-      if (rangerYarnPluginProperties != null && rangerYarnPluginProperties.getProperties().containsKey(RANGER_YARN_PLUGIN_ENABLED_PROPERTY)) {
-        newRangerEnvProps.put(RANGER_YARN_PLUGIN_ENABLED_PROPERTY, rangerYarnPluginProperties.getProperties().get(RANGER_YARN_PLUGIN_ENABLED_PROPERTY));
-      }
-      Config rangerKafkaPluginProperties = cluster.getDesiredConfigByType("ranger-kafka-plugin-properties");
-      if (rangerKafkaPluginProperties != null && rangerKafkaPluginProperties.getProperties().containsKey(RANGER_KAFKA_PLUGIN_ENABLED_PROPERTY)) {
-        newRangerEnvProps.put(RANGER_KAFKA_PLUGIN_ENABLED_PROPERTY, rangerKafkaPluginProperties.getProperties().get(RANGER_KAFKA_PLUGIN_ENABLED_PROPERTY));
-      }
-      if (!newRangerEnvProps.isEmpty()) {
-        updateConfigurationPropertiesForCluster(cluster, RANGER_ENV_CONFIG, newRangerEnvProps, true, true);
-      }
-    }
-  }
-
-  protected void updateRangerUgsyncSiteConfig() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      Config rangerUgsyncSiteProperties = cluster.getDesiredConfigByType(RANGER_UGSYNC_SITE_CONFIG);
-      if (rangerUgsyncSiteProperties != null && rangerUgsyncSiteProperties.getProperties().containsKey(RANGER_USERSYNC_SOURCE_IMPL_CLASS_PROPERTY)) {
-        String sourceClassValue = rangerUgsyncSiteProperties.getProperties().get(RANGER_USERSYNC_SOURCE_IMPL_CLASS_PROPERTY);
-        if (sourceClassValue != null) {
-          if ("ldap".equals(sourceClassValue)) {
-            Map<String, String> updates = Collections.singletonMap(RANGER_USERSYNC_SOURCE_IMPL_CLASS_PROPERTY,
-                "org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder");
-            updateConfigurationPropertiesForCluster(cluster, RANGER_UGSYNC_SITE_CONFIG, updates, true, false);
-          } else if ("unix".equals(sourceClassValue)) {
-            Map<String, String> updates = Collections.singletonMap(RANGER_USERSYNC_SOURCE_IMPL_CLASS_PROPERTY,
-                "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder");
-            updateConfigurationPropertiesForCluster(cluster, RANGER_UGSYNC_SITE_CONFIG, updates, true, false);
-          } else if ("file".equals(sourceClassValue)) {
-            Map<String, String> updates = Collections.singletonMap(RANGER_USERSYNC_SOURCE_IMPL_CLASS_PROPERTY,
-                "org.apache.ranger.unixusersync.process.FileSourceUserGroupBuilder");
-            updateConfigurationPropertiesForCluster(cluster, RANGER_UGSYNC_SITE_CONFIG, updates, true, false);
-          }
-        }
-      }
-    }
-  }
-
-  protected String updateHiveEnvContent(String hiveEnvContent) {
-    if(hiveEnvContent == null) {
-      return null;
-    }
-    // There are two cases here
-    // We do not have "export HADOOP_CLIENT_OPTS" and we need to add it
-    // We have "export HADOOP_CLIENT_OPTS" with wrong order
-    String exportHadoopClientOpts = "(?s).*export\\s*HADOOP_CLIENT_OPTS.*";
-    if (hiveEnvContent.matches(exportHadoopClientOpts)) {
-      String oldHeapSizeRegex = "export\\s*HADOOP_CLIENT_OPTS=\"-Xmx\\$\\{HADOOP_HEAPSIZE\\}m\\s*\\$HADOOP_CLIENT_OPTS\"";
-      String newHeapSizeRegex = "export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"";
-      return hiveEnvContent.replaceAll(oldHeapSizeRegex, Matcher.quoteReplacement(newHeapSizeRegex));
-    } else {
-      String oldHeapSizeRegex = "export\\s*HADOOP_HEAPSIZE\\s*=\\s*\"\\{\\{hive_heapsize\\}\\}\"\\.*\\n\\s*fi\\s*\\n";
-      String newHeapSizeRegex = "export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\n" +
-              "fi\n" +
-              "\n" +
-              "export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"\n";
-      return hiveEnvContent.replaceAll(oldHeapSizeRegex, Matcher.quoteReplacement(newHeapSizeRegex));
-    }
-  }
-
-  protected String updateHiveEnvContentHDP23(String hiveEnvContent) {
-    if(hiveEnvContent == null) {
-      return null;
-    }
-    String oldHeapSizeRegex = "# The heap size of the jvm stared by hive shell script can be controlled via:\\s*\\n";
-    String newHeapSizeRegex = "# The heap size of the jvm stared by hive shell script can be controlled via:\n" +
-            "\n" +
-            "if [ \"$SERVICE\" = \"metastore\" ]; then\n" +
-            "  export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore\n" +
-            "else\n" +
-            "  export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client\n" +
-            "fi\n" +
-            "\n" +
-            "export HADOOP_CLIENT_OPTS=\"$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m\"\n" +
-            "\n";
-    return hiveEnvContent.replaceFirst(oldHeapSizeRegex, Matcher.quoteReplacement(newHeapSizeRegex));
-  }
-
-  protected void updateAccumuloConfigs() throws AmbariException {
-    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
-    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
-      // If security type is set to Kerberos, update Kerberos-related configs
-      if(cluster.getSecurityType() == SecurityType.KERBEROS) {
-        Config clientProps = cluster.getDesiredConfigByType("client");
-        if (clientProps != null) {
-          Map<String, String> properties = clientProps.getProperties();
-          if (properties == null) {
-            properties = new HashMap<>();
-          }
-          // <2.2.0 did not account for a custom service principal.
-          // Need to ensure that the client knows the server's principal (the primary) to properly authenticate.
-          properties.put("kerberos.server.primary", "{{bare_accumulo_principal}}");
-          updateConfigurationPropertiesForCluster(cluster, "client", properties, true, false);
-        }
-      } // else -- no special client-configuration is necessary.
-    }
-  }
-}
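
A note on the shape of the code deleted above: each of these pre-2.5 catalog methods follows the same pattern of fetching the cluster's desired config by type, building a map containing only the properties that need to change, and persisting the delta through the inherited updateConfigurationPropertiesForCluster() helper. Below is a minimal sketch of that pattern. It assumes it lives inside an upgrade-catalog subclass so the inherited helpers, the injector field, and the java.util imports resolve; the "example-site" config type and its property names and values are invented for illustration and are not real Ambari configs.

    protected void updateExampleSiteConfigs() throws AmbariException {
      AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
      for (final Cluster cluster : getCheckedClusterMap(amc.getClusters()).values()) {
        Config exampleSite = cluster.getDesiredConfigByType("example-site");
        if (exampleSite == null) {
          continue; // config type not present on this cluster
        }
        Map<String, String> current = exampleSite.getProperties();
        Map<String, String> newProperties = new HashMap<>();

        // Only migrate a value that still carries the old default.
        if ("20000".equals(current.get("example.zk.timeout"))) {
          newProperties.put("example.zk.timeout", "120000");
        }
        // Add a property that older stacks never shipped.
        if (!current.containsKey("example.normalizer.enabled")) {
          newProperties.put("example.normalizer.enabled", String.valueOf(true));
        }

        if (!newProperties.isEmpty()) {
          // Same boolean arguments as in the removed methods above.
          updateConfigurationPropertiesForCluster(cluster, "example-site", newProperties, true, true);
        }
      }
    }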


[33/63] [abbrv] ambari git commit: AMBARI-21347. Service Page: Some Alerts are missing their Status (magyari_sandor)

Posted by ab...@apache.org.
AMBARI-21347. Service Page: Some Alerts are missing their Status (magyari_sandor)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9833bc18
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9833bc18
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9833bc18

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 9833bc182d9b44a69bb766de77311d4a3a50fa5e
Parents: 08dd492
Author: Sandor Magyari <sm...@hortonworks.com>
Authored: Mon Jun 26 20:01:59 2017 +0200
Committer: Sandor Magyari <sm...@hortonworks.com>
Committed: Tue Jun 27 11:15:03 2017 +0200

----------------------------------------------------------------------
 .../controller/AmbariManagementControllerImpl.java      | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9833bc18/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index ed707e7..6781f65 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -182,6 +182,7 @@ import org.apache.ambari.server.state.StackInfo;
 import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.state.UnlimitedKeyJCERequirement;
 import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
+import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
 import org.apache.ambari.server.state.quicklinksprofile.QuickLinkVisibilityController;
 import org.apache.ambari.server.state.quicklinksprofile.QuickLinkVisibilityControllerFactory;
 import org.apache.ambari.server.state.quicklinksprofile.QuickLinksProfile;
@@ -193,6 +194,7 @@ import org.apache.ambari.server.state.stack.WidgetLayout;
 import org.apache.ambari.server.state.stack.WidgetLayoutInfo;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostInstallEvent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpInProgressEvent;
+import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpSucceededEvent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartEvent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStopEvent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostUpgradeEvent;
@@ -3023,7 +3025,15 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
             // START task should run configuration script.
             if (newState == State.INSTALLED && skipInstallTaskForComponent(requestProperties, cluster, scHost)) {
               LOG.info("Skipping create of INSTALL task for {} on {}.", scHost.getServiceComponentName(), scHost.getHostName());
-              scHost.setState(State.INSTALLED);
+              // set state to INSTALLING, then immediately send an ServiceComponentHostOpSucceededEvent to allow
+              // transitioning from INSTALLING --> INSTALLED.
+              scHost.setState(State.INSTALLING);
+              long now = System.currentTimeMillis();
+              try {
+                scHost.handleEvent(new ServiceComponentHostOpSucceededEvent(scHost.getServiceComponentName(), scHost.getHostName(), now));
+              } catch (InvalidStateTransitionException e) {
+                LOG.error("Error transitioning ServiceComponentHost state to INSTALLED", e);
+              }
             } else {
               // !!! can never be null
               RepositoryVersionEntity repoVersion = serviceComponent.getDesiredRepositoryVersion();
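
For context on the hunk above, which is the whole of AMBARI-21347: the old code forced the host component straight to INSTALLED with setState(), bypassing the component's state machine, while the fix parks it in INSTALLING and fires a ServiceComponentHostOpSucceededEvent so the INSTALLING to INSTALLED move is driven through handleEvent(). Presumably this lets whatever bookkeeping hangs off that transition, including the alert status the summary mentions, run as it would for a normal install. The toy class below only illustrates the general difference between assigning a state field and driving a transition through an event handler; none of these names exist in Ambari.

    import java.util.ArrayList;
    import java.util.List;

    class TinyHostComponent {
      enum State { INIT, INSTALLING, INSTALLED }

      private State state = State.INIT;
      private final List<String> transitionLog = new ArrayList<>();

      // Direct assignment: the field changes, but no transition is recorded,
      // so anything keyed off transitions (alerts, audit, listeners) never fires.
      void setStateDirectly(State newState) {
        this.state = newState;
      }

      // Event-driven transition: validated against the current state and recorded,
      // mirroring the setState(INSTALLING) + handleEvent(opSucceeded) pattern above.
      void handleOpSucceeded(long timestamp) {
        if (state != State.INSTALLING) {
          throw new IllegalStateException("Invalid transition from " + state);
        }
        state = State.INSTALLED;
        transitionLog.add("INSTALLING -> INSTALLED at " + timestamp);
      }

      public static void main(String[] args) {
        TinyHostComponent component = new TinyHostComponent();
        component.setStateDirectly(State.INSTALLING);
        component.handleOpSucceeded(System.currentTimeMillis());
        System.out.println(component.state + " " + component.transitionLog);
      }
    }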


[31/63] [abbrv] ambari git commit: AMBARI-21268. Remove Upgrade Catalogs For Every Version Before 2.5 - fix build error: remove leftover Python test

Posted by ab...@apache.org.
AMBARI-21268. Remove Upgrade Catalogs For Every Version Before 2.5 - fix build error: remove leftover Python test


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4522cf5a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4522cf5a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4522cf5a

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 4522cf5a6a9389dbb7867938ce72cd2d734dc20d
Parents: 6eaabc1
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Tue Jun 27 09:03:02 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Tue Jun 27 09:03:02 2017 +0200

----------------------------------------------------------------------
 .../src/test/python/TestUpgradeHelper.py        | 1028 ------------------
 1 file changed, 1028 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4522cf5a/ambari-server/src/test/python/TestUpgradeHelper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestUpgradeHelper.py b/ambari-server/src/test/python/TestUpgradeHelper.py
deleted file mode 100644
index 6da4507..0000000
--- a/ambari-server/src/test/python/TestUpgradeHelper.py
+++ /dev/null
@@ -1,1028 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-
-from mock.mock import MagicMock, call
-from mock.mock import patch
-
-from unittest import TestCase
-import sys
-import os
-import unittest
-import upgradeHelper
-import json
-import copy
-from StringIO import StringIO
-
-
-class UpgradeCatalogFactoryMock(upgradeHelper.UpgradeCatalogFactory):
-  def __init__(self, data):
-    self._load(data)
-
-  def _load(self, data):
-    fn = StringIO(data)
-    with patch("__builtin__.open") as open_mock:
-      open_mock.return_value = fn
-      super(UpgradeCatalogFactoryMock, self)._load("")
-
-
-class TestUpgradeHelper(TestCase):
-  original_curl = None
-  out = None
-  catalog_from = "1.3"
-  catalog_to = "2.2"
-  catalog_cfg_type = "my type"
-  required_service = "TEST"
-  curl_response = "{}"
-  test_catalog = """{
-   "version": "1.0",
-   "stacks": [
-     {
-       "name": "HDP",
-       "old-version": "%s",
-       "target-version": "%s",
-       "options": {
-         "config-types": {
-           "%s": {
-             "merged-copy": "yes"
-           }
-          }
-       },
-       "properties": {
-         "%s": {
-           "my property": {
-             "value": "my value",
-             "required-services": [\"%s\"]
-           }
-         }
-       },
-       "property-mapping": {
-         "my replace property": "my property 2"
-       }
-     }
-   ]
-  }
-  """
-
-  def setUp(self):
-    # replace original curl call to mock
-    self.test_catalog = self.test_catalog % (self.catalog_from, self.catalog_to,
-                                             self.catalog_cfg_type, self.catalog_cfg_type,
-                                             self.required_service)
-
-    self.original_curl = upgradeHelper.curl
-    upgradeHelper.curl = self.magic_curl
-
-    # mock logging methods
-    upgradeHelper.logging.getLogger = MagicMock()
-    upgradeHelper.logging.FileHandler = MagicMock()
-
-    self.out = StringIO()
-    sys.stdout = self.out
-
-  def magic_curl(self, *args, **kwargs):
-    resp = self.curl_response
-    self.curl_response = "{}"
-    if "parse" in kwargs and isinstance(resp, str) and kwargs["parse"] == True:
-      resp = json.loads(resp)
-    return resp
-
-  def tearDown(self):
-    sys.stdout = sys.__stdout__
-
-  @patch("optparse.OptionParser")
-  @patch("upgradeHelper.modify_configs")
-  @patch("__builtin__.open")
-  def test_ParseOptions(self, open_mock, modify_action_mock, option_parser_mock):
-    class options(object):
-      user = "test_user"
-      hostname = "127.0.0.1"
-      clustername = "test1"
-      password = "test_password"
-      upgrade_json = "catalog_file"
-      from_stack = "0.0"
-      to_stack = "1.3"
-      logfile = "test.log"
-      report = "report.txt"
-      https = False
-      port = "8080"
-      warnings = []
-      printonly = False
-
-    args = ["update-configs"]
-    modify_action_mock.return_value = MagicMock()
-    test_mock = MagicMock()
-    test_mock.parse_args = lambda: (options, args)
-    option_parser_mock.return_value = test_mock
-
-    upgradeHelper.main()
-
-    self.assertEqual("8080", upgradeHelper.Options.API_PORT)
-    self.assertEqual("http", upgradeHelper.Options.API_PROTOCOL)
-    self.assertEqual(1, modify_action_mock.call_count)
-    self.assertEqual({"user": options.user, "pass": options.password}, upgradeHelper.Options.API_TOKENS)
-    self.assertEqual(options.clustername, upgradeHelper.Options.CLUSTER_NAME)
-
-  def test_is_services_exists(self):
-    old_services = upgradeHelper.Options.SERVICES
-
-    upgradeHelper.Options.SERVICES = set(['TEST1', 'TEST2'])
-    actual_result = upgradeHelper.is_services_exists(['TEST1'])
-
-    # check for situation with two empty sets
-    upgradeHelper.Options.SERVICES = set()
-    actual_result_1 = upgradeHelper.is_services_exists([])
-
-    upgradeHelper.Options.SERVICES = old_services
-
-    self.assertEqual(True, actual_result)
-    self.assertEqual(True, actual_result_1)
-
-
-  @patch("__builtin__.open")
-  @patch.object(os.path, "isfile")
-  @patch("os.remove")
-  def test_write_mapping(self, remove_mock, isfile_mock, open_mock):
-    test_data = {
-      "test_field": "test_value"
-    }
-    test_result = json.dumps(test_data)
-    output = StringIO()
-    isfile_mock.return_value = True
-    open_mock.return_value = output
-
-    # execute testing function
-    upgradeHelper.write_mapping(test_data)
-
-    self.assertEquals(1, isfile_mock.call_count)
-    self.assertEquals(1, remove_mock.call_count)
-    self.assertEquals(1, open_mock.call_count)
-
-    # check for content
-    self.assertEquals(test_result, output.getvalue())
-
-  @patch("__builtin__.open")
-  @patch.object(os.path, "isfile")
-  def test_read_mapping(self, isfile_mock, open_mock):
-    test_data = {
-      "test_field": "test_value"
-    }
-    test_result = json.dumps(test_data)
-    isfile_mock.return_value = True
-    output = StringIO(test_result)
-    open_mock.return_value = output
-
-    # execute testing function
-    actual_mapping = upgradeHelper.read_mapping()
-
-    self.assertEquals(1, isfile_mock.call_count)
-    self.assertEquals(1, open_mock.call_count)
-
-    self.assertEquals(test_data, actual_mapping)
-
-  @patch.object(upgradeHelper, "curl")
-  @patch.object(upgradeHelper, "write_mapping")
-  def test_get_mr1_mapping(self, write_mapping_mock, curl_mock):
-    return_data = [
-     {
-      "host_components": [   # MAPREDUCE_CLIENT
-        {
-          "HostRoles": {
-            "host_name": "test.host.vm"
-           }
-        }
-      ]
-     },
-     {
-      "host_components": [  # JOBTRACKER
-        {
-          "HostRoles": {
-            "host_name": "test1.host.vm"
-           }
-        }
-      ]
-     },
-     {
-      "host_components": [  # TASKTRACKER
-        {
-          "HostRoles": {
-            "host_name": "test2.host.vm"
-           }
-        }
-      ]
-     },
-     {
-      "host_components": [  # HISTORYSERVER
-        {
-          "HostRoles": {
-            "host_name": "test3.host.vm"
-           }
-        }
-      ]
-     }
-    ]
-    expect_data = {
-      "MAPREDUCE_CLIENT": ["test.host.vm"],
-      "JOBTRACKER": ["test1.host.vm"],
-      "TASKTRACKER": ["test2.host.vm"],
-      "HISTORYSERVER": ["test3.host.vm"]
-    }
-
-    tricky_mock = MagicMock(side_effect=return_data)
-    curl_mock.side_effect = tricky_mock
-
-    # execute testing function
-    upgradeHelper.get_mr1_mapping()
-
-    self.assertEquals(write_mapping_mock.call_count, 1)
-    self.assertEquals(expect_data, write_mapping_mock.call_args[0][0])
-
-  @patch.object(upgradeHelper, "get_choice_string_input")
-  def test_get_YN_input(self, get_choice_string_input_mock):
-    yes = set(['yes', 'ye', 'y'])
-    no = set(['no', 'n'])
-
-    prompt = "test prompt"
-    default = "default value"
-
-    # execute testing function
-    upgradeHelper.get_YN_input(prompt, default)
-
-    expect_args = (prompt, default, yes, no)
-    self.assertEquals(expect_args, get_choice_string_input_mock.call_args[0])
-
-  @patch("__builtin__.raw_input")
-  def test_get_choice_string_input(self, raw_input_mock):
-    yes = set(['yes', 'ye', 'y'])
-    no = set(['no', 'n'])
-    input_answers = ["yes", "no", ""]
-    tricky_mock = MagicMock(side_effect=input_answers)
-    raw_input_mock.side_effect = tricky_mock
-    default = "default value"
-
-    expect_result = [True, False, default]
-    actual_result = []
-    for i in range(0, len(input_answers)):
-      actual_result.append(upgradeHelper.get_choice_string_input("test prompt", default, yes, no))
-
-    self.assertEquals(expect_result, actual_result)
-
-  @patch.object(upgradeHelper, "get_YN_input")
-  @patch.object(upgradeHelper, "read_mapping")
-  @patch.object(upgradeHelper, "curl")
-  def test_delete_mr(self, curl_mock, read_mapping_mock, get_YN_mock):
-    COMPONENT_URL_FORMAT = upgradeHelper.Options.CLUSTER_URL + '/hosts/%s/host_components/%s'
-    SERVICE_URL_FORMAT = upgradeHelper.Options.CLUSTER_URL + '/services/MAPREDUCE'
-    NON_CLIENTS = ["JOBTRACKER", "TASKTRACKER", "HISTORYSERVER"]
-    PUT_IN_DISABLED = {
-      "HostRoles": {
-        "state": "DISABLED"
-      }
-    }
-    mr_mapping = {
-      "MAPREDUCE_CLIENT": ["test.host.vm"],
-      "JOBTRACKER": ["test1.host.vm"],
-      "TASKTRACKER": ["test2.host.vm"],
-      "HISTORYSERVER": ["test3.host.vm"]
-    }
-    expected_curl_exec_args = []
-    for key, hosts in mr_mapping.items():
-      if key in NON_CLIENTS:
-        for host in hosts:
-          expected_curl_exec_args.append(
-            [
-              (COMPONENT_URL_FORMAT % (host, key),),
-              {
-                "request_type": "PUT",
-                "data": PUT_IN_DISABLED,
-                "validate": True
-              }
-            ]
-          )
-
-    expected_curl_exec_args.append(
-      [
-        (SERVICE_URL_FORMAT,),
-        {
-          "request_type": "DELETE",
-          "validate": True
-        }
-      ]
-    )
-
-    get_YN_mock.return_value = True
-    read_mapping_mock.return_value = mr_mapping
-
-    # execute testing function
-    upgradeHelper.delete_mr()
-
-    self.assertEqual(expected_curl_exec_args, curl_mock.call_args_list)
-
-    pass
-
-  @patch.object(upgradeHelper, "curl")
-  def test_get_cluster_stackname(self, curl_mock):
-    expected_result = "test version"
-    actual_result = ""
-    curl_mock.return_value = {
-      "Clusters": {
-        "version": expected_result
-      }
-    }
-
-    # execute testing function
-    actual_result = upgradeHelper.get_cluster_stackname()
-
-    self.assertEqual(expected_result, actual_result)
-
-  @patch.object(upgradeHelper, "curl")
-  def test_has_component_in_stack_def(self, curl_mock):
-    curl_mock.side_effect = MagicMock(side_effect=["", upgradeHelper.FatalException(1, "some reason")])
-
-    # execute testing function
-    result_ok = upgradeHelper.has_component_in_stack_def("-", "", "")
-    result_fail = upgradeHelper.has_component_in_stack_def("-", "", "")
-
-    self.assertEqual(True, result_ok)
-    self.assertEqual(False, result_fail)
-
-  @patch.object(upgradeHelper, "get_cluster_stackname")
-  @patch.object(upgradeHelper, "has_component_in_stack_def")
-  @patch.object(upgradeHelper, "read_mapping")
-  @patch.object(upgradeHelper, "curl")
-  def test_add_services(self, curl_mock, read_mapping_mock, has_component_mock, get_stack_name_mock):
-    host_mapping = {
-      "MAPREDUCE_CLIENT": ["test.host.vm"],
-      "JOBTRACKER": ["test1.host.vm"],
-      "TASKTRACKER": ["test2.host.vm"],
-      "HISTORYSERVER": ["test3.host.vm"]
-    }
-    SERVICE_URL_FORMAT = upgradeHelper.Options.CLUSTER_URL + '/services/{0}'
-    COMPONENT_URL_FORMAT = SERVICE_URL_FORMAT + '/components/{1}'
-    HOST_COMPONENT_URL_FORMAT = upgradeHelper.Options.CLUSTER_URL + '/hosts/{0}/host_components/{1}'
-    service_comp = {
-      "YARN": ["NODEMANAGER", "RESOURCEMANAGER", "YARN_CLIENT"],
-      "MAPREDUCE2": ["HISTORYSERVER", "MAPREDUCE2_CLIENT"]}
-    new_old_host_map = {
-      "NODEMANAGER": "TASKTRACKER",
-      "HISTORYSERVER": "HISTORYSERVER",
-      "RESOURCEMANAGER": "JOBTRACKER",
-      "YARN_CLIENT": "MAPREDUCE_CLIENT",
-      "MAPREDUCE2_CLIENT": "MAPREDUCE_CLIENT"}
-    get_stack_name_mock.return_value = ""
-    has_component_mock.return_value = False
-    read_mapping_mock.return_value = host_mapping
-    expected_curl_args = []
-
-    for service in service_comp.keys():
-      expected_curl_args.append([
-        (SERVICE_URL_FORMAT.format(service),),
-        {
-          "validate": True,
-          "request_type": "POST"
-        }
-      ])
-      for component in service_comp[service]:
-        expected_curl_args.append([
-          (COMPONENT_URL_FORMAT.format(service, component),),
-          {
-            "validate": True,
-            "request_type": "POST"
-          }
-        ])
-        for host in host_mapping[new_old_host_map[component]]:
-          expected_curl_args.append([
-            (HOST_COMPONENT_URL_FORMAT.format(host, component),),
-            {
-              "validate": True,
-              "request_type": "POST"
-            }
-          ])
-
-    # execute testing function
-    upgradeHelper.add_services()
-
-    self.assertEqual(expected_curl_args, curl_mock.call_args_list)
-
-  @patch.object(upgradeHelper, "get_config_resp_all")
-  def test_coerce_tag(self, get_config_resp_all_mock):
-    test_catalog = """
-        {
-      "version": "1.0",
-      "stacks": [
-        {
-          "name": "HDP",
-          "old-version": "1.0",
-          "target-version": "1.1",
-          "options": {
-            "config-types":{
-              "test": {
-                "merged-copy": "yes"
-              }
-            }
-          },
-          "properties": {
-             "test": {
-               "test": "host1.com"
-            }
-          },
-          "property-mapping": {
-            "test":{
-                "map-to": "test-arr",
-                "coerce-to": "yaml-array"
-           }
-          }
-        }
-      ]
-    }
-    """
-    old_opt = upgradeHelper.Options.OPTIONS
-    options = lambda: ""
-    options.from_stack = "1.0"
-    options.to_stack = "1.1"
-    options.upgrade_json = ""
-
-    upgradeHelper.Options.OPTIONS = options
-    upgradeHelper.Options.SERVICES = [self.required_service]
-    get_config_resp_all_mock.return_value = {
-      "test": {
-        "properties": {}
-      }
-    }
-
-    ucf = UpgradeCatalogFactoryMock(test_catalog)
-    scf = upgradeHelper.ServerConfigFactory()
-
-    cfg = scf.get_config("test")
-    ucfg = ucf.get_catalog("1.0", "1.1")
-
-    cfg.merge(ucfg)
-    scf.process_mapping_transformations(ucfg)
-
-    upgradeHelper.Options.OPTIONS = old_opt
-
-    self.assertEqual(True, "test-arr" in cfg.properties)
-    self.assertEqual("['host1.com']", cfg.properties["test-arr"])
-
-  @patch.object(upgradeHelper, "get_config_resp_all")
-  def test_override_tag(self, get_config_resp_all_mock):
-    test_catalog = """
-        {
-      "version": "1.0",
-      "stacks": [
-        {
-          "name": "HDP",
-          "old-version": "1.0",
-          "target-version": "1.1",
-          "options": {
-            "config-types":{
-              "test": {
-                "merged-copy": "yes"
-              }
-            }
-          },
-          "properties": {
-             "test": {
-               "test_property": {
-                  "value": "host1.com",
-                  "override": "no"
-                }
-
-            }
-          },
-          "property-mapping": {}
-        }
-      ]
-    }
-    """
-    old_opt = upgradeHelper.Options.OPTIONS
-    options = lambda: ""
-    options.from_stack = "1.0"
-    options.to_stack = "1.1"
-    options.upgrade_json = ""
-
-    upgradeHelper.Options.OPTIONS = options
-    upgradeHelper.Options.SERVICES = [self.required_service]
-    get_config_resp_all_mock.return_value = {
-      "test": {
-        "properties": {
-          "test_property": "test host"
-        }
-      }
-    }
-
-    ucf = UpgradeCatalogFactoryMock(test_catalog)
-    scf = upgradeHelper.ServerConfigFactory()
-
-    cfg = scf.get_config("test")
-    ucfg = ucf.get_catalog("1.0", "1.1")
-
-    cfg.merge(ucfg)
-    scf.process_mapping_transformations(ucfg)
-
-    upgradeHelper.Options.OPTIONS = old_opt
-
-    self.assertEqual(True, "test_property" in cfg.properties)
-    self.assertEqual("test host", cfg.properties["test_property"])
-
-  @patch.object(upgradeHelper, "get_config_resp_all")
-  def test_replace_tag(self, get_config_resp_all_mock):
-    test_catalog = """
-        {
-      "version": "1.0",
-      "stacks": [
-        {
-          "name": "HDP",
-          "old-version": "1.0",
-          "target-version": "1.1",
-          "options": {
-            "config-types":{
-              "test": {
-                "merged-copy": "yes"
-              }
-            }
-          },
-          "properties": {
-             "test": {
-               "test": "host1.com"
-            }
-          },
-          "property-mapping": {
-            "test":{
-                "map-to": "test-arr",
-                "replace-from": "com",
-                "replace-to": "org"
-           }
-          }
-        }
-      ]
-    }
-    """
-    old_opt = upgradeHelper.Options.OPTIONS
-    options = lambda: ""
-    options.from_stack = "1.0"
-    options.to_stack = "1.1"
-    options.upgrade_json = ""
-
-    upgradeHelper.Options.OPTIONS = options
-    upgradeHelper.Options.SERVICES = [self.required_service]
-    get_config_resp_all_mock.return_value = {
-      "test": {
-        "properties": {}
-      }
-    }
-
-    ucf = UpgradeCatalogFactoryMock(test_catalog)
-    scf = upgradeHelper.ServerConfigFactory()
-
-    cfg = scf.get_config("test")
-    ucfg = ucf.get_catalog("1.0", "1.1")
-
-    cfg.merge(ucfg)
-    scf.process_mapping_transformations(ucfg)
-
-    upgradeHelper.Options.OPTIONS = old_opt
-
-    self.assertEqual(True, "test-arr" in cfg.properties)
-    self.assertEqual("host1.org", cfg.properties["test-arr"])
-
-  @patch.object(upgradeHelper, "curl")
-  @patch("time.time")
-  def test_update_config(self, time_mock, curl_mock):
-    time_pass = 2
-    config_type = "test config"
-    properties = {
-      "test property": "test value"
-    }
-    attributes = {
-      "test attribute": "attribute value"
-    }
-    expected_tag = "version" + str(int(time_pass * 1000))
-    properties_payload = {"Clusters": {"desired_config": {"type": config_type, "tag": expected_tag, "properties": properties}}}
-    time_mock.return_value = time_pass
-
-    expected_simple_result = (
-      (upgradeHelper.Options.CLUSTER_URL,),
-      {
-        "request_type": "PUT",
-        "data": copy.deepcopy(properties_payload),
-        "validate": True,
-        "soft_validation": True
-      }
-    )
-
-    properties_payload["Clusters"]["desired_config"]["properties_attributes"] = attributes
-    expected_complex_result = (
-      (upgradeHelper.Options.CLUSTER_URL,),
-      {
-        "request_type": "PUT",
-        "data": copy.deepcopy(properties_payload),
-        "validate": True,
-        "soft_validation": True
-      }
-    )
-
-    # execute testing function
-    upgradeHelper.update_config(properties, config_type)
-    simple_result = tuple(curl_mock.call_args)
-
-    upgradeHelper.update_config(properties, config_type, attributes)
-    complex_result = tuple(curl_mock.call_args)
-
-    self.assertEqual(expected_simple_result, simple_result)
-    self.assertEqual(expected_complex_result, complex_result)
-
-  @patch.object(upgradeHelper, "curl")
-  def test_get_zookeeper_quorum(self, curl_mock):
-    zoo_def_port = "2181"
-    return_curl_data = {
-      "host_components": [
-                           {
-                             "HostRoles": {
-                               "host_name": "test.host.vm"
-                             }
-                           },
-                           {
-                             "HostRoles": {
-                               "host_name": "test.host.vm"
-                             }
-                           }
-      ]
-    }
-
-    curl_mock.return_value = copy.deepcopy(return_curl_data)
-
-    # build zookeeper quorum string from return_curl_data and remove trailing commas
-    expected_result = reduce(
-      lambda x, y: x + "%s:%s," % (y["HostRoles"]["host_name"], zoo_def_port),
-      return_curl_data["host_components"],
-      ''  # initializer
-    ).rstrip(',')
-
-    # execute testing function
-    actual_result = upgradeHelper.get_zookeeper_quorum()
-
-    self.assertEqual(expected_result, actual_result)
-
-  @patch.object(upgradeHelper, "curl")
-  def test_get_tez_history_url_base(self, curl_mock):
-    return_curl_data = {
-      'href': 'http://127.0.0.1:8080/api/v1/views/TEZ',
-      'ViewInfo': {'view_name': 'TEZ'},
-      'versions': [
-        {
-          'ViewVersionInfo': {
-            'view_name': 'TEZ',
-            'version': '0.7.0.2.3.0.0-1319'
-          },
-          'href': 'http://127.0.0.1:8080/api/v1/views/TEZ/versions/0.7.0.2.3.0.0-1319'
-        }
-      ]
-    }
-
-    curl_mock.return_value = copy.deepcopy(return_curl_data)
-
-    # expected Tez view history URL base derived from the curl response above
-    expected_result = "http://127.0.0.1:8080/#/main/views/TEZ/0.7.0.2.3.0.0-1319/TEZ_CLUSTER_INSTANCE"
-
-    # execute testing function
-    actual_result = upgradeHelper.get_tez_history_url_base()
-
-    self.assertEqual(expected_result, actual_result)
-
-  @patch.object(upgradeHelper, "curl")
-  def test_get_ranger_xaaudit_hdfs_destination_directory(self, curl_mock):
-    return_curl_data = {
-      "host_components": [
-        {
-          "HostRoles": {
-            "host_name": "test.host.vm"
-          }
-        }
-      ]
-    }
-
-    curl_mock.return_value = copy.deepcopy(return_curl_data)
-
-    # expected Ranger audit HDFS destination derived from the curl response above
-    expected_result = "hdfs://test.host.vm:8020/ranger/audit"
-
-    # execute testing function
-    actual_result = upgradeHelper.get_ranger_xaaudit_hdfs_destination_directory()
-
-    self.assertEqual(expected_result, actual_result)
-
-
-  @patch.object(upgradeHelper, "curl")
-  def test_get_config_resp_all(self, curl_mock):
-    cfg_type = "my type"
-    cfg_tag = "my tag"
-    cfg_properties = {
-      "my property": "property value"
-    }
-    curl_resp = [
-      {
-        'Clusters': {
-          'desired_configs': {
-            cfg_type: {
-              "tag": cfg_tag
-            }
-          }
-        }
-      },
-      {
-        "items": [
-          {
-            "type": cfg_type,
-            "tag": cfg_tag,
-            "properties": cfg_properties
-          }
-        ]
-      }
-    ]
-
-    expected_result = {
-        cfg_type: {
-          "properties": cfg_properties,
-          "tag": cfg_tag
-        }
-      }
-    curl_mock.side_effect = MagicMock(side_effect=curl_resp)
-
-    # execute testing function
-    actual_result = upgradeHelper.get_config_resp_all()
-
-    self.assertEquals(expected_result, actual_result)
-    pass
-
-  @patch.object(upgradeHelper, "get_config_resp_all")
-  @patch("os.mkdir")
-  @patch("os.path.exists")
-  @patch("__builtin__.open")
-  def test_backup_configs(self, open_mock, os_path_exists_mock, mkdir_mock, get_config_resp_all_mock):
-    data = {
-      self.catalog_cfg_type: {
-        "properties": {
-          "test-property": "value"
-        },
-        "tag": "version1"
-      }
-    }
-    os_path_exists_mock.return_value = False
-    get_config_resp_all_mock.return_value = data
-    expected = json.dumps(data[self.catalog_cfg_type]["properties"], indent=4)
-    stream = StringIO()
-    m = MagicMock()
-    m.__enter__.return_value = stream
-    open_mock.return_value = m
-
-    # execute testing function
-    upgradeHelper.backup_configs(self.catalog_cfg_type)
-
-    self.assertEqual(expected, stream.getvalue())
-
-  @patch.object(upgradeHelper, "curl")
-  def test_install_services(self, curl_mock):
-    expected_args = (
-      (
-        ('http://127.0.0.1:8080/api/v1/clusters/test1/services/MAPREDUCE2',),
-        {
-          'request_type': 'PUT',
-          'data': {
-            'RequestInfo': {
-              'context': 'Install MapReduce2'
-            },
-            'Body': {
-              'ServiceInfo': {
-                'state': 'INSTALLED'
-              }
-            }
-          },
-          'validate': True
-        }
-      ),
-      (
-        ('http://127.0.0.1:8080/api/v1/clusters/test1/services/YARN',),
-        {
-          'request_type': 'PUT',
-          'data': {
-            'RequestInfo': {
-              'context': 'Install YARN'
-            },
-            'Body': {
-              'ServiceInfo': {
-                'state': 'INSTALLED'
-              }
-            }
-          },
-          'validate': True
-        }
-      )
-    )
-
-    # execute testing function
-    upgradeHelper.install_services()
-
-    self.assertEqual(2, curl_mock.call_count)
-    for i in range(0, 1):
-      self.assertEqual(expected_args[i], tuple(curl_mock.call_args_list[i]))
-
-  def test_configuration_diff_analyze(self):
-    in_data = {
-        self.catalog_cfg_type: [
-          {
-            'catalog_item': {
-              'value': 'my value'
-            },
-            'property': 'my property',
-            'actual_value': 'my value',
-            'catalog_value': 'my value'
-          }
-        ]
-    }
-
-    expected_result = {
-      'my type': {
-        'fail': {
-          'count': 0,
-          'items': []
-        },
-        'total': {
-          'count': 1,
-          'items': []
-        },
-      'skipped': {
-        'count': 0,
-        'items': []
-      },
-        'ok': {
-          'count': 1,
-          'items': [
-                    {
-                      'catalog_item': {
-                        'value': 'my value'
-                      },
-                      'property': 'my property',
-                      'actual_value': 'my value',
-                      'catalog_value': 'my value'
-                    }
-          ]
-        }
-      }
-    }
-
-    # execute testing function
-    actual_result = upgradeHelper.configuration_diff_analyze(in_data)
-
-    self.assertEqual(expected_result, actual_result)
-
-  @patch.object(upgradeHelper, "UpgradeCatalogFactory", autospec=True)
-  @patch.object(upgradeHelper, "get_config_resp_all")
-  @patch.object(upgradeHelper, "configuration_item_diff")
-  @patch.object(upgradeHelper, "configuration_diff_analyze")
-  @patch("__builtin__.open")
-  def test_verify_configuration(self, open_mock, configuration_diff_analyze_mock, configuration_item_diff_mock,
-                                get_config_resp_all_mock, upgradecatalogfactory_mock):
-    old_opt = upgradeHelper.Options.OPTIONS
-    options = lambda: ""
-    options.from_stack = self.catalog_from
-    options.to_stack = self.catalog_to
-    options.upgrade_json = ""
-
-    upgradeHelper.Options.OPTIONS = options
-    upgradeHelper.Options.SERVICES = [self.required_service]
-    upgradecatalogfactory_mock.return_value = UpgradeCatalogFactoryMock(self.test_catalog)
-    get_config_resp_all_mock.return_value = {
-      self.catalog_cfg_type: {
-        "properties": {}
-      }
-    }
-
-    # execute testing function
-    upgradeHelper.verify_configuration()
-
-    upgradeHelper.Options.OPTIONS = old_opt
-
-    self.assertEqual(1, get_config_resp_all_mock.call_count)
-    self.assertEqual(1, configuration_item_diff_mock.call_count)
-    self.assertEqual(1, configuration_diff_analyze_mock.call_count)
-    self.assertEqual(1, open_mock.call_count)
-
-  def test_report_formatter(self):
-    file = StringIO()
-    cfg_item = self.catalog_cfg_type
-    analyzed_list = {
-        'fail': {
-          'count': 1,
-          'items': [
-            {
-              'catalog_item': {
-                'value': 'my value'
-              },
-              'property': 'my property',
-              'actual_value': 'my value 1',
-              'catalog_value': 'my value'
-            }
-          ]
-        },
-        'total': {
-          'count': 1,
-          'items': []
-        },
-        'skipped': {
-          'count': 0,
-          'items': []
-        },
-        'ok': {
-          'count': 0,
-          'items': []
-        }
-    }
-
-    expected_output = "Configuration item my type: property \"my property\" is set to \"my value 1\", but should be set to \"my value\"\n"
-
-    # execute testing function
-    upgradeHelper.report_formatter(file, cfg_item, analyzed_list)
-
-    self.assertEqual(expected_output, file.getvalue())
-
-  @patch.object(upgradeHelper, "get_config_resp_all")
-  def test_conditional_replace(self, get_config_resp_all_mock):
-    test_catalog = """
-        {
-      "version": "1.0",
-      "stacks": [
-        {
-          "name": "HDP",
-          "old-version": "1.0",
-          "target-version": "1.1",
-          "options": {
-            "config-types":{
-              "test": {
-                "merged-copy": "yes"
-              }
-            }
-          },
-          "properties": {
-             "test": {
-               "test": {
-                 "value": "10",
-                 "value-required": "-1"
-               },
-               "test2": {
-                 "value": "10",
-                 "value-required": "-2"
-               }
-            }
-          },
-          "property-mapping": {
-          }
-        }
-      ]
-    }
-    """
-
-    expected_properties = {"test":"10", "test2":"15"}
-
-    old_opt = upgradeHelper.Options.OPTIONS
-    options = lambda: ""
-    options.from_stack = "1.0"
-    options.to_stack = "1.1"
-    options.upgrade_json = ""
-
-    upgradeHelper.Options.OPTIONS = options
-    upgradeHelper.Options.SERVICES = [self.required_service]
-    get_config_resp_all_mock.return_value = {
-      "test": {
-        "properties": {"test":"-1", "test2":"15"}
-      }
-    }
-
-    ucf = UpgradeCatalogFactoryMock(test_catalog)
-    scf = upgradeHelper.ServerConfigFactory()
-
-    cfg = scf.get_config("test")
-    ucfg = ucf.get_catalog("1.0", "1.1")
-
-    cfg.merge(ucfg)
-    upgradeHelper.Options.OPTIONS = old_opt
-
-    self.assertEqual(expected_properties, cfg.properties)
-
-if __name__ == "__main__":
-  unittest.main()


[37/63] [abbrv] ambari git commit: Revert "AMBARI-21206 - Remove Zookeeper as a required service from YARN"

Posted by ab...@apache.org.
Revert "AMBARI-21206 - Remove Zookeeper as a required service from YARN"

This reverts commit a2464b9045637c1d5014db4aff7d83a0bc573fc0.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ebd79e98
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ebd79e98
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ebd79e98

Branch: refs/heads/branch-feature-logsearch-ui
Commit: ebd79e989984ee1fd55ebe6cdb4e8469874bd8b7
Parents: 535660b
Author: Tim Thorpe <tt...@apache.org>
Authored: Tue Jun 27 04:53:36 2017 -0700
Committer: Tim Thorpe <tt...@apache.org>
Committed: Tue Jun 27 04:53:36 2017 -0700

----------------------------------------------------------------------
 .../YARN/3.0.0.3.0/configuration/yarn-site.xml  |  10 +-
 .../common-services/YARN/3.0.0.3.0/metainfo.xml |  46 +------
 .../YARN/3.0.0.3.0/service_advisor.py           |  53 +-------
 .../stacks/HDP/2.2/services/stack_advisor.py    |  53 +-------
 .../stacks/2.2/common/test_stack_advisor.py     | 132 +------------------
 .../stacks/2.6/common/test_stack_advisor.py     |   9 --
 6 files changed, 14 insertions(+), 289 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ebd79e98/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
index c77aa2a..64e0bcb 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/configuration/yarn-site.xml
@@ -486,10 +486,7 @@
   </property>
   <property>
     <name>hadoop.registry.zk.quorum</name>
-    <value></value>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
+    <value>localhost:2181</value>
     <description>
       List of hostname:port pairs defining the zookeeper quorum binding for the registry
     </description>
@@ -556,10 +553,7 @@
   </property>
   <property>
     <name>yarn.resourcemanager.zk-address</name>
-    <value></value>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
+    <value>localhost:2181</value>
     <description>
       List Host:Port of the ZooKeeper servers to be used by the RM. comma separated host:port pairs, each corresponding to a zk server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" If the optional chroot suffix is used the example would look like: "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a" where the client would be rooted at "/app/a" and all paths would be relative to this root - ie getting/setting/etc...  "/foo/bar" would result in operations being run on "/app/a/foo/bar" (from the server perspective).
     </description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ebd79e98/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
index 90f4a92..061587d 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/metainfo.xml
@@ -73,41 +73,17 @@
             <timeout>1200</timeout>
           </commandScript>
 
+          <!-- TODO HDP 3.0, add later after UI is fixed,
           <dependencies>
             <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>false</enabled>
-              </auto-deploy>
-              <conditions>
-                <condition xsi:type="propertyExists">
-                  <configType>yarn-site</configType>
-                  <property>yarn.resourcemanager.recovery.enabled</property>
-                  <value>true</value>
-                </condition>
-                <condition xsi:type="propertyExists">
-                  <configType>yarn-site</configType>
-                  <property>yarn.resourcemanager.ha.enabled</property>
-                  <value>true</value>
-                </condition>
-                <condition xsi:type="propertyExists">
-                  <configType>yarn-site</configType>
-                  <property>hadoop.registry.rm.enabled</property>
-                  <value>true</value>
-                </condition>
-              </conditions>
-            </dependency>
-            <!-- TODO HDP 3.0, add later after UI is fixed,
-            <dependency>
               <name>TEZ/TEZ_CLIENT</name>
               <scope>host</scope>
               <auto-deploy>
                 <enabled>true</enabled>
               </auto-deploy>
             </dependency>
-            -->
           </dependencies>
+          -->
 
           <logs>
             <log>
@@ -169,23 +145,6 @@
               <logId>yarn_nodemanager</logId>
             </log>
           </logs>
-
-          <dependencies>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>false</enabled>
-              </auto-deploy>
-              <conditions>
-                <condition xsi:type="propertyExists">
-                  <configType>yarn-site</configType>
-                  <property>yarn.nodemanager.recovery.enabled</property>
-                  <value>true</value>
-                </condition>
-              </conditions>
-            </dependency>
-          </dependencies>
         </component>
 
         <component>
@@ -255,6 +214,7 @@
       <requiredServices>
         <service>HDFS</service>
         <service>MAPREDUCE2</service>
+        <service>ZOOKEEPER</service>
       </requiredServices>
 
       <themes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ebd79e98/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
index 1af9821..0fb538d 100644
--- a/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
+++ b/ambari-server/src/main/resources/common-services/YARN/3.0.0.3.0/service_advisor.py
@@ -351,21 +351,12 @@ class YARNRecommender(service_advisor.ServiceAdvisor):
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.hierarchy', 'delete', 'true')
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.mount', 'delete', 'true')
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.mount-path', 'delete', 'true')
-    # recommend hadoop.registry.rm.enabled based on SLIDER and ZOOKEEPER in services
+    # recommend hadoop.registry.rm.enabled based on SLIDER in services
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-    if "SLIDER" in servicesList and "ZOOKEEPER" in servicesList:
+    if "SLIDER" in servicesList:
       putYarnProperty('hadoop.registry.rm.enabled', 'true')
     else:
       putYarnProperty('hadoop.registry.rm.enabled', 'false')
-    # recommend enabling RM and NM recovery if ZOOKEEPER in services
-    if "ZOOKEEPER" in servicesList:
-      putYarnProperty('yarn.resourcemanager.recovery.enabled', 'true')
-      putYarnProperty('yarn.nodemanager.recovery.enabled', 'true')
-    else:
-      putYarnProperty('yarn.resourcemanager.recovery.enabled', 'false')
-      putYarnProperty('yarn.nodemanager.recovery.enabled', 'false')
-      # recommend disabling RM HA if ZOOKEEPER is not in services
-      putYarnProperty('yarn.resourcemanager.ha.enabled', 'false')
 
   def recommendYARNConfigurationsFromHDP23(self, configurations, clusterData, services, hosts):
     putYarnSiteProperty = self.putProperty(configurations, "yarn-site", services)
@@ -1804,7 +1795,6 @@ class YARNValidator(service_advisor.ServiceAdvisor):
     self.as_super.__init__(*args, **kwargs)
 
     self.validators = [("yarn-site", self.validateYARNSiteConfigurationsFromHDP206),
-                       ("yarn-site", self.validateYARNSiteConfigurationsFromHDP22),
                        ("yarn-site", self.validateYARNSiteConfigurationsFromHDP25),
                        ("yarn-site" , self.validateYARNSiteConfigurationsFromHDP26),
                        ("yarn-env", self.validateYARNEnvConfigurationsFromHDP206),
@@ -1847,45 +1837,6 @@ class YARNValidator(service_advisor.ServiceAdvisor):
                         {"config-name": 'yarn.scheduler.maximum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.maximum-allocation-mb')} ]
     return self.toConfigurationValidationProblems(validationItems, "yarn-site")
 
-  def validateYARNSiteConfigurationsFromHDP22(self, properties, recommendedDefaults, configurations, services, hosts):
-    """
-    This was copied from HDP 2.2; validate yarn-site
-    :return: A list of configuration validation problems.
-    """
-    yarn_site = properties
-    validationItems = []
-    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-
-    zk_hosts = self.getHostsForComponent(services, "ZOOKEEPER", "ZOOKEEPER_SERVER")
-    if len(zk_hosts) == 0:
-      # ZOOKEEPER_SERVER isn't assigned to at least one host
-      if 'yarn.resourcemanager.recovery.enabled' in yarn_site and \
-              'true' == yarn_site['yarn.resourcemanager.recovery.enabled']:
-        validationItems.append({"config-name": "yarn.resourcemanager.recovery.enabled",
-                                "item": self.getWarnItem(
-                                  "YARN resource manager recovery can only be enabled if ZOOKEEPER is installed.")})
-      if 'yarn.nodemanager.recovery.enabled' in yarn_site and \
-              'true' == yarn_site['yarn.nodemanager.recovery.enabled']:
-        validationItems.append({"config-name": "yarn.nodemanager.recovery.enabled",
-                                "item": self.getWarnItem(
-                                  "YARN node manager recovery can only be enabled if ZOOKEEPER is installed.")})
-
-    if len(zk_hosts) < 3:
-      if 'yarn.resourcemanager.ha.enabled' in yarn_site and \
-              'true' == yarn_site['yarn.resourcemanager.ha.enabled']:
-        validationItems.append({"config-name": "yarn.resourcemanager.ha.enabled",
-                                "item": self.getWarnItem(
-                                  "You must have at least 3 ZooKeeper Servers in your cluster to enable ResourceManager HA.")})
-
-    if 'ZOOKEEPER' not in servicesList or 'SLIDER' not in servicesList:
-      if 'hadoop.registry.rm.enabled' in yarn_site and \
-              'true' == yarn_site['hadoop.registry.rm.enabled']:
-        validationItems.append({"config-name": "hadoop.registry.rm.enabled",
-                                "item": self.getWarnItem(
-                                  "HADOOP resource manager registry can only be enabled if ZOOKEEPER and SLIDER are installed.")})
-
-    return self.toConfigurationValidationProblems(validationItems, "yarn-site")
-
   def validateYARNSiteConfigurationsFromHDP25(self, properties, recommendedDefaults, configurations, services, hosts):
     yarn_site_properties = self.getSiteProperties(configurations, "yarn-site")
     validationItems = []
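
For readers skimming the advisor change above: after this commit the 3.0 service advisor recommends hadoop.registry.rm.enabled purely from the presence of SLIDER, and the ZooKeeper-driven recovery/HA recommendations are gone because ZOOKEEPER becomes a required YARN service (see the metainfo.xml change earlier in this commit). Below is a minimal, self-contained sketch of that rule only; the function name and payload shape are illustrative, not the real ServiceAdvisor API.

# Illustrative sketch, not Ambari code: models the simplified recommendation
# rule shown in the diff above.
def recommend_registry_rm_enabled(services):
  """Return the recommended value for hadoop.registry.rm.enabled.

  'services' mimics the stack-advisor payload shape:
  {"services": [{"StackServices": {"service_name": "SLIDER"}}, ...]}
  """
  services_list = [service["StackServices"]["service_name"]
                   for service in services["services"]]
  # ZooKeeper no longer participates in this check; only SLIDER matters.
  return "true" if "SLIDER" in services_list else "false"

if __name__ == "__main__":
  with_slider = {"services": [{"StackServices": {"service_name": "YARN"}},
                              {"StackServices": {"service_name": "SLIDER"}}]}
  assert recommend_registry_rm_enabled(with_slider) == "true"
  assert recommend_registry_rm_enabled({"services": []}) == "false"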

http://git-wip-us.apache.org/repos/asf/ambari/blob/ebd79e98/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index 54ddd89..726514b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -174,23 +174,12 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.hierarchy', 'delete', 'true')
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.mount', 'delete', 'true')
           putYarnPropertyAttribute('yarn.nodemanager.linux-container-executor.cgroups.mount-path', 'delete', 'true')
-    # recommend hadoop.registry.rm.enabled based on SLIDER and ZOOKEEPER in services
+    # recommend hadoop.registry.rm.enabled based on SLIDER in services
     servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-    if "SLIDER" in servicesList and "ZOOKEEPER" in servicesList:
+    if "SLIDER" in servicesList:
       putYarnProperty('hadoop.registry.rm.enabled', 'true')
     else:
       putYarnProperty('hadoop.registry.rm.enabled', 'false')
-    # recommend enabling RM and NM recovery if ZOOKEEPER in services
-    if "ZOOKEEPER" in servicesList:
-      putYarnProperty('yarn.resourcemanager.recovery.enabled', 'true')
-      putYarnProperty('yarn.nodemanager.recovery.enabled', 'true')
-    else:
-      putYarnProperty('yarn.resourcemanager.recovery.enabled', 'false')
-      putYarnProperty('yarn.nodemanager.recovery.enabled', 'false')
-      # recommend disabling RM HA if ZOOKEEPER is not in services
-      putYarnProperty('yarn.resourcemanager.ha.enabled', 'false')
-
-
 
   def recommendHDFSConfigurations(self, configurations, clusterData, services, hosts):
     super(HDP22StackAdvisor, self).recommendHDFSConfigurations(configurations, clusterData, services, hosts)
@@ -1045,7 +1034,6 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
                "hadoop-env": self.validateHDFSConfigurationsEnv,
                "ranger-hdfs-plugin-properties": self.validateHDFSRangerPluginConfigurations},
       "YARN": {"yarn-env": self.validateYARNEnvConfigurations,
-               "yarn-site": self.validateYARNConfigurations,
                "ranger-yarn-plugin-properties": self.validateYARNRangerPluginConfigurations},
       "HIVE": {"hiveserver2-site": self.validateHiveServer2Configurations,
                "hive-site": self.validateHiveConfigurations,
@@ -1726,43 +1714,6 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
 
     return self.toConfigurationValidationProblems(validationItems, "ranger-storm-plugin-properties")
 
-  def validateYARNConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-    parentValidationProblems = super(HDP22StackAdvisor, self).validateYARNConfigurations(properties, recommendedDefaults, configurations, services, hosts)
-    yarn_site = properties
-    validationItems = []
-    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-    zk_hosts = self.getHostsForComponent(services, "ZOOKEEPER", "ZOOKEEPER_SERVER")
-    if len(zk_hosts) == 0:
-      # ZOOKEEPER_SERVER isn't assigned to at least one host
-      if 'yarn.resourcemanager.recovery.enabled' in yarn_site and \
-              'true' == yarn_site['yarn.resourcemanager.recovery.enabled']:
-        validationItems.append({"config-name": "yarn.resourcemanager.recovery.enabled",
-                                "item": self.getWarnItem(
-                                  "YARN resource manager recovery can only be enabled if ZOOKEEPER is installed.")})
-      if 'yarn.nodemanager.recovery.enabled' in yarn_site and \
-              'true' == yarn_site['yarn.nodemanager.recovery.enabled']:
-        validationItems.append({"config-name": "yarn.nodemanager.recovery.enabled",
-                                "item": self.getWarnItem(
-                                  "YARN node manager recovery can only be enabled if ZOOKEEPER is installed.")})
-
-    if len(zk_hosts) < 3:
-      if 'yarn.resourcemanager.ha.enabled' in yarn_site and \
-              'true' == yarn_site['yarn.resourcemanager.ha.enabled']:
-        validationItems.append({"config-name": "yarn.resourcemanager.ha.enabled",
-                                "item": self.getWarnItem(
-                                  "You must have at least 3 ZooKeeper Servers in your cluster to enable ResourceManager HA.")})
-
-    if 'ZOOKEEPER' not in servicesList or 'SLIDER' not in servicesList:
-      if 'hadoop.registry.rm.enabled' in yarn_site and \
-              'true' == yarn_site['hadoop.registry.rm.enabled']:
-        validationItems.append({"config-name": "hadoop.registry.rm.enabled",
-                                "item": self.getWarnItem(
-                                  "HADOOP resource manager registry can only be enabled if ZOOKEEPER and SLIDER are installed.")})
-
-    validationProblems = self.toConfigurationValidationProblems(validationItems, "yarn-site")
-    validationProblems.extend(parentValidationProblems)
-    return validationProblems
-
   def validateYARNEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     parentValidationProblems = super(HDP22StackAdvisor, self).validateYARNEnvConfigurations(properties, recommendedDefaults, configurations, services, hosts)
     validationItems = []
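
For context, the validateYARNConfigurations method deleted above followed the stack advisor's usual warn-item pattern: collect a list of {"config-name", "item"} entries and turn them into validation problems for yarn-site. With ZOOKEEPER now required for YARN, those checks are presumably redundant, which is why the commit drops them instead of carrying them forward. A rough standalone model of that pattern, using simplified stand-ins for getWarnItem and toConfigurationValidationProblems rather than the real signatures, is:

# Standalone model of the warn-item validation pattern removed above.
# Helper names are simplified stand-ins, not the actual stack-advisor API.
def get_warn_item(message):
  return {"level": "WARN", "message": message}

def validate_yarn_recovery(yarn_site, zk_host_count):
  """Collect warnings when recovery/HA settings conflict with the ZK topology."""
  items = []
  if zk_host_count == 0 and \
          yarn_site.get("yarn.resourcemanager.recovery.enabled") == "true":
    items.append({"config-name": "yarn.resourcemanager.recovery.enabled",
                  "item": get_warn_item("RM recovery requires ZooKeeper.")})
  if zk_host_count < 3 and \
          yarn_site.get("yarn.resourcemanager.ha.enabled") == "true":
    items.append({"config-name": "yarn.resourcemanager.ha.enabled",
                  "item": get_warn_item("RM HA requires at least 3 ZooKeeper servers.")})
  return items

if __name__ == "__main__":
  problems = validate_yarn_recovery({"yarn.resourcemanager.ha.enabled": "true"}, 1)
  assert problems[0]["config-name"] == "yarn.resourcemanager.ha.enabled"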

http://git-wip-us.apache.org/repos/asf/ambari/blob/ebd79e98/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index ee620b5..571ff26 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -906,62 +906,7 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.maximum-allocation-vcores": "4",
           "yarn.scheduler.minimum-allocation-vcores": "1",
           "yarn.nodemanager.resource.cpu-vcores": "4",
-          "hadoop.registry.rm.enabled": "false",
-          "yarn.resourcemanager.recovery.enabled": "false",
-          "yarn.nodemanager.recovery.enabled": "false",
-          "yarn.resourcemanager.ha.enabled": "false"
-        }
-      }
-    }
-
-    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
-    self.assertEquals(configurations, expected)
-
-  def test_recommendYARNConfigurationsWithZKAndSlider(self):
-    configurations = {}
-    services = {"configurations": configurations}
-    services['services'] = [
-      {
-        "StackServices": {
-          "service_name": "ZOOKEEPER"
-        },
-        },
-      {
-        "StackServices": {
-          "service_name": "YARN"
-        },
-        },
-      {
-        "StackServices": {
-          "service_name": "SLIDER"
-        },
-        }
-    ]
-    clusterData = {
-      "cpu": 4,
-      "containers" : 5,
-      "ramPerContainer": 256,
-      "yarnMinContainerSize": 256
-    }
-    expected = {
-      "yarn-env": {
-        "properties": {
-          "min_user_id": "500",
-          'service_check.queue.name': 'default'
-        }
-      },
-      "yarn-site": {
-        "properties": {
-          "yarn.nodemanager.linux-container-executor.group": "hadoop",
-          "yarn.nodemanager.resource.memory-mb": "1280",
-          "yarn.scheduler.minimum-allocation-mb": "256",
-          "yarn.scheduler.maximum-allocation-mb": "1280",
-          "yarn.scheduler.maximum-allocation-vcores": "4",
-          "yarn.scheduler.minimum-allocation-vcores": "1",
-          "yarn.nodemanager.resource.cpu-vcores": "4",
-          "hadoop.registry.rm.enabled": "true",
-          "yarn.resourcemanager.recovery.enabled": "true",
-          "yarn.nodemanager.recovery.enabled": "true"
+          "hadoop.registry.rm.enabled": "true"
         }
       }
     }
@@ -969,55 +914,6 @@ class TestHDP22StackAdvisor(TestCase):
     self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
     self.assertEquals(configurations, expected)
 
-  def test_recommendYARNConfigurationsWithZK(self):
-    configurations = {}
-    services = {"configurations": configurations}
-    services['services'] = [
-      {
-        "StackServices": {
-          "service_name": "ZOOKEEPER"
-        },
-        },
-      {
-        "StackServices": {
-          "service_name": "YARN"
-        },
-        }
-    ]
-    clusterData = {
-      "cpu": 4,
-      "containers" : 5,
-      "ramPerContainer": 256,
-      "yarnMinContainerSize": 256
-    }
-    expected = {
-      "yarn-env": {
-        "properties": {
-          "min_user_id": "500",
-          'service_check.queue.name': 'default'
-        }
-      },
-      "yarn-site": {
-        "properties": {
-          "yarn.nodemanager.linux-container-executor.group": "hadoop",
-          "yarn.nodemanager.resource.memory-mb": "1280",
-          "yarn.scheduler.minimum-allocation-mb": "256",
-          "yarn.scheduler.maximum-allocation-mb": "1280",
-          "yarn.scheduler.maximum-allocation-vcores": "4",
-          "yarn.scheduler.minimum-allocation-vcores": "1",
-          "yarn.nodemanager.resource.cpu-vcores": "4",
-          "hadoop.registry.rm.enabled": "false",
-          "yarn.resourcemanager.recovery.enabled": "true",
-          "yarn.nodemanager.recovery.enabled": "true"
-        }
-      }
-    }
-
-    self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
-    self.assertEquals(configurations, expected)
-
-
-
   def test_recommendSPARKConfigurations(self):
     configurations = {}
     services = {"configurations": configurations}
@@ -1083,10 +979,7 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.minimum-allocation-vcores": "1",
           "yarn.scheduler.maximum-allocation-mb": "1280",
           "yarn.nodemanager.resource.cpu-vcores": "2",
-          "hadoop.registry.rm.enabled": "false",
-          "yarn.resourcemanager.recovery.enabled": "false",
-          "yarn.nodemanager.recovery.enabled": "false",
-          "yarn.resourcemanager.ha.enabled": "false"
+          "hadoop.registry.rm.enabled": "false"
         },
         "property_attributes": {
           'yarn.nodemanager.resource.memory-mb': {'maximum': '1877'},
@@ -1913,10 +1806,7 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.minimum-allocation-vcores": "1",
           "yarn.scheduler.maximum-allocation-mb": "1792",
           "yarn.nodemanager.resource.cpu-vcores": "1",
-          "hadoop.registry.rm.enabled": "false",
-          "yarn.resourcemanager.recovery.enabled": "false",
-          "yarn.nodemanager.recovery.enabled": "false",
-          "yarn.resourcemanager.ha.enabled": "false"
+          "hadoop.registry.rm.enabled": "false"
         },
         "property_attributes": {
           'yarn.nodemanager.resource.memory-mb': {'maximum': '1877'},
@@ -2180,10 +2070,7 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.minimum-allocation-vcores": "1",
           "yarn.scheduler.maximum-allocation-mb": "1280",
           "yarn.nodemanager.resource.cpu-vcores": "1",
-          "hadoop.registry.rm.enabled": "false",
-          "yarn.resourcemanager.recovery.enabled": "false",
-          "yarn.nodemanager.recovery.enabled": "false",
-          "yarn.resourcemanager.ha.enabled": "false"
+          "hadoop.registry.rm.enabled": "false"
         },
         "property_attributes": {
           'yarn.nodemanager.resource.memory-mb': {'maximum': '1877'},
@@ -2398,10 +2285,7 @@ class TestHDP22StackAdvisor(TestCase):
                 "yarn.scheduler.minimum-allocation-vcores": "1",
                 "yarn.scheduler.maximum-allocation-mb": "1280",
                 "yarn.nodemanager.resource.cpu-vcores": "1",
-                "hadoop.registry.rm.enabled": "false",
-                "yarn.resourcemanager.recovery.enabled": "false",
-                "yarn.nodemanager.recovery.enabled": "false",
-                "yarn.resourcemanager.ha.enabled": "false"
+                "hadoop.registry.rm.enabled": "false"
             },
             "property_attributes": {
                 'yarn.nodemanager.resource.memory-mb': {'maximum': '1877'},
@@ -3960,9 +3844,6 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.maximum-allocation-mb": "33792",
           "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler",
           "hadoop.registry.rm.enabled": "false",
-          "yarn.resourcemanager.recovery.enabled": "false",
-          "yarn.nodemanager.recovery.enabled": "false",
-          "yarn.resourcemanager.ha.enabled": "false",
           "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline",
           "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
           "yarn.nodemanager.local-dirs": "/hadoop/yarn/local,/dev/shm/hadoop/yarn/local,/vagrant/hadoop/yarn/local",
@@ -4022,9 +3903,6 @@ class TestHDP22StackAdvisor(TestCase):
           "yarn.scheduler.maximum-allocation-mb": "33792",
           "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler",
           "hadoop.registry.rm.enabled": "false",
-          "yarn.resourcemanager.recovery.enabled": "false",
-          "yarn.nodemanager.recovery.enabled": "false",
-          "yarn.resourcemanager.ha.enabled": "false",
           "yarn.timeline-service.leveldb-state-store.path": "/hadoop/yarn/timeline",
           "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
           "yarn.nodemanager.local-dirs": "/hadoop/yarn/local,/dev/shm/hadoop/yarn/local,/vagrant/hadoop/yarn/local",

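The test updates above all reduce to the same assertion shape: run the recommender, then compare the whole configurations dict against an expected dict that no longer carries the recovery/HA keys. A tiny self-contained analogue of that style, using a stub recommender in place of the real TestHDP22StackAdvisor fixture (so the names below are illustrative only), looks like this:

# Minimal analogue of the assertion pattern used in the tests above.
# The stub recommender stands in for the real stack advisor; it is not Ambari code.
import unittest

def stub_recommend_yarn(configurations, services_list):
  yarn_site = configurations.setdefault("yarn-site", {}).setdefault("properties", {})
  yarn_site["hadoop.registry.rm.enabled"] = "true" if "SLIDER" in services_list else "false"

class StubAdvisorTest(unittest.TestCase):
  def test_registry_rm_enabled_without_slider(self):
    configurations = {}
    stub_recommend_yarn(configurations, ["YARN", "ZOOKEEPER"])
    # Recovery/HA keys are intentionally absent from the expected dict now.
    expected = {"yarn-site": {"properties": {"hadoop.registry.rm.enabled": "false"}}}
    self.assertEqual(configurations, expected)

if __name__ == "__main__":
  unittest.main()
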
http://git-wip-us.apache.org/repos/asf/ambari/blob/ebd79e98/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
index 96a595f..d4d28c9 100644
--- a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
@@ -1153,9 +1153,6 @@ class TestHDP26StackAdvisor(TestCase):
       'yarn-site': {
         'properties': {
           'hadoop.registry.rm.enabled': 'false',
-          'yarn.resourcemanager.recovery.enabled': 'false',
-          'yarn.nodemanager.recovery.enabled': 'false',
-          'yarn.resourcemanager.ha.enabled': 'false',
           'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes': '',
           'yarn.scheduler.minimum-allocation-vcores': '1',
           'yarn.scheduler.maximum-allocation-vcores': '4',
@@ -1332,9 +1329,6 @@ class TestHDP26StackAdvisor(TestCase):
       'yarn-site': {
         'properties': {
           'hadoop.registry.rm.enabled': 'false',
-          'yarn.resourcemanager.recovery.enabled': 'false',
-          'yarn.nodemanager.recovery.enabled': 'false',
-          'yarn.resourcemanager.ha.enabled': 'false',
           'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes': '',
           'yarn.authorization-provider': 'org.apache.ranger.authorization.yarn.authorizer.RangerYarnAuthorizer',
           'yarn.acl.enable': 'true',
@@ -1442,9 +1436,6 @@ class TestHDP26StackAdvisor(TestCase):
       'yarn-site': {
         'properties': {
           'hadoop.registry.rm.enabled': 'false',
-          'yarn.resourcemanager.recovery.enabled': 'false',
-          'yarn.nodemanager.recovery.enabled': 'false',
-          'yarn.resourcemanager.ha.enabled': 'false',
           'yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes': '',
           'yarn.authorization-provider': 'org.apache.ranger.authorization.yarn.authorizer.RangerYarnAuthorizer',
           'yarn.acl.enable': 'true',


[15/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.4.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.4.json b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.4.json
deleted file mode 100755
index a2c1ae1..0000000
--- a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.4.json
+++ /dev/null
@@ -1,453 +0,0 @@
-{
-    "version": "1.0",
-    "stacks": [
-        {
-            "name": "HDP",
-            "old-version": "2.0",
-            "target-version": "2.2.4",
-            "options": {
-                "config-types": {
-                    "capacity-scheduler": {
-                        "merged-copy": "yes"
-                    },
-                    "cluster-env": {
-                        "merged-copy": "yes"
-                    },
-                    "core-site": {
-                        "merged-copy": "yes"
-                    },
-                    "flume-env": {
-                        "merged-copy": "yes"
-                    },
-                    "hadoop-env": {
-                        "merged-copy": "yes"
-                    },
-                    "hbase-env": {
-                        "merged-copy": "yes"
-                    },
-                    "hbase-site": {
-                        "merged-copy": "yes"
-                    },
-                    "hdfs-log4j": {
-                        "merged-copy": "yes"
-                    },
-                    "hdfs-site": {
-                        "merged-copy": "yes"
-                    },
-                    "hive-env": {
-                        "merged-copy": "yes"
-                    },
-                    "hive-site": {
-                        "merged-copy": "yes"
-                    },
-                    "mapred-env": {
-                        "merged-copy": "yes"
-                    },
-                    "mapred-site": {
-                        "merged-copy": "yes"
-                    },
-                    "oozie-env": {
-                        "merged-copy": "yes"
-                    },
-                    "oozie-site": {
-                        "merged-copy": "yes"
-                    },
-                    "webhcat-site": {
-                        "merged-copy": "yes"
-                    },
-                    "webhcat-log4j": {
-                        "merged-copy": "yes"
-                    },
-                    "yarn-site": {
-                        "merged-copy": "yes"
-                    },
-                    "pig-properties": {
-                        "merged-copy": "yes"
-                    }
-                }
-            },
-            "properties": {
-                "pig-properties": {
-                    "content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.\n# see bin/pig -help\n\n# brief logging (no ti
 mestamps)\nbrief=false\n\n# debug level, INFO is default\ndebug=INFO\n\n# verbose print all log messages to screen (default to print only INFO and above to screen)\nverbose=false\n\n# exectype local|mapreduce, mapreduce is default\nexectype=mapreduce\n\n# Enable insertion of information about script into hadoop job conf \npig.script.info.enabled=true\n\n# Do not spill temp files smaller than this size (bytes)\npig.spill.size.threshold=5000000\n\n# EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)\n# This should help reduce the number of files being spilled.\npig.spill.gc.activation.size=40000000\n\n# the following two parameters are to help estimate the reducer number\npig.exec.reducers.bytes.per.reducer=1000000000\npig.exec.reducers.max=999\n\n# Temporary location to store the intermediate data.\npig.temp.dir=/tmp/\n\n# Threshold for merging FRJoin fragment files\npig.files.concatenation.threshold=100\npig.optimistic.files.concatenation=fals
 e;\n\npig.disable.counter=false\n\n# Avoid pig failures when multiple jobs write to the same location\npig.location.check.strict=false\n\nhcat.bin=/usr/bin/hcat"
-                },
-                "capacity-scheduler": {
-                    "yarn.scheduler.capacity.resource-calculator": "org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator",
-                    "yarn.scheduler.capacity.root.accessible-node-labels": "*",
-                    "yarn.scheduler.capacity.root.accessible-node-labels.default.capacity": {"remove": "yes"},
-                    "yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity": {"remove": "yes"},
-                    "yarn.scheduler.capacity.root.default-node-label-expression": " "
-                },
-                "cluster-env": {
-                    "smokeuser_principal_name": {
-                        "remove": "yes"
-                    }
-                },
-                "core-site": {
-                    "hadoop.http.authentication.simple.anonymous.allowed": "true",
-                    "hadoop.proxyuser.falcon.groups": "users",
-                    "hadoop.proxyuser.falcon.hosts": "*",
-                    "hadoop.security.auth_to_local": "\n        DEFAULT"
-                },
-                "flume-env":{
-                    "content": "\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements.  See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# If this file is placed at FLUME_CONF_DIR/flume-env.sh, it will be sourced\n# during Flume startup.\n\n# Enviroment variables can be set here.\n\nexport JAVA_HOM
 E={{java_home}}\n\n# Give Flume more memory and pre-allocate, enable remote monitoring via JMX\n# export JAVA_OPTS=\"-Xms100m -Xmx2000m -Dcom.sun.management.jmxremote\"\n\n# Note that the Flume conf directory is always included in the classpath.\n# Add flume sink to classpath\nif [ -e \"/usr/lib/flume/lib/ambari-metrics-flume-sink.jar\" ]; then\n  export FLUME_CLASSPATH=$FLUME_CLASSPATH:/usr/lib/flume/lib/ambari-metrics-flume-sink.jar\nfi\n\nexport HIVE_HOME={{flume_hive_home}}\nexport HCAT_HOME={{flume_hcat_home}}"
-                },
-                "hadoop-env": {
-                    "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options
  appended to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGC
 DateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The f
 ollowing applies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# ex
 port HADOOP_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /us
 r/share/java/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION 
 $HADOOP_OPTS\"",
-                    "namenode_opt_maxnewsize": "256m",
-                    "namenode_opt_newsize": "256m"
-                },
-                "hbase-env": {
-                    "content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to
  enable JMX exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n# If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG
 _DIR={{log_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_RE
 GIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% else %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n{% endif %}"
-                },
-                "hbase-site": {
-                    "hbase.hregion.majorcompaction": "604800000",
-                    "hbase.hregion.memstore.block.multiplier": "4",
-                    "hbase.hstore.flush.retries.number": {
-                      "remove": "yes"
-                    },
-                    "hbase.hregion.majorcompaction.jitter": "0.50"
-                },
-                "hdfs-log4j": {
-                    "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop
 .root.logger=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.
 appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.secur
 ity.logger}\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesyste
 m.audit=false\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoo
 p.log.file}\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metri
 cs.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateChange=WARN"
-                },
-                "hdfs-site": {
-                    "dfs.datanode.max.transfer.threads": "16384",
-                    "dfs.namenode.handler.count": "100",
-                    "dfs.namenode.startup.delay.block.deletion.sec": "3600"
-                },
-                "hive-env": {
-                    "content": "\n if [ \"$SERVICE\" = \"cli\" ]; then\n   if [ -z \"$DEBUG\" ]; then\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit\"\n   else\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n   fi\n fi\n\n# The heap size of the jvm stared by hive shell script can be controlled via:\n\n# Larger heap size may be required when running queries over large number of files or partitions.\n# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be\n# appropriate for hive server (hwi etc).\n\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hive Configuration Directory can be controlled by:\nexport HIVE_CONF_DIR={{hive_config_dir}}\n\n# Folder containing extra libraries required f
 or hive compilation/execution can be controlled by:\nif [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n  if [ -f \"${HIVE_AUX_JARS_PATH}\" ]; then    \n    export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\n  elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n    export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n  fi\nelif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\nfi      \n\nexport METASTORE_PORT={{hive_metastore_port}}"
-                },
-                "hive-site": {
-                    "fs.file.impl.disable.cache": {
-                      "remove": "yes"
-                    },
-                    "fs.hdfs.impl.disable.cache": {
-                      "remove": "yes"
-                    },
-                    "hive.heapsize": {
-                        "remove": "yes"
-                    },
-                    "hive.optimize.mapjoin.mapreduce": {
-                        "remove": "yes"
-                    },
-                    "hive.server2.enable.impersonation": {
-                        "remove": "yes"
-                    },
-                    "datanucleus.cache.level2.type": "none",
-                    "hive.auto.convert.join.noconditionaltask.size": "238026752",
-                    "hive.auto.convert.sortmerge.join.to.mapjoin": "false",
-                    "hive.auto.convert.sortmerge.join.noconditionaltask": {
-                      "remove": "yes"
-                    },
-                    "hive.cbo.enable": "true",
-                    "hive.cli.print.header": "false",
-                    "hive.cluster.delegation.token.store.class": "org.apache.hadoop.hive.thrift.ZooKeeperTokenStore",
-                    "hive.cluster.delegation.token.store.zookeeper.znode": "/hive/cluster/delegation",
-                    "hive.compactor.abortedtxn.threshold": "1000",
-                    "hive.compactor.check.interval": "300L",
-                    "hive.compactor.delta.num.threshold": "10",
-                    "hive.compactor.delta.pct.threshold": "0.1f",
-                    "hive.compactor.initiator.on": "false",
-                    "hive.compactor.worker.threads": "0",
-                    "hive.compactor.worker.timeout": "86400L",
-                    "hive.compute.query.using.stats": "true",
-                    "hive.conf.restricted.list": "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role",
-                    "hive.convert.join.bucket.mapjoin.tez": "false",
-                    "hive.enforce.sortmergebucketmapjoin": "true",
-                    "hive.exec.compress.intermediate": "false",
-                    "hive.exec.compress.output": "false",
-                    "hive.exec.dynamic.partition": "true",
-                    "hive.exec.dynamic.partition.mode": "nonstrict",
-                    "hive.exec.failure.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook",
-                    "hive.exec.max.created.files": "100000",
-                    "hive.exec.max.dynamic.partitions": "5000",
-                    "hive.exec.max.dynamic.partitions.pernode": "2000",
-                    "hive.exec.orc.compression.strategy": "SPEED",
-                    "hive.exec.orc.default.compress": "ZLIB",
-                    "hive.exec.orc.default.stripe.size": "67108864",
-                    "hive.exec.parallel": "false",
-                    "hive.exec.parallel.thread.number": "8",
-                    "hive.exec.post.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook",
-                    "hive.exec.pre.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook",
-                    "hive.exec.reducers.bytes.per.reducer": "67108864",
-                    "hive.exec.reducers.max": "1009",
-                    "hive.exec.scratchdir": "/tmp/hive",
-                    "hive.exec.submit.local.task.via.child": "true",
-                    "hive.exec.submitviachild": "false",
-                    "hive.execution.engine": "tez",
-                    "hive.fetch.task.aggr": "false",
-                    "hive.fetch.task.conversion": "more",
-                    "hive.fetch.task.conversion.threshold": "1073741824",
-                    "hive.limit.optimize.enable": "true",
-                    "hive.limit.pushdown.memory.usage": "0.04",
-                    "hive.map.aggr.hash.force.flush.memory.threshold": "0.9",
-                    "hive.map.aggr.hash.min.reduction": "0.5",
-                    "hive.map.aggr.hash.percentmemory": "0.5",
-                    "hive.mapjoin.optimized.hashtable": "true",
-                    "hive.merge.mapfiles": "true",
-                    "hive.merge.mapredfiles": "false",
-                    "hive.merge.orcfile.stripe.level": "true",
-                    "hive.merge.rcfile.block.level": "true",
-                    "hive.merge.size.per.task": "256000000",
-                    "hive.merge.smallfiles.avgsize": "16000000",
-                    "hive.merge.tezfiles": "false",
-                    "hive.metastore.authorization.storage.checks": "false",
-                    "hive.metastore.client.connect.retry.delay": "5s",
-                    "hive.metastore.client.socket.timeout": "1800s",
-                    "hive.metastore.connect.retries": "24",
-                    "hive.metastore.failure.retries": "24",
-                    "hive.metastore.kerberos.keytab.file": "/etc/security/keytabs/hive.service.keytab",
-                    "hive.metastore.kerberos.principal": "hive/_HOST@EXAMPLE.COM",
-                    "hive.metastore.server.max.threads": "100000",
-                    "hive.optimize.constant.propagation": "true",
-                    "hive.optimize.metadataonly": "true",
-                    "hive.optimize.null.scan": "true",
-                    "hive.optimize.reducededuplication.min.reducer": "4",
-                    "hive.optimize.sort.dynamic.partition": "false",
-                    "hive.orc.compute.splits.num.threads": "10",
-                    "hive.orc.splits.include.file.footer": "false",
-                    "hive.prewarm.enabled": "false",
-                    "hive.prewarm.numcontainers": "10",
-                    "hive.security.metastore.authenticator.manager": "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator",
-                    "hive.security.metastore.authorization.auth.reads": "true",
-                    "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider,org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly",
-                    "hive.server2.allow.user.substitution": "true",
-                    "hive.server2.authentication.spnego.keytab": "HTTP/_HOST@EXAMPLE.COM",
-                    "hive.server2.authentication.spnego.principal": "/etc/security/keytabs/spnego.service.keytab",
-                    "hive.server2.logging.operation.enabled": "true",
-                    "hive.server2.logging.operation.log.location": "/tmp/hive/operation_logs",
-                    "hive.server2.support.dynamic.service.discovery": "true",
-                    "hive.server2.table.type.mapping": "CLASSIC",
-                    "hive.server2.tez.default.queues": "default",
-                    "hive.server2.tez.initialize.default.sessions": "false",
-                    "hive.server2.tez.sessions.per.default.queue": "1",
-                    "hive.server2.thrift.http.path": "cliservice",
-                    "hive.server2.thrift.http.port": "10001",
-                    "hive.server2.thrift.max.worker.threads": "500",
-                    "hive.server2.thrift.sasl.qop": "auth",
-                    "hive.server2.use.SSL": "false",
-                    "hive.smbjoin.cache.rows": "10000",
-                    "hive.stats.autogather": "true",
-                    "hive.stats.dbclass": "fs",
-                    "hive.stats.fetch.column.stats": "false",
-                    "hive.stats.fetch.partition.stats": "true",
-                    "hive.support.concurrency": "false",
-                    "hive.tez.auto.reducer.parallelism": "false",
-                    "hive.tez.container.size": "682",
-                    "hive.tez.cpu.vcores": "-1",
-                    "hive.tez.dynamic.partition.pruning": "true",
-                    "hive.tez.dynamic.partition.pruning.max.data.size": "104857600",
-                    "hive.tez.dynamic.partition.pruning.max.event.size": "1048576",
-                    "hive.tez.input.format": "org.apache.hadoop.hive.ql.io.HiveInputFormat",
-                    "hive.tez.java.opts": "-server -Xmx546m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps",
-                    "hive.tez.log.level": "INFO",
-                    "hive.tez.max.partition.factor": "2.0",
-                    "hive.tez.min.partition.factor": "0.25",
-                    "hive.tez.smb.number.waves": "0.5",
-                    "hive.txn.manager": "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager",
-                    "hive.txn.max.open.batch": "1000",
-                    "hive.txn.timeout": "300",
-                    "hive.user.install.directory": "/user/",
-                    "hive.vectorized.execution.enabled": "true",
-                    "hive.vectorized.execution.reduce.enabled": "false",
-                    "hive.vectorized.groupby.checkinterval": "4096",
-                    "hive.vectorized.groupby.flush.percent": "0.1",
-                    "hive.vectorized.groupby.maxentries": "100000",
-                    "hive.zookeeper.client.port": "2181",
-                    "hive.zookeeper.namespace": "hive_zookeeper_namespace"
-                },
-                "hiveserver2-site": {
-                    "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator",
-                    "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"
-                },
-                "mapred-env": {
-                    "content": "\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n\nexport HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n\nexport HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n\n#export HADOOP_JOB_HISTORYSERVER_OPTS=\n#export HADOOP_MAPRED_LOG_DIR=\"\" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.\n#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.\n#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.\n#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default\n#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\""
-                },
-                "mapred-site": {
-                    "mapreduce.admin.map.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
-                    "mapreduce.admin.reduce.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
-                    "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
-                    "mapreduce.application.classpath": "$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
-                    "mapreduce.application.framework.path": "/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework",
-                    "mapreduce.job.emit-timeline-data": "false",
-                    "mapreduce.jobhistory.bind-host": "0.0.0.0",
-                    "mapreduce.reduce.shuffle.fetch.retry.enabled": "1",
-                    "mapreduce.reduce.shuffle.fetch.retry.interval-ms": "1000",
-                    "mapreduce.reduce.shuffle.fetch.retry.timeout-ms": "30000",
-                    "yarn.app.mapreduce.am.admin-command-opts": "-Dhdp.version=${hdp.version}",
-                    "yarn.app.mapreduce.am.command-opts": "-Xmx546m -Dhdp.version=${hdp.version}"
-                },
-                "oozie-env": {
-                    "content": "\n#!/bin/bash\n\nif [ -d \"/usr/lib/bigtop-tomcat\" ]; then\n  export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}\n  export CATALINA_BASE=${CATALINA_BASE:-{{oozie_server_dir}}}\n  export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}\n  export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat\nfi\n\n#Set JAVA HOME\nexport JAVA_HOME={{java_home}}\n\nexport JRE_HOME=${JAVA_HOME}\n\n# Set Oozie specific environment variables here.\n\n# Settings for the Embedded Tomcat that runs Oozie\n# Java System properties for Oozie should be specified in this variable\n#\n# export CATALINA_OPTS=\n\n# Oozie configuration file to load from Oozie configuration directory\n#\n# export OOZIE_CONFIG_FILE=oozie-site.xml\n\n# Oozie logs directory\n#\nexport OOZIE_LOG={{oozie_log_dir}}\n\n# Oozie pid directory\n#\nexport CATALINA_PID={{pid_file}}\n\n#Location of the data for oozie\nexport OOZIE_DATA={{oozie_data_dir}}\n\n# Oozie Log4J configuration file to load from Oozie confi
 guration directory\n#\n# export OOZIE_LOG4J_FILE=oozie-log4j.properties\n\n# Reload interval of the Log4J configuration file, in seconds\n#\n# export OOZIE_LOG4J_RELOAD=10\n\n# The port Oozie server runs\n#\nexport OOZIE_HTTP_PORT={{oozie_server_port}}\n\n# The admin port Oozie server runs\n#\nexport OOZIE_ADMIN_PORT={{oozie_server_admin_port}}\n\n# The host name Oozie server runs on\n#\n# export OOZIE_HTTP_HOSTNAME=`hostname -f`\n\n# The base URL for callback URLs to Oozie\n#\n# export OOZIE_BASE_URL=\"http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie\"\nexport JAVA_LIBRARY_PATH={{hadoop_lib_home}}/native/Linux-amd64-64\n\n# At least 1 minute of retry time to account for server downtime during\n# upgrade/downgrade\nexport OOZIE_CLIENT_OPTS=\"${OOZIE_CLIENT_OPTS} -Doozie.connection.retry.count=5 \"\n\n# This is needed so that Oozie does not run into OOM or GC Overhead limit\n# exceeded exceptions. If the oozie server is handling large number of\n# workflows/coordinator jobs, th
 e memory settings may need to be revised\nexport CATALINA_OPTS=\"${CATALINA_OPTS} -Xmx2048m -XX:MaxPermSize=256m \"",
-                    "oozie_ambari_database":{
-                        "remove": "yes"
-                    }
-                },
-                "oozie-site": {
-                    "oozie.authentication.simple.anonymous.allowed": "true",
-                    "oozie.service.ELService.ext.functions.coord-action-create": "\n      now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,\n      today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,\n      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,\n      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,\n      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,\n      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,\n      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,\n      latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,\n      future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,\n      formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,\n      user=org.apache.oozie.coord.CoordELFunctions#coord_user",
-                    "oozie.service.ELService.ext.functions.coord-action-create-inst": "\n      now=org.apache.oozie.extensions.OozieELExtensions#ph2_now_inst,\n      today=org.apache.oozie.extensions.OozieELExtensions#ph2_today_inst,\n      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday_inst,\n      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth_inst,\n      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth_inst,\n      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear_inst,\n      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear_inst,\n      latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,\n      future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,\n      formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,\n      user=org.apache.oozie.coord.CoordELFunctions#coord_user",
-                    "oozie.service.ELService.ext.functions.coord-action-start": "\n      now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,\n      today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,\n      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,\n      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,\n      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,\n      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,\n      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,\n      latest=org.apache.oozie.coord.CoordELFunctions#ph3_coord_latest,\n      future=org.apache.oozie.coord.CoordELFunctions#ph3_coord_future,\n      dataIn=org.apache.oozie.extensions.OozieELExtensions#ph3_dataIn,\n      instanceTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_nominalTime,\n      dateOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dateOffset,\n 
      formatTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_formatTime,\n      user=org.apache.oozie.coord.CoordELFunctions#coord_user",
-                    "oozie.service.ELService.ext.functions.coord-job-submit-data": "\n      now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,\n      today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,\n      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,\n      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,\n      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,\n      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,\n      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,\n      dataIn=org.apache.oozie.extensions.OozieELExtensions#ph1_dataIn_echo,\n      instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_wrap,\n      formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,\n      dateOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dateOffset_echo,\n  
     user=org.apache.oozie.coord.CoordELFunctions#coord_user",
-                    "oozie.service.ELService.ext.functions.coord-job-submit-instances": "\n      now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,\n      today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,\n      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,\n      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,\n      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,\n      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,\n      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,\n      formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,\n      latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,\n      future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo",
-                    "oozie.service.ELService.ext.functions.coord-sla-create": "\n      instanceTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_nominalTime,\n      user=org.apache.oozie.coord.CoordELFunctions#coord_user",
-                    "oozie.service.ELService.ext.functions.coord-sla-submit": "\n      instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_fixed,\n      user=org.apache.oozie.coord.CoordELFunctions#coord_user",
-                    "oozie.service.HadoopAccessorService.supported.filesystems": "*",
-                    "oozie.service.SchemaService.wf.ext.schemas": "shell-action-0.1.xsd,shell-action-0.2.xsd,shell-action-0.3.xsd,email-action-0.1.xsd,email-action-0.2.xsd,hive-action-0.2.xsd,hive-action-0.3.xsd,hive-action-0.4.xsd,hive-action-0.5.xsd,sqoop-action-0.2.xsd,sqoop-action-0.3.xsd,sqoop-action-0.4.xsd,ssh-action-0.1.xsd,ssh-action-0.2.xsd,distcp-action-0.1.xsd,distcp-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd",
-                    "oozie.service.coord.check.maximum.frequency": "false",
-                    "oozie.services": "\n      org.apache.oozie.service.SchedulerService,\n      org.apache.oozie.service.InstrumentationService,\n      org.apache.oozie.service.MemoryLocksService,\n      org.apache.oozie.service.UUIDService,\n      org.apache.oozie.service.ELService,\n      org.apache.oozie.service.AuthorizationService,\n      org.apache.oozie.service.UserGroupInformationService,\n      org.apache.oozie.service.HadoopAccessorService,\n      org.apache.oozie.service.JobsConcurrencyService,\n      org.apache.oozie.service.URIHandlerService,\n      org.apache.oozie.service.DagXLogInfoService,\n      org.apache.oozie.service.SchemaService,\n      org.apache.oozie.service.LiteWorkflowAppService,\n      org.apache.oozie.service.JPAService,\n      org.apache.oozie.service.StoreService,\n      org.apache.oozie.service.CoordinatorStoreService,\n      org.apache.oozie.service.SLAStoreService,\n      org.apache.oozie.service.DBLiteWorkflowStoreService,\n      org.apache.oozie
 .service.CallbackService,\n      org.apache.oozie.service.ShareLibService,\n      org.apache.oozie.service.CallableQueueService,\n      org.apache.oozie.service.ActionService,\n      org.apache.oozie.service.ActionCheckerService,\n      org.apache.oozie.service.RecoveryService,\n      org.apache.oozie.service.PurgeService,\n      org.apache.oozie.service.CoordinatorEngineService,\n      org.apache.oozie.service.BundleEngineService,\n      org.apache.oozie.service.DagEngineService,\n      org.apache.oozie.service.CoordMaterializeTriggerService,\n      org.apache.oozie.service.StatusTransitService,\n      org.apache.oozie.service.PauseTransitService,\n      org.apache.oozie.service.GroupsService,\n      org.apache.oozie.service.ProxyUserService,\n      org.apache.oozie.service.XLogStreamingService,\n      org.apache.oozie.service.JvmPauseMonitorService",
-                    "oozie.services.ext": "org.apache.oozie.service.JMSAccessorService,org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService"
-                },
-                "ranger-hbase-plugin-properties": {
-                    "REPOSITORY_CONFIG_PASSWORD": "hbase",
-                    "REPOSITORY_CONFIG_USERNAME": "hbase",
-                    "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
-                    "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
-                    "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
-                    "SSL_TRUSTSTORE_PASSWORD": "changeit",
-                    "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true",
-                    "XAAUDIT.DB.IS_ENABLED": "true",
-                    "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
-                    "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
-                    "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
-                    "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
-                    "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
-                    "XAAUDIT.HDFS.IS_ENABLED": "false",
-                    "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
-                    "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
-                    "common.name.for.certificate": "-",
-                    "policy_user": "ambari-qa",
-                    "ranger-hbase-plugin-enabled": "No"
-                },
-                "ranger-hdfs-plugin-properties": {
-                    "REPOSITORY_CONFIG_PASSWORD": "hadoop",
-                    "REPOSITORY_CONFIG_USERNAME": "hadoop",
-                    "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
-                    "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
-                    "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
-                    "SSL_TRUSTSTORE_PASSWORD": "changeit",
-                    "XAAUDIT.DB.IS_ENABLED": "true",
-                    "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
-                    "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
-                    "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
-                    "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
-                    "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
-                    "XAAUDIT.HDFS.IS_ENABLED": "false",
-                    "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
-                    "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
-                    "common.name.for.certificate": "-",
-                    "hadoop.rpc.protection": "-",
-                    "policy_user": "ambari-qa",
-                    "ranger-hdfs-plugin-enabled": "No"
-                },
-                "ranger-hive-plugin-properties": {
-                    "REPOSITORY_CONFIG_PASSWORD": "hive",
-                    "REPOSITORY_CONFIG_USERNAME": "hive",
-                    "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
-                    "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
-                    "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
-                    "SSL_TRUSTSTORE_PASSWORD": "changeit",
-                    "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true",
-                    "XAAUDIT.DB.IS_ENABLED": "true",
-                    "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
-                    "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
-                    "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
-                    "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
-                    "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
-                    "XAAUDIT.HDFS.IS_ENABLED": "false",
-                    "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
-                    "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
-                    "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
-                    "common.name.for.certificate": "-",
-                    "jdbc.driverClassName": "org.apache.hive.jdbc.HiveDriver",
-                    "policy_user": "ambari-qa",
-                    "ranger-hive-plugin-enabled": "No"
-                },
-                "webhcat-log4j": {
-                    "content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# Define some default values that can be overridden by system properties\nwebhcat.root.logger = INFO, standard\nwebhcat.log.dir = .\nwebhcat.log.file = we
 bhcat.log\n\nlog4j.rootLogger = ${webhcat.root.logger}\n\n# Logging Threshold\nlog4j.threshhold = DEBUG\n\nlog4j.appender.standard  =  org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.standard.File = ${webhcat.log.dir}/${webhcat.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern = .yyyy-MM-dd\n\nlog4j.appender.DRFA.layout = org.apache.log4j.PatternLayout\n\nlog4j.appender.standard.layout = org.apache.log4j.PatternLayout\nlog4j.appender.standard.layout.conversionPattern = %-5p | %d{DATE} | %c | %m%n\n\n# Class logging settings\nlog4j.logger.com.sun.jersey = DEBUG\nlog4j.logger.com.sun.jersey.spi.container.servlet.WebComponent = ERROR\nlog4j.logger.org.apache.hadoop = INFO\nlog4j.logger.org.apache.hadoop.conf = WARN\nlog4j.logger.org.apache.zookeeper = WARN\nlog4j.logger.org.eclipse.jetty = INFO"
-                },
-                "webhcat-site": {
-                    "templeton.hadoop": "/usr/hdp/current/hadoop-client/bin/hadoop",
-                    "templeton.hcat": "/usr/hdp/current/hive-client/bin/hcat",
-                    "templeton.hive.archive": "hdfs:///hdp/apps/${hdp.version}/hive/hive.tar.gz",
-                    "templeton.jar": "/usr/hdp/current/hive-webhcat/share/webhcat/svr/lib/hive-webhcat-*.jar",
-                    "templeton.libjars": "/usr/hdp/current/zookeeper-client/zookeeper.jar",
-                    "templeton.pig.archive": "hdfs:///hdp/apps/${hdp.version}/pig/pig.tar.gz",
-                    "templeton.sqoop.archive": "hdfs:///hdp/apps/${hdp.version}/sqoop/sqoop.tar.gz",
-                    "templeton.sqoop.home": "sqoop.tar.gz/sqoop",
-                    "templeton.sqoop.path": "sqoop.tar.gz/sqoop/bin/sqoop",
-                    "templeton.streaming.jar": "hdfs:///hdp/apps/${hdp.version}/mapreduce/hadoop-streaming.jar"
-                },
-                "yarn-site": {
-                    "hadoop.registry.rm.enabled": "false",
-                    "yarn.application.classpath": "$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*",
-                    "yarn.client.nodemanager-connect.max-wait-ms": "60000",
-                    "yarn.client.nodemanager-connect.retry-interval-ms": "10000",
-                    "yarn.node-labels.fs-store.retry-policy-spec": "2000, 500",
-                    "yarn.node-labels.fs-store.root-dir": "/system/yarn/node-labels",
-                    "yarn.node-labels.manager-class": "org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager",
-                    "yarn.nodemanager.bind-host": "0.0.0.0",
-                    "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage": "90",
-                    "yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb": "1000",
-                    "yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "hadoop-yarn",
-                    "yarn.nodemanager.linux-container-executor.cgroups.mount": "false",
-                    "yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage": "false",
-                    "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler",
-                    "yarn.nodemanager.log-aggregation.debug-enabled": "false",
-                    "yarn.nodemanager.log-aggregation.num-log-files-per-app": "30",
-                    "yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds": "-1",
-                    "yarn.nodemanager.recovery.dir": "{{yarn_log_dir_prefix}}/nodemanager/recovery-state",
-                    "yarn.nodemanager.recovery.enabled": "true",
-                    "yarn.nodemanager.resource.cpu-vcores": "1",
-                    "yarn.nodemanager.resource.percentage-physical-cpu-limit": "100",
-                    "yarn.resourcemanager.bind-host": "0.0.0.0",
-                    "yarn.resourcemanager.connect.max-wait.ms": "900000",
-                    "yarn.resourcemanager.connect.retry-interval.ms": "30000",
-                    "yarn.resourcemanager.fs.state-store.retry-policy-spec": "2000, 500",
-                    "yarn.resourcemanager.fs.state-store.uri": " ",
-                    "yarn.resourcemanager.ha.enabled": "false",
-                    "yarn.resourcemanager.recovery.enabled": "true",
-                    "yarn.resourcemanager.state-store.max-completed-applications": "${yarn.resourcemanager.max-completed-applications}",
-                    "yarn.resourcemanager.store.class": "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore",
-                    "yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size": "10",
-                    "yarn.resourcemanager.system-metrics-publisher.enabled": "true",
-                    "yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled": "false",
-                    "yarn.resourcemanager.work-preserving-recovery.enabled": "true",
-                    "yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms": "10000",
-                    "yarn.resourcemanager.zk-acl": "world:anyone:rwcda",
-                    "yarn.resourcemanager.zk-num-retries": "1000",
-                    "yarn.resourcemanager.zk-retry-interval-ms": "1000",
-                    "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore",
-                    "yarn.resourcemanager.zk-timeout-ms": "10000",
-                    "yarn.timeline-service.bind-host": "0.0.0.0",
-                    "yarn.timeline-service.client.max-retries": "30",
-                    "yarn.timeline-service.client.retry-interval-ms": "1000",
-                    "yarn.timeline-service.enabled": "true",
-                    "yarn.timeline-service.generic-application-history.store-class": "org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore",
-                    "yarn.timeline-service.http-authentication.simple.anonymous.allowed": "true",
-                    "yarn.timeline-service.http-authentication.type": "simple",
-                    "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
-                    "yarn.timeline-service.leveldb-timeline-store.read-cache-size": "104857600",
-                    "yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size": "10000",
-                    "yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size": "10000",
-                    "yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms": "300000",
-                    "yarn.timeline-service.store-class": "org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore",
-                    "yarn.timeline-service.ttl-enable": "true",
-                    "yarn.timeline-service.ttl-ms": "2678400000"
-                }
-            }
-        }
-    ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.json b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.json
deleted file mode 100644
index 798e276..0000000
--- a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.0_to_2.2.json
+++ /dev/null
@@ -1,275 +0,0 @@
-{
-  "version": "1.0",
-  "stacks": [
-    {
-      "name": "HDP",
-      "old-version": "2.0",
-      "target-version": "2.2",
-      "options": {
-        "config-types": {
-          "core-site": {
-            "merged-copy": "yes"
-          },
-          "hdfs-site": {
-            "merged-copy": "yes"
-          },
-          "hbase-site": {
-            "merged-copy": "yes"
-          },
-          "hive-site": {
-            "merged-copy": "yes"
-          },
-          "yarn-site": {
-            "merged-copy": "yes"
-          },
-          "mapred-site": {
-            "merged-copy": "yes"
-          },
-          "oozie-site": {
-            "merged-copy": "yes"
-          },
-          "webhcat-site": {
-            "merged-copy": "yes"
-          }
-        }
-      },
-      "properties": {
-        "webhcat-site": {
-          "templeton.storage.class": "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage",
-          "templeton.port": "50111"
-        },
-        "oozie-site": {
-          "oozie.authentication.simple.anonymous.allowed": "true",
-          "oozie.service.coord.check.maximum.frequency": "false",
-          "oozie.service.ELService.ext.functions.coord-action-create": "now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,user=org.apache.oozie.coord.CoordELFunctions#coord_user",
-          "oozie.service.ELService.ext.functions.coord-action-create-inst": "now=org.apache.oozie.extensions.OozieELExtensions#ph2_now_inst,today=org.apache.oozie.extensions.OozieELExtensions#ph2_today_inst,yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday_inst,currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth_inst,lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth_inst,currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear_inst,lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear_inst,latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,user=org.apache.oozie.coord.CoordELFunctions#coord_user",
-          "oozie.service.ELService.ext.functions.coord-action-start": "now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,latest=org.apache.oozie.coord.CoordELFunctions#ph3_coord_latest,future=org.apache.oozie.coord.CoordELFunctions#ph3_coord_future,dataIn=org.apache.oozie.extensions.OozieELExtensions#ph3_dataIn,instanceTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_nominalTime,dateOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dateOffset,formatTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_formatTime,user=org.apache.oozie.coord.CoordELFu
 nctions#coord_user",
-          "oozie.service.ELService.ext.functions.coord-job-submit-data": "now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,dataIn=org.apache.oozie.extensions.OozieELExtensions#ph1_dataIn_echo,instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_wrap,formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,dateOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dateOffset_echo,user=org.apache.oozie.coord.CoordELFunctions#coord_user",
-          "oozie.service.ELService.ext.functions.coord-job-submit-instances": "now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo",
-          "oozie.service.ELService.ext.functions.coord-sla-create": "instanceTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_nominalTime,user=org.apache.oozie.coord.CoordELFunctions#coord_user",
-          "oozie.service.ELService.ext.functions.coord-sla-submit": "instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_fixed,user=org.apache.oozie.coord.CoordELFunctions#coord_user",
-          "oozie.service.HadoopAccessorService.kerberos.enabled": "false",
-          "oozie.service.HadoopAccessorService.supported.filesystems": "*",
-          "oozie.service.SchemaService.wf.ext.schemas": "shell-action-0.1.xsd,shell-action-0.2.xsd,shell-action-0.3.xsd,email-action-0.1.xsd,email-action-0.2.xsd,hive-action-0.2.xsd,hive-action-0.3.xsd,hive-action-0.4.xsd,hive-action-0.5.xsd,sqoop-action-0.2.xsd,sqoop-action-0.3.xsd,sqoop-action-0.4.xsd,ssh-action-0.1.xsd,ssh-action-0.2.xsd,distcp-action-0.1.xsd,distcp-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd",
-          "oozie.services.ext": "org.apache.oozie.service.JMSAccessorService,org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService"
-        },
-        "mapred-site": {
-          "mapreduce.job.emit-timeline-data": "false",
-          "mapreduce.jobhistory.bind-host": "0.0.0.0",
-          "mapreduce.reduce.shuffle.fetch.retry.enabled": "1",
-          "mapreduce.reduce.shuffle.fetch.retry.interval-ms": "1000",
-          "mapreduce.reduce.shuffle.fetch.retry.timeout-ms": "30000",
-          "mapreduce.admin.map.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
-          "mapreduce.admin.reduce.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
-          "mapreduce.map.java.opts": "-Xmx546m",
-          "mapreduce.map.memory.mb": "682",
-          "mapreduce.reduce.java.opts": "-Xmx546m",
-          "mapreduce.task.io.sort.mb": "273",
-          "yarn.app.mapreduce.am.admin-command-opts": "-Dhdp.version=${hdp.version}",
-          "yarn.app.mapreduce.am.command-opts": "-Xmx546m -Dhdp.version=${hdp.version}",
-          "yarn.app.mapreduce.am.resource.mb": "682",
-          "mapreduce.application.framework.path": "/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework",
-          "mapreduce.application.classpath": "$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
-          "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64"
-        },
-        "yarn-site": {
-          "yarn.timeline-service.leveldb-timeline-store.path": "/var/log/hadoop-yarn/timeline",
-          "yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms": "300000",
-          "yarn.timeline-service.store-class": "org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore",
-          "yarn.timeline-service.ttl-enable": "true",
-          "yarn.timeline-service.ttl-ms": "2678400000",
-          "yarn.timeline-service.generic-application-history.store-class": "org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore",
-          "hadoop.registry.rm.enabled": "false",
-          "yarn.client.nodemanager-connect.max-wait-ms": "900000",
-          "yarn.client.nodemanager-connect.retry-interval-ms": "10000",
-          "yarn.node-labels.fs-store.retry-policy-spec": "2000, 500",
-          "yarn.node-labels.fs-store.root-dir": "/system/yarn/node-labels",
-          "yarn.node-labels.manager-class": "org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager",
-          "yarn.nodemanager.bind-host": "0.0.0.0",
-          "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage": "90",
-          "yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb": "1000",
-          "yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "hadoop-yarn",
-          "yarn.nodemanager.linux-container-executor.cgroups.mount": "false",
-          "yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage": "false",
-          "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler",
-          "yarn.nodemanager.log-aggregation.debug-enabled": "false",
-          "yarn.nodemanager.log-aggregation.num-log-files-per-app": "30",
-          "yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds": "-1",
-          "yarn.nodemanager.recovery.dir": "/var/log/hadoop-yarn/nodemanager/recovery-state",
-          "yarn.nodemanager.recovery.enabled": "false",
-          "yarn.nodemanager.resource.cpu-vcores": "1",
-          "yarn.nodemanager.resource.percentage-physical-cpu-limit": "100",
-          "yarn.resourcemanager.bind-host": "0.0.0.0",
-          "yarn.resourcemanager.connect.max-wait.ms": "900000",
-          "yarn.resourcemanager.connect.retry-interval.ms": "30000",
-          "yarn.resourcemanager.fs.state-store.retry-policy-spec": "2000, 500",
-          "yarn.resourcemanager.fs.state-store.uri": " ",
-          "yarn.resourcemanager.ha.enabled": "false",
-          "yarn.resourcemanager.recovery.enabled": "false",
-          "yarn.resourcemanager.state-store.max-completed-applications": "${yarn.resourcemanager.max-completed-applications}",
-          "yarn.resourcemanager.store.class": "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore",
-          "yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size": "10",
-          "yarn.resourcemanager.system-metrics-publisher.enabled": "true",
-          "yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled": "false",
-          "yarn.resourcemanager.work-preserving-recovery.enabled": "false",
-          "yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms": "10000",
-          "yarn.resourcemanager.zk-acl": "world:anyone:rwcda",
-          "yarn.resourcemanager.zk-address": {
-            "value": "{ZOOKEEPER_QUORUM}",
-            "template": "yes"
-          },
-          "yarn.resourcemanager.zk-num-retries": "1000",
-          "yarn.resourcemanager.zk-retry-interval-ms": "1000",
-          "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore",
-          "yarn.resourcemanager.zk-timeout-ms": "10000",
-          "yarn.timeline-service.bind-host": "0.0.0.0",
-          "yarn.timeline-service.client.max-retries": "30",
-          "yarn.timeline-service.client.retry-interval-ms": "1000",
-          "yarn.timeline-service.enabled": "true",
-          "yarn.timeline-service.http-authentication.simple.anonymous.allowed": "true",
-          "yarn.timeline-service.http-authentication.type": "simple",
-          "yarn.timeline-service.leveldb-timeline-store.read-cache-size": "104857600",
-          "yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size": "10000",
-          "yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size": "10000"
-        },
-        "hive-site": {
-          "hive.execution.engine": "mr",
-          "hive.exec.failure.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook",
-          "hive.exec.post.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook",
-          "hive.exec.pre.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook",
-          "datanucleus.cache.level2.type": "none",
-          "hive.auto.convert.sortmerge.join.to.mapjoin": "false",
-          "hive.cbo.enable": "true",
-          "hive.cli.print.header": "false",
-          "hive.cluster.delegation.token.store.class": "org.apache.hadoop.hive.thrift.ZooKeeperTokenStore",
-          "hive.cluster.delegation.token.store.zookeeper.znode": "/hive/cluster/delegation",
-          "hive.compactor.abortedtxn.threshold": "1000",
-          "hive.compactor.check.interval": "300L",
-          "hive.compactor.delta.num.threshold": "10",
-          "hive.compactor.delta.pct.threshold": "0.1f",
-          "hive.compactor.initiator.on": "false",
-          "hive.compactor.worker.threads": "0",
-          "hive.compactor.worker.timeout": "86400L",
-          "hive.compute.query.using.stats": "true",
-          "hive.conf.restricted.list": "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role",
-          "hive.convert.join.bucket.mapjoin.tez": "false",
-          "hive.enforce.sortmergebucketmapjoin": "true",
-          "hive.exec.compress.intermediate": "false",
-          "hive.exec.compress.output": "false",
-          "hive.exec.dynamic.partition": "true",
-          "hive.exec.dynamic.partition.mode": "nonstrict",
-          "hive.exec.max.created.files": "100000",
-          "hive.exec.max.dynamic.partitions": "5000",
-          "hive.exec.max.dynamic.partitions.pernode": "2000",
-          "hive.exec.orc.compression.strategy": "SPEED",
-          "hive.exec.orc.default.compress": "ZLIB",
-          "hive.exec.orc.default.stripe.size": "67108864",
-          "hive.exec.parallel": "false",
-          "hive.exec.parallel.thread.number": "8",
-          "hive.exec.reducers.bytes.per.reducer": "67108864",
-          "hive.exec.reducers.max": "1009",
-          "hive.exec.scratchdir": "/tmp/hive",
-          "hive.exec.submit.local.task.via.child": "true",
-          "hive.exec.submitviachild": "false",
-          "hive.fetch.task.aggr": "false",
-          "hive.fetch.task.conversion": "more",
-          "hive.fetch.task.conversion.threshold": "1073741824",
-          "hive.limit.optimize.enable": "true",
-          "hive.limit.pushdown.memory.usage": "0.04",
-          "hive.map.aggr.hash.force.flush.memory.threshold": "0.9",
-          "hive.map.aggr.hash.min.reduction": "0.5",
-          "hive.map.aggr.hash.percentmemory": "0.5",
-          "hive.mapjoin.optimized.hashtable": "true",
-          "hive.merge.mapfiles": "true",
-          "hive.merge.mapredfiles": "false",
-          "hive.merge.orcfile.stripe.level": "true",
-          "hive.merge.rcfile.block.level": "true",
-          "hive.merge.size.per.task": "256000000",
-          "hive.merge.smallfiles.avgsize": "16000000",
-          "hive.merge.tezfiles": "false",
-          "hive.metastore.authorization.storage.checks": "false",
-          "hive.metastore.client.connect.retry.delay": "5s",
-          "hive.metastore.connect.retries": "24",
-          "hive.metastore.failure.retries": "24",
-          "hive.metastore.kerberos.keytab.file": "/etc/security/keytabs/hive.service.keytab",
-          "hive.metastore.kerberos.principal": "hive/_HOST@EXAMPLE.COM",
-          "hive.metastore.server.max.threads": "100000",
-          "hive.optimize.constant.propagation": "true",
-          "hive.optimize.metadataonly": "true",
-          "hive.optimize.null.scan": "true",
-          "hive.optimize.sort.dynamic.partition": "false",
-          "hive.orc.compute.splits.num.threads": "10",
-          "hive.orc.splits.include.file.footer": "false",
-          "hive.prewarm.enabled": "false",
-          "hive.prewarm.numcontainers": "10",
-          "hive.security.metastore.authenticator.manager": "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator",
-          "hive.security.metastore.authorization.auth.reads": "true",
-          "hive.server2.allow.user.substitution": "true",
-          "hive.server2.authentication.spnego.keytab": "HTTP/_HOST@EXAMPLE.COM",
-          "hive.server2.authentication.spnego.principal": "/etc/security/keytabs/spnego.service.keytab",
-          "hive.server2.logging.operation.enabled": "true",
-          "hive.server2.logging.operation.log.location": "/tmp/hive/operation_logs",
-          "hive.server2.table.type.mapping": "CLASSIC",
-          "hive.server2.tez.default.queues": "default",
-          "hive.server2.tez.sessions.per.default.queue": "1",
-          "hive.server2.thrift.http.path": "cliservice",
-          "hive.server2.thrift.http.port": "10001",
-          "hive.server2.thrift.max.worker.threads": "500",
-          "hive.server2.thrift.sasl.qop": "auth",
-          "hive.server2.transport.mode": "binary",
-          "hive.server2.use.SSL": "false",
-          "hive.smbjoin.cache.rows": "10000",
-          "hive.stats.autogather": "true",
-          "hive.stats.dbclass": "fs",
-          "hive.stats.fetch.column.stats": "false",
-          "hive.stats.fetch.partition.stats": "true",
-          "hive.support.concurrency": "false",
-          "hive.tez.auto.reducer.parallelism": "false",
-          "hive.tez.cpu.vcores": "-1",
-          "hive.tez.dynamic.partition.pruning": "true",
-          "hive.tez.dynamic.partition.pruning.max.data.size": "104857600",
-          "hive.tez.dynamic.partition.pruning.max.event.size": "1048576",
-          "hive.tez.input.format": "org.apache.hadoop.hive.ql.io.HiveInputFormat",
-          "hive.tez.log.level": "INFO",
-          "hive.tez.max.partition.factor": "2.0",
-          "hive.tez.min.partition.factor": "0.25",
-          "hive.tez.smb.number.waves": "0.5",
-          "hive.txn.manager": "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager",
-          "hive.txn.max.open.batch": "1000",
-          "hive.txn.timeout": "300",
-          "hive.user.install.directory": "/user/",
-          "hive.vectorized.execution.reduce.enabled": "false",
-          "hive.vectorized.groupby.checkinterval": "4096",
-          "hive.vectorized.groupby.flush.percent": "0.1",
-          "hive.vectorized.groupby.maxentries": "100000",
-          "hive.zookeeper.client.port": "2181",
-          "hive.zookeeper.namespace": "hive_zookeeper_namespace",
-          "hive.auto.convert.join.noconditionaltask.size": "238026752",
-          "hive.metastore.client.socket.timeout": "1800s",
-          "hive.optimize.reducededuplication.min.reducer": "4",
-          "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory",
-          "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider,org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly",
-          "hive.server2.support.dynamic.service.discovery": "true",
-          "hive.tez.container.size": "682",
-          "hive.tez.java.opts": "-server -Xmx546m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps",
-          "fs.file.impl.disable.cache": "true",
-          "fs.hdfs.impl.disable.cache": "true"
-        },
-        "core-site": {
-          "hadoop.proxyuser.falcon.groups": "users",
-          "hadoop.proxyuser.falcon.hosts": "*"
-        },
-        "hdfs-site": {
-          "dfs.namenode.startup.delay.block.deletion.sec": "3600",
-          "dfs.datanode.max.transfer.threads": "4096"
-        },
-        "hbase-site": {
-          "hbase.hregion.majorcompaction.jitter": "0.50",
-          "hbase.hregion.majorcompaction": "604800000",
-          "hbase.hregion.memstore.block.multiplier": "4",
-          "hbase.hstore.flush.retries.number": {"remove": "yes"}
-        }
-      }
-    }
-  ]
-}


[63/63] [abbrv] ambari git commit: Merge branch 'trunk' into branch-feature-logsearch-ui

Posted by ab...@apache.org.
Merge branch 'trunk' into branch-feature-logsearch-ui


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0b6679af
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0b6679af
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0b6679af

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 0b6679afc5c5852077d7b470b4e73585a3e6b1a7
Parents: b7edc6c ae6b74f
Author: ababiichuk <ab...@hortonworks.com>
Authored: Fri Jun 30 16:35:46 2017 +0300
Committer: ababiichuk <ab...@hortonworks.com>
Committed: Fri Jun 30 16:35:46 2017 +0300

----------------------------------------------------------------------
 .../controllers/ambariViews/ViewsListCtrl.js    |    20 +
 .../controllers/groups/GroupsEditCtrl.js        |    19 +-
 .../app/views/ambariViews/listTable.html        |     3 +
 .../ui/admin-web/app/views/groups/edit.html     |     3 +
 ambari-agent/conf/unix/install-helper.sh        |     8 +
 ambari-agent/etc/init.d/ambari-agent            |    22 +-
 ambari-agent/pom.xml                            |    11 -
 .../ambari_agent/AlertSchedulerHandler.py       |    10 +-
 .../python/ambari_agent/alerts/base_alert.py    |     8 +-
 .../python/ambari_agent/alerts/port_alert.py    |   107 +-
 ambari-agent/src/packages/tarball/all.xml       |     2 +-
 .../ambari_agent/TestAlertSchedulerHandler.py   |    17 +-
 .../libraries/functions/mounted_dirs_helper.py  |     1 +
 .../libraries/functions/packages_analyzer.py    |    15 +-
 ambari-infra/ambari-infra-manager/pom.xml       |     1 -
 ambari-infra/ambari-infra-solr-plugin/pom.xml   |     4 +-
 ambari-infra/pom.xml                            |     2 +-
 .../logsearch/config/api/LogSearchConfig.java   |     8 +
 .../model/inputconfig/FilterGrokDescriptor.java |     2 +
 .../config/api/LogSearchConfigClass1.java       |     5 +
 .../config/api/LogSearchConfigClass2.java       |     5 +
 .../config/zookeeper/LogSearchConfigZK.java     |     9 +-
 .../impl/FilterGrokDescriptorImpl.java          |     1 +
 .../ambari-logsearch-logfeeder/pom.xml          |     9 +-
 .../org/apache/ambari/logfeeder/LogFeeder.java  |    55 +-
 .../ambari/logfeeder/LogFeederCommandLine.java  |   168 +
 .../ambari/logfeeder/common/ConfigHandler.java  |    26 +
 .../logfeeder/common/LogEntryParseTester.java   |   127 +
 .../ambari/logfeeder/filter/FilterGrok.java     |     5 +-
 .../apache/ambari/logfeeder/input/Input.java    |     2 +-
 .../apache/ambari/logfeeder/util/AliasUtil.java |     6 +-
 .../apache/ambari/logfeeder/util/FileUtil.java  |    23 +-
 .../ambari/logfeeder/util/LogFeederUtil.java    |    26 +-
 .../src/main/scripts/run.sh                     |     4 +-
 .../logconfig/LogConfigHandlerTest.java         |     2 +-
 .../logfeeder/metrics/MetrcisManagerTest.java   |   128 -
 .../logfeeder/metrics/MetricsManagerTest.java   |   128 +
 .../ambari-logsearch-server/pom.xml             |    39 +-
 .../ambari/logsearch/doc/DocConstants.java      |     1 +
 .../logsearch/manager/ShipperConfigManager.java |    24 +
 .../logsearch/rest/ShipperConfigResource.java   |    12 +
 ambari-logsearch/pom.xml                        |     5 +-
 ambari-metrics/ambari-metrics-common/pom.xml    |     4 +
 .../timeline/AbstractTimelineMetricsSink.java   |    25 +-
 .../cache/HandleConnectExceptionTest.java       |    86 +-
 .../ambari-metrics/datasource.js                |     5 +-
 .../timeline/HBaseTimelineMetricStore.java      |    15 +-
 .../timeline/TimelineMetricConfiguration.java   |    21 +
 .../timeline/TestTimelineMetricStore.java       |     1 +
 ambari-server/docs/configuration/index.md       |    13 +-
 ambari-server/src/main/assemblies/server.xml    |     4 +
 .../actionmanager/ActionDBAccessorImpl.java     |    17 +
 .../server/agent/AlertDefinitionCommand.java    |     7 +-
 .../ambari/server/agent/ExecutionCommand.java   |     4 +
 .../ambari/server/agent/HeartBeatHandler.java   |     4 +-
 .../server/configuration/Configuration.java     |    65 +-
 .../controller/AmbariActionExecutionHelper.java |     2 +
 .../AmbariCustomCommandExecutionHelper.java     |    12 +-
 .../controller/AmbariManagementController.java  |     4 +
 .../AmbariManagementControllerImpl.java         |    43 +-
 .../ambari/server/controller/AmbariServer.java  |     4 +
 .../server/controller/ConfigGroupResponse.java  |    10 +
 .../controller/DeleteIdentityHandler.java       |   283 +
 .../server/controller/KerberosHelper.java       |     3 +
 .../server/controller/KerberosHelperImpl.java   |    31 +-
 .../OrderedRequestStageContainer.java           |    62 +
 .../internal/AbstractProviderModule.java        |    47 +-
 .../internal/ClientConfigResourceProvider.java  |     9 +-
 .../internal/ConfigGroupResourceProvider.java   |    31 +-
 .../server/controller/jmx/JMXHostProvider.java  |    11 +
 .../controller/jmx/JMXPropertyProvider.java     |    24 +
 .../utilities/KerberosIdentityCleaner.java      |   135 +
 .../system/impl/AmbariMetricSinkImpl.java       |     3 +
 .../system/impl/DatabaseMetricsSource.java      |     4 +-
 .../metrics/system/impl/JvmMetricsSource.java   |    12 +-
 .../metrics/system/impl/MetricsServiceImpl.java |     5 +-
 .../apache/ambari/server/orm/DBAccessor.java    |    41 +-
 .../ambari/server/orm/DBAccessorImpl.java       |   123 +-
 .../server/orm/helpers/dbms/DbmsHelper.java     |    10 +
 .../orm/helpers/dbms/GenericDbmsHelper.java     |    12 +
 .../server/orm/helpers/dbms/H2Helper.java       |    10 +
 .../AbstractPrepareKerberosServerAction.java    |    19 +-
 .../server/serveraction/kerberos/Component.java |    74 +
 .../kerberos/FinalizeKerberosServerAction.java  |    27 +-
 .../kerberos/KerberosServerAction.java          |    27 +
 .../ambari/server/stack/MasterHostResolver.java |    11 +-
 .../org/apache/ambari/server/state/Cluster.java |     8 +
 .../apache/ambari/server/state/ConfigImpl.java  |     3 +-
 .../server/state/alert/AlertDefinitionHash.java |    14 +-
 .../server/state/cluster/ClusterImpl.java       |    18 +
 .../kerberos/AbstractKerberosDescriptor.java    |    15 +
 .../kerberos/KerberosComponentDescriptor.java   |    15 +
 .../state/kerberos/KerberosDescriptor.java      |     8 -
 .../kerberos/KerberosIdentityDescriptor.java    |    30 +
 .../kerberos/KerberosServiceDescriptor.java     |     6 +
 .../server/upgrade/SchemaUpgradeHelper.java     |    13 -
 .../server/upgrade/UpgradeCatalog200.java       |   613 -
 .../server/upgrade/UpgradeCatalog210.java       |  1765 -
 .../server/upgrade/UpgradeCatalog211.java       |   295 -
 .../server/upgrade/UpgradeCatalog212.java       |   427 -
 .../server/upgrade/UpgradeCatalog2121.java      |   206 -
 .../server/upgrade/UpgradeCatalog220.java       |  1404 -
 .../server/upgrade/UpgradeCatalog221.java       |   456 -
 .../server/upgrade/UpgradeCatalog222.java       |   781 -
 .../server/upgrade/UpgradeCatalog230.java       |   402 -
 .../server/upgrade/UpgradeCatalog240.java       |  3079 --
 .../server/upgrade/UpgradeCatalog2402.java      |   121 -
 .../server/upgrade/UpgradeCatalog242.java       |   272 -
 .../server/upgrade/UpgradeCatalog250.java       |  1352 -
 .../server/upgrade/UpgradeCatalog251.java       |     3 +-
 .../server/upgrade/UpgradeCatalog300.java       |     5 +-
 .../apache/ambari/server/utils/StageUtils.java  |    54 +
 ambari-server/src/main/python/ambari-server.py  |     2 +
 .../ambari_server/dbConfiguration_linux.py      |    34 +-
 .../python/ambari_server/serverConfiguration.py |     6 +
 .../main/python/ambari_server/serverSetup.py    |   105 +-
 ambari-server/src/main/python/upgradeHelper.py  |  2338 -
 .../0.1.0/package/scripts/params.py             |     4 +-
 .../0.1.0/package/scripts/params.py             |    10 +-
 .../ATLAS/0.1.0.2.3/package/scripts/metadata.py |     8 +-
 .../ATLAS/0.1.0.2.3/package/scripts/params.py   |     1 +
 .../HBASE/0.96.0.2.0/role_command_order.json    |     3 +-
 .../HBASE/2.0.0.3.0/role_command_order.json     |     2 +-
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |     4 +-
 .../package/scripts/namenode_upgrade.py         |     2 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |     4 +
 .../HIVE/2.1.0.3.0/service_advisor.py           |    19 +
 .../LOGSEARCH/0.5.0/package/scripts/params.py   |     4 +-
 .../RANGER/0.4.0/package/scripts/params.py      |     1 +
 .../0.4.0/package/scripts/setup_ranger_xml.py   |    10 +-
 .../0.6.0.2.5/package/scripts/master.py         |    21 +-
 .../0.6.0.2.5/package/scripts/params.py         |    23 +
 .../2.0.6/hooks/before-ANY/scripts/params.py    |     2 +
 .../before-ANY/scripts/shared_initialization.py |    30 +-
 .../2.0.6/hooks/before-START/scripts/params.py  |     4 +
 .../scripts/shared_initialization.py            |    22 +-
 .../stacks/HDP/2.5/upgrades/config-upgrade.xml  |    10 +-
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml |     7 +-
 .../stacks/HDP/2.5/upgrades/upgrade-2.6.xml     |     5 +-
 .../stacks/HDP/2.6/services/stack_advisor.py    |    21 +
 .../stacks/HDP/2.6/upgrades/config-upgrade.xml  |     9 +
 .../HDP/2.6/upgrades/nonrolling-upgrade-2.6.xml |     6 +
 .../stacks/HDP/2.6/upgrades/upgrade-2.6.xml     |     1 +
 .../HDP/3.0/hooks/before-ANY/scripts/params.py  |     3 +
 .../before-ANY/scripts/shared_initialization.py |    31 +-
 .../3.0/hooks/before-START/scripts/params.py    |     4 +
 .../scripts/shared_initialization.py            |    22 +-
 .../main/resources/stacks/HDP/3.0/metainfo.xml  |     2 +-
 .../PERF/1.0/hooks/before-ANY/scripts/params.py |     3 +
 .../before-ANY/scripts/shared_initialization.py |    23 +-
 .../src/main/resources/stacks/stack_advisor.py  |     2 +-
 .../catalog/UpgradeCatalog_1.3_to_2.2.json      |   948 -
 .../catalog/UpgradeCatalog_2.0_to_2.2.2.json    |   408 -
 .../catalog/UpgradeCatalog_2.0_to_2.2.4.json    |   453 -
 .../catalog/UpgradeCatalog_2.0_to_2.2.json      |   275 -
 .../catalog/UpgradeCatalog_2.1_to_2.2.2.json    |   465 -
 .../catalog/UpgradeCatalog_2.1_to_2.2.4.json    |   499 -
 .../catalog/UpgradeCatalog_2.1_to_2.2.json      |   292 -
 .../catalog/UpgradeCatalog_2.1_to_2.3.json      |   440 -
 .../catalog/UpgradeCatalog_2.2_to_2.3.json      |  2234 -
 .../UpgradeCatalog_2.2_to_2.3_step2.json        |    81 -
 .../AmbariManagementControllerImplTest.java     |    16 +-
 .../ClientConfigResourceProviderTest.java       |     8 +
 .../ConfigGroupResourceProviderTest.java        |     2 +
 .../metrics/JMXPropertyProviderTest.java        |     9 +
 .../utilities/KerberosIdentityCleanerTest.java  |   204 +
 .../ambari/server/orm/DBAccessorImplTest.java   |    90 +
 .../state/alerts/AlertDefinitionHashTest.java   |     4 +-
 .../server/upgrade/UpgradeCatalog200Test.java   |   915 -
 .../server/upgrade/UpgradeCatalog210Test.java   |  1360 -
 .../server/upgrade/UpgradeCatalog211Test.java   |   446 -
 .../server/upgrade/UpgradeCatalog2121Test.java  |   161 -
 .../server/upgrade/UpgradeCatalog212Test.java   |   694 -
 .../server/upgrade/UpgradeCatalog220Test.java   |  1535 -
 .../server/upgrade/UpgradeCatalog221Test.java   |   614 -
 .../server/upgrade/UpgradeCatalog222Test.java   |  1180 -
 .../server/upgrade/UpgradeCatalog230Test.java   |   317 -
 .../server/upgrade/UpgradeCatalog240Test.java   |  2688 -
 .../server/upgrade/UpgradeCatalog242Test.java   |   430 -
 .../server/upgrade/UpgradeCatalog250Test.java   |  2129 -
 .../server/upgrade/UpgradeCatalog300Test.java   |     2 +-
 .../server/upgrade/UpgradeCatalogTest.java      |    13 +-
 .../ambari/server/utils/StageUtilsTest.java     |    99 +
 .../src/test/python/TestAmbariServer.py         |    49 +-
 .../src/test/python/TestUpgradeHelper.py        |  1028 -
 .../stacks/2.0.6/common/test_stack_advisor.py   |     2 +-
 .../configs/ha_bootstrap_standby_node.json      |     2 +-
 ...ha_bootstrap_standby_node_initial_start.json |     2 +-
 ...dby_node_initial_start_dfs_nameservices.json |     2 +-
 .../stacks/2.5/common/test_stack_advisor.py     |     2 +-
 .../stacks/2.6/common/test_stack_advisor.py     |   123 +-
 .../global/wizard_watcher_controller.js         |     6 +-
 ambari-web/app/controllers/main/service/item.js |     6 +-
 .../app/controllers/wizard/step8_controller.js  |    16 +-
 ambari-web/app/messages.js                      |    36 +-
 .../mixins/common/configs/enhanced_configs.js   |     3 +-
 ambari-web/app/models/stack.js                  |     4 +-
 ambari-web/app/styles/alerts.less               |    52 +-
 ambari-web/app/styles/application.less          |    91 +-
 ambari-web/app/styles/bootstrap_overrides.less  |    18 +-
 ambari-web/app/styles/dashboard.less            |     9 +-
 .../app/styles/enhanced_service_dashboard.less  |    79 +-
 .../templates/common/export_metrics_menu.hbs    |    16 +-
 .../templates/common/widget/gauge_widget.hbs    |    33 +-
 .../templates/common/widget/graph_widget.hbs    |    44 +-
 .../templates/common/widget/number_widget.hbs   |    33 +-
 .../templates/common/widget/template_widget.hbs |    35 +-
 .../main/dashboard/widgets/cluster_metrics.hbs  |    19 +-
 .../app/templates/main/service/info/summary.hbs |    91 +-
 .../main/service/info/summary/base.hbs          |    13 +-
 .../service/info/summary/client_components.hbs  |    10 +-
 .../service/info/summary/master_components.hbs  |    26 +-
 .../service/info/summary/slave_components.hbs   |    12 +-
 ambari-web/app/templates/main/service/item.hbs  |    40 +-
 .../templates/main/service/services/hbase.hbs   |   128 +-
 .../templates/main/service/services/hdfs.hbs    |   270 +-
 .../templates/main/service/services/hive.hbs    |    13 +-
 .../templates/main/service/services/ranger.hbs  |    59 +-
 .../templates/main/service/services/storm.hbs   |   118 +-
 .../templates/main/service/services/yarn.hbs    |   194 +-
 .../app/views/common/quick_view_link_view.js    |    14 +-
 .../views/common/widget/gauge_widget_view.js    |    14 +-
 .../app/views/main/service/info/summary.js      |    24 +
 ambari-web/app/views/main/service/service.js    |    13 +-
 .../app/views/main/service/services/hbase.js    |     2 +-
 .../app/views/main/service/services/hdfs.js     |    19 +-
 .../app/views/main/service/services/yarn.js     |     3 +-
 .../global/wizard_watcher_controller_test.js    |     3 +-
 .../common/widget/gauge_widget_view_test.js     |     2 +-
 .../stacks/ODPi/2.0/services/stack_advisor.py   |     2 +-
 .../src/main/resources/ui/app/styles/app.scss   |     1 +
 contrib/views/pom.xml                           |     1 -
 contrib/views/slider/docs/index.md              |   164 -
 contrib/views/slider/gzip-content.cmd           |    17 -
 contrib/views/slider/gzip-content.ps1           |    81 -
 contrib/views/slider/pom.xml                    |   463 -
 .../apache/ambari/view/slider/AlertField.java   |    62 -
 .../apache/ambari/view/slider/AlertState.java   |    40 -
 .../ambari/view/slider/MetricsHolder.java       |    44 -
 .../apache/ambari/view/slider/SliderApp.java    |   198 -
 .../ambari/view/slider/SliderAppComponent.java  |    62 -
 .../ambari/view/slider/SliderAppType.java       |   107 -
 .../view/slider/SliderAppTypeComponent.java     |   103 -
 .../slider/SliderAppTypesResourceProvider.java  |    94 -
 .../ambari/view/slider/SliderAppsAlerts.java    |   127 -
 .../view/slider/SliderAppsConfiguration.java    |    52 -
 .../view/slider/SliderAppsResourceProvider.java |   124 -
 .../view/slider/SliderAppsViewController.java   |   112 -
 .../slider/SliderAppsViewControllerImpl.java    |  1510 -
 .../apache/ambari/view/slider/TemporalInfo.java |    48 -
 .../apache/ambari/view/slider/ViewStatus.java   |    72 -
 .../view/slider/clients/AmbariClient.java       |    58 -
 .../view/slider/clients/AmbariCluster.java      |    52 -
 .../view/slider/clients/AmbariClusterInfo.java  |    40 -
 .../slider/clients/AmbariHostComponent.java     |    49 -
 .../view/slider/clients/AmbariHostInfo.java     |    32 -
 .../view/slider/clients/AmbariService.java      |    36 -
 .../view/slider/clients/AmbariServiceInfo.java  |    51 -
 .../slider/rest/SliderAppTypesResource.java     |    54 -
 .../view/slider/rest/SliderAppsResource.java    |   163 -
 .../view/slider/rest/ViewStatusResource.java    |    40 -
 .../slider/rest/client/AmbariHttpClient.java    |   233 -
 .../view/slider/rest/client/BaseHttpClient.java |   157 -
 .../slider/rest/client/JMXMetricHolder.java     |    50 -
 .../ambari/view/slider/rest/client/Metric.java  |   158 -
 .../slider/rest/client/SliderAppJmxHelper.java  |   205 -
 .../rest/client/SliderAppMasterClient.java      |   324 -
 .../rest/client/SliderAppMetricsHelper.java     |   159 -
 .../view/slider/rest/client/TimelineMetric.java |   172 -
 .../slider/rest/client/TimelineMetrics.java     |   101 -
 .../rest/client/URLStreamProviderBasicAuth.java |   105 -
 .../slider/src/main/resources/slider.properties |    19 -
 .../slider/src/main/resources/ui/.gitignore     |    31 -
 .../views/slider/src/main/resources/ui/LICENSE  |    21 -
 .../slider/src/main/resources/ui/README.md      |   122 -
 .../src/main/resources/ui/app/assets/404.html   |   175 -
 .../app/assets/apple-touch-icon-precomposed.png |   Bin 1226 -> 0 bytes
 .../resources/ui/app/assets/crossdomain.xml     |    31 -
 .../resources/ui/app/assets/data/apps/apps.json |  1320 -
 .../ui/app/assets/data/apptypes/all_fields.json |   642 -
 .../ui/app/assets/data/metrics/metric.json      |   946 -
 .../ui/app/assets/data/metrics/metric2.json     |  3838 --
 .../ui/app/assets/data/metrics/metric3.json     |  1856 -
 .../ui/app/assets/data/metrics/metric4.json     |   968 -
 .../ui/app/assets/data/resource/empty_json.json |     1 -
 .../data/resource/slider-properties-2.json      |     9 -
 .../assets/data/resource/slider-properties.json |   148 -
 .../app/assets/data/resource/status_false.json  |     8 -
 .../app/assets/data/resource/status_true.json   |     5 -
 .../main/resources/ui/app/assets/favicon.ico    |   Bin 766 -> 0 bytes
 .../ui/app/assets/font/fontawesome-webfont.eot  |   Bin 37405 -> 0 bytes
 .../ui/app/assets/font/fontawesome-webfont.svg  |   399 -
 .../ui/app/assets/font/fontawesome-webfont.ttf  |   Bin 79076 -> 0 bytes
 .../ui/app/assets/font/fontawesome-webfont.woff |   Bin 43572 -> 0 bytes
 .../resources/ui/app/assets/images/.gitkeep     |     0
 .../ui-bg_flat_0_aaaaaa_40x100.png              |   Bin 180 -> 0 bytes
 .../ui-bg_glass_55_fbf9ee_1x400.png             |   Bin 120 -> 0 bytes
 .../ui-bg_glass_65_ffffff_1x400.png             |   Bin 105 -> 0 bytes
 .../ui-bg_glass_75_dadada_1x400.png             |   Bin 111 -> 0 bytes
 .../ui-bg_glass_75_e6e6e6_1x400.png             |   Bin 110 -> 0 bytes
 .../ui-bg_glass_75_ffffff_1x400.png             |   Bin 107 -> 0 bytes
 .../ui-bg_highlight-soft_75_cccccc_1x100.png    |   Bin 101 -> 0 bytes
 .../ui-bg_inset-soft_95_fef1ec_1x100.png        |   Bin 123 -> 0 bytes
 .../ui-icons_222222_256x240.png                 |   Bin 4369 -> 0 bytes
 .../ui-icons_2e83ff_256x240.png                 |   Bin 4369 -> 0 bytes
 .../ui-icons_454545_256x240.png                 |   Bin 4369 -> 0 bytes
 .../ui-icons_888888_256x240.png                 |   Bin 4369 -> 0 bytes
 .../ui-icons_cd0a0a_256x240.png                 |   Bin 4369 -> 0 bytes
 .../ui-icons_f6cf3b_256x240.png                 |   Bin 8884 -> 0 bytes
 .../src/main/resources/ui/app/assets/index.html |    46 -
 .../ui/app/assets/javascripts/ember-qunit.js    |   266 -
 .../ui/app/assets/javascripts/jquery.mockjax.js |   692 -
 .../assets/javascripts/modernizr-2.6.2.min.js   |     4 -
 .../ui/app/assets/javascripts/qunit.js          |  2495 -
 .../ui/app/assets/javascripts/sinon-1.13.0.js   |  5830 ---
 .../app/assets/javascripts/sinon-qunit-1.0.0.js |    62 -
 .../ui/app/assets/javascripts/tests.js          |    29 -
 .../ui/app/assets/stylesheets/qunit.css         |   237 -
 .../src/main/resources/ui/app/assets/tests.html |    46 -
 .../main/resources/ui/app/components/.gitkeep   |     0
 .../ui/app/components/configSection.js          |   164 -
 .../src/main/resources/ui/app/config/app.js     |    27 -
 .../src/main/resources/ui/app/config/env.js     |    33 -
 .../src/main/resources/ui/app/config/router.js  |    35 -
 .../src/main/resources/ui/app/config/store.js   |    23 -
 .../main/resources/ui/app/controllers/.gitkeep  |     0
 .../app/controllers/application_controller.js   |    27 -
 .../createAppWizard/step1_controller.js         |   251 -
 .../createAppWizard/step2_controller.js         |   136 -
 .../createAppWizard/step3_controller.js         |   250 -
 .../createAppWizard/step4_controller.js         |   176 -
 .../controllers/create_app_wizard_controller.js |   122 -
 .../slider_app/summary_controller.js            |    27 -
 .../ui/app/controllers/slider_app_controller.js |   472 -
 .../app/controllers/slider_apps_controller.js   |    35 -
 .../ui/app/controllers/slider_controller.js     |   138 -
 .../ui/app/controllers/tooltip_controller.js    |    19 -
 .../src/main/resources/ui/app/helpers/.gitkeep  |     0
 .../src/main/resources/ui/app/helpers/ajax.js   |   388 -
 .../src/main/resources/ui/app/helpers/helper.js |   169 -
 .../resources/ui/app/helpers/string_utils.js    |   163 -
 .../src/main/resources/ui/app/initialize.js     |   171 -
 .../ui/app/mappers/application_type.js          |   162 -
 .../src/main/resources/ui/app/mappers/mapper.js |    56 -
 .../ui/app/mappers/slider_apps_mapper.js        |   292 -
 .../ui/app/mixins/ajax_error_handler.js         |    82 -
 .../resources/ui/app/mixins/run_periodically.js |   109 -
 .../main/resources/ui/app/mixins/with_panels.js |    53 -
 .../src/main/resources/ui/app/models/.gitkeep   |     0
 .../resources/ui/app/models/config_property.js  |    47 -
 .../src/main/resources/ui/app/models/host.js    |    33 -
 .../main/resources/ui/app/models/slider_app.js  |   207 -
 .../resources/ui/app/models/slider_app_alert.js |   149 -
 .../ui/app/models/slider_app_component.js       |    65 -
 .../resources/ui/app/models/slider_app_type.js  |    64 -
 .../ui/app/models/slider_app_type_component.js  |    63 -
 .../resources/ui/app/models/slider_config.js    |    45 -
 .../ui/app/models/slider_quick_link.js          |    33 -
 .../resources/ui/app/models/typed_property.js   |    38 -
 .../ui/app/routes/create_app_wizard.js          |    54 -
 .../src/main/resources/ui/app/routes/main.js    |    75 -
 .../src/main/resources/ui/app/styles/app.less   |   303 -
 .../resources/ui/app/styles/application.less    |    23 -
 .../resources/ui/app/styles/application.styl    |    18 -
 .../resources/ui/app/styles/apps-table.less     |   320 -
 .../main/resources/ui/app/styles/common.less    |   109 -
 .../resources/ui/app/styles/old-bootstrap.less  |   164 -
 .../main/resources/ui/app/styles/wizard.less    |   232 -
 .../resources/ui/app/templates/application.hbs  |    56 -
 .../ui/app/templates/common/ajax_error.hbs      |    24 -
 .../ui/app/templates/common/app_tooltip.hbs     |    24 -
 .../resources/ui/app/templates/common/chart.hbs |    26 -
 .../ui/app/templates/common/config.hbs          |    37 -
 .../ui/app/templates/components/.gitkeep        |     0
 .../app/templates/components/configSection.hbs  |    78 -
 .../ui/app/templates/createAppWizard.hbs        |    43 -
 .../ui/app/templates/createAppWizard/step1.hbs  |   192 -
 .../ui/app/templates/createAppWizard/step2.hbs  |    62 -
 .../ui/app/templates/createAppWizard/step3.hbs  |    37 -
 .../ui/app/templates/createAppWizard/step4.hbs  |    41 -
 .../main/resources/ui/app/templates/index.hbs   |    21 -
 .../resources/ui/app/templates/slider_app.hbs   |    70 -
 .../ui/app/templates/slider_app/configs.hbs     |    40 -
 .../slider_app/destroy/destroy_popup.hbs        |    20 -
 .../slider_app/destroy/destroy_popup_footer.hbs |    28 -
 .../ui/app/templates/slider_app/flex_popup.hbs  |    40 -
 .../ui/app/templates/slider_app/summary.hbs     |   135 -
 .../resources/ui/app/templates/slider_apps.hbs  |    87 -
 .../ui/app/templates/slider_title_tooltip.hbs   |    27 -
 .../ui/app/templates/unavailable_apps.hbs       |    22 -
 .../src/main/resources/ui/app/translations.js   |   169 -
 .../src/main/resources/ui/app/views/.gitkeep    |     0
 .../resources/ui/app/views/application_view.js  |    70 -
 .../resources/ui/app/views/common/chart_view.js |   914 -
 .../ui/app/views/common/config_set_view.js      |    57 -
 .../ui/app/views/common/filter_view.js          |   370 -
 .../resources/ui/app/views/common/sort_view.js  |   206 -
 .../resources/ui/app/views/common/table_view.js |   418 -
 .../ui/app/views/createAppWizard/step1_view.js  |    47 -
 .../ui/app/views/createAppWizard/step2_view.js  |    33 -
 .../ui/app/views/createAppWizard/step3_view.js  |    25 -
 .../ui/app/views/createAppWizard/step4_view.js  |    24 -
 .../ui/app/views/create_app_wizard_view.js      |    80 -
 .../ui/app/views/slider_app/configs_view.js     |    57 -
 .../slider_app/destroy_modal_footer_view.js     |    41 -
 .../app/views/slider_app/destroy_popup_view.js  |    37 -
 .../views/slider_app/metrics/app_metric_view.js |    77 -
 .../views/slider_app/metrics/metric2_view.js    |    63 -
 .../views/slider_app/metrics/metric3_view.js    |    61 -
 .../views/slider_app/metrics/metric4_view.js    |    54 -
 .../app/views/slider_app/metrics/metric_view.js |    70 -
 .../ui/app/views/slider_app/summary_view.js     |   141 -
 .../resources/ui/app/views/slider_app_view.js   |    30 -
 .../resources/ui/app/views/slider_apps_view.js  |   175 -
 .../slider/src/main/resources/ui/config.js      |   123 -
 .../main/resources/ui/envs/development/env.js   |    21 -
 .../main/resources/ui/envs/production/env.js    |    21 -
 .../arraycontroller/arraycontroller.js.hbs      |    23 -
 .../generators/arraycontroller/generator.json   |    10 -
 .../ui/generators/component/component.hbs.hbs   |    19 -
 .../ui/generators/component/component.js.hbs    |    23 -
 .../ui/generators/component/generator.json      |    14 -
 .../ui/generators/controller/controller.js.hbs  |    23 -
 .../ui/generators/controller/generator.json     |    10 -
 .../ui/generators/helper/generator.json         |    10 -
 .../ui/generators/helper/helper.js.hbs          |    24 -
 .../ui/generators/model/generator.json          |    10 -
 .../resources/ui/generators/model/model.js.hbs  |    24 -
 .../ui/generators/route/generator.json          |    10 -
 .../resources/ui/generators/route/route.js.hbs  |    25 -
 .../ui/generators/template/generator.json       |    10 -
 .../ui/generators/template/template.hbs.hbs     |    19 -
 .../resources/ui/generators/view/generator.json |    10 -
 .../resources/ui/generators/view/view.js.hbs    |    23 -
 .../slider/src/main/resources/ui/karma.conf.js  |    94 -
 .../slider/src/main/resources/ui/package.json   |    38 -
 .../slider/src/main/resources/ui/runner.js      |   136 -
 .../views/slider/src/main/resources/ui/setup.js |    78 -
 .../slider/src/main/resources/ui/test/index.md  |    28 -
 .../ui/test/integration/pages/index_test.js     |   127 -
 .../integration/pages/slider_errors_test.js     |    63 -
 .../processes/create_new_app_test.js            |   358 -
 .../createAppWizard/step1_controller_test.js    |   431 -
 .../createAppWizard/step2_controller_test.js    |   403 -
 .../createAppWizard/step3_controller_test.js    |   421 -
 .../createAppWizard/step4_controller_test.js    |   440 -
 .../create_app_wizard_controller_test.js        |   201 -
 .../slider_app/summary_controller_test.js       |    36 -
 .../controllers/slider_app_controller_test.js   |   607 -
 .../controllers/slider_apps_controller_test.js  |    58 -
 .../unit/controllers/slider_controller_test.js  |   140 -
 .../unit/mappers/slider_apps_mapper_test.js     |    85 -
 .../unit/models/slider_app_component_test.js    |    51 -
 .../ui/test/unit/models/slider_app_test.js      |    95 -
 .../test/unit/views/common/table_view_test.js   |    35 -
 .../unit/views/slider_app/summary_view_test.js  |    68 -
 .../ui/vendor/scripts/common/bootstrap.js       |  1951 -
 .../ui/vendor/scripts/common/bs-basic.min.js    |     1 -
 .../ui/vendor/scripts/common/bs-button.min.js   |     1 -
 .../ui/vendor/scripts/common/bs-core.min.js     |     1 -
 .../ui/vendor/scripts/common/bs-modal.min.js    |     1 -
 .../ui/vendor/scripts/common/bs-nav.min.js      |     1 -
 .../ui/vendor/scripts/common/bs-popover.min.js  |     1 -
 .../vendor/scripts/common/console-polyfill.js   |    13 -
 .../ui/vendor/scripts/common/cubism.v1.js       |  1085 -
 .../resources/ui/vendor/scripts/common/d3.v2.js |  7033 ---
 .../vendor/scripts/common/ember-i18n-1.4.1.js   |   199 -
 .../ui/vendor/scripts/common/handlebars.js      |  2746 --
 .../ui/vendor/scripts/common/jquery.js          |  8829 ----
 .../ui/vendor/scripts/common/jquery.timeago.js  |   214 -
 .../ui/vendor/scripts/common/jquery.ui.core.js  |   334 -
 .../ui/vendor/scripts/common/jquery.ui.mouse.js |   175 -
 .../vendor/scripts/common/jquery.ui.sortable.js |  1088 -
 .../vendor/scripts/common/jquery.ui.widget.js   |   276 -
 .../ui/vendor/scripts/common/moment.min.js      |     7 -
 .../ui/vendor/scripts/common/rickshaw.js        |  2659 -
 .../resources/ui/vendor/scripts/common/tv4.js   |  1605 -
 .../ui/vendor/scripts/development/ember-data.js | 10620 ----
 .../ui/vendor/scripts/development/ember.js      | 43235 -----------------
 .../ui/vendor/scripts/production/ember-data.js  | 10626 ----
 .../ui/vendor/scripts/production/ember.js       | 41620 ----------------
 .../resources/ui/vendor/styles/bootstrap.css    |  5785 ---
 .../main/resources/ui/vendor/styles/cubism.css  |    82 -
 .../ui/vendor/styles/font-awesome-ie7.css       |  1203 -
 .../resources/ui/vendor/styles/font-awesome.css |  1479 -
 .../jquery-ui-1.8.16.custom.css                 |  1320 -
 .../resources/ui/vendor/styles/rickshaw.css     |   307 -
 .../src/main/resources/view.log4j.properties    |    27 -
 .../views/slider/src/main/resources/view.xml    |    82 -
 .../ambari/view/slider/SliderClientTest.java    |    31 -
 .../rest/client/SliderAppMasterClientTest.java  |   177 -
 .../src/main/resources/ui/app/styles/app.less   |     4 +-
 .../wfmanager/src/main/resources/ui/bower.json  |     4 +-
 .../hdfs-directory-viewer/addon/styles/app.css  |     1 +
 .../wfmanager/src/main/resources/ui/yarn.lock   |    68 +-
 docs/pom.xml                                    |    24 +
 496 files changed, 4173 insertions(+), 219502 deletions(-)
----------------------------------------------------------------------



[57/63] [abbrv] ambari git commit: AMBARI-21370: Support VIPs instead of Host Names (jluniya)

Posted by ab...@apache.org.
AMBARI-21370: Support VIPs instead of Host Names (jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4d7cc7f3
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4d7cc7f3
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4d7cc7f3

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 4d7cc7f392a6c4b52d39456504ad490d74fd019a
Parents: 4cd3150
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Thu Jun 29 07:17:24 2017 -0700
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Thu Jun 29 07:17:24 2017 -0700

----------------------------------------------------------------------
 .../ambari_agent/AlertSchedulerHandler.py       |  10 +-
 .../python/ambari_agent/alerts/base_alert.py    |   8 +-
 .../python/ambari_agent/alerts/port_alert.py    | 107 +++++++++++--------
 .../ambari_agent/TestAlertSchedulerHandler.py   |  17 +--
 .../server/agent/AlertDefinitionCommand.java    |   7 +-
 .../ambari/server/agent/HeartBeatHandler.java   |   4 +-
 .../internal/AbstractProviderModule.java        |  47 ++++++--
 .../server/controller/jmx/JMXHostProvider.java  |  13 +++
 .../controller/jmx/JMXPropertyProvider.java     |  25 +++++
 .../org/apache/ambari/server/state/Cluster.java |   8 ++
 .../server/state/alert/AlertDefinitionHash.java |  14 +--
 .../server/state/cluster/ClusterImpl.java       |  18 ++++
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py  |   4 +-
 .../package/scripts/namenode_upgrade.py         |   2 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |   4 +
 .../metrics/JMXPropertyProviderTest.java        |   9 ++
 .../state/alerts/AlertDefinitionHashTest.java   |   4 +-
 .../configs/ha_bootstrap_standby_node.json      |   2 +-
 ...ha_bootstrap_standby_node_initial_start.json |   2 +-
 ...dby_node_initial_start_dfs_nameservices.json |   2 +-
 20 files changed, 224 insertions(+), 83 deletions(-)
----------------------------------------------------------------------
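
The common thread of this commit is that a component may be reachable through a virtual IP (VIP) or public host name rather than the internal host name Ambari tracks, so both the agent alerts and the server-side JMX collection now try the internal name first and fall back to the public name. A minimal sketch of that candidate-host pattern, using a hypothetical helper name that is not part of Ambari:

    # Illustrative only: the candidate-host pattern this commit applies in several places.
    # resolve_candidates() is a hypothetical helper, not an Ambari function.
    def resolve_candidates(host_name, public_host_name):
        candidates = [host_name]
        # Fall back to the public (VIP) name only when it differs from the internal one.
        if public_host_name and public_host_name.lower() != host_name.lower():
            candidates.append(public_host_name)
        return candidates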


http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-agent/src/main/python/ambari_agent/AlertSchedulerHandler.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/AlertSchedulerHandler.py b/ambari-agent/src/main/python/ambari_agent/AlertSchedulerHandler.py
index 6c1d29c..55c3d6e 100644
--- a/ambari-agent/src/main/python/ambari_agent/AlertSchedulerHandler.py
+++ b/ambari-agent/src/main/python/ambari_agent/AlertSchedulerHandler.py
@@ -283,6 +283,7 @@ class AlertSchedulerHandler():
     for command_json in all_commands:
       clusterName = '' if not 'clusterName' in command_json else command_json['clusterName']
       hostName = '' if not 'hostName' in command_json else command_json['hostName']
+      publicHostName = '' if not 'publicHostName' in command_json else command_json['publicHostName']
       clusterHash = None if not 'hash' in command_json else command_json['hash']
 
       # cache the cluster and cluster hash after loading the JSON
@@ -291,7 +292,7 @@ class AlertSchedulerHandler():
         self._cluster_hashes[clusterName] = clusterHash
 
       for definition in command_json['alertDefinitions']:
-        alert = self.__json_to_callable(clusterName, hostName, definition)
+        alert = self.__json_to_callable(clusterName, hostName, publicHostName, definition)
 
         if alert is None:
           continue
@@ -303,7 +304,7 @@ class AlertSchedulerHandler():
     return definitions
 
 
-  def __json_to_callable(self, clusterName, hostName, json_definition):
+  def __json_to_callable(self, clusterName, hostName, publicHostName, json_definition):
     """
     converts the json that represents all aspects of a definition
     and makes an object that extends BaseAlert that is used for individual
@@ -336,7 +337,7 @@ class AlertSchedulerHandler():
         alert = RecoveryAlert(json_definition, source, self.config, self.recovery_manger)
 
       if alert is not None:
-        alert.set_cluster(clusterName, hostName)
+        alert.set_cluster(clusterName, hostName, publicHostName)
 
     except Exception, exception:
       logger.exception("[AlertScheduler] Unable to load an invalid alert definition. It will be skipped.")
@@ -402,8 +403,9 @@ class AlertSchedulerHandler():
 
         clusterName = '' if not 'clusterName' in execution_command else execution_command['clusterName']
         hostName = '' if not 'hostName' in execution_command else execution_command['hostName']
+        publicHostName = '' if not 'publicHostName' in execution_command else execution_command['publicHostName']
 
-        alert = self.__json_to_callable(clusterName, hostName, alert_definition)
+        alert = self.__json_to_callable(clusterName, hostName, publicHostName, alert_definition)
 
         if alert is None:
           continue
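
The handler change above threads publicHostName out of each command JSON alongside clusterName and hostName and into __json_to_callable, so every scheduled alert knows both names. A simplified sketch of that extraction, assuming a made-up command dict rather than the real Ambari payload:

    # dict.get() with a default is equivalent to the
    # "'' if not key in command_json else command_json[key]" pattern used above.
    command_json = {'clusterName': 'c1', 'hostName': 'internal-host', 'publicHostName': 'vip-host'}
    cluster_name = command_json.get('clusterName', '')
    host_name = command_json.get('hostName', '')
    public_host_name = command_json.get('publicHostName', '')
    # alert = self.__json_to_callable(cluster_name, host_name, public_host_name, definition)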

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py b/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py
index 7f3b2a5..add29fc 100644
--- a/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py
+++ b/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py
@@ -46,6 +46,7 @@ class BaseAlert(object):
     self.alert_source_meta = alert_source_meta
     self.cluster_name = ''
     self.host_name = ''
+    self.public_host_name = ''
     self.config = config
     
   def interval(self):
@@ -86,10 +87,13 @@ class BaseAlert(object):
     self.cluster_configuration = cluster_configuration
 
 
-  def set_cluster(self, cluster_name, host_name):
+  def set_cluster(self, cluster_name, host_name, public_host_name = None):
     """ sets cluster information for the alert """
     self.cluster_name = cluster_name
     self.host_name = host_name
+    self.public_host_name = host_name
+    if public_host_name:
+      self.public_host_name = public_host_name
 
 
   def _get_alert_meta_value_safely(self, meta_key):
@@ -452,7 +456,7 @@ class BaseAlert(object):
       # get the host for dfs.namenode.http-address.c1ha.nn1 and see if it's
       # this host
       value = self._get_configuration_value(key)
-      if value is not None and self.host_name in value:
+      if value is not None and (self.host_name in value or self.public_host_name in value):
         return AlertUri(uri=value, is_ssl_enabled=is_ssl_enabled)
 
     return None
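
The effect of the set_cluster() change is that public_host_name is optional and defaults to the internal host name, so existing callers that never pass it keep their old behavior. A minimal standalone sketch of just that defaulting logic, with a hypothetical class standing in for BaseAlert:

    class HostAwareAlert(object):
        """Hypothetical stand-in showing only the set_cluster() defaulting behavior."""
        def set_cluster(self, cluster_name, host_name, public_host_name=None):
            self.cluster_name = cluster_name
            self.host_name = host_name
            # Default to the internal name; override only when a public name is supplied.
            self.public_host_name = public_host_name or host_name

    a = HostAwareAlert()
    a.set_cluster('c1', 'internal-host')               # public_host_name == 'internal-host'
    a.set_cluster('c1', 'internal-host', 'vip-host')   # public_host_name == 'vip-host'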

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py b/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py
index 1e32718..02cc91c 100644
--- a/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py
+++ b/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py
@@ -91,7 +91,9 @@ class PortAlert(BaseAlert):
     # if not parameterized, this will return the static value
     uri_value = self._get_configuration_value(self.uri)
 
+    host_not_specified = False
     if uri_value is None:
+      host_not_specified = True
       uri_value = self.host_name
       logger.debug("[Alert][{0}] Setting the URI to this host since it wasn't specified".format(
         self.get_name()))
@@ -112,6 +114,16 @@ class PortAlert(BaseAlert):
     host = BaseAlert.get_host_from_url(uri_value)
     if host is None or host == "localhost" or host == "0.0.0.0":
       host = self.host_name
+      host_not_specified = True
+
+    hosts = [host]
+    # If no host was specified in the URI, we are using the current host name,
+    # so also add the public host name as a fallback.
+    if host_not_specified and host.lower() == self.host_name.lower() \
+      and self.host_name.lower() != self.public_host_name.lower():
+      hosts.append(self.public_host_name)
+    if logger.isEnabledFor(logging.DEBUG):
+      logger.debug("[Alert][{0}] List of hosts = {1}".format(self.get_name(), hosts))
 
     try:
       port = int(get_port_from_url(uri_value))
@@ -122,51 +134,56 @@ class PortAlert(BaseAlert):
 
       port = self.default_port
 
-
-    if logger.isEnabledFor(logging.DEBUG):
-      logger.debug("[Alert][{0}] Checking {1} on port {2}".format(
-        self.get_name(), host, str(port)))
-
-    s = None
-    try:
-      s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-      s.settimeout(self.critical_timeout)
-
-      if OSCheck.is_windows_family():
-        # on windows 0.0.0.0 is invalid address to connect but on linux it resolved to 127.0.0.1
-        host = resolve_address(host)
-
-      start_time = time.time()
-      s.connect((host, port))
-      if self.socket_command is not None:
-        s.sendall(self.socket_command)
-        data = s.recv(1024)
-        if self.socket_command_response is not None and data != self.socket_command_response:
-          raise Exception("Expected response {0}, Actual response {1}".format(
-            self.socket_command_response, data))
-      end_time = time.time()
-      milliseconds = end_time - start_time
-      seconds = milliseconds / 1000.0
-
-      # not sure why this happens sometimes, but we don't always get a
-      # socket exception if the connect() is > than the critical threshold
-      if seconds >= self.critical_timeout:
-        return (self.RESULT_CRITICAL, ['Socket Timeout', host, port])
-
-      result = self.RESULT_OK
-      if seconds >= self.warning_timeout:
-        result = self.RESULT_WARNING
-
-      return (result, [seconds, port])
-    except Exception as e:
-      return (self.RESULT_CRITICAL, [str(e), host, port])
-    finally:
-      if s is not None:
-        try:
-          s.close()
-        except:
-          # no need to log a close failure
-          pass
+    exceptions = []
+
+    for host in hosts:
+      if logger.isEnabledFor(logging.DEBUG):
+        logger.debug("[Alert][{0}] Checking {1} on port {2}".format(
+          self.get_name(), host, str(port)))
+
+      s = None
+      try:
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        s.settimeout(self.critical_timeout)
+
+        if OSCheck.is_windows_family():
+          # on windows 0.0.0.0 is invalid address to connect but on linux it resolved to 127.0.0.1
+          host = resolve_address(host)
+
+        start_time = time.time()
+        s.connect((host, port))
+        if self.socket_command is not None:
+          s.sendall(self.socket_command)
+          data = s.recv(1024)
+          if self.socket_command_response is not None and data != self.socket_command_response:
+            raise Exception("Expected response {0}, Actual response {1}".format(
+              self.socket_command_response, data))
+        end_time = time.time()
+        milliseconds = end_time - start_time
+        seconds = milliseconds / 1000.0
+
+        # not sure why this happens sometimes, but we don't always get a
+        # socket exception if the connect() is > than the critical threshold
+        if seconds >= self.critical_timeout:
+          return (self.RESULT_CRITICAL, ['Socket Timeout', host, port])
+
+        result = self.RESULT_OK
+        if seconds >= self.warning_timeout:
+          result = self.RESULT_WARNING
+
+        return (result, [seconds, port])
+      except Exception as e:
+        exceptions.append(e)
+      finally:
+        if s is not None:
+          try:
+            s.close()
+          except:
+            # no need to log a close failure
+            pass
+
+    if exceptions:
+      return (self.RESULT_CRITICAL, [str(exceptions[0]), hosts[0], port])
 
   def _get_reporting_text(self, state):
     '''
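
The restructured port check above boils down to: build a list of candidate hosts (the internal name, plus the public name when the URI named no host), return on the first successful connect, and only report CRITICAL with the first exception once every candidate has failed. A condensed sketch of that control flow, with a hypothetical try_connect() in place of the socket code:

    def check_port(hosts, port, try_connect):
        """Return ('OK', host, seconds) for the first reachable host, else a CRITICAL tuple."""
        exceptions = []
        for host in hosts:
            try:
                seconds = try_connect(host, port)   # hypothetical: returns elapsed time, raises on failure
                return ('OK', host, seconds)
            except Exception as e:
                exceptions.append(e)
        # Every candidate failed; surface the first error, mirroring the patch above.
        return ('CRITICAL', hosts[0], str(exceptions[0]))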

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-agent/src/test/python/ambari_agent/TestAlertSchedulerHandler.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestAlertSchedulerHandler.py b/ambari-agent/src/test/python/ambari_agent/TestAlertSchedulerHandler.py
index d1d27ef..fbcd33f 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestAlertSchedulerHandler.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestAlertSchedulerHandler.py
@@ -70,7 +70,7 @@ class TestAlertSchedulerHandler(TestCase):
       }
     }
 
-    callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', copy.deepcopy(json_definition))
+    callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', 'host', copy.deepcopy(json_definition))
 
     self.assertTrue(callable_result is not None)
     self.assertTrue(isinstance(callable_result, MetricAlert))
@@ -85,7 +85,7 @@ class TestAlertSchedulerHandler(TestCase):
       }
     }
 
-    callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', copy.deepcopy(json_definition))
+    callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', 'host', copy.deepcopy(json_definition))
 
     self.assertTrue(callable_result is not None)
     self.assertTrue(isinstance(callable_result, AmsAlert))
@@ -100,7 +100,7 @@ class TestAlertSchedulerHandler(TestCase):
     }
 
     scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, None, self.config, None)
-    callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', copy.deepcopy(json_definition))
+    callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', 'host', copy.deepcopy(json_definition))
 
     self.assertTrue(callable_result is not None)
     self.assertTrue(isinstance(callable_result, PortAlert))
@@ -116,7 +116,7 @@ class TestAlertSchedulerHandler(TestCase):
     }
 
     scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, None, self.config, None)
-    callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', copy.deepcopy(json_definition))
+    callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', 'host', copy.deepcopy(json_definition))
 
     self.assertTrue(callable_result is not None)
     self.assertTrue(isinstance(callable_result, WebAlert))
@@ -131,7 +131,7 @@ class TestAlertSchedulerHandler(TestCase):
     }
 
     scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, None, self.config, None)
-    callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', copy.deepcopy(json_definition))
+    callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', 'host', copy.deepcopy(json_definition))
 
     self.assertTrue(callable_result is None)
 
@@ -174,6 +174,7 @@ class TestAlertSchedulerHandler(TestCase):
       {
         'clusterName': 'cluster',
         'hostName': 'host',
+        'publicHostName' : 'host',
         'alertDefinition': {
           'name': 'alert1'
         }
@@ -191,7 +192,7 @@ class TestAlertSchedulerHandler(TestCase):
 
     scheduler.execute_alert(execution_commands)
 
-    scheduler._AlertSchedulerHandler__json_to_callable.assert_called_with('cluster', 'host', {'name': 'alert1'})
+    scheduler._AlertSchedulerHandler__json_to_callable.assert_called_with('cluster', 'host', 'host', {'name': 'alert1'})
     self.assertTrue(alert_mock.collect.called)
 
   def test_execute_alert_from_extension(self):
@@ -199,6 +200,7 @@ class TestAlertSchedulerHandler(TestCase):
       {
         'clusterName': 'cluster',
         'hostName': 'host',
+        'publicHostName' : 'host',
         'alertDefinition': {
           'name': 'alert1'
         }
@@ -216,7 +218,7 @@ class TestAlertSchedulerHandler(TestCase):
 
     scheduler.execute_alert(execution_commands)
 
-    scheduler._AlertSchedulerHandler__json_to_callable.assert_called_with('cluster', 'host', {'name': 'alert1'})
+    scheduler._AlertSchedulerHandler__json_to_callable.assert_called_with('cluster', 'host', 'host', {'name': 'alert1'})
     self.assertTrue(alert_mock.collect.called)
 
   def test_load_definitions(self):
@@ -245,6 +247,7 @@ class TestAlertSchedulerHandler(TestCase):
       {
         'clusterName': 'cluster',
         'hostName': 'host',
+        'publicHostName' : 'host',
         'alertDefinition': {
           'name': 'alert1'
         }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/main/java/org/apache/ambari/server/agent/AlertDefinitionCommand.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/AlertDefinitionCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/AlertDefinitionCommand.java
index 2929087..be837db 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/AlertDefinitionCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/AlertDefinitionCommand.java
@@ -46,6 +46,9 @@ public class AlertDefinitionCommand extends AgentCommand {
   @SerializedName("hostName")
   private final String m_hostName;
 
+  @SerializedName("publicHostName")
+  private final String m_publicHostName;
+
   @SerializedName("hash")
   private final String m_hash;
 
@@ -61,17 +64,19 @@ public class AlertDefinitionCommand extends AgentCommand {
    * @param clusterName
    *          the name of the cluster this response is for (
    * @param hostName
+   * @param publicHostName
    * @param hash
    * @param definitions
    *
    * @see AlertDefinitionHash
    */
-  public AlertDefinitionCommand(String clusterName, String hostName,
+  public AlertDefinitionCommand(String clusterName, String hostName, String publicHostName,
       String hash, List<AlertDefinition> definitions) {
     super(AgentCommandType.ALERT_DEFINITION_COMMAND);
 
     m_clusterName = clusterName;
     m_hostName = hostName;
+    m_publicHostName = publicHostName;
     m_hash = hash;
     m_definitions = definitions;
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
index 89ec963..1bc4c36 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartBeatHandler.java
@@ -557,8 +557,10 @@ public class HeartBeatHandler {
           clusterName, hostname);
 
       String hash = alertDefinitionHash.getHash(clusterName, hostname);
+      Host host = cluster.getHost(hostname);
+      String publicHostName = host == null? hostname : host.getPublicHostName();
       AlertDefinitionCommand command = new AlertDefinitionCommand(clusterName,
-          hostname, hash, definitions);
+          hostname, publicHostName, hash, definitions);
 
       command.addConfigs(configHelper, cluster);
       commands.add(command);

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
index 0e4f3f4..f3211bf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
@@ -65,6 +65,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.DesiredConfig;
+import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.Service;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
@@ -459,6 +460,12 @@ public abstract class AbstractProviderModule implements ProviderModule,
   }
 
   @Override
+  public String getPublicHostName(String clusterName, String hostName) {
+    Host host = getHost(clusterName, hostName);
+    return host == null? hostName : host.getPublicHostName();
+  }
+
+  @Override
   public Set<String> getHostNames(String clusterName, String componentName) {
     Set<String> hosts = null;
     try {
@@ -472,6 +479,21 @@ public abstract class AbstractProviderModule implements ProviderModule,
   }
 
   @Override
+  public Host getHost(String clusterName, String hostName) {
+    Host host = null;
+    try {
+      Cluster cluster = managementController.getClusters().getCluster(clusterName);
+      if(cluster != null) {
+        host = cluster.getHost(hostName);
+      }
+    } catch (Exception e) {
+      LOG.warn("Exception in getting host info for jmx metrics: ", e);
+    }
+    return host;
+  }
+
+
+  @Override
   public boolean isCollectorComponentLive(String clusterName, MetricsService service) throws SystemException {
 
     final String collectorHostName = getCollectorHostName(clusterName, service);
@@ -528,12 +550,14 @@ public abstract class AbstractProviderModule implements ProviderModule,
               serviceConfigTypes.get(service)
           );
 
+          String publicHostName = getPublicHostName(clusterName, hostName);
           Map<String, String[]> componentPortsProperties = new HashMap<>();
           componentPortsProperties.put(
               componentName,
               getPortProperties(service,
                   componentName,
                   hostName,
+                  publicHostName,
                   configProperties,
                   httpsEnabled
               )
@@ -553,7 +577,7 @@ public abstract class AbstractProviderModule implements ProviderModule,
             }
           }
 
-          initRpcSuffixes(clusterName, componentName, configType, currVersion, hostName);
+          initRpcSuffixes(clusterName, componentName, configType, currVersion, hostName, publicHostName);
         }
       } catch (Exception e) {
         LOG.error("Exception initializing jmx port maps. ", e);
@@ -575,8 +599,8 @@ public abstract class AbstractProviderModule implements ProviderModule,
   }
 
   /**
-   * Computes properties that contains proper port for {@code componentName} on {@code hostName}. Must contain custom logic
-   * for different configurations(like NAMENODE HA).
+   * Computes the properties that contain the proper port for {@code componentName} on {@code hostName}.
+   * Must contain custom logic for different configurations (like NAMENODE HA).
    * @param service service type
    * @param componentName component name
    * @param hostName host which contains requested component
@@ -584,16 +608,20 @@ public abstract class AbstractProviderModule implements ProviderModule,
    * @param httpsEnabled indicates if https enabled for component
    * @return property name that contain port for {@code componentName} on {@code hostName}
    */
-  String[] getPortProperties(Service.Type service, String componentName, String hostName, Map<String, Object> properties, boolean httpsEnabled) {
+  String[] getPortProperties(Service.Type service, String componentName,
+    String hostName, String publicHostName, Map<String, Object> properties, boolean httpsEnabled) {
     componentName = httpsEnabled ? componentName + "-HTTPS" : componentName;
     if(componentName.startsWith("NAMENODE") && properties.containsKey("dfs.internal.nameservices")) {
       componentName += "-HA";
-      return getNamenodeHaProperty(properties, serviceDesiredProperties.get(service).get(componentName), hostName);
+      return getNamenodeHaProperty(
+        properties, serviceDesiredProperties.get(service).get(componentName), hostName, publicHostName);
     }
     return serviceDesiredProperties.get(service).get(componentName);
   }
 
-  private String[] getNamenodeHaProperty(Map<String, Object> properties, String pattern[], String hostName) {
+  private String[] getNamenodeHaProperty(Map<String, Object> properties, String pattern[],
+    String hostName, String publicHostName) {
+
     // iterate over nameservices and namenodes, to find out namenode http(s) property for concrete host
     for(String nameserviceId : ((String)properties.get("dfs.internal.nameservices")).split(",")) {
       if(properties.containsKey("dfs.ha.namenodes."+nameserviceId)) {
@@ -605,7 +633,8 @@ public abstract class AbstractProviderModule implements ProviderModule,
           );
           if (properties.containsKey(propertyName)) {
             String propertyValue = (String)properties.get(propertyName);
-            if (propertyValue.split(":")[0].equals(hostName)) {
+            String propHostName = propertyValue.split(":")[0];
+            if (propHostName.equals(hostName) || propHostName.equals(publicHostName)) {
               return new String[] {propertyName};
             }
           }
@@ -1181,7 +1210,7 @@ public abstract class AbstractProviderModule implements ProviderModule,
 
   private void initRpcSuffixes(String clusterName, String componentName,
                                String config, String configVersion,
-                               String hostName)
+                               String hostName, String publicHostName)
                               throws Exception {
     if (jmxDesiredRpcSuffixProperties.containsKey(componentName)) {
       Map<String, Map<String, String>> componentToPortsMap;
@@ -1209,7 +1238,7 @@ public abstract class AbstractProviderModule implements ProviderModule,
           keys = jmxDesiredRpcSuffixProperties.get(componentName);
           Map<String, String[]> stringMap = jmxDesiredRpcSuffixProperties.get(componentName);
           for (String tag: stringMap.keySet()) {
-            keys.put(tag, getNamenodeHaProperty(configProperties, stringMap.get(tag), hostName));
+            keys.put(tag, getNamenodeHaProperty(configProperties, stringMap.get(tag), hostName, publicHostName));
           }
         }
       }
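
getNamenodeHaProperty() now treats a dfs.namenode.http-address.<nameservice>.<nn> value as a match when its host part equals either the internal or the public host name. A small sketch of that matching rule in Python, for illustration only (the authoritative code is the Java above):

    def matches_namenode_property(property_value, host_name, public_host_name):
        # property_value looks like "nn-host.example.com:50070"
        prop_host = property_value.split(":")[0]
        return prop_host == host_name or prop_host == public_host_name

    matches_namenode_property("vip-host:50070", "internal-host", "vip-host")  # True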

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java
index cbeea1c..dbf8eb7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXHostProvider.java
@@ -17,6 +17,9 @@
  */
 package org.apache.ambari.server.controller.jmx;
 
+import org.apache.ambari.server.controller.spi.SystemException;
+import org.apache.ambari.server.state.Host;
+
 import java.util.Set;
 
 import org.apache.ambari.server.controller.spi.SystemException;
@@ -26,6 +29,8 @@ import org.apache.ambari.server.controller.spi.SystemException;
  */
 public interface JMXHostProvider {
 
+  String getPublicHostName(String clusterName, String hostName);
+
   /**
    * Get the JMX host names for the given cluster name and component name.
    *
@@ -38,6 +43,14 @@ public interface JMXHostProvider {
   Set<String> getHostNames(String clusterName, String componentName);
 
   /**
+   * Get cluster host info given the host name.
+   * @param clusterName the cluster name
+   * @param hostName the host name
+   * @return the host info {@link Host}
+   */
+  Host getHost(String clusterName, String hostName);
+
+  /**
    * Get the port for the specified cluster name and component.
    *
    * @param clusterName    the cluster name

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java
index 870d1ef..e4de377 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java
@@ -40,6 +40,7 @@ import org.apache.ambari.server.controller.spi.Request;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.SystemException;
 import org.apache.ambari.server.controller.utilities.StreamProvider;
+import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.services.MetricsRetrievalService;
 import org.apache.ambari.server.state.services.MetricsRetrievalService.MetricSourceType;
 import org.slf4j.Logger;
@@ -254,6 +255,8 @@ public class JMXPropertyProvider extends ThreadPoolEnabledPropertyProvider {
     for (String hostName : hostNames) {
       try {
         String port = getPort(clusterName, componentName, hostName, httpsEnabled);
+        String publicHostName = jmxHostProvider.getPublicHostName(clusterName, hostName);
+
         if (port == null) {
           LOG.warn("Unable to get JMX metrics.  No port value for " + componentName);
           return resource;
@@ -268,6 +271,17 @@ public class JMXPropertyProvider extends ThreadPoolEnabledPropertyProvider {
         // check to see if there is a cached value and use it if there is
         JMXMetricHolder jmxMetricHolder = metricsRetrievalService.getCachedJMXMetric(jmxUrl);
 
+        if( jmxMetricHolder == null && !hostName.equalsIgnoreCase(publicHostName)) {
+          // build the URL using public host name
+          String publicJmxUrl = getSpec(protocol, publicHostName, port, "/jmx");
+
+          // always submit a request to cache the latest data
+          metricsRetrievalService.submitRequest(MetricSourceType.JMX, streamProvider, publicJmxUrl);
+
+          // check to see if there is a cached value and use it if there is
+          jmxMetricHolder = metricsRetrievalService.getCachedJMXMetric(publicJmxUrl);
+        }
+
         // if the ticket becomes invalid (timeout) then bail out
         if (!ticket.isValid()) {
           return resource;
@@ -290,6 +304,17 @@ public class JMXPropertyProvider extends ThreadPoolEnabledPropertyProvider {
                 metricsRetrievalService.submitRequest(MetricSourceType.JMX, streamProvider, adHocUrl);
                 JMXMetricHolder adHocJMXMetricHolder = metricsRetrievalService.getCachedJMXMetric(adHocUrl);
 
+                if( adHocJMXMetricHolder == null && !hostName.equalsIgnoreCase(publicHostName)) {
+                  // build the adhoc URL using public host name
+                  String publicAdHocUrl = getSpec(protocol, publicHostName, port, queryURL);
+
+                  // always submit a request to cache the latest data
+                  metricsRetrievalService.submitRequest(MetricSourceType.JMX, streamProvider, publicAdHocUrl);
+
+                  // check to see if there is a cached value and use it if there is
+                  adHocJMXMetricHolder = metricsRetrievalService.getCachedJMXMetric(publicAdHocUrl);
+                }
+
                 // if the ticket becomes invalid (timeout) then bail out
                 if (!ticket.isValid()) {
                   return resource;
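
The JMX collection change follows a cache-then-fallback shape: ask the retrieval service for the metric keyed by the internal-host URL, and only when that misses and a distinct public host name exists, submit and read the public-host URL as well. A minimal sketch of that shape in Python, with submit() and get_cached() as hypothetical stand-ins for the MetricsRetrievalService calls rather than the real API:

    def fetch_jmx(host_name, public_host_name, port, submit, get_cached):
        url = "http://%s:%s/jmx" % (host_name, port)
        submit(url)                      # always refresh the cache for the internal-host URL
        metrics = get_cached(url)
        if metrics is None and host_name.lower() != public_host_name.lower():
            public_url = "http://%s:%s/jmx" % (public_host_name, port)
            submit(public_url)           # fall back to the public (VIP) host name
            metrics = get_cached(public_url)
        return metrics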

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
index b4ebcd8..b4f7120 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/Cluster.java
@@ -133,6 +133,14 @@ public interface Cluster {
    */
   Set<String> getHosts(String serviceName, String componentName);
 
+  /**
+   * Get specific host info using host name.
+   *
+   * @param hostName the host name
+   * @return Host info {@link Host}
+   */
+  Host getHost(String hostName);
+
 
   /**
    * Adds schs to cluster AND persists them

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionHash.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionHash.java b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionHash.java
index a79b05d..15f7048 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionHash.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/alert/AlertDefinitionHash.java
@@ -462,7 +462,7 @@ public class AlertDefinitionHash {
         hostNames.add(host.getHostName());
       }
 
-      enqueueAgentCommands(clusterName, hostNames);
+      enqueueAgentCommands(cluster, clusterName, hostNames);
     } catch (AmbariException ae) {
       LOG.error("Unable to lookup cluster for alert definition commands", ae);
     }
@@ -484,15 +484,16 @@ public class AlertDefinitionHash {
    */
   public void enqueueAgentCommands(long clusterId, Collection<String> hosts) {
     String clusterName = null;
+    Cluster cluster = null;
 
     try {
-      Cluster cluster = m_clusters.get().getClusterById(clusterId);
+      cluster = m_clusters.get().getClusterById(clusterId);
       clusterName = cluster.getClusterName();
     } catch (AmbariException ae) {
       LOG.error("Unable to lookup cluster for alert definition commands", ae);
     }
 
-    enqueueAgentCommands(clusterName, hosts);
+    enqueueAgentCommands(cluster, clusterName, hosts);
   }
 
   /**
@@ -509,7 +510,7 @@ public class AlertDefinitionHash {
    * @param hosts
    *          the hosts to push {@link AlertDefinitionCommand}s for.
    */
-  private void enqueueAgentCommands(String clusterName, Collection<String> hosts) {
+  private void enqueueAgentCommands(Cluster cluster, String clusterName, Collection<String> hosts) {
     if (null == clusterName) {
       LOG.warn("Unable to create alert definition agent commands because of a null cluster name");
       return;
@@ -527,11 +528,12 @@ public class AlertDefinitionHash {
 
         String hash = getHash(clusterName, hostName);
 
+        Host host = cluster.getHost(hostName);
+        String publicHostName = host == null? hostName : host.getPublicHostName();
         AlertDefinitionCommand command = new AlertDefinitionCommand(
-            clusterName, hostName, hash, definitions);
+            clusterName, hostName, publicHostName, hash, definitions);
 
         try {
-          Cluster cluster = m_clusters.get().getCluster(clusterName);
           command.addConfigs(m_configHelper.get(), cluster);
         } catch (AmbariException ae) {
           LOG.warn("Unable to add configurations to alert definition command",

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index a4bf815..06b6217 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -2124,6 +2124,24 @@ public class ClusterImpl implements Cluster {
   }
 
   @Override
+  public Host getHost(final String hostName) {
+    if (StringUtils.isEmpty(hostName)) {
+      return null;
+    }
+
+    Collection<Host> hosts = getHosts();
+    if(hosts != null) {
+      for (Host host : hosts) {
+        String hostString = host.getHostName();
+        if(hostName.equalsIgnoreCase(hostString)) {
+          return host;
+        }
+      }
+    }
+    return null;
+  }
+
+  @Override
   public Collection<Host> getHosts() {
     Map<String, Host> hosts;
 

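The implementation above resolves the host with a case-insensitive linear scan over
getHosts(), which keeps the change small. If these lookups ever became frequent, a map
keyed by the lower-cased host name would be the usual alternative; a sketch of that
variant (illustrative only, not the committed code):

import java.util.Collection;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

import org.apache.ambari.server.state.Host;

// Illustrative alternative: build the lookup table once instead of scanning per call.
final class HostIndex {
  private final Map<String, Host> byName = new HashMap<>();

  HostIndex(Collection<Host> hosts) {
    for (Host host : hosts) {
      byName.put(host.getHostName().toLowerCase(Locale.ENGLISH), host);
    }
  }

  Host get(String hostName) {
    return (hostName == null) ? null : byName.get(hostName.toLowerCase(Locale.ENGLISH));
  }
}
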
http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
index 139fe98..7226d22 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_namenode.py
@@ -115,7 +115,7 @@ def namenode(action=None, hdfs_binary=None, do_format=True, upgrade_type=None,
 
     if params.dfs_ha_enabled and \
       params.dfs_ha_namenode_standby is not None and \
-      params.hostname == params.dfs_ha_namenode_standby:
+      (params.hostname == params.dfs_ha_namenode_standby or params.public_hostname == params.dfs_ha_namenode_standby):
         # if the current host is the standby NameNode in an HA deployment
         # run the bootstrap command, to start the NameNode in standby mode
         # this requires that the active NameNode is already up and running,
@@ -332,7 +332,7 @@ def format_namenode(force=None):
           )
   else:
     if params.dfs_ha_namenode_active is not None and \
-       params.hostname == params.dfs_ha_namenode_active:
+       (params.hostname == params.dfs_ha_namenode_active  or params.public_hostname == params.dfs_ha_namenode_active):
       # check and run the format command in the HA deployment scenario
       # only format the "active" namenode in an HA deployment
       if force:

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_upgrade.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_upgrade.py
index f683dcc..14d6ce2 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_upgrade.py
@@ -47,7 +47,7 @@ def prepare_upgrade_check_for_previous_dir():
 
   if params.dfs_ha_enabled:
     namenode_ha = NamenodeHAState()
-    if namenode_ha.is_active(params.hostname):
+    if namenode_ha.is_active(params.hostname) or namenode_ha.is_active(params.public_hostname):
       Logger.info("NameNode High Availability is enabled and this is the Active NameNode.")
 
       problematic_previous_namenode_dirs = set()

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index 82fd950..a9fc179 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -170,6 +170,7 @@ klist_path_local = get_klist_path(default('/configurations/kerberos-env/executab
 kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 #hosts
 hostname = config["hostname"]
+public_hostname = config["public_hostname"]
 rm_host = default("/clusterHostInfo/rm_host", [])
 slave_hosts = default("/clusterHostInfo/slave_hosts", [])
 oozie_servers = default("/clusterHostInfo/oozie_server", [])
@@ -307,6 +308,9 @@ if dfs_ha_enabled:
     if hostname.lower() in nn_host.lower():
       namenode_id = nn_id
       namenode_rpc = nn_host
+    elif public_hostname.lower() in nn_host.lower():
+      namenode_id = nn_id
+      namenode_rpc = nn_host
   # With HA enabled namenode_address is recomputed
   namenode_address = format('hdfs://{dfs_ha_nameservices}')
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/JMXPropertyProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/JMXPropertyProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/JMXPropertyProviderTest.java
index 7e0c66d..156ee66 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/JMXPropertyProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/metrics/JMXPropertyProviderTest.java
@@ -53,6 +53,7 @@ import org.apache.ambari.server.security.authorization.AuthorizationException;
 import org.apache.ambari.server.security.authorization.AuthorizationHelperInitializer;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.services.MetricsRetrievalService;
 import org.apache.ambari.server.utils.SynchronousThreadPoolExecutor;
 import org.junit.After;
@@ -604,11 +605,19 @@ public class JMXPropertyProviderTest {
       this.unknownPort = unknownPort;
     }
 
+    @Override public String getPublicHostName(final String clusterName, final String hostName) {
+      return null;
+    }
+
     @Override
     public Set<String> getHostNames(String clusterName, String componentName) {
       return null;
     }
 
+    @Override public Host getHost(final String clusterName, final String hostName) {
+      return null;
+    }
+
     @Override
     public String getPort(String clusterName, String componentName, String hostName, boolean httpsEnabled) throws SystemException {
       return getPort(clusterName, componentName, hostName);

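The test double above stubs the two methods added to the host provider,
getPublicHostName(clusterName, hostName) and getHost(clusterName, hostName), by
returning null. A production provider would presumably delegate to the Clusters/Cluster
API touched earlier in this commit; a hedged sketch of that delegation (class and field
names are illustrative, not Ambari's implementation):

import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.state.Cluster;
import org.apache.ambari.server.state.Clusters;
import org.apache.ambari.server.state.Host;

// Sketch only: resolves host details through cluster state rather than stubbing them.
final class ClusterBackedHostLookup {
  private final Clusters clusters;

  ClusterBackedHostLookup(Clusters clusters) {
    this.clusters = clusters;
  }

  Host getHost(String clusterName, String hostName) {
    try {
      Cluster cluster = clusters.getCluster(clusterName);
      return cluster.getHost(hostName);
    } catch (AmbariException e) {
      return null;   // unknown cluster: no host information available
    }
  }

  String getPublicHostName(String clusterName, String hostName) {
    Host host = getHost(clusterName, hostName);
    return (host == null) ? null : host.getPublicHostName();
  }
}
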
http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java
index e6e288e..4895d82 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/alerts/AlertDefinitionHashTest.java
@@ -379,10 +379,10 @@ public class AlertDefinitionHashTest extends TestCase {
     ActionQueue actionQueue = m_injector.getInstance(ActionQueue.class);
 
     AlertDefinitionCommand definitionCommand1 = new AlertDefinitionCommand(
-        CLUSTERNAME, HOSTNAME, "12345", null);
+        CLUSTERNAME, HOSTNAME, HOSTNAME, "12345", null);
 
     AlertDefinitionCommand definitionCommand2 = new AlertDefinitionCommand(
-        CLUSTERNAME, "anotherHost", "67890", null);
+        CLUSTERNAME, "anotherHost", "anotherHost", "67890", null);
 
     AlertExecutionCommand executionCommand = new AlertExecutionCommand(
         CLUSTERNAME, HOSTNAME, null);

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
index 96f4d9d..df09021 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
@@ -36,7 +36,7 @@
         "script_type": "PYTHON"
     }, 
     "taskId": 93, 
-    "public_hostname": "c6401.ambari.apache.org", 
+    "public_hostname": "c6402.ambari.apache.org",
     "configurations": {
         "mapred-site": {
             "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
index de2742f..a0a8f36 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
@@ -37,7 +37,7 @@
         "phase": "INITIAL_START"
     }, 
     "taskId": 93, 
-    "public_hostname": "c6401.ambari.apache.org", 
+    "public_hostname": "c6402.ambari.apache.org",
     "configurations": {
         "mapred-site": {
             "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4d7cc7f3/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start_dfs_nameservices.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start_dfs_nameservices.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start_dfs_nameservices.json
index ba0fa8f..a3176bd 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start_dfs_nameservices.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start_dfs_nameservices.json
@@ -37,7 +37,7 @@
         "phase": "INITIAL_START"
     }, 
     "taskId": 93, 
-    "public_hostname": "c6401.ambari.apache.org", 
+    "public_hostname": "c6402.ambari.apache.org",
     "configurations": {
         "mapred-site": {
             "mapreduce.jobhistory.address": "c6402.ambari.apache.org:10020", 


[18/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/python/upgradeHelper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/upgradeHelper.py b/ambari-server/src/main/python/upgradeHelper.py
deleted file mode 100644
index 31aa721..0000000
--- a/ambari-server/src/main/python/upgradeHelper.py
+++ /dev/null
@@ -1,2338 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-"""
-Upgrade catalog file format description:
-
-
-Format version 1.0
-
-Global section description:
-  STACKNAME - name of stack, for example HDP
-  OLDVERSION - version of stack from which upgrade should be done, used by fromStack script argument
-  NEWVERSION - version of stack to which upgrade should be done, used by toStack script argument
-
-Sub-section options:
-  config-types - contains global per-config settings
-    merged-copy - would merge latest server properties with properties defined in "properties" section,
-                  without this option server properties would be rewritten by properties defined in "properties" section
-    required-services - properties from json catalog would be processed only if desired services are present on the cluster
-                        property level definition will always override catalog level definition.
-
-Sub-section properties - Contains property definition
-Sub-section property-mapping(optional) - contains mapping of property names in case, if some property changed their name in NEWVERSION
-
-Example:
-
-{
-  "version": "1.0",
-  "stacks": [
-    {
-      "name": "STACKNAME",
-      "old-version": "OLDVERSION",
-      "target-version": "NEWVERSION",
-      "options": {
-        "config-types": {
-          "CONFIGTYPE1": {
-            "merged-copy": "yes",
-            "required-services": ["HDFS"]
-          }
-        }
-      },
-      "properties": {
-        "CONFIGTYPE1": {
-          "some_property": "some property value",
-          "some_second_property: {
-             "remove": "yes"
-          },
-          "template_property": {
-           "value": "{TEMPLATE_TAG}",
-           "template": "yes",
-           "required-services": ["HDFS", "YARN"]
-          },
-          "test_property": {
-           "value": "new value",
-           "override: "no", (optional, override already existed property yes/no. Default: yes)
-           "value-required": "old value",  (optional, property would be set if the required value is present)
-           "can-create": "no", (optional, process property only if that property present on the server.
-                                         i.e. ability to create new property. Default: yes)
-           "required-services": ["HDFS", "YARN"],  (optional, process property only if selected services existed)
-           "resolve-dependency": "no" (optional, use Stack Advisor to get depended properties changes. Default: no)
-          }
-        }
-      },
-      "property-mapping": {
-        "old-property-name": "new-property-name", (short form, equal to "old-property-name": { "map-to": "new-property-name" })
-        "old-property1-name": {    (usually key is an name of the property which need to be mapped, but in case of same
-                                     property should be set to unique name and "map-from" option used instead)
-          "map-from": "old property name", (optional, define property name which should be mapped)
-          "map-to": "new_property1_name", (optional, new property name. If not set, would be used old property name)
-          "from-catalog": "test",        (optional, require "to-catalog. Source of old-property1-name)
-          "to-catalog": "test",          (optional, require "from-catalog. Target of new_property1_name)
-          "default": "default value",    (optional, if set and old property not exists, new one would be created with default value)
-          "template": "yes",             (optional, template parsing for default option)
-          "coerce-to": "pre-defined type", (optional, convert value from one type to another. Types supported:
-                                              yaml-array - converts string item1,item2 to  ['item1', 'item2']
-                                            )
-          "replace-from": "something", (optional, should be present both from and to. Replace 'from' value to 'to')
-          "replace-to": "something,
-          "required-services": ["YARN"],  (optional, process entry if services in the list existed on the cluster
-      }
-     }
-    }
-  ]
-}
-
-More examples available in ambari-server/src/main/resources/upgrade/catalog/
-"""
-
-import getpass
-import optparse
-from pprint import pprint
-import re
-import sys
-import os.path
-import logging
-import time
-import base64
-from urllib2 import HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, Request, build_opener, URLError, HTTPError
-
-try:
-  # try to import new simplejson version, which should be faster than outdated python 2.6 version
-  import ambari_simplejson as json
-except ImportError:
-  import json
-
-
-# ==============================
-#    Error classes definition
-# ==============================
-class FatalException(Exception):
-  def __init__(self, code, reason):
-    self.code = code
-    self.reason = reason
-
-  def __str__(self):
-    return repr("Fatal exception: %s, exit code %s" % (self.reason, self.code))
-
-  def _get_message(self):
-    return str(self)
-
-
-class ReadOnlyPropertyException(Exception):
-  def __str__(self):
-    return "Property is read-only"
-
-  def _get_message(self):
-    return self.__str__()
-
-
-class NotSupportedCatalogVersion(Exception):
-  def __init__(self, catalog_version):
-    self._version = catalog_version
-
-  def __str__(self):
-    return "Version %s of loaded catalog not supported" % self._version
-
-  def _get_message(self):
-    return self.__str__()
-
-  message = property(__str__)
-
-
-class CatalogNotFoundException(Exception):
-  pass
-
-
-class TemplateProcessingException(Exception):
-  pass
-
-
-class CatalogExistException(Exception):
-  pass
-
-
-class PropertyNotFoundException(Exception):
-  pass
-
-
-class StackNotFoundException(Exception):
-  pass
-
-
-class MalformedPropertyDefinitionException(Exception):
-  pass
-
-
-# ==============================
-#    Constant class definition
-# ==============================
-class Const(object):
-  def __new__(cls, *args, **kwargs):
-    raise Exception("Class couldn't be created")
-
-
-class Options(Const):
-  # action commands
-  API_PROTOCOL = "http"
-  API_PORT = "8080"
-
-  GET_MR_MAPPING_ACTION = "save-mr-mapping"
-  VERIFY_ACTION = "verify"
-  DELETE_MR_ACTION = "delete-mr"
-  ADD_YARN_MR2_ACTION = "add-yarn-mr2"
-  MODIFY_CONFIG_ACTION = "update-configs"
-  BACKUP_CONFIG_ACTION = "backup-configs"
-  INSTALL_YARN_MR2_ACTION = "install-yarn-mr2"
-
-  MR_MAPPING_FILE = "mr_mapping"
-  CAPACITY_SCHEDULER_TAG = "capacity-scheduler"
-  REPLACE_JH_HOST_NAME_TAG = "REPLACE_JH_HOST"
-  REPLACE_RM_HOST_NAME_TAG = "REPLACE_RM_HOST"
-  REPLACE_WITH_TAG = "REPLACE_WITH_"
-  PHOENIX_QUERY_SERVER = "PHOENIX_QUERY_SERVER"
-  ZK_OPTIONS = "zoo.cfg"
-  KAFKA_BROKER_CONF = "kafka-broker"
-  RANGER_ADMIN = "admin-properties"
-  RANGER_USERSYNC = "usersync-properties"
-  RANGER_ENV = "ranger-env"
-  KAFKA_PORT = "port"
-  RANGER_EXTERNAL_URL = "policymgr_external_url"
-  ZK_CLIENTPORT = "clientPort"
-  DELETE_OLD_TAG = "DELETE_OLD"
-
-  ZOOKEEPER_SERVER = "ZOOKEEPER_SERVER"
-  KAFKA_BROKER = "KAFKA_BROKER"
-  NAMENODE = "NAMENODE"
-
-  MR_MAPPING = None
-  logger = None
-  server_config_factory = None
-  """:type : ServerConfigFactory"""
-  stack_advisor = None
-  """:type : StackAdvisor"""
-  ambari_server = None
-  """:type : AmbariServer"""
-
-  # Api constants
-  ROOT_URL = None
-  CLUSTER_URL = None
-  COMPONENTS_FORMAT = None
-  TEZ_VIEW_URL = None
-
-  # Curl options
-  CURL_PRINT_ONLY = None
-  CURL_WRITE_ONLY = None
-
-  ARGS = None
-  OPTIONS = None
-  HOST = None
-  CLUSTER_NAME = None
-
-  # for verify action
-  REPORT_FILE = None
-
-  SERVICES = []
-
-  API_TOKENS = {
-    "user": None,
-    "pass": None
-  }
-
-  HEADERS = {
-    'X-Requested-By': 'upgradeHelper'
-  }
-
-  @classmethod
-  def initialize(cls):
-    cls.ROOT_URL = '%s://%s:%s/api/v1' % (cls.API_PROTOCOL, cls.HOST, cls.API_PORT)
-    cls.CLUSTER_URL = cls.ROOT_URL + "/clusters/%s" % cls.CLUSTER_NAME
-    cls.COMPONENTS_URL = cls.CLUSTER_URL + "/components?fields=ServiceComponentInfo/total_count"
-    cls.COMPONENTS_FORMAT = cls.CLUSTER_URL + "/components/{0}"
-    cls.TEZ_VIEW_URL = cls.ROOT_URL + "/views/TEZ"
-    cls.STACKS_URL = cls.ROOT_URL + "/stacks"
-    cls.STACKS_VERSIONS_URL = cls.STACKS_URL + "/{0}/versions"
-    cls.STACK_ADVISOR_URL = cls.STACKS_VERSIONS_URL + "/{1}/recommendations"
-    cls.AMBARI_SERVER_URL = cls.ROOT_URL + "/services/AMBARI/components/AMBARI_SERVER"
-    cls.AMBARI_AGENTS_URL = cls.ROOT_URL + "/services/AMBARI/components/AMBARI_AGENT"
-    if cls.CLUSTER_NAME is not None and cls.HOST is not None:
-      cls.SERVICES = set(map(lambda x: x.upper(), get_cluster_services()))
-
-    cls.ambari_server = AmbariServer()
-    if not cls.isPropertyAttributesSupported():
-      cls.logger.warning("Property attributes not supported by current Ambari version")
-
-  @classmethod
-  def isPropertyAttributesSupported(cls):
-    if cls.ambari_server.server_version[0] * 10 + cls.ambari_server.server_version[1] >= 17:
-      return True
-    return False
-
-  @classmethod
-  def initialize_logger(cls, filename=None):
-    cls.logger = logging.getLogger('UpgradeHelper')
-    cls.logger.setLevel(logging.DEBUG)
-
-    if filename is not None:
-      handler = logging.FileHandler(filename)
-      handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s - %(message)s'))
-      cls.logger.addHandler(handler)
-      cls.logger.info("")
-      cls.logger.info("Start new logging section")
-
-    handler = logging.StreamHandler(sys.stdout)
-    handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
-    cls.logger.addHandler(handler)
-
-
-class CatConst(Const):
-  VERSION_TAG = "version"
-  STACK_VERSION_OLD = "old-version"
-  STACK_VERSION_TARGET = "target-version"
-  STACK_STAGS_TAG = "stacks"
-  STACK_NAME = "name"
-  CONFIG_OPTIONS = "options"
-  CONFIG_TYPES = "config-types"
-  STACK_PROPERTIES = "properties"
-  STACK_PROPERTIES_ATTRIBUTES = "properties_attributes"
-  PROPERTY_VALUE_TAG = "value"
-  VERSIONS_TAG = "versions"
-  PROPERTY_REMOVE_TAG = "remove"
-  PROPERTY_MAP_TO = "map-to"
-  PROPERTY_MAP_FROM = "map-from"
-  PROPERTY_FROM_CATALOG = "from-catalog"
-  PROPERTY_TO_CATALOG = "to-catalog"
-  PROPERTY_DEFAULT = "default"
-  MERGED_COPY_TAG = "merged-copy"
-  REQUIRED_SERVICES = "required-services"
-  COERCE_TO_PROPERTY_TAG = "coerce-to"
-  RESOLVE_DEPENDENCY_TAG = "resolve-dependency"
-  COERCE_YAML_OPTION_TAG = "yaml-array"
-  REPLACE_FROM_TAG = "replace-from"
-  REPLACE_TO_TAG = "replace-to"
-  OVERRIDE_TAG = "override"
-  ITEMS_TAG = "items"
-  TYPE_TAG = "type"
-  TRUE_TAG = "yes"
-  VALUE_REQUIRED_TAG = "value-required"
-  PROPERTY_CAN_CREATE_TAG = "can-create"
-  STACK_PROPERTIES_MAPPING_LIST_TAG = "property-mapping"
-  VALUE_TEMPLATE_TAG = "template"
-  SEARCH_PATTERN = "(\{[^\{\}]+\})"  # {XXXXX}
-  ACTION_COMMIT = "commit"
-  ACTION_RELOAD = "reload"
-  ACTION_RENAME_PROPERTY = "rename-property"
-  TEMPLATE_HANDLER = "template_handler"
-
-
-# ==============================
-#    Catalog classes definition
-# ==============================
-
-class AmbariServer(object):
-  def __init__(self):
-    Options.logger.info("Resolving Ambari server configuration ...")
-    self._get_server_info()
-    self._get_agents_info()
-    self._get_components()
-
-  def _get_components(self):
-    info = curl(Options.COMPONENTS_URL, parse=True)
-    self._components = []
-    if CatConst.ITEMS_TAG in info:
-      for item in info[CatConst.ITEMS_TAG]:
-        if "ServiceComponentInfo" in item and "total_count" in item["ServiceComponentInfo"] and \
-          int(item["ServiceComponentInfo"]["total_count"]) > 0 and "component_name" in item["ServiceComponentInfo"]:
-          self._components.append(item["ServiceComponentInfo"]["component_name"])
-
-  def _get_server_info(self):
-    info = curl(Options.AMBARI_SERVER_URL, parse=True)
-    self._server_version = [0, 0, 0]
-
-    if "RootServiceComponents" in info:
-      server_props = info["RootServiceComponents"]
-      ver = server_props["component_version"] if "component_version" in server_props else None
-      try:
-        self._server_version = list(map(lambda x: int(x), ver.split(".")))
-      except ValueError:
-        pass
-
-  def _get_agents_info(self):
-    info = curl(Options.AMBARI_AGENTS_URL, parse=True)
-    self._agents = []
-    if "hostComponents" in info:
-      agent_props = info["hostComponents"]
-      self._agents = list(map(lambda x: x["RootServiceHostComponents"]["host_name"], agent_props))
-
-  @property
-  def components(self):
-    return self._components
-
-  @property
-  def server_version(self):
-    return self._server_version
-
-  @property
-  def agent_hosts(self):
-    return self._agents
-
-class StackAdvisorFactory(object):
-  def __init__(self):
-    self._stack_info = self._load_stack_info()
-
-  def _load_stack_versions(self, stack):
-    versions = curl(Options.STACKS_VERSIONS_URL.format(stack), parse=True)
-    if CatConst.ITEMS_TAG in versions:
-      versions = list(map(lambda x: x["Versions"]["stack_version"], versions[CatConst.ITEMS_TAG]))
-
-    return versions
-
-  def _load_stack_info(self):
-    stacks = curl(Options.STACKS_URL, parse=True)
-    if CatConst.ITEMS_TAG in stacks:
-      stacks = list(map(lambda x: x["Stacks"]["stack_name"], stacks["items"]))
-    else:
-      stacks = {}
-
-    stacks_dict = {}
-
-    for stack in stacks:
-      stacks_dict[stack] = self._load_stack_versions(stack)
-
-    return stacks_dict
-
-  def get_instance(self, stack, version):
-    sversion = Options.ambari_server.server_version
-    if sversion[0] * 10 + sversion[1] < 21:
-      Options.logger.warning("Ambari server version \"%s.%s.%s\" doesn't support property dependencies suggestion" %
-                             (sversion[0], sversion[1], sversion[2]))
-      return BaseStackAdvisor(stack, version)
-
-    if stack in self._stack_info and version in self._stack_info[stack]:
-      return StackAdvisor(stack, version)
-    else:
-      raise StackNotFoundException("Stack %s-%s not exist on the server" % (stack, version))
-
-class StackAdvisorRequestProperty(object):
-  def __init__(self, catalog, property_name):
-    self._catalog = catalog
-    self._property_name = property_name
-
-  @property
-  def catalog(self):
-    return self._catalog
-
-  @property
-  def name(self):
-    return self._property_name
-
-  def get_json(self):
-    return {
-      "type": self.catalog,
-      "name": self.name
-    }
-
-
-class BaseStackAdvisor(object):
-  def __init__(self, stack, version):
-    self._req_url = Options.STACK_ADVISOR_URL.format(stack, version)
-
-  def get_suggestion(self, cfg_factory, changed_properties):
-    return {}
-
-
-class StackAdvisor(BaseStackAdvisor):
-  def __init__(self, stack, version):
-    super(StackAdvisor, self).__init__(stack, version)
-
-  def _transform_properties(self, cfg_factory):
-    """
-    Transform properties list to blueprint output format
-    :type cfg_factory: ServerConfigFactory
-    :rtype dict
-    """
-    props = cfg_factory.get_json()
-    for cfg in props:
-      props[cfg] = {
-        "properties": props[cfg]
-      }
-
-    return props
-
-  def _from_blueprint_properties_transform(self, props):
-    """
-    Transform SA response to dict
-    """
-    for p in props:
-      rprop = {}
-      if "properties" in props[p] and props[p]["properties"] is not None:
-        rprop = props[p]["properties"]
-      if "property_attributes" in props[p]:
-        for property_attribute in props[p]["property_attributes"]:
-          if "delete" in props[p]["property_attributes"][property_attribute] and \
-            props[p]["property_attributes"][property_attribute]["delete"] == "true":
-            rprop[property_attribute] = None
-
-      props[p] = rprop
-
-    return props
-
-  def _generate_req_properties(self, properties):
-    rlist = []
-    for item in properties:
-      if isinstance(item, StackAdvisorRequestProperty):
-        rlist.append(item.get_json())
-    return rlist
-
-  def get_suggestion(self, cfg_factory, changed_properties):
-    """
-    :type cfg_factory: ServerConfigFactory
-    :type catalog_name str
-    :type changed_properties: list
-    :rtype dict
-    """
-    request = {
-      "recommend": "configuration-dependencies",
-      "hosts": Options.ambari_server.agent_hosts,
-      "services": list(Options.SERVICES),
-      "changed_configurations": self._generate_req_properties(changed_properties),
-      "recommendations": {
-        "blueprint": {
-          "host_groups": [],
-          "configurations": self._transform_properties(cfg_factory),
-          "blueprint_cluster_binding": {}
-        }
-      }
-    }
-    response = curl(self._req_url, request_type="POST", data=request, parse=True)
-    if "resources" in response and isinstance(response["resources"], list) and len(response["resources"]) > 0:
-      response = response["resources"][0]
-      if "recommendations" in response and "blueprint" in response["recommendations"] and \
-        "configurations" in response["recommendations"]["blueprint"]:
-        return self._from_blueprint_properties_transform(response["recommendations"]["blueprint"]["configurations"])
-
-    return {}
-
-
-
-class UpgradeCatalogFactory(object):
-  # versions of catalog which is currently supported
-  _supported_catalog_versions = ["1.0"]
-
-  # private variables
-  _json_catalog = None
-
-  def __init__(self, path):
-    self._load(path)
-
-  def _load(self, path):
-    f = None
-    try:
-      f = open(path, 'r')
-      json_string = f.read()
-      self._json_catalog = json.loads(json_string)
-      self._parse_upgrade_catalog()
-    except IOError as e:
-      raise FatalException(e.errno, "Couldn't open upgrade catalog file %s: %s" % (path, e.strerror))
-    except NotSupportedCatalogVersion as e:
-      raise FatalException(1, e.message)
-    except ValueError as e:
-      raise FatalException(1, "Malformed upgrade catalog: %s" % e.message)
-    finally:
-      try:
-        if f is not None:
-          f.close()
-      except IOError as e:
-        pass
-
-  def _parse_upgrade_catalog(self):
-    catalog_version = None
-    if CatConst.VERSION_TAG in self._json_catalog:
-      catalog_version = self._json_catalog[CatConst.VERSION_TAG]
-
-    if catalog_version is None or catalog_version not in self._supported_catalog_versions:
-      raise NotSupportedCatalogVersion(str(catalog_version))
-
-  def get_catalog(self, from_version=None, to_version=None):
-    search_version = {
-      CatConst.STACK_VERSION_OLD: from_version,
-      CatConst.STACK_VERSION_TARGET: to_version
-    }
-
-    for stack in self._json_catalog[CatConst.STACK_STAGS_TAG]:
-      version = {
-        CatConst.STACK_VERSION_OLD: stack[CatConst.STACK_VERSION_OLD],
-        CatConst.STACK_VERSION_TARGET: stack[CatConst.STACK_VERSION_TARGET]
-      }
-      if version == search_version:
-        return UpgradeCatalog(catalog=stack, version=version)
-
-    return None
-
-
-class UpgradeCatalog(object):
-  # private variables
-  _json_catalog = None
-  _properties_catalog = None
-  _properties_map_catalog = None
-  _version = None
-  _search_pattern = None
-  _catalog_options = None
-
-  def __init__(self, catalog=None, version=None):
-    self._handlers = {}
-    self._json_catalog = catalog
-    self._version = version
-    self._search_pattern = re.compile(CatConst.SEARCH_PATTERN)
-
-    if CatConst.STACK_PROPERTIES in catalog:
-      self._properties_catalog = self._format_catalog_properties(catalog[CatConst.STACK_PROPERTIES])
-
-    if CatConst.STACK_PROPERTIES_MAPPING_LIST_TAG in catalog:
-      self._properties_map_catalog = PropertyMapping(catalog[CatConst.STACK_PROPERTIES_MAPPING_LIST_TAG])
-    else:
-      self._properties_map_catalog = PropertyMapping()
-
-    if catalog is not None and CatConst.CONFIG_OPTIONS in catalog \
-            and CatConst.CONFIG_TYPES in catalog[CatConst.CONFIG_OPTIONS]:
-      self._catalog_options = catalog[CatConst.CONFIG_OPTIONS]
-
-  def add_handler(self, name, handler):
-    if name not in self._handlers:
-      self._handlers[name] = handler
-
-  def _format_catalog_properties(self, properties):
-    """
-    Transform properties from short form to normal one:
-    "property": "text" => "property": { "value": "text" }
-    :param properties: dict
-    :return: dict
-    """
-    for config_item in properties:
-      cfg_item = properties[config_item]
-
-      """
-        case when "properties": {
-                        "yarn-site": {
-                            .....
-                         }
-                     }
-        is set like "properties": {
-           "yarn-site": ""
-        }
-      """
-      if not isinstance(cfg_item, dict):
-        raise MalformedPropertyDefinitionException("The property catalog '%s' definition error" % config_item)
-
-      properties[config_item] = dict(zip(
-        cfg_item.keys(),
-        map(lambda x: x if isinstance(x, dict) or isinstance(x, list) else {CatConst.PROPERTY_VALUE_TAG: x}, cfg_item.values())
-      ))
-    return properties
-
-  @property
-  def version(self):
-    return "%s-%s" % (self._version[CatConst.STACK_VERSION_OLD], self._version[CatConst.STACK_VERSION_TARGET])
-
-  @property
-  def target_version(self):
-    return self._version[CatConst.STACK_VERSION_TARGET]
-
-  @property
-  def source_version(self):
-    return self._version[CatConst.STACK_VERSION_OLD]
-
-  def get_parsed_version(self):
-    """
-     Get numeric representation of the version for comparation purposes
-
-     Example:
-       1.3-2.1 will be represented as { from: 13, to: 21 }
-
-    :return: Numeric version
-    """
-    v_from = self._version[CatConst.STACK_VERSION_OLD].split(".")
-    v_to = self._version[CatConst.STACK_VERSION_TARGET].split(".")
-    try:
-      v_from = int(v_from[0]) * 10 + int(v_from[1])
-      v_to = int(v_to[0]) * 10 + int(v_to[1])
-    except ValueError:
-      v_from = 0
-      v_to = 0
-
-    version = {
-      "from": v_from,
-      "to": v_to
-    }
-
-    return version
-
-  @property
-  def name(self):
-    if CatConst.STACK_NAME in self._json_catalog:
-      return self._json_catalog[CatConst.STACK_NAME]
-    return ""
-
-  @property
-  def mapping(self):
-    return self._properties_map_catalog
-
-  @property
-  def items(self):
-    return self._properties_catalog
-
-  @property
-  def options(self):
-    if CatConst.CONFIG_TYPES in self._catalog_options:
-      return self._catalog_options[CatConst.CONFIG_TYPES]
-    return {}
-
-  @property
-  def action_handlers(self):
-    return self._handlers
-
-  @property
-  def tag_search_pattern(self):
-    return self._search_pattern
-
-  def __handle_remove_tag(self, name, catalog_item_name, catalog_property_item, properties):
-    """
-    :type name str
-    :type catalog_item_name str
-    :type catalog_property_item dict
-    :type properties dict
-    """
-    if CatConst.PROPERTY_REMOVE_TAG in catalog_property_item and \
-                    catalog_property_item[CatConst.PROPERTY_REMOVE_TAG] == CatConst.TRUE_TAG and \
-                    catalog_item_name in properties:
-      del properties[catalog_item_name]
-
-  def __handle_template_tag_sub(self, catalog_item_name, catalog_property_item):
-    """
-    :type catalog_item_name str
-    :type catalog_property_item dict
-    """
-    if CatConst.TEMPLATE_HANDLER in self._handlers and self._handlers is not None and \
-                    CatConst.VALUE_TEMPLATE_TAG in catalog_property_item and catalog_property_item[
-      CatConst.VALUE_TEMPLATE_TAG] == CatConst.TRUE_TAG:
-      try:
-        parsed_value = self._handlers[CatConst.TEMPLATE_HANDLER](
-          self,
-          self._search_pattern.findall(catalog_property_item[CatConst.PROPERTY_VALUE_TAG]),
-          catalog_property_item[CatConst.PROPERTY_VALUE_TAG]
-        )
-        catalog_property_item[CatConst.PROPERTY_VALUE_TAG] = parsed_value
-      except TemplateProcessingException:
-        pass
-
-  def __handle_add_new(self, name, catalog_item_name, catalog_property_item, properties):
-    """
-    :type name str
-    :type catalog_item_name str
-    :type catalog_property_item dict
-    :type properties dict
-    """
-    catalog_property_item = dict(catalog_property_item)
-    can_add_new = not (CatConst.PROPERTY_CAN_CREATE_TAG in catalog_property_item and
-                       catalog_property_item[CatConst.PROPERTY_CAN_CREATE_TAG].upper() == "NO")
-    if CatConst.PROPERTY_VALUE_TAG in catalog_property_item and catalog_item_name not in properties and can_add_new:
-      self.__handle_template_tag_sub(catalog_item_name, catalog_property_item)
-      properties[catalog_item_name] = catalog_property_item[CatConst.PROPERTY_VALUE_TAG]
-
-  def __handle_change_existing(self, name, catalog_item_name, catalog_property_item, properties):
-    """
-    :type name str
-    :type catalog_item_name str
-    :type catalog_property_item dict
-    :type properties dict
-    """
-    catalog_property_item = dict(catalog_property_item)
-    can_override = True
-
-    if CatConst.OVERRIDE_TAG in catalog_property_item and catalog_property_item[CatConst.OVERRIDE_TAG] != CatConst.TRUE_TAG:
-      can_override = False
-
-    if CatConst.PROPERTY_VALUE_TAG in catalog_property_item and catalog_item_name in properties and can_override:
-      self.__handle_template_tag_sub(catalog_item_name, catalog_property_item)
-      properties[catalog_item_name] = catalog_property_item[CatConst.PROPERTY_VALUE_TAG]
-    return properties
-
-  def __handle_dependency_tag(self, name, catalog_item_name, catalog_property_item, properties):
-    """
-    :type name str
-    :type catalog_item_name str
-    :type catalog_property_item dict
-    :type properties dict
-    """
-    if CatConst.RESOLVE_DEPENDENCY_TAG in catalog_property_item and \
-        catalog_property_item[CatConst.RESOLVE_DEPENDENCY_TAG] == CatConst.TRUE_TAG:
-      sa_suggestions = Options.stack_advisor.get_suggestion(Options.server_config_factory,
-                                                            [StackAdvisorRequestProperty(name, catalog_item_name)])
-      for sa_catalog in sa_suggestions:
-        # create new config group if not existed
-        if sa_catalog not in Options.server_config_factory.items():
-          Options.server_config_factory.create_config(sa_catalog)
-
-        catalog_properties = Options.server_config_factory.get_config(sa_catalog).properties
-        for sa_property in sa_suggestions[sa_catalog]:
-          if sa_suggestions[sa_catalog][sa_property] is None and sa_property in catalog_properties:
-            print "rem %s:%s" % (sa_catalog, sa_property)
-            del catalog_properties[sa_property]
-          elif sa_suggestions[sa_catalog][sa_property] is not None:
-            catalog_properties[sa_property] = sa_suggestions[sa_catalog][sa_property]
-
-
-  def __can_handler_execute(self, catalog_options, catalog_property_item, property_item, properties):
-    """
-    :type catalog_options dict
-    :type catalog_property_item str
-    :type property_item dict
-    :type properties dict
-    """
-    can_process = True
-
-    # process required services tag
-    required_list = None
-
-    if CatConst.REQUIRED_SERVICES in catalog_options and catalog_options[CatConst.REQUIRED_SERVICES] is not None and \
-            isinstance(catalog_options[CatConst.REQUIRED_SERVICES], list):
-      required_list = catalog_options[CatConst.REQUIRED_SERVICES]
-
-    if CatConst.REQUIRED_SERVICES in property_item and property_item[CatConst.REQUIRED_SERVICES] is not None and\
-            isinstance(property_item[CatConst.REQUIRED_SERVICES], list):
-      required_list = property_item[CatConst.REQUIRED_SERVICES]
-
-    if required_list is not None:
-      can_process = can_process and is_services_exists(required_list)
-
-    if CatConst.VALUE_REQUIRED_TAG in property_item and property_item[CatConst.VALUE_REQUIRED_TAG] is not None and\
-      CatConst.PROPERTY_VALUE_TAG in property_item and catalog_property_item in properties:
-      can_process = properties[catalog_property_item] == property_item[CatConst.VALUE_REQUIRED_TAG]
-
-    return can_process
-
-  def process_simple_transformations(self, name, properties):
-    """
-    :type properties dict
-    :type name str
-    """
-    tag_handlers = [
-      self.__handle_add_new,
-      self.__handle_change_existing,
-      self.__handle_dependency_tag,
-      self.__handle_remove_tag
-    ]
-    # catalog has no update entries for this config group
-    if name not in self._properties_catalog:
-      return 0
-
-    catalog_item = self._properties_catalog[name]
-    for catalog_property_item in catalog_item.keys():
-      catalog_options = self.options[name] if name in self.options else {}
-      if self.__can_handler_execute(catalog_options, catalog_property_item, catalog_item[catalog_property_item], properties):
-        for handler in tag_handlers:
-          handler(name, catalog_property_item, catalog_item[catalog_property_item], properties)
-
-
-class PropertyMapping(object):
-  _mapping_list = {}
-
-  def __init__(self, map_list=None):
-    if map_list is not None:
-      self._mapping_list = self._convert_list(map_list)
-
-  def _convert_list(self, map_list):
-    return dict(zip(
-      map_list.keys(),
-      map(lambda x: x if isinstance(x, dict) else {CatConst.PROPERTY_MAP_TO: x}, map_list.values())
-    ))
-
-  def get(self, old_property_name):
-    """
-    Get property mapping dict
-    :old_property_name str
-    :return dict
-    """
-    if old_property_name in self._mapping_list:
-      return self._mapping_list[old_property_name]
-
-    raise PropertyNotFoundException("Property %s from property mapping section not found" % old_property_name)
-
-  def list(self):
-    return self._mapping_list.keys()
-
-  def get_mapped_name(self, old_property_name):
-    if CatConst.PROPERTY_MAP_TO not in self.get(old_property_name):
-      raise MalformedPropertyDefinitionException("%s option is not set for %s property" %
-                                                 (CatConst.PROPERTY_MAP_TO, old_property_name))
-    return self.get(old_property_name)[CatConst.PROPERTY_MAP_TO]
-
-  def exists(self, old_property_name):
-    return old_property_name in self._mapping_list
-
-
-class ServerConfigFactory(object):
-  def __init__(self):
-    self.__observers = []
-    self._server_catalogs = {}
-    self._load_configs()
-
-  def subscribe(self, name, config_item):
-    self.__observers.append((name, config_item))
-
-  def _load_configs(self):
-    Options.logger.info('Getting latest cluster configuration from the server...')
-    new_configs = get_config_resp_all()
-    for config_item in new_configs:
-      if config_item in self._server_catalogs:
-        self.notify_observer(config_item, CatConst.ACTION_RELOAD, new_configs[config_item])
-      else:
-        self._server_catalogs[config_item] = ServerConfig(self, config_item, new_configs[config_item])
-
-  def notify_observers(self, action, arg=None):
-    for name, config_item in self.__observers:
-      if config_item is not None and name in self._server_catalogs:
-        config_item.notify(action, arg)
-
-  def notify_observer(self, _name, action, arg=None):
-    for name, config_item in self.__observers:
-      if config_item is not None and name == _name and name in self._server_catalogs:
-        config_item.notify(action, arg)
-
-  def __str__(self):
-    catalogs = {}
-    for cfg in self._server_catalogs:
-      catalogs[cfg] = str(self._server_catalogs[cfg])
-
-    return json.dumps(catalogs)
-
-  def get_json(self):
-    catalogs = {}
-    for cfg in self._server_catalogs:
-      catalogs[cfg] = self._server_catalogs[cfg].properties
-
-    return catalogs
-  def get_config(self, name):
-    """
-    Get configuration item object
-    :type name str
-    :rtype: ServerConfig
-    """
-    if name in self._server_catalogs:
-      return self._server_catalogs[name]
-
-    raise CatalogNotFoundException("Server catalog item \"%s\" not found" % name)
-
-  def create_config(self, name):
-    if name not in self._server_catalogs:
-      self._server_catalogs[name] = ServerConfig(self, name, {CatConst.STACK_PROPERTIES: {}})
-    else:
-      raise CatalogExistException("Config group \"%s\" already existed" % name)
-
-  def items(self):
-    return self._server_catalogs.keys()
-
-  def reload(self):
-    self._load_configs()
-
-  def process_mapping_transformations(self, catalog):
-    """
-    :type catalog UpgradeCatalog
-    """
-    for map_item in catalog.mapping.list():
-      self._process_single_map_transformation(catalog, map_item, catalog.mapping.get(map_item))
-
-  def _process_default_template_map_replacement(self, catalog, item):
-    """
-    :type catalog: UpgradeCatalog
-    :type item: dict
-    """
-    if CatConst.VALUE_TEMPLATE_TAG in item and CatConst.TEMPLATE_HANDLER in catalog.action_handlers and\
-            CatConst.PROPERTY_DEFAULT in item and item[CatConst.VALUE_TEMPLATE_TAG] == CatConst.TRUE_TAG:
-      try:
-        parsed_value = catalog.action_handlers[CatConst.TEMPLATE_HANDLER](
-          catalog,
-          catalog.tag_search_pattern.findall(item[CatConst.PROPERTY_DEFAULT]),
-          item[CatConst.PROPERTY_DEFAULT]
-        )
-        item[CatConst.PROPERTY_DEFAULT] = parsed_value
-      except TemplateProcessingException:
-        pass
-
-  def _process_property_value_transformation(self, catalog, property_map_definition, old_value):
-    """
-    :type catalog: UpgradeCatalog
-    :type property_map_definition: dict
-    :type old_value: str
-    :return: str
-    """
-
-    tmp = old_value
-
-    if CatConst.REPLACE_FROM_TAG in property_map_definition and CatConst.REPLACE_TO_TAG in property_map_definition and\
-      property_map_definition[CatConst.REPLACE_TO_TAG] is not None and property_map_definition[CatConst.REPLACE_FROM_TAG] is not None:
-      tmp = tmp.replace(property_map_definition[CatConst.REPLACE_FROM_TAG], property_map_definition[CatConst.REPLACE_TO_TAG])
-
-    if CatConst.COERCE_TO_PROPERTY_TAG in property_map_definition:
-      if property_map_definition[CatConst.COERCE_TO_PROPERTY_TAG] == CatConst.COERCE_YAML_OPTION_TAG:
-        # for example c6401,c6402 into ['c6401','c6402']
-        data = list(map(lambda x: "'%s'" % x.strip(), tmp.split(',')))
-        tmp = "[%s]" % ','.join(data)
-
-    return tmp
-
-  def _process_single_map_transformation(self, catalog, map_item_name, map_property_item):
-    """
-    :type catalog UpgradeCatalog
-    :type map_item_name str
-    :type map_property_item dict
-    """
-    old_property_name = map_item_name
-
-    # map-from item name could be re-defined via PROPERTY_MAP_FROM property to avoid duplicate entries
-    if CatConst.PROPERTY_MAP_FROM in map_property_item and map_property_item[CatConst.PROPERTY_MAP_FROM] is not None:
-      old_property_name = map_property_item[CatConst.PROPERTY_MAP_FROM]
-
-    new_property_name = old_property_name
-
-    if CatConst.PROPERTY_MAP_TO in map_property_item:
-      new_property_name = map_property_item[CatConst.PROPERTY_MAP_TO]
-
-    # process first required section
-    required_services = map_property_item[CatConst.REQUIRED_SERVICES] if CatConst.REQUIRED_SERVICES in map_property_item else None
-
-    # process required-services tag
-    if required_services is not None and not is_services_exists(required_services):
-      return 0
-
-    # process template tag
-    self._process_default_template_map_replacement(catalog, map_property_item)
-
-    source_cfg_group = map_property_item[CatConst.PROPERTY_FROM_CATALOG] if CatConst.PROPERTY_FROM_CATALOG in map_property_item and\
-                                                                            map_property_item[CatConst.PROPERTY_FROM_CATALOG] != "" else None
-    target_cfg_group = map_property_item[CatConst.PROPERTY_TO_CATALOG] if CatConst.PROPERTY_TO_CATALOG in map_property_item and \
-                                                                          map_property_item[CatConst.PROPERTY_TO_CATALOG] != ""else None
-    default_value = map_property_item[CatConst.PROPERTY_DEFAULT] if CatConst.PROPERTY_DEFAULT in map_property_item and \
-                                                                    map_property_item[CatConst.PROPERTY_DEFAULT] != "" else None
-
-    if source_cfg_group is None and target_cfg_group is None:  # global scope mapping renaming
-      self.notify_observers(CatConst.ACTION_RENAME_PROPERTY, [old_property_name, new_property_name,
-                                                              self._process_property_value_transformation,
-                                                              catalog,
-                                                              map_property_item
-                                                              ])
-    elif source_cfg_group is not None and target_cfg_group is not None:  # group-to-group moving
-      if source_cfg_group in self._server_catalogs and target_cfg_group in self._server_catalogs:
-        old_cfg_group = self.get_config(source_cfg_group).properties
-        new_cfg_group = self.get_config(target_cfg_group).properties
-
-        if old_property_name in old_cfg_group:
-          new_cfg_group[new_property_name] = self._process_property_value_transformation(catalog, map_property_item, old_cfg_group[old_property_name])
-          if new_property_name != old_property_name:
-            del old_cfg_group[old_property_name]
-        elif old_property_name not in old_cfg_group and default_value is not None:
-          new_cfg_group[new_property_name] = default_value
-
-  def commit(self):
-    self.notify_observers(CatConst.ACTION_COMMIT)
-
-
-class ServerConfig(object):
-  def __init__(self, factory, name, initial_configs):
-    """
-     Initialize configuration item
-     :factory ServerConfigFactory
-    """
-    factory.subscribe(name, self)
-    self._configs = initial_configs
-    self._hash = self._calculate_hash()
-    self._name = name
-
-  def _calculate_hash(self):
-    return hash(str(self._configs))
-
-  def notify(self, action, arg=None):
-    if action == CatConst.ACTION_RELOAD:
-      self._configs = arg
-      self._hash = self._calculate_hash()
-    elif action == CatConst.ACTION_COMMIT:
-      self._commit()
-    elif action == CatConst.ACTION_RENAME_PROPERTY and isinstance(arg, list) and len(arg) == 5:
-      self._rename_property(*arg)
-
-  def _rename_property(self, old_name, new_name, transform_func, catalog, map_item):
-    """
-    :type old_name: str
-    :type new_name: str
-    :type transform_func: function
-    :type catalog: UpgradeCatalog
-    :type map_item: dict
-    :return:
-    """
-    if old_name in self.properties:
-      old_property_value = self.properties[old_name]
-      if transform_func is not None:
-        self.properties[new_name] = transform_func(catalog, map_item, old_property_value)
-      else:
-        self.properties[new_name] = old_property_value
-
-      if old_name != new_name:
-        del self.properties[old_name]
-
-  def is_attributes_exists(self):
-    return CatConst.STACK_PROPERTIES_ATTRIBUTES in self._configs
-
-  def __str__(self):
-    return json.dumps(self.properties)
-
-  @property
-  def properties(self):
-    return self._configs[CatConst.STACK_PROPERTIES]
-
-  @properties.setter
-  def properties(self, value):
-    self._configs[CatConst.STACK_PROPERTIES] = value
-
-  @property
-  def attributes(self):
-    return self._configs[CatConst.STACK_PROPERTIES_ATTRIBUTES]
-
-  @attributes.setter
-  def attributes(self, value):
-    self._configs[CatConst.STACK_PROPERTIES_ATTRIBUTES] = value
-
-  def _commit(self):
-    if self._hash != self._calculate_hash():
-      Options.logger.info("Committing changes for \"%s\" configuration group ..." % self._name)
-      if self.is_attributes_exists():
-        update_config(self.properties, self._name, self.attributes)
-      else:
-        update_config(self.properties, self._name)
-
-  def clear(self):
-    self.properties = {}
-    self.attributes = {}
-
-  def merge(self, catalog_item):
-    """
-    :type catalog_item UpgradeCatalog
-    """
-    # handle "merged-copy" tag
-    config_options = catalog_item.options[self._name] if self._name in catalog_item.options else {}
-    clear_properties = not (CatConst.MERGED_COPY_TAG in config_options and
-                            config_options[CatConst.MERGED_COPY_TAG] == CatConst.TRUE_TAG)
-    if clear_properties:
-      self.clear()
-    Options.logger.info("Processing configuration group: %s", self._name)
-    catalog_item.process_simple_transformations(self._name, self.properties)
-
-
-def write_mapping(hostmapping):
-  if os.path.isfile(Options.MR_MAPPING_FILE):
-    os.remove(Options.MR_MAPPING_FILE)
-  json.dump(hostmapping, open(Options.MR_MAPPING_FILE, 'w'))
-
-
-def read_mapping():
-  if os.path.isfile(Options.MR_MAPPING_FILE):
-    if Options.MR_MAPPING is not None:
-      return Options.MR_MAPPING
-    else:
-      Options.MR_MAPPING = json.load(open(Options.MR_MAPPING_FILE))
-      return Options.MR_MAPPING
-  else:
-    raise FatalException(-1, "MAPREDUCE host mapping file, mr_mapping, is not available or badly formatted. Execute "
-                             "action save-mr-mapping. Ensure the file is present in the directory where you are "
-                             "executing this command.")
-
-
-def get_mr1_mapping():
-  components = ["MAPREDUCE_CLIENT", "JOBTRACKER", "TASKTRACKER", "HISTORYSERVER"]
-  GET_URL_FORMAT = Options.CLUSTER_URL + '/services/MAPREDUCE/components/%s'
-  hostmapping = {}
-  for component in components:
-    hostlist = []
-    structured_resp = curl(GET_URL_FORMAT % component, parse=True, validate=True)
-
-    if 'host_components' in structured_resp:
-      for hostcomponent in structured_resp['host_components']:
-        if 'HostRoles' in hostcomponent:
-          if 'host_name' in hostcomponent['HostRoles']:
-            hostlist.append(hostcomponent['HostRoles']['host_name'])
-
-    hostmapping[component] = hostlist
-  write_mapping(hostmapping)
-
-  pprint("File mr_mapping contains the host mapping for mapreduce components. This file is critical for later "
-         "steps.")
-
-
-def get_YN_input(prompt, default):
-  yes = set(['yes', 'ye', 'y'])
-  no = set(['no', 'n'])
-  return get_choice_string_input(prompt, default, yes, no)
-
-
-def get_choice_string_input(prompt, default, firstChoice, secondChoice):
-  choice = raw_input(prompt).lower()
-  if choice in firstChoice:
-    return True
-  elif choice in secondChoice:
-    return False
-  elif choice is "":  # Just enter pressed
-    return default
-  else:
-    print "input not recognized, please try again: "
-    return get_choice_string_input(prompt, default, firstChoice, secondChoice)
-
-
-def delete_mr():
-  saved_mr_mapping = get_YN_input("Have you saved MR host mapping using action save-mr-mapping [y/n] (n)? ", False)
-  if not saved_mr_mapping:
-    raise FatalException(1, "Ensure MAPREDUCE host component mapping is saved before deleting it. Use action "
-                            "save-mr-mapping.")
-
-  SERVICE_URL_FORMAT = Options.CLUSTER_URL + '/services/MAPREDUCE'
-  COMPONENT_URL_FORMAT = Options.CLUSTER_URL + '/hosts/%s/host_components/%s'
-  NON_CLIENTS = ["JOBTRACKER", "TASKTRACKER", "HISTORYSERVER"]
-  PUT_IN_DISABLED = {
-    "HostRoles": {
-      "state": "DISABLED"
-    }
-  }
-
-  hostmapping = read_mapping()
-
-  for key, value in hostmapping.items():
-    if (key in NON_CLIENTS) and (len(value) > 0):
-      for host in value:
-        curl(COMPONENT_URL_FORMAT % (host, key), request_type="PUT", data=PUT_IN_DISABLED,
-             validate=True)
-
-  curl(SERVICE_URL_FORMAT, request_type="DELETE", validate=True)
-
-
-def get_cluster_stackname():
-  VERSION_URL_FORMAT = Options.CLUSTER_URL + '?fields=Clusters/version'
-
-  structured_resp = curl(VERSION_URL_FORMAT, validate=True, parse=True)
-
-  if 'Clusters' in structured_resp:
-    if 'version' in structured_resp['Clusters']:
-      return structured_resp['Clusters']['version']
-
-  raise FatalException(-1, "Unable to get the cluster version")
-
-
-def has_component_in_stack_def(stack_name, service_name, component_name):
-  STACK_COMPONENT_URL_FORMAT = Options.ROOT_URL + '/stacks2/{0}/versions/{1}/stackServices/{2}/serviceComponents/{3}'
-  stack, stack_version = stack_name.split('-')
-
-  try:
-    curl(STACK_COMPONENT_URL_FORMAT.format(stack, stack_version, service_name, component_name),
-         validate=True)
-    return True
-  except FatalException:
-    return False
-
-
-def add_services():
-  SERVICE_URL_FORMAT = Options.CLUSTER_URL + '/services/{0}'
-  COMPONENT_URL_FORMAT = SERVICE_URL_FORMAT + '/components/{1}'
-  HOST_COMPONENT_URL_FORMAT = Options.CLUSTER_URL + '/hosts/{0}/host_components/{1}'
-  service_comp = {
-    "YARN": ["NODEMANAGER", "RESOURCEMANAGER", "YARN_CLIENT"],
-    "MAPREDUCE2": ["HISTORYSERVER", "MAPREDUCE2_CLIENT"]}
-  new_old_host_map = {
-    "NODEMANAGER": "TASKTRACKER",
-    "HISTORYSERVER": "HISTORYSERVER",
-    "RESOURCEMANAGER": "JOBTRACKER",
-    "YARN_CLIENT": "MAPREDUCE_CLIENT",
-    "MAPREDUCE2_CLIENT": "MAPREDUCE_CLIENT"}
-
-  stack_name = get_cluster_stackname()
-  stack_has_ats = has_component_in_stack_def(stack_name, "YARN", "APP_TIMELINE_SERVER")
-
-  # if the target stack defines APP_TIMELINE_SERVER (stacks 2.1 and later)
-  if stack_has_ats:
-    service_comp["YARN"].append("APP_TIMELINE_SERVER")
-    new_old_host_map["APP_TIMELINE_SERVER"] = "JOBTRACKER"
-
-  hostmapping = read_mapping()
-
-  for service in service_comp.keys():
-    curl(SERVICE_URL_FORMAT.format(service), validate=True, request_type="POST")
-
-    for component in service_comp[service]:
-      curl(COMPONENT_URL_FORMAT.format(service, component),
-           validate=True, request_type="POST")
-
-      for host in hostmapping[new_old_host_map[component]]:
-        curl(HOST_COMPONENT_URL_FORMAT.format(host, component),
-             validate=True, request_type="POST")
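
As a usage note, the loop above issues three levels of POST requests per component. A minimal sketch of the sequence for RESOURCEMANAGER, with an illustrative host name:

# Mirrors the calls made above for one component (host name is invented).
curl(Options.CLUSTER_URL + '/services/YARN', validate=True, request_type="POST")
curl(Options.CLUSTER_URL + '/services/YARN/components/RESOURCEMANAGER', validate=True, request_type="POST")
curl(Options.CLUSTER_URL + '/hosts/master1.example.com/host_components/RESOURCEMANAGER', validate=True, request_type="POST")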
-
-
-def update_config(properties, config_type, attributes=None):
-  tag = "version" + str(int(time.time() * 1000))
-  properties_payload = {"Clusters": {"desired_config": {"type": config_type, "tag": tag, "properties": properties}}}
-  if attributes is not None:
-    properties_payload["Clusters"]["desired_config"]["properties_attributes"] = attributes
-
-  expect_body = config_type != "cluster-env"  # ToDo: make exceptions more flexible
-
-  curl(Options.CLUSTER_URL, request_type="PUT", data=properties_payload, validate=True, soft_validation=True)
-
-
-def build_all_options(desired_configs):
-  """
-  Get all configs the old-fashioned way (Ambari versions below 1.7.0 do not support the "properties" filter)
-  """
-  config_url_tpl = Options.CLUSTER_URL + "/configurations?type={0}&tag={1}"
-  all_options = {CatConst.ITEMS_TAG: []}
-  for config in desired_configs:
-    cfg_item = curl(config_url_tpl.format(config, desired_configs[config]["tag"]), parse=True, validate=True)
-    if CatConst.ITEMS_TAG in cfg_item and len(cfg_item[CatConst.ITEMS_TAG]) == 1:
-      all_options[CatConst.ITEMS_TAG].append(cfg_item[CatConst.ITEMS_TAG][0])
-
-  return all_options
-
-
-def get_config_resp_all():
-  desired_configs = {}
-  config_all_properties_url = Options.CLUSTER_URL + "/configurations?fields=properties"
-  desired_configs_resp = curl(Options.CLUSTER_URL + "?fields=Clusters/desired_configs", validate=True, parse=True)
-
-  if 'Clusters' in desired_configs_resp:
-    if 'desired_configs' in desired_configs_resp['Clusters']:
-      desired_configs_resp = desired_configs_resp['Clusters']['desired_configs']
-    else:
-      return None
-  else:
-    return None
-
-  if Options.isPropertyAttributesSupported():
-    config_all_properties_url += ",properties_attributes"
-    all_options = curl(config_all_properties_url, validate=True, parse=True)
-  else:
-    all_options = build_all_options(desired_configs_resp)
-
-  if CatConst.ITEMS_TAG in all_options:
-    all_options = all_options[CatConst.ITEMS_TAG]
-  else:
-    return None
-
-  all_options = filter(
-    lambda x: x[CatConst.TYPE_TAG] in desired_configs_resp and x["tag"] == desired_configs_resp[x[CatConst.TYPE_TAG]][
-      "tag"],
-    all_options)
-
-  for item in all_options:
-    dc_item = {}
-
-    if CatConst.STACK_PROPERTIES in item:  # config item could not contain any property
-      dc_item[CatConst.STACK_PROPERTIES] = item[CatConst.STACK_PROPERTIES]
-    else:
-      dc_item[CatConst.STACK_PROPERTIES] = {}
-
-    if CatConst.STACK_PROPERTIES_ATTRIBUTES in item:
-      dc_item[CatConst.STACK_PROPERTIES_ATTRIBUTES] = item[CatConst.STACK_PROPERTIES_ATTRIBUTES]
-
-    if "tag" in item:
-      dc_item["tag"] = item["tag"]
-
-    if dc_item != {}:
-      desired_configs[item[CatConst.TYPE_TAG]] = dc_item
-
-  return desired_configs
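
The dictionary returned above maps each config type to its current tag, properties and, when supported, attributes. A sketch of one entry, assuming CatConst.STACK_PROPERTIES and CatConst.STACK_PROPERTIES_ATTRIBUTES resolve to the REST keys "properties" and "properties_attributes"; the values are made up.

# Illustrative return value of get_config_resp_all() for a single config type.
{
  "hdfs-site": {
    "tag": "version1",
    "properties": {"dfs.replication": "3"},
    "properties_attributes": {"final": {"dfs.replication": "true"}}
  }
}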
-
-
-def is_services_exists(required_services):
-  """
-  Return True if required_services is a subset of Options.SERVICES.
-  :param required_services: list
-  :return: bool
-  """
-  # sets are equal
-  if Options.SERVICES == set(required_services):
-    return True
-
-  return set(map(lambda x: x.upper(), required_services)) < Options.SERVICES
-
-
-def get_cluster_services():
-  services_url = Options.CLUSTER_URL + '/services'
-  raw_services = curl(services_url, parse=True)
-
-  # expected structure:
-  # items: [ {"href":"...", "ServiceInfo":{"cluster_name":"..", "service_name":".."}}, ..., ... ]
-  if raw_services is not None and "items" in raw_services and isinstance(raw_services["items"], list):
-    return list(map(lambda item: item["ServiceInfo"]["service_name"], raw_services["items"]))
-
-  Options.logger.warning("Failed to load services list, functionality that depends on them couldn't work")
-  return []
-
-
-def get_zookeeper_quorum():
-  zoo_cfg = curl(Options.COMPONENTS_FORMAT.format(Options.ZOOKEEPER_SERVER), validate=False, parse=True)
-  zoo_quorum = []
-  zoo_def_port = "2181"
-  if Options.server_config_factory is not None and Options.ZK_OPTIONS in Options.server_config_factory.items():
-    props = Options.server_config_factory.get_config(Options.ZK_OPTIONS)
-    if Options.ZK_CLIENTPORT in props.properties:
-      zoo_def_port = props.properties[Options.ZK_CLIENTPORT]
-
-  if "host_components" in zoo_cfg:
-    for item in zoo_cfg["host_components"]:
-      zoo_quorum.append("%s:%s" % (item["HostRoles"]["host_name"], zoo_def_port))
-
-  return ",".join(zoo_quorum)
-
-
-def get_tez_history_url_base():
-  try:
-    tez_view = curl(Options.TEZ_VIEW_URL, validate=False, parse=True)
-  except HTTPError as e:
-    raise TemplateProcessingException(str(e))
-
-  version = ""
-  if "versions" in tez_view and \
-    len(tez_view['versions']) > 0 and \
-    "ViewVersionInfo" in tez_view['versions'][0] and \
-    'version' in tez_view['versions'][0]['ViewVersionInfo']:
-    version = tez_view['versions'][0]['ViewVersionInfo']['version']
-  url = '{0}://{1}:{2}/#/main/views/TEZ/{3}/TEZ_CLUSTER_INSTANCE'.format(Options.API_PROTOCOL, Options.HOST, Options.API_PORT, version)
-  return url
-
-def get_kafka_listeners():
-  kafka_host = "localhost"
-  kafka_port = "6667"
-  if Options.server_config_factory is not None and Options.KAFKA_BROKER_CONF in Options.server_config_factory.items():
-    props = Options.server_config_factory.get_config(Options.KAFKA_BROKER_CONF)
-    if Options.KAFKA_PORT in props.properties:
-      kafka_port = props.properties[Options.KAFKA_PORT]
-
-  # Default kafka listeners string
-  kafka_listeners = "PLAINTEXT://{0}:{1}".format(kafka_host, kafka_port)
-
-  return kafka_listeners
-
-
-def get_ranger_xaaudit_hdfs_destination_directory():
-  namenode_hostname="localhost"
-  namenode_cfg = curl(Options.COMPONENTS_FORMAT.format(Options.NAMENODE), validate=False, parse=True)
-  if "host_components" in namenode_cfg:
-    namenode_hostname = namenode_cfg["host_components"][0]["HostRoles"]["host_name"]
-
-  return "hdfs://{0}:8020/ranger/audit".format(namenode_hostname)
-
-def get_ranger_policymgr_external_url():
-  url = "{{policymgr_mgr_url}}"
-  if Options.server_config_factory is not None and Options.RANGER_ADMIN in Options.server_config_factory.items():
-    props = Options.server_config_factory.get_config(Options.RANGER_ADMIN)
-    if Options.RANGER_EXTERNAL_URL in props.properties:
-      url = props.properties[Options.RANGER_EXTERNAL_URL]
-  return url
-
-def get_jdbc_driver():
-  driver = "{{jdbc_driver}}"
-  if Options.server_config_factory is not None and Options.RANGER_ADMIN in Options.server_config_factory.items():
-    props = Options.server_config_factory.get_config(Options.RANGER_ADMIN)
-    if "DB_FLAVOR" in props.properties:
-      db = props.properties["DB_FLAVOR"]
-
-  if db.lower() == "mysql":
-    driver = "com.mysql.jdbc.Driver"
-  elif db.lower() == "oracle":
-    driver = "oracle.jdbc.OracleDriver"
-  return driver
-
-def get_audit_jdbc_url():
-  audit_jdbc_url = "{{audit_jdbc_url}}"
-  if Options.server_config_factory is not None and Options.RANGER_ADMIN in Options.server_config_factory.items():
-    props = Options.server_config_factory.get_config(Options.RANGER_ADMIN)
-    if "DB_FLAVOR" in props.properties:
-      xa_audit_db_flavor = props.properties["DB_FLAVOR"]
-    if "db_host" in props.properties:
-      xa_db_host =  props.properties["db_host"]
-    if "audit_db_name" in props.properties:
-      xa_audit_db_name = props.properties["audit_db_name"]
-
-  if xa_audit_db_flavor.lower() == 'mysql':
-    audit_jdbc_url = "jdbc:mysql://{0}/{1}".format(xa_db_host, xa_audit_db_name)
-  elif xa_audit_db_flavor.lower() == 'oracle':
-    audit_jdbc_url = "jdbc:oracle:thin:\@//{0}".format(xa_db_host)
-  return audit_jdbc_url
-
-def get_audit_db_passwd():
-  audit_db_passwd = "crypted"
-  if Options.server_config_factory is not None and Options.RANGER_ADMIN in Options.server_config_factory.items():
-    props = Options.server_config_factory.get_config(Options.RANGER_ADMIN)
-    if "audit_db_password" in props.properties:
-      audit_db_passwd = props.properties['audit_db_password']
-  return audit_db_passwd
-
-def get_audit_to_db_enabled(config_name):
-  audit_to_db = "true"
-  if Options.server_config_factory is not None and config_name in Options.server_config_factory.items():
-    props = Options.server_config_factory.get_config(config_name)
-    if "XAAUDIT.DB.IS_ENABLED" in props.properties:
-      audit_to_db = props.properties["XAAUDIT.DB.IS_ENABLED"]
-  return audit_to_db
-
-def get_audit_to_hdfs_enabled(config_name):
-  audit_to_hdfs = "false"
-  if Options.server_config_factory is not None and config_name in Options.server_config_factory.items():
-    props = Options.server_config_factory.get_config(config_name)
-    if "XAAUDIT.HDFS.IS_ENABLED" in props.properties:
-      audit_to_hdfs = props.properties["XAAUDIT.HDFS.IS_ENABLED"]
-  return audit_to_hdfs
-
-def get_hdfs_batch_filespool_dir(config_name, component):
-  if component == 'hdfs':
-    path = '/var/log/hadoop/hdfs/audit/hdfs/spool'
-  else:
-    path = '/var/log/{0}/audit/hdfs/spool'.format(component)
-  if Options.server_config_factory is not None and config_name in Options.server_config_factory.items():
-    props = Options.server_config_factory.get_config(config_name)
-    if "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY" in props.properties:
-      path = props.properties["XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY"]
-  return path
-
-
-def get_usersync_sync_source():
-  ug_sync_source = 'org.apache.ranger.unixusersync.process.UnixUserGroupBuilder'
-  sync_source = 'unix'
-  if Options.server_config_factory is not None and Options.RANGER_USERSYNC in Options.server_config_factory.items():
-    props = Options.server_config_factory.get_config(Options.RANGER_USERSYNC)
-    if "SYNC_SOURCE" in props.properties:
-      sync_source = props.properties['SYNC_SOURCE']
-
-    if sync_source == 'ldap':
-      ug_sync_source = 'org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder'
-  return ug_sync_source
-
-def get_audit_check(audit_type):
-  audit_check_flag = "false"
-  if Options.server_config_factory is not None and Options.RANGER_ENV in Options.server_config_factory.items():
-    props = Options.server_config_factory.get_config(Options.RANGER_ENV)
-    audit_property = "xasecure.audit.destination.{0}".format(audit_type)
-    if audit_property in props.properties:
-      audit_check_flag = props.properties[audit_property]
-
-  return audit_check_flag
-
-def get_jt_host(catalog):
-  """
-  :type catalog: UpgradeCatalog
-  :rtype str
-  """
-  if catalog.get_parsed_version()["from"] == 13:
-    return read_mapping()["JOBTRACKER"][0]
-
-  return ""
-
-
-def get_jh_host(catalog):
-  """
-  :type catalog: UpgradeCatalog
-  :rtype str
-  """
-  if catalog.get_parsed_version()["from"] == 13:
-    return read_mapping()["HISTORYSERVER"][0]
-
-  return ""
-
-def get_ranger_host():
-  ranger_config = curl(Options.COMPONENTS_FORMAT.format('RANGER_ADMIN'), validate=False, parse=True)
-  ranger_host_list = []
-  if "host_components" in ranger_config:
-    for item in ranger_config["host_components"]:
-      ranger_host_list.append(item["HostRoles"]["host_name"])
-  return ranger_host_list[0]
-
-def get_ranger_service_details():
-  server_cfg_factory = Options.server_config_factory
-  server_cfg_catalog = server_cfg_factory.get_config('admin-properties')
-  properties_latest = server_cfg_catalog.properties
-  data = {}
-
-  if properties_latest['DB_FLAVOR'].lower() == 'mysql':
-    data['RANGER_JDBC_DRIVER'] = 'com.mysql.jdbc.Driver'
-    data['RANGER_JDBC_DIALECT'] = 'org.eclipse.persistence.platform.database.MySQLPlatform'
-    data['RANGER_JDBC_URL'] = 'jdbc:mysql://{0}/{1}'.format(properties_latest['db_host'], properties_latest['db_name'])
-    data['RANGER_AUDIT_JDBC_URL'] = 'jdbc:mysql://{0}/{1}'.format(properties_latest['db_host'], properties_latest['audit_db_name'])
-    data['RANGER_ROOT_JDBC_URL'] = 'jdbc:mysql://{0}'.format(properties_latest['db_host'])
-  elif properties_latest['DB_FLAVOR'].lower() == 'oracle':
-    data['RANGER_JDBC_DRIVER'] = 'oracle.jdbc.OracleDriver'
-    data['RANGER_JDBC_DIALECT'] = 'org.eclipse.persistence.platform.database.OraclePlatform'
-    data['RANGER_JDBC_URL'] = 'jdbc:oracle:thin:@//{0}'.format(properties_latest['db_host'])
-    data['RANGER_AUDIT_JDBC_URL'] = 'jdbc:oracle:thin:@//{0}'.format(properties_latest['db_host'])
-    data['RANGER_ROOT_JDBC_URL'] = 'jdbc:oracle:thin:@//{0}'.format(properties_latest['db_host'])
-
-  return data
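
For a MySQL-flavoured admin-properties config, the returned dict would look roughly like the sketch below; the host and database names are invented.

# Illustrative result of get_ranger_service_details() for DB_FLAVOR=mysql.
{
  "RANGER_JDBC_DRIVER": "com.mysql.jdbc.Driver",
  "RANGER_JDBC_DIALECT": "org.eclipse.persistence.platform.database.MySQLPlatform",
  "RANGER_JDBC_URL": "jdbc:mysql://db.example.com/ranger",
  "RANGER_AUDIT_JDBC_URL": "jdbc:mysql://db.example.com/ranger_audit",
  "RANGER_ROOT_JDBC_URL": "jdbc:mysql://db.example.com"
}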
-
-def get_hive_security_authorization_setting():
-  # this pattern should be used only once, changes here mimic UpgradeCatalog210.java -> updateRangerHiveConfigs
-  scf = Options.server_config_factory
-  response = "None"
-
-  if "hive-env" in scf.items() and "hive_security_authorization" in scf.get_config("hive-env").properties:
-    response = scf.get_config("hive-env").properties["hive_security_authorization"]
-
-  old_ranger_catalog = "ranger-hive-plugin-properties"
-  old_ranger_setting = "ranger-hive-plugin-enabled"
-  hive_server_catalog = "hiveserver2-site"
-  hive_sec_property = "hive.security.authorization.enabled"
-
-  if scf is not None and old_ranger_catalog in scf.items():
-    cfg = scf.get_config(old_ranger_catalog)
-    prop = cfg.properties
-    if old_ranger_setting in prop and cfg.properties[old_ranger_setting].upper() == "YES":
-      response = "Ranger"
-      if hive_server_catalog in scf.items():
-        hive_props = scf.get_config(hive_server_catalog).properties
-        hive_props[hive_sec_property] = "true"
-    if old_ranger_setting in prop:
-      del prop[old_ranger_setting]
-
-  # workaround for buggy stack advisor
-  if "HIVE" in Options.SERVICES and response == "None":
-    if hive_server_catalog not in scf.items():
-      scf.create_config(hive_server_catalog)
-
-    scf.get_config(hive_server_catalog).properties[hive_sec_property] = "false"
-
-  return response
-
-
-def get_hbase_coprocessmaster_classes():
-  scf = Options.server_config_factory
-  prop = "hbase.coprocessor.master.classes"
-  hbase_ranger_enabled = False
-  old_value = ""
-  if "hbase-site" in scf.items():
-    if prop in scf.get_config("hbase-site").properties:
-      old_value = scf.get_config("hbase-site").properties[prop]
-    if "hbase.security.authorization" in scf.get_config("hbase-site").properties and \
-      scf.get_config("hbase-site").properties["hbase.security.authorization"].upper() == "TRUE":
-      hbase_ranger_enabled = True
-
-  if hbase_ranger_enabled and "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor" not in old_value:
-    if "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor" in old_value:
-      old_value = old_value.replace("com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor",
-                                    "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor")
-    else:
-      val = [] if old_value.strip() == "" else old_value.split(',')
-      val.append("org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor")
-      old_value = ','.join(val)
-
-  return old_value
-
-
-def get_rpc_scheduler_factory_class():
-  if Options.PHOENIX_QUERY_SERVER in Options.ambari_server.components:
-    return "org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory"
-  else:
-    return ""
-
-
-def get_hbase_rpc_controllerfactory_class():
-  if Options.PHOENIX_QUERY_SERVER in Options.ambari_server.components:
-    return "org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory"
-  else:
-    return ""
-
-
-def get_hbase_regionserver_wal_codec():
-  prop = "phoenix_sql_enabled"
-  scf = Options.server_config_factory
-  if "hbase-env" in scf.items():
-    if prop in scf.get_config("hbase-env").properties and scf.get_config("hbase-env").properties[prop].upper() == "TRUE":
-      return "org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec"
-  return "org.apache.hadoop.hbase.regionserver.wal.WALCellCodec"
-
-
-def get_hbase_coprocessor_region_classes():
-  scf = Options.server_config_factory
-  prop = "hbase.coprocessor.region.classes"
-  hbase_ranger_enabled = False
-  old_value = ""
-  if "hbase-site" in scf.items():
-    if prop in scf.get_config("hbase-site").properties:
-      old_value = scf.get_config("hbase-site").properties[prop]
-    if "hbase.security.authorization" in scf.get_config("hbase-site").properties and \
-        scf.get_config("hbase-site").properties["hbase.security.authorization"].upper() == "TRUE":
-      hbase_ranger_enabled = True
-
-  if hbase_ranger_enabled and "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor" not in old_value:
-    if "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor" in old_value:
-      old_value = old_value.replace("com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor",
-                                    "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor")
-    else:
-      val = [] if old_value.strip() == "" else old_value.split(',')
-      val.append("org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor")
-      old_value = ','.join(val)
-
-  return old_value
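
The two coprocessor helpers above perform the same rewrite. A before/after sketch with a shortened class list; the surrounding TokenProvider entry is only illustrative.

# Before: legacy XaSecure coprocessor present in the existing value
old_value = ("org.apache.hadoop.hbase.security.token.TokenProvider,"
             "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor")
# After the replacement applied above when hbase.security.authorization is true
new_value = ("org.apache.hadoop.hbase.security.token.TokenProvider,"
             "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor")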
-
-
-def _substitute_handler(upgrade_catalog, tokens, value):
-  """
-  Substitute handler
-  :param upgrade_catalog: UpgradeCatalog
-  :param tokens: list
-  :param value: str
-  :rtype str
-  """
-  for token in tokens:
-    if token == "{JOBHISTORY_HOST}":
-      value = value.replace(token, get_jh_host(upgrade_catalog))
-    elif token == "{RESOURCEMANAGER_HOST}":
-      value = value.replace(token, get_jt_host(upgrade_catalog))
-    elif token == "{HBASE_REGIONSERVER_WAL_CODEC}":
-      value = value.replace(token, get_hbase_regionserver_wal_codec())
-    elif token == "{HBASE_REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS}":
-      value = value.replace(token, get_rpc_scheduler_factory_class())
-    elif token == "{HBASE_RPC_CONTROLLERFACTORY_CLASS}":
-      value = value.replace(token, get_hbase_rpc_controllerfactory_class())
-    elif token == "{ZOOKEEPER_QUORUM}":
-      value = value.replace(token, get_zookeeper_quorum())
-    elif token == "{HBASE_COPROCESS_MASTER_CLASSES}":
-      value = value.replace(token, get_hbase_coprocessmaster_classes())
-    elif token == "{HBASE_COPROCESSOR_REGION_CLASSES}":
-      value = value.replace(token, get_hbase_coprocessor_region_classes())
-    elif token == "{HIVE_SECURITY_AUTHORIZATION}":
-      value = value.replace(token, get_hive_security_authorization_setting())
-    elif token == "{TEZ_HISTORY_URL_BASE}":
-      value = value.replace(token, get_tez_history_url_base())
-    elif token == "{RANGER_JDBC_DRIVER}":
-      value = value.replace(token, get_ranger_service_details()['RANGER_JDBC_DRIVER'])
-    elif token == "{RANGER_JDBC_URL}":
-      value = value.replace(token, get_ranger_service_details()['RANGER_JDBC_URL'])
-    elif token == "{RANGER_AUDIT_JDBC_URL}":
-      value = value.replace(token, get_ranger_service_details()['RANGER_AUDIT_JDBC_URL'])
-    elif token == "{RANGER_HOST}":
-      value = value.replace(token, get_ranger_host())
-    elif token == "{RANGER_JDBC_DIALECT}":
-      value = value.replace(token, get_ranger_service_details()['RANGER_JDBC_DIALECT'])
-    elif token == "{KAFKA_LISTENERS}":
-      value = value.replace(token, get_kafka_listeners())
-    elif token == "{RANGER_PLUGIN_HBASE_POLICY_CACHE_DIR}":
-      value = value.replace(token, "/etc/ranger/{0}{1}/policycache".format(Options.CLUSTER_NAME, "_hbase"))
-    elif token == "{RANGER_PLUGIN_HDFS_POLICY_CACHE_DIR}":
-      value = value.replace(token, "/etc/ranger/{0}{1}/policycache".format(Options.CLUSTER_NAME, "_hadoop"))
-    elif token == "{RANGER_PLUGIN_HIVE_POLICY_CACHE_DIR}":
-      value = value.replace(token, "/etc/ranger/{0}{1}/policycache".format(Options.CLUSTER_NAME, "_hive"))
-    elif token == "{RANGER_PLUGIN_KNOX_POLICY_CACHE_DIR}":
-      value = value.replace(token, "/etc/ranger/{0}{1}/policycache".format(Options.CLUSTER_NAME, "_knox"))
-    elif token == "{RANGER_PLUGIN_STORM_POLICY_CACHE_DIR}":
-      value = value.replace(token, "/etc/ranger/{0}{1}/policycache".format(Options.CLUSTER_NAME, "_storm"))
-    elif token == "{RANGER_HBASE_KEYSTORE_CREDENTIAL_FILE}":
-      value = value.replace(token, "jceks://file/etc/ranger/{0}{1}/cred.jceks".format(Options.CLUSTER_NAME, "_hbase"))
-    elif token == "{RANGER_HDFS_KEYSTORE_CREDENTIAL_FILE}":
-      value = value.replace(token, "jceks://file/etc/ranger/{0}{1}/cred.jceks".format(Options.CLUSTER_NAME, "_hadoop"))
-    elif token == "{RANGER_HIVE_KEYSTORE_CREDENTIAL_FILE}":
-      value = value.replace(token, "jceks://file/etc/ranger/{0}{1}/cred.jceks".format(Options.CLUSTER_NAME, "_hive"))
-    elif token == "{RANGER_KNOX_KEYSTORE_CREDENTIAL_FILE}":
-      value = value.replace(token, "jceks://file/etc/ranger/{0}{1}/cred.jceks".format(Options.CLUSTER_NAME, "_knox"))
-    elif token == "{RANGER_STORM_KEYSTORE_CREDENTIAL_FILE}":
-      value = value.replace(token, "jceks://file/etc/ranger/{0}{1}/cred.jceks".format(Options.CLUSTER_NAME, "_storm"))
-    elif token == "{XAAUDIT_HDFS_DESTINATION_DIRECTORY}":
-      value = value.replace(token, get_ranger_xaaudit_hdfs_destination_directory())
-    elif token == "{HBASE_RANGER_REPO_NAME}":
-      value = value.replace(token, Options.CLUSTER_NAME+"_hbase")
-    elif token == "{HDFS_RANGER_REPO_NAME}":
-      value = value.replace(token, Options.CLUSTER_NAME+"_hadoop")
-    elif token == "{HIVE_RANGER_REPO_NAME}":
-      value = value.replace(token, Options.CLUSTER_NAME+"_hive")
-    elif token == "{KNOX_RANGER_REPO_NAME}":
-      value = value.replace(token, Options.CLUSTER_NAME+"_knox")
-    elif token == "{STORM_RANGER_REPO_NAME}":
-      value = value.replace(token, Options.CLUSTER_NAME+"_storm")
-    elif token == "{POLICYMGR_MGR_URL}":
-      value = value.replace(token, get_ranger_policymgr_external_url())
-    elif token == "{HDFS_JDBC_DRIVER}":
-      value = value.replace(token, get_jdbc_driver())
-    elif token == "{HBASE_JDBC_DRIVER}":
-      value = value.replace(token, get_jdbc_driver())
-    elif token == "{HIVE_JDBC_DRIVER}":
-      value = value.replace(token, get_jdbc_driver())
-    elif token == "{KNOX_JDBC_DRIVER}":
-      value = value.replace(token, get_jdbc_driver())
-    elif token == "{STORM_JDBC_DRIVER}":
-      value = value.replace(token, get_jdbc_driver())
-    elif token == "{HDFS_AUDIT_JDBC_URL}":
-      value = value.replace(token, get_audit_jdbc_url())
-    elif token == "{HBASE_AUDIT_JDBC_URL}":
-      value = value.replace(token, get_audit_jdbc_url())
-    elif token == "{HIVE_AUDIT_JDBC_URL}":
-      value = value.replace(token, get_audit_jdbc_url())
-    elif token == "{KNOX_AUDIT_JDBC_URL}":
-      value = value.replace(token, get_audit_jdbc_url())
-    elif token == "{STORM_AUDIT_JDBC_URL}":
-      value = value.replace(token, get_audit_jdbc_url())
-    elif token == "{AUDIT_TO_DB_HDFS}":
-      value = value.replace(token, get_audit_to_db_enabled("ranger-hdfs-plugin-properties"))
-    elif token == "{AUDIT_TO_DB_HBASE}":
-      value = value.replace(token, get_audit_to_db_enabled("ranger-hbase-plugin-properties"))
-    elif token == "{AUDIT_TO_DB_HIVE}":
-      value = value.replace(token, get_audit_to_db_enabled("ranger-hive-plugin-properties"))
-    elif token == "{AUDIT_TO_DB_KNOX}":
-      value = value.replace(token, get_audit_to_db_enabled("ranger-knox-plugin-properties"))
-    elif token == "{AUDIT_TO_DB_STORM}":
-      value = value.replace(token, get_audit_to_db_enabled("ranger-storm-plugin-properties"))
-    elif token == "{AUDIT_TO_HDFS_HDFS}":
-      value = value.replace(token, get_audit_to_hdfs_enabled("ranger-hdfs-plugin-properties"))
-    elif token == "{AUDIT_TO_HDFS_HIVE}":
-      value = value.replace(token, get_audit_to_hdfs_enabled("ranger-hive-plugin-properties"))
-    elif token == "{AUDIT_TO_HDFS_HBASE}":
-      value = value.replace(token, get_audit_to_hdfs_enabled("ranger-hbase-plugin-properties"))
-    elif token == "{AUDIT_TO_HDFS_KNOX}":
-      value = value.replace(token, get_audit_to_hdfs_enabled("ranger-knox-plugin-properties"))
-    elif token == "{AUDIT_TO_HDFS_STORM}":
-      value = value.replace(token, get_audit_to_hdfs_enabled("ranger-storm-plugin-properties"))
-    elif token == "{AUDIT_HDFS_FILESPOOL_DIR_HDFS}":
-      value = value.replace(token, get_hdfs_batch_filespool_dir("ranger-hdfs-plugin-properties", "hdfs"))
-    elif token == "{AUDIT_HDFS_FILESPOOL_DIR_HIVE}":
-      value = value.replace(token, get_hdfs_batch_filespool_dir("ranger-hive-plugin-properties", "hive"))
-    elif token == "{AUDIT_HDFS_FILESPOOL_DIR_HBASE}":
-      value = value.replace(token, get_hdfs_batch_filespool_dir("ranger-hbase-plugin-properties", "hbase"))
-    elif token == "{AUDIT_HDFS_FILESPOOL_DIR_KNOX}":
-      value = value.replace(token, get_hdfs_batch_filespool_dir("ranger-knox-plugin-properties", "knox"))
-    elif token == "{AUDIT_HDFS_FILESPOOL_DIR_STORM}":
-      value = value.replace(token, get_hdfs_batch_filespool_dir("ranger-storm-plugin-properties", "storm"))
-    elif token == "{USERSYNC_SYNC_SOURCE}":
-      value = value.replace(token, get_usersync_sync_source())
-    elif token == "{AUDIT_TO_DB}":
-      value =  value.replace(token, get_audit_check("db"))
-    elif token == "{AUDIT_TO_HDFS}":
-      value =  value.replace(token, get_audit_check("hdfs"))
-    elif token == "{RANGER_ROOT_JDBC_URL}":
-      value = value.replace(token, get_ranger_service_details()['RANGER_ROOT_JDBC_URL'])
-
-  return value
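
A minimal sketch of the handler in action, assuming upgrade_catalog is the catalog obtained from UpgradeCatalogFactory and the catalog value embeds a single token; the resolved host list is invented.

# The template handler receives the catalog value plus the tokens found in it
# and returns the value with every token expanded.
value = "{ZOOKEEPER_QUORUM}"
tokens = ["{ZOOKEEPER_QUORUM}"]
resolved = _substitute_handler(upgrade_catalog, tokens, value)
# resolved could be e.g. "zk1.example.com:2181,zk2.example.com:2181"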
-
-
-def modify_config_item(config_type, catalog, server_config_factory):
-  """
-  Modify configuration item
-  :type config_type str
-  :type catalog UpgradeCatalog
-  :type server_config_factory ServerConfigFactory
-  """
-
-  # if config group is absent on the server, we will create it
-  if config_type not in server_config_factory.items():
-    server_config_factory.create_config(config_type)
-
-  server_config_catalog = server_config_factory.get_config(config_type)
-
-  server_config_catalog.merge(catalog)
-
-
-def modify_configs():
-  if len(Options.ARGS) > 1:
-    config_type = Options.ARGS[1]
-  else:
-    config_type = None
-
-  catalog_farm = UpgradeCatalogFactory(Options.OPTIONS.upgrade_json)  # Load upgrade catalog
-  catalog = catalog_farm.get_catalog(Options.OPTIONS.from_stack,
-                                     Options.OPTIONS.to_stack)  # get desired version of catalog
-  Options.stack_advisor = StackAdvisorFactory().get_instance(catalog.name, catalog.target_version)
-
-  # load all desired configs from the server
-  # ToDo: implement singleton for that class
-  Options.server_config_factory = ServerConfigFactory()
-
-  if catalog is None:
-    raise FatalException(1, "Upgrade catalog for version %s-%s not found, no configs was modified"
-                         % (Options.OPTIONS.from_stack, Options.OPTIONS.to_stack))
-
-  # add user-defined template processing function
-  catalog.add_handler(CatConst.TEMPLATE_HANDLER, _substitute_handler)
-
-  if config_type is not None and config_type not in catalog.items:
-    raise FatalException("Config type %s not exists, no configs was modified" % config_type)
-
-  if config_type is not None:
-    modify_config_item(config_type, catalog, Options.server_config_factory)
-  else:
-    for collection_name in catalog.items:
-      modify_config_item(collection_name, catalog, Options.server_config_factory)
-
-  Options.server_config_factory.process_mapping_transformations(catalog)
-
-  # commit changes to server, if any will be found
-  Options.server_config_factory.commit()
-
-
-def backup_configs(conf_type=None):
-  dir = "backups_%d" % time.time()
-  file_pattern = "%s%s%s_%s.json"
-  configs = get_config_resp_all()
-  if configs is None:
-    Options.logger.error("Unexpected response from the server")
-    return -1
-
-  if conf_type is not None and conf_type in configs:
-    configs = {conf_type: configs[conf_type]}
-
-  if not os.path.exists(dir):
-    os.mkdir(dir)
-
-  for item in configs:
-    filename = file_pattern % (dir, os.path.sep, item, configs[item]["tag"])
-    if os.path.exists(filename):
-      os.remove(filename)
-
-    try:
-      with open(filename, "w") as f:
-        f.write(json.dumps(configs[item][CatConst.STACK_PROPERTIES], indent=4))
-      Options.logger.info("Catalog \"%s\" stored to %s", item, filename)
-    except IOError as e:
-      Options.logger.error("Unable to store \"%s\": %s", item, e)
-
-
-def install_services():
-  SERVICE_URL_FORMAT = Options.CLUSTER_URL + '/services/{0}'
-  SERVICES = ["MAPREDUCE2", "YARN"]
-  PUT_IN_INSTALLED = [
-    {
-      "RequestInfo": {
-        "context": "Install MapReduce2"
-      },
-      "Body": {
-        "ServiceInfo": {
-          "state": "INSTALLED"
-        }
-      }
-    },
-    {
-      "RequestInfo": {
-        "context": "Install YARN"
-      },
-      "Body": {
-        "ServiceInfo": {
-          "state": "INSTALLED"
-        }
-      }
-    }
-  ]
-
-  err_retcode = 0
-  err_message = ""
-  for index in [0, 1]:
-    try:
-      curl(SERVICE_URL_FORMAT.format(SERVICES[index]), validate=True, request_type="PUT", data=PUT_IN_INSTALLED[index])
-    except FatalException as e:
-      if not e.code == 0:
-        err_retcode = e.code
-        err_message = err_message + " Error while installing " + SERVICES[index] + ". Details: " + e.message + "."
-
-  if err_retcode != 0:
-    raise FatalException(err_retcode,
-                         err_message + "(Services may already be installed or agents are not yet started.)")
-
-  Options.OPTIONS.exit_message = "Requests has been submitted to install YARN and MAPREDUCE2. Use Ambari Web to monitor " \
-                                 "the status of the install requests."
-
-
-def generate_auth_header(user, password):
-  token = "%s:%s" % (user, password)
-  token = base64.encodestring(token)
-  return {"Authorization": "Basic %s" % token.replace('\n', '')}
-
-
-def curl(url, tokens=None, headers=None, request_type="GET", data=None, parse=False,
-         validate=False, soft_validation=False):
-  """
-  :rtype: dict when parse is True, otherwise str
-  """
-  _headers = {}
-  handler_chain = []
-  post_req = ["POST", "PUT"]
-  get_req = ["GET", "DELETE"]
-
-  print_url = Options.CURL_PRINT_ONLY is not None
-  write_only_print = Options.CURL_WRITE_ONLY is not None
-
-  if request_type not in post_req + get_req:
-    raise IOError("Wrong request type \"%s\" passed" % request_type)
-
-  if data is not None and isinstance(data, dict):
-    data = json.dumps(data)
-
-  if tokens is not None:
-    _headers.update(generate_auth_header(tokens["user"], tokens["pass"]))
-  elif Options.API_TOKENS is not None:
-    _headers.update(generate_auth_header(Options.API_TOKENS["user"], Options.API_TOKENS["pass"]))
-
-  if request_type in post_req and data is not None:
-    _headers["Content-Length"] = len(data)
-
-  if headers is not None:
-    _headers.update(headers)
-
-  if Options.HEADERS is not None:
-    _headers.update(Options.HEADERS)
-
-  director = build_opener(*handler_chain)
-  if request_type in post_req:
-    _data = bytes(data)
-    req = Request(url, headers=_headers, data=_data)
-  else:
-    req = Request(url, headers=_headers)
-
-  req.get_method = lambda: request_type
-
-  if print_url:
-    if write_only_print:
-      if request_type in post_req:
-        Options.logger.info(url)
-        if data is not None:
-          Options.logger.info("POST Data: \n" + str(data))
-    else:
-      Options.logger.info(url)
-      if request_type in post_req and data is not None:
-        Options.logger.info("POST Data: \n" + str(data))
-
-  code = 200
-  if not (print_url and request_type in post_req):
-    try:
-      resp = director.open(req)
-      out = resp.read()
-      if isinstance(out, bytes):
-        out = out.decode("utf-8")
-      code = resp.code
-    except URLError as e:
-      Options.logger.error(str(e))
-      if isinstance(e, HTTPError):
-        raise e
-      else:
-        raise FatalException(-1, str(e))
-  else:
-    if not print_url:
-      Options.logger.info(url)
-    out = "{}"
-
-  if validate and not print_url and (code > 299 or code < 200):
-    if soft_validation:
-      Options.logger.warning("Response validation failed, please check previous action result manually.")
-    else:
-      raise FatalException(code, "Response validation failed, please check previous action result manually.")
-
-  if parse:
-    return json.loads(out)
-  else:
-    return out
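
Two representative calls of the curl() wrapper above, mirroring how the rest of the script uses it; the URLs are illustrative.

# Parsed GET with response-code validation
version_resp = curl(Options.CLUSTER_URL + "?fields=Clusters/version", parse=True, validate=True)
# Validated DELETE, as used when removing the MAPREDUCE service
curl(Options.CLUSTER_URL + "/services/MAPREDUCE", request_type="DELETE", validate=True)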
-
-
-def configuration_item_diff(collection_name, catalog, actual_properties_list):
-  """
-  Merge catalog item with actual config item on the server
-  Diff item response:
-   {
-     "property" : name,
-     "catalog_item": value,
-     "catalog_value": value,
-     "actual_value": value
-   }
-  :param collection_name:
-  :param catalog:
-  :param actual_properties_list
-  :return:
-  """
-
-  verified_catalog = []
-  catalog_properties = dict(catalog)
-  actual_properties = dict(actual_properties_list)
-
-  if actual_properties is None:
-    verified_catalog = map(lambda x: {
-      "property": x,
-      "catalog_item": catalog_properties[x],
-      "catalog_value": catalog_properties[x][CatConst.PROPERTY_VALUE_TAG],
-      "actual_value": None
-    }, catalog_properties.keys())
-  else:
-    # build list of properties according to actual properties
-    verified_catalog = map(lambda x: {
-      "property": x,
-      "catalog_item": catalog_properties[x] if x in catalog_properties else None,
-      "catalog_value": catalog_properties[x][CatConst.PROPERTY_VALUE_TAG] if x in catalog_properties else None,
-      "actual_value": actual_properties[x]
-    }, actual_properties.keys())
-
-    # build list of properties according to catalog properties
-    verified_catalog_catalog = map(lambda x: {
-      "property": x,
-      "catalog_item": catalog_properties[x],
-      "catalog_value": catalog_properties[x][CatConst.PROPERTY_VALUE_TAG] if CatConst.PROPERTY_VALUE_TAG in
-                                                                             catalog_properties[x] else None,
-      "actual_value": actual_properties[x] if x in actual_properties else None,
-    }, catalog_properties.keys())
-
-    # append properties which are listed in the catalog but not present in the actual configuration
-    verified_catalog += filter(lambda x: x["property"] not in actual_properties, verified_catalog_catalog)
-
-  return verified_catalog
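
Each entry of the list returned above follows the structure documented in the docstring. One made-up example where the actual value disagrees with the catalog, assuming CatConst.PROPERTY_VALUE_TAG resolves to the "value" key:

# Illustrative diff entry (property name and values invented)
{
  "property": "mapreduce.map.memory.mb",
  "catalog_item": {"value": "1024"},
  "catalog_value": "1024",
  "actual_value": "512"
}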
-
-
-def configuration_diff_analyze(diff_list):
-  report = {}
-  for item_key in diff_list.keys():
-    property_diff_list = diff_list[item_key]
-    item_stat = {
-      "skipped": {"count": 0, "items": []},
-      "ok": {"count": 0, "items": []},
-      "fail": {"count": 0, "items": []},
-      "total": {"count": len(property_diff_list), "items": []}
-    }
-
-    def push_status(status, _property_item):
-      item_stat[status]["count"] += 1
-      item_stat[status]["items"].append(_property_item)
-
-    for property_item in property_diff_list:
-      # process properties which can be absent
-
-      # item was removed from the actual configs according to the catalog instructions
-      if property_item["actual_value"] is None \
-              and CatConst.PROPERTY_REMOVE_TAG in property_item["catalog_item"] \
-              and property_item["catalog_item"][CatConst.PROPERTY_REMOVE_TAG] == CatConst.TRUE_TAG:
-
-        push_status("ok", property_item)
-
-        # currently skip values with the template tag, as no filter is implemented
-        # ToDo: implement the possibility to filter values without a filter handler;
-        # ToDo: currently filtering is possible only at the update-configs stage
-      elif property_item["actual_value"] is not None and property_item["catalog_value"] is not None \
-              and CatConst.VALUE_TEMPLATE_TAG in property_item["catalog_item"] \
-              and property_item["catalog_item"][CatConst.VALUE_TEMPLATE_TAG] == CatConst.TRUE_TAG:
-
-        push_status("skipped", property_item)
-
-      # item not present in actual config, but present in catalog and no remove tag is present
-      elif property_item["actual_value"] is None and property_item["catalog_value"] is not None:
-        push_status("fail", property_item)
-
-      # property exists in actual configuration, but not described in catalog configuration
-      elif property_item["actual_value"] is not None and property_item["catalog_value"] is None:
-        push_status("skipped", property_item)
-
-      # actual and catalog properties are equal
-      elif property_item["catalog_value"] == property_item["actual_value"]:
-        push_status("ok", property_item)
-      elif property_item["catalog_value"] != property_item["actual_value"]:
-        push_status("fail", property_item)
-
-    report[item_key] = item_stat
-  return report
-
-
-def verify_configuration():
-  diff_list = {}
-
-  if len(Options.ARGS) > 1:
-    config_type = Options.ARGS[1]
-  else:
-    config_type = None
-
-  catalog_farm = UpgradeCatalogFactory(Options.OPTIONS.upgrade_json)  # Load upgrade catalog
-  catalog = catalog_farm.get_catalog(Options.OPTIONS.from_stack,
-                                     Options.OPTIONS.to_stack)  # get desired version of catalog
-  server_configs = ServerConfigFactory()
-
-  if catalog is None:
-    raise FatalException(1, "Upgrade catalog for version %s-%s not found"
-                         % (Options.OPTIONS.from_stack, Options.OPTIONS.to_stack))
-
-  if config_type is not None and config_type not in catalog.items.keys() and config_type not in server_configs.items():
-    raise FatalException("Config type %s not exists" % config_type)
-
-  # fetch all options from the server at one time and filter only the desired versions
-
-  if config_type is not None:
-    diff_list[config_type] = configuration_item_diff(config_type, catalog.items[config_type], server_configs.get_config(config_type).properties)
-  else:
-    for collection_name in catalog.items.keys():
-      diff_list[collection_name] = configuration_item_diff(collection_name, catalog.items[collection_name], server_configs.get_config(collection_name).properties)
-
-  analyzed_list = configuration_diff_analyze(diff_list)
-
-  report_file = None
-  if Options.REPORT_FILE is not None:
-    try:
-      report_file = open(Options.REPORT_FILE, "w")
-    except IOError as e:
-      Options.logger.error("Report file open error: %s" % e.message)
-
-  for config_item in analyzed_list:
-    if analyzed_list[config_item]["fail"]["count"] != 0:
-      Options.logger.info(
-        "%s: %s missing configuration(s) - please look in the output file for the missing params" % (
-          config_item, analyzed_list[config_item]["fail"]["count"]
-        )
-      )
-      if report_file is not None:
-        report_formatter(report_file, config_item, analyzed_list[config_item])
-    else:
-      Options.logger.info("%s: verified" % config_item)
-
-  if report_file is not None:
-    try:
-      report_file.close()
-    except IOError as e:
-      Options.logger.error("Report file close error: %s" % e.message)
-
-
-def report_formatter(report_file, config_item, analyzed_list_item):
-  prefix = "Configuration item %s" % config_item
-  if analyzed_list_item["fail"]["count"] > 0:
-    for item in analyzed_list_item["fail"]["items"]:
-      report_file.write("%s: property \"%s\" is set to \"%s\", but should be set to \"%s\"\n" % (
-        prefix, item["property"], item["actual_value"], item["catalog_value"]
-      ))
-
-
-def main():
-  action_list = {  # list of supported actions
-                   Options.GET_MR_MAPPING_ACTION: get_mr1_mapping,
-                   Options.DELETE_MR_ACTION: delete_mr,
-                   Options.ADD_YARN_MR2_ACTION: add_services,
-                   Options.MODIFY_CONFIG_ACTION: modify_configs,
-                   Options.INSTALL_YARN_MR2_ACTION: install_services,
-                   Options.BACKUP_CONFIG_ACTION: backup_configs,
-                   Options.VERIFY_ACTION: verify_configuration
-                 }
-
-  parser = optparse.OptionParser(usage="usage: %prog [options] action\n  Valid actions: "
-                                       + ", ".join(action_list.keys())
-                                       + "\n  update-configs accepts type, e.g. hdfs-site to update specific configs")
-
-  parser.add_option("-n", "--printonly",
-                    action="store_true", dest="printonly", default=False,
-                    help="Prints all the curl commands to be executed (no post/update request will be performed)")
-  parser.add_option("-w", "--writeonly",
-                    action="store_true", dest="writeonly", default=False,
-                    help="in the combination with --printonly param will print only post/update requests")
-  parser.add_option("-o", "--log", dest="logfile", default=None,
-                    help="Log file")
-  parser.add_option("--report", dest="report", default=None,
-                    help="Report file output location")
-
-  parser.add_option('--upgradeCatalog', default=None, help="Upgrade Catalog file full path", dest="upgrade_json")
-  parser.add_option('--fromStack', default=None, help="stack version to upgrade from", dest="from_stack")
-  parser.add_option('--toStack', default=None, help="stack version to upgrade to", dest="to_stack")
-
-  parser.add_option('--hostname', default=None, help="Hostname for Ambari server", dest="hostname")
-  parser.add_option('--port', default='8080', help="Port number for Ambari server", dest="port")
-  parser.add_option('--https', default=False, action="store_true", dest="https", help="Use https protocol for connection to the server")
-  parser.add_option('--user', default=None, help="Ambari admin user", dest="user")
-  parser.add_option('--password', default=None, help="Ambari admin password", dest="password")
-  parser.add_option('--clustername', default=None, help="Cluster name", dest="clustername")
-
-  (options, args) = parser.parse_args()
-  Options.initialize_logger(options.logfile)
-  options.warnings = []
-
-  if len(arg

<TRUNCATED>

[52/63] [abbrv] ambari git commit: AMBARI-21099. Drop JDK 7 support for Ambari Server and Ambari managed services (AMS, LogSearch, Infra) (oleewere)

Posted by ab...@apache.org.
AMBARI-21099. Drop JDK 7 support for Ambari Server and Ambari managed services (AMS, LogSearch, Infra) (oleewere)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2f0de691
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2f0de691
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2f0de691

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 2f0de6919ba8bb43ae156d66b39335f9860d26b9
Parents: f4fb174
Author: oleewere <ol...@gmail.com>
Authored: Wed Jun 28 21:15:48 2017 +0200
Committer: oleewere <ol...@gmail.com>
Committed: Wed Jun 28 21:16:11 2017 +0200

----------------------------------------------------------------------
 ambari-server/docs/configuration/index.md       |  13 ++-
 .../ambari/server/agent/ExecutionCommand.java   |   4 +
 .../server/configuration/Configuration.java     |  65 ++++++++++--
 .../controller/AmbariActionExecutionHelper.java |   2 +
 .../AmbariCustomCommandExecutionHelper.java     |  12 +--
 .../AmbariManagementControllerImpl.java         |   2 +
 .../internal/ClientConfigResourceProvider.java  |   9 +-
 .../apache/ambari/server/utils/StageUtils.java  |  54 ++++++++++
 ambari-server/src/main/python/ambari-server.py  |   2 +
 .../python/ambari_server/serverConfiguration.py |   6 ++
 .../main/python/ambari_server/serverSetup.py    | 105 +++++++++++++++++--
 .../0.1.0/package/scripts/params.py             |   4 +-
 .../0.1.0/package/scripts/params.py             |  10 +-
 .../ATLAS/0.1.0.2.3/package/scripts/metadata.py |   8 +-
 .../ATLAS/0.1.0.2.3/package/scripts/params.py   |   1 +
 .../LOGSEARCH/0.5.0/package/scripts/params.py   |   4 +-
 .../RANGER/0.4.0/package/scripts/params.py      |   1 +
 .../0.4.0/package/scripts/setup_ranger_xml.py   |  10 +-
 .../2.0.6/hooks/before-ANY/scripts/params.py    |   2 +
 .../before-ANY/scripts/shared_initialization.py |  30 ++++--
 .../2.0.6/hooks/before-START/scripts/params.py  |   4 +
 .../scripts/shared_initialization.py            |  22 ++--
 .../HDP/3.0/hooks/before-ANY/scripts/params.py  |   3 +
 .../before-ANY/scripts/shared_initialization.py |  31 ++++--
 .../3.0/hooks/before-START/scripts/params.py    |   4 +
 .../scripts/shared_initialization.py            |  22 ++--
 .../main/resources/stacks/HDP/3.0/metainfo.xml  |   2 +-
 .../PERF/1.0/hooks/before-ANY/scripts/params.py |   3 +
 .../before-ANY/scripts/shared_initialization.py |  23 +++-
 .../ClientConfigResourceProviderTest.java       |   8 ++
 .../ambari/server/utils/StageUtilsTest.java     |  99 +++++++++++++++++
 .../src/test/python/TestAmbariServer.py         |  49 ++++++++-
 32 files changed, 524 insertions(+), 90 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/docs/configuration/index.md
----------------------------------------------------------------------
diff --git a/ambari-server/docs/configuration/index.md b/ambari-server/docs/configuration/index.md
index 2394264..9dbe9c4 100644
--- a/ambari-server/docs/configuration/index.md
+++ b/ambari-server/docs/configuration/index.md
@@ -142,9 +142,9 @@ The following are the properties which can be used to configure Ambari.
 | http.x-content-type-options | The value that will be used to set the `X-CONTENT-TYPE` HTTP response header. |`nosniff` | 
 | http.x-frame-options | The value that will be used to set the `X-Frame-Options` HTTP response header. |`DENY` | 
 | http.x-xss-protection | The value that will be used to set the `X-XSS-Protection` HTTP response header. |`1; mode=block` | 
-| java.home | The location of the JDK on the Ambari Agent hosts.<br/><br/>The following are examples of valid values:<ul><li>`/usr/jdk64/jdk1.7.0_45`</ul> | | 
-| jce.name | The name of the JCE policy ZIP file. <br/><br/>The following are examples of valid values:<ul><li>`UnlimitedJCEPolicyJDK7.zip`</ul> | | 
-| jdk.name | The name of the JDK installation binary.<br/><br/>The following are examples of valid values:<ul><li>`jdk-7u45-linux-x64.tar.gz`</ul> | | 
+| java.home | The location of the JDK on the Ambari Agent hosts. If stack.java.home exists, that is only used by Ambari Server (or you can find that as ambari_java_home in the commandParams on the agent side)<br/><br/>The following are examples of valid values:<ul><li>`/usr/jdk64/jdk1.8.0_112`</ul> | | 
+| jce.name | The name of the JCE policy ZIP file. If stack.jce.name exists, that is only used by Ambari Server (or you can find that as ambari_jce_name in the commandParams on the agent side)<br/><br/>The following are examples of valid values:<ul><li>`UnlimitedJCEPolicyJDK8.zip`</ul> | | 
+| jdk.name | The name of the JDK installation binary. If stack.jdk.name exists, that is only used by Ambari Server (or you can find that as ambari_jdk_name in the commandParams on the agent side)<br/><br/>The following are examples of valid values:<ul><li>`jdk-8u112-linux-x64.tar.gz`</ul> | | 
 | kdcserver.connection.check.timeout | The timeout, in milliseconds, to wait when communicating with a Kerberos Key Distribution Center. |`10000` | 
 | kerberos.check.jaas.configuration | Determines whether Kerberos-enabled Ambari deployments should use JAAS to validate login credentials. |`false` | 
 | kerberos.keytab.cache.dir | The location on the Ambari Server where Kerberos keytabs are cached. |`/var/lib/ambari-server/data/cache` | 
@@ -161,6 +161,7 @@ The following are the properties which can be used to configure Ambari.
 | metrics.retrieval-service.request.ttl | The number of seconds to wait between issuing JMX or REST metric requests to the same endpoint. This property is used to throttle requests to the same URL being made too close together<br/><br/> This property is related to `metrics.retrieval-service.request.ttl.enabled`. |`5` | 
 | metrics.retrieval-service.request.ttl.enabled | Enables throttling requests to the same endpoint within a fixed amount of time. This property will prevent Ambari from making new metric requests to update the cache for URLs which have been recently retrieved.<br/><br/> This property is related to `metrics.retrieval-service.request.ttl`. |`true` | 
 | mpacks.staging.path | The Ambari Management Pack staging directory on the Ambari Server.<br/><br/>The following are examples of valid values:<ul><li>`/var/lib/ambari-server/resources/mpacks`</ul> | | 
+| notification.dispatch.alert.script.directory | The directory for scripts which are used by the alert notification dispatcher. |`/var/lib/ambari-server/resources/scripts` | 
 | packages.pre.installed | Determines whether Ambari Agent instances have already have the necessary stack software installed |`false` | 
 | pam.configuration | The PAM configuration file. | | 
 | property.mask.file | The path of the file which lists the properties that should be masked from the api that returns ambari.properties | | 
@@ -209,6 +210,7 @@ The following are the properties which can be used to configure Ambari.
 | server.ecCacheSize | The size of the cache which is used to hold current operations in memory until they complete. |`10000` | 
 | server.execution.scheduler.isClustered | Determines whether Quartz will use a clustered job scheduled when performing scheduled actions like rolling restarts. |`false` | 
 | server.execution.scheduler.maxDbConnections | The number of concurrent database connections that the Quartz job scheduler can use. |`5` | 
+| server.execution.scheduler.maxStatementsPerConnection | The maximum number of prepared statements cached per database connection. |`120` | 
 | server.execution.scheduler.maxThreads | The number of threads that the Quartz job scheduler will use when executing scheduled jobs. |`5` | 
 | server.execution.scheduler.misfire.toleration.minutes | The time, in minutes, that a scheduled job can be run after its missed scheduled execution time. |`480` | 
 | server.execution.scheduler.start.delay.seconds | The delay, in seconds, that a Quartz job must wait before it starts. |`120` | 
@@ -280,6 +282,10 @@ The following are the properties which can be used to configure Ambari.
 | ssl.trustStore.password | The password to use when setting the `javax.net.ssl.trustStorePassword` property | | 
 | ssl.trustStore.path | The location of the truststore to use when setting the `javax.net.ssl.trustStore` property. | | 
 | ssl.trustStore.type | The type of truststore used by the `javax.net.ssl.trustStoreType` property. | | 
+| stack.java.home | The location of the JDK on the Ambari Agent hosts for stack services.<br/><br/>The following are examples of valid values:<ul><li>`/usr/jdk64/jdk1.7.0_45`</ul> | | 
+| stack.java.version | JDK version of the stack, use in case of it differs from Ambari JDK version.<br/><br/>The following are examples of valid values:<ul><li>`1.7`</ul> | | 
+| stack.jce.name | The name of the JCE policy ZIP file for stack services.<br/><br/>The following are examples of valid values:<ul><li>`UnlimitedJCEPolicyJDK7.zip`</ul> | | 
+| stack.jdk.name | The name of the JDK installation binary for stack services.<br/><br/>The following are examples of valid values:<ul><li>`jdk-7u45-linux-x64.tar.gz`</ul> | | 
 | stack.upgrade.auto.retry.check.interval.secs | The amount of time to wait, in seconds, between checking for upgrade tasks to be retried. This value is only applicable if `stack.upgrade.auto.retry.timeout.mins` is positive.<br/><br/> This property is related to `stack.upgrade.auto.retry.timeout.mins`. |`20` | 
 | stack.upgrade.auto.retry.command.details.to.ignore | A comma-separated list of upgrade task details to skip when retrying failed commands automatically. |`"Execute HDFS Finalize"` | 
 | stack.upgrade.auto.retry.command.names.to.ignore | A comma-separated list of upgrade task names to skip when retrying failed commands automatically. |`"ComponentVersionCheckAction","FinalizeUpgradeAction"` | 
@@ -298,6 +304,7 @@ The following are the properties which can be used to configure Ambari.
 | views.ambari.request.connect.timeout.millis | The amount of time, in milliseconds, that a view will wait when trying to connect on HTTP(S) operations to the Ambari REST API. |`30000` | 
 | views.ambari.request.read.timeout.millis | The amount of time, in milliseconds, that a view will wait before terminating an HTTP(S) read request to the Ambari REST API. |`45000` | 
 | views.dir | The directory on the Ambari Server file system used for expanding Views and storing webapp work. |`/var/lib/ambari-server/resources/views` | 
+| views.directory.watcher.disable | Determines whether the view directory watcher service should be disabled. |`false` | 
 | views.http.cache-control | The value that will be used to set the `Cache-Control` HTTP response header for Ambari View requests. |`no-store` | 
 | views.http.charset | The value that will be used to set the Character encoding to HTTP response header for Ambari View requests. |`utf-8` | 
 | views.http.pragma | The value that will be used to set the `PRAGMA` HTTP response header for Ambari View requests. |`no-cache` | 
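
To illustrate the java.home / stack.java.home split documented above, an agent-side script could fall back from the stack JDK to the JDK that runs Ambari itself. This is only a sketch: the key names follow the commandParams constants added in this commit, and the paths are invented.

# Minimal sketch: prefer the stack JDK (java_home) and fall back to ambari_java_home.
command_params = {
  "java_home": "/usr/jdk64/jdk1.7.0_45",         # driven by stack.java.home when set
  "ambari_java_home": "/usr/jdk64/jdk1.8.0_112"  # driven by java.home
}
java_home = command_params.get("java_home") or command_params.get("ambari_java_home")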

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
index 60df8cf..29d28da 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
@@ -428,6 +428,10 @@ public class ExecutionCommand extends AgentCommand {
     String PACKAGE_LIST = "package_list";
     String JDK_LOCATION = "jdk_location";
     String JAVA_HOME = "java_home";
+    String AMBARI_JAVA_HOME = "ambari_java_home";
+    String AMBARI_JDK_NAME = "ambari_jdk_name";
+    String AMBARI_JCE_NAME = "ambari_jce_name";
+    String AMBARI_JAVA_VERSION = "ambari_java_version";
     String JAVA_VERSION = "java_version";
     String JDK_NAME = "jdk_name";
     String JCE_NAME = "jce_name";

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
index fb06e6d..28f9d64 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
@@ -733,8 +733,8 @@ public class Configuration {
    * The location of the JDK on the Ambari Agent hosts.
    */
   @Markdown(
-      description = "The location of the JDK on the Ambari Agent hosts.",
-      examples = { "/usr/jdk64/jdk1.7.0_45" })
+      description = "The location of the JDK on the Ambari Agent hosts. If stack.java.home exists, that is only used by Ambari Server (or you can find that as ambari_java_home in the commandParams on the agent side)",
+      examples = { "/usr/jdk64/jdk1.8.0_112" })
   public static final ConfigurationProperty<String> JAVA_HOME = new ConfigurationProperty<>(
       "java.home", null);
 
@@ -742,8 +742,8 @@ public class Configuration {
    * The name of the JDK installation binary.
    */
   @Markdown(
-      description = "The name of the JDK installation binary.",
-      examples = { "jdk-7u45-linux-x64.tar.gz" })
+      description = "The name of the JDK installation binary. If stack.jdk.name exists, that is only used by Ambari Server (or you can find that as ambari_jdk_name in the commandParams on the agent side)",
+      examples = { "jdk-8u112-linux-x64.tar.gz" })
   public static final ConfigurationProperty<String> JDK_NAME = new ConfigurationProperty<>(
       "jdk.name", null);
 
@@ -751,12 +751,48 @@ public class Configuration {
    * The name of the JCE policy ZIP file.
    */
   @Markdown(
-      description = "The name of the JCE policy ZIP file. ",
-      examples = {"UnlimitedJCEPolicyJDK7.zip"})
+      description = "The name of the JCE policy ZIP file. If stack.jce.name exists, that is only used by Ambari Server (or you can find that as ambari_jce_name in the commandParams on the agent side)",
+      examples = {"UnlimitedJCEPolicyJDK8.zip"})
   public static final ConfigurationProperty<String> JCE_NAME = new ConfigurationProperty<>(
       "jce.name", null);
 
   /**
+   * The location of the JDK on the Ambari Agent hosts for stack services.
+   */
+  @Markdown(
+    description = "The location of the JDK on the Ambari Agent hosts for stack services.",
+    examples = { "/usr/jdk64/jdk1.7.0_45" })
+  public static final ConfigurationProperty<String> STACK_JAVA_HOME = new ConfigurationProperty<>(
+    "stack.java.home", null);
+
+  /**
+   * The name of the JDK installation binary for stack services.
+   */
+  @Markdown(
+    description = "The name of the JDK installation binary for stack services.",
+    examples = { "jdk-7u45-linux-x64.tar.gz" })
+  public static final ConfigurationProperty<String> STACK_JDK_NAME = new ConfigurationProperty<>(
+    "stack.jdk.name", null);
+
+  /**
+   * The name of the JCE policy ZIP file for stack services.
+   */
+  @Markdown(
+    description = "The name of the JCE policy ZIP file for stack services.",
+    examples = {"UnlimitedJCEPolicyJDK7.zip"})
+  public static final ConfigurationProperty<String> STACK_JCE_NAME = new ConfigurationProperty<>(
+    "stack.jce.name", null);
+
+  /**
+   * The JDK version used by stack services.
+   */
+  @Markdown(
+    description = "JDK version of the stack, use in case of it differs from Ambari JDK version.",
+    examples = {"1.7"})
+  public static final ConfigurationProperty<String> STACK_JAVA_VERSION = new ConfigurationProperty<>(
+    "stack.java.version", null);
+
+  /**
    * The auto group creation by Ambari.
    */
   @Markdown(
@@ -4128,6 +4164,23 @@ public class Configuration {
   public String getJCEName() {
     return getProperty(JCE_NAME);
   }
+
+  public String getStackJavaHome() {
+    return getProperty(STACK_JAVA_HOME);
+  }
+
+  public String getStackJDKName() {
+    return getProperty(STACK_JDK_NAME);
+  }
+
+  public String getStackJCEName() {
+    return getProperty(STACK_JCE_NAME);
+  }
+
+  public String getStackJavaVersion() {
+    return getProperty(STACK_JAVA_VERSION);
+  }
+
   public String getAmbariBlacklistFile() {
     return getProperty(PROPERTY_MASK_FILE);
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
index 1b0e0e0..8f522b0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
@@ -60,6 +60,7 @@ import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpInProgressEvent;
 import org.apache.ambari.server.utils.SecretReference;
+import org.apache.ambari.server.utils.StageUtils;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -418,6 +419,7 @@ public class AmbariActionExecutionHelper {
 
       commandParams.put(SCRIPT, actionName + ".py");
       commandParams.put(SCRIPT_TYPE, TYPE_PYTHON);
+      StageUtils.useAmbariJdkInCommandParams(commandParams, configs);
 
       ExecutionCommand execCmd = stage.getExecutionCommandWrapper(hostName,
         actionContext.getActionName()).getExecutionCommand();

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index 0473690..28aa4e4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -29,11 +29,7 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.GROUP_LIST;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOOKS_FOLDER;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOST_SYS_PREPPED;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JAVA_HOME;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JAVA_VERSION;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JCE_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_LOCATION;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.MYSQL_JDBC_URL;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.NOT_MANAGED_HDFS_PATH_LIST;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.ORACLE_JDBC_URL;
@@ -508,7 +504,7 @@ public class AmbariCustomCommandExecutionHelper {
       if (isUpgradeSuspended) {
         cluster.addSuspendedUpgradeParameters(commandParams, roleParams);
       }
-
+      StageUtils.useAmbariJdkInCommandParams(commandParams, configs);
       roleParams.put(COMPONENT_CATEGORY, componentInfo.getCategory());
 
       execCmd.setCommandParams(commandParams);
@@ -815,6 +811,7 @@ public class AmbariCustomCommandExecutionHelper {
     }
     commandParams.put(SERVICE_PACKAGE_FOLDER, serviceInfo.getServicePackageFolder());
     commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
+    StageUtils.useAmbariJdkInCommandParams(commandParams, configs);
 
     execCmd.setCommandParams(commandParams);
 
@@ -1481,11 +1478,8 @@ public class AmbariCustomCommandExecutionHelper {
   Map<String, String> createDefaultHostParams(Cluster cluster, StackId stackId) throws AmbariException {
 
     TreeMap<String, String> hostLevelParams = new TreeMap<>();
+    StageUtils.useStackJdkIfExists(hostLevelParams, configs);
     hostLevelParams.put(JDK_LOCATION, managementController.getJdkResourceUrl());
-    hostLevelParams.put(JAVA_HOME, managementController.getJavaHome());
-    hostLevelParams.put(JAVA_VERSION, String.valueOf(configs.getJavaVersion()));
-    hostLevelParams.put(JDK_NAME, managementController.getJDKName());
-    hostLevelParams.put(JCE_NAME, managementController.getJCEName());
     hostLevelParams.put(STACK_NAME, stackId.getStackName());
     hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
     hostLevelParams.put(DB_NAME, managementController.getServerDB());

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 77883e3..5639dc1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -245,6 +245,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
   private static final String BASE_LOG_DIR = "/tmp/ambari";
 
   private static final String PASSWORD = "password";
+
   public static final String SKIP_INSTALL_FOR_COMPONENTS = "skipInstallForComponents";
   public static final String DONT_SKIP_INSTALL_FOR_COMPONENTS = "dontSkipInstallForComponents";
 
@@ -2473,6 +2474,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     if (customCommandExecutionHelper.isTopologyRefreshRequired(roleCommand.name(), clusterName, serviceName)) {
       commandParams.put(ExecutionCommand.KeyNames.REFRESH_TOPOLOGY, "True");
     }
+    StageUtils.useAmbariJdkInCommandParams(commandParams, configs);
 
     String repoInfo = customCommandExecutionHelper.getRepoInfo(cluster, component, host);
     if (LOG.isDebugEnabled()) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
index 21cf16c..bd445eb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
@@ -23,11 +23,7 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AGENT_STA
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.GROUP_LIST;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOST_SYS_PREPPED;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JAVA_HOME;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JAVA_VERSION;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JCE_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_LOCATION;
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.MYSQL_JDBC_URL;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.NOT_MANAGED_HDFS_PATH_LIST;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.ORACLE_JDBC_URL;
@@ -363,11 +359,8 @@ public class ClientConfigResourceProvider extends AbstractControllerResourceProv
         osFamily = clusters.getHost(hostName).getOsFamily();
 
         TreeMap<String, String> hostLevelParams = new TreeMap<>();
+        StageUtils.useStackJdkIfExists(hostLevelParams, configs);
         hostLevelParams.put(JDK_LOCATION, managementController.getJdkResourceUrl());
-        hostLevelParams.put(JAVA_HOME, managementController.getJavaHome());
-        hostLevelParams.put(JAVA_VERSION, String.valueOf(configs.getJavaVersion()));
-        hostLevelParams.put(JDK_NAME, managementController.getJDKName());
-        hostLevelParams.put(JCE_NAME, managementController.getJCEName());
         hostLevelParams.put(STACK_NAME, stackId.getStackName());
         hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
         hostLevelParams.put(DB_NAME, managementController.getServerDB());

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
index e7a94d4..9409f70 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
@@ -17,6 +17,15 @@
  */
 package org.apache.ambari.server.utils;
 
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_JAVA_HOME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_JAVA_VERSION;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_JCE_NAME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AMBARI_JDK_NAME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JAVA_HOME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JAVA_VERSION;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JCE_NAME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_NAME;
+
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.InputStream;
@@ -82,6 +91,7 @@ public class StageUtils {
   protected static final String PORTS = "all_ping_ports";
   protected static final String RACKS = "all_racks";
   protected static final String IPV4_ADDRESSES = "all_ipv4_ips";
+
   private static Map<String, String> componentToClusterInfoKeyMap =
     new HashMap<>();
   private static Map<String, String> decommissionedToClusterInfoKeyMap =
@@ -598,4 +608,48 @@ public class StageUtils {
         startOfRange + separator + endOfRange;
     return rangeItem;
   }
+
+  /**
+   * Adds the Ambari-specific JDK details to the command parameters.
+   */
+  public static void useAmbariJdkInCommandParams(Map<String, String> commandParams, Configuration configuration) {
+    if (StringUtils.isNotEmpty(configuration.getJavaHome()) && !configuration.getJavaHome().equals(configuration.getStackJavaHome())) {
+      commandParams.put(AMBARI_JAVA_HOME, configuration.getJavaHome());
+      commandParams.put(AMBARI_JAVA_VERSION, String.valueOf(configuration.getJavaVersion()));
+      if (StringUtils.isNotEmpty(configuration.getJDKName())) { // if not custom jdk
+        commandParams.put(AMBARI_JDK_NAME, configuration.getJDKName());
+      }
+      if (StringUtils.isNotEmpty(configuration.getJCEName())) { // if not custom jdk
+        commandParams.put(AMBARI_JCE_NAME, configuration.getJCEName());
+      }
+    }
+  }
+
+  /**
+   * Fills the host-level parameters with the JDK details, overriding them with the stack JDK data when a stack JAVA_HOME is configured.
+   */
+  public static void useStackJdkIfExists(Map<String, String> hostLevelParams, Configuration configuration) {
+    // set defaults first
+    hostLevelParams.put(JAVA_HOME, configuration.getJavaHome());
+    hostLevelParams.put(JDK_NAME, configuration.getJDKName());
+    hostLevelParams.put(JCE_NAME, configuration.getJCEName());
+    hostLevelParams.put(JAVA_VERSION, String.valueOf(configuration.getJavaVersion()));
+    if (StringUtils.isNotEmpty(configuration.getStackJavaHome())
+      && !configuration.getStackJavaHome().equals(configuration.getJavaHome())) {
+      hostLevelParams.put(JAVA_HOME, configuration.getStackJavaHome());
+      if (StringUtils.isNotEmpty(configuration.getStackJavaVersion())) {
+        hostLevelParams.put(JAVA_VERSION, configuration.getStackJavaVersion());
+      }
+      if (StringUtils.isNotEmpty(configuration.getStackJDKName())) {
+        hostLevelParams.put(JDK_NAME, configuration.getStackJDKName());
+      } else {
+        hostLevelParams.put(JDK_NAME, null); // custom jdk for stack
+      }
+      if (StringUtils.isNotEmpty(configuration.getStackJCEName())) {
+        hostLevelParams.put(JCE_NAME, configuration.getStackJCEName());
+      } else {
+        hostLevelParams.put(JCE_NAME, null); // custom jdk for stack
+      }
+    }
+  }
 }
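
A rough Python sketch (not part of the patch) of the precedence useStackJdkIfExists applies when building hostLevelParams; the dict-based conf below stands in for Ambari's Configuration object and is purely illustrative.

def use_stack_jdk_if_exists(host_level_params, conf):
    # Defaults: the Ambari Server JDK.
    host_level_params['java_home'] = conf.get('java.home')
    host_level_params['jdk_name'] = conf.get('jdk.name')
    host_level_params['jce_name'] = conf.get('jce.name')
    host_level_params['java_version'] = conf.get('java.version')
    # Override with the stack JDK when stack.java.home is set and differs.
    stack_home = conf.get('stack.java.home')
    if stack_home and stack_home != conf.get('java.home'):
        host_level_params['java_home'] = stack_home
        if conf.get('stack.java.version'):
            host_level_params['java_version'] = conf.get('stack.java.version')
        host_level_params['jdk_name'] = conf.get('stack.jdk.name')  # None means a custom stack JDK
        host_level_params['jce_name'] = conf.get('stack.jce.name')  # None means a custom stack JDK

With stack.java.home pointing at a JDK 7 install and java.home at JDK 8, agents receive the stack JDK through hostLevelParams while the Ambari JDK travels separately in commandParams via useAmbariJdkInCommandParams above.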

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/python/ambari-server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari-server.py b/ambari-server/src/main/python/ambari-server.py
index c7bdcf9..5adcb04 100755
--- a/ambari-server/src/main/python/ambari-server.py
+++ b/ambari-server/src/main/python/ambari-server.py
@@ -482,6 +482,8 @@ def init_setup_parser_options(parser):
 
   other_group.add_option('-j', '--java-home', default=None,
                          help="Use specified java_home.  Must be valid on all hosts")
+  other_group.add_option('--stack-java-home', dest="stack_java_home", default=None,
+                    help="Use specified java_home for stack services.  Must be valid on all hosts")
   other_group.add_option('--skip-view-extraction', action="store_true", default=False, help="Skip extraction of system views", dest="skip_view_extraction")
   other_group.add_option('--postgresschema', default=None, help="Postgres database schema name",
                          dest="postgres_schema")

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/python/ambari_server/serverConfiguration.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/serverConfiguration.py b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
index 4780338..631c9a7 100644
--- a/ambari-server/src/main/python/ambari_server/serverConfiguration.py
+++ b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
@@ -87,6 +87,12 @@ JCE_NAME_PROPERTY = "jce.name"
 JDK_DOWNLOAD_SUPPORTED_PROPERTY = "jdk.download.supported"
 JCE_DOWNLOAD_SUPPORTED_PROPERTY = "jce.download.supported"
 
+# Stack JDK
+STACK_JAVA_HOME_PROPERTY = "stack.java.home"
+STACK_JDK_NAME_PROPERTY = "stack.jdk.name"
+STACK_JCE_NAME_PROPERTY = "stack.jce.name"
+STACK_JAVA_VERSION = "stack.java.version"
+
 
 #TODO property used incorrectly in local case, it was meant to be dbms name, not postgres database name,
 # has workaround for now, as we don't need dbms name if persistence_type=local

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/python/ambari_server/serverSetup.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/serverSetup.py b/ambari-server/src/main/python/ambari_server/serverSetup.py
index c6de088..5c016c5 100644
--- a/ambari-server/src/main/python/ambari_server/serverSetup.py
+++ b/ambari-server/src/main/python/ambari_server/serverSetup.py
@@ -41,7 +41,8 @@ from ambari_server.serverConfiguration import configDefaults, JDKRelease, \
   get_resources_location, get_value_from_properties, read_ambari_user, update_properties, validate_jdk, write_property, \
   JAVA_HOME, JAVA_HOME_PROPERTY, JCE_NAME_PROPERTY, JDBC_RCA_URL_PROPERTY, JDBC_URL_PROPERTY, \
   JDK_NAME_PROPERTY, JDK_RELEASES, NR_USER_PROPERTY, OS_FAMILY, OS_FAMILY_PROPERTY, OS_TYPE, OS_TYPE_PROPERTY, OS_VERSION, \
-  VIEWS_DIR_PROPERTY, JDBC_DATABASE_PROPERTY, JDK_DOWNLOAD_SUPPORTED_PROPERTY, JCE_DOWNLOAD_SUPPORTED_PROPERTY, SETUP_DONE_PROPERTIES
+  VIEWS_DIR_PROPERTY, JDBC_DATABASE_PROPERTY, JDK_DOWNLOAD_SUPPORTED_PROPERTY, JCE_DOWNLOAD_SUPPORTED_PROPERTY, SETUP_DONE_PROPERTIES, \
+  STACK_JAVA_HOME_PROPERTY, STACK_JDK_NAME_PROPERTY, STACK_JCE_NAME_PROPERTY, STACK_JAVA_VERSION
 from ambari_server.serverUtils import is_server_runing
 from ambari_server.setupSecurity import adjust_directory_permissions
 from ambari_server.userInput import get_YN_input, get_validated_string_input
@@ -79,7 +80,7 @@ UNTAR_JDK_ARCHIVE = "tar --no-same-owner -xvf {0}"
 JDK_PROMPT = "[{0}] {1}\n"
 JDK_VALID_CHOICES = "^[{0}{1:d}]$"
 
-
+JDK_VERSION_CHECK_CMD = """{0} -version 2>&1 | grep -i version | sed 's/.*version ".*\.\(.*\)\..*"/\\1/; 1q' 2>&1"""
 
 def get_supported_jdbc_drivers():
   factory = DBMSConfigFactory()
@@ -409,7 +410,7 @@ class JDKSetup(object):
   #
   # Downloads and installs the JDK and the JCE policy archive
   #
-  def download_and_install_jdk(self, args, properties):
+  def download_and_install_jdk(self, args, properties, ambariOnly = False):
     conf_file = properties.fileName
 
     jcePolicyWarn = "JCE Policy files are required for configuring Kerberos security. If you plan to use Kerberos," \
@@ -429,8 +430,22 @@ class JDKSetup(object):
       properties.removeOldProp(JDK_NAME_PROPERTY)
       properties.removeOldProp(JCE_NAME_PROPERTY)
 
+      if not ambariOnly:
+        properties.process_pair(STACK_JAVA_HOME_PROPERTY, args.java_home)
+        properties.removeOldProp(STACK_JDK_NAME_PROPERTY)
+        properties.removeOldProp(STACK_JCE_NAME_PROPERTY)
+
       self._ensure_java_home_env_var_is_set(args.java_home)
       self.jdk_index = self.custom_jdk_number
+
+      if args.stack_java_home: # reset stack specific jdk properties if stack_java_home exists
+        print 'Setting JAVA_HOME for stack services...'
+        print_warning_msg("JAVA_HOME " + args.stack_java_home + " (Stack) must be valid on ALL hosts")
+        print_warning_msg(jcePolicyWarn)
+        properties.process_pair(STACK_JAVA_HOME_PROPERTY, args.stack_java_home)
+        properties.removeOldProp(STACK_JDK_NAME_PROPERTY)
+        properties.removeOldProp(STACK_JCE_NAME_PROPERTY)
+
       return
 
     java_home_var = get_JAVA_HOME()
@@ -440,7 +455,10 @@ class JDKSetup(object):
       progress_func = download_progress
 
     if java_home_var:
-      change_jdk = get_YN_input("Do you want to change Oracle JDK [y/n] (n)? ", False)
+      message = "Do you want to change Oracle JDK [y/n] (n)? "
+      if ambariOnly:
+        message = "Do you want to change Oracle JDK for Ambari Server [y/n] (n)? "
+      change_jdk = get_YN_input(message, False)
       if not change_jdk:
         self._ensure_java_home_env_var_is_set(java_home_var)
         self.jdk_index = self.custom_jdk_number
@@ -448,7 +466,7 @@ class JDKSetup(object):
 
     #Continue with the normal setup, taking the first listed JDK version as the default option
     jdk_num = str(self.jdk_index + 1)
-    (self.jdks, jdk_choice_prompt, jdk_valid_choices, self.custom_jdk_number) = self._populate_jdk_configs(properties, jdk_num)
+    (self.jdks, jdk_choice_prompt, jdk_valid_choices, self.custom_jdk_number) = self._populate_jdk_configs(properties, jdk_num, ambariOnly)
 
     jdk_num = get_validated_string_input(
       jdk_choice_prompt,
@@ -478,10 +496,18 @@ class JDKSetup(object):
       properties.removeOldProp(JDK_NAME_PROPERTY)
       properties.removeOldProp(JCE_NAME_PROPERTY)
 
+      if not ambariOnly:
+        properties.process_pair(STACK_JAVA_HOME_PROPERTY, args.java_home)
+        properties.removeOldProp(STACK_JDK_NAME_PROPERTY)
+        properties.removeOldProp(STACK_JCE_NAME_PROPERTY)
+
       # Make sure any previously existing JDK and JCE name properties are removed. These will
       # confuse things in a Custom JDK scenario
       properties.removeProp(JDK_NAME_PROPERTY)
       properties.removeProp(JCE_NAME_PROPERTY)
+      if not ambariOnly:
+        properties.removeOldProp(STACK_JDK_NAME_PROPERTY)
+        properties.removeOldProp(STACK_JCE_NAME_PROPERTY)
 
       self._ensure_java_home_env_var_is_set(args.java_home)
       return
@@ -551,10 +577,13 @@ class JDKSetup(object):
 
     properties.process_pair(JDK_NAME_PROPERTY, jdk_cfg.dest_file)
     properties.process_pair(JAVA_HOME_PROPERTY, java_home_dir)
+    if not ambariOnly:
+      properties.process_pair(STACK_JDK_NAME_PROPERTY, jdk_cfg.dest_file)
+      properties.process_pair(STACK_JAVA_HOME_PROPERTY, java_home_dir)
 
     self._ensure_java_home_env_var_is_set(java_home_dir)
 
-  def download_and_unpack_jce_policy(self, properties):
+  def download_and_unpack_jce_policy(self, properties, ambariOnly = False):
     err_msg_stdout = "JCE Policy files are required for secure HDP setup. Please ensure " \
               " all hosts have the JCE unlimited strength policy 6, files."
 
@@ -563,7 +592,7 @@ class JDKSetup(object):
     jdk_cfg = self.jdks[self.jdk_index]
 
     try:
-      JDKSetup._download_jce_policy(jdk_cfg.jcpol_url, jdk_cfg.dest_jcpol_file, resources_dir, properties)
+      JDKSetup._download_jce_policy(jdk_cfg.jcpol_url, jdk_cfg.dest_jcpol_file, resources_dir, properties, ambariOnly)
     except FatalException, e:
       print err_msg_stdout
       print_error_msg("Failed to download JCE policy files:")
@@ -590,10 +619,22 @@ class JDKSetup(object):
     jce_zip_path = os.path.abspath(os.path.join(resources_dir, jce_packed_file))
     expand_jce_zip_file(jce_zip_path, jdk_security_path)
 
-  def _populate_jdk_configs(self, properties, jdk_num):
+  def _populate_jdk_configs(self, properties, jdk_num, ambariOnly = False):
+    def remove_jdk_condition(name):
+      """
+      Removes jdk1.7 from the default choices.
+      This method can be removed once JDK 7 support (for stack services) is dropped.
+      """
+      if name != "jdk1.7":
+        return True
+      else:
+       print "JDK 7 detected. Removed from choices."
+       return False
     if properties.has_key(JDK_RELEASES):
       jdk_names = properties[JDK_RELEASES].split(',')
       jdk_names = filter(None, jdk_names)
+      if ambariOnly:
+        jdk_names = filter(lambda x : remove_jdk_condition(x), jdk_names)
       jdks = []
       for jdk_name in jdk_names:
         jdkR = JDKRelease.from_properties(properties, jdk_name)
@@ -630,7 +671,7 @@ class JDKSetup(object):
       raise FatalException(1, err)
 
   @staticmethod
-  def _download_jce_policy(jcpol_url, dest_jcpol_file, resources_dir, properties):
+  def _download_jce_policy(jcpol_url, dest_jcpol_file, resources_dir, properties, ambariOnly = False):
     dest_file = os.path.abspath(os.path.join(resources_dir, dest_jcpol_file))
 
     if not os.path.exists(dest_file):
@@ -653,6 +694,8 @@ class JDKSetup(object):
       print "JCE Policy archive already exists, using " + dest_file
 
     properties.process_pair(JCE_NAME_PROPERTY, dest_jcpol_file)
+    if not ambariOnly:
+      properties.process_pair(STACK_JCE_NAME_PROPERTY, dest_jcpol_file)
 
   # Base implementation, overriden in the subclasses
   def _install_jdk(self, java_inst_file, java_home_dir):
@@ -828,6 +871,14 @@ def download_and_install_jdk(options):
 
   update_properties(properties)
 
+  ambari_java_version_valid = check_ambari_java_version_is_valid(get_JAVA_HOME(), jdkSetup.JAVA_BIN, 8, properties)
+  if not ambari_java_version_valid:
+    jdkSetup = JDKSetup() # recreate object
+    jdkSetup.download_and_install_jdk(options, properties, True)
+    if jdkSetup.jdk_index != jdkSetup.custom_jdk_number:
+      jdkSetup.download_and_unpack_jce_policy(properties, True)
+    update_properties(properties)
+
   return 0
 
 
@@ -1200,7 +1251,43 @@ def setup_jce_policy(args):
   print 'NOTE: Restart Ambari Server to apply changes' + \
         ' ("ambari-server restart|stop|start")'
 
+def check_ambari_java_version_is_valid(java_home, java_bin, min_version, properties):
+  """
+  Check, via a shell command, that the Ambari Server uses at least the minimum required JDK.
+  Returns True if the Ambari Server JDK meets the minimum version requirement.
+  """
+  result = True
+  print 'Check JDK version for Ambari Server...'
+  try:
+    command = JDK_VERSION_CHECK_CMD.format(os.path.join(java_home, 'bin', java_bin))
+    process = subprocess.Popen(command,
+                               stdout=subprocess.PIPE,
+                               stdin=subprocess.PIPE,
+                               stderr=subprocess.PIPE,
+                               shell=True
+                               )
+    (out, err) = process.communicate()
+    if process.returncode != 0:
+      err = "Checking JDK version command returned with exit code %s" % process.returncode
+      raise FatalException(process.returncode, err)
+    else:
+      actual_jdk_version = int(out)
+      print 'JDK version found: {0}'.format(actual_jdk_version)
+      if actual_jdk_version < min_version:
+        print 'Minimum JDK version is {0} for Ambari. Setting up a separate JDK for the Ambari Server only.'.format(min_version)
+        properties.process_pair(STACK_JAVA_VERSION, out)
+        result = False
+      else:
+        print 'Minimum JDK version is {0} for Ambari. Skipping setup of a separate JDK for the Ambari Server.'.format(min_version)
+
+  except FatalException as e:
+    err = 'Running java version check command failed: {0}. Exiting.'.format(e)
+    raise FatalException(e.code, err)
+  except Exception as e:
+    err = 'Running java version check command failed: {0}. Exiting.'.format(e)
+    raise FatalException(1, err)
 
+  return result
 #
 # Resets the Ambari Server.
 #
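
A small illustration (not part of the patch, assuming typical java -version banners) of what JDK_VERSION_CHECK_CMD feeds into check_ambari_java_version_is_valid: for 1.x version strings the grep/sed pipeline reduces the banner to its second dotted component, which is then compared against the minimum of 8 passed in download_and_install_jdk above.

import re

def minor_version(java_version_line):
    # Hypothetical pure-Python stand-in for the grep/sed pipeline; valid for 1.x version strings only.
    match = re.search(r'version "\d+\.(\d+)\.', java_version_line)
    return int(match.group(1)) if match else None

# JDK 7 banner -> 7: below the minimum, so a second, Ambari-only JDK setup is triggered.
assert minor_version('java version "1.7.0_45"') == 7
# JDK 8 banner -> 8: meets the minimum, nothing extra to do.
assert minor_version('java version "1.8.0_112"') == 8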

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py
index 6eb3ba8..5f547f3 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_INFRA/0.1.0/package/scripts/params.py
@@ -60,7 +60,9 @@ user_group = config['configurations']['cluster-env']['user_group']
 fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
 
 # shared configs
-java64_home = config['hostLevelParams']['java_home']
+java_home = config['hostLevelParams']['java_home']
+ambari_java_home = default("/commandParams/ambari_java_home", None)
+java64_home = ambari_java_home if ambari_java_home is not None else java_home
 java_exec = format("{java64_home}/bin/java")
 zookeeper_hosts_list = config['clusterHostInfo']['zookeeper_hosts']
 zookeeper_hosts_list.sort()

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
index b8c14f4..486f568 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py
@@ -203,9 +203,15 @@ security_enabled = False if not is_hbase_distributed else config['configurations
 # this is "hadoop-metrics.properties" for 1.x stacks
 metric_prop_file_name = "hadoop-metrics2-hbase.properties"
 
+java_home = config['hostLevelParams']['java_home']
+ambari_java_home = default("/commandParams/ambari_java_home", None)
 # not supporting 32 bit jdk.
-java64_home = config['hostLevelParams']['java_home']
-java_version = expect("/hostLevelParams/java_version", int)
+java64_home = ambari_java_home if ambari_java_home is not None else java_home
+ambari_java_version = default("/commandParams/ambari_java_version", None)
+if ambari_java_version:
+  java_version = expect("/commandParams/ambari_java_version", int)
+else:
+  java_version = expect("/hostLevelParams/java_version", int)
 
 metrics_collector_heapsize = default('/configurations/ams-env/metrics_collector_heapsize', "512")
 metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py
index 36c4598..d424f5b 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata.py
@@ -207,7 +207,7 @@ def upload_conf_set(config_set, jaasFile):
       config_set_dir=format("{conf_dir}/solr"),
       config_set=config_set,
       tmp_dir=params.tmp_dir,
-      java64_home=params.java64_home,
+      java64_home=params.ambari_java_home,
       solrconfig_content=InlineTemplate(params.metadata_solrconfig_content),
       jaas_file=jaasFile,
       retry=30, interval=5)
@@ -220,7 +220,7 @@ def create_collection(collection, config_set, jaasFile):
       solr_znode=params.infra_solr_znode,
       collection = collection,
       config_set=config_set,
-      java64_home=params.java64_home,
+      java64_home=params.ambari_java_home,
       jaas_file=jaasFile,
       shards=params.atlas_solr_shards,
       replication_factor = params.infra_solr_replication_factor)
@@ -230,7 +230,7 @@ def secure_znode(znode, jaasFile):
   solr_cloud_util.secure_znode(config=params.config, zookeeper_quorum=params.zookeeper_quorum,
                                solr_znode=znode,
                                jaas_file=jaasFile,
-                               java64_home=params.java64_home, sasl_users=[params.atlas_jaas_principal])
+                               java64_home=params.ambari_java_home, sasl_users=[params.atlas_jaas_principal])
 
 
 
@@ -240,4 +240,4 @@ def check_znode():
   solr_cloud_util.check_znode(
     zookeeper_quorum=params.zookeeper_quorum,
     solr_znode=params.infra_solr_znode,
-    java64_home=params.java64_home)
+    java64_home=params.ambari_java_home)

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
index d26df33..111a248 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
@@ -126,6 +126,7 @@ user_group = config['configurations']['cluster-env']['user_group']
 
 # metadata env
 java64_home = config['hostLevelParams']['java_home']
+ambari_java_home = default("/commandParams/ambari_java_home", java64_home)
 java_exec = format("{java64_home}/bin/java")
 env_sh_template = config['configurations']['atlas-env']['content']
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
index 662f49e..1b77999 100644
--- a/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/LOGSEARCH/0.5.0/package/scripts/params.py
@@ -74,7 +74,9 @@ logfeeder_pid_file = status_params.logfeeder_pid_file
 user_group = config['configurations']['cluster-env']['user_group']
 
 # shared configs
-java64_home = config['hostLevelParams']['java_home']
+java_home = config['hostLevelParams']['java_home']
+ambari_java_home = default("/commandParams/ambari_java_home", None)
+java64_home = ambari_java_home if ambari_java_home is not None else java_home
 cluster_name = str(config['clusterName'])
 
 configurations = config['configurations'] # need reference inside logfeeder jinja templates

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
index 3789358..6d5581d 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
@@ -123,6 +123,7 @@ if stack_supports_ranger_tagsync:
 usersync_services_file = format('{stack_root}/current/ranger-usersync/ranger-usersync-services.sh')
 
 java_home = config['hostLevelParams']['java_home']
+ambari_java_home = default("/commandParams/ambari_java_home", java_home)
 unix_user  = config['configurations']['ranger-env']['ranger_user']
 unix_group = config['configurations']['ranger-env']['ranger_group']
 ranger_pid_dir = default("/configurations/ranger-env/ranger_pid_dir", "/var/run/ranger")

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
index ba21494..4bcf9b0 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/setup_ranger_xml.py
@@ -711,7 +711,7 @@ def setup_ranger_audit_solr():
         config_set = params.ranger_solr_config_set,
         config_set_dir = params.ranger_solr_conf,
         tmp_dir = params.tmp_dir,
-        java64_home = params.java_home,
+        java64_home = params.ambari_java_home,
         solrconfig_content = InlineTemplate(params.ranger_solr_config_content),
         jaas_file=params.solr_jaas_file,
         retry=30, interval=5
@@ -725,7 +725,7 @@ def setup_ranger_audit_solr():
         config_set = params.ranger_solr_config_set,
         config_set_dir = params.ranger_solr_conf,
         tmp_dir = params.tmp_dir,
-        java64_home = params.java_home,
+        java64_home = params.ambari_java_home,
         jaas_file=params.solr_jaas_file,
         retry=30, interval=5)
 
@@ -748,7 +748,7 @@ def setup_ranger_audit_solr():
       solr_znode = params.solr_znode,
       collection = params.ranger_solr_collection_name,
       config_set = params.ranger_solr_config_set,
-      java64_home = params.java_home,
+      java64_home = params.ambari_java_home,
       shards = params.ranger_solr_shards,
       replication_factor = int(params.replication_factor),
       jaas_file = params.solr_jaas_file)
@@ -774,14 +774,14 @@ def check_znode():
   solr_cloud_util.check_znode(
     zookeeper_quorum=params.zookeeper_quorum,
     solr_znode=params.solr_znode,
-    java64_home=params.java_home)
+    java64_home=params.ambari_java_home)
 
 def secure_znode(znode, jaasFile):
   import params
   solr_cloud_util.secure_znode(config=params.config, zookeeper_quorum=params.zookeeper_quorum,
                                solr_znode=znode,
                                jaas_file=jaasFile,
-                               java64_home=params.java_home, sasl_users=[params.ranger_admin_jaas_principal])
+                               java64_home=params.ambari_java_home, sasl_users=[params.ranger_admin_jaas_principal])
 
 def get_ranger_plugin_principals(services_defaults_tuple_list):
   """

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
index 4052d1d..1d79efb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
@@ -65,6 +65,8 @@ version = default("/commandParams/version", None)
 # Handle upgrade and downgrade
 if (upgrade_type is not None) and version:
   stack_version_formatted = format_stack_version(version)
+ambari_java_home = default("/commandParams/ambari_java_home", None)
+ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
 
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
index 1f17cd1..39f5a47 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
@@ -176,17 +176,26 @@ def setup_hadoop_env():
 
 def setup_java():
   """
-  Installs jdk using specific params, that comes from ambari-server
+  Install the JDK using the given params.
+  Also install the Ambari JDK when the stack JDK and the Ambari JDK differ.
   """
   import params
+  __setup_java(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name)
+  if params.ambari_java_home and params.ambari_java_home != params.java_home:
+    __setup_java(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name)
 
-  java_exec = format("{java_home}/bin/java")
+def __setup_java(custom_java_home, custom_jdk_name):
+  """
+  Installs a JDK using specific params that come from ambari-server.
+  """
+  import params
+  java_exec = format("{custom_java_home}/bin/java")
 
   if not os.path.isfile(java_exec):
     if not params.jdk_name: # if custom jdk is used.
       raise Fail(format("Unable to access {java_exec}. Confirm you have copied jdk to this host."))
 
-    jdk_curl_target = format("{tmp_dir}/{jdk_name}")
+    jdk_curl_target = format("{tmp_dir}/{custom_jdk_name}")
     java_dir = os.path.dirname(params.java_home)
 
     Directory(params.artifact_dir,
@@ -194,13 +203,13 @@ def setup_java():
               )
 
     File(jdk_curl_target,
-         content = DownloadSource(format("{jdk_location}/{jdk_name}")),
+         content = DownloadSource(format("{jdk_location}/{custom_jdk_name}")),
          not_if = format("test -f {jdk_curl_target}")
-    )
+         )
 
     File(jdk_curl_target,
          mode = 0755,
-    )
+         )
 
     tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=params.tmp_dir)
 
@@ -213,7 +222,7 @@ def setup_java():
         install_cmd = format("cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
 
       Directory(java_dir
-      )
+                )
 
       Execute(chmod_cmd,
               sudo = True,
@@ -225,10 +234,11 @@ def setup_java():
     finally:
       Directory(tmp_java_dir, action="delete")
 
-    File(format("{java_home}/bin/java"),
+    File(format("{custom_java_home}/bin/java"),
          mode=0755,
          cd_access="a",
          )
     Execute(('chmod', '-R', '755', params.java_home),
-      sudo = True,
-    )
+            sudo = True,
+            )
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
index 3488e75..49b0063 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
@@ -259,6 +259,10 @@ refresh_topology = False
 command_params = config["commandParams"] if "commandParams" in config else None
 if command_params is not None:
   refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False
+
+ambari_java_home = default("/commandParams/ambari_java_home", None)
+ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
+ambari_jce_name = default("/commandParams/ambari_jce_name", None)
   
 ambari_libs_dir = "/var/lib/ambari-agent/lib"
 is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
index 148d235..42785ba 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
@@ -197,9 +197,17 @@ def create_microsoft_r_dir():
     except Exception as exception:
       Logger.warning("Could not check the existence of {0} on DFS while starting {1}, exception: {2}".format(directory, params.current_service, str(exception)))
 
-
 def setup_unlimited_key_jce_policy():
   """
+  Sets up the unlimited key JCE policy if needed (also sets up the Ambari JCE policy when Ambari and the stack use different JDKs).
+  """
+  import params
+  __setup_unlimited_key_jce_policy(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name, custom_jce_name = params.jce_policy_zip)
+  if params.ambari_jce_name and params.ambari_jce_name != params.jce_policy_zip:
+    __setup_unlimited_key_jce_policy(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name, custom_jce_name = params.ambari_jce_name)
+
+def __setup_unlimited_key_jce_policy(custom_java_home, custom_jdk_name, custom_jce_name):
+  """
   Sets up the unlimited key JCE policy if needed.
 
   The following criteria must be met:
@@ -223,27 +231,27 @@ def setup_unlimited_key_jce_policy():
   if params.sysprep_skip_setup_jce:
     Logger.info("Skipping unlimited key JCE policy check and setup since the host is sys prepped")
 
-  elif not params.jdk_name:
+  elif not custom_jdk_name:
     Logger.debug("Skipping unlimited key JCE policy check and setup since the Java VM is not managed by Ambari")
 
   elif not params.unlimited_key_jce_required:
     Logger.debug("Skipping unlimited key JCE policy check and setup since it is not required")
 
   else:
-    jcePolicyInfo = JcePolicyInfo(params.java_home)
+    jcePolicyInfo = JcePolicyInfo(custom_java_home)
 
     if jcePolicyInfo.is_unlimited_key_jce_policy():
       Logger.info("The unlimited key JCE policy is required, and appears to have been installed.")
 
-    elif params.jce_policy_zip is None:
+    elif custom_jce_name is None:
       raise Fail("The unlimited key JCE policy needs to be installed; however the JCE policy zip is not specified.")
 
     else:
       Logger.info("The unlimited key JCE policy is required, and needs to be installed.")
 
-      jce_zip_target = format("{artifact_dir}/{jce_policy_zip}")
-      jce_zip_source = format("{ambari_server_resources_url}/{jce_policy_zip}")
-      java_security_dir = format("{java_home}/jre/lib/security")
+      jce_zip_target = format("{artifact_dir}/{custom_jce_name}")
+      jce_zip_source = format("{ambari_server_resources_url}/{custom_jce_name}")
+      java_security_dir = format("{custom_java_home}/jre/lib/security")
 
       Logger.debug("Downloading the unlimited key JCE policy files from {0} to {1}.".format(jce_zip_source, jce_zip_target))
       Directory(params.artifact_dir, create_parents=True)

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
index 9be9101..eb5feae 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
@@ -65,6 +65,9 @@ version = default("/commandParams/version", None)
 if (upgrade_type is not None) and version:
   stack_version_formatted = format_stack_version(version)
 
+ambari_java_home = default("/commandParams/ambari_java_home", None)
+ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
+
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
index 5d79084..dbd1727 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
@@ -172,17 +172,26 @@ def setup_hadoop_env():
 
 def setup_java():
   """
-  Installs jdk using specific params, that comes from ambari-server
+  Install the JDK using the given params.
+  Also install the Ambari JDK when the stack JDK and the Ambari JDK differ.
   """
   import params
+  __setup_java(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name)
+  if params.ambari_java_home and params.ambari_java_home != params.java_home:
+    __setup_java(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name)
 
-  java_exec = format("{java_home}/bin/java")
+def __setup_java(custom_java_home, custom_jdk_name):
+  """
+  Installs a JDK using specific params that come from ambari-server.
+  """
+  import params
+  java_exec = format("{custom_java_home}/bin/java")
 
   if not os.path.isfile(java_exec):
     if not params.jdk_name: # if custom jdk is used.
       raise Fail(format("Unable to access {java_exec}. Confirm you have copied jdk to this host."))
 
-    jdk_curl_target = format("{tmp_dir}/{jdk_name}")
+    jdk_curl_target = format("{tmp_dir}/{custom_jdk_name}")
     java_dir = os.path.dirname(params.java_home)
 
     Directory(params.artifact_dir,
@@ -190,9 +199,13 @@ def setup_java():
               )
 
     File(jdk_curl_target,
-         content = DownloadSource(format("{jdk_location}/{jdk_name}")),
+         content = DownloadSource(format("{jdk_location}/{custom_jdk_name}")),
          not_if = format("test -f {jdk_curl_target}")
-    )
+         )
+
+    File(jdk_curl_target,
+         mode = 0755,
+         )
 
     tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=params.tmp_dir)
 
@@ -205,7 +218,7 @@ def setup_java():
         install_cmd = format("cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
 
       Directory(java_dir
-      )
+                )
 
       Execute(chmod_cmd,
               sudo = True,
@@ -217,10 +230,10 @@ def setup_java():
     finally:
       Directory(tmp_java_dir, action="delete")
 
-    File(format("{java_home}/bin/java"),
+    File(format("{custom_java_home}/bin/java"),
          mode=0755,
          cd_access="a",
          )
     Execute(('chmod', '-R', '755', params.java_home),
-      sudo = True,
-    )
+            sudo = True,
+            )

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
index a3830f7..a0259af 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
@@ -243,6 +243,10 @@ refresh_topology = False
 command_params = config["commandParams"] if "commandParams" in config else None
 if command_params is not None:
   refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False
+
+ambari_java_home = default("/commandParams/ambari_java_home", None)
+ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
+ambari_jce_name = default("/commandParams/ambari_jce_name", None)
   
 ambari_libs_dir = "/var/lib/ambari-agent/lib"
 is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
index aed1124..5156dd4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
@@ -190,9 +190,17 @@ def create_microsoft_r_dir():
     except Exception as exception:
       Logger.warning("Could not check the existence of {0} on DFS while starting {1}, exception: {2}".format(directory, params.current_service, str(exception)))
 
-
 def setup_unlimited_key_jce_policy():
   """
+  Sets up the unlimited key JCE policy if needed (and the Ambari JCE as well when Ambari and the stack use different JDKs).
+  """
+  import params
+  __setup_unlimited_key_jce_policy(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name, custom_jce_name = params.jce_policy_zip)
+  if params.ambari_jce_name and params.ambari_jce_name != params.jce_policy_zip:
+    __setup_unlimited_key_jce_policy(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name, custom_jce_name = params.ambari_jce_name)
+
+def __setup_unlimited_key_jce_policy(custom_java_home, custom_jdk_name, custom_jce_name):
+  """
   Sets up the unlimited key JCE policy if needed.
 
   The following criteria must be met:
@@ -216,27 +224,27 @@ def setup_unlimited_key_jce_policy():
   if params.sysprep_skip_setup_jce:
     Logger.info("Skipping unlimited key JCE policy check and setup since the host is sys prepped")
 
-  elif not params.jdk_name:
+  elif not custom_jdk_name:
     Logger.debug("Skipping unlimited key JCE policy check and setup since the Java VM is not managed by Ambari")
 
   elif not params.unlimited_key_jce_required:
     Logger.debug("Skipping unlimited key JCE policy check and setup since it is not required")
 
   else:
-    jcePolicyInfo = JcePolicyInfo(params.java_home)
+    jcePolicyInfo = JcePolicyInfo(custom_java_home)
 
     if jcePolicyInfo.is_unlimited_key_jce_policy():
       Logger.info("The unlimited key JCE policy is required, and appears to have been installed.")
 
-    elif params.jce_policy_zip is None:
+    elif custom_jce_name is None:
       raise Fail("The unlimited key JCE policy needs to be installed; however the JCE policy zip is not specified.")
 
     else:
       Logger.info("The unlimited key JCE policy is required, and needs to be installed.")
 
-      jce_zip_target = format("{artifact_dir}/{jce_policy_zip}")
-      jce_zip_source = format("{ambari_server_resources_url}/{jce_policy_zip}")
-      java_security_dir = format("{java_home}/jre/lib/security")
+      jce_zip_target = format("{artifact_dir}/{custom_jce_name}")
+      jce_zip_source = format("{ambari_server_resources_url}/{custom_jce_name}")
+      java_security_dir = format("{custom_java_home}/jre/lib/security")
 
       Logger.debug("Downloading the unlimited key JCE policy files from {0} to {1}.".format(jce_zip_source, jce_zip_target))
       Directory(params.artifact_dir, create_parents=True)

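The same split is applied to the JCE handling: setup_unlimited_key_jce_policy() now runs the private helper for the stack JDK and, when the Ambari JCE archive differs, once more for the Ambari JDK. The following is a compact sketch of the helper's decision ladder; the callables passed in are hypothetical stand-ins for JcePolicyInfo and the download/unzip steps.

def setup_jce(java_home, jdk_name, jce_name, required, sysprepped,
              policy_installed, install):
    # Mirrors the order of checks in the hunk above and reports what was decided.
    if sysprepped:
        return "skipped: host is sys prepped"
    if not jdk_name:
        return "skipped: Java VM is not managed by Ambari"
    if not required:
        return "skipped: unlimited key JCE policy not required"
    if policy_installed(java_home):
        return "already installed"
    if jce_name is None:
        raise RuntimeError("unlimited key JCE policy required but no JCE zip specified")
    install(java_home, jce_name)
    return "installed"

print(setup_jce("/usr/jdk64/jdk_stack", "jdk-8u112-linux-x64.tar.gz", "jce8.zip",
                required=True, sysprepped=False,
                policy_installed=lambda home: False,
                install=lambda home, zip_name: None))  # -> installed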
http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml
index 0364d41..9af03c0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/metainfo.xml
@@ -19,6 +19,6 @@
   <versions>
     <active>true</active>
   </versions>
-  <minJdk>1.7</minJdk>
+  <minJdk>1.8</minJdk>
   <maxJdk>1.8</maxJdk>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/params.py
index 2c2c901..e0e78b9 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/params.py
@@ -39,6 +39,9 @@ artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
 jdk_location = config['hostLevelParams']['jdk_location']
 java_version = expect("/hostLevelParams/java_version", int)
 
+ambari_java_home = default("/commandParams/ambari_java_home", None)
+ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
+
 service_name = config["serviceName"]
 component_name = config["role"]
 sudo = AMBARI_SUDO_BINARY
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/shared_initialization.py
index 7dc1a48..0aae910 100644
--- a/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/PERF/1.0/hooks/before-ANY/scripts/shared_initialization.py
@@ -40,17 +40,26 @@ def setup_users():
 
 def setup_java():
   """
-  Installs jdk using specific params, that comes from ambari-server
+  Installs the JDK using specific params.
+  Also installs the Ambari JDK if the stack JDK and the Ambari JDK differ.
   """
   import params
+  __setup_java(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name)
+  if params.ambari_java_home and params.ambari_java_home != params.java_home:
+    __setup_java(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name)
 
-  java_exec = format("{java_home}/bin/java")
+def __setup_java(custom_java_home, custom_jdk_name):
+  """
+  Installs the JDK using specific params that come from ambari-server.
+  """
+  import params
+  java_exec = format("{custom_java_home}/bin/java")
 
   if not os.path.isfile(java_exec):
     if not params.jdk_name: # if custom jdk is used.
       raise Fail(format("Unable to access {java_exec}. Confirm you have copied jdk to this host."))
 
-    jdk_curl_target = format("{tmp_dir}/{jdk_name}")
+    jdk_curl_target = format("{tmp_dir}/{custom_jdk_name}")
     java_dir = os.path.dirname(params.java_home)
 
     Directory(params.artifact_dir,
@@ -58,10 +67,14 @@ def setup_java():
               )
 
     File(jdk_curl_target,
-         content = DownloadSource(format("{jdk_location}/{jdk_name}")),
+         content = DownloadSource(format("{jdk_location}/{custom_jdk_name}")),
          not_if = format("test -f {jdk_curl_target}")
          )
 
+    File(jdk_curl_target,
+         mode = 0755,
+         )
+
     tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=params.tmp_dir)
 
     try:
@@ -85,7 +98,7 @@ def setup_java():
     finally:
       Directory(tmp_java_dir, action="delete")
 
-    File(format("{java_home}/bin/java"),
+    File(format("{custom_java_home}/bin/java"),
          mode=0755,
          cd_access="a",
          )

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java
index 29b0476..1f906ad 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProviderTest.java
@@ -271,7 +271,11 @@ public class ClientConfigResourceProviderTest {
     expect(configMap.get(Configuration.AMBARI_PYTHON_WRAP.getKey())).andReturn(Configuration.AMBARI_PYTHON_WRAP.getDefaultValue());
     expect(configuration.getConfigsMap()).andReturn(returnConfigMap);
     expect(configuration.getResourceDirPath()).andReturn(stackRoot);
+    expect(configuration.getJavaHome()).andReturn("dummy_java_home");
+    expect(configuration.getJDKName()).andReturn(null);
+    expect(configuration.getJCEName()).andReturn(null);
     expect(configuration.getJavaVersion()).andReturn(8);
+    expect(configuration.getStackJavaHome()).andReturn(null);
     expect(configuration.areHostsSysPrepped()).andReturn("false");
     expect(configuration.isAgentStackRetryOnInstallEnabled()).andReturn("false");
     expect(configuration.getAgentStackRetryOnInstallCount()).andReturn("5");
@@ -524,7 +528,11 @@ public class ClientConfigResourceProviderTest {
     expect(configMap.get(Configuration.AMBARI_PYTHON_WRAP.getKey())).andReturn(Configuration.AMBARI_PYTHON_WRAP.getDefaultValue());
     expect(configuration.getConfigsMap()).andReturn(returnConfigMap);
     expect(configuration.getResourceDirPath()).andReturn("/var/lib/ambari-server/src/main/resources");
+    expect(configuration.getJavaHome()).andReturn("dummy_java_home");
+    expect(configuration.getJDKName()).andReturn(null);
+    expect(configuration.getJCEName()).andReturn(null);
     expect(configuration.getJavaVersion()).andReturn(8);
+    expect(configuration.getStackJavaHome()).andReturn(null);
     expect(configuration.areHostsSysPrepped()).andReturn("false");
     expect(configuration.isAgentStackRetryOnInstallEnabled()).andReturn("false");
     expect(configuration.getAgentStackRetryOnInstallCount()).andReturn("5");

http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java b/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
index b1cce55..c3b820b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/utils/StageUtilsTest.java
@@ -22,6 +22,7 @@ import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.getCurrentArguments;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.ByteArrayInputStream;
@@ -615,6 +616,104 @@ public class StageUtilsTest extends EasyMockSupport {
     }
   }
 
+  @Test
+  public void testUseAmbariJdkWithoutJavaHome() {
+    // GIVEN
+    Map<String, String> commandParams = new HashMap<>();
+    Configuration configuration = new Configuration();
+    // WHEN
+    StageUtils.useAmbariJdkInCommandParams(commandParams, configuration);
+    // THEN
+    assertTrue(commandParams.isEmpty());
+  }
+
+  @Test
+  public void testUseAmbariJdkWithCustomJavaHome() {
+    // GIVEN
+    Map<String, String> commandParams = new HashMap<>();
+    Configuration configuration = new Configuration();
+    configuration.setProperty("java.home", "myJavaHome");
+    // WHEN
+    StageUtils.useAmbariJdkInCommandParams(commandParams, configuration);
+    // THEN
+    assertEquals("myJavaHome", commandParams.get("ambari_java_home"));
+    assertEquals(2, commandParams.size());
+  }
+
+  @Test
+  public void testUseAmbariJdk() {
+    // GIVEN
+    Map<String, String> commandParams = new HashMap<>();
+    Configuration configuration = new Configuration();
+    configuration.setProperty("java.home", "myJavaHome");
+    configuration.setProperty("jdk.name", "myJdkName");
+    configuration.setProperty("jce.name", "myJceName");
+    // WHEN
+    StageUtils.useAmbariJdkInCommandParams(commandParams, configuration);
+    // THEN
+    assertEquals("myJavaHome", commandParams.get("ambari_java_home"));
+    assertEquals("myJdkName", commandParams.get("ambari_jdk_name"));
+    assertEquals("myJceName", commandParams.get("ambari_jce_name"));
+    assertEquals(4, commandParams.size());
+  }
+
+  @Test
+  public void testUseStackJdkIfExistsWithCustomStackJdk() {
+    // GIVEN
+    Map<String, String> hostLevelParams = new HashMap<>();
+    Configuration configuration = new Configuration();
+    configuration.setProperty("java.home", "myJavaHome");
+    configuration.setProperty("jdk.name", "myJdkName");
+    configuration.setProperty("jce.name", "myJceName");
+    configuration.setProperty("stack.java.home", "myStackJavaHome");
+    // WHEN
+    StageUtils.useStackJdkIfExists(hostLevelParams, configuration);
+    // THEN
+    assertEquals("myStackJavaHome", hostLevelParams.get("java_home"));
+    assertNull(hostLevelParams.get("jdk_name"));
+    assertNull(hostLevelParams.get("jce_name"));
+    assertEquals(4, hostLevelParams.size());
+  }
+
+  @Test
+  public void testUseStackJdkIfExists() {
+    // GIVEN
+    Map<String, String> hostLevelParams = new HashMap<>();
+    Configuration configuration = new Configuration();
+    configuration.setProperty("java.home", "myJavaHome");
+    configuration.setProperty("jdk.name", "myJdkName");
+    configuration.setProperty("jce.name", "myJceName");
+    configuration.setProperty("stack.java.home", "myStackJavaHome");
+    configuration.setProperty("stack.jdk.name", "myStackJdkName");
+    configuration.setProperty("stack.jce.name", "myStackJceName");
+    configuration.setProperty("stack.java.version", "7");
+    // WHEN
+    StageUtils.useStackJdkIfExists(hostLevelParams, configuration);
+    // THEN
+    assertEquals("myStackJavaHome", hostLevelParams.get("java_home"));
+    assertEquals("myStackJdkName", hostLevelParams.get("jdk_name"));
+    assertEquals("myStackJceName", hostLevelParams.get("jce_name"));
+    assertEquals("7", hostLevelParams.get("java_version"));
+    assertEquals(4, hostLevelParams.size());
+  }
+
+  @Test
+  public void testUseStackJdkIfExistsWithoutStackJdk() {
+    // GIVEN
+    Map<String, String> hostLevelParams = new HashMap<>();
+    Configuration configuration = new Configuration();
+    configuration.setProperty("java.home", "myJavaHome");
+    configuration.setProperty("jdk.name", "myJdkName");
+    configuration.setProperty("jce.name", "myJceName");
+    // WHEN
+    StageUtils.useStackJdkIfExists(hostLevelParams, configuration);
+    // THEN
+    assertEquals("myJavaHome", hostLevelParams.get("java_home"));
+    assertEquals("myJdkName", hostLevelParams.get("jdk_name"));
+    assertEquals("myJceName", hostLevelParams.get("jce_name"));
+    assertEquals(4, hostLevelParams.size());
+  }
+
   private void checkServiceHostIndexes(Map<String, Set<String>> info, String componentName, String mappedComponentName,
                                        Map<String, Collection<String>> serviceTopology, List<String> hostList) {
     Set<Integer> expectedHostsList = new HashSet<>();

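A rough Python analogue of the behaviour these new tests assert, useful when reading them without opening StageUtils: useAmbariJdkInCommandParams mirrors Ambari's own java.home/jdk.name/jce.name into ambari_*-prefixed command params, while useStackJdkIfExists prefers the stack.* properties for the host-level java_home/jdk_name/jce_name and falls back to the Ambari values when no stack JDK is configured. The key names follow the assertions above; everything else, including the Java implementation itself, is outside this sketch.

def use_ambari_jdk_in_command_params(command_params, conf):
    # Copy the Ambari server's own JDK settings under ambari_* keys.
    if conf.get("java.home"):
        command_params["ambari_java_home"] = conf["java.home"]
        if conf.get("jdk.name"):
            command_params["ambari_jdk_name"] = conf["jdk.name"]
        if conf.get("jce.name"):
            command_params["ambari_jce_name"] = conf["jce.name"]

def use_stack_jdk_if_exists(host_level_params, conf):
    # Prefer the stack JDK when one is configured; otherwise keep the Ambari JDK.
    if conf.get("stack.java.home"):
        host_level_params["java_home"] = conf["stack.java.home"]
        host_level_params["jdk_name"] = conf.get("stack.jdk.name")
        host_level_params["jce_name"] = conf.get("stack.jce.name")
    else:
        host_level_params["java_home"] = conf.get("java.home")
        host_level_params["jdk_name"] = conf.get("jdk.name")
        host_level_params["jce_name"] = conf.get("jce.name")

params = {}
use_stack_jdk_if_exists(params, {"java.home": "myJavaHome", "jdk.name": "myJdkName",
                                 "jce.name": "myJceName", "stack.java.home": "myStackJavaHome"})
print(params)  # java_home=myStackJavaHome, jdk_name=None, jce_name=None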
http://git-wip-us.apache.org/repos/asf/ambari/blob/2f0de691/ambari-server/src/test/python/TestAmbariServer.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestAmbariServer.py b/ambari-server/src/test/python/TestAmbariServer.py
index 8c135c3..c511237 100644
--- a/ambari-server/src/test/python/TestAmbariServer.py
+++ b/ambari-server/src/test/python/TestAmbariServer.py
@@ -108,11 +108,11 @@ with patch.object(platform, "linux_distribution", return_value = MagicMock(retur
                   get_pass_file_path, GET_FQDN_SERVICE_URL, JDBC_USE_INTEGRATED_AUTH_PROPERTY, SECURITY_KEY_ENV_VAR_NAME, \
                   JAVA_HOME_PROPERTY, JDK_NAME_PROPERTY, JCE_NAME_PROPERTY, STACK_LOCATION_KEY, SERVER_VERSION_FILE_PATH, \
                   COMMON_SERVICES_PATH_PROPERTY, WEBAPP_DIR_PROPERTY, SHARED_RESOURCES_DIR, BOOTSTRAP_SCRIPT, \
-                  CUSTOM_ACTION_DEFINITIONS, BOOTSTRAP_SETUP_AGENT_SCRIPT, STACKADVISOR_SCRIPT, BOOTSTRAP_DIR_PROPERTY, MPACKS_STAGING_PATH_PROPERTY
+                  CUSTOM_ACTION_DEFINITIONS, BOOTSTRAP_SETUP_AGENT_SCRIPT, STACKADVISOR_SCRIPT, BOOTSTRAP_DIR_PROPERTY, MPACKS_STAGING_PATH_PROPERTY, STACK_JAVA_VERSION
                 from ambari_server.serverUtils import is_server_runing, refresh_stack_hash
                 from ambari_server.serverSetup import check_selinux, check_ambari_user, proceedJDBCProperties, SE_STATUS_DISABLED, SE_MODE_ENFORCING, configure_os_settings, \
                   download_and_install_jdk, prompt_db_properties, setup, \
-                  AmbariUserChecks, AmbariUserChecksLinux, AmbariUserChecksWindows, JDKSetup, reset, setup_jce_policy, expand_jce_zip_file
+                  AmbariUserChecks, AmbariUserChecksLinux, AmbariUserChecksWindows, JDKSetup, reset, setup_jce_policy, expand_jce_zip_file, check_ambari_java_version_is_valid
                 from ambari_server.serverUpgrade import upgrade, change_objects_owner, \
                   run_schema_upgrade, move_user_custom_actions, find_and_copy_custom_services
                 from ambari_server.setupHttps import is_valid_https_port, setup_https, import_cert_and_key_action, get_fqdn, \
@@ -2811,9 +2811,10 @@ class TestAmbariServer(TestCase):
   @patch("ambari_server.serverSetup.get_JAVA_HOME")
   @patch("ambari_server.serverSetup.get_resources_location")
   @patch("ambari_server.serverSetup.get_ambari_properties")
+  @patch("ambari_server.serverSetup.check_ambari_java_version_is_valid")
   @patch("shutil.copyfile")
   @patch("sys.exit")
-  def test_download_jdk(self, exit_mock, copyfile_mock, get_ambari_properties_mock, get_resources_location_mock, get_JAVA_HOME_mock, \
+  def test_download_jdk(self, exit_mock, copyfile_mock, check_ambari_java_version_is_valid_mock, get_ambari_properties_mock, get_resources_location_mock, get_JAVA_HOME_mock, \
                         validate_jdk_mock, print_info_msg_mock, get_validated_string_input_mock, update_properties_mock, \
                         run_os_command_mock, get_YN_input_mock, force_download_file_mock, expand_jce_zip_file_mock,
                         adjust_jce_permissions_mock, os_makedirs_mock,
@@ -2874,6 +2875,7 @@ class TestAmbariServer(TestCase):
     get_JAVA_HOME_mock.return_value = False
     read_ambari_user_mock.return_value = "ambari"
     get_ambari_properties_mock.return_value = p
+    check_ambari_java_version_is_valid_mock.return_value = True
     # Test case: ambari.properties not found
     try:
       download_and_install_jdk(args)
@@ -3166,6 +3168,47 @@ class TestAmbariServer(TestCase):
     pass
 
   @not_for_platform(PLATFORM_WINDOWS)
+  @patch("subprocess.Popen")
+  def test_check_ambari_java_version_is_valid(self, popenMock):
+    # case 1:  jdk7 is picked for stacks
+    properties = Properties()
+    p = MagicMock()
+    p.communicate.return_value = ('7', None)
+    p.returncode = 0
+    popenMock.return_value = p
+    result = check_ambari_java_version_is_valid('/usr/jdk64/jdk_1.7.0/', 'java', 8, properties)
+    self.assertEqual(properties.get_property(STACK_JAVA_VERSION), "7")
+    self.assertFalse(result)
+
+    # case 2: jdk8 is picked for stacks
+    properties = Properties()
+    p.communicate.return_value = ('8', None)
+    p.returncode = 0
+    result = check_ambari_java_version_is_valid('/usr/jdk64/jdk_1.8.0/', 'java', 8, properties)
+    self.assertFalse(properties.get_property(STACK_JAVA_VERSION))
+    self.assertTrue(result)
+
+    # case 3: return code is not 0
+    p.returncode = 1
+    try:
+      check_ambari_java_version_is_valid('/usr/jdk64/jdk_1.8.0/', 'java', 8, properties)
+      self.fail("Should throw exception")
+    except FatalException:
+      # expected
+      pass
+
+    # case 4: unparseable response - type error
+    p.communicate.return_value = ('something else', None)
+    p.returncode = 0
+    try:
+      check_ambari_java_version_is_valid('/usr/jdk64/jdk_1.8.0/', 'java', 8, properties)
+      self.fail("Should throw exception")
+    except FatalException as e:
+      # expected
+      self.assertEqual(e.code, 1)
+      pass
+
+  @not_for_platform(PLATFORM_WINDOWS)
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(LinuxDBMSConfig, "_setup_remote_server")
   @patch("ambari_server.dbConfiguration_linux.print_info_msg")

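The new test_check_ambari_java_version_is_valid cases pin down the contract of check_ambari_java_version_is_valid: read the major version reported for the given JDK, record it under stack.java.version and return False when it is lower than what Ambari itself requires, return True when it matches, and raise FatalException on a non-zero exit code or unparseable output. Below is a hedged, self-contained sketch of a checker with that shape; the probe callable and the FatalException class are stand-ins, not the serverSetup.py implementation.

class FatalException(Exception):
    def __init__(self, code, reason):
        super(FatalException, self).__init__(reason)
        self.code = code

def check_java_version_is_valid(java_home, expected_major, properties, probe):
    # probe(java_home) returns (stdout, returncode) of a command that prints the
    # JDK's major version; in the real setup this is a subprocess call.
    out, returncode = probe(java_home)
    if returncode != 0:
        raise FatalException(returncode, "Unable to determine the Java version under %s" % java_home)
    try:
        found_major = int(out.strip())
    except ValueError:
        raise FatalException(1, "Unparseable Java version output: %r" % out)
    if found_major < expected_major:
        properties["stack.java.version"] = str(found_major)  # stacks fall back to this JDK
        return False
    return True

props = {}
print(check_java_version_is_valid("/usr/jdk64/jdk_1.7.0", 8, props, lambda h: ("7", 0)))  # False
print(props)  # {'stack.java.version': '7'}
print(check_java_version_is_valid("/usr/jdk64/jdk_1.8.0", 8, {}, lambda h: ("8", 0)))     # True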

[11/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.2_to_2.3.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.2_to_2.3.json b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.2_to_2.3.json
deleted file mode 100644
index de8f8e3..0000000
--- a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.2_to_2.3.json
+++ /dev/null
@@ -1,2234 +0,0 @@
-{
-  "version": "1.0",
-  "stacks": [
-    {
-      "name": "HDP",
-      "old-version": "2.2",
-      "target-version": "2.3",
-      "options": {
-        "config-types": {
-          "falcon-startup.properties": {
-            "merged-copy": "yes",
-            "required-services": [
-              "FALCON"
-            ]
-          },
-          "tez-site": {
-            "merged-copy": "yes",
-            "required-services": [
-              "TEZ"
-            ]
-          },
-          "oozie-site": {
-            "merged-copy": "yes",
-            "required-services": [
-              "OOZIE"
-            ]
-          },
-          "hbase-site": {
-            "merged-copy": "yes",
-            "required-services": [
-              "HBASE"
-            ]
-          },
-          "mapred-site": {
-            "merged-copy": "yes",
-            "required-services": [
-              "MAPREDUCE2"
-            ]
-          },
-          "hdfs-site": {
-            "merged-copy": "yes",
-            "required-services": [
-              "HDFS"
-            ]
-          },
-          "yarn-site": {
-            "merged-copy": "yes",
-            "required-services": [
-              "YARN"
-            ]
-          },
-          "capacity-scheduler": {
-            "merged-copy": "yes",
-            "required-services": [
-              "YARN"
-            ]
-          },
-          "kafka-broker": {
-            "merged-copy": "yes",
-            "required-services": [
-              "KAFKA"
-            ]
-          },
-          "ranger-hdfs-policymgr-ssl": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "HDFS"
-            ]
-          },
-          "ranger-hdfs-security": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "HDFS"
-            ]
-          },
-          "ranger-hdfs-audit": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "HDFS"
-            ]
-          },
-          "ranger-hdfs-plugin-properties": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "HDFS"
-            ]
-          },
-          "ranger-hbase-policymgr-ssl": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "HBASE"
-            ]
-          },
-          "ranger-hbase-security": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "HBASE"
-            ]
-          },
-          "ranger-hbase-audit": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "HBASE"
-            ]
-          },
-          "ranger-hbase-plugin-properties": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "HBASE"
-            ]
-          },
-          "ranger-hive-policymgr-ssl": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "HIVE"
-            ]
-          },
-          "ranger-hive-security": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "HIVE"
-            ]
-          },
-          "ranger-hive-audit": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "HIVE"
-            ]
-          },
-          "ranger-hive-plugin-properties": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "HIVE"
-            ]
-          },
-          "ranger-knox-policymgr-ssl": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "KNOX"
-            ]
-          },
-          "ranger-knox-security": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "KNOX"
-            ]
-          },
-          "ranger-knox-audit": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "KNOX"
-            ]
-          },
-          "ranger-knox-plugin-properties": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "KNOX"
-            ]
-          },
-          "ranger-storm-policymgr-ssl": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "STORM"
-            ]
-          },
-          "ranger-storm-security": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "STORM"
-            ]
-          },
-          "ranger-storm-audit": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "STORM"
-            ]
-          },
-          "ranger-storm-plugin-properties": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "STORM"
-            ]
-          },
-          "ranger-kafka-policymgr-ssl": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "KAFKA"
-            ]
-          },
-          "ranger-kafka-security": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "KAFKA"
-            ]
-          },
-          "ranger-kafka-audit": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "KAFKA"
-            ]
-          },
-          "ranger-kafka-plugin-properties": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "KAFKA"
-            ]
-          },
-          "ranger-yarn-policymgr-ssl": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "YARN"
-            ]
-          },
-          "ranger-yarn-security": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "YARN"
-            ]
-          },
-          "ranger-yarn-audit": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "YARN"
-            ]
-          },
-          "ranger-yarn-plugin-properties": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER",
-              "YARN"
-            ]
-          },
-          "kafka-env": {
-            "merged-copy": "yes",
-            "required-services": [
-              "KAFKA"
-            ]
-          },
-          "yarn-env": {
-            "merged-copy": "yes",
-            "required-services": [
-              "YARN"
-            ]
-          },
-          "admin-properties": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "usersync-properties": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "ranger-site": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "ranger-env": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "ranger-admin-site": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "ranger-ugsync-site": {
-            "merged-copy": "yes",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "storm-env": {
-            "merged-copy": "yes",
-            "required-services": [
-              "STORM"
-            ]
-          },
-          "webhcat-site": {
-            "merged-copy": "yes",
-            "required-services": [
-              "HIVE"
-            ]
-          },
-          "storm-cluster-log4j": {
-            "merged-copy": "yes",
-            "required-services": [
-              "STORM"
-            ]
-          },
-          "storm-worker-log4j": {
-            "merged-copy": "yes",
-            "required-services": [
-              "STORM"
-            ]
-          }
-        }
-      },
-      "properties": {
-        "storm-env": {
-          "nimbus_seeds_supported": "true",
-          "storm_logs_supported": "true"
-        },
-        "storm-cluster-log4j": {
-          "content": "\n    \n<!--\n Licensed to the Apache Software Foundation (ASF) under one or more\n contributor license agreements.  See the NOTICE file distributed with\n this work for additional information regarding copyright ownership.\n The ASF licenses this file to You under the Apache License, Version 2.0\n (the \"License\"); you may not use this file except in compliance with\n the License.  You may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n-->\n\n<configuration monitorInterval=\"60\">\n<properties>\n    <property name=\"pattern\">%d{yyyy-MM-dd HH:mm:ss.SSS} %c{1.} [%p] %msg%n</property>\n    <property name=\"patt
 ernMetris\">%d %-8r %m%n</property>\n</properties>\n<appenders>\n    <RollingFile name=\"A1\"\n                 fileName=\"${sys:storm.log.dir}/${sys:logfile.name}\"\n                 filePattern=\"${sys:storm.log.dir}/${sys:logfile.name}.%i\">\n        <PatternLayout>\n            <pattern>${pattern}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"100 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"9\"/>\n    </RollingFile>\n    <RollingFile name=\"ACCESS\"\n                 fileName=\"${sys:storm.log.dir}/access.log\"\n                 filePattern=\"${sys:storm.log.dir}/access.log.%i\">\n        <PatternLayout>\n            <pattern>${pattern}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"100 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"9\"/>\n    </RollingFile>\n    <RollingFile
  name=\"METRICS\"\n                 fileName=\"${sys:storm.log.dir}/metrics.log\"\n                 filePattern=\"${sys:storm.log.dir}/metrics.log.%i\">\n        <PatternLayout>\n            <pattern>${patternMetris}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"2 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"9\"/>\n    </RollingFile>\n    <Syslog name=\"syslog\" format=\"RFC5424\" host=\"localhost\" port=\"514\"\n            protocol=\"UDP\" appName=\"[${sys:daemon.name}]\" mdcId=\"mdc\" includeMDC=\"true\"\n            facility=\"LOCAL5\" enterpriseNumber=\"18060\" newLine=\"true\" exceptionPattern=\"%rEx{full}\"\n            messageId=\"[${sys:user.name}:S0]\" id=\"storm\"/>\n</appenders>\n<loggers>\n\n    <Logger name=\"backtype.storm.security.auth.authorizer\" level=\"info\">\n        <AppenderRef ref=\"ACCESS\"/>\n    </Logger>\n    <Logger name=\"backtype.storm.metric.Loggi
 ngMetricsConsumer\" level=\"info\">\n        <AppenderRef ref=\"METRICS\"/>\n    </Logger>\n    <root level=\"info\"> <!-- We log everything -->\n        <appender-ref ref=\"A1\"/>\n        <appender-ref ref=\"syslog\"/>\n    </root>\n</loggers>\n</configuration>\n    \n    "
-        },
-        "storm-worker-log4j": {
-          "content": "\n    \n<!--\n Licensed to the Apache Software Foundation (ASF) under one or more\n contributor license agreements.  See the NOTICE file distributed with\n this work for additional information regarding copyright ownership.\n The ASF licenses this file to You under the Apache License, Version 2.0\n (the \"License\"); you may not use this file except in compliance with\n the License.  You may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n-->\n\n<configuration monitorInterval=\"60\">\n<properties>\n    <property name=\"pattern\">%d{yyyy-MM-dd HH:mm:ss.SSS} %c{1.} [%p] %msg%n</property>\n    <property name=\"patt
 ernNoTime\">%msg%n</property>\n</properties>\n<appenders>\n    <RollingFile name=\"A1\"\n                 fileName=\"${sys:storm.log.dir}/${sys:logfile.name}\"\n                 filePattern=\"${sys:storm.log.dir}/${sys:logfile.name}.%i.gz\">\n        <PatternLayout>\n            <pattern>${pattern}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"100 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"9\"/>\n    </RollingFile>\n    <RollingFile name=\"STDOUT\"\n                 fileName=\"${sys:storm.log.dir}/${sys:logfile.name}.out\"\n                 filePattern=\"${sys:storm.log.dir}/${sys:logfile.name}.out.%i.gz\">\n        <PatternLayout>\n            <pattern>${patternNoTime}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"100 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"4\"/>\n    
 </RollingFile>\n    <RollingFile name=\"STDERR\"\n                 fileName=\"${sys:storm.log.dir}/${sys:logfile.name}.err\"\n                 filePattern=\"${sys:storm.log.dir}/${sys:logfile.name}.err.%i.gz\">\n        <PatternLayout>\n            <pattern>${patternNoTime}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"100 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"4\"/>\n    </RollingFile>\n    <Syslog name=\"syslog\" format=\"RFC5424\" host=\"localhost\" port=\"514\"\n        protocol=\"UDP\" appName=\"[${sys:storm.id}:${sys:worker.port}]\" mdcId=\"mdc\" includeMDC=\"true\"\n        facility=\"LOCAL5\" enterpriseNumber=\"18060\" newLine=\"true\" exceptionPattern=\"%rEx{full}\"\n        messageId=\"[${sys:user.name}:${sys:logging.sensitivity}]\" id=\"storm\"/>\n</appenders>\n<loggers>\n    <root level=\"info\"> <!-- We log everything -->\n        <appender-ref ref=\"A1\"/>\n  
       <appender-ref ref=\"syslog\"/>\n    </root>\n    <Logger name=\"STDERR\" level=\"INFO\">\n        <appender-ref ref=\"STDERR\"/>\n        <appender-ref ref=\"syslog\"/>\n    </Logger>\n    <Logger name=\"STDOUT\" level=\"INFO\">\n        <appender-ref ref=\"STDOUT\"/>\n        <appender-ref ref=\"syslog\"/>\n    </Logger>\n</loggers>\n</configuration>\n    \n    "
-        },
-        "falcon-startup.properties": {
-          "*.shared.libs": "activemq-core,ant,geronimo-j2ee-management,jms,json-simple,oozie-client,spring-jms,commons-lang3,commons-el",
-          "*.configstore.listeners": "org.apache.falcon.entity.v0.EntityGraph,\\\n      org.apache.falcon.entity.ColoClusterRelation,\\\n      org.apache.falcon.group.FeedGroupMap,\\\n      org.apache.falcon.service.SharedLibraryHostingService"
-        },
-        "tez-site": {
-          "tez.am.view-acls": "*",
-          "tez.tez-ui.history-url.base": {
-            "value": "{TEZ_HISTORY_URL_BASE}",
-            "template": "yes"
-          }
-        },
-        "oozie-site": {
-          "oozie.authentication.simple.anonymous.allowed": "true",
-          "oozie.service.AuthorizationService.authorization.enabled": "true",
-          "oozie.authentication.kerberos.name.rules": "RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/\nRULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/\nRULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\nRULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\nDEFAULT",
-          "oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials,hive2=org.apache.oozie.action.hadoop.Hive2Credentials",
-          "oozie.service.CallableQueueService.callable.concurrency": {
-            "remove": "yes"
-          },
-          "oozie.service.CallableQueueService.queue.size": {
-            "remove": "yes"
-          },
-          "oozie.service.CallableQueueService.threads": {
-            "remove": "yes"
-          },
-          "oozie.service.JPAService.create.db.schema": {
-            "remove": "yes"
-          },
-          "oozie.service.JPAService.pool.max.active.conn": {
-            "remove": "yes"
-          },
-          "oozie.service.PurgeService.older.than": {
-            "remove": "yes"
-          },
-          "oozie.service.PurgeService.purge.interval": {
-            "remove": "yes"
-          },
-          "oozie.service.SchemaService.wf.ext.schemas": {
-            "remove": "yes"
-          },
-          "oozie.service.WorkflowAppService.system.libpath": {
-            "remove": "yes"
-          },
-          "oozie.service.coord.normal.default.timeout": {
-            "remove": "yes"
-          },
-          "oozie.service.coord.push.check.requeue.interval": {
-            "remove": "yes"
-          },
-          "oozie.services": {
-            "remove": "yes"
-          },
-          "oozie.system.id": {
-            "remove": "yes"
-          },
-          "oozie.systemmode": {
-            "remove": "yes"
-          },
-          "use.system.libpath.for.mapreduce.and.pig.jobs": {
-            "remove": "yes"
-          }
-        },
-        "hbase-site": {
-          "hbase.region.server.rpc.scheduler.factory.class": {
-            "value": "{HBASE_REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS}",
-            "required-services": [
-              "HBASE"
-            ],
-            "template": "yes"
-          },
-          "hbase.rpc.controllerfactory.class": {
-            "value": "{HBASE_RPC_CONTROLLERFACTORY_CLASS}",
-            "template": "yes",
-            "required-services": [
-              "HBASE"
-            ]
-          },
-          "hbase.regionserver.wal.codec": {
-            "value": "{HBASE_REGIONSERVER_WAL_CODEC}",
-            "template": "yes",
-            "required-services": [
-              "HBASE"
-            ]
-          },
-          "phoenix.functions.allowUserDefinedFunctions": "true",
-          "fs.hdfs.impl": "org.apache.hadoop.hdfs.DistributedFileSystem",
-          "hbase.bucketcache.percentage.in.combinedcache": {
-            "remove": "yes"
-          },
-          "hbase.coprocessor.master.classes": {
-            "value": "{HBASE_COPROCESS_MASTER_CLASSES}",
-            "template": "yes",
-            "can-create": "no",
-            "required-services": [
-              "HBASE",
-              "RANGER"
-            ]
-          },
-          "hbase.coprocessor.region.classes": {
-            "value": "{HBASE_COPROCESSOR_REGION_CLASSES}",
-            "template": "yes",
-            "can-create": "no",
-            "required-services": [
-              "HBASE",
-              "RANGER"
-            ]
-          }
-        },
-        "mapred-site": {
-          "mapreduce.fileoutputcommitter.algorithm.version": "1",
-          "mapreduce.task.tmp.dir": {
-            "remove": "yes"
-          }
-        },
-        "hdfs-site": {
-          "dfs.namenode.inode.attributes.provider.class": {
-            "value": "org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer",
-            "required-services": [
-              "RANGER"
-            ]
-          }
-        },
-        "yarn-site": {
-          "yarn.node-labels.enabled": {
-            "value": "false",
-            "override": "no"
-          },
-          "yarn.timeline-service.recovery.enabled": "true",
-          "yarn.timeline-service.state-store-class": "org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore",
-          "yarn.timeline-service.leveldb-state-store.path": "/var/log/hadoop-yarn/timeline"
-        },
-        "capacity-scheduler": {
-          "yarn.scheduler.capacity.root.default-node-label-expression": "",
-          "yarn.scheduler.capacity.root.accessible-node-labels.default.capacity": {
-            "remove": "yes"
-          },
-          "yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity": {
-            "remove": "yes"
-          }
-        },
-        "kafka-broker": {
-          "listeners": {
-            "value": "{KAFKA_LISTENERS}",
-            "template": "yes"
-          },
-          "controlled.shutdown.enable": "true"
-        },
-        "ranger-hdfs-policymgr-ssl": {
-          "xasecure.policymgr.clientssl.keystore.credential.file": {
-            "value": "{RANGER_HDFS_KEYSTORE_CREDENTIAL_FILE}",
-            "template": "yes",
-            "required-services": [
-              "RANGER",
-              "HDFS"
-            ]
-          },
-          "xasecure.policymgr.clientssl.truststore.credential.file": {
-            "value": "{RANGER_HDFS_KEYSTORE_CREDENTIAL_FILE}",
-            "template": "yes",
-            "required-services": [
-              "RANGER",
-              "HDFS"
-            ]
-          }
-        },
-        "ranger-hdfs-security": {
-          "upgrade.transition": "yes",
-          "ranger.plugin.hdfs.policy.source.impl": {
-            "value": "org.apache.ranger.admin.client.RangerAdminRESTClient",
-            "required-services": [
-              "RANGER",
-              "HDFS"
-            ]
-          },
-          "ranger.plugin.hdfs.policy.rest.ssl.config.file": {
-            "value": "/usr/hdp/current/hadoop-client/conf/ranger-policymgr-ssl.xml",
-            "required-services": [
-              "RANGER",
-              "HDFS"
-            ]
-          },
-          "ranger.plugin.hdfs.policy.pollIntervalMs": {
-            "value": "30000",
-            "required-services": [
-              "RANGER",
-              "HDFS"
-            ]
-          },
-          "ranger.plugin.hdfs.policy.cache.dir": {
-            "value": "{RANGER_PLUGIN_HDFS_POLICY_CACHE_DIR}",
-            "template": "yes",
-            "required-services": [
-              "RANGER",
-              "HDFS"
-            ]
-          },
-          "xasecure.add-hadoop-authorization": {
-            "value": "true",
-            "required-services": [
-              "RANGER",
-              "HDFS"
-            ]
-          }
-        },
-        "ranger-hdfs-audit": {
-          "xasecure.audit.credential.provider.file": {
-            "value": "{RANGER_HDFS_KEYSTORE_CREDENTIAL_FILE}",
-            "template": "yes",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "xasecure.audit.destination.solr": {
-            "value": "false",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "xasecure.audit.destination.solr.urls": {
-            "value": "{{ranger_audit_solr_urls}}",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "xasecure.audit.destination.solr.zookeepers": {
-            "value": "none",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "xasecure.audit.destination.solr.batch.filespool.dir": {
-            "value": "/var/log/hadoop/hdfs/audit/solr/spool",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "xasecure.audit.destination.db.jdbc.driver": {
-            "value": "{HDFS_JDBC_DRIVER}",
-            "template": "yes",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "xasecure.audit.destination.db.jdbc.url": {
-            "value": "{HDFS_AUDIT_JDBC_URL}",
-            "template": "yes",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "xasecure.audit.provider.summary.enabled": {
-            "value": "false",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "xasecure.audit.destination.db.batch.filespool.dir": "/var/log/hadoop/hdfs/audit/db/spool",
-          "xasecure.audit.is.enabled": "true",
-          "xasecure.audit.destination.db.password": {
-            "value": "{{xa_audit_db_password}}"
-          },
-          "xasecure.audit.destination.db": {
-            "value": "{AUDIT_TO_DB_HDFS}",
-            "template": "yes",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "xasecure.audit.destination.hdfs": {
-            "value": "{AUDIT_TO_HDFS_HDFS}",
-            "template": "yes",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "xasecure.audit.destination.hdfs.batch.filespool.dir": {
-            "value": "{AUDIT_HDFS_FILESPOOL_DIR_HDFS}",
-            "template": "yes"
-          }
-        },
-        "ranger-hdfs-plugin-properties": {
-          "XAAUDIT.HDFS.DESTINTATION_FILE": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": {
-            "remove": "yes"
-          }
-        },
-        "ranger-hbase-policymgr-ssl": {
-          "xasecure.policymgr.clientssl.keystore.credential.file": {
-            "value": "{RANGER_HBASE_KEYSTORE_CREDENTIAL_FILE}",
-            "template": "yes"
-          },
-          "xasecure.policymgr.clientssl.truststore.credential.file": {
-            "value": "{RANGER_HBASE_KEYSTORE_CREDENTIAL_FILE}",
-            "template": "yes"
-          }
-        },
-        "ranger-hbase-security": {
-          "upgrade.transition": "yes",
-          "ranger.plugin.hbase.policy.source.impl": {
-            "value": "org.apache.ranger.admin.client.RangerAdminRESTClient",
-            "required-services": [
-              "RANGER",
-              "HBASE"
-            ]
-          },
-          "ranger.plugin.hbase.policy.rest.ssl.config.file": {
-            "value": "/usr/hdp/current/hbase-client/conf/ranger-policymgr-ssl.xml",
-            "required-services": [
-              "RANGER",
-              "HBASE"
-            ]
-          },
-          "ranger.plugin.hbase.policy.pollIntervalMs": {
-            "value": "30000",
-            "required-services": [
-              "RANGER",
-              "HBASE"
-            ]
-          },
-          "ranger.plugin.hbase.policy.cache.dir": {
-            "value": "{RANGER_PLUGIN_HBASE_POLICY_CACHE_DIR}",
-            "template": "yes",
-            "required-services": [
-              "RANGER",
-              "HBASE"
-            ]
-          }
-        },
-        "ranger-hbase-audit": {
-          "xasecure.audit.credential.provider.file": {
-            "value": "{RANGER_HBASE_KEYSTORE_CREDENTIAL_FILE}",
-            "template": "yes"
-          },
-          "xasecure.audit.destination.solr": {
-            "value": "false"
-          },
-          "xasecure.audit.destination.solr.urls": {
-            "value": "{{ranger_audit_solr_urls}}"
-          },
-          "xasecure.audit.destination.solr.zookeepers": {
-            "value": "none"
-          },
-          "xasecure.audit.destination.solr.batch.filespool.dir": {
-            "value": "/var/log/hbase/audit/solr/spool"
-          },
-          "xasecure.audit.destination.db.jdbc.driver": {
-            "value": "{HBASE_JDBC_DRIVER}",
-            "template": "yes"
-          },
-          "xasecure.audit.destination.db.jdbc.url": {
-            "value": "{HBASE_AUDIT_JDBC_URL}",
-            "template": "yes"
-          },
-          "xasecure.audit.provider.summary.enabled": {
-            "value": "true"
-          },
-          "xasecure.audit.destination.db.batch.filespool.dir": "/var/log/hbase/audit/db/spool",
-          "xasecure.audit.is.enabled": "true",
-          "xasecure.audit.destination.db.password": {
-            "value": "{{xa_audit_db_password}}"
-          },
-          "xasecure.audit.destination.db": {
-            "value": "{AUDIT_TO_DB_HBASE}",
-            "template": "yes"
-          },
-          "xasecure.audit.destination.hdfs": {
-            "value": "{AUDIT_TO_HDFS_HBASE}",
-            "template": "yes"
-          },
-          "xasecure.audit.destination.hdfs.batch.filespool.dir": {
-            "value": "{AUDIT_HDFS_FILESPOOL_DIR_HBASE}",
-            "template": "yes"
-          }
-        },
-        "ranger-hbase-plugin-properties": {
-          "XAAUDIT.HDFS.DESTINTATION_FILE": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": {
-            "remove": "yes"
-          },
-          "SQL_CONNECTOR_JAR": {
-            "remove": "yes"
-          }
-        },
-        "ranger-hive-policymgr-ssl": {
-          "xasecure.policymgr.clientssl.keystore.credential.file": {
-            "value": "{RANGER_HIVE_KEYSTORE_CREDENTIAL_FILE}",
-            "template": "yes"
-          },
-          "xasecure.policymgr.clientssl.truststore.credential.file": {
-            "value": "{RANGER_HIVE_KEYSTORE_CREDENTIAL_FILE}",
-            "template": "yes"
-          }
-        },
-        "ranger-hive-security": {
-          "upgrade.transition": "yes",
-          "ranger.plugin.hive.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
-          "ranger.plugin.hive.policy.rest.ssl.config.file": "/usr/hdp/current/hive-server2/conf/conf.server/ranger-policymgr-ssl.xml",
-          "ranger.plugin.hive.policy.pollIntervalMs": "30000",
-          "ranger.plugin.hive.policy.cache.dir": {
-            "value": "{RANGER_PLUGIN_HIVE_POLICY_CACHE_DIR}",
-            "template": "yes"
-          }
-        },
-        "ranger-hive-audit": {
-          "xasecure.audit.credential.provider.file": {
-            "value": "{RANGER_HIVE_KEYSTORE_CREDENTIAL_FILE}",
-            "template": "yes"
-          },
-          "xasecure.audit.destination.solr": {
-            "value": "false"
-          },
-          "xasecure.audit.destination.solr.urls": {
-            "value": "{{ranger_audit_solr_urls}}"
-          },
-          "xasecure.audit.destination.solr.zookeepers": {
-            "value": "none"
-          },
-          "xasecure.audit.destination.solr.batch.filespool.dir": {
-            "value": "/var/log/hive/audit/solr/spool"
-          },
-          "xasecure.audit.destination.db.jdbc.driver": {
-            "value": "{HIVE_JDBC_DRIVER}",
-            "template": "yes"
-          },
-          "xasecure.audit.destination.db.jdbc.url": {
-            "value": "{HIVE_AUDIT_JDBC_URL}",
-            "template": "yes"
-          },
-          "xasecure.audit.provider.summary.enabled": {
-            "value": "false"
-          },
-          "xasecure.audit.destination.db.batch.filespool.dir": "/var/log/hive/audit/db/spool",
-          "xasecure.audit.is.enabled": "true",
-          "xasecure.audit.destination.db.password": {
-            "value": "{{xa_audit_db_password}}"
-          },
-          "xasecure.audit.destination.db": {
-            "value": "{AUDIT_TO_DB_HIVE}",
-            "template": "yes"
-          },
-          "xasecure.audit.destination.hdfs": {
-            "value": "{AUDIT_TO_HDFS_HIVE}",
-            "template": "yes"
-          },
-          "xasecure.audit.destination.hdfs.batch.filespool.dir": {
-            "value": "{AUDIT_HDFS_FILESPOOL_DIR_HIVE}",
-            "template": "yes"
-          }
-        },
-        "ranger-hive-plugin-properties": {
-          "XAAUDIT.HDFS.DESTINTATION_FILE": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": {
-            "remove": "yes"
-          }
-        },
-        "ranger-knox-policymgr-ssl": {
-          "xasecure.policymgr.clientssl.keystore.credential.file": {
-            "value": "{RANGER_KNOX_KEYSTORE_CREDENTIAL_FILE}",
-            "template": "yes"
-          },
-          "xasecure.policymgr.clientssl.truststore.credential.file": {
-            "value": "{RANGER_KNOX_KEYSTORE_CREDENTIAL_FILE}",
-            "template": "yes"
-          }
-        },
-        "ranger-knox-security": {
-          "upgrade.transition": "yes",
-          "ranger.plugin.knox.policy.source.impl": {
-            "value": "org.apache.ranger.admin.client.RangerAdminJersey2RESTClient"
-          },
-          "ranger.plugin.knox.policy.rest.ssl.config.file": "/usr/hdp/current/knox-server/conf/ranger-policymgr-ssl.xml",
-          "ranger.plugin.knox.policy.pollIntervalMs": "30000",
-          "ranger.plugin.knox.policy.cache.dir": {
-            "value": "{RANGER_PLUGIN_KNOX_POLICY_CACHE_DIR}",
-            "template": "yes"
-          }
-        },
-        "ranger-knox-audit": {
-          "xasecure.audit.credential.provider.file": {
-            "value": "{RANGER_KNOX_KEYSTORE_CREDENTIAL_FILE}",
-            "template": "yes"
-          },
-          "xasecure.audit.destination.solr": {
-            "value": "false"
-          },
-          "xasecure.audit.destination.solr.urls": {
-            "value": "{{ranger_audit_solr_urls}}"
-          },
-          "xasecure.audit.destination.solr.zookeepers": {
-            "value": "none"
-          },
-          "xasecure.audit.destination.solr.batch.filespool.dir": {
-            "value": "/var/log/knox/audit/solr/spool"
-          },
-          "xasecure.audit.destination.db.jdbc.driver": {
-            "value": "{KNOX_JDBC_DRIVER}",
-            "template": "yes"
-          },
-          "xasecure.audit.destination.db.jdbc.url": {
-            "value": "{KNOX_AUDIT_JDBC_URL}",
-            "template": "yes"
-          },
-          "xasecure.audit.provider.summary.enabled": {
-            "value": "false"
-          },
-          "xasecure.audit.destination.db.batch.filespool.dir": "/var/log/knox/audit/db/spool",
-          "xasecure.audit.is.enabled": "true",
-          "xasecure.audit.destination.db.password": {
-            "value": "{{xa_audit_db_password}}"
-          },
-          "xasecure.audit.destination.db": {
-            "value": "{AUDIT_TO_DB_KNOX}",
-            "template": "yes"
-          },
-          "xasecure.audit.destination.hdfs": {
-            "value": "{AUDIT_TO_HDFS_KNOX}",
-            "template": "yes"
-          },
-          "xasecure.audit.destination.hdfs.batch.filespool.dir": {
-            "value": "{AUDIT_HDFS_FILESPOOL_DIR_KNOX}",
-            "template": "yes"
-          }
-        },
-        "ranger-knox-plugin-properties": {
-          "XAAUDIT.HDFS.DESTINTATION_FILE": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": {
-            "remove": "yes"
-          }
-        },
-        "ranger-storm-policymgr-ssl": {
-          "xasecure.policymgr.clientssl.keystore.credential.file": {
-            "value": "{RANGER_STORM_KEYSTORE_CREDENTIAL_FILE}",
-            "template": "yes"
-          },
-          "xasecure.policymgr.clientssl.truststore.credential.file": {
-            "value": "{RANGER_STORM_KEYSTORE_CREDENTIAL_FILE}",
-            "template": "yes"
-          }
-        },
-        "ranger-storm-security": {
-          "upgrade.transition": "yes",
-          "ranger.plugin.storm.policy.source.impl": {
-            "value": "org.apache.ranger.admin.client.RangerAdminRESTClient"
-          },
-          "ranger.plugin.storm.policy.rest.ssl.config.file": "/usr/hdp/current/storm-client/conf/ranger-policymgr-ssl.xml",
-          "ranger.plugin.storm.policy.pollIntervalMs": "30000",
-          "ranger.plugin.storm.policy.cache.dir": {
-            "value": "{RANGER_PLUGIN_STORM_POLICY_CACHE_DIR}",
-            "template": "yes"
-          }
-        },
-        "ranger-storm-audit": {
-          "xasecure.audit.credential.provider.file": {
-            "value": "{RANGER_STORM_KEYSTORE_CREDENTIAL_FILE}",
-            "template": "yes"
-          },
-          "xasecure.audit.destination.solr": {
-            "value": "false"
-          },
-          "xasecure.audit.destination.solr.urls": {
-            "value": "{{ranger_audit_solr_urls}}"
-          },
-          "xasecure.audit.destination.solr.zookeepers": {
-            "value": "none"
-          },
-          "xasecure.audit.destination.solr.batch.filespool.dir": {
-            "value": "/var/log/storm/audit/solr/spool"
-          },
-          "xasecure.audit.destination.db.jdbc.driver": {
-            "value": "{STORM_JDBC_DRIVER}",
-            "template": "yes"
-          },
-          "xasecure.audit.destination.db.jdbc.url": {
-            "value": "{STORM_AUDIT_JDBC_URL}",
-            "template": "yes"
-          },
-          "xasecure.audit.provider.summary.enabled": {
-            "value": "false"
-          },
-          "xasecure.audit.destination.db.batch.filespool.dir": "/var/log/storm/audit/db/spool",
-          "xasecure.audit.is.enabled": "true",
-          "xasecure.audit.destination.db.password": {
-            "value": "{{xa_audit_db_password}}"
-          },
-          "xasecure.audit.destination.db": {
-            "value": "{AUDIT_TO_DB_STORM}",
-            "template": "yes"
-          },
-          "xasecure.audit.destination.hdfs": {
-            "value": "AUDIT_TO_HDFS_STORM",
-            "template": "yes"
-          },
-          "xasecure.audit.destination.hdfs.batch.filespool.dir": {
-            "value": "{AUDIT_HDFS_FILESPOOL_DIR_STORM}",
-            "template": "yes"
-          }
-        },
-        "ranger-storm-plugin-properties": {
-          "XAAUDIT.HDFS.DESTINTATION_FILE": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": {
-            "remove": "yes"
-          },
-          "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": {
-            "remove": "yes"
-          }
-        },
-        "ranger-kafka-plugin-properties": {
-          "policy_user": "ambari-qa",
-          "hadoop.rpc.protection": "",
-          "common.name.for.certificate": "",
-          "zookeeper.connect": "localhost:2181",
-          "ranger-kafka-plugin-enabled": "No",
-          "REPOSITORY_CONFIG_USERNAME": "kafka",
-          "REPOSITORY_CONFIG_PASSWORD": "kafka"
-        },
-        "ranger-kafka-policymgr-ssl": {
-          "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/kafka-broker/config/ranger-plugin-keystore.jks",
-          "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword",
-          "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/kafka-broker/config/ranger-plugin-truststore.jks",
-          "xasecure.policymgr.clientssl.truststore.password": "changeit",
-          "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file/{{credential_file}}",
-          "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file/{{credential_file}}"
-        },
-        "ranger-kafka-audit": {
-          "xasecure.audit.is.enabled": "true",
-          "xasecure.audit.destination.db": "false",
-          "xasecure.audit.destination.db.jdbc.url": "{{audit_jdbc_url}}",
-          "xasecure.audit.destination.db.user": "{{xa_audit_db_user}}",
-          "xasecure.audit.destination.db.password": "crypted",
-          "xasecure.audit.destination.db.jdbc.driver": "{{jdbc_driver}}",
-          "xasecure.audit.credential.provider.file": "jceks://file{{credential_file}}",
-          "xasecure.audit.destination.db.batch.filespool.dir": "/var/log/kafka/audit/db/spool",
-          "xasecure.audit.destination.hdfs": "true",
-          "xasecure.audit.destination.hdfs.dir": "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit",
-          "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/kafka/audit/hdfs/spool",
-          "xasecure.audit.destination.solr": "true",
-          "xasecure.audit.destination.solr.urls": "{{ranger_audit_solr_urls}}",
-          "xasecure.audit.destination.solr.zookeepers": "none",
-          "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/kafka/audit/solr/spool",
-          "xasecure.audit.provider.summary.enabled": "true"
-        },
-        "ranger-kafka-security": {
-          "upgrade.transition": "yes",
-          "ranger.plugin.kafka.service.name": "{{repo_name}}",
-          "ranger.plugin.kafka.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
-          "ranger.plugin.kafka.policy.rest.url": "{{policymgr_mgr_url}}",
-          "ranger.plugin.kafka.policy.rest.ssl.config.file": "/etc/kafka/conf/ranger-policymgr-ssl.xml",
-          "ranger.plugin.kafka.policy.pollIntervalMs": "30000",
-          "ranger.plugin.kafka.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache"
-        },
-        "kafka-env": {
-          "is_supported_kafka_ranger": "true"
-        },
-        "ranger-yarn-plugin-properties": {
-          "policy_user": "ambari-qa",
-          "hadoop.rpc.protection": "",
-          "common.name.for.certificate": "",
-          "ranger-yarn-plugin-enabled": "No",
-          "REPOSITORY_CONFIG_USERNAME": "yarn",
-          "REPOSITORY_CONFIG_PASSWORD": "yarn"
-        },
-        "ranger-yarn-policymgr-ssl": {
-          "xasecure.policymgr.clientssl.keystore": "/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-keystore.jks",
-          "xasecure.policymgr.clientssl.keystore.password": "myKeyFilePassword",
-          "xasecure.policymgr.clientssl.truststore": "/usr/hdp/current/hadoop-client/conf/ranger-yarn-plugin-truststore.jks",
-          "xasecure.policymgr.clientssl.truststore.password": "changeit",
-          "xasecure.policymgr.clientssl.keystore.credential.file": "jceks://file/{{credential_file}}",
-          "xasecure.policymgr.clientssl.truststore.credential.file": "jceks://file/{{credential_file}}"
-        },
-        "ranger-yarn-audit": {
-          "xasecure.audit.is.enabled": "true",
-          "xasecure.audit.destination.db": "false",
-          "xasecure.audit.destination.db.jdbc.url": "{{audit_jdbc_url}}",
-          "xasecure.audit.destination.db.user": "{{xa_audit_db_user}}",
-          "xasecure.audit.destination.db.password": "crypted",
-          "xasecure.audit.destination.db.jdbc.driver": "{{jdbc_driver}}",
-          "xasecure.audit.credential.provider.file": "jceks://file{{credential_file}}",
-          "xasecure.audit.destination.db.batch.filespool.dir": "/var/log/hadoop/yarn/audit/db/spool",
-          "xasecure.audit.destination.hdfs": "true",
-          "xasecure.audit.destination.hdfs.dir": "hdfs://NAMENODE_HOSTNAME:8020/ranger/audit",
-          "xasecure.audit.destination.hdfs.batch.filespool.dir": "/var/log/hadoop/yarn/audit/hdfs/spool",
-          "xasecure.audit.destination.solr": "false",
-          "xasecure.audit.destination.solr.urls": "{{ranger_audit_solr_urls}}",
-          "xasecure.audit.destination.solr.zookeepers": "none",
-          "xasecure.audit.destination.solr.batch.filespool.dir": "/var/log/hadoop/yarn/audit/solr/spool",
-          "xasecure.audit.provider.summary.enabled": "false"
-        },
-        "ranger-yarn-security": {
-          "upgrade.transition": "yes",
-          "ranger.plugin.yarn.service.name": "{{repo_name}}",
-          "ranger.plugin.yarn.policy.source.impl": "org.apache.ranger.admin.client.RangerAdminRESTClient",
-          "ranger.plugin.yarn.policy.rest.url": "{{policymgr_mgr_url}}",
-          "ranger.plugin.yarn.policy.rest.ssl.config.file": "/usr/hdp/current/hadoop-client/conf/ranger-policymgr-ssl-yarn.xml",
-          "ranger.plugin.yarn.policy.pollIntervalMs": "30000",
-          "ranger.plugin.yarn.policy.cache.dir": "/etc/ranger/{{repo_name}}/policycache"
-        },
-        "yarn-env": {
-          "is_supported_yarn_ranger": "true"
-        },
-        "admin-properties": {
-          "policymgr_http_enabled": {
-            "remove": "yes"
-          },
-          "SQL_COMMAND_INVOKER": {
-            "remove": "yes"
-          }
-        },
-        "ranger-env": {
-          "oracle_home": {
-            "remove": "yes"
-          },
-          "create_db_dbuser": {
-            "value": "true"
-          },
-          "xml_configurations_supported": {
-            "value": "true"
-          },
-          "ranger_privelege_user_jdbc_url": {
-            "value": "{RANGER_ROOT_JDBC_URL}",
-            "template": "yes"
-          },
-          "ranger-yarn-plugin-enabled": {
-            "value": "No"
-          },
-          "ranger-kafka-plugin-enabled":{
-            "value": "No"
-          },
-          "xasecure.audit.destination.solr":{
-            "value": "false"
-          },
-          "is_solrCloud_enabled":{
-            "value": "false"
-          },
-          "xasecure.audit.destination.hdfs": {
-            "value": "{AUDIT_TO_HDFS}",
-            "template": "yes"
-          },
-          "xasecure.audit.destination.db": {
-            "value": "{AUDIT_TO_DB}",
-            "template": "yes"
-          }
-        },
-        "ranger-admin-site": {
-          "upgrade.transition": "yes",
-          "ranger.service.host": {
-            "value": "{RANGER_HOST}",
-            "template": "yes",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "ranger.service.https.attrib.ssl.enabled": {
-            "value": "false"
-          },
-          "ranger.jpa.jdbc.driver": {
-            "value": "{RANGER_JDBC_DRIVER}",
-            "template": "yes",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "ranger.jpa.jdbc.url": {
-            "value": "{RANGER_JDBC_URL}",
-            "template": "yes",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "ranger.jpa.jdbc.credential.alias": {
-            "value": "rangeradmin"
-          },
-          "ranger.credential.provider.path": {
-            "value": "/etc/ranger/admin/rangeradmin.jceks"
-          },
-          "ranger.audit.source.type": {
-            "value": "db"
-          },
-          "ranger.audit.solr.urls": {
-            "value": "http://solr_host:6083/solr/ranger_audits"
-          },
-          "ranger.jpa.audit.jdbc.driver": {
-            "value": "{RANGER_JDBC_DRIVER}",
-            "template": "yes",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "ranger.jpa.audit.jdbc.url": {
-            "value": "{RANGER_AUDIT_JDBC_URL}",
-            "template": "yes",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "ranger.jpa.audit.jdbc.credential.alias": {
-            "value": "rangeraudit"
-          },
-          "ranger.jpa.jdbc.dialect": {
-            "value": "{RANGER_JDBC_DIALECT}",
-            "template": "yes",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "ranger.jpa.audit.jdbc.dialect": {
-            "value": "{RANGER_JDBC_DIALECT}",
-            "template": "yes",
-            "required-services": [
-              "RANGER"
-            ]
-          },
-          "ranger.audit.solr.zookeepers": {
-            "value": "NONE"
-          },
-          "ranger.audit.solr.username": {
-            "value": "ranger_solr"
-          },
-          "ranger.audit.solr.password": {
-            "value": "NONE"
-          },
-          "ranger.externalurl": {
-            "value": "{{ranger_external_url}}"
-          },
-          "ranger.jpa.jdbc.user": {
-            "value": "{{ranger_db_user}}"
-          },
-          "ranger.jpa.audit.jdbc.user": {
-            "value": "{{ranger_audit_db_user}}"
-          },
-          "ranger.jpa.jdbc.password": {
-            "value": "_"
-          },
-          "ranger.jpa.audit.jdbc.password": {
-            "value": "_"
-          },
-          "ranger.sso.providerurl": {
-            "value": ""
-          },
-          "ranger.sso.publicKey": {
-            "value": ""
-          },
-          "ranger.sso.cookiename": {
-            "value": "hadoop-jwt"
-          },
-          "ranger.sso.enabled": {
-            "value": "false"
-          },
-          "ranger.sso.query.param.originalurl": {
-            "value": "originalUrl"
-          },
-          "ranger.sso.browser.useragent": {
-            "value": "Mozilla,chrome"
-          },
-          "ranger.ldap.user.searchfilter":{
-            "value": "{{ranger_ug_ldap_user_searchfilter}}"
-          },
-          "ranger.ldap.group.roleattribute":{
-            "value": "cn"
-          },
-          "ranger.ldap.base.dn":{
-            "value": "dc=example,dc=com"
-          },
-          "ranger.ldap.bind.dn":{
-            "value": "{{ranger_ug_ldap_bind_dn}}"
-          },
-          "ranger.ldap.bind.password":{
-            "value": "{{ranger_usersync_ldap_ldapbindpassword}}"
-          },
-          "ranger.ldap.referral":{
-            "value": "ignore"
-          },
-          "ranger.ldap.ad.base.dn":{
-            "value": "dc=example,dc=com"
-          },
-          "ranger.ldap.ad.bind.dn":{
-            "value": "{{ranger_ug_ldap_bind_dn}}"
-          },
-          "ranger.ldap.ad.bind.password":{
-            "value": "{{ranger_usersync_ldap_ldapbindpassword}}"
-          },
-          "ranger.ldap.ad.user.searchfilter":{
-            "value": "{{ranger_ug_ldap_user_searchfilter}}"
-          },
-          "ranger.ldap.ad.referral":{
-            "value": "ignore"
-          }
-        },
-        "ranger-ugsync-site": {
-          "upgrade.transition": "yes",
-          "ranger.usersync.port": {
-            "value": "5151"
-          },
-          "ranger.usersync.ssl": {
-            "value": "true"
-          },
-          "ranger.usersync.keystore.file": {
-            "value": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks"
-          },
-          "ranger.usersync.keystore.password": {
-            "value": "UnIx529p"
-          },
-          "ranger.usersync.truststore.file": {
-            "value": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks"
-          },
-          "ranger.usersync.truststore.password": {
-            "value": "changeit"
-          },
-          "ranger.usersync.passwordvalidator.path": {
-            "value": "./native/credValidator.uexe"
-          },
-          "ranger.usersync.enabled": {
-            "value": "true"
-          },
-          "ranger.usersync.sink.impl.class": {
-            "value": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder"
-          },
-          "ranger.usersync.policymanager.maxrecordsperapicall": {
-            "value": "1000"
-          },
-          "ranger.usersync.policymanager.mockrun": {
-            "value": "false"
-          },
-          "ranger.usersync.filesource.file": {
-            "value": "/tmp/usergroup.txt"
-          },
-          "ranger.usersync.filesource.text.delimiter": {
-            "value": ","
-          },
-          "ranger.usersync.ldap.bindalias": {
-            "value": "ranger.usersync.ldap.bindalias"
-          },
-          "ranger.usersync.ldap.bindkeystore": {
-            "value": ""
-          },
-          "ranger.usersync.group.searchenabled": {
-            "value": ""
-          },
-          "ranger.usersync.group.usermapsyncenabled": {
-            "value": ""
-          },
-          "ranger.usersync.group.searchbase": {
-            "value": ""
-          },
-          "ranger.usersync.group.searchscope": {
-            "value": ""
-          },
-          "ranger.usersync.group.objectclass": {
-            "value": ""
-          },
-          "ranger.usersync.group.searchfilter": {
-            "value": "empty"
-          },
-          "ranger.usersync.group.nameattribute": {
-            "value": ""
-          },
-          "ranger.usersync.ldap.searchBase": {
-            "value": ""
-          },
-          "ranger.usersync.group.memberattributename": {
-            "value": ""
-          },
-          "ranger.usersync.pagedresultsenabled": {
-            "value": "true"
-          },
-          "ranger.usersync.pagedresultssize": {
-            "value": "500"
-          },
-          "ranger.usersync.source.impl.class": {
-            "value": "{USERSYNC_SYNC_SOURCE}",
-            "template": "yes"
-          },
-          "ranger.usersync.policymanager.baseURL": {
-            "value": "{{ranger_external_url}}"
-          },
-          "ranger.usersync.sleeptimeinmillisbetweensynccycle": {
-            "value": "60000"
-          },
-          "ranger.usersync.ldap.referral": {
-            "value": "ignore"
-          },
-          "ranger.usersync.unix.group.file": {
-            "value": "/etc/group"
-          },
-          "ranger.usersync.unix.password.file": {
-            "value": "/etc/passwd"
-          }
-        },
-        "usersync-properties":{
-          "SYNC_INTERVAL": {"remove": "yes"}
-        },
-        "webhcat-site":{
-          "templeton.hive.extra.files": "/usr/hdp/${hdp.version}/tez/conf/tez-site.xml,/usr/hdp/${hdp.version}/tez,/usr/hdp/${hdp.version}/tez/lib"
-        }
-      },
-      "property-mapping": {
-        "hive.heapsize": {
-          "map-to": "hive.heapsize",
-          "from-catalog": "hive-site",
-          "to-catalog": "hive-env",
-          "required-services": ["HIVE"]
-        },
-        "templeton.jar": {
-          "from-catalog": "webhcat-site",
-          "to-catalog": "webhcat-site",
-          "replace-from": "/usr/hdp/current/hive-webhcat",
-          "replace-to": "/usr/hdp/${hdp.version}/hive",
-          "required-services": [
-            "HIVE"
-          ]
-        },
-        "templeton.libjars": {
-          "from-catalog": "webhcat-site",
-          "to-catalog": "webhcat-site",
-          "replace-from": "/usr/hdp/current/zookeeper-client",
-          "replace-to": "/usr/hdp/${hdp.version}/zookeeper,/usr/hdp/${hdp.version}/hive/lib/hive-common.jar",
-          "required-services": [
-            "HIVE"
-          ]
-        },
-        "templeton.hadoop": {
-          "from-catalog": "webhcat-site",
-          "to-catalog": "webhcat-site",
-          "replace-from": "/usr/hdp/current/hadoop-client",
-          "replace-to": "/usr/hdp/${hdp.version}/hadoop",
-          "required-services": [
-            "HIVE"
-          ]
-        },
-        "templeton.hcat": {
-          "from-catalog": "webhcat-site",
-          "to-catalog": "webhcat-site",
-          "replace-from": "/usr/hdp/current/hive-client",
-          "replace-to": "/usr/hdp/${hdp.version}/hive",
-          "required-services": [
-            "HIVE"
-          ]
-        },
-        "https.attrib.clientAuth": {
-          "map-to": "ranger.service.https.attrib.clientAuth",
-          "from-catalog": "ranger-site",
-          "to-catalog": "ranger-admin-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "https.attrib.keystoreFile": {
-          "map-to": "ranger.https.attrib.keystore.file",
-          "from-catalog": "ranger-site",
-          "to-catalog": "ranger-admin-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "https.attrib.keystorePass": {
-          "map-to": "ranger.service.https.attrib.keystore.pass",
-          "from-catalog": "ranger-site",
-          "to-catalog": "ranger-admin-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "https.service.port": {
-          "map-to": "ranger.service.https.port",
-          "from-catalog": "ranger-site",
-          "to-catalog": "ranger-admin-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "http.service.port": {
-          "map-to": "ranger.service.http.port",
-          "from-catalog": "ranger-site",
-          "to-catalog": "ranger-admin-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "https.attrib.keyAlias": {
-          "map-to": "ranger.service.https.attrib.keystore.keyalias",
-          "from-catalog": "ranger-site",
-          "to-catalog": "ranger-admin-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "http.enabled": {
-          "map-to": "ranger.service.http.enabled",
-          "from-catalog": "ranger-site",
-          "to-catalog": "ranger-admin-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "authServiceHostName": {
-          "map-to": "ranger.unixauth.service.hostname",
-          "from-catalog": "admin-properties",
-          "to-catalog": "ranger-admin-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "authServicePort": {
-          "map-to": "ranger.unixauth.service.port",
-          "from-catalog": "admin-properties",
-          "to-catalog": "ranger-admin-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "authentication_method": {
-          "map-to": "ranger.authentication.method",
-          "from-catalog": "admin-properties",
-          "to-catalog": "ranger-admin-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "remoteLoginEnabled": {
-          "map-to": "ranger.unixauth.remote.login.enabled",
-          "from-catalog": "admin-properties",
-          "to-catalog": "ranger-admin-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "xa_ldap_url": {
-          "map-to": "ranger.ldap.url",
-          "from-catalog": "admin-properties",
-          "to-catalog": "ranger-admin-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "xa_ldap_userDNpattern": {
-          "map-to": "ranger.ldap.user.dnpattern",
-          "from-catalog": "admin-properties",
-          "to-catalog": "ranger-admin-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "xa_ldap_groupSearchBase": {
-          "map-to": "ranger.ldap.group.searchbase",
-          "from-catalog": "admin-properties",
-          "to-catalog": "ranger-admin-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "xa_ldap_groupSearchFilter": {
-          "map-to": "ranger.ldap.group.searchfilter",
-          "from-catalog": "admin-properties",
-          "to-catalog": "ranger-admin-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "xa_ldap_groupRoleAttribute": {
-          "map-to": "ranger.ldap.group.roleattribute",
-          "from-catalog": "admin-properties",
-          "to-catalog": "ranger-admin-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "xa_ldap_ad_domain": {
-          "map-to": "ranger.ldap.ad.domain",
-          "from-catalog": "admin-properties",
-          "to-catalog": "ranger-admin-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "xa_ldap_ad_url": {
-          "map-to": "ranger.ldap.ad.url",
-          "from-catalog": "admin-properties",
-          "to-catalog": "ranger-admin-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "CRED_KEYSTORE_FILENAME": {
-          "map-to": "ranger.usersync.credstore.filename",
-          "from-catalog": "usersync-properties",
-          "to-catalog": "ranger-ugsync-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "MIN_UNIX_USER_ID_TO_SYNC": {
-          "map-to": "ranger.usersync.unix.minUserId",
-          "from-catalog": "usersync-properties",
-          "to-catalog": "ranger-ugsync-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "SYNC_LDAP_BIND_DN": {
-          "map-to": "ranger.usersync.ldap.binddn",
-          "from-catalog": "usersync-properties",
-          "to-catalog": "ranger-ugsync-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "SYNC_LDAP_BIND_PASSWORD": {
-          "map-to": "ranger.usersync.ldap.ldapbindpassword",
-          "from-catalog": "usersync-properties",
-          "to-catalog": "ranger-ugsync-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "SYNC_LDAP_GROUPNAME_CASE_CONVERSION": {
-          "map-to": "ranger.usersync.ldap.groupname.caseconversion",
-          "from-catalog": "usersync-properties",
-          "to-catalog": "ranger-ugsync-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "SYNC_LDAP_URL": {
-          "map-to": "ranger.usersync.ldap.url",
-          "from-catalog": "usersync-properties",
-          "to-catalog": "ranger-ugsync-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "SYNC_LDAP_USERNAME_CASE_CONVERSION": {
-          "map-to": "ranger.usersync.ldap.username.caseconversion",
-          "from-catalog": "usersync-properties",
-          "to-catalog": "ranger-ugsync-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "SYNC_LDAP_USER_GROUP_NAME_ATTRIBUTE": {
-          "map-to": "ranger.usersync.ldap.user.groupnameattribute",
-          "from-catalog": "usersync-properties",
-          "to-catalog": "ranger-ugsync-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "SYNC_LDAP_USER_NAME_ATTRIBUTE": {
-          "map-to": "ranger.usersync.ldap.user.nameattribute",
-          "from-catalog": "usersync-properties",
-          "to-catalog": "ranger-ugsync-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "SYNC_LDAP_USER_OBJECT_CLASS": {
-          "map-to": "ranger.usersync.ldap.user.objectclass",
-          "from-catalog": "usersync-properties",
-          "to-catalog": "ranger-ugsync-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "SYNC_LDAP_USER_SEARCH_BASE": {
-          "map-to": "ranger.usersync.ldap.user.searchbase",
-          "from-catalog": "usersync-properties",
-          "to-catalog": "ranger-ugsync-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "SYNC_LDAP_USER_SEARCH_FILTER": {
-          "map-to": "ranger.usersync.ldap.user.searchfilter",
-          "from-catalog": "usersync-properties",
-          "to-catalog": "ranger-ugsync-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "SYNC_LDAP_USER_SEARCH_SCOPE": {
-          "map-to": "ranger.usersync.ldap.user.searchscope",
-          "from-catalog": "usersync-properties",
-          "to-catalog": "ranger-ugsync-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "logdir": {
-          "map-to": "ranger.usersync.logdir",
-          "from-catalog": "usersync-properties",
-          "to-catalog": "ranger-ugsync-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "SYNC_SOURCE": {
-          "map-to": "ranger.usersync.sync.source",
-          "from-catalog": "usersync-properties",
-          "to-catalog": "ranger-ugsync-site",
-          "required-services": [
-            "RANGER"
-          ]
-        },
-        "nimbus.host": {
-          "map-to": "nimbus.seeds",
-          "coerce-to": "yaml-array"
-        },
-        "mapreduce.job.speculative.speculativecap": {
-          "map-to": "mapreduce.job.speculative.speculative-cap-running-tasks",
-          "from-catalog": "mapred-site",
-          "to-catalog": "mapred-site",
-          "default": "0.1"
-        },
-        "SSL_KEYSTORE_FILE_PATH": {
-          "map-to": "xasecure.policymgr.clientssl.keystore",
-          "from-catalog": "ranger-hdfs-plugin-properties",
-          "to-catalog": "ranger-hdfs-policymgr-ssl",
-          "default": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-keystore.jks",
-          "required-services": [
-            "RANGER",
-            "HDFS"
-          ]
-        },
-        "SSL_KEYSTORE_PASSWORD": {
-          "map-to": "xasecure.policymgr.clientssl.keystore.password",
-          "from-catalog": "ranger-hdfs-plugin-properties",
-          "to-catalog": "ranger-hdfs-policymgr-ssl",
-          "default": "myKeyFilePassword",
-          "required-services": [
-            "RANGER",
-            "HDFS"
-          ]
-        },
-        "SSL_TRUSTSTORE_FILE_PATH": {
-          "map-to": "xasecure.policymgr.clientssl.truststore",
-          "from-catalog": "ranger-hdfs-plugin-properties",
-          "to-catalog": "ranger-hdfs-policymgr-ssl",
-          "default": "/usr/hdp/current/hadoop-client/conf/ranger-plugin-truststore.jks",
-          "required-services": [
-            "RANGER",
-            "HDFS"
-          ]
-        },
-        "SSL_TRUSTSTORE_PASSWORD": {
-          "map-to": "xasecure.policymgr.clientssl.truststore.password",
-          "from-catalog": "ranger-hdfs-plugin-properties",
-          "to-catalog": "ranger-hdfs-policymgr-ssl",
-          "default": "changeit",
-          "required-services": [
-            "RANGER",
-            "HDFS"
-          ]
-        },
-        "POLICY_MGR_URL": {
-          "map-to": "ranger.plugin.hdfs.policy.rest.url",
-          "from-catalog": "ranger-hdfs-plugin-properties",
-          "to-catalog": "ranger-hdfs-security",
-          "default": "{POLICYMGR_MGR_URL}",
-          "template": "yes",
-          "required-services": [
-            "RANGER",
-            "HDFS"
-          ]
-        },
-        "REPOSITORY_NAME": {
-          "map-to": "ranger.plugin.hdfs.service.name",
-          "from-catalog": "ranger-hdfs-plugin-properties",
-          "to-catalog": "ranger-hdfs-security",
-          "default": "{HDFS_RANGER_REPO_NAME}",
-          "template": "yes",
-          "required-services": [
-            "RANGER",
-            "HDFS"
-          ]
-        },
-        "XAAUDIT.HDFS.DESTINATION_DIRECTORY": {
-          "map-to": "xasecure.audit.destination.hdfs.dir",
-          "from-catalog": "ranger-hdfs-plugin-properties",
-          "to-catalog": "ranger-hdfs-audit",
-          "default": "{XAAUDIT_HDFS_DESTINATION_DIRECTORY}",
-          "template": "yes",
-          "required-services": [
-            "RANGER",
-            "HDFS"
-          ]
-        },
-        "XAAUDIT.DB.USER_NAME": {
-          "map-to": "xasecure.audit.destination.db.user",
-          "from-catalog": "ranger-hdfs-plugin-properties",
-          "to-catalog": "ranger-hdfs-audit",
-          "required-services": [
-            "RANGER",
-            "HDFS"
-          ]
-        },
-        "hbase_SSL_KEYSTORE_FILE_PATH": {
-          "map-to": "xasecure.policymgr.clientssl.keystore",
-          "map-from": "SSL_KEYSTORE_FILE_PATH",
-          "from-catalog": "ranger-hbase-plugin-properties",
-          "to-catalog": "ranger-hbase-policymgr-ssl",
-          "default": "/usr/hdp/current/hbase-client/conf/ranger-plugin-keystore.jks",
-          "required-services": [
-            "RANGER",
-            "HBASE"
-          ]
-        },
-        "hbase_SSL_KEYSTORE_PASSWORD": {
-          "map-to": "xasecure.policymgr.clientssl.keystore.password",
-          "map-from": "SSL_KEYSTORE_PASSWORD",
-          "from-catalog": "ranger-hbase-plugin-properties",
-          "to-catalog": "ranger-hbase-policymgr-ssl",
-          "default": "myKeyFilePassword",
-          "required-services": [
-            "RANGER",
-            "HBASE"
-          ]
-        },
-        "hbase_SSL_TRUSTSTORE_FILE_PATH": {
-          "map-to": "xasecure.policymgr.clientssl.truststore",
-          "map-from": "SSL_TRUSTSTORE_FILE_PATH",
-          "from-catalog": "ranger-hbase-plugin-properties",
-          "to-catalog": "ranger-hbase-policymgr-ssl",
-          "default": "/usr/hdp/current/hbase-client/conf/ranger-plugin-truststore.jks",
-          "required-services": [
-            "RANGER",
-            "HBASE"
-          ]
-        },
-        "hbase_SSL_TRUSTSTORE_PASSWORD": {
-          "map-to": "xasecure.policymgr.clientssl.truststore.password",
-          "map-from": "SSL_TRUSTSTORE_PASSWORD",
-          "from-catalog": "ranger-hbase-plugin-properties",
-          "to-catalog": "ranger-hbase-policymgr-ssl",
-          "default": "changeit",
-          "required-services": [
-            "RANGER",
-            "HBASE"
-          ]
-        },
-        "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": {
-          "map-to": "xasecure.hbase.update.xapolicies.on.grant.revoke",
-          "from-catalog": "ranger-hbase-plugin-properties",
-          "to-catalog": "ranger-hbase-security",
-          "default": "true",
-          "required-services": [
-            "RANGER",
-            "HBASE"
-          ]
-        },
-        "hbase_POLICY_MGR_URL": {
-          "map-from": "POLICY_MGR_URL",
-          "map-to": "ranger.plugin.hbase.policy.rest.url",
-          "from-catalog": "ranger-hbase-plugin-properties",
-          "to-catalog": "ranger-hbase-security",
-          "default": "{POLICYMGR_MGR_URL}",
-          "template": "yes",
-          "required-services": [
-            "RANGER",
-            "HBASE"
-          ]
-        },
-        "hbase_REPOSITORY_NAME": {
-          "map-from": "REPOSITORY_NAME",
-          "map-to": "ranger.plugin.hbase.service.name",
-          "from-catalog": "ranger-hbase-plugin-properties",
-          "to-catalog": "ranger-hbase-security",
-          "default": "{HBASE_RANGER_REPO_NAME}",
-          "template": "yes",
-          "required-services": [
-            "RANGER",
-            "HBASE"
-          ]
-        },
-        "hbase_XAAUDIT.HDFS.DESTINATION_DIRECTORY": {
-          "map-from": "XAAUDIT.HDFS.DESTINATION_DIRECTORY",
-          "map-to": "xasecure.audit.destination.hdfs.dir",
-          "from-catalog": "ranger-hbase-plugin-properties",
-          "to-catalog": "ranger-hbase-audit",
-          "default": "{XAAUDIT_HDFS_DESTINATION_DIRECTORY}",
-          "template": "yes",
-          "required-services": [
-            "RANGER",
-            "HBASE"
-          ]
-        },
-        "hbase_XAAUDIT.DB.USER_NAME": {
-          "map-from": "XAAUDIT.DB.USER_NAME",
-          "map-to": "xasecure.audit.destination.db.user",
-          "from-catalog": "ranger-hbase-plugin-properties",
-          "to-catalog": "ranger-hbase-audit",
-          "required-services": [
-            "RANGER",
-            "HBASE"
-          ]
-        },
-        "hive_SSL_KEYSTORE_FILE_PATH": {
-          "map-to": "xasecure.policymgr.clientssl.keystore",
-          "map-from": "SSL_KEYSTORE_FILE_PATH",
-          "from-catalog": "ranger-hive-plugin-properties",
-          "to-catalog": "ranger-hive-policymgr-ssl",
-          "default": "/etc/hive/conf/conf.server/ranger-plugin-keystore.jks",
-          "required-services": [
-            "RANGER",
-            "HIVE"
-          ]
-        },
-        "hive_SSL_KEYSTORE_PASSWORD": {
-          "map-to": "xasecure.policymgr.clientssl.keystore.password",
-          "map-from": "SSL_KEYSTORE_PASSWORD",
-          "from-catalog": "ranger-hive-plugin-properties",
-          "to-catalog": "ranger-hive-policymgr-ssl",
-          "default": "myKeyFilePassword",
-          "required-services": [
-            "RANGER",
-            "HIVE"
-          ]
-        },
-        "hive_SSL_TRUSTSTORE_FILE_PATH": {
-          "map-to": "xasecure.policymgr.clientssl.truststore",
-          "map-from": "SSL_TRUSTSTORE_FILE_PATH",
-          "from-catalog": "ranger-hive-plugin-properties",
-          "to-catalog": "ranger-hive-policymgr-ssl",
-          "default": "/etc/hive/conf/conf.server/ranger-plugin-truststore.jks",
-          "required-services": [
-            "RANGER",
-            "HIVE"
-          ]
-        },
-        "hive_SSL_TRUSTSTORE_PASSWORD": {
-          "map-to": "xasecure.policymgr.clientssl.truststore.password",
-          "map-from": "SSL_TRUSTSTORE_PASSWORD",
-          "from-catalog": "ranger-hive-plugin-properties",
-          "to-catalog": "ranger-hive-policymgr-ssl",
-          "default": "changeit",
-          "required-services": [
-            "RANGER",
-            "HIVE"
-          ]
-        },
-        "hive_UPDATE_XAPOLICIES_ON_GRANT_REVOKE": {
-          "map-from": "UPDATE_XAPOLICIES_ON_GRANT_REVOKE",
-          "map-to": "xasecure.hive.update.xapolicies.on.grant.revoke",
-          "from-catalog": "ranger-hive-plugin-properties",
-          "to-catalog": "ranger-hive-security",
-          "default": "true",
-          "required-services": [
-            "RANGER",
-            "HIVE"
-          ]
-        },
-        "hive_POLICY_MGR_URL": {
-          "map-from": "POLICY_MGR_URL",
-          "map-to": "ranger.plugin.hive.policy.rest.url",
-          "from-catalog": "ranger-hive-plugin-properties",
-          "to-catalog": "ranger-hive-security",
-          "default": "{POLICYMGR_MGR_URL}",
-          "template": "yes",
-          "required-services": [
-            "RANGER",
-            "HIVE"
-          ]
-        },
-        "hive_REPOSITORY_NAME": {
-          "map-from": "REPOSITORY_NAME",
-          "map-to": "ranger.plugin.hive.service.name",
-          "from-catalog": "ranger-hive-plugin-properties",
-          "to-catalog": "ranger-hive-security",
-          "default": "{HIVE_RANGER_REPO_NAME}",
-          "template": "yes",
-          "required-services": [
-            "RANGER",
-            "HIVE"
-          ]
-        },
-        "hive_XAAUDIT.HDFS.DESTINATION_DIRECTORY": {
-          "map-from": "XAAUDIT.HDFS.DESTINATION_DIRECTORY",
-          "map-to": "xasecure.audit.destination.hdfs.dir",
-          "from-catalog": "ranger-hive-plugin-properties",
-          "to-catalog": "ranger-hive-audit",
-          "default": "{XAAUDIT_HDFS_DESTINATION_DIRECTORY}",
-          "template": "yes",
-          "required-services": [
-            "RANGER",
-            "HIVE"
-          ]
-        },
-        "hive_XAAUDIT.DB.USER_NAME": {
-          "map-from": "XAAUDIT.DB.USER_NAME",
-          "map-to": "xasecure.audit.destination.db.user",
-          "from-catalog": "ranger-hive-plugin-properties",
-          "to-catalog": "ranger-hive-audit",
-          "required-services": [
-            "RANGER",
-            "HIVE"
-          ]
-        },
-        "knox_SSL_KEYSTORE_FILE_PATH": {
-          "map-to": "xasecure.policymgr.clientssl.keystore",
-          "map-from": "SSL_KEYSTORE_FILE_PATH",
-          "from-catalog": "ranger-knox-plugin-properties",
-          "to-catalog": "ranger-knox-policymgr-ssl",
-          "default": "/usr/hdp/current/knox-server/conf/ranger-plugin-keystore.jks",
-          "required-services": [
-            "RANGER",
-            "KNOX"
-          ]
-        },
-        "knox_SSL_KEYSTORE_PASSWORD": {
-          "map-to": "xasecure.policymgr.clientssl.keystore.password",
-          "map-from": "SSL_KEYSTORE_PASSWORD",
-          "from-catalog": "ranger-knox-plugin-properties",
-          "to-catalog": "ranger-knox-policymgr-ssl",
-          "default": "myKeyFilePassword",
-          "required-services": [
-            "RANGER",
-            "KNOX"
-          ]
-        },
-        "knox_SSL_TRUSTSTORE_FILE_PATH": {
-          "map-to": "xasecure.policymgr.clientssl.truststore",
-          "map-from": "SSL_TRUSTSTORE_FILE_PATH",
-          "from-catalog": "ranger-knox-plugin-properties",
-          "to-catalog": "ranger-knox-policymgr-ssl",
-          "default": "/usr/hdp/current/knox-server/conf/ranger-plugin-truststore.jks",
-          "required-services": [
-            "RANGER",
-            "KNOX"
-          ]
-        },
-        "knox_SSL_TRUSTSTORE_PASSWORD": {
-          "map-to": "xasecure.policymgr.clientssl.truststore.password",
-          "map-from": "SSL_TRUSTSTORE_PASSWORD",
-          "from-catalog": "ranger-knox-plugin-properties",
-          "to-catalog": "ranger-knox-policymgr-ssl",
-          "default": "changeit",
-          "required-services": [
-            "RANGER",
-            "KNOX"
-          ]
-        },
-        "knox_POLICY_MGR_URL": {
-          "map-from": "POLICY_MGR_URL",
-          "map-to": "ranger.plugin.knox.policy.rest.url",
-          "from-catalog": "ranger-knox-plugin-properties",
-          "to-catalog": "ranger-knox-security",
-          "default": "{POLICYMGR_MGR_URL}",
-          "template": "yes",
-          "required-services": [
-            "RANGER",
-            "KNOX"
-          ]
-        },
-        "knox_REPOSITORY_NAME": {
-          "map-from": "REPOSITORY_NAME",
-          "map-to": "ranger.plugin.knox.service.name",
-          "from-catalog": "ranger-knox-plugin-properties",
-          "to-catalog": "ranger-knox-security",
-          "default": "{KNOX_RANGER_REPO_NAME}",
-          "template": "yes",
-          "required-services": [
-            "RANGER",
-            "KNOX"
-          ]
-        },
-        "knox_XAAUDIT.HDFS.DESTINATION_DIRECTORY": {
-          "map-from": "XAAUDIT.HDFS.DESTINATION_DIRECTORY",
-          "map-to": "xasecure.audit.destination.hdfs.dir",
-          "from-catalog": "ranger-knox-plugin-properties",
-          "to-catalog": "ranger-knox-audit",
-          "default": "{XAAUDIT_HDFS_DESTINATION_DIRECTORY}",
-          "template": "yes",
-          "required-services": [
-            "RANGER",
-            "KNOX"
-          ]
-        },
-        "knox_XAAUDIT.DB.USER_NAME": {
-          "map-from": "XAAUDIT.DB.USER_NAME",
-          "map-to": "xasecure.audit.destination.db.user",
-          "from-catalog": "ranger-knox-plugin-properties",
-          "to-catalog": "ranger-knox-audit",
-          "required-services": [
-            "RANGER",
-            "KNOX"
-          ]
-        },
-        "storm_SSL_KEYSTORE_FILE_PATH": {
-          "map-to": "xasecure.policymgr.clientssl.keystore",
-          "map-from": "SSL_KEYSTORE_FILE_PATH",
-          "from-catalog": "ranger-storm-plugin-properties",
-          "to-catalog": "ranger-storm-policymgr-ssl",
-          "default": "/usr/hdp/current/storm-client/conf/ranger-plugin-keystore.jks",
-          "required-services": [
-            "RANGER",
-            "STORM"
-          ]
-        },
-        "storm_SSL_KEYSTORE_PASSWORD": {
-          "map-to": "xasecure.policymgr.clientssl.keystore.password",
-          "map-from": "SSL_KEYSTORE_PASSWORD",
-          "from-catalog": "ranger-storm-plugin-properties",
-          "to-catalog": "ranger-storm-policymgr-ssl",
-          "default": "myKeyFilePassword",
-          "required-services": [
-            "RANGER",
-            "STORM"
-          ]
-        },
-        "storm_SSL_TRUSTSTORE_FILE_PATH": {
-          "map-to": "xasecure.policymgr.clientssl.truststore",
-          "map-from": "SSL_TRUSTSTORE_FILE_PATH",
-          "from-catalog": "ranger-storm-plugin-properties",
-          "to-catalog": "ranger-storm-policymgr-ssl",
-          "default": "/usr/hdp/current/storm-client/conf/ranger-plugin-truststore.jks",
-          "required-services": [
-            "RANGER",
-            "STORM"
-          ]
-        },
-        "storm_SSL_TRUSTSTORE_PASSWORD": {
-          "map-to": "xasecure.policymgr.clientssl.truststore.password",
-          "map-from": "SSL_TRUSTSTORE_PASSWORD",
-          "from-catalog": "ranger-storm-plugin-properties",
-          "to-catalog": "ranger-storm-policymgr-ssl",
-          "default": "changeit",
-          "required-services": [
-            "RANGER",
-            "STORM"
-          ]
-        },
-        "storm_POLICY_MGR_URL": {
-          "map-from": "POLICY_MGR_URL",
-          "map-to": "ranger.plugin.storm.policy.rest.url",
-          "from-catalog": "ranger-storm-plugin-properties",
-          "to-catalog": "ranger-storm-security",
-          "default": "{POLICYMGR_MGR_URL}",
-          "template": "yes",
-          "required-services": [
-            "RANGER",
-            "STORM"
-          ]
-        },
-        "storm_REPOSITORY_NAME": {
-          "map-from": "REPOSITORY_NAME",
-          "map-to": "ranger.plugin.storm.service.name",
-          "from-catalog": "ranger-storm-plugin-properties",
-          "to-catalog": "ranger-storm-security",
-          "default": "{STORM_RANGER_REPO_NAME}",
-          "template": "yes",
-          "required-services": [
-            "RANGER",
-            "STORM"
-          ]
-        },
-        "storm_XAAUDIT.HDFS.DESTINATION_DIRECTORY": {
-          "map-from": "XAAUDIT.HDFS.DESTINATION_DIRECTORY",
-          "map-to": "xasecure.audit.destination.hdfs.dir",
-          "from-catalog": "ranger-storm-plugin-properties",
-          "to-catalog": "ranger-storm-audit",
-          "default": "{XAAUDIT_HDFS_DESTINATION_DIRECTORY}",
-          "template": "yes",
-          "required-services": [
-            "RANGER",
-            "STORM"
-          ]
-        },
-        "storm_XAAUDIT.DB.USER_NAME": {
-          "map-from": "XAAUDIT.DB.USER_NAME",
-          "map-to": "xasecure.audit.destination.db.user",
-          "from-catalog": "ranger-storm-plugin-properties",
-          "to-catalog": "ranger-storm-audit",
-          "required-services": [
-            "RANGER",
-            "STORM"
-          ]
-        }
-      }
-    }
-  ]
-}
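
In the catalog deleted above, each entry under "property-mapping" copies a property from one config type ("from-catalog") into another ("to-catalog") during the cross-stack upgrade, optionally renaming it ("map-from"/"map-to"), rewriting path fragments ("replace-from"/"replace-to"), falling back to a "default", and expanding "{PLACEHOLDER}" tokens when "template" is "yes". The JavaScript below is only a minimal sketch of that contract; it is not the Ambari upgrade helper itself, every function and variable name in it is invented, and the "required-services" guard is omitted.

    // Minimal sketch only: apply one "property-mapping" entry from the catalog
    // above to in-memory config maps. All names here are invented for the sketch.
    function applyMapping(name, rule, configs, templateValues) {
      var fromCatalog = configs[rule["from-catalog"]] || {};
      var toCatalog = configs[rule["to-catalog"]] = configs[rule["to-catalog"]] || {};
      var sourceKey = rule["map-from"] || name;   // e.g. "POLICY_MGR_URL"
      var targetKey = rule["map-to"] || name;     // e.g. "ranger.plugin.hdfs.policy.rest.url"

      var value = fromCatalog[sourceKey];
      if (value === undefined) {
        value = rule["default"];                  // fall back to the declared default
      }
      if (value === undefined) {
        return;                                   // nothing to migrate
      }
      if (rule["replace-from"] && typeof value === "string") {
        value = value.split(rule["replace-from"]).join(rule["replace-to"] || "");
      }
      if (rule["template"] === "yes" && typeof value === "string") {
        // expand "{UPPER_CASE}" tokens from a caller-supplied dictionary
        value = value.replace(/\{([A-Z0-9_]+)\}/g, function (match, key) {
          return templateValues[key] !== undefined ? templateValues[key] : match;
        });
      }
      toCatalog[targetKey] = value;
    }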

http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.2_to_2.3_step2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.2_to_2.3_step2.json b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.2_to_2.3_step2.json
deleted file mode 100644
index 014aa71..0000000
--- a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.2_to_2.3_step2.json
+++ /dev/null
@@ -1,81 +0,0 @@
-{
-    "version": "1.0",
-    "stacks": [
-        {
-            "name": "HDP",
-            "old-version": "2.2",
-            "target-version": "2.3",
-            "options": {
-                "config-types": {
-                  "ranger-hdfs-security": {
-                    "merged-copy": "yes",
-                    "required-services": ["RANGER","HDFS"]
-                  },
-                  "ranger-hbase-security": {
-                    "merged-copy": "yes",
-                    "required-services": ["RANGER","HBASE"]
-                  },
-                  "ranger-hive-security": {
-                    "merged-copy": "yes",
-                    "required-services": ["RANGER","HIVE"]
-                  },
-                  "ranger-knox-security": {
-                    "merged-copy": "yes",
-                    "required-services": ["RANGER","KNOX"]
-                  },
-                  "ranger-storm-security": {
-                    "merged-copy": "yes",
-                    "required-services": ["RANGER","STORM"]
-                  },
-                  "ranger-kafka-security" : {
-                    "merged-copy": "yes",
-                    "required-services": ["RANGER","KAFKA"]
-                  },
-                  "ranger-yarn-security" : {
-                    "merged-copy": "yes",
-                    "required-services": ["RANGER","YARN"]
-                  },
-                    "ranger-admin-site": {
-                        "merged-copy": "yes",
-                        "required-services": [ "RANGER" ]
-                    },
-                    "ranger-ugsync-site": {
-                        "merged-copy": "yes",
-                        "required-services": [
-                            "RANGER"
-                        ]
-                    }
-                }
-            },
-            "properties": {
-              "ranger-admin-site": {
-                "upgrade.transition": {"remove": "yes"}
-              },
-              "ranger-ugsync-site": {
-                "upgrade.transition": {"remove": "yes"}
-              },
-              "ranger-hbase-security": {
-                "upgrade.transition": {"remove": "yes"}
-              },
-              "ranger-hdfs-security": {
-                "upgrade.transition": {"remove": "yes"}
-              },
-              "ranger-storm-security": {
-                "upgrade.transition": {"remove": "yes"}
-              },
-              "ranger-knox-security": {
-                "upgrade.transition": {"remove": "yes"}
-              },
-              "ranger-hive-security": {
-                "upgrade.transition": {"remove": "yes"}
-              },
-              "ranger-kafka-security": {
-                "upgrade.transition": {"remove": "yes"}
-              },
-              "ranger-yarn-security": {
-                "upgrade.transition": {"remove": "yes"}
-              }
-            }
-    }
-  ]
-}
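
The step2 catalog deleted above mostly marks config types as "merged-copy" and strips the temporary "upgrade.transition" flag once the 2.2-to-2.3 transition has finished. A minimal sketch of how those two directives could be interpreted, assuming invented helper names and in-memory config maps (this is not the actual upgrade tooling):

    // Minimal sketch: honour the "merged-copy" option and a per-property
    // {"remove": "yes"} directive from the catalog above. Names are invented.
    function applyConfigType(typeName, options, properties, oldConfigs, newConfigs) {
      var source = oldConfigs[typeName] || {};
      // "merged-copy": start from the existing config and lay changes on top
      var target = options["merged-copy"] === "yes"
        ? Object.assign({}, source)
        : {};
      Object.keys(properties || {}).forEach(function (key) {
        var rule = properties[key];
        if (rule && rule.remove === "yes") {
          delete target[key];                     // e.g. drop "upgrade.transition"
        } else if (rule && typeof rule === "object" && "value" in rule) {
          target[key] = rule.value;
        } else {
          target[key] = rule;                     // plain string value
        }
      });
      newConfigs[typeName] = target;
    }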


[51/63] [abbrv] ambari git commit: AMBARI-21360: Ability to delete a view instance from view instance list (sangeetar)

Posted by ab...@apache.org.
AMBARI-21360: Ability to delete a view instance from view instance list (sangeetar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f4fb1742
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f4fb1742
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f4fb1742

Branch: refs/heads/branch-feature-logsearch-ui
Commit: f4fb1742b1ba29247da897f4ca1dd67a82a34c1b
Parents: 5c874cc
Author: Sangeeta Ravindran <sa...@apache.org>
Authored: Wed Jun 28 10:49:31 2017 -0700
Committer: Sangeeta Ravindran <sa...@apache.org>
Committed: Wed Jun 28 10:49:31 2017 -0700

----------------------------------------------------------------------
 AMBARI-21360.patch | 45 ---------------------------------------------
 1 file changed, 45 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f4fb1742/AMBARI-21360.patch
----------------------------------------------------------------------
diff --git a/AMBARI-21360.patch b/AMBARI-21360.patch
deleted file mode 100644
index c26f3a0..0000000
--- a/AMBARI-21360.patch
+++ /dev/null
@@ -1,45 +0,0 @@
-diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js
-index c41e5d4..4e7bae3 100644
---- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js
-+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js
-@@ -132,6 +132,26 @@ angular.module('ambariAdminConsole')
-     }
-   };
- 
-+  $scope.deleteInstance = function(instance) {
-+      ConfirmationModal.show(
-+        $t('common.delete', {
-+          term: $t('views.viewInstance')
-+        }),
-+        $t('common.deleteConfirmation', {
-+          instanceType: $t('views.viewInstance'),
-+          instanceName: instance.ViewInstanceInfo.label
-+        })
-+      ).then(function() {
-+        View.deleteInstance(instance.ViewInstanceInfo.view_name, instance.ViewInstanceInfo.version, instance.ViewInstanceInfo.instance_name)
-+          .then(function() {
-+            loadViews();
-+          })
-+          .catch(function(data) {
-+            Alert.error($t('views.alerts.cannotDeleteInstance'), data.data.message);
-+          });
-+      });
-+    };
-+
-   $scope.reloadViews = function () {
-     loadViews();
-   };
-diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html
-index 59c322f..91b9a93 100644
---- a/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html
-+++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html
-@@ -81,6 +81,9 @@
-                     <td class="col-sm-1">
-                         <a class="instance-link ng-scope ng-binding" href="#/views/{{view.view_name}}/versions/{{instance.ViewInstanceInfo.version}}/instances/{{instance.ViewInstanceInfo.instance_name}}/clone"><i class="fa fa-copy"></i></a>
-                     </td>
-+                    <td class="col-sm-1">
-+                        <a class="instance-link ng-scope ng-binding" href ng-click="deleteInstance(instance)"><i class="fa fa-trash-o"></i></a>
-+                    </td>
-                 </tr>
-                 </tbody>
-                 <tfoot>


[40/63] [abbrv] ambari git commit: AMBARI-21172 - Delete view privileges from the Groups page (Anita Jebaraj via sangeetar)

Posted by ab...@apache.org.
AMBARI-21172 - Delete view privileges from the Groups page (Anita Jebaraj via sangeetar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/86347182
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/86347182
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/86347182

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 86347182a99209dcd767240ae475a03549acd989
Parents: 3529d05
Author: Sangeeta Ravindran <sa...@apache.org>
Authored: Tue Jun 27 10:49:43 2017 -0700
Committer: Sangeeta Ravindran <sa...@apache.org>
Committed: Tue Jun 27 10:49:43 2017 -0700

----------------------------------------------------------------------
 .../scripts/controllers/groups/GroupsEditCtrl.js | 19 +++++++++++++++++--
 .../ui/admin-web/app/views/groups/edit.html      |  3 +++
 2 files changed, 20 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/86347182/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js
index 92406e9..21d0fd6 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/groups/GroupsEditCtrl.js
@@ -129,6 +129,20 @@ angular.module('ambariAdminConsole')
     });
   };
 
+
+  $scope.removePrivilege = function(name, privilege) {
+    var privilegeObject = {
+        id: privilege.privilege_id,
+        view_name: privilege.view_name,
+        version: privilege.version,
+        instance_name: name
+    };
+    View.deletePrivilege(privilegeObject).then(function() {
+      loadPrivileges();
+    });
+  };
+
+function loadPrivileges() {
   // Load privileges
   Group.getPrivileges($routeParams.id).then(function(data) {
     var privileges = {
@@ -145,6 +159,7 @@ angular.module('ambariAdminConsole')
         privileges.views[privilege.instance_name] = privileges.views[privilege.instance_name] || { privileges:[]};
         privileges.views[privilege.instance_name].version = privilege.version;
         privileges.views[privilege.instance_name].view_name = privilege.view_name;
+        privileges.views[privilege.instance_name].privilege_id = privilege.privilege_id;
         privileges.views[privilege.instance_name].privileges.push(privilege.permission_label);
       }
     });
@@ -157,6 +172,6 @@ angular.module('ambariAdminConsole')
   }).catch(function(data) {
     Alert.error($t('common.alerts.cannotLoadPrivileges'), data.data.message);
   });
-
-
+}
+loadPrivileges();
 }]);

http://git-wip-us.apache.org/repos/asf/ambari/blob/86347182/ambari-admin/src/main/resources/ui/admin-web/app/views/groups/edit.html
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/groups/edit.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/groups/edit.html
index e472ede..1aafd03 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/views/groups/edit.html
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/groups/edit.html
@@ -83,6 +83,9 @@
               <td>
                 <span tooltip="{{item}}" ng-repeat="item in privilege.privileges">{{item | translate}}{{$last ? '' : ', '}}</span>
               </td>
+              <td>
+                <i class="fa fa-trash-o" aria-hidden="true" ng-click="removePrivilege(name, privilege);"></i>
+              </td>
             </tr>
             <tr>
               <td ng-show="noViewPriv">{{'common.alerts.noPrivileges' | translate: '{term: constants.view}'}}</td>


[14/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.2.json b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.2.json
deleted file mode 100644
index 50c63de..0000000
--- a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.2.json
+++ /dev/null
@@ -1,465 +0,0 @@
-{
-  "version": "1.0",
-  "stacks": [
-    {
-      "name": "HDP",
-      "old-version": "2.1",
-      "target-version": "2.2.2",
-      "options": {
-        "config-types": {
-          "capacity-scheduler": {
-            "merged-copy": "yes"
-          },
-		  "cluster-env": {
-            "merged-copy": "yes"
-          },
-		  "core-site": {
-            "merged-copy": "yes"
-          },
-		  "falcon-startup.properties": {
-            "merged-copy": "yes"
-          },
-		  "hadoop-env": {
-            "merged-copy": "yes"
-          },
-		  "hbase-env": {
-            "merged-copy": "yes"
-          },
-		  "hbase-site": {
-            "merged-copy": "yes"
-          },
-		  "hdfs-log4j": {
-            "merged-copy": "yes"
-          },
-		  "hdfs-site": {
-            "merged-copy": "yes"
-          },
-		  "hive-env": {
-            "merged-copy": "yes"
-          },
-		  "hive-site": {
-            "merged-copy": "yes"
-          },
-		  "mapred-env": {
-            "merged-copy": "yes"
-          },
-		  "mapred-site": {
-            "merged-copy": "yes"
-          },
-		  "oozie-env": {
-            "merged-copy": "yes"
-          },
-          "oozie-site": {
-            "merged-copy": "yes"
-          },
-		  "storm-env": {
-            "merged-copy": "yes"
-          },
-		  "storm-site": {
-            "merged-copy": "yes"
-          },
-		  "tez-site": {
-            "merged-copy": "yes"
-          },
-		  "webhcat-site": {
-            "merged-copy": "yes"
-          },
-		  "yarn-site": {
-            "merged-copy": "yes"
-          }
-        }
-      },
-      "properties": {
-        "capacity-scheduler": {
-          "yarn.scheduler.capacity.resource-calculator": "org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator",
-          "yarn.scheduler.capacity.root.accessible-node-labels": "*",
-          "yarn.scheduler.capacity.root.accessible-node-labels.default.capacity": {"remove": "yes"},
-          "yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity": {"remove": "yes"},
-          "yarn.scheduler.capacity.root.default-node-label-expression": " "    
-        },
-		"core-site": {
-		  "hadoop.http.authentication.simple.anonymous.allowed": "true"  
-		},
-		"falcon-startup.properties": {
-		  "*.application.services": "org.apache.falcon.security.AuthenticationInitializationService,\\\n      org.apache.falcon.workflow.WorkflowJobEndNotificationService, \\\n      org.apache.falcon.service.ProcessSubscriberService,\\\n      org.apache.falcon.entity.store.ConfigurationStore,\\\n      org.apache.falcon.rerun.service.RetryService,\\\n      org.apache.falcon.rerun.service.LateRunService,\\\n      org.apache.falcon.service.LogCleanupService,\\\n      org.apache.falcon.metadata.MetadataMappingService",
-          "*.falcon.enableTLS": "false", 
-          "*.falcon.graph.blueprints.graph": "com.thinkaurelius.titan.core.TitanFactory",
-          "*.falcon.graph.storage.backend": "berkeleyje",
-          "*.falcon.security.authorization.admin.groups": "falcon", 
-          "*.falcon.security.authorization.admin.users": "falcon,ambari-qa", 
-          "*.falcon.security.authorization.enabled": "false", 
-          "*.falcon.security.authorization.provider": "org.apache.falcon.security.DefaultAuthorizationProvider", 
-          "*.falcon.security.authorization.superusergroup": "falcon",
-          "*.journal.impl": "org.apache.falcon.transaction.SharedFileSystemJournal",
-          "prism.application.services": "org.apache.falcon.entity.store.ConfigurationStore", 
-          "prism.configstore.listeners": "org.apache.falcon.entity.v0.EntityGraph,\\\n      org.apache.falcon.entity.ColoClusterRelation,\\\n      org.apache.falcon.group.FeedGroupMap"    
-		},
-		"hadoop-env": {
-		  "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HAD
 OOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{
 {jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following applies
  to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MAST
 ER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*my
 sql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"" 
  
-		},
-		"hbase-env": {
-		  "content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable JMX expo
 rting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n# If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{log_dir}}
 \n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERVER_OPTS=
 \"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% else %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n{% endif %}"  
-		},
-		"hbase-site": {
-		  "hbase.hregion.majorcompaction": "604800000", 
-          "hbase.hregion.majorcompaction.jitter": "0.50",
-          "hbase.hregion.memstore.block.multiplier": "4",
-		  "hbase.hstore.flush.retries.number": {"remove": "yes"}
-		},
-		"hdfs-log4j": {
-		  "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.logger=INF
 O,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console
 .target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logger}\nhad
 oop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\nl
 og4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n#
  Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.ap
 pender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateChange=WARN"  
-		},
-		"hdfs-site": {
-		  "dfs.datanode.max.transfer.threads": "16384",
-          "dfs.namenode.startup.delay.block.deletion.sec": "3600"  
-		},
-		"hive-env": {
-		  "content": "\n if [ \"$SERVICE\" = \"cli\" ]; then\n   if [ -z \"$DEBUG\" ]; then\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit\"\n   else\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n   fi\n fi\n\n# The heap size of the jvm stared by hive shell script can be controlled via:\n\n# Larger heap size may be required when running queries over large number of files or partitions.\n# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be\n# appropriate for hive server (hwi etc).\n\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hive Configuration Directory can be controlled by:\nexport HIVE_CONF_DIR={{hive_config_dir}}\n\n# Folder containing extra libraries required for hive compilat
 ion/execution can be controlled by:\nif [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\nelif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog\nfi\n\nexport METASTORE_PORT={{hive_metastore_port}}"
-		},
-		"hive-site": {
-		  "hive.auto.convert.sortmerge.join.to.mapjoin": "false",
-          "hive.cbo.enable": "true", 
-          "hive.cli.print.header": "false", 
-          "hive.cluster.delegation.token.store.class": "org.apache.hadoop.hive.thrift.ZooKeeperTokenStore",
-          "hive.cluster.delegation.token.store.zookeeper.znode": "/hive/cluster/delegation",
-          "hive.conf.restricted.list": "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role", 
-          "hive.convert.join.bucket.mapjoin.tez": "false",
-          "hive.exec.compress.intermediate": "false", 
-          "hive.exec.compress.output": "false", 
-          "hive.exec.dynamic.partition": "true", 
-          "hive.exec.dynamic.partition.mode": "nonstrict",
-          "hive.exec.max.created.files": "100000", 
-          "hive.exec.max.dynamic.partitions": "5000", 
-          "hive.exec.max.dynamic.partitions.pernode": "2000", 
-          "hive.exec.orc.compression.strategy": "SPEED", 
-          "hive.exec.orc.default.compress": "ZLIB", 
-          "hive.exec.orc.default.stripe.size": "67108864", 
-          "hive.exec.parallel": "false", 
-          "hive.exec.parallel.thread.number": "8",
-          "hive.exec.reducers.bytes.per.reducer": "67108864", 
-          "hive.exec.reducers.max": "1009", 
-          "hive.exec.scratchdir": "/tmp/hive", 
-          "hive.exec.submit.local.task.via.child": "true", 
-          "hive.exec.submitviachild": "false",
-          "hive.fetch.task.aggr": "false", 
-          "hive.fetch.task.conversion": "more", 
-          "hive.fetch.task.conversion.threshold": "1073741824",
-          "hive.map.aggr.hash.force.flush.memory.threshold": "0.9", 
-          "hive.map.aggr.hash.min.reduction": "0.5", 
-          "hive.map.aggr.hash.percentmemory": "0.5",
-          "hive.mapjoin.optimized.hashtable": "true",
-          "hive.merge.mapfiles": "true", 
-          "hive.merge.mapredfiles": "false", 
-          "hive.merge.orcfile.stripe.level": "true", 
-          "hive.merge.rcfile.block.level": "true", 
-          "hive.merge.size.per.task": "256000000", 
-          "hive.merge.smallfiles.avgsize": "16000000", 
-          "hive.merge.tezfiles": "false", 
-          "hive.metastore.authorization.storage.checks": "false",
-          "hive.metastore.client.connect.retry.delay": "5s", 
-          "hive.metastore.client.socket.timeout": "1800s", 
-          "hive.metastore.connect.retries": "24",
-          "hive.metastore.failure.retries": "24",
-          "hive.metastore.server.max.threads": "100000",
-          "hive.optimize.constant.propagation": "true",
-          "hive.optimize.metadataonly": "true", 
-          "hive.optimize.null.scan": "true",
-          "hive.optimize.sort.dynamic.partition": "false", 
-          "hive.orc.compute.splits.num.threads": "10",
-          "hive.prewarm.enabled": "false", 
-          "hive.prewarm.numcontainers": "10",
-          "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory", 
-          "hive.security.metastore.authenticator.manager": "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator", 
-          "hive.security.metastore.authorization.auth.reads": "true", 
-          "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider,org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly", 
-          "hive.server2.allow.user.substitution": "true",
-          "hive.server2.logging.operation.enabled": "true", 
-          "hive.server2.logging.operation.log.location": "/tmp/hive/operation_logs",
-          "hive.server2.table.type.mapping": "CLASSIC",
-          "hive.server2.thrift.http.path": "cliservice", 
-          "hive.server2.thrift.http.port": "10001", 
-          "hive.server2.thrift.max.worker.threads": "500",
-          "hive.server2.thrift.sasl.qop": "auth",
-          "hive.server2.use.SSL": "false",
-          "hive.smbjoin.cache.rows": "10000",
-          "hive.stats.dbclass": "fs", 
-          "hive.stats.fetch.column.stats": "false", 
-          "hive.stats.fetch.partition.stats": "true", 
-          "hive.support.concurrency": "false", 
-          "hive.tez.auto.reducer.parallelism": "false",
-          "hive.tez.cpu.vcores": "-1", 
-          "hive.tez.dynamic.partition.pruning": "true", 
-          "hive.tez.dynamic.partition.pruning.max.data.size": "104857600", 
-          "hive.tez.dynamic.partition.pruning.max.event.size": "1048576",
-          "hive.tez.log.level": "INFO", 
-          "hive.tez.max.partition.factor": "2.0", 
-          "hive.tez.min.partition.factor": "0.25", 
-          "hive.tez.smb.number.waves": "0.5",
-          "hive.user.install.directory": "/user/",
-          "hive.vectorized.execution.reduce.enabled": "false", 
-          "hive.vectorized.groupby.checkinterval": "4096",
-          "hive.zookeeper.client.port": "2181", 
-          "hive.zookeeper.namespace": "hive_zookeeper_namespace",
-		  "fs.file.impl.disable.cache": {"remove": "yes"},
-		  "fs.hdfs.impl.disable.cache": {"remove": "yes"},
-		  "hive.auto.convert.sortmerge.join.noconditionaltask": {"remove": "yes"},
-		  "hive.heapsize": {"remove": "yes"},
-		  "hive.optimize.mapjoin.mapreduce": {"remove": "yes"},
-		  "hive.server2.enable.impersonation": {"remove": "yes"}
-		},
-		"mapred-env": {
-		  "content": "\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n\nexport HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n\nexport HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n\n#export HADOOP_JOB_HISTORYSERVER_OPTS=\n#export HADOOP_MAPRED_LOG_DIR=\"\" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.\n#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.\n#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.\n#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default\n#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\""
-		},
-		"mapred-site": {
-		  "mapreduce.admin.map.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", 
-          "mapreduce.admin.reduce.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", 
-          "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
-          "mapreduce.application.classpath": "$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
-          "mapreduce.application.framework.path": "/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework", 
-          "mapreduce.job.emit-timeline-data": "false",
-          "mapreduce.jobhistory.bind-host": "0.0.0.0",
-          "mapreduce.reduce.shuffle.fetch.retry.enabled": "1", 
-          "mapreduce.reduce.shuffle.fetch.retry.interval-ms": "1000", 
-          "mapreduce.reduce.shuffle.fetch.retry.timeout-ms": "30000",
-          "yarn.app.mapreduce.am.admin-command-opts": "-Dhdp.version=${hdp.version}", 
-          "yarn.app.mapreduce.am.command-opts": "-Xmx546m -Dhdp.version=${hdp.version}"		  
-		},
-		"oozie-env": {
-		  "content": "\n#!/bin/bash\n\nif [ -d \"/usr/lib/bigtop-tomcat\" ]; then\n  export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}\n  export CATALINA_BASE=${CATALINA_BASE:-{{oozie_server_dir}}}\n  export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}\n  export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat\nfi\n\n#Set JAVA HOME\nexport JAVA_HOME={{java_home}}\n\nexport JRE_HOME=${JAVA_HOME}\n\n# Set Oozie specific environment variables here.\n\n# Settings for the Embedded Tomcat that runs Oozie\n# Java System properties for Oozie should be specified in this variable\n#\n# export CATALINA_OPTS=\n\n# Oozie configuration file to load from Oozie configuration directory\n#\n# export OOZIE_CONFIG_FILE=oozie-site.xml\n\n# Oozie logs directory\n#\nexport OOZIE_LOG={{oozie_log_dir}}\n\n# Oozie pid directory\n#\nexport CATALINA_PID={{pid_file}}\n\n#Location of the data for oozie\nexport OOZIE_DATA={{oozie_data_dir}}\n\n# Oozie Log4J configuration file to load from Oozie configuration directo
 ry\n#\n# export OOZIE_LOG4J_FILE=oozie-log4j.properties\n\n# Reload interval of the Log4J configuration file, in seconds\n#\n# export OOZIE_LOG4J_RELOAD=10\n\n# The port Oozie server runs\n#\nexport OOZIE_HTTP_PORT={{oozie_server_port}}\n\n# The admin port Oozie server runs\n#\nexport OOZIE_ADMIN_PORT={{oozie_server_admin_port}}\n\n# The host name Oozie server runs on\n#\n# export OOZIE_HTTP_HOSTNAME=`hostname -f`\n\n# The base URL for callback URLs to Oozie\n#\n# export OOZIE_BASE_URL=\"http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie\"\nexport JAVA_LIBRARY_PATH={{hadoop_lib_home}}/native/Linux-amd64-64\n\n# At least 1 minute of retry time to account for server downtime during\n# upgrade/downgrade\nexport OOZIE_CLIENT_OPTS=\"${OOZIE_CLIENT_OPTS} -Doozie.connection.retry.count=5 \"\n\n# This is needed so that Oozie does not run into OOM or GC Overhead limit\n# exceeded exceptions. If the oozie server is handling large number of\n# workflows/coordinator jobs, the memory setting
 s may need to be revised\nexport CATALINA_OPTS=\"${CATALINA_OPTS} -Xmx2048m -XX:MaxPermSize=256m \""  
-		},
-		"oozie-site": {
-		  "oozie.authentication.simple.anonymous.allowed": "true",
-          "oozie.service.HadoopAccessorService.kerberos.enabled": "false",
-          "oozie.service.SchemaService.wf.ext.schemas": "shell-action-0.1.xsd,shell-action-0.2.xsd,shell-action-0.3.xsd,email-action-0.1.xsd,email-action-0.2.xsd,hive-action-0.2.xsd,hive-action-0.3.xsd,hive-action-0.4.xsd,hive-action-0.5.xsd,sqoop-action-0.2.xsd,sqoop-action-0.3.xsd,sqoop-action-0.4.xsd,ssh-action-0.1.xsd,ssh-action-0.2.xsd,distcp-action-0.1.xsd,distcp-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd",
-          "oozie.service.coord.check.maximum.frequency": "false",
-          "oozie.services": "\n      org.apache.oozie.service.SchedulerService,\n      org.apache.oozie.service.InstrumentationService,\n      org.apache.oozie.service.MemoryLocksService,\n      org.apache.oozie.service.UUIDService,\n      org.apache.oozie.service.ELService,\n      org.apache.oozie.service.AuthorizationService,\n      org.apache.oozie.service.UserGroupInformationService,\n      org.apache.oozie.service.HadoopAccessorService,\n      org.apache.oozie.service.JobsConcurrencyService,\n      org.apache.oozie.service.URIHandlerService,\n      org.apache.oozie.service.DagXLogInfoService,\n      org.apache.oozie.service.SchemaService,\n      org.apache.oozie.service.LiteWorkflowAppService,\n      org.apache.oozie.service.JPAService,\n      org.apache.oozie.service.StoreService,\n      org.apache.oozie.service.CoordinatorStoreService,\n      org.apache.oozie.service.SLAStoreService,\n      org.apache.oozie.service.DBLiteWorkflowStoreService,\n      org.apache.oozie.service.C
 allbackService,\n      org.apache.oozie.service.ShareLibService,\n      org.apache.oozie.service.CallableQueueService,\n      org.apache.oozie.service.ActionService,\n      org.apache.oozie.service.ActionCheckerService,\n      org.apache.oozie.service.RecoveryService,\n      org.apache.oozie.service.PurgeService,\n      org.apache.oozie.service.CoordinatorEngineService,\n      org.apache.oozie.service.BundleEngineService,\n      org.apache.oozie.service.DagEngineService,\n      org.apache.oozie.service.CoordMaterializeTriggerService,\n      org.apache.oozie.service.StatusTransitService,\n      org.apache.oozie.service.PauseTransitService,\n      org.apache.oozie.service.GroupsService,\n      org.apache.oozie.service.ProxyUserService,\n      org.apache.oozie.service.XLogStreamingService,\n      org.apache.oozie.service.JvmPauseMonitorService"  
-		},
-		"storm-env": {
-		  "content": "\n#!/bin/bash\n\n# Set Storm specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\n\n# export STORM_CONF_DIR=\"\"\nSTORM_HOME=/usr/hdp/current/storm-client"  
-		},
-		"storm-site": {
-		  "_storm.min.ruid": "null", 
-          "_storm.thrift.nonsecure.transport": "backtype.storm.security.auth.SimpleTransportPlugin", 
-          "_storm.thrift.secure.transport": "backtype.storm.security.auth.kerberos.KerberosSaslTransportPlugin",
-          "drpc.childopts": "-Xmx768m _JAAS_PLACEHOLDER",
-          "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm-client/lib",
-          "logviewer.childopts": "-Xmx128m _JAAS_PLACEHOLDER",
-          "nimbus.childopts": "-Xmx1024m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm-nimbus/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-nimbus/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM",
-          "supervisor.childopts": "-Xmx256m _JAAS_PLACEHOLDER -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=56431 -javaagent:/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM",
-          "worker.childopts": "-Xmx768m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm-client/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-client/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM",
-		  "ui.childopts": "-Xmx768m _JAAS_PLACEHOLDER",
-		  "storm.thrift.transport": {"remove": "yes"}
-		},
-		"tez-site": {
-		  "tez.am.container.idle.release-timeout-max.millis": "20000", 
-          "tez.am.container.idle.release-timeout-min.millis": "10000",
-          "tez.am.container.reuse.non-local-fallback.enabled": "false",
-          "tez.am.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", 
-          "tez.am.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC", 
-          "tez.am.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
-          "tez.am.max.app.attempts": "2", 
-          "tez.am.maxtaskfailures.per.node": "10", 
-          "tez.am.resource.memory.mb": "1364",
-          "tez.cluster.additional.classpath.prefix": "/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure", 
-          "tez.counters.max": "2000", 
-          "tez.counters.max.groups": "1000", 
-          "tez.generate.debug.artifacts": "false", 
-          "tez.grouping.max-size": "1073741824", 
-          "tez.grouping.min-size": "16777216", 
-          "tez.grouping.split-waves": "1.7", 
-          "tez.history.logging.service.class": "org.apache.tez.dag.history.logging.ats.ATSHistoryLoggingService", 
-          "tez.lib.uris": "/hdp/apps/${hdp.version}/tez/tez.tar.gz", 
-          "tez.runtime.compress": "true", 
-          "tez.runtime.compress.codec": "org.apache.hadoop.io.compress.SnappyCodec",
-          "tez.runtime.io.sort.mb": "272", 
-          "tez.runtime.unordered.output.buffer.size-mb": "51",
-          "tez.session.client.timeout.secs": "-1", 
-          "tez.shuffle-vertex-manager.max-src-fraction": "0.4", 
-          "tez.shuffle-vertex-manager.min-src-fraction": "0.2",
-          "tez.task.am.heartbeat.counter.interval-ms.max": "4000",
-          "tez.task.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}", 
-          "tez.task.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC", 
-          "tez.task.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64", 
-          "tez.task.max-events-per-heartbeat": "500", 
-          "tez.task.resource.memory.mb": "682",
-          "tez.am.container.session.delay-allocation-millis": {"remove": "yes"},
-          "tez.am.env": {"remove": "yes"},
-		  "tez.am.grouping.max-size": {"remove": "yes"},
-		  "tez.am.grouping.min-size": {"remove": "yes"},
-		  "tez.am.grouping.split-waves": {"remove": "yes"},
-		  "tez.am.java.opts": {"remove": "yes"},
-		  "tez.am.shuffle-vertex-manager.max-src-fraction": {"remove": "yes"},
-		  "tez.am.shuffle-vertex-manager.min-src-fraction": {"remove": "yes"},
-		  "tez.runtime.intermediate-input.compress.codec": {"remove": "yes"},
-		  "tez.runtime.intermediate-input.is-compressed": {"remove": "yes"},
-		  "tez.runtime.intermediate-output.compress.codec": {"remove": "yes"},
-		  "tez.runtime.intermediate-output.should-compress": {"remove": "yes"},
-		  "tez.yarn.ats.enabled": {"remove": "yes"}
-		},
-		"webhcat-site": {
-		  "templeton.hadoop": "/usr/hdp/current/hadoop-client/bin/hadoop",
-          "templeton.hcat": "/usr/hdp/current/hive-client/bin/hcat",
-          "templeton.hive.archive": "hdfs:///hdp/apps/${hdp.version}/hive/hive.tar.gz",
-          "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=thrift://vitaha-1.c.pramod-thangali.internal:9083,hive.metastore.sasl.enabled=false,hive.metastore.execute.setugi=true", 
-          "templeton.jar": "/usr/hdp/current/hive-webhcat/share/webhcat/svr/lib/hive-webhcat-*.jar", 
-          "templeton.libjars": "/usr/hdp/current/zookeeper-client/zookeeper.jar",
-          "templeton.pig.archive": "hdfs:///hdp/apps/${hdp.version}/pig/pig.tar.gz",
-          "templeton.sqoop.archive": "hdfs:///hdp/apps/${hdp.version}/sqoop/sqoop.tar.gz", 
-          "templeton.sqoop.home": "sqoop.tar.gz/sqoop", 
-          "templeton.sqoop.path": "sqoop.tar.gz/sqoop/bin/sqoop",
-          "templeton.streaming.jar": "hdfs:///hdp/apps/${hdp.version}/mapreduce/hadoop-streaming.jar"
-		},
-		"yarn-site": {
-		  "hadoop.registry.rm.enabled": "false",
-          "yarn.application.classpath": "$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*", 
-          "yarn.client.nodemanager-connect.max-wait-ms": "60000", 
-          "yarn.client.nodemanager-connect.retry-interval-ms": "10000",
-          "yarn.node-labels.fs-store.retry-policy-spec": "2000, 500", 
-          "yarn.node-labels.fs-store.root-dir": "/system/yarn/node-labels", 
-          "yarn.node-labels.manager-class": "org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager",   
-          "yarn.nodemanager.bind-host": "0.0.0.0",
-          "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage": "90", 
-          "yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb": "1000",
-          "yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "hadoop-yarn", 
-          "yarn.nodemanager.linux-container-executor.cgroups.mount": "false", 
-          "yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage": "false",
-          "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler", 
-          "yarn.nodemanager.log-aggregation.debug-enabled": "false", 
-          "yarn.nodemanager.log-aggregation.num-log-files-per-app": "30", 
-          "yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds": "-1",
-          "yarn.nodemanager.recovery.dir": "{{yarn_log_dir_prefix}}/nodemanager/recovery-state", 
-          "yarn.nodemanager.recovery.enabled": "true",
-          "yarn.nodemanager.resource.cpu-vcores": "1",
-          "yarn.nodemanager.resource.percentage-physical-cpu-limit": "100",
-          "yarn.resourcemanager.bind-host": "0.0.0.0", 
-          "yarn.resourcemanager.connect.max-wait.ms": "900000", 
-          "yarn.resourcemanager.connect.retry-interval.ms": "30000", 
-          "yarn.resourcemanager.fs.state-store.retry-policy-spec": "2000, 500", 
-          "yarn.resourcemanager.fs.state-store.uri": " ", 
-          "yarn.resourcemanager.ha.enabled": "false",
-          "yarn.resourcemanager.recovery.enabled": "true",
-          "yarn.resourcemanager.state-store.max-completed-applications": "${yarn.resourcemanager.max-completed-applications}", 
-          "yarn.resourcemanager.store.class": "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore", 
-          "yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size": "10", 
-          "yarn.resourcemanager.system-metrics-publisher.enabled": "true",
-          "yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled": "false", 
-          "yarn.resourcemanager.work-preserving-recovery.enabled": "true", 
-          "yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms": "10000", 
-          "yarn.resourcemanager.zk-acl": "world:anyone:rwcda",
-          "yarn.resourcemanager.zk-num-retries": "1000", 
-          "yarn.resourcemanager.zk-retry-interval-ms": "1000", 
-          "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore", 
-          "yarn.resourcemanager.zk-timeout-ms": "10000",
-          "yarn.timeline-service.bind-host": "0.0.0.0", 
-          "yarn.timeline-service.client.max-retries": "30", 
-          "yarn.timeline-service.client.retry-interval-ms": "1000",
-          "yarn.timeline-service.http-authentication.simple.anonymous.allowed": "true", 
-          "yarn.timeline-service.http-authentication.type": "simple",
-          "yarn.timeline-service.leveldb-timeline-store.read-cache-size": "104857600", 
-          "yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size": "10000", 
-          "yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size": "10000"  
-		},
-		"hiveserver2-site": {
-          "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator", 
-          "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"
-        },
-		"ranger-hbase-plugin-properties": {
-          "REPOSITORY_CONFIG_PASSWORD": "hbase", 
-          "REPOSITORY_CONFIG_USERNAME": "hbase", 
-          "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
-          "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
-          "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
-          "SSL_TRUSTSTORE_PASSWORD": "changeit", 
-          "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true", 
-          "XAAUDIT.DB.IS_ENABLED": "true", 
-          "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
-          "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
-          "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
-          "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
-          "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
-          "XAAUDIT.HDFS.IS_ENABLED": "false", 
-          "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
-          "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
-          "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
-          "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
-          "ranger-hbase-plugin-enabled": "No"
-       
-        },
-		"ranger-hdfs-plugin-properties": {
-          "REPOSITORY_CONFIG_PASSWORD": "hadoop", 
-          "REPOSITORY_CONFIG_USERNAME": "hadoop", 
-          "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
-          "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
-          "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
-          "SSL_TRUSTSTORE_PASSWORD": "changeit", 
-          "XAAUDIT.DB.IS_ENABLED": "true", 
-          "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
-          "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
-          "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
-          "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
-          "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
-          "XAAUDIT.HDFS.IS_ENABLED": "false", 
-          "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
-          "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
-          "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
-          "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
-          "common.name.for.certificate": "-", 
-          "hadoop.rpc.protection": "-", 
-          "ranger-hdfs-plugin-enabled": "No"
-        },
-		"ranger-hive-plugin-properties": {
-          "REPOSITORY_CONFIG_PASSWORD": "hive", 
-          "REPOSITORY_CONFIG_USERNAME": "hive", 
-          "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
-          "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
-          "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
-          "SSL_TRUSTSTORE_PASSWORD": "changeit", 
-          "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true", 
-          "XAAUDIT.DB.IS_ENABLED": "true", 
-          "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
-          "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
-          "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
-          "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
-          "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
-          "XAAUDIT.HDFS.IS_ENABLED": "false", 
-          "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
-          "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
-          "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
-          "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
-          "common.name.for.certificate": "-", 
-          "jdbc.driverClassName": "org.apache.hive.jdbc.HiveDriver", 
-          "ranger-hive-plugin-enabled": "No"
-        },
-		"ranger-storm-plugin-properties": {
-          "REPOSITORY_CONFIG_PASSWORD": "stormclient", 
-          "REPOSITORY_CONFIG_USERNAME": "stormclient@EXAMPLE.COM", 
-          "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks", 
-          "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword", 
-          "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks", 
-          "SSL_TRUSTSTORE_PASSWORD": "changeit", 
-          "XAAUDIT.DB.IS_ENABLED": "true", 
-          "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%", 
-          "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log", 
-          "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900", 
-          "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60", 
-          "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400", 
-          "XAAUDIT.HDFS.IS_ENABLED": "false", 
-          "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive", 
-          "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10", 
-          "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit", 
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log", 
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60", 
-          "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600", 
-          "common.name.for.certificate": "-", 
-          "ranger-storm-plugin-enabled": "No"
-        }
-      }
-    }
-  ]
-}
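
The catalog deleted above is declarative: each config-type flagged with "merged-copy": "yes" is merged rather than replaced, plain key/value entries overwrite or add the stack defaults, and any key mapped to {"remove": "yes"} is dropped during the 2.1 to 2.2.2 transition. What follows is a minimal Java sketch of that overwrite-or-remove merge, with a flattened string standing in for the JSON remove marker; the class and method names are illustrative assumptions, not the Ambari code that consumed these catalogs.

import java.util.HashMap;
import java.util.Map;

/**
 * Illustrative sketch only: applies the overwrite-or-remove semantics that the
 * catalog above encodes per config key. Names and shape are hypothetical.
 */
public final class CatalogTransitionSketch {

  /** Stands in for the {"remove": "yes"} object form used in the catalog JSON. */
  private static final String REMOVE_MARKER = "remove=yes";

  public static Map<String, String> applyTransitions(Map<String, String> currentConfig,
                                                     Map<String, String> catalogEntries) {
    Map<String, String> merged = new HashMap<>(currentConfig); // "merged-copy": start from existing values
    for (Map.Entry<String, String> entry : catalogEntries.entrySet()) {
      if (REMOVE_MARKER.equals(entry.getValue())) {
        merged.remove(entry.getKey());                         // remove marker -> drop the property
      } else {
        merged.put(entry.getKey(), entry.getValue());          // plain value -> overwrite or add
      }
    }
    return merged;
  }

  public static void main(String[] args) {
    Map<String, String> hiveSite = new HashMap<>();
    hiveSite.put("hive.heapsize", "1024");
    hiveSite.put("hive.exec.parallel", "true");

    Map<String, String> catalog = new HashMap<>();
    catalog.put("hive.heapsize", REMOVE_MARKER);               // removed in the 2.2.2 target
    catalog.put("hive.exec.parallel", "false");                // overwritten default

    System.out.println(applyTransitions(hiveSite, catalog));   // prints {hive.exec.parallel=false}
  }
}

Applied to the hive-site fragment above, the sketch drops hive.heapsize and overwrites hive.exec.parallel, which is the effect the catalog entries for those two keys describe.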


[54/63] [abbrv] ambari git commit: AMBARI-21343. Cleanup relevant Kerberos identities when a component is removed (amagyar)

Posted by ab...@apache.org.
AMBARI-21343. Cleanup relevant Kerberos identities when a component is removed (amagyar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8b5c7db6
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8b5c7db6
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8b5c7db6

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 8b5c7db602a0e1e2dfb214ec1d51884c16219467
Parents: 9d224f7
Author: Attila Magyar <am...@hortonworks.com>
Authored: Thu Jun 29 11:05:25 2017 +0200
Committer: Attila Magyar <am...@hortonworks.com>
Committed: Thu Jun 29 11:05:25 2017 +0200

----------------------------------------------------------------------
 .../ambari/server/controller/AmbariServer.java  |   4 +
 .../controller/DeleteIdentityHandler.java       | 283 +++++++++++++++++++
 .../server/controller/KerberosHelper.java       |   3 +
 .../server/controller/KerberosHelperImpl.java   |  31 +-
 .../OrderedRequestStageContainer.java           |  45 +++
 .../utilities/KerberosIdentityCleaner.java      | 135 +++++++++
 .../AbstractPrepareKerberosServerAction.java    |  19 +-
 .../server/serveraction/kerberos/Component.java |  74 +++++
 .../kerberos/FinalizeKerberosServerAction.java  |  27 +-
 .../kerberos/KerberosServerAction.java          |  27 ++
 .../kerberos/AbstractKerberosDescriptor.java    |  15 +
 .../kerberos/KerberosComponentDescriptor.java   |  15 +
 .../state/kerberos/KerberosDescriptor.java      |   8 -
 .../kerberos/KerberosIdentityDescriptor.java    |  30 ++
 .../kerberos/KerberosServiceDescriptor.java     |   6 +
 .../utilities/KerberosIdentityCleanerTest.java  | 204 +++++++++++++
 ambari-web/app/controllers/main/service/item.js |   6 +-
 17 files changed, 894 insertions(+), 38 deletions(-)
----------------------------------------------------------------------
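
The pieces in this patch fit together roughly as follows: AmbariServer registers a KerberosIdentityCleaner on the Ambari event bus; when a ServiceComponentUninstalledEvent arrives for a kerberized cluster, the cleaner looks up the component's Kerberos descriptor, drops identities that are shared with or referenced by other components, and asks KerberosHelper.deleteIdentity to stage the actual principal and keytab removal. A minimal sketch of that wiring, using only classes touched by this commit (the injector variable stands for the server's Guice injector):

  // Wiring as done in AmbariServer.init(): subscribe the cleaner to the event bus.
  KerberosIdentityCleaner identityCleaner = injector.getInstance(KerberosIdentityCleaner.class);
  identityCleaner.register();

  // When a component is uninstalled, the cleaner ends up calling (see componentRemoved further down):
  // kerberosHelper.deleteIdentity(cluster,
  //     new Component(event.getHostName(), event.getServiceName(), event.getComponentName()),
  //     identitiesToRemove);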


http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
index aeba739..8988be0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
@@ -76,6 +76,7 @@ import org.apache.ambari.server.controller.internal.UserPrivilegeResourceProvide
 import org.apache.ambari.server.controller.internal.ViewPermissionResourceProvider;
 import org.apache.ambari.server.controller.metrics.ThreadPoolEnabledPropertyProvider;
 import org.apache.ambari.server.controller.utilities.KerberosChecker;
+import org.apache.ambari.server.controller.utilities.KerberosIdentityCleaner;
 import org.apache.ambari.server.metrics.system.MetricsService;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.PersistenceType;
@@ -941,6 +942,9 @@ public class AmbariServer {
     BaseService.init(injector.getInstance(RequestAuditLogger.class));
 
     RetryHelper.init(injector.getInstance(Clusters.class), configs.getOperationsRetryAttempts());
+
+    KerberosIdentityCleaner identityCleaner = injector.getInstance(KerberosIdentityCleaner.class);
+    identityCleaner.register();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java
new file mode 100644
index 0000000..aa098b6
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/DeleteIdentityHandler.java
@@ -0,0 +1,283 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller;
+
+import static com.google.common.collect.Sets.newHashSet;
+import static org.apache.ambari.server.controller.KerberosHelperImpl.BASE_LOG_DIR;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.Role;
+import org.apache.ambari.server.RoleCommand;
+import org.apache.ambari.server.actionmanager.HostRoleStatus;
+import org.apache.ambari.server.actionmanager.Stage;
+import org.apache.ambari.server.actionmanager.StageFactory;
+import org.apache.ambari.server.agent.CommandReport;
+import org.apache.ambari.server.controller.internal.RequestResourceFilter;
+import org.apache.ambari.server.serveraction.ServerAction;
+import org.apache.ambari.server.serveraction.kerberos.AbstractPrepareKerberosServerAction;
+import org.apache.ambari.server.serveraction.kerberos.Component;
+import org.apache.ambari.server.serveraction.kerberos.DestroyPrincipalsServerAction;
+import org.apache.ambari.server.serveraction.kerberos.KDCType;
+import org.apache.ambari.server.serveraction.kerberos.KerberosOperationHandler;
+import org.apache.ambari.server.serveraction.kerberos.KerberosServerAction;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
+import org.apache.ambari.server.state.svccomphost.ServiceComponentHostServerActionEvent;
+import org.apache.ambari.server.utils.StageUtils;
+
+/**
+ * Deletes Kerberos identities (principals and keytabs) of a given component.
+ */
+class DeleteIdentityHandler {
+  private final AmbariCustomCommandExecutionHelper customCommandExecutionHelper;
+  private final Integer taskTimeout;
+  private final StageFactory stageFactory;
+  private final AmbariManagementController ambariManagementController;
+
+  public DeleteIdentityHandler(AmbariCustomCommandExecutionHelper customCommandExecutionHelper, Integer taskTimeout, StageFactory stageFactory, AmbariManagementController ambariManagementController) {
+    this.customCommandExecutionHelper = customCommandExecutionHelper;
+    this.taskTimeout = taskTimeout;
+    this.stageFactory = stageFactory;
+    this.ambariManagementController = ambariManagementController;
+  }
+
+  /**
+   * Creates and adds stages to the given stage container for deleting kerberos identities.
+   * The service component that belongs to the identity doesn't need to be installed.
+   */
+  public void addDeleteIdentityStages(Cluster cluster, OrderedRequestStageContainer stageContainer, CommandParams commandParameters, boolean manageIdentities)
+    throws AmbariException
+  {
+    ServiceComponentHostServerActionEvent event = new ServiceComponentHostServerActionEvent("AMBARI_SERVER", StageUtils.getHostName(), System.currentTimeMillis());
+    String hostParamsJson = StageUtils.getGson().toJson(customCommandExecutionHelper.createDefaultHostParams(cluster, cluster.getDesiredStackVersion()));
+    stageContainer.setClusterHostInfo(StageUtils.getGson().toJson(StageUtils.getClusterHostInfo(cluster)));
+    if (manageIdentities) {
+      addPrepareDeleteIdentity(cluster, hostParamsJson, event, commandParameters, stageContainer);
+      addDestroyPrincipals(cluster, hostParamsJson, event, commandParameters, stageContainer);
+      addDeleteKeytab(cluster, newHashSet(commandParameters.component.getHostName()), hostParamsJson, commandParameters, stageContainer);
+    }
+    addFinalize(cluster, hostParamsJson, event, stageContainer, commandParameters);
+  }
+
+  private void addPrepareDeleteIdentity(Cluster cluster,
+                                        String hostParamsJson, ServiceComponentHostServerActionEvent event,
+                                        CommandParams commandParameters,
+                                        OrderedRequestStageContainer stageContainer)
+    throws AmbariException
+  {
+    Stage stage = createServerActionStage(stageContainer.getLastStageId(),
+      cluster,
+      stageContainer.getId(),
+      "Prepare delete identities",
+      "{}",
+      hostParamsJson,
+      PrepareDeleteIdentityServerAction.class,
+      event,
+      commandParameters.asMap(),
+      "Prepare delete identities",
+      taskTimeout);
+    stageContainer.addStage(stage);
+  }
+
+  private void addDestroyPrincipals(Cluster cluster,
+                                    String hostParamsJson, ServiceComponentHostServerActionEvent event,
+                                    CommandParams commandParameters,
+                                    OrderedRequestStageContainer stageContainer)
+    throws AmbariException
+  {
+    Stage stage = createServerActionStage(stageContainer.getLastStageId(),
+      cluster,
+      stageContainer.getId(),
+      "Destroy Principals",
+      "{}",
+      hostParamsJson,
+      DestroyPrincipalsServerAction.class,
+      event,
+      commandParameters.asMap(),
+      "Destroy Principals",
+      Math.max(ServerAction.DEFAULT_LONG_RUNNING_TASK_TIMEOUT_SECONDS, taskTimeout));
+    stageContainer.addStage(stage);
+  }
+
+  private void addDeleteKeytab(Cluster cluster,
+                               Set<String> hostFilter,
+                               String hostParamsJson,
+                               CommandParams commandParameters,
+                               OrderedRequestStageContainer stageContainer)
+    throws AmbariException
+  {
+    Stage stage = createNewStage(stageContainer.getLastStageId(),
+      cluster,
+      stageContainer.getId(),
+      "Delete Keytabs",
+      commandParameters.asJson(),
+      hostParamsJson);
+
+    Map<String, String> requestParams = new HashMap<>();
+    List<RequestResourceFilter> requestResourceFilters = new ArrayList<>();
+    RequestResourceFilter reqResFilter = new RequestResourceFilter("KERBEROS", "KERBEROS_CLIENT", new ArrayList<>(hostFilter));
+    requestResourceFilters.add(reqResFilter);
+
+    ActionExecutionContext actionExecContext = new ActionExecutionContext(
+      cluster.getClusterName(),
+      "REMOVE_KEYTAB",
+      requestResourceFilters,
+      requestParams);
+    customCommandExecutionHelper.addExecutionCommandsToStage(actionExecContext, stage, requestParams, null);
+    stageContainer.addStage(stage);
+  }
+
+  private void addFinalize(Cluster cluster,
+                           String hostParamsJson, ServiceComponentHostServerActionEvent event,
+                           OrderedRequestStageContainer requestStageContainer,
+                           CommandParams commandParameters)
+    throws AmbariException
+  {
+    Stage stage = createServerActionStage(requestStageContainer.getLastStageId(),
+      cluster,
+      requestStageContainer.getId(),
+      "Finalize Operations",
+      "{}",
+      hostParamsJson,
+      DeleteDataDirAction.class,
+      event,
+      commandParameters.asMap(),
+      "Finalize Operations", 300);
+    requestStageContainer.addStage(stage);
+  }
+
+
+  public static class CommandParams {
+    private final Component component;
+    private final List<String> identities;
+    private final String authName;
+    private final File dataDirectory;
+    private final String defaultRealm;
+    private final KDCType kdcType;
+
+    public CommandParams(Component component, List<String> identities, String authName, File dataDirectory, String defaultRealm, KDCType kdcType) {
+      this.component = component;
+      this.identities = identities;
+      this.authName = authName;
+      this.dataDirectory = dataDirectory;
+      this.defaultRealm = defaultRealm;
+      this.kdcType = kdcType;
+    }
+
+    public Map<String, String> asMap() {
+      Map<String, String> commandParameters = new HashMap<>();
+      commandParameters.put(KerberosServerAction.AUTHENTICATED_USER_NAME, authName);
+      commandParameters.put(KerberosServerAction.DEFAULT_REALM, defaultRealm);
+      commandParameters.put(KerberosServerAction.KDC_TYPE, kdcType.name());
+      commandParameters.put(KerberosServerAction.IDENTITY_FILTER, StageUtils.getGson().toJson(identities));
+      commandParameters.put(KerberosServerAction.COMPONENT_FILTER, StageUtils.getGson().toJson(component));
+      commandParameters.put(KerberosServerAction.DATA_DIRECTORY, dataDirectory.getAbsolutePath());
+      return commandParameters;
+    }
+
+    public String asJson() {
+      return StageUtils.getGson().toJson(asMap());
+    }
+  }
+
+  private static class PrepareDeleteIdentityServerAction extends AbstractPrepareKerberosServerAction {
+    @Override
+    public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext) throws AmbariException, InterruptedException {
+      KerberosDescriptor kerberosDescriptor = getKerberosDescriptor();
+      processServiceComponents(
+        getCluster(),
+        kerberosDescriptor,
+        Collections.singletonList(getComponentFilter()),
+        getIdentityFilter(),
+        dataDirectory(),
+        calculateConfig(kerberosDescriptor),
+        new HashMap<String, Map<String, String>>(),
+        false,
+        new HashMap<String, Set<String>>());
+      return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", actionLog.getStdOut(), actionLog.getStdErr());
+    }
+
+    protected Component getComponentFilter() {
+      return StageUtils.getGson().fromJson(getCommandParameterValue(KerberosServerAction.COMPONENT_FILTER), Component.class);
+    }
+
+    private Map<String, Map<String, String>> calculateConfig(KerberosDescriptor kerberosDescriptor) throws AmbariException {
+      return getKerberosHelper().calculateConfigurations(getCluster(), null, kerberosDescriptor.getProperties());
+    }
+
+    private String dataDirectory() {
+      return getCommandParameterValue(getCommandParameters(), DATA_DIRECTORY);
+    }
+
+    private KerberosDescriptor getKerberosDescriptor() throws AmbariException {
+      return getKerberosHelper().getKerberosDescriptor(getCluster());
+    }
+  }
+
+  private Stage createNewStage(long id, Cluster cluster, long requestId, String requestContext, String commandParams, String hostParams) {
+    Stage stage = stageFactory.createNew(requestId,
+      BASE_LOG_DIR + File.pathSeparator + requestId,
+      cluster.getClusterName(),
+      cluster.getClusterId(),
+      requestContext,
+      commandParams,
+      hostParams);
+    stage.setStageId(id);
+    return stage;
+  }
+
+  private Stage createServerActionStage(long id, Cluster cluster, long requestId,
+                                       String requestContext,
+                                       String commandParams, String hostParams,
+                                       Class<? extends ServerAction> actionClass,
+                                       ServiceComponentHostServerActionEvent event,
+                                       Map<String, String> commandParameters, String commandDetail,
+                                       Integer timeout) throws AmbariException {
+
+    Stage stage = createNewStage(id, cluster, requestId, requestContext,  commandParams, hostParams);
+    stage.addServerActionCommand(actionClass.getName(), null, Role.AMBARI_SERVER_ACTION,
+      RoleCommand.EXECUTE, cluster.getClusterName(), event, commandParameters, commandDetail,
+      ambariManagementController.findConfigurationTagsWithOverrides(cluster, null), timeout,
+      false, false);
+
+    return stage;
+  }
+
+  private static class DeleteDataDirAction extends KerberosServerAction {
+
+    @Override
+    public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext) throws AmbariException, InterruptedException {
+      deleteDataDirectory(getCommandParameterValue(DATA_DIRECTORY));
+      return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", actionLog.getStdOut(), actionLog.getStdErr());
+    }
+
+    @Override
+    protected CommandReport processIdentity(Map<String, String> identityRecord, String evaluatedPrincipal, KerberosOperationHandler operationHandler, Map<String, String> kerberosConfiguration, Map<String, Object> requestSharedDataContext) throws AmbariException {
+      return null;
+    }
+  }
+}
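
For orientation, this is roughly how the handler is driven; the sketch mirrors KerberosHelperImpl.deleteIdentity further down in this commit, and the local variables (customCommandExecutionHelper, stageFactory, controller, stageContainer and friends) stand for the injected services and the request container built by the caller:

  DeleteIdentityHandler handler = new DeleteIdentityHandler(
      customCommandExecutionHelper, taskTimeout, stageFactory, controller);

  DeleteIdentityHandler.CommandParams params = new DeleteIdentityHandler.CommandParams(
      component,       // host/service/component whose identities are being removed
      identities,      // names of the identities to delete
      authName,        // authenticated user issuing the request
      dataDirectory,   // temporary staging directory, cleaned up by DeleteDataDirAction
      defaultRealm,
      kdcType);

  // Adds "Prepare delete identities", "Destroy Principals", "Delete Keytabs" and
  // "Finalize Operations" stages (the middle two only when identities are managed).
  handler.addDeleteIdentityStages(cluster, stageContainer, params, manageIdentities);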

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
index ca2dda5..cc0c048 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelper.java
@@ -27,6 +27,7 @@ import java.util.Set;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.internal.RequestStageContainer;
 import org.apache.ambari.server.security.credential.PrincipalKeyCredential;
+import org.apache.ambari.server.serveraction.kerberos.Component;
 import org.apache.ambari.server.serveraction.kerberos.KerberosAdminAuthenticationException;
 import org.apache.ambari.server.serveraction.kerberos.KerberosIdentityDataFileWriter;
 import org.apache.ambari.server.serveraction.kerberos.KerberosInvalidConfigurationException;
@@ -232,6 +233,8 @@ public interface KerberosHelper {
                                          RequestStageContainer requestStageContainer, Boolean manageIdentities)
       throws AmbariException, KerberosOperationException;
 
+  void deleteIdentity(Cluster cluster, Component component, List<String> identities) throws AmbariException, KerberosOperationException;
+
   /**
    * Updates the relevant configurations for the components specified in the service filter.
    * <p/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
index d57fcd2..b30f8f6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
@@ -64,6 +64,7 @@ import org.apache.ambari.server.security.credential.PrincipalKeyCredential;
 import org.apache.ambari.server.security.encryption.CredentialStoreService;
 import org.apache.ambari.server.serveraction.ServerAction;
 import org.apache.ambari.server.serveraction.kerberos.CleanupServerAction;
+import org.apache.ambari.server.serveraction.kerberos.Component;
 import org.apache.ambari.server.serveraction.kerberos.ConfigureAmbariIdentitiesServerAction;
 import org.apache.ambari.server.serveraction.kerberos.CreateKeytabFilesServerAction;
 import org.apache.ambari.server.serveraction.kerberos.CreatePrincipalsServerAction;
@@ -130,7 +131,7 @@ import com.google.inject.persist.Transactional;
 @Singleton
 public class KerberosHelperImpl implements KerberosHelper {
 
-  private static final String BASE_LOG_DIR = "/tmp/ambari";
+  public static final String BASE_LOG_DIR = "/tmp/ambari";
 
   private static final Logger LOG = LoggerFactory.getLogger(KerberosHelperImpl.class);
 
@@ -296,6 +297,34 @@ public class KerberosHelperImpl implements KerberosHelper {
         requestStageContainer, new DeletePrincipalsAndKeytabsHandler());
   }
 
+  /**
+   * Deletes the kerberos identities of the given component, even if the component is already deleted.
+   */
+  @Override
+  public void deleteIdentity(Cluster cluster, Component component, List<String> identities) throws AmbariException, KerberosOperationException {
+    if (identities.isEmpty()) {
+      return;
+    }
+    KerberosDetails kerberosDetails = getKerberosDetails(cluster, null);
+    validateKDCCredentials(kerberosDetails, cluster);
+    File dataDirectory = createTemporaryDirectory();
+    RoleCommandOrder roleCommandOrder = ambariManagementController.getRoleCommandOrder(cluster);
+    DeleteIdentityHandler handler = new DeleteIdentityHandler(customCommandExecutionHelper, configuration.getDefaultServerTaskTimeout(), stageFactory, ambariManagementController);
+    DeleteIdentityHandler.CommandParams commandParameters = new DeleteIdentityHandler.CommandParams(
+      component,
+      identities,
+      ambariManagementController.getAuthName(),
+      dataDirectory,
+      kerberosDetails.getDefaultRealm(),
+      kerberosDetails.getKdcType());
+    OrderedRequestStageContainer stageContainer = new OrderedRequestStageContainer(
+      roleGraphFactory,
+      roleCommandOrder,
+      new RequestStageContainer(actionManager.getNextRequestId(), null, requestFactory, actionManager));
+    handler.addDeleteIdentityStages(cluster, stageContainer, commandParameters, kerberosDetails.manageIdentities());
+    stageContainer.getRequestStageContainer().persist();
+  }
+
   @Override
   public void configureServices(Cluster cluster, Map<String, Collection<String>> serviceFilter)
       throws AmbariException, KerberosInvalidConfigurationException {

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/controller/OrderedRequestStageContainer.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/OrderedRequestStageContainer.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/OrderedRequestStageContainer.java
new file mode 100644
index 0000000..6d8b5a3
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/OrderedRequestStageContainer.java
@@ -0,0 +1,45 @@
+package org.apache.ambari.server.controller;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.actionmanager.Stage;
+import org.apache.ambari.server.controller.internal.RequestStageContainer;
+import org.apache.ambari.server.metadata.RoleCommandOrder;
+import org.apache.ambari.server.stageplanner.RoleGraph;
+import org.apache.ambari.server.stageplanner.RoleGraphFactory;
+
+/**
+ * A wrapper around RequestStageContainer that takes the role command order into consideration when adding stages.
+ */
+public class OrderedRequestStageContainer {
+  private final RoleGraphFactory roleGraphFactory;
+  private final RoleCommandOrder roleCommandOrder;
+  private final RequestStageContainer requestStageContainer;
+
+  public OrderedRequestStageContainer(RoleGraphFactory roleGraphFactory, RoleCommandOrder roleCommandOrder, RequestStageContainer requestStageContainer) {
+    this.roleGraphFactory = roleGraphFactory;
+    this.roleCommandOrder = roleCommandOrder;
+    this.requestStageContainer = requestStageContainer;
+  }
+
+  public void addStage(Stage stage) throws AmbariException {
+    RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
+    roleGraph.build(stage);
+    requestStageContainer.addStages(roleGraph.getStages());
+  }
+
+  public long getLastStageId() {
+    return requestStageContainer.getLastStageId();
+  }
+
+  public long getId() {
+    return requestStageContainer.getId();
+  }
+
+  public RequestStageContainer getRequestStageContainer() {
+    return requestStageContainer;
+  }
+
+  public void setClusterHostInfo(String clusterHostInfo) {
+    this.requestStageContainer.setClusterHostInfo(clusterHostInfo);
+  }
+}
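
A short usage sketch, taken from the way KerberosHelperImpl.deleteIdentity builds it later in this commit; every stage added through the wrapper is re-planned through a RoleGraph so its commands respect the cluster's role command order:

  OrderedRequestStageContainer ordered = new OrderedRequestStageContainer(
      roleGraphFactory,
      roleCommandOrder,
      new RequestStageContainer(actionManager.getNextRequestId(), null, requestFactory, actionManager));

  ordered.addStage(stage);                       // stage is split and ordered via RoleGraph.build(stage)
  ordered.getRequestStageContainer().persist();  // persist the request once all stages are added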

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleaner.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleaner.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleaner.java
new file mode 100644
index 0000000..0a8462f
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleaner.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.utilities;
+
+import static org.apache.ambari.server.state.kerberos.AbstractKerberosDescriptor.nullToEmpty;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.KerberosHelper;
+import org.apache.ambari.server.events.ServiceComponentUninstalledEvent;
+import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
+import org.apache.ambari.server.serveraction.kerberos.Component;
+import org.apache.ambari.server.serveraction.kerberos.KerberosMissingAdminCredentialsException;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.SecurityType;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.eventbus.Subscribe;
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+
+@Singleton
+public class KerberosIdentityCleaner {
+  private final static Logger LOG = LoggerFactory.getLogger(KerberosIdentityCleaner.class);
+  private final AmbariEventPublisher eventPublisher;
+  private final KerberosHelper kerberosHelper;
+  private final Clusters clusters;
+
+  @Inject
+  public KerberosIdentityCleaner(AmbariEventPublisher eventPublisher, KerberosHelper kerberosHelper, Clusters clusters) {
+    this.eventPublisher = eventPublisher;
+    this.kerberosHelper = kerberosHelper;
+    this.clusters = clusters;
+  }
+
+  public void register() {
+    this.eventPublisher.register(this);
+  }
+
+  /**
+   * Removes Kerberos identities (principals and keytabs) after a component was uninstalled.
+   * Keeps an identity if either its principal or its keytab is used by another service.
+   */
+  @Subscribe
+  public void componentRemoved(ServiceComponentUninstalledEvent event) throws KerberosMissingAdminCredentialsException {
+    try {
+      Cluster cluster = clusters.getCluster(event.getClusterId());
+      if (cluster.getSecurityType() != SecurityType.KERBEROS) {
+        return;
+      }
+      KerberosComponentDescriptor descriptor = componentDescriptor(cluster, event.getServiceName(), event.getComponentName());
+      if (descriptor == null) {
+        LOG.info("No kerberos descriptor for {}", event);
+        return;
+      }
+      List<String> identitiesToRemove = identityNames(skipSharedIdentities(descriptor.getIdentitiesSkipReferences(), cluster, event));
+      LOG.info("Deleting identities {} after event {}", identitiesToRemove, event);
+      kerberosHelper.deleteIdentity(cluster, new Component(event.getHostName(), event.getServiceName(), event.getComponentName()), identitiesToRemove);
+    } catch (Exception e) {
+      LOG.error("Error while deleting kerberos identity after an event: " + event, e);
+    }
+  }
+
+  private KerberosComponentDescriptor componentDescriptor(Cluster cluster, String serviceName, String componentName) throws AmbariException {
+    KerberosServiceDescriptor serviceDescriptor = kerberosHelper.getKerberosDescriptor(cluster).getService(serviceName);
+    return serviceDescriptor == null ? null : serviceDescriptor.getComponent(componentName);
+  }
+
+  private List<String> identityNames(List<KerberosIdentityDescriptor> identities) {
+    List<String> result = new ArrayList<>();
+    for (KerberosIdentityDescriptor each : identities) { result.add(each.getName()); }
+    return result;
+  }
+
+  private List<KerberosIdentityDescriptor> skipSharedIdentities(List<KerberosIdentityDescriptor> candidates, Cluster cluster, ServiceComponentUninstalledEvent event) throws AmbariException {
+    List<KerberosIdentityDescriptor> activeIdentities = activeIdentities(cluster, kerberosHelper.getKerberosDescriptor(cluster), event);
+    List<KerberosIdentityDescriptor> result = new ArrayList<>();
+    for (KerberosIdentityDescriptor candidate : candidates) {
+      if (!candidate.isShared(activeIdentities)) {
+        result.add(candidate);
+      } else {
+        LOG.debug("Skip removing shared identity: {}", candidate.getName());
+      }
+    }
+    return result;
+  }
+
+  private List<KerberosIdentityDescriptor> activeIdentities(Cluster cluster, KerberosDescriptor root, ServiceComponentUninstalledEvent event) {
+    List<KerberosIdentityDescriptor> result = new ArrayList<>();
+    result.addAll(nullToEmpty(root.getIdentities()));
+    for (Map.Entry<String, Service> serviceEntry : cluster.getServices().entrySet()) {
+      KerberosServiceDescriptor serviceDescriptor = root.getService(serviceEntry.getKey());
+      if (serviceDescriptor == null) {
+        continue;
+      }
+      result.addAll(nullToEmpty(serviceDescriptor.getIdentities()));
+      for (String componentName : serviceEntry.getValue().getServiceComponents().keySet()) {
+        if (!sameComponent(event, componentName, serviceEntry.getKey())) {
+          result.addAll(serviceDescriptor.getComponentIdentities(componentName));
+        }
+      }
+    }
+    return result;
+  }
+
+  private boolean sameComponent(ServiceComponentUninstalledEvent event, String componentName, String serviceName) {
+    return event.getServiceName().equals(serviceName) && event.getComponentName().equals(componentName);
+  }
+}
+
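
The filtering in skipSharedIdentities boils down to: delete only identities the component owns outright. A condensed sketch of the rule (activeIdentities is the list of identities still declared by installed components, as computed in activeIdentities):

  List<String> identitiesToRemove = new ArrayList<>();
  for (KerberosIdentityDescriptor candidate : descriptor.getIdentitiesSkipReferences()) {
    // isShared(...) is added to KerberosIdentityDescriptor in this commit: an identity is
    // shared when another active identity uses the same principal value or keytab file.
    if (!candidate.isShared(activeIdentities)) {
      identitiesToRemove.add(candidate.getName());
    }
  }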

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/AbstractPrepareKerberosServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/AbstractPrepareKerberosServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/AbstractPrepareKerberosServerAction.java
index 7aac346..dd2b223 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/AbstractPrepareKerberosServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/AbstractPrepareKerberosServerAction.java
@@ -21,6 +21,7 @@ package org.apache.ambari.server.serveraction.kerberos;
 import java.io.File;
 import java.io.IOException;
 import java.lang.reflect.Type;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -65,7 +66,7 @@ public abstract class AbstractPrepareKerberosServerAction extends KerberosServer
     throw new UnsupportedOperationException();
   }
 
-  KerberosHelper getKerberosHelper() {
+  protected KerberosHelper getKerberosHelper() {
     return kerberosHelper;
   }
 
@@ -76,6 +77,20 @@ public abstract class AbstractPrepareKerberosServerAction extends KerberosServer
                                     Map<String, Map<String, String>> kerberosConfigurations,
                                     boolean includeAmbariIdentity,
                                     Map<String, Set<String>> propertiesToBeIgnored) throws AmbariException {
+    List<Component> components = new ArrayList<>();
+    for (ServiceComponentHost each : schToProcess) {
+      components.add(Component.fromServiceComponentHost(each));
+    }
+    processServiceComponents(cluster, kerberosDescriptor, components, identityFilter, dataDirectory, currentConfigurations, kerberosConfigurations, includeAmbariIdentity, propertiesToBeIgnored);
+  }
+
+  protected void processServiceComponents(Cluster cluster, KerberosDescriptor kerberosDescriptor,
+                                          List<Component> schToProcess,
+                                          Collection<String> identityFilter, String dataDirectory,
+                                          Map<String, Map<String, String>> currentConfigurations,
+                                          Map<String, Map<String, String>> kerberosConfigurations,
+                                          boolean includeAmbariIdentity,
+                                          Map<String, Set<String>> propertiesToBeIgnored) throws AmbariException {
 
     actionLog.writeStdOut("Processing Kerberos identities and configurations");
 
@@ -113,7 +128,7 @@ public abstract class AbstractPrepareKerberosServerAction extends KerberosServer
         // Iterate over the components installed on the current host to get the service and
         // component-level Kerberos descriptors in order to determine which principals,
         // keytab files, and configurations need to be created or updated.
-        for (ServiceComponentHost sch : schToProcess) {
+        for (Component sch : schToProcess) {
           String hostName = sch.getHostName();
 
           String serviceName = sch.getServiceName();

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/Component.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/Component.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/Component.java
new file mode 100644
index 0000000..4f1ee52
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/Component.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.serveraction.kerberos;
+
+import org.apache.ambari.server.state.ServiceComponentHost;
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
+
+public class Component {
+  private final String hostName;
+  private final String serviceName;
+  private final String serviceComponentName;
+
+  public static Component fromServiceComponentHost(ServiceComponentHost serviceComponentHost) {
+    return new Component(
+      serviceComponentHost.getHostName(),
+      serviceComponentHost.getServiceName(),
+      serviceComponentHost.getServiceComponentName());
+  }
+
+  public Component(String hostName, String serviceName, String serviceComponentName) {
+    this.hostName = hostName;
+    this.serviceName = serviceName;
+    this.serviceComponentName = serviceComponentName;
+  }
+
+  public String getHostName() {
+    return hostName;
+  }
+
+  public String getServiceName() {
+    return serviceName;
+  }
+
+  public String getServiceComponentName() {
+    return serviceComponentName;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    Component component = (Component) o;
+    return new EqualsBuilder()
+      .append(hostName, component.hostName)
+      .append(serviceName, component.serviceName)
+      .append(serviceComponentName, component.serviceComponentName)
+      .isEquals();
+  }
+
+  @Override
+  public int hashCode() {
+    return new HashCodeBuilder(17, 37)
+      .append(hostName)
+      .append(serviceName)
+      .append(serviceComponentName)
+      .toHashCode();
+  }
+}
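
Component is a plain value object: equality and hash code are derived from all three fields, which is what lets it be serialized as the component_filter command parameter and compared directly in KerberosIdentityCleanerTest. A small sketch:

  Component a = new Component("c6401", "OOZIE", "OOZIE_SERVER");
  Component b = new Component("c6401", "OOZIE", "OOZIE_SERVER");
  // value semantics: equal fields imply equal objects and equal hash codes
  assert a.equals(b) && a.hashCode() == b.hashCode();

  // round-trip through JSON, as done for the COMPONENT_FILTER command parameter
  String json = StageUtils.getGson().toJson(a);
  Component restored = StageUtils.getGson().fromJson(json, Component.class);
  assert a.equals(restored);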

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/FinalizeKerberosServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/FinalizeKerberosServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/FinalizeKerberosServerAction.java
index 2742390..10ad48b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/FinalizeKerberosServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/FinalizeKerberosServerAction.java
@@ -18,8 +18,6 @@
 
 package org.apache.ambari.server.serveraction.kerberos;
 
-import java.io.File;
-import java.io.IOException;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -36,7 +34,6 @@ import org.apache.ambari.server.state.SecurityState;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.utils.ShellCommandUtil;
 import org.apache.ambari.server.utils.StageUtils;
-import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -208,29 +205,9 @@ public class FinalizeKerberosServerAction extends KerberosServerAction {
       processIdentities(requestSharedDataContext);
       requestSharedDataContext.remove(this.getClass().getName() + "_visited");
     }
-
-    // Make sure this is a relevant directory. We don't want to accidentally allow _ANY_ directory
-    // to be deleted.
-    if ((dataDirectoryPath != null) && dataDirectoryPath.contains("/" + DATA_DIRECTORY_PREFIX)) {
-      File dataDirectory = new File(dataDirectoryPath);
-      File dataDirectoryParent = dataDirectory.getParentFile();
-
-      // Make sure this directory has a parent and it is writeable, else we wont be able to
-      // delete the directory
-      if ((dataDirectoryParent != null) && dataDirectory.isDirectory() &&
-          dataDirectoryParent.isDirectory() && dataDirectoryParent.canWrite()) {
-        try {
-          FileUtils.deleteDirectory(dataDirectory);
-        } catch (IOException e) {
-          // We should log this exception, but don't let it fail the process since if we got to this
-          // KerberosServerAction it is expected that the the overall process was a success.
-          String message = String.format("The data directory (%s) was not deleted due to an error condition - {%s}",
-              dataDirectory.getAbsolutePath(), e.getMessage());
-          LOG.warn(message, e);
-        }
-      }
-    }
+    deleteDataDirectory(dataDirectoryPath);
 
     return createCommandReport(0, HostRoleStatus.COMPLETED, "{}", actionLog.getStdOut(), actionLog.getStdErr());
   }
+
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosServerAction.java
index d404133..2e331bb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/KerberosServerAction.java
@@ -35,6 +35,7 @@ import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.utils.StageUtils;
+import org.apache.commons.io.FileUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -82,6 +83,8 @@ public abstract class KerberosServerAction extends AbstractServerAction {
    */
   public static final String IDENTITY_FILTER = "identity_filter";
 
+  public static final String COMPONENT_FILTER = "component_filter";
+
   /**
    * A (command parameter) property name used to hold the relevant KDC type value.  See
    * {@link org.apache.ambari.server.serveraction.kerberos.KDCType} for valid values
@@ -536,4 +539,28 @@ public abstract class KerberosServerAction extends AbstractServerAction {
 
     return commandReport;
   }
+
+  protected void deleteDataDirectory(String dataDirectoryPath) {
+    // Make sure this is a relevant directory. We don't want to accidentally allow _ANY_ directory
+    // to be deleted.
+    if ((dataDirectoryPath != null) && dataDirectoryPath.contains("/" + DATA_DIRECTORY_PREFIX)) {
+      File dataDirectory = new File(dataDirectoryPath);
+      File dataDirectoryParent = dataDirectory.getParentFile();
+
+      // Make sure this directory has a parent and it is writable, else we won't be able to
+      // delete the directory
+      if ((dataDirectoryParent != null) && dataDirectory.isDirectory() &&
+          dataDirectoryParent.isDirectory() && dataDirectoryParent.canWrite()) {
+        try {
+          FileUtils.deleteDirectory(dataDirectory);
+        } catch (IOException e) {
+          // We should log this exception, but don't let it fail the process since if we got to this
+          // KerberosServerAction it is expected that the overall process was a success.
+          String message = String.format("The data directory (%s) was not deleted due to an error condition - {%s}",
+              dataDirectory.getAbsolutePath(), e.getMessage());
+          LOG.warn(message, e);
+        }
+      }
+    }
+  }
 }
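
The directory removal that used to live in FinalizeKerberosServerAction is now shared here so that DeleteIdentityHandler.DeleteDataDirAction can reuse it. An illustrative call from a subclass, using the DATA_DIRECTORY command parameter defined in this class:

  // Only directories whose path contains "/" + DATA_DIRECTORY_PREFIX and whose parent is
  // writable are deleted; a null or unrelated path makes this a no-op.
  deleteDataDirectory(getCommandParameterValue(DATA_DIRECTORY));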

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java
index 397f384..38100ac 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/AbstractKerberosDescriptor.java
@@ -18,6 +18,9 @@
 
 package org.apache.ambari.server.state.kerberos;
 
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 
@@ -181,6 +184,18 @@ public abstract class AbstractKerberosDescriptor {
     return root;
   }
 
+  public static <T> Collection<T> nullToEmpty(Collection<T> collection) {
+    return collection == null ? Collections.<T>emptyList() : collection;
+  }
+
+  public static <T> List<T> nullToEmpty(List<T> list) {
+    return list == null ? Collections.<T>emptyList() : list;
+  }
+
+  public static <K,V> Map<K,V> nullToEmpty(Map<K,V> collection) {
+    return collection == null ? Collections.<K,V>emptyMap() : collection;
+  }
+
   @Override
   public int hashCode() {
     return 37 *

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosComponentDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosComponentDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosComponentDescriptor.java
index 768a17e..41d1f65 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosComponentDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosComponentDescriptor.java
@@ -17,7 +17,9 @@
  */
 package org.apache.ambari.server.state.kerberos;
 
+import java.util.ArrayList;
 import java.util.Collection;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -111,6 +113,19 @@ public class KerberosComponentDescriptor extends AbstractKerberosDescriptorConta
     return null;
   }
 
+  /**
+   * @return identities which are not references to other identities
+   */
+  public List<KerberosIdentityDescriptor> getIdentitiesSkipReferences() {
+    List<KerberosIdentityDescriptor> result = new ArrayList<>();
+    for (KerberosIdentityDescriptor each : nullToEmpty(getIdentities())) {
+      if (!each.getReferencedServiceName().isPresent() && each.getName() != null && !each.getName().startsWith("/")) {
+        result.add(each);
+      }
+    }
+    return result;
+  }
+
   @Override
   public int hashCode() {
     return 35 * super.hashCode();
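
getIdentitiesSkipReferences returns only the identities a component owns: entries whose name starts with "/" or that reference another service's identity are treated as references and skipped. With the OOZIE_SERVER descriptor used in KerberosIdentityCleanerTest this means, roughly:

  List<KerberosIdentityDescriptor> own = oozieServerDescriptor.getIdentitiesSkipReferences();
  // own contains "oozie_server1" and "oozie_server2";
  // the reference "/HDFS/NAMENODE/hdfs" is not included and is therefore never deleted.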

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptor.java
index f9dfa4a..eba1b3a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosDescriptor.java
@@ -461,12 +461,4 @@ public class KerberosDescriptor extends AbstractKerberosDescriptorContainer {
       }
     }
   }
-
-  private static <T> Collection<T> nullToEmpty(Collection<T> collection) {
-    return collection == null ? Collections.<T>emptyList() : collection;
-  }
-
-  private static <K,V> Map<K,V> nullToEmpty(Map<K,V> collection) {
-    return collection == null ? Collections.<K,V>emptyMap() : collection;
-  }
 }
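
The nullToEmpty helpers were only moved: they used to be private to KerberosDescriptor and are now public (with an added List overload) on AbstractKerberosDescriptor, so that KerberosIdentityCleaner and the new descriptor methods can iterate possibly-null collections without guards. Typical use, as in the cleaner:

  // with: import static org.apache.ambari.server.state.kerberos.AbstractKerberosDescriptor.nullToEmpty;
  // getIdentities() may legitimately return null; nullToEmpty turns that into an empty list
  for (KerberosIdentityDescriptor identity : nullToEmpty(serviceDescriptor.getIdentities())) {
    names.add(identity.getName());
  }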

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
index e180f7a..2023793 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosIdentityDescriptor.java
@@ -17,8 +17,10 @@
  */
 package org.apache.ambari.server.state.kerberos;
 
+import java.util.List;
 import java.util.Map;
 
+import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.collections.Predicate;
 import org.apache.ambari.server.collections.PredicateUtils;
 
@@ -369,6 +371,34 @@ public class KerberosIdentityDescriptor extends AbstractKerberosDescriptor {
     }
   }
 
+  /**
+   * @return true if this identity either has the same principal or keytab as any of the given identities.
+   */
+  public boolean isShared(List<KerberosIdentityDescriptor> identities) throws AmbariException {
+    for (KerberosIdentityDescriptor each : identities) {
+      if (hasSamePrincipal(each) || hasSameKeytab(each)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  private boolean hasSameKeytab(KerberosIdentityDescriptor that) {
+    try {
+      return this.getKeytabDescriptor().getFile().equals(that.getKeytabDescriptor().getFile());
+    } catch (NullPointerException e) {
+      return false;
+    }
+  }
+
+  private boolean hasSamePrincipal(KerberosIdentityDescriptor that) {
+    try {
+      return this.getPrincipalDescriptor().getValue().equals(that.getPrincipalDescriptor().getValue());
+    } catch (NullPointerException e) {
+      return false;
+    }
+  }
+
   @Override
   public int hashCode() {
     return super.hashCode() +

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosServiceDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosServiceDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosServiceDescriptor.java
index 8507bfa..0777327 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosServiceDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/kerberos/KerberosServiceDescriptor.java
@@ -272,6 +272,12 @@ public class KerberosServiceDescriptor extends AbstractKerberosDescriptorContain
     return map;
   }
 
+  public List<KerberosIdentityDescriptor> getComponentIdentities(String componentName) {
+    return getComponent(componentName) != null
+      ? nullToEmpty(getComponent(componentName).getIdentities())
+      : Collections.<KerberosIdentityDescriptor>emptyList();
+  }
+
   @Override
   public int hashCode() {
     return super.hashCode() +

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
new file mode 100644
index 0000000..d22c92e
--- /dev/null
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/utilities/KerberosIdentityCleanerTest.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.utilities;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.expectLastCall;
+import static org.easymock.EasyMock.reset;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.ambari.server.controller.KerberosHelper;
+import org.apache.ambari.server.events.ServiceComponentUninstalledEvent;
+import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
+import org.apache.ambari.server.serveraction.kerberos.Component;
+import org.apache.ambari.server.serveraction.kerberos.KerberosMissingAdminCredentialsException;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.SecurityType;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
+import org.easymock.EasyMockRule;
+import org.easymock.EasyMockSupport;
+import org.easymock.Mock;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+public class KerberosIdentityCleanerTest extends EasyMockSupport {
+  @Rule public EasyMockRule mocks = new EasyMockRule(this);
+  private static final String HOST = "c6401";
+  private static final String OOZIE = "OOZIE";
+  private static final String OOZIE_SERVER = "OOZIE_SERVER";
+  private static final String OOZIE_2 = "OOZIE2";
+  private static final String OOZIE_SERVER_2 = "OOZIE_SERVER2";
+  private static final String YARN_2 = "YARN2";
+  private static final String RESOURCE_MANAGER_2 = "RESOURCE_MANAGER2";
+  private static final String YARN = "YARN";
+  private static final String RESOURCE_MANAGER = "RESOURCE_MANAGER";
+  private static final long CLUSTER_ID = 1;
+  @Mock private KerberosHelper kerberosHelper;
+  @Mock private Clusters clusters;
+  @Mock private Cluster cluster;
+  private Map<String, Service> installedServices = new HashMap<>();
+  private KerberosDescriptorFactory kerberosDescriptorFactory = new KerberosDescriptorFactory();
+  private KerberosIdentityCleaner kerberosIdentityCleaner;
+  private KerberosDescriptor kerberosDescriptor;
+
+  @Test
+  public void removesAllKerberosIdentitesOfComponentAfterComponentWasUninstalled() throws Exception {
+    installComponent(OOZIE, OOZIE_SERVER);
+    kerberosHelper.deleteIdentity(cluster, new Component(HOST, OOZIE, OOZIE_SERVER), newArrayList("oozie_server1", "oozie_server2"));
+    expectLastCall().once();
+    replayAll();
+    uninstallComponent(OOZIE, OOZIE_SERVER, HOST);
+    verifyAll();
+  }
+
+  @Test
+  public void skipsRemovingIdentityWhenServiceDoesNotExist() throws Exception {
+    replayAll();
+    uninstallComponent("NO_SUCH_SERVICE", OOZIE_SERVER, HOST);
+    verifyAll();
+  }
+
+  @Test
+  public void skipsRemovingIdentityThatIsSharedByPrincipalName() throws Exception {
+    installComponent(OOZIE, OOZIE_SERVER);
+    installComponent(OOZIE_2, OOZIE_SERVER_2);
+    kerberosHelper.deleteIdentity(cluster, new Component(HOST, OOZIE, OOZIE_SERVER), newArrayList("oozie_server1"));
+    expectLastCall().once();
+    replayAll();
+    uninstallComponent(OOZIE, OOZIE_SERVER, HOST);
+    verifyAll();
+  }
+
+  @Test
+  public void skipsRemovingIdentityThatIsSharedByKeyTabFilePath() throws Exception {
+    installComponent(YARN, RESOURCE_MANAGER);
+    installComponent(YARN_2, RESOURCE_MANAGER_2);
+    kerberosHelper.deleteIdentity(cluster, new Component(HOST, YARN, RESOURCE_MANAGER), newArrayList("rm_unique"));
+    expectLastCall().once();
+    replayAll();
+    uninstallComponent(YARN, RESOURCE_MANAGER, HOST);
+    verifyAll();
+  }
+
+  @Test
+  public void skipsRemovingIdentityWhenClusterIsNotKerberized() throws Exception {
+    reset(cluster);
+    expect(cluster.getSecurityType()).andReturn(SecurityType.NONE).anyTimes();
+    replayAll();
+    uninstallComponent(OOZIE, OOZIE_SERVER, HOST);
+    verifyAll();
+  }
+
+  private void installComponent(String serviceName, final String componentName) {
+    Service service = createMock(serviceName + "_" + componentName, Service.class);
+    installedServices.put(serviceName, service);
+    expect(service.getServiceComponents()).andReturn(new HashMap<String, ServiceComponent>() {{
+      put(componentName, null);
+    }}).anyTimes();
+  }
+
+  private void uninstallComponent(String service, String component, String host) throws KerberosMissingAdminCredentialsException {
+    kerberosIdentityCleaner.componentRemoved(new ServiceComponentUninstalledEvent(CLUSTER_ID, "any", "any", service, component, host, false));
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    kerberosIdentityCleaner = new KerberosIdentityCleaner(new AmbariEventPublisher(), kerberosHelper, clusters);
+    kerberosDescriptor = kerberosDescriptorFactory.createInstance("{" +
+      "  'services': [" +
+      "    {" +
+      "      'name': 'OOZIE'," +
+      "      'components': [" +
+      "        {" +
+      "          'name': 'OOZIE_SERVER'," +
+      "          'identities': [" +
+      "            {" +
+      "              'name': '/HDFS/NAMENODE/hdfs'" +
+      "            }," +
+      "            {" +
+      "              'name': 'oozie_server1'" +
+      "            }," +"" +
+      "            {" +
+      "              'name': 'oozie_server2'," +
+      "              'principal': { 'value': 'oozie/_HOST@EXAMPLE.COM' }" +
+      "            }" +
+      "          ]" +
+      "        }" +
+      "      ]" +
+      "    }," +
+      "    {" +
+      "      'name': 'OOZIE2'," +
+      "      'components': [" +
+      "        {" +
+      "          'name': 'OOZIE_SERVER2'," +
+      "          'identities': [" +
+      "            {" +
+      "              'name': 'oozie_server3'," +
+      "              'principal': { 'value': 'oozie/_HOST@EXAMPLE.COM' }" +
+      "            }" +"" +
+      "          ]" +
+      "        }" +
+      "      ]" +
+      "    }," +
+      "    {" +
+      "      'name': 'YARN'," +
+      "      'components': [" +
+      "        {" +
+      "          'name': 'RESOURCE_MANAGER'," +
+      "          'identities': [" +
+      "            {" +
+      "              'name': 'rm_unique'" +
+      "            }," +
+      "            {" +
+      "              'name': 'rm1-shared'," +
+      "              'keytab' : { 'file' : 'shared' }" +
+      "            }" +
+      "          ]" +
+      "        }" +
+      "      ]" +
+      "    }," +
+      "    {" +
+      "      'name': 'YARN2'," +
+      "      'components': [" +
+      "        {" +
+      "          'name': 'RESOURCE_MANAGER2'," +
+      "          'identities': [" +
+      "            {" +
+      "              'name': 'rm2-shared'," +
+      "              'keytab' : { 'file' : 'shared' }" +
+      "            }" +
+      "          ]" +
+      "        }" +
+      "      ]" +
+      "    }" +
+      "  ]" +
+      "}");
+    expect(clusters.getCluster(CLUSTER_ID)).andReturn(cluster).anyTimes();
+    expect(cluster.getSecurityType()).andReturn(SecurityType.KERBEROS).anyTimes();
+    expect(kerberosHelper.getKerberosDescriptor(cluster)).andReturn(kerberosDescriptor).anyTimes();
+    expect(cluster.getServices()).andReturn(installedServices).anyTimes();
+  }
+}
\ No newline at end of file
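
A note for readers of the test above: its expectations reduce to one filtering rule. An identity of the removed component is deleted unless it is a reference to another service's identity (a name starting with "/", such as /HDFS/NAMENODE/hdfs) or it is still shared, by principal name or by keytab file path, with a component that remains installed. The sketch below is a minimal, self-contained illustration of that rule; Identity and IdentityCleanupSketch are hypothetical stand-ins, not Ambari's actual KerberosIdentityCleaner or descriptor classes.

import java.util.ArrayList;
import java.util.List;

/** Illustrative sketch only -- mirrors the rule the KerberosIdentityCleanerTest exercises. */
public class IdentityCleanupSketch {

  /** Minimal stand-in for a kerberos identity descriptor (hypothetical type). */
  public static class Identity {
    final String name;
    final String principal;
    final String keytabFile;
    Identity(String name, String principal, String keytabFile) {
      this.name = name;
      this.principal = principal;
      this.keytabFile = keytabFile;
    }
  }

  /** Names of the removed component's identities that are safe to delete. */
  public static List<String> removableIdentities(List<Identity> removedComponent,
                                                 List<Identity> stillInstalled) {
    List<String> result = new ArrayList<>();
    for (Identity candidate : removedComponent) {
      if (candidate.name.startsWith("/")) {
        continue; // reference to another service's identity, e.g. /HDFS/NAMENODE/hdfs
      }
      boolean shared = false;
      for (Identity other : stillInstalled) {
        boolean samePrincipal = candidate.principal != null
            && candidate.principal.equals(other.principal);
        boolean sameKeytab = candidate.keytabFile != null
            && candidate.keytabFile.equals(other.keytabFile);
        if (samePrincipal || sameKeytab) {
          shared = true; // still used by an installed component, keep it
          break;
        }
      }
      if (!shared) {
        result.add(candidate.name);
      }
    }
    return result;
  }
}

Applied to the descriptor built in setUp(), that rule yields exactly the expectations above: both oozie_server1 and oozie_server2 when only OOZIE is installed, only oozie_server1 once OOZIE2 still holds the shared oozie/_HOST@EXAMPLE.COM principal, and only rm_unique while RESOURCE_MANAGER2 still references the 'shared' keytab.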

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b5c7db6/ambari-web/app/controllers/main/service/item.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/item.js b/ambari-web/app/controllers/main/service/item.js
index 37713dc..197eb8e 100644
--- a/ambari-web/app/controllers/main/service/item.js
+++ b/ambari-web/app/controllers/main/service/item.js
@@ -1388,8 +1388,10 @@ App.MainServiceItemController = Em.Controller.extend(App.SupportClientConfigsDow
               this._super();
             }
           });
-        self.set('deleteServiceProgressPopup', progressPopup);
-        self.deleteServiceCall(serviceNames);
+        App.get('router.mainAdminKerberosController').getKDCSessionState(function() {
+          self.set('deleteServiceProgressPopup', progressPopup);
+          self.deleteServiceCall(serviceNames);
+        });
         this._super();
       },
 


[48/63] [abbrv] ambari git commit: AMBARI-21274: Typo in stack advisor error message for yarn and mr queue config issues (sangeetar)

Posted by ab...@apache.org.
AMBARI-21274: Typo in stack advisor error message for yarn and mr queue config issues (sangeetar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/34462831
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/34462831
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/34462831

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 34462831450c3f592b3343940c2c623f9bb7489b
Parents: aac9fe6
Author: Sangeeta Ravindran <sa...@apache.org>
Authored: Wed Jun 28 09:16:48 2017 -0700
Committer: Sangeeta Ravindran <sa...@apache.org>
Committed: Wed Jun 28 09:16:48 2017 -0700

----------------------------------------------------------------------
 ambari-server/src/main/resources/stacks/stack_advisor.py           | 2 +-
 .../src/test/python/stacks/2.0.6/common/test_stack_advisor.py      | 2 +-
 .../src/test/python/stacks/2.5/common/test_stack_advisor.py        | 2 +-
 .../src/main/resources/stacks/ODPi/2.0/services/stack_advisor.py   | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/34462831/ambari-server/src/main/resources/stacks/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/stack_advisor.py b/ambari-server/src/main/resources/stacks/stack_advisor.py
index 3a39a34..8e08d82 100644
--- a/ambari-server/src/main/resources/stacks/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/stack_advisor.py
@@ -2752,7 +2752,7 @@ class DefaultStackAdvisor(StackAdvisor):
     if len(leaf_queue_names) == 0:
       return None
     elif queue_name not in leaf_queue_names:
-      return self.getErrorItem("Queue is not exist or not corresponds to existing YARN leaf queue")
+      return self.getErrorItem("Queue does not exist or correspond to an existing YARN leaf queue")
 
     return None
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/34462831/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
index b6f1965..0c4996b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
@@ -534,7 +534,7 @@ class TestHDP206StackAdvisor(TestCase):
     hosts = self.prepareHosts([])
     result = self.stackAdvisor.validateConfigurations(services, hosts)
     expectedItems = [
-      {'message': 'Queue is not exist or not corresponds to existing YARN leaf queue', 'level': 'ERROR'}
+      {'message': 'Queue does not exist or correspond to an existing YARN leaf queue', 'level': 'ERROR'}
     ]
     self.assertValidationResult(expectedItems, result)
     services["configurations"]["yarn-env"]["properties"]["service_check.queue.name"] = "ndfqueue"

http://git-wip-us.apache.org/repos/asf/ambari/blob/34462831/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
index e62e00c..50f527d 100644
--- a/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.5/common/test_stack_advisor.py
@@ -5334,7 +5334,7 @@ class TestHDP25StackAdvisor(TestCase):
     hosts = self.prepareHosts([])
     result = self.stackAdvisor.validateConfigurations(services, hosts)
     expectedItems = [
-      {'message': 'Queue is not exist or not corresponds to existing YARN leaf queue', 'level': 'ERROR'}
+      {'message': 'Queue does not exist or correspond to an existing YARN leaf queue', 'level': 'ERROR'}
     ]
     self.assertValidationResult(expectedItems, result)
     services["configurations"]["yarn-env"]["properties"]["service_check.queue.name"] = "ndfqueue2"

http://git-wip-us.apache.org/repos/asf/ambari/blob/34462831/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/stack_advisor.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/stack_advisor.py
index 5a95fe3..5f70db2 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/stack_advisor.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/stack_advisor.py
@@ -1395,7 +1395,7 @@ class ODPi20StackAdvisor(DefaultStackAdvisor):
     if len(leaf_queue_names) == 0:
       return None
     elif queue_name not in leaf_queue_names:
-      return self.getErrorItem("Queue is not exist or not corresponds to existing YARN leaf queue")
+      return self.getErrorItem("Queue does not exist or correspond to an existing YARN leaf queue")
 
     return None
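
The corrected message comes from a validation that accepts a configured queue name only if it matches one of the YARN leaf queues derived from capacity-scheduler, and that skips the check when no leaf queues are known, as the Python above shows. The real check lives in stack_advisor.py; purely as an illustration of the same rule, here is a small Java sketch (the class and method names are hypothetical):

import java.util.Set;

// Illustrative sketch of the leaf-queue validation behind the corrected message; not Ambari code.
public class QueueValidationSketch {

  /** Returns the error message to surface, or null when the queue name passes validation. */
  public static String validateQueueName(String queueName, Set<String> leafQueueNames) {
    if (leafQueueNames.isEmpty()) {
      // No leaf queues known yet -- nothing to validate against, mirroring the early return above.
      return null;
    }
    if (!leafQueueNames.contains(queueName)) {
      return "Queue does not exist or correspond to an existing YARN leaf queue";
    }
    return null;
  }
}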
 


[06/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog221Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog221Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog221Test.java
deleted file mode 100644
index ff859f0..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog221Test.java
+++ /dev/null
@@ -1,614 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.anyString;
-import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.createMockBuilder;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.createStrictMock;
-import static org.easymock.EasyMock.eq;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-import static org.easymock.EasyMock.verify;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-
-import javax.persistence.EntityManager;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.actionmanager.ActionManager;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.AmbariManagementControllerImpl;
-import org.apache.ambari.server.controller.KerberosHelper;
-import org.apache.ambari.server.controller.MaintenanceStateHelper;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
-import org.apache.ambari.server.orm.dao.StackDAO;
-import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
-import org.apache.ambari.server.orm.entities.StackEntity;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.stack.OsFamily;
-import org.easymock.Capture;
-import org.easymock.EasyMock;
-import org.easymock.EasyMockSupport;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import com.google.common.collect.Maps;
-import com.google.gson.Gson;
-import com.google.inject.AbstractModule;
-import com.google.inject.Binder;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.Module;
-import com.google.inject.Provider;
-import com.google.inject.persist.PersistService;
-
-import junit.framework.Assert;
-
-public class UpgradeCatalog221Test {
-  private static Injector injector;
-  private static Provider<EntityManager> entityManagerProvider = createStrictMock(Provider.class);
-  private static EntityManager entityManager = createNiceMock(EntityManager.class);
-  private static UpgradeCatalogHelper upgradeCatalogHelper;
-  private static StackEntity desiredStackEntity;
-
-  @BeforeClass
-  public static void init() {
-    reset(entityManagerProvider);
-    expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
-    replay(entityManagerProvider);
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-
-    upgradeCatalogHelper = injector.getInstance(UpgradeCatalogHelper.class);
-    // inject AmbariMetaInfo to ensure that stacks get populated in the DB
-    injector.getInstance(AmbariMetaInfo.class);
-    // load the stack entity
-    StackDAO stackDAO = injector.getInstance(StackDAO.class);
-    desiredStackEntity = stackDAO.find("HDP", "2.2.0");
-  }
-
-  @AfterClass
-  public static void tearDown() {
-    injector.getInstance(PersistService.class).stop();
-  }
-
-  @Test
-  public void testExecuteDDLUpdates() throws Exception{
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-
-    dbAccessor.createIndex(eq("idx_stage_request_id"), eq("stage"), eq("request_id"));
-    expectLastCall().once();
-    dbAccessor.createIndex(eq("idx_hrc_request_id"), eq("host_role_command"), eq("request_id"));
-    expectLastCall().once();
-    dbAccessor.createIndex(eq("idx_rsc_request_id"), eq("role_success_criteria"), eq("request_id"));
-    expectLastCall().once();
-
-    Capture<DBAccessor.DBColumnInfo> capturedHostGroupComponentProvisionColumn = EasyMock.newCapture();
-    dbAccessor.addColumn(eq("hostgroup_component"), capture(capturedHostGroupComponentProvisionColumn));
-    expectLastCall().once();
-
-
-    replay(dbAccessor);
-    Module module = new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(DBAccessor.class).toInstance(dbAccessor);
-        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        binder.bind(EntityManager.class).toInstance(entityManager);
-      }
-    };
-
-    Injector injector = Guice.createInjector(module);
-    UpgradeCatalog221 upgradeCatalog221 = injector.getInstance(UpgradeCatalog221.class);
-    upgradeCatalog221.executeDDLUpdates();
-
-    // verify that the column was added for provision_action to the hostgroup_component table
-    assertEquals("Incorrect column name added", "provision_action", capturedHostGroupComponentProvisionColumn.getValue().getName());
-    assertNull("Incorrect default value added", capturedHostGroupComponentProvisionColumn.getValue().getDefaultValue());
-    assertEquals("Incorrect column type added", String.class, capturedHostGroupComponentProvisionColumn.getValue().getType());
-    assertEquals("Incorrect column length added", 255, capturedHostGroupComponentProvisionColumn.getValue().getLength().intValue());
-    assertTrue("Incorrect column nullable state added", capturedHostGroupComponentProvisionColumn.getValue().isNullable());
-
-
-    verify(dbAccessor);
-  }
-
-  @Test
-  public void testExecuteDMLUpdates() throws Exception {
-    Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
-    Method updateAlerts = UpgradeCatalog221.class.getDeclaredMethod("updateAlerts");
-    Method updateOozieConfigs = UpgradeCatalog221.class.getDeclaredMethod("updateOozieConfigs");
-    Method updateTezConfigs = UpgradeCatalog221.class.getDeclaredMethod("updateTezConfigs");
-    Method updateRangerKmsDbksConfigs = UpgradeCatalog221.class.getDeclaredMethod("updateRangerKmsDbksConfigs");
-    Method updateAMSConfigs = UpgradeCatalog221.class.getDeclaredMethod("updateAMSConfigs");
-
-    UpgradeCatalog221 upgradeCatalog221 = createMockBuilder(UpgradeCatalog221.class)
-      .addMockedMethod(addNewConfigurationsFromXml)
-      .addMockedMethod(updateAlerts)
-      .addMockedMethod(updateOozieConfigs)
-      .addMockedMethod(updateTezConfigs)
-      .addMockedMethod(updateRangerKmsDbksConfigs)
-      .addMockedMethod(updateAMSConfigs)
-      .createMock();
-
-    upgradeCatalog221.addNewConfigurationsFromXml();
-    expectLastCall().once();
-    upgradeCatalog221.updateAlerts();
-    expectLastCall().once();
-    upgradeCatalog221.updateOozieConfigs();
-    expectLastCall().once();
-    upgradeCatalog221.updateTezConfigs();
-    expectLastCall().once();
-    upgradeCatalog221.updateRangerKmsDbksConfigs();
-    expectLastCall().once();
-    upgradeCatalog221.updateAMSConfigs();
-    expectLastCall().once();
-
-
-    replay(upgradeCatalog221);
-
-    upgradeCatalog221.executeDMLUpdates();
-
-    verify(upgradeCatalog221);
-  }
-
-  @Test
-  public void test_AddCheckCommandTimeoutParam_ParamsNotAvailable() {
-
-    UpgradeCatalog221 upgradeCatalog221 = new UpgradeCatalog221(injector);
-    String inputSource = "{ \"path\" : \"test_path\", \"type\" : \"SCRIPT\"}";
-    String expectedSource = "{\"path\":\"test_path\",\"type\":\"SCRIPT\",\"parameters\":[{\"name\":\"check.command.timeout\",\"display_name\":\"Check command timeout\",\"value\":60.0,\"type\":\"NUMERIC\",\"description\":\"The maximum time before check command will be killed by timeout\",\"units\":\"seconds\"}]}";
-
-    String result = upgradeCatalog221.addCheckCommandTimeoutParam(inputSource);
-    Assert.assertEquals(result, expectedSource);
-  }
-
-  @Test
-  public void test_AddCheckCommandTimeoutParam_ParamsAvailable() {
-
-    UpgradeCatalog221 upgradeCatalog221 = new UpgradeCatalog221(injector);
-    String inputSource = "{\"path\":\"test_path\",\"type\":\"SCRIPT\",\"parameters\":[{\"name\":\"test\",\"display_name\":\"Test\",\"value\":10.0,\"type\":\"test\",\"description\":\"test\",\"units\":\"test\"}]}";
-    String expectedSource = "{\"path\":\"test_path\",\"type\":\"SCRIPT\",\"parameters\":[{\"name\":\"test\",\"display_name\":\"Test\",\"value\":10.0,\"type\":\"test\",\"description\":\"test\",\"units\":\"test\"},{\"name\":\"check.command.timeout\",\"display_name\":\"Check command timeout\",\"value\":60.0,\"type\":\"NUMERIC\",\"description\":\"The maximum time before check command will be killed by timeout\",\"units\":\"seconds\"}]}";
-
-    String result = upgradeCatalog221.addCheckCommandTimeoutParam(inputSource);
-    Assert.assertEquals(result, expectedSource);
-  }
-
-  @Test
-  public void test_AddCheckCommandTimeoutParam_NeededParamAlreadyAdded() {
-
-    UpgradeCatalog221 upgradeCatalog221 = new UpgradeCatalog221(injector);
-    String inputSource = "{\"path\":\"test_path\",\"type\":\"SCRIPT\",\"parameters\":[{\"display_name\":\"Test\",\"value\":10.0,\"type\":\"test\",\"description\":\"test\",\"units\":\"test\"},{\"name\":\"check.command.timeout\",\"display_name\":\"Check command timeout\",\"value\":60.0,\"type\":\"NUMERIC\",\"description\":\"The maximum time before check command will be killed by timeout\",\"units\":\"seconds\"}]}";
-    String expectedSource = "{\"path\":\"test_path\",\"type\":\"SCRIPT\",\"parameters\":[{\"display_name\":\"Test\",\"value\":10.0,\"type\":\"test\",\"description\":\"test\",\"units\":\"test\"},{\"name\":\"check.command.timeout\",\"display_name\":\"Check command timeout\",\"value\":60.0,\"type\":\"NUMERIC\",\"description\":\"The maximum time before check command will be killed by timeout\",\"units\":\"seconds\"}]}";
-
-    String result = upgradeCatalog221.addCheckCommandTimeoutParam(inputSource);
-    Assert.assertEquals(result, expectedSource);
-  }
-
-  @Test
-  public void testUpdateOozieConfigs() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-
-    final Config oozieSiteConf = easyMockSupport.createNiceMock(Config.class);
-    final Map<String, String> propertiesOozieSite = new HashMap<String, String>() {{
-      put("oozie.service.HadoopAccessorService.hadoop.configurations", "*=/etc/hadoop/conf");
-    }};
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("oozie-site")).andReturn(oozieSiteConf).atLeastOnce();
-    expect(oozieSiteConf.getProperties()).andReturn(propertiesOozieSite).once();
-
-    UpgradeCatalog221 upgradeCatalog221 = createMockBuilder(UpgradeCatalog221.class)
-        .withConstructor(Injector.class)
-        .withArgs(mockInjector)
-        .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
-            Map.class, boolean.class, boolean.class)
-        .createMock();
-    upgradeCatalog221.updateConfigurationPropertiesForCluster(mockClusterExpected, "oozie-site",
-        Collections.singletonMap("oozie.service.HadoopAccessorService.hadoop.configurations", "*={{hadoop_conf_dir}}"),
-        true, false);
-    expectLastCall().once();
-
-    easyMockSupport.replayAll();
-    replay(upgradeCatalog221);
-    upgradeCatalog221.updateOozieConfigs();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testUpdateTezConfigs() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-
-    final Config tezSiteConf = easyMockSupport.createNiceMock(Config.class);
-    final Map<String, String> propertiesTezSite = new HashMap<String, String>() {{
-      put("tez.counters.max", "2000");
-      put("tez.counters.max.groups", "1000");
-    }};
-
-    StackId stackId = new StackId("HDP","2.3");
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-
-
-    expect(mockClusterExpected.getDesiredConfigByType("tez-site")).andReturn(tezSiteConf).atLeastOnce();
-    expect(tezSiteConf.getProperties()).andReturn(propertiesTezSite).once();
-    expect(mockClusterExpected.getCurrentStackVersion()).andReturn(stackId).once();
-
-    UpgradeCatalog221 upgradeCatalog221 = createMockBuilder(UpgradeCatalog221.class)
-        .withConstructor(Injector.class)
-        .withArgs(mockInjector)
-        .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
-            Map.class, boolean.class, boolean.class)
-        .createMock();
-    Map<String, String> updates = new HashMap<>();
-    updates.put("tez.counters.max", "10000");
-    updates.put("tez.counters.max.groups", "3000");
-    upgradeCatalog221.updateConfigurationPropertiesForCluster(mockClusterExpected, "tez-site",
-        updates, true, false);
-    expectLastCall().once();
-
-  }
-
-  @Test
-  public void testUpdateRangerKmsDbksConfigs() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-
-    final Map<String, String> propertiesRangerKmsDbConfigs = new HashMap<>();
-    propertiesRangerKmsDbConfigs.put("DB_FLAVOR", "MYSQL");
-    propertiesRangerKmsDbConfigs.put("db_host", "localhost");
-    propertiesRangerKmsDbConfigs.put("db_name", "testdb");
-
-    final Config mockrangerKmsDbConfigs = easyMockSupport.createNiceMock(Config.class);
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-
-    expect(mockClusterExpected.getDesiredConfigByType("kms-properties")).andReturn(mockrangerKmsDbConfigs).atLeastOnce();
-    expect(mockrangerKmsDbConfigs.getProperties()).andReturn(propertiesRangerKmsDbConfigs).times(3);
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog221.class).updateRangerKmsDbksConfigs();
-    easyMockSupport.verifyAll();
-
-  }
-
-  @Test
-  public void testUpdateAmsHbaseSiteConfigs() throws Exception {
-
-    Map<String, String> clusterEnvProperties = new HashMap<>();
-    Map<String, String> amsHbaseSecuritySite = new HashMap<>();
-    Map<String, String> newPropertiesAmsHbaseSite = new HashMap<>();
-
-    //Unsecure
-    amsHbaseSecuritySite.put("zookeeper.znode.parent", "/ams-hbase-unsecure");
-    newPropertiesAmsHbaseSite.put("zookeeper.znode.parent", "/ams-hbase-unsecure");
-    testAmsHbaseSiteUpdates(new HashMap<String, String>(),
-      newPropertiesAmsHbaseSite,
-      amsHbaseSecuritySite,
-      clusterEnvProperties);
-
-    //Secure
-    amsHbaseSecuritySite.put("zookeeper.znode.parent", "/ams-hbase-secure");
-    newPropertiesAmsHbaseSite.put("zookeeper.znode.parent", "/ams-hbase-secure");
-    testAmsHbaseSiteUpdates(new HashMap<String, String>(),
-      newPropertiesAmsHbaseSite,
-      amsHbaseSecuritySite,
-      clusterEnvProperties);
-
-    //Unsecure with empty value
-    clusterEnvProperties.put("security_enabled", "false");
-    amsHbaseSecuritySite.put("zookeeper.znode.parent", "");
-    newPropertiesAmsHbaseSite.put("zookeeper.znode.parent", "/ams-hbase-unsecure");
-    testAmsHbaseSiteUpdates(new HashMap<String, String>(),
-      newPropertiesAmsHbaseSite,
-      amsHbaseSecuritySite,
-      clusterEnvProperties);
-
-    //Secure with /hbase value
-    clusterEnvProperties.put("security_enabled", "true");
-    amsHbaseSecuritySite.put("zookeeper.znode.parent", "/hbase");
-    newPropertiesAmsHbaseSite.put("zookeeper.znode.parent", "/ams-hbase-secure");
-    testAmsHbaseSiteUpdates(new HashMap<String, String>(),
-      newPropertiesAmsHbaseSite,
-      amsHbaseSecuritySite,
-      clusterEnvProperties);
-
-    // Test zookeeper client port set to default
-    amsHbaseSecuritySite.put("hbase.zookeeper.property.clientPort", "61181");
-    newPropertiesAmsHbaseSite.put("hbase.zookeeper.property.clientPort", "{{zookeeper_clientPort}}");
-    testAmsHbaseSiteUpdates(Collections.singletonMap("hbase.zookeeper.property.clientPort", "61181"),
-      newPropertiesAmsHbaseSite,
-      amsHbaseSecuritySite,
-      clusterEnvProperties);
-  }
-
-  private void testAmsHbaseSiteUpdates(Map<String, String> oldPropertiesAmsHbaseSite,
-                                       Map<String, String> newPropertiesAmsHbaseSite,
-                                       Map<String, String> amsHbaseSecuritySiteProperties,
-                                       Map<String, String> clusterEnvProperties ) throws AmbariException {
-
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
-    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).once();
-
-    Config mockAmsHbaseSite = easyMockSupport.createNiceMock(Config.class);
-    expect(cluster.getDesiredConfigByType("ams-hbase-site")).andReturn(mockAmsHbaseSite).atLeastOnce();
-    expect(mockAmsHbaseSite.getProperties()).andReturn(oldPropertiesAmsHbaseSite).times(2);
-
-    Config mockAmsHbaseSecuritySite = easyMockSupport.createNiceMock(Config.class);
-    expect(cluster.getDesiredConfigByType("ams-hbase-security-site")).andReturn(mockAmsHbaseSecuritySite).anyTimes();
-    expect(mockAmsHbaseSecuritySite.getProperties()).andReturn(amsHbaseSecuritySiteProperties).anyTimes();
-
-    Config clusterEnv = easyMockSupport.createNiceMock(Config.class);
-    expect(cluster.getDesiredConfigByType("cluster-env")).andReturn(clusterEnv).anyTimes();
-    expect(clusterEnv.getProperties()).andReturn(clusterEnvProperties).anyTimes();
-
-    Injector injector = easyMockSupport.createNiceMock(Injector.class);
-    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
-
-    replay(injector, clusters, mockAmsHbaseSite, mockAmsHbaseSecuritySite, clusterEnv, cluster);
-
-    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
-      .addMockedMethod("createConfiguration")
-      .addMockedMethod("getClusters", new Class[] { })
-      .addMockedMethod("createConfig")
-      .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
-      .createNiceMock();
-
-    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
-    Capture<Map<String, String>> propertiesCapture = EasyMock.newCapture();
-
-    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
-      EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).anyTimes();
-
-    replay(controller, injector2);
-    new UpgradeCatalog221(injector2).updateAMSConfigs();
-    easyMockSupport.verifyAll();
-
-    Map<String, String> updatedProperties = propertiesCapture.getValue();
-    // Test zookeeper tick time setting
-    String tickTime = updatedProperties.remove("hbase.zookeeper.property.tickTime");
-    assertEquals("6000", tickTime);
-    assertTrue(Maps.difference(newPropertiesAmsHbaseSite, updatedProperties).areEqual());
-  }
-
-  @Test
-  public void testUpdateAmsHbaseSecuritySiteConfigs() throws Exception{
-
-    Map<String, String> oldPropertiesAmsHbaseSecuritySite = new HashMap<String, String>() {
-      {
-        put("zookeeper.znode.parent", "/ams-hbase-secure");
-      }
-    };
-
-    Map<String, String> newPropertiesAmsHbaseSecuritySite = new HashMap<>();
-
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
-    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
-    Config mockAmsHbaseSecuritySite = easyMockSupport.createNiceMock(Config.class);
-
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).once();
-
-    expect(cluster.getDesiredConfigByType("ams-hbase-security-site")).andReturn(mockAmsHbaseSecuritySite).atLeastOnce();
-    expect(mockAmsHbaseSecuritySite.getProperties()).andReturn(oldPropertiesAmsHbaseSecuritySite).times(2);
-
-    Injector injector = easyMockSupport.createNiceMock(Injector.class);
-    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
-
-    replay(injector, clusters, mockAmsHbaseSecuritySite, cluster);
-
-    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
-      .addMockedMethod("createConfiguration")
-      .addMockedMethod("getClusters", new Class[] { })
-      .addMockedMethod("createConfig")
-      .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
-      .createNiceMock();
-
-    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
-    Capture<Map<String, String>> propertiesCapture = EasyMock.newCapture();
-
-    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
-      EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
-
-    replay(controller, injector2);
-    new UpgradeCatalog221(injector2).updateAMSConfigs();
-    easyMockSupport.verifyAll();
-
-    Map<String, String> updatedProperties = propertiesCapture.getValue();
-    assertTrue(Maps.difference(newPropertiesAmsHbaseSecuritySite, updatedProperties).areEqual());
-
-  }
-
-  @Test
-  public void testUpdateAmsHbaseEnvContent() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
-    Method updateAmsHbaseEnvContent = UpgradeCatalog221.class.getDeclaredMethod("updateAmsHbaseEnvContent", String.class);
-    UpgradeCatalog221 upgradeCatalog221 = new UpgradeCatalog221(injector);
-    String oldContent = "some_content\n" +
-      "{% if security_enabled %}\n" +
-      "export HBASE_OPTS=\"$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}} -Dzookeeper.sasl.client.username={{zk_servicename}}\"\n" +
-      "export HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}} -Dzookeeper.sasl.client.username={{zk_servicename}}\"\n" +
-      "export HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}} -Dzookeeper.sasl.client.username={{zk_servicename}}\"\n" +
-      "export HBASE_ZOOKEEPER_OPTS=\"$HBASE_ZOOKEEPER_OPTS -Djava.security.auth.login.config={{ams_zookeeper_jaas_config_file}} -Dzookeeper.sasl.client.username={{zk_servicename}}\"\n" +
-      "{% endif %}";
-
-    String expectedContent = "some_content\n" +
-      "{% if security_enabled %}\n" +
-      "export HBASE_OPTS=\"$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}\"\n" +
-      "export HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}\"\n" +
-      "export HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n" +
-      "export HBASE_ZOOKEEPER_OPTS=\"$HBASE_ZOOKEEPER_OPTS -Djava.security.auth.login.config={{ams_zookeeper_jaas_config_file}}\"\n" +
-      "{% endif %}";
-
-    String result = (String) updateAmsHbaseEnvContent.invoke(upgradeCatalog221, oldContent);
-    Assert.assertEquals(expectedContent, result);
-  }
-
-  @Test
-  public void testUpdateAmsEnvContent() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException
-  {
-    Method updateAmsEnvContent = UpgradeCatalog221.class.getDeclaredMethod("updateAmsEnvContent", String.class);
-    UpgradeCatalog221 upgradeCatalog221 = new UpgradeCatalog221(injector);
-    String oldContent = "some_content\n" +
-      "# AMS Collector options\n" +
-      "export AMS_COLLECTOR_OPTS=\"-Djava.library.path=/usr/lib/ams-hbase/lib/hadoop-native\"\n" +
-      "{% if security_enabled %}\n" +
-      "export AMS_COLLECTOR_OPTS=\"$AMS_COLLECTOR_OPTS -Djava.security.auth.login.config={{ams_collector_jaas_config_file}} " +
-      "-Dzookeeper.sasl.client.username={{zk_servicename}}\"\n" +
-      "{% endif %}";
-
-    String expectedContent = "some_content\n" +
-      "# AMS Collector options\n" +
-      "export AMS_COLLECTOR_OPTS=\"-Djava.library.path=/usr/lib/ams-hbase/lib/hadoop-native\"\n" +
-      "{% if security_enabled %}\n" +
-      "export AMS_COLLECTOR_OPTS=\"$AMS_COLLECTOR_OPTS -Djava.security.auth.login.config={{ams_collector_jaas_config_file}}\"\n" +
-      "{% endif %}";
-
-    String result = (String) updateAmsEnvContent.invoke(upgradeCatalog221, oldContent);
-    Assert.assertEquals(expectedContent, result);
-  }
-
-  @Test
-  public void testUpdateAlertDefinitions() {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    long clusterId = 1;
-
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final AlertDefinitionDAO mockAlertDefinitionDAO = easyMockSupport.createNiceMock(AlertDefinitionDAO.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final AlertDefinitionEntity mockAmsZookeeperProcessAlertDefinitionEntity = easyMockSupport.createNiceMock(AlertDefinitionEntity.class);
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-        bind(AlertDefinitionDAO.class).toInstance(mockAlertDefinitionDAO);
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-
-    expect(mockClusterExpected.getClusterId()).andReturn(clusterId).anyTimes();
-
-    expect(mockAlertDefinitionDAO.findByName(eq(clusterId), eq("ams_metrics_collector_zookeeper_server_process")))
-      .andReturn(mockAmsZookeeperProcessAlertDefinitionEntity).atLeastOnce();
-
-    mockAlertDefinitionDAO.remove(mockAmsZookeeperProcessAlertDefinitionEntity);
-    expectLastCall().once();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog221.class).updateAlerts();
-    easyMockSupport.verifyAll();
-  }
-}


[12/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.3.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.3.json b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.3.json
deleted file mode 100644
index 70f1ffc..0000000
--- a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.3.json
+++ /dev/null
@@ -1,440 +0,0 @@
-{
-  "version": "1.0",
-  "stacks": [
-    {
-      "name": "HDP",
-      "old-version": "2.1",
-      "target-version": "2.3",
-      "options": {
-        "config-types": {
-          "core-site": {
-            "merged-copy": "yes"
-          },
-          "hdfs-site": {
-            "merged-copy": "yes"
-          },
-          "yarn-site": {
-            "merged-copy": "yes"
-          },
-          "capacity-scheduler": {
-            "merged-copy": "yes",
-            "required-services": ["YARN"]
-          },
-          "mapred-site": {
-            "merged-copy": "yes"
-          },
-          "hbase-site": {
-            "merged-copy": "yes"
-          },
-          "hive-site": {
-            "merged-copy": "yes"
-          },
-          "hive-env": {
-            "merged-copy": "yes",
-            "required-services": ["HIVE"]
-          },
-          "oozie-site": {
-            "merged-copy": "yes"
-          },
-          "webhcat-site": {
-            "merged-copy": "yes"
-          },
-          "tez-site":{
-            "merged-copy": "yes"
-          },
-          "falcon-startup.properties": {
-            "merged-copy": "yes"
-          },
-          "storm-site":{
-            "merged-copy": "yes"
-          },
-          "storm-env": {
-            "merged-copy": "yes",
-            "required-services": [
-              "STORM"
-            ]
-          },
-          "storm-cluster-log4j": {
-            "merged-copy": "yes",
-            "required-services": [
-              "STORM"
-            ]
-          },
-          "storm-worker-log4j": {
-            "merged-copy": "yes",
-            "required-services": [
-              "STORM"
-            ]
-          }
-        }
-      },
-      "properties": {
-        "storm-env": {
-          "nimbus_seeds_supported": "true",
-          "storm_logs_supported": "true"
-        },
-        "storm-cluster-log4j": {
-          "content": "\n    \n<!--\n Licensed to the Apache Software Foundation (ASF) under one or more\n contributor license agreements.  See the NOTICE file distributed with\n this work for additional information regarding copyright ownership.\n The ASF licenses this file to You under the Apache License, Version 2.0\n (the \"License\"); you may not use this file except in compliance with\n the License.  You may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n-->\n\n<configuration monitorInterval=\"60\">\n<properties>\n    <property name=\"pattern\">%d{yyyy-MM-dd HH:mm:ss.SSS} %c{1.} [%p] %msg%n</property>\n    <property name=\"patt
 ernMetris\">%d %-8r %m%n</property>\n</properties>\n<appenders>\n    <RollingFile name=\"A1\"\n                 fileName=\"${sys:storm.log.dir}/${sys:logfile.name}\"\n                 filePattern=\"${sys:storm.log.dir}/${sys:logfile.name}.%i\">\n        <PatternLayout>\n            <pattern>${pattern}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"100 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"9\"/>\n    </RollingFile>\n    <RollingFile name=\"ACCESS\"\n                 fileName=\"${sys:storm.log.dir}/access.log\"\n                 filePattern=\"${sys:storm.log.dir}/access.log.%i\">\n        <PatternLayout>\n            <pattern>${pattern}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"100 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"9\"/>\n    </RollingFile>\n    <RollingFile
  name=\"METRICS\"\n                 fileName=\"${sys:storm.log.dir}/metrics.log\"\n                 filePattern=\"${sys:storm.log.dir}/metrics.log.%i\">\n        <PatternLayout>\n            <pattern>${patternMetris}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"2 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"9\"/>\n    </RollingFile>\n    <Syslog name=\"syslog\" format=\"RFC5424\" host=\"localhost\" port=\"514\"\n            protocol=\"UDP\" appName=\"[${sys:daemon.name}]\" mdcId=\"mdc\" includeMDC=\"true\"\n            facility=\"LOCAL5\" enterpriseNumber=\"18060\" newLine=\"true\" exceptionPattern=\"%rEx{full}\"\n            messageId=\"[${sys:user.name}:S0]\" id=\"storm\"/>\n</appenders>\n<loggers>\n\n    <Logger name=\"backtype.storm.security.auth.authorizer\" level=\"info\">\n        <AppenderRef ref=\"ACCESS\"/>\n    </Logger>\n    <Logger name=\"backtype.storm.metric.Loggi
 ngMetricsConsumer\" level=\"info\">\n        <AppenderRef ref=\"METRICS\"/>\n    </Logger>\n    <root level=\"info\"> <!-- We log everything -->\n        <appender-ref ref=\"A1\"/>\n        <appender-ref ref=\"syslog\"/>\n    </root>\n</loggers>\n</configuration>\n    \n    "
-        },
-        "storm-worker-log4j": {
-          "content": "\n    \n<!--\n Licensed to the Apache Software Foundation (ASF) under one or more\n contributor license agreements.  See the NOTICE file distributed with\n this work for additional information regarding copyright ownership.\n The ASF licenses this file to You under the Apache License, Version 2.0\n (the \"License\"); you may not use this file except in compliance with\n the License.  You may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n-->\n\n<configuration monitorInterval=\"60\">\n<properties>\n    <property name=\"pattern\">%d{yyyy-MM-dd HH:mm:ss.SSS} %c{1.} [%p] %msg%n</property>\n    <property name=\"patt
 ernNoTime\">%msg%n</property>\n</properties>\n<appenders>\n    <RollingFile name=\"A1\"\n                 fileName=\"${sys:storm.log.dir}/${sys:logfile.name}\"\n                 filePattern=\"${sys:storm.log.dir}/${sys:logfile.name}.%i.gz\">\n        <PatternLayout>\n            <pattern>${pattern}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"100 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"9\"/>\n    </RollingFile>\n    <RollingFile name=\"STDOUT\"\n                 fileName=\"${sys:storm.log.dir}/${sys:logfile.name}.out\"\n                 filePattern=\"${sys:storm.log.dir}/${sys:logfile.name}.out.%i.gz\">\n        <PatternLayout>\n            <pattern>${patternNoTime}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"100 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"4\"/>\n    
 </RollingFile>\n    <RollingFile name=\"STDERR\"\n                 fileName=\"${sys:storm.log.dir}/${sys:logfile.name}.err\"\n                 filePattern=\"${sys:storm.log.dir}/${sys:logfile.name}.err.%i.gz\">\n        <PatternLayout>\n            <pattern>${patternNoTime}</pattern>\n        </PatternLayout>\n        <Policies>\n            <SizeBasedTriggeringPolicy size=\"100 MB\"/> <!-- Or every 100 MB -->\n        </Policies>\n        <DefaultRolloverStrategy max=\"4\"/>\n    </RollingFile>\n    <Syslog name=\"syslog\" format=\"RFC5424\" host=\"localhost\" port=\"514\"\n        protocol=\"UDP\" appName=\"[${sys:storm.id}:${sys:worker.port}]\" mdcId=\"mdc\" includeMDC=\"true\"\n        facility=\"LOCAL5\" enterpriseNumber=\"18060\" newLine=\"true\" exceptionPattern=\"%rEx{full}\"\n        messageId=\"[${sys:user.name}:${sys:logging.sensitivity}]\" id=\"storm\"/>\n</appenders>\n<loggers>\n    <root level=\"info\"> <!-- We log everything -->\n        <appender-ref ref=\"A1\"/>\n  
       <appender-ref ref=\"syslog\"/>\n    </root>\n    <Logger name=\"STDERR\" level=\"INFO\">\n        <appender-ref ref=\"STDERR\"/>\n        <appender-ref ref=\"syslog\"/>\n    </Logger>\n    <Logger name=\"STDOUT\" level=\"INFO\">\n        <appender-ref ref=\"STDOUT\"/>\n        <appender-ref ref=\"syslog\"/>\n    </Logger>\n</loggers>\n</configuration>\n    \n    "
-        },
-        "storm-site": {
-          "supervisor.childopts": "-Xmx256m _JAAS_PLACEHOLDER -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port={{jmxremote_port}} -javaagent:/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM",
-          "nimbus.childopts": "-Xmx1024m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm-nimbus/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-nimbus/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM",
-          "worker.childopts": "-Xmx768m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm-client/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-client/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM"
-        },
-        "hive-env": {
-          "hive_security_authorization": {
-            "value": "{HIVE_SECURITY_AUTHORIZATION}",
-            "template": "yes",
-            "resolve-dependency": "yes"
-          }
-        },
-        "falcon-startup.properties": {
-          "*.application.services": "org.apache.falcon.security.AuthenticationInitializationService, org.apache.falcon.workflow.WorkflowJobEndNotificationService, org.apache.falcon.service.ProcessSubscriberService, org.apache.falcon.entity.store.ConfigurationStore, org.apache.falcon.rerun.service.RetryService, org.apache.falcon.rerun.service.LateRunService, org.apache.falcon.service.LogCleanupService",
-          "*.dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
-          "*.falcon.enableTLS": "false",
-          "*.falcon.http.authentication.cookie.domain": "EXAMPLE.COM",
-          "*.falcon.http.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
-          "*.falcon.http.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
-          "*.falcon.security.authorization.admin.groups": "falcon",
-          "*.falcon.security.authorization.admin.users": "falcon,ambari-qa",
-          "*.falcon.security.authorization.enabled": "false",
-          "*.falcon.security.authorization.provider": "org.apache.falcon.security.DefaultAuthorizationProvider",
-          "*.falcon.security.authorization.superusergroup": "falcon",
-          "*.falcon.service.authentication.kerberos.keytab": "/etc/security/keytabs/falcon.service.keytab",
-          "*.falcon.service.authentication.kerberos.principal": "falcon/_HOST@EXAMPLE.COM",
-          "*.journal.impl": "org.apache.falcon.transaction.SharedFileSystemJournal",
-          "prism.application.services": "org.apache.falcon.entity.store.ConfigurationStore",
-          "prism.configstore.listeners": "org.apache.falcon.entity.v0.EntityGraph, org.apache.falcon.entity.ColoClusterRelation, org.apache.falcon.group.FeedGroupMap",
-          "*.shared.libs": "activemq-core,ant,geronimo-j2ee-management,jms,json-simple,oozie-client,spring-jms,commons-lang3,commons-el",
-          "*.configstore.listeners": "org.apache.falcon.entity.v0.EntityGraph,\\\n      org.apache.falcon.entity.ColoClusterRelation,\\\n      org.apache.falcon.group.FeedGroupMap,\\\n      org.apache.falcon.service.SharedLibraryHostingService"
-        },
-        "tez-site":{
-          "tez.am.container.idle.release-timeout-max.millis": "20000",
-          "tez.am.container.idle.release-timeout-min.millis": "10000",
-          "tez.am.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
-          "tez.am.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC",
-          "tez.am.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
-          "tez.am.max.app.attempts": "2",
-          "tez.am.maxtaskfailures.per.node": "10",
-          "tez.cluster.additional.classpath.prefix": "/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
-          "tez.counters.max": "2000",
-          "tez.counters.max.groups": "1000",
-          "tez.generate.debug.artifacts": "false",
-          "tez.grouping.max-size": "1073741824",
-          "tez.grouping.min-size": "16777216",
-          "tez.grouping.split-waves": "1.7",
-          "tez.history.logging.service.class": "org.apache.tez.dag.history.logging.ats.ATSHistoryLoggingService",
-          "tez.runtime.compress": "true",
-          "tez.runtime.compress.codec": "org.apache.hadoop.io.compress.SnappyCodec",
-          "tez.runtime.io.sort.mb": "272",
-          "tez.runtime.unordered.output.buffer.size-mb": "51",
-          "tez.shuffle-vertex-manager.max-src-fraction": "0.4",
-          "tez.shuffle-vertex-manager.min-src-fraction": "0.2",
-          "tez.task.am.heartbeat.counter.interval-ms.max": "4000",
-          "tez.task.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
-          "tez.task.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC",
-          "tez.task.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
-          "tez.task.max-events-per-heartbeat": "500",
-          "tez.task.resource.memory.mb": "682",
-          "tez.am.container.reuse.non-local-fallback.enabled": "false",
-          "tez.am.resource.memory.mb": "1364",
-          "tez.lib.uris": "/hdp/apps/${hdp.version}/tez/tez.tar.gz",
-          "tez.session.client.timeout.secs": "-1",
-          "tez.am.view-acls": "*",
-          "tez.tez-ui.history-url.base": {
-            "value": "{TEZ_HISTORY_URL_BASE}",
-            "template": "yes"
-          },
-          "tez.am.container.session.delay-allocation-millis": {"remove": "yes"},
-          "tez.am.env": {"remove": "yes"},
-          "tez.am.grouping.max-size": {"remove": "yes"},
-          "tez.am.grouping.min-size": {"remove": "yes"},
-          "tez.am.grouping.split-waves": {"remove": "yes"},
-          "tez.am.java.opt": {"remove": "yes"},
-          "tez.am.shuffle-vertex-manager.max-src-fraction": {"remove": "yes"},
-          "tez.am.shuffle-vertex-manager.min-src-fraction": {"remove": "yes"},
-          "tez.runtime.intermediate-input.compress.codec": {"remove": "yes"},
-          "tez.runtime.intermediate-input.is-compressed": {"remove": "yes"},
-          "tez.runtime.intermediate-output.compress.codec": {"remove": "yes"},
-          "tez.runtime.intermediate-output.should-compress": {"remove": "yes"},
-          "tez.yarn.ats.enabled": {"remove": "yes"}
-        },
-        "webhcat-site": {
-          "templeton.storage.class": "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage",
-          "templeton.port": "50111"
-        },
-        "oozie-site": {
-          "oozie.service.coord.check.maximum.frequency": "false",
-          "oozie.service.HadoopAccessorService.kerberos.enabled": "false",
-          "oozie.authentication.simple.anonymous.allowed": "true",
-          "oozie.service.AuthorizationService.authorization.enabled": "true",
-          "oozie.authentication.kerberos.name.rules": "RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/\nRULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/\nRULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\nRULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\nDEFAULT",
-          "oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials,hive2=org.apache.oozie.action.hadoop.Hive2Credentials",
-          "oozie.service.CallableQueueService.callable.concurrency": {"remove": "yes"},
-          "oozie.service.CallableQueueService.queue.size": {"remove": "yes"},
-          "oozie.service.CallableQueueService.threads": {"remove": "yes"},
-          "oozie.service.JPAService.create.db.schema": {"remove": "yes"},
-          "oozie.service.JPAService.pool.max.active.conn": {"remove": "yes"},
-          "oozie.service.PurgeService.older.than": {"remove": "yes"},
-          "oozie.service.PurgeService.purge.interval": {"remove": "yes"},
-          "oozie.service.SchemaService.wf.ext.schemas": {"remove": "yes"},
-          "oozie.service.WorkflowAppService.system.libpath": {"remove": "yes"},
-          "oozie.service.coord.normal.default.timeout": {"remove": "yes"},
-          "oozie.service.coord.push.check.requeue.interval": {"remove": "yes"},
-          "oozie.services": {"remove": "yes"},
-          "oozie.system.id": {"remove": "yes"},
-          "oozie.systemmode": {"remove": "yes"},
-          "use.system.libpath.for.mapreduce.and.pig.jobs": {"remove": "yes"}
-
-        },
-        "hive-site": {
-          "hive.auto.convert.sortmerge.join.to.mapjoin": "false",
-          "hive.cbo.enable": "true",
-          "hive.cli.print.header": "false",
-          "hive.cluster.delegation.token.store.class": "org.apache.hadoop.hive.thrift.ZooKeeperTokenStore",
-          "hive.cluster.delegation.token.store.zookeeper.znode": "/hive/cluster/delegation",
-          "hive.conf.restricted.list": "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role",
-          "hive.convert.join.bucket.mapjoin.tez": "false",
-          "hive.exec.compress.intermediate": "false",
-          "hive.exec.compress.output": "false",
-          "hive.exec.dynamic.partition": "true",
-          "hive.exec.dynamic.partition.mode": "nonstrict",
-          "hive.exec.max.created.files": "100000",
-          "hive.exec.max.dynamic.partitions": "5000",
-          "hive.exec.max.dynamic.partitions.pernode": "2000",
-          "hive.exec.orc.compression.strategy": "SPEED",
-          "hive.exec.orc.default.compress": "ZLIB",
-          "hive.exec.orc.default.stripe.size": "67108864",
-          "hive.exec.parallel": "false",
-          "hive.exec.parallel.thread.number": "8",
-          "hive.exec.reducers.bytes.per.reducer": "67108864",
-          "hive.exec.reducers.max": "1009",
-          "hive.exec.scratchdir": "/tmp/hive",
-          "hive.exec.submit.local.task.via.child": "true",
-          "hive.exec.submitviachild": "false",
-          "hive.fetch.task.aggr": "false",
-          "hive.fetch.task.conversion": "more",
-          "hive.fetch.task.conversion.threshold": "1073741824",
-          "hive.map.aggr.hash.force.flush.memory.threshold": "0.9",
-          "hive.map.aggr.hash.min.reduction": "0.5",
-          "hive.map.aggr.hash.percentmemory": "0.5",
-          "hive.mapjoin.optimized.hashtable": "true",
-          "hive.merge.mapfiles": "true",
-          "hive.merge.mapredfiles": "false",
-          "hive.merge.orcfile.stripe.level": "true",
-          "hive.merge.rcfile.block.level": "true",
-          "hive.merge.size.per.task": "256000000",
-          "hive.merge.smallfiles.avgsize": "16000000",
-          "hive.merge.tezfiles": "false",
-          "hive.metastore.authorization.storage.checks": "false",
-          "hive.metastore.client.connect.retry.delay": "5s",
-          "hive.metastore.connect.retries": "24",
-          "hive.metastore.failure.retries": "24",
-          "hive.metastore.server.max.threads": "100000",
-          "hive.optimize.constant.propagation": "true",
-          "hive.optimize.metadataonly": "true",
-          "hive.optimize.null.scan": "true",
-          "hive.optimize.sort.dynamic.partition": "false",
-          "hive.orc.compute.splits.num.threads": "10",
-          "hive.prewarm.enabled": "false",
-          "hive.prewarm.numcontainers": "10",
-          "hive.security.metastore.authenticator.manager": "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator",
-          "hive.security.metastore.authorization.auth.reads": "true",
-          "hive.server2.allow.user.substitution": "true",
-          "hive.server2.logging.operation.enabled": "true",
-          "hive.server2.logging.operation.log.location": "/tmp/hive/operation_logs",
-          "hive.server2.table.type.mapping": "CLASSIC",
-          "hive.server2.thrift.http.path": "cliservice",
-          "hive.server2.thrift.http.port": "10001",
-          "hive.server2.thrift.max.worker.threads": "500",
-          "hive.server2.thrift.sasl.qop": "auth",
-          "hive.server2.transport.mode": "binary",
-          "hive.server2.use.SSL": "false",
-          "hive.smbjoin.cache.rows": "10000",
-          "hive.stats.dbclass": "fs",
-          "hive.stats.fetch.column.stats": "false",
-          "hive.stats.fetch.partition.stats": "true",
-          "hive.support.concurrency": "false",
-          "hive.tez.auto.reducer.parallelism": "false",
-          "hive.tez.cpu.vcores": "-1",
-          "hive.tez.dynamic.partition.pruning": "true",
-          "hive.tez.dynamic.partition.pruning.max.data.size": "104857600",
-          "hive.tez.dynamic.partition.pruning.max.event.size": "1048576",
-          "hive.tez.log.level": "INFO",
-          "hive.tez.max.partition.factor": "2.0",
-          "hive.tez.min.partition.factor": "0.25",
-          "hive.tez.smb.number.waves": "0.5",
-          "hive.user.install.directory": "/user/",
-          "hive.vectorized.execution.reduce.enabled": "false",
-          "hive.zookeeper.client.port": "2181",
-          "hive.zookeeper.namespace": "hive_zookeeper_namespace",
-          "hive.metastore.client.socket.timeout": "1800s",
-          "hive.optimize.reducededuplication.min.reducer": "4",
-          "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory",
-          "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider,org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly",
-          "hive.server2.support.dynamic.service.discovery": "true",
-          "hive.vectorized.groupby.checkinterval": "4096",
-          "fs.file.impl.disable.cache": "true",
-          "fs.hdfs.impl.disable.cache": "true"
-        },
-        "hbase-site": {
-          "hbase.hregion.majorcompaction.jitter": "0.50",
-          "hbase.hregion.majorcompaction": "604800000",
-          "hbase.hregion.memstore.block.multiplier": "4",
-          "hbase.hstore.flush.retries.number": {"remove": "yes"},
-          "hbase.region.server.rpc.scheduler.factory.class": {
-            "value": "{HBASE_REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS}",
-            "required-services": [
-              "HBASE"
-            ],
-            "template": "yes"
-          },
-          "hbase.rpc.controllerfactory.class": {
-            "value": "{HBASE_RPC_CONTROLLERFACTORY_CLASS}",
-            "template": "yes",
-            "required-services": [
-              "HBASE"
-            ]
-          },
-          "hbase.regionserver.wal.codec": {
-            "value": "{HBASE_REGIONSERVER_WAL_CODEC}",
-            "template": "yes",
-            "required-services": [
-              "HBASE"
-            ]
-          },
-          "phoenix.functions.allowUserDefinedFunctions": "true",
-          "fs.hdfs.impl": "org.apache.hadoop.hdfs.DistributedFileSystem",
-          "hbase.bucketcache.percentage.in.combinedcache": {"remove": "yes"},
-          "hbase.coprocessor.enabled": {
-            "value": "true",
-            "override": "yes"
-          },
-          "hbase.bulkload.staging.dir": {
-            "value": "/apps/hbase/staging",
-            "override": "no"
-          }
-        },
-        "mapred-site": {
-          "mapreduce.job.emit-timeline-data": "false",
-          "mapreduce.jobhistory.bind-host": "0.0.0.0",
-          "mapreduce.reduce.shuffle.fetch.retry.enabled": "1",
-          "mapreduce.reduce.shuffle.fetch.retry.interval-ms": "1000",
-          "mapreduce.reduce.shuffle.fetch.retry.timeout-ms": "30000",
-          "mapreduce.application.framework.path": "/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework",
-          "mapreduce.admin.map.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
-          "mapreduce.admin.reduce.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
-          "yarn.app.mapreduce.am.admin-command-opts": "-Dhdp.version=${hdp.version}",
-          "yarn.app.mapreduce.am.command-opts": "-Xmx546m -Dhdp.version=${hdp.version}",
-          "mapreduce.application.classpath": "$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
-          "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
-          "mapreduce.fileoutputcommitter.algorithm.version": "1",
-          "mapreduce.task.tmp.dir": {"remove": "yes"}
-        },
-        "core-site": {
-          "hadoop.http.authentication.simple.anonymous.allowed": "true"
-        },
-        "hdfs-site": {
-          "dfs.namenode.startup.delay.block.deletion.sec": "3600",
-          "dfs.datanode.max.transfer.threads": "4096",
-          "dfs.namenode.inode.attributes.provider.class": {
-            "value": "org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer",
-            "required-services": ["RANGER"]
-          }
-        },
-        "yarn-site": {
-          "yarn.application.classpath": "$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*",
-          "hadoop.registry.rm.enabled": "false",
-          "yarn.client.nodemanager-connect.max-wait-ms": "900000",
-          "yarn.client.nodemanager-connect.retry-interval-ms": "10000",
-          "yarn.node-labels.fs-store.retry-policy-spec": "2000, 500",
-          "yarn.node-labels.fs-store.root-dir": "/system/yarn/node-labels",
-          "yarn.node-labels.manager-class": "org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager",
-          "yarn.nodemanager.bind-host": "0.0.0.0",
-          "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage": "90",
-          "yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb": "1000",
-          "yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "hadoop-yarn",
-          "yarn.nodemanager.linux-container-executor.cgroups.mount": "false",
-          "yarn.nodemanager.linux-container-executor.cgroups.strictresource-usage": "false",
-          "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler",
-          "yarn.nodemanager.log-aggregation.debug-enabled": "false",
-          "yarn.nodemanager.log-aggregation.num-log-files-er-app": "30",
-          "yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds": "-1",
-          "yarn.nodemanager.recovery.dir": "/var/log/hadoop-yarn/nodemanager/recovery-state",
-          "yarn.nodemanager.recovery.enabled": "false",
-          "yarn.nodemanager.resource.cpu-vcores": "1",
-          "yarn.nodemanager.resource.percentage-physical-cpu-limit": "100",
-          "yarn.resourcemanager.bind-host": "0.0.0.0",
-          "yarn.resourcemanager.connect.max-wait.ms": "900000",
-          "yarn.resourcemanager.connect.retry-interval.ms": "30000",
-          "yarn.resourcemanager.fs.state-store.retry-policy-spec": "2000, 500",
-          "yarn.resourcemanager.fs.state-store.uri": " ",
-          "yarn.resourcemanager.ha.enabled": "false",
-          "yarn.resourcemanager.recovery.enabled": "false",
-          "yarn.resourcemanager.state-store.max-completed-applications": "${yarn.resourcemanager.max-completed-applications}",
-          "yarn.resourcemanager.store.class": "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore",
-          "yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size": "10",
-          "yarn.resourcemanager.system-metrics-publisher.enabled": "true",
-          "yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled": "false",
-          "yarn.resourcemanager.work-preserving-recovery.enabled": "false",
-          "yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms": "10000",
-          "yarn.resourcemanager.zk-acl": "world:anyone:rwcda",
-          "yarn.resourcemanager.zk-address": {
-            "value": "{ZOOKEEPER_QUORUM}",
-            "template": "yes"
-          },
-          "hadoop.registry.zk.quorum": {
-            "value": "{ZOOKEEPER_QUORUM}",
-            "template": "yes"
-          },
-          "yarn.resourcemanager.zk-num-retries": "1000",
-          "yarn.resourcemanager.zk-retry-interval-ms": "1000",
-          "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore",
-          "yarn.resourcemanager.zk-timeout-ms": "10000",
-          "yarn.timeline-service.bind-host": "0.0.0.0",
-          "yarn.timeline-service.client.max-retries": "30",
-          "yarn.timeline-service.client.retry-interval-ms": "1000",
-          "yarn.timeline-service.http-authentication.simple.anonymous.allowed": "true",
-          "yarn.timeline-service.http-authentication.type": "simple",
-          "yarn.timeline-service.leveldb-timeline-store.read-cache-size": "104857600",
-          "yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size": "10000",
-          "yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size": "10000",
-          "yarn.node-labels.enabled": false,
-          "yarn.timeline-service.recovery.enabled": "true",
-          "yarn.timeline-service.state-store-class": "org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore",
-          "yarn.timeline-service.leveldb-state-store.path": "/var/log/hadoop-yarn/timeline"
-        },
-        "capacity-scheduler": {
-          "yarn.scheduler.capacity.root.default-node-label-expression": "",
-          "yarn.scheduler.capacity.root.accessible-node-labels.default.capacity": {
-            "remove": "yes"
-          },
-          "yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity": {
-            "remove": "yes"
-          }
-        }
-      },
-      "property-mapping": {
-        "nimbus.host": {
-          "map-to": "nimbus.seeds",
-          "coerce-to": "yaml-array"
-        },
-        "mapreduce.job.speculative.speculativecap": {
-          "map-to": "mapreduce.job.speculative.speculative-cap-running-tasks",
-          "from-catalog": "mapred-site",
-          "to-catalog": "mapred-site",
-          "default": "0.1"
-        },
-        "hive.heapsize": {
-          "map-to": "hive.heapsize",
-          "from-catalog": "hive-site",
-          "to-catalog": "hive-env",
-          "required-services": ["HIVE"]
-        }
-      }
-    }
-  ]
-}
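
For readers skimming the removed catalog above: each of these JSON catalogs declares per-config-type property edits ("remove": "yes", "template": "yes" with placeholders such as {ZOOKEEPER_QUORUM}, "required-services", "override"), plus a "property-mapping" section that renames a key, optionally moves it between config types ("from-catalog"/"to-catalog"), coerces its value ("coerce-to": "yaml-array") and supplies a "default". The sketch below is a minimal, hypothetical illustration of how one such mapping entry could be applied when migrating configs; it is not the Ambari upgrade code, and the class and method names in it are invented.

  // Illustrative sketch only -- not the Ambari implementation.
  // Shows assumed semantics of one "property-mapping" entry
  // (map-to / from-catalog / to-catalog / coerce-to / default).
  import java.util.HashMap;
  import java.util.Map;

  public class PropertyMappingSketch {

      /** Moves one property according to a mapping entry (assumed semantics). */
      static void applyMapping(Map<String, String> fromCatalog,
                               Map<String, String> toCatalog,
                               String sourceKey,
                               String targetKey,
                               String coerceTo,
                               String defaultValue) {
          // Take the old value if present, otherwise fall back to "default".
          String value = fromCatalog.getOrDefault(sourceKey, defaultValue);
          if (value == null) {
              return; // nothing to map
          }
          // "coerce-to": "yaml-array" is assumed to wrap a scalar into a YAML list,
          // e.g. nimbus.host=host1 becomes nimbus.seeds=['host1'].
          if ("yaml-array".equals(coerceTo)) {
              value = "['" + value + "']";
          }
          fromCatalog.remove(sourceKey);
          toCatalog.put(targetKey, value);
      }

      public static void main(String[] args) {
          Map<String, String> mapredSiteOld = new HashMap<>();
          mapredSiteOld.put("mapreduce.job.speculative.speculativecap", "0.05");
          Map<String, String> mapredSiteNew = new HashMap<>();

          // Mirrors the "mapreduce.job.speculative.speculativecap" entry above.
          applyMapping(mapredSiteOld, mapredSiteNew,
                  "mapreduce.job.speculative.speculativecap",
                  "mapreduce.job.speculative.speculative-cap-running-tasks",
                  null, "0.1");

          System.out.println(mapredSiteNew);
          // prints {mapreduce.job.speculative.speculative-cap-running-tasks=0.05}
      }
  }

Entries carrying "required-services" (for example the hive.heapsize mapping above) would additionally be skipped unless the named service is installed; that guard is omitted from the sketch for brevity.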


[45/63] [abbrv] ambari git commit: Revert "BUG-82124 : As part of START_ALL Ranger kms starts after hbase and hive causing their start failure (Vishal Suvagia via mugdha)"

Posted by ab...@apache.org.
Revert "BUG-82124 : As part of START_ALL Ranger kms starts after hbase and hive causing their start failure (Vishal Suvagia via mugdha)"

This reverts commit 39efba35980642b832f79c6afb332716045d859f.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4c1ea4c4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4c1ea4c4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4c1ea4c4

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 4c1ea4c46c71bcae5d054eb7465283bca85cc9e8
Parents: 39efba3
Author: Mugdha Varadkar <mu...@apache.org>
Authored: Wed Jun 28 13:58:10 2017 +0530
Committer: Mugdha Varadkar <mu...@apache.org>
Committed: Wed Jun 28 13:58:10 2017 +0530

----------------------------------------------------------------------
 .../common-services/HBASE/0.96.0.2.0/role_command_order.json      | 3 ++-
 .../common-services/HBASE/2.0.0.3.0/role_command_order.json       | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4c1ea4c4/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
index 58d0c1c..110b179 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
@@ -4,6 +4,7 @@
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
     "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
-    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START", "RANGER_KMS_SERVER-START"]
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START"]
+
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/4c1ea4c4/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
index 69f4bf6..44d0c61 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
@@ -4,7 +4,7 @@
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
     "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
-    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START", "RANGER_KMS_SERVER-START"],
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START"],
     "PHOENIX_QUERY_SERVER-START": ["HBASE_MASTER-START"]
   }
 }
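
For context on the diff above: each entry in role_command_order.json reads as "this role command is blocked until the listed role commands have completed", so the revert removes RANGER_KMS_SERVER-START from the blockers of HBASE_MASTER-START and a START_ALL no longer forces HBase Master to wait on Ranger KMS. The snippet below is a minimal, hypothetical sketch of that ordering rule, not the Ambari command scheduler; the class and method names are invented.

  import java.util.*;

  public class RoleOrderSketch {
      // "command" -> commands that must complete first,
      // mirroring the reverted role_command_order.json entry.
      static final Map<String, List<String>> BLOCKED_BY = Map.of(
          "HBASE_MASTER-START",
          List.of("NAMENODE-START", "DATANODE-START",
                  "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START"));

      /** A command may run once none of its blockers are still pending. */
      static boolean canRun(String command, Set<String> completed) {
          return completed.containsAll(
              BLOCKED_BY.getOrDefault(command, List.of()));
      }

      public static void main(String[] args) {
          Set<String> done = new HashSet<>(
              List.of("NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START"));
          System.out.println(canRun("HBASE_MASTER-START", done)); // false
          done.add("RANGER_USERSYNC-START");
          System.out.println(canRun("HBASE_MASTER-START", done)); // true
      }
  }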


[17/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_1.3_to_2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_1.3_to_2.2.json b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_1.3_to_2.2.json
deleted file mode 100644
index 94a09b9..0000000
--- a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_1.3_to_2.2.json
+++ /dev/null
@@ -1,948 +0,0 @@
-{
-  "version": "1.0",
-  "stacks": [
-    {
-      "name": "HDP",
-      "old-version": "1.3",
-      "target-version": "2.2",
-      "options": {
-        "config-types": {
-          "hdfs-site": {
-            "merged-copy": "yes"
-          },
-          "core-site": {
-            "merged-copy": "yes"
-          },
-          "hbase-site": {
-            "merged-copy": "yes"
-          },
-          "hbase-env": {
-            "merged-copy": "yes"
-          },
-          "oozie-env": {
-            "merged-copy": "yes"
-          },
-          "hive-site": {
-            "merged-copy": "yes"
-          },
-          "mapred-site": {
-            "merged-copy": "yes"
-          },
-          "mapred-env": {
-            "merged-copy": "yes"
-          },
-          "cluster-env" : {
-            "merged-copy": "yes"
-          },
-          "sqoop-env": {
-            "merged-copy": "yes"
-          },
-          "pig-env": {
-            "merged-copy": "yes"
-          },
-          "pig-properties": {
-            "merged-copy": "yes"
-          },
-          "webhcat-site": {
-            "merged-copy": "yes"
-          },
-          "zookeeper-env": {
-            "merged-copy": "yes"
-          },
-          "hadoop-env": {
-            "merged-copy": "yes"
-          },
-          "hadoop-policy": {
-            "merged-copy": "yes"
-          },
-          "hbase-log4j": {
-            "merged-copy": "yes"
-          },
-          "hdfs-log4j": {
-            "merged-copy": "yes"
-          },
-          "hive-env": {
-            "merged-copy": "yes"
-          },
-          "hive-exec-log4j": {
-            "merged-copy": "yes"
-          },
-          "hive-log4j": {
-            "merged-copy": "yes"
-          },
-          "oozie-site": {
-            "merged-copy": "yes"
-          }
-        }
-      },
-      "properties": {
-        "oozie-site": {
-          "oozie.service.ProxyUserService.proxyuser.falcon.hosts": "*",
-          "oozie.service.coord.check.maximum.frequency": "false",
-          "oozie.service.SchemaService.wf.ext.schemas": "shell-action-0.1.xsd,shell-action-0.2.xsd,shell-action-0.3.xsd,email-action-0.1.xsd,email-action-0.2.xsd,hive-action-0.2.xsd,hive-action-0.3.xsd,hive-action-0.4.xsd,hive-action-0.5.xsd,sqoop-action-0.2.xsd,sqoop-action-0.3.xsd,sqoop-action-0.4.xsd,ssh-action-0.1.xsd,ssh-action-0.2.xsd,distcp-action-0.1.xsd,distcp-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd",
-          "oozie.service.HadoopAccessorService.kerberos.enabled": "false",
-          "oozie.service.coord.push.check.requeue.interval": "30000",
-          "oozie.credentials.credentialclasses": "hcat=org.apache.oozie.action.hadoop.HCatCredentials",
-          "oozie.authentication.kerberos.name.rules": "\n      RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/\n      RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/\n      RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n      RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n      DEFAULT",
-          "oozie.authentication.simple.anonymous.allowed": "true",
-          "oozie.service.AuthorizationService.security.enabled": "true",
-          "oozie.services.ext": "org.apache.oozie.service.JMSAccessorService,org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService",
-          "oozie.services": "\n      org.apache.oozie.service.SchedulerService,\n      org.apache.oozie.service.InstrumentationService,\n      org.apache.oozie.service.MemoryLocksService,\n      org.apache.oozie.service.UUIDService,\n      org.apache.oozie.service.ELService,\n      org.apache.oozie.service.AuthorizationService,\n      org.apache.oozie.service.UserGroupInformationService,\n      org.apache.oozie.service.HadoopAccessorService,\n      org.apache.oozie.service.JobsConcurrencyService,\n      org.apache.oozie.service.URIHandlerService,\n      org.apache.oozie.service.DagXLogInfoService,\n      org.apache.oozie.service.SchemaService,\n      org.apache.oozie.service.LiteWorkflowAppService,\n      org.apache.oozie.service.JPAService,\n      org.apache.oozie.service.StoreService,\n      org.apache.oozie.service.CoordinatorStoreService,\n      org.apache.oozie.service.SLAStoreService,\n      org.apache.oozie.service.DBLiteWorkflowStoreService,\n      org.apache.oozie.service.C
 allbackService,\n      org.apache.oozie.service.ShareLibService,\n      org.apache.oozie.service.CallableQueueService,\n      org.apache.oozie.service.ActionService,\n      org.apache.oozie.service.ActionCheckerService,\n      org.apache.oozie.service.RecoveryService,\n      org.apache.oozie.service.PurgeService,\n      org.apache.oozie.service.CoordinatorEngineService,\n      org.apache.oozie.service.BundleEngineService,\n      org.apache.oozie.service.DagEngineService,\n      org.apache.oozie.service.CoordMaterializeTriggerService,\n      org.apache.oozie.service.StatusTransitService,\n      org.apache.oozie.service.PauseTransitService,\n      org.apache.oozie.service.GroupsService,\n      org.apache.oozie.service.ProxyUserService,\n      org.apache.oozie.service.XLogStreamingService,\n      org.apache.oozie.service.JvmPauseMonitorService",
-          "oozie.service.URIHandlerService.uri.handlers": "org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler",
-          "oozie.service.ProxyUserService.proxyuser.falcon.groups": "*",
-          "oozie.service.ActionService.executor.ext.classes": "\n      org.apache.oozie.action.email.EmailActionExecutor,\n      org.apache.oozie.action.hadoop.HiveActionExecutor,\n      org.apache.oozie.action.hadoop.ShellActionExecutor,\n      org.apache.oozie.action.hadoop.SqoopActionExecutor,\n      org.apache.oozie.action.hadoop.DistcpActionExecutor"
-        },
-        "hive-log4j": {
-          "content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Define some default values that can be overridden by system properties\nhive.log.threshold=ALL\nhive.root.logger=INFO,DRFA\nhive.log.dir=${java.io.tmpdir}/${user.name
 }\nhive.log.file=hive.log\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hive.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshold=${hive.log.threshold}\n\n#\n# Daily Rolling File Appender\n#\n# Use the PidDailyerRollingFileAppend class instead if you want to use separate log files\n# for different CLI session.\n#\n# log4j.appender.DRFA=org.apache.hadoop.hive.ql.log.PidDailyRollingFileAppender\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\n\nlog4j.appender.DRFA.File=${hive.log.dir}/${hive.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t]:
  %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n\nlog4j.appender.console.encoding=UTF-8\n\n#custom logging levels\n#log4j.logger.xxx=DEBUG\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter\n\n\nlog4j.category.DataNucleus=ERROR,DRFA\nlog4j.category.Datastore=ERROR,DRFA\nlog4j.category.Datastore.Schema=ERROR,DRFA\nlog4j.category.JPOX.Datastore=ERROR,DRFA\nlog4j.category.JPOX.Plugin=ERROR,DRFA\nlog4j.category.JPOX.MetaData=ERROR,DRFA\nlog4j.category.JPOX.Query=ERROR,DRFA\nlog4j.category.JPOX.General=ERROR,DRFA\nlog4j.category.JPOX.Enha
 ncer=ERROR,DRFA\n\n\n# Silence useless ZK logs\nlog4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,DRFA\nlog4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,DRFA"
-        },
-        "hive-exec-log4j": {
-          "content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Define some default values that can be overridden by system properties\n\nhive.log.threshold=ALL\nhive.root.logger=INFO,FA\nhive.log.dir=${java.io.tmpdir}/${user.name
 }\nhive.query.id=hadoop\nhive.log.file=${hive.query.id}.log\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hive.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=${hive.log.threshold}\n\n#\n# File Appender\n#\n\nlog4j.appender.FA=org.apache.log4j.FileAppender\nlog4j.appender.FA.File=${hive.log.dir}/${hive.log.file}\nlog4j.appender.FA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\nlog4j.appender.FA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:s
 s} %p %c{2}: %m%n\n\n#custom logging levels\n#log4j.logger.xxx=DEBUG\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter\n\n\nlog4j.category.DataNucleus=ERROR,FA\nlog4j.category.Datastore=ERROR,FA\nlog4j.category.Datastore.Schema=ERROR,FA\nlog4j.category.JPOX.Datastore=ERROR,FA\nlog4j.category.JPOX.Plugin=ERROR,FA\nlog4j.category.JPOX.MetaData=ERROR,FA\nlog4j.category.JPOX.Query=ERROR,FA\nlog4j.category.JPOX.General=ERROR,FA\nlog4j.category.JPOX.Enhancer=ERROR,FA\n\n\n# Silence useless ZK logs\nlog4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,FA\nlog4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,FA"
-        },
-        "hive-env": {
-          "content": "\n if [ \"$SERVICE\" = \"cli\" ]; then\n   if [ -z \"$DEBUG\" ]; then\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit\"\n   else\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n   fi\n fi\n\n# The heap size of the jvm stared by hive shell script can be controlled via:\n\nexport HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n\n# Larger heap size may be required when running queries over large number of files or partitions.\n# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be\n# appropriate for hive server (hwi etc).\n\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hive Configuration Directory 
 can be controlled by:\nexport HIVE_CONF_DIR={{hive_config_dir}}\n\n# Folder containing extra libraries required for hive compilation/execution can be controlled by:\nif [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\nelif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog\nfi\n\nexport METASTORE_PORT={{hive_metastore_port}}",
-          "hive_aux_jars_path": {"remove":"yes"},
-          "hive_conf_dir": {"remove":"yes"},
-          "hive_dbroot": {"remove":"yes"}
-        },
-        "hdfs-log4j": {
-          "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.logg
 er=INFO,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.c
 onsole.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logger
 }\nhadoop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=fa
 lse\nlog4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file
 }\n\n# Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlo
 g4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateChange=WARN"
-        },
-        "hbase-log4j": {
-          "content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# Define some default values that can be overridden by system properties\nhbase.root.logger=INFO,console\nhbase.security.logger=INFO,console\nhbase.log.dir=.\nhbase.l
 og.file=hbase.log\n\n# Define the root logger to the system property \"hbase.root.logger\".\nlog4j.rootLogger=${hbase.root.logger}\n\n# Logging Threshold\nlog4j.threshold=ALL\n\n#\n# Daily Rolling File Appender\n#\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n# Rolling File Appender properties\nhbase.log.maxfilesize=256MB\nhbase.log.maxbackupindex=20\n\n# Rolling File Appender\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}\n\nlog4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}\nlog4j.appender.RFA.MaxBackupIndex=
 ${hbase.log.maxbackupindex}\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n#\n# Security audit appender\n#\nhbase.security.log.file=SecurityAuth.audit\nhbase.security.log.maxfilesize=256MB\nhbase.security.log.maxbackupindex=20\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}\nlog4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.category.SecurityLogger=${hbase.security.logger}\nlog4j.additivity.SecurityLogger=false\n#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE\n\n#\n# Null Appender\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\
 n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n\n# Custom Logging levels\n\nlog4j.logger.org.apache.zookeeper=INFO\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.hbase=DEBUG\n# Make these two classes INFO-level. Make them DEBUG to see more zk debug.\nlog4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO\nlog4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO\n#log4j.logger.org.apache.hadoop.dfs=DEBUG\n# Set this class to log INFO only otherwise its OTT\n# Enable this to get detailed connection error/retry logging.\n# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE\n\n\n# Uncomment this line to enable tra
 cing on _every_ RPC call (this can be a lot of output)\n#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG\n\n# Uncomment the below if you want to remove logging of client region caching'\n# and scan of .META. messages\n# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO\n# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO"
-        },
-        "hadoop-policy": {
-          "security.refresh.usertogroups.mappings.protocol.acl": "hadoop",
-          "security.refresh.policy.protocol.acl": "hadoop",
-          "security.admin.operations.protocol.acl": "hadoop",
-          "security.inter.datanode.protocol.acl": "*"
-        },
-        "hadoop-env": {
-          "namenode_formatted_mark_dir": {"remove": "yes"},
-          "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended 
 to HADOOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps
  -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following a
 pplies to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOO
 P_MASTER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/ja
 va/*mysql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS
 \""
-        },
-        "zookeeper-env": {
-          "content": "\nexport JAVA_HOME={{java64_home}}\nexport ZOOKEEPER_HOME={{zk_home}}\nexport ZOO_LOG_DIR={{zk_log_dir}}\nexport ZOOPIDFILE={{zk_pid_file}}\nexport SERVER_JVMFLAGS={{zk_server_heapsize}}\nexport JAVA=$JAVA_HOME/bin/java\nexport CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*\n\n{% if security_enabled %}\nexport SERVER_JVMFLAGS=\"$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}\"\nexport CLIENT_JVMFLAGS=\"$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}\"\n{% endif %}"
-        },
-        "webhcat-site": {
-            "templeton.hcat.home": "hive.tar.gz/hive/hcatalog",
-            "templeton.jar": "/usr/hdp/current/hive-webhcat/share/webhcat/svr/lib/hive-webhcat-*.jar",
-            "templeton.sqoop.home": "sqoop.tar.gz/sqoop",
-            "templeton.storage.class": "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage",
-            "templeton.sqoop.archive": "hdfs:///hdp/apps/${hdp.version}/sqoop/sqoop.tar.gz",
-            "templeton.hive.archive": "hdfs:///hdp/apps/${hdp.version}/hive/hive.tar.gz",
-            "templeton.streaming.jar": "hdfs:///hdp/apps/${hdp.version}/mapreduce/hadoop-streaming.jar",
-            "templeton.libjars": "/usr/hdp/current/zookeeper-client/zookeeper.jar",
-            "templeton.hadoop": "/usr/hdp/current/hadoop-client/bin/hadoop",
-            "templeton.hive.home": "hive.tar.gz/hive",
-            "templeton.sqoop.path": "sqoop.tar.gz/sqoop/bin/sqoop",
-            "templeton.hcat": "/usr/hdp/current/hive-client/bin/hcat",
-            "templeton.pig.archive": "hdfs:///hdp/apps/${hdp.version}/pig/pig.tar.gz"
-          },
-        "pig-properties": {
-            "content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.\n# see bin/pig -help\n\n# brief logging (no timestamps
 )\nbrief=false\n\n# debug level, INFO is default\ndebug=INFO\n\n# verbose print all log messages to screen (default to print only INFO and above to screen)\nverbose=false\n\n# exectype local|mapreduce, mapreduce is default\nexectype=mapreduce\n\n# Enable insertion of information about script into hadoop job conf \npig.script.info.enabled=true\n\n# Do not spill temp files smaller than this size (bytes)\npig.spill.size.threshold=5000000\n\n# EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)\n# This should help reduce the number of files being spilled.\npig.spill.gc.activation.size=40000000\n\n# the following two parameters are to help estimate the reducer number\npig.exec.reducers.bytes.per.reducer=1000000000\npig.exec.reducers.max=999\n\n# Temporary location to store the intermediate data.\npig.temp.dir=/tmp/\n\n# Threshold for merging FRJoin fragment files\npig.files.concatenation.threshold=100\npig.optimistic.files.concatenation=false;\n\npi
 g.disable.counter=false\n\n# Avoid pig failures when multiple jobs write to the same location\npig.location.check.strict=false\n\nhcat.bin=/usr/bin/hcat"
-          },
-        "pig-env" : {
-          "content": "\nJAVA_HOME={{java64_home}}\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\nif [ -d \"/usr/lib/tez\" ]; then\n  PIG_OPTS=\"$PIG_OPTS -Dmapreduce.framework.name=yarn\"\nfi"
-        },
-        "sqoop-env": {
-           "content": "\n# Set Hadoop-specific environment variables here.\n\n#Set path to where bin/hadoop is available\n#Set path to where bin/hadoop is available\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n#set the path to where bin/hbase is available\nexport HBASE_HOME=${HBASE_HOME:-{{hbase_home}}}\n\n#Set the path to where bin/hive is available\nexport HIVE_HOME=${HIVE_HOME:-{{hive_home}}}\n\n#Set the path for where zookeper config dir is\nexport ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}\n\n# add libthrift in hive to sqoop class path first so hive imports work\nexport SQOOP_USER_CLASSPATH=\"`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}\""
-        },
-        "cluster-env": {
-            "ignore_groupsusers_create": "false"
-        },
-        "capacity-scheduler": {
-          "yarn.scheduler.capacity.maximum-am-resource-percent": "0.2",
-          "yarn.scheduler.capacity.maximum-applications": "10000",
-          "yarn.scheduler.capacity.root.acl_administer_queue": "*",
-          "yarn.scheduler.capacity.root.capacity": "100",
-          "yarn.scheduler.capacity.root.default.acl_administer_jobs": "*",
-          "yarn.scheduler.capacity.root.default.acl_submit_applications": "*",
-          "yarn.scheduler.capacity.root.default.capacity": "100",
-          "yarn.scheduler.capacity.root.default.maximum-capacity": "100",
-          "yarn.scheduler.capacity.root.default.state": "RUNNING",
-          "yarn.scheduler.capacity.root.default.user-limit-factor": "1",
-          "yarn.scheduler.capacity.root.queues": "default",
-          "yarn.scheduler.capacity.root.accessible-node-labels.default.capacity": {"remove": "yes"},
-          "yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity": {"remove": "yes"},
-          "yarn.scheduler.capacity.resource-calculator": "org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator",
-          "yarn.scheduler.capacity.default.minimum-user-limit-percent": "100",
-          "yarn.scheduler.capacity.node-locality-delay": "40",
-          "yarn.scheduler.capacity.root.default.user-limit-factor": "1",
-          "yarn.scheduler.capacity.root.accessible-node-labels": "*",
-          "yarn.scheduler.capacity.root.default-node-label-expression": " "
-        },
-        "hbase-env": {
-          "content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable JMX 
 exporting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{log_dir}}\n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER
 \n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{re
 gionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% else %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n{% endif %}"
-        },
-        "mapred-site": {
-          "hadoop.job.history.location": {"remove": "yes"},
-          "hadoop.job.history.user.location": {"remove": "yes"},
-          "io.sort.record.percent": {"remove": "yes"},
-          "jetty.connector": {"remove": "yes"},
-          "mapred.child.java.opts": {"remove": "yes"},
-          "mapred.child.root.logger": {"remove": "yes"},
-          "mapred.create.symlink": {"remove": "yes"},
-          "mapred.fairscheduler.allocation.file": {"remove": "yes"},
-          "mapred.fairscheduler.assignmultiple": {"remove": "yes"},
-          "mapreduce.job.priority": {"remove": "yes"},
-          "mapred.jobtracker.blacklist.fault-bucket-width": {"remove": "yes"},
-          "mapred.jobtracker.blacklist.fault-timeout-window": {"remove": "yes"},
-          "mapred.jobtracker.completeuserjobs.maximum": {"remove": "yes"},
-          "mapred.jobtracker.job.history.block.size": {"remove": "yes"},
-          "mapred.jobtracker.retirejob.check": {"remove": "yes"},
-          "mapred.jobtracker.retirejob.interval": {"remove": "yes"},
-          "mapred.jobtracker.taskScheduler": {"remove": "yes"},
-          "mapred.permissions.supergroup": {"remove": "yes"},
-          "mapred.queue.names": {"remove": "yes"},
-          "mapreduce.cluster.acls.enabled": {"remove": "yes"},
-          "mapreduce.cluster.local.dir": {"remove": "yes"},
-          "mapreduce.cluster.mapmemory.mb": {"remove": "yes"},
-          "mapreduce.cluster.permissions.supergroup": {"remove": "yes"},
-          "mapreduce.cluster.reducememory.mb": {"remove": "yes"},
-          "mapreduce.cluster.temp.dir": {"remove": "yes"},
-          "mapreduce.jobtracker.jobinit.threads": {"remove": "yes"},
-          "mapreduce.jobtracker.permissions.supergroup": {"remove": "yes"},
-          "mapreduce.job.cache.symlink.create": {"remove": "yes"},
-          "mapreduce.job.speculative.slownodethreshold": {"remove": "yes"},
-          "mapreduce.job.userlog.retain.hours": {"remove": "yes"},
-          "mapreduce.admin.map.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
-          "mapreduce.admin.reduce.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
-          "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
-          "mapreduce.am.max-attempts": "2",
-          "mapreduce.application.classpath": "$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
-          "mapreduce.fileoutputcommitter.marksuccessfuljobs": {"remove": "yes"},
-          "mapreduce.framework.name": "yarn",
-          "mapreduce.history.server.embedded": {"remove": "yes"},
-          "mapreduce.history.server.http.address": {"remove": "yes"},
-          "apreduce.job.committer.setup.cleanup.needed": {"remove": "yes"},
-          "mapreduce.job.jvm.numtasks": {"remove": "yes"},
-          "mapreduce.jobhistory.address": {
-            "value": "{JOBHISTORY_HOST}:10020",
-            "template": "yes"
-          },
-          "mapreduce.jobhistory.done-dir": "/mr-history/done",
-          "mapreduce.jobhistory.intermediate-done-dir": "/mr-history/tmp",
-          "mapreduce.jobhistory.webapp.address": {
-            "value": "{JOBHISTORY_HOST}:19888",
-            "template": "yes"
-          },
-          "mapreduce.jobtracker.address": {"remove": "yes"},
-          "mapreduce.jobtracker.blacklist.average.threshold": {"remove": "yes"},
-          "mapreduce.jobtracker.expire.trackers.interval": {"remove": "yes"},
-          "mapreduce.jobtracker.handler.count": {"remove": "yes"},
-          "mapreduce.jobtracker.heartbeats.in.second": {"remove": "yes"},
-          "mapreduce.jobtracker.hosts.exclude.filename": {"remove": "yes"},
-          "mapreduce.jobtracker.hosts.filename": {"remove": "yes"},
-          "mapreduce.jobtracker.http.address": {"remove": "yes"},
-          "mapreduce.jobtracker.instrumentation": {"remove": "yes"},
-          "mapreduce.jobtracker.jobhistory.block.size": {"remove": "yes"},
-          "mapreduce.jobtracker.jobhistory.location": {"remove": "yes"},
-          "mapreduce.jobtracker.jobhistory.lru.cache.size": {"remove": "yes"},
-          "mapreduce.jobtracker.maxmapmemory.mb": {"remove": "yes"},
-          "mapreduce.jobtracker.maxreducememory.mb": {"remove": "yes"},
-          "mapreduce.jobtracker.maxtasks.perjob": {"remove": "yes"},
-          "mapreduce.jobtracker.persist.jobstatus.active": {"remove": "yes"},
-          "mapreduce.jobtracker.persist.jobstatus.dir": {"remove": "yes"},
-          "mapreduce.jobtracker.persist.jobstatus.hours": {"remove": "yes"},
-          "mapreduce.jobtracker.restart.recover": {"remove": "yes"},
-          "mapreduce.jobtracker.retiredjobs.cache.size": {"remove": "yes"},
-          "mapreduce.jobtracker.retirejobs": {"remove": "yes"},
-          "mapreduce.jobtracker.split.metainfo.maxsize": {"remove": "yes"},
-          "mapreduce.jobtracker.staging.root.dir": {"remove": "yes"},
-          "mapreduce.jobtracker.system.dir": {"remove": "yes"},
-          "mapreduce.jobtracker.taskcache.levels": {"remove": "yes"},
-          "mapreduce.jobtracker.taskscheduler": {"remove": "yes"},
-          "mapreduce.jobtracker.taskscheduler.maxrunningtasks.perjob": {"remove": "yes"},
-          "mapreduce.jobtracker.taskscheduler.taskalloc.capacitypad": {"remove": "yes"},
-          "mapreduce.jobtracker.tasktracker.maxblacklists": {"remove": "yes"},
-          "mapreduce.jobtracker.webinterface.trusted": {"remove": "yes"},
-          "mapreduce.map.java.opts": "-Xmx546m",
-          "mapreduce.map.log.level": "INFO",
-          "mapreduce.map.memory.mb": "1024",
-          "mapreduce.map.output.compress": "false",
-          "mapreduce.map.output.compress.codec": {"remove": "yes"},
-          "mapreduce.map.sort.spill.percent": "0.7",
-          "mapreduce.output.fileoutputformat.compress": "false",
-          "mapreduce.reduce.input.limit": {"remove": "yes"},
-          "mapreduce.reduce.java.opts": "-Xmx756m",
-          "mapreduce.reduce.log.level": "INFO",
-          "mapreduce.reduce.memory.mb": "1024",
-          "mapreduce.reduce.merge.inmem.threshold": {"remove": "yes"},
-          "mapreduce.shuffle.port": "13562",
-          "mapreduce.task.timeout": "300000",
-          "mapreduce.task.userlog.limit.kb": {"remove": "yes"},
-          "mapreduce.tasktracker.cache.local.size": {"remove": "yes"},
-          "mapreduce.tasktracker.contention.tracking": {"remove": "yes"},
-          "mapreduce.tasktracker.dns.interface": {"remove": "yes"},
-          "mapreduce.tasktracker.dns.nameserver": {"remove": "yes"},
-          "mapreduce.tasktracker.events.batchsize": {"remove": "yes"},
-          "mapreduce.tasktracker.group": {"remove": "yes"},
-          "mapreduce.tasktracker.healthchecker.interval": {"remove": "yes"},
-          "mapreduce.tasktracker.healthchecker.script.args": {"remove": "yes"},
-          "mapreduce.tasktracker.healthchecker.script.path": {"remove": "yes"},
-          "mapreduce.tasktracker.healthchecker.script.timeout": {"remove": "yes"},
-          "mapreduce.tasktracker.host.name": {"remove": "yes"},
-          "mapreduce.tasktracker.http.address": {"remove": "yes"},
-          "mapreduce.tasktracker.http.threads": {"remove": "yes"},
-          "mapreduce.tasktracker.indexcache.mb": {"remove": "yes"},
-          "mapreduce.tasktracker.instrumentation": {"remove": "yes"},
-          "mapreduce.tasktracker.local.dir.minspacekill": {"remove": "yes"},
-          "mapreduce.tasktracker.local.dir.minspacestart": {"remove": "yes"},
-          "mapreduce.tasktracker.map.tasks.maximum": {"remove": "yes"},
-          "mapreduce.tasktracker.net.static.resolutions": {"remove": "yes"},
-          "mapreduce.tasktracker.reduce.tasks.maximum": {"remove": "yes"},
-          "mapreduce.tasktracker.report.address": {"remove": "yes"},
-          "mapreduce.tasktracker.resourcecalculatorplugin": {"remove": "yes"},
-          "mapreduce.tasktracker.taskcontroller": {"remove": "yes"},
-          "mapreduce.tasktracker.taskmemorymanager.monitoringinterval": {"remove": "yes"},
-          "mapreduce.tasktracker.tasks.sleeptimebeforesigkill": {"remove": "yes"},
-          "yarn.app.mapreduce.am.admin-command-opts": "-Dhdp.version=${hdp.version}",
-          "yarn.app.mapreduce.am.command-opts": "-Xmx312m -Dhdp.version=${hdp.version}",
-          "yarn.app.mapreduce.am.log.level": "INFO",
-          "yarn.app.mapreduce.am.resource.mb": "512",
-          "yarn.app.mapreduce.am.staging-dir": "/user",
-          "mapreduce.reduce.shuffle.fetch.retry.enabled": "1",
-          "mapreduce.application.framework.path": "/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework",
-          "mapreduce.reduce.shuffle.fetch.retry.timeout-ms": "30000",
-          "mapreduce.reduce.shuffle.fetch.retry.interval-ms": "1000",
-          "mapreduce.job.emit-timeline-data": "false",
-          "mapreduce.jobhistory.bind-host": "0.0.0.0"
-        },
-        "yarn-env": {
-          "apptimelineserver_heapsize": "1024",
-          "content": "\nexport HADOOP_YARN_HOME={{hadoop_yarn_home}}\nexport YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER\nexport YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\nexport JAVA_HOME={{java64_home}}\n\n# User for YARN daemons\nexport HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}\n\n# resolve links - $0 may be a softlink\nexport YARN_CONF_DIR=\"${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}\"\n\n# some Java parameters\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\nif [ \"$JAVA_HOME\" != \"\" ]; then\n  #echo \"run java in $JAVA_HOME\"\n  JAVA_HOME=$JAVA_HOME\nfi\n\nif [ \"$JAVA_HOME\" = \"\" ]; then\n  echo \"Error: JAVA_HOME is not set.\"\n  exit 1\nfi\n\nJAVA=$JAVA_HOME/bin/java\nJAVA_HEAP_MAX=-Xmx1000m\n\n# For setting YARN specific HEAP sizes please use this\n# Parameter and set appropriately\nYARN_HEAPSIZE={{yarn_heapsize}}\n\n# check envvars which might override default args\nif [ \"$YARN_HEAPSIZE\" != \"\" ]; then\n  JAVA_HEAP_MAX
 =\"-Xmx\"\"$YARN_HEAPSIZE\"\"m\"\nfi\n\n# Resource Manager specific parameters\n\n# Specify the max Heapsize for the ResourceManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_RESOURCEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}\n\n# Specify the JVM options to be used when starting the ResourceManager.\n# These options will be appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_RESOURCEMANAGER_OPTS=\n\n# Node Manager specific parameters\n\n# Specify the max Heapsize for the NodeManager using a numerical value\n# in the scale of MB. For example, to sp
 ecify an jvm option of -Xmx1000m, set\n# the value to 1000.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_NODEMANAGER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}\n\n# Specify the max Heapsize for the HistoryManager using a numerical value\n# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n# the value to 1024.\n# This value will be overridden by an Xmx setting specified in either YARN_OPTS\n# and/or YARN_HISTORYSERVER_OPTS.\n# If not specified, the default value will be picked from either YARN_HEAPMAX\n# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\nexport YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}}\n\n# Specify the JVM options to be used when starting the NodeManager.\n# These options will be 
 appended to the options specified as YARN_OPTS\n# and therefore may override any similar flags set in YARN_OPTS\n#export YARN_NODEMANAGER_OPTS=\n\n# so that filenames w/ spaces are handled correctly in loops below\nIFS=\n\n\n# default log directory and file\nif [ \"$YARN_LOG_DIR\" = \"\" ]; then\n  YARN_LOG_DIR=\"$HADOOP_YARN_HOME/logs\"\nfi\nif [ \"$YARN_LOGFILE\" = \"\" ]; then\n  YARN_LOGFILE='yarn.log'\nfi\n\n# default policy file for service-level authorization\nif [ \"$YARN_POLICYFILE\" = \"\" ]; then\n  YARN_POLICYFILE=\"hadoop-policy.xml\"\nfi\n\n# restore ordinary behaviour\nunset IFS\n\n\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING\"\nYARN_OPTS=\"$YARN_OPTS -Dhadoop.root.logger=${
 YARN_ROOT_LOGGER:-INFO,console}\"\nYARN_OPTS=\"$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}\"\nif [ \"x$JAVA_LIBRARY_PATH\" != \"x\" ]; then\n  YARN_OPTS=\"$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH\"\nfi\nYARN_OPTS=\"$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE\"",
-          "min_user_id": "1000",
-          "nodemanager_heapsize": "1024",
-          "resourcemanager_heapsize": "1024",
-          "yarn_heapsize": "1024",
-          "yarn_log_dir_prefix": "/var/log/hadoop-yarn",
-          "yarn_pid_dir_prefix": "/var/run/hadoop-yarn",
-          "yarn_user": "yarn"
-        },
-        "oozie-env": {
-          "content": "\n#!/bin/bash\n\nif [ -d \"/usr/lib/bigtop-tomcat\" ]; then\n  export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}\n  #export CATALINA_BASE=${CATALINA_BASE:-{{oozie_server_dir}}}\n  export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}\n  export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat\nfi\n\n#Set JAVA HOME\nexport JAVA_HOME={{java_home}}\n\nexport JRE_HOME=${JAVA_HOME}\n\n# Set Oozie specific environment variables here.\n\n# Settings for the Embedded Tomcat that runs Oozie\n# Java System properties for Oozie should be specified in this variable\n#\n# export CATALINA_OPTS=\n\n# Oozie configuration file to load from Oozie configuration directory\n#\n# export OOZIE_CONFIG_FILE=oozie-site.xml\n\n# Oozie logs directory\n#\nexport OOZIE_LOG={{oozie_log_dir}}\n\n# Oozie pid directory\n#\nexport CATALINA_PID={{pid_file}}\n\n#Location of the data for oozie\nexport OOZIE_DATA={{oozie_data_dir}}\n\n# Oozie Log4J configuration file to load from Oozie configuration 
 directory\n#\n# export OOZIE_LOG4J_FILE=oozie-log4j.properties\n\n# Reload interval of the Log4J configuration file, in seconds\n#\n# export OOZIE_LOG4J_RELOAD=10\n\n# The port Oozie server runs\n#\nexport OOZIE_HTTP_PORT={{oozie_server_port}}\n\n# The admin port Oozie server runs\n#\nexport OOZIE_ADMIN_PORT={{oozie_server_admin_port}}\n\n# The host name Oozie server runs on\n#\n# export OOZIE_HTTP_HOSTNAME=`hostname -f`\n\n# The base URL for callback URLs to Oozie\n#\n# export OOZIE_BASE_URL=\"http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie\"\nexport JAVA_LIBRARY_PATH={{hadoop_lib_home}}/native/Linux-amd64-64\n\n# At least 1 minute of retry time to account for server downtime during\n# upgrade/downgrade\nexport OOZIE_CLIENT_OPTS=\"${OOZIE_CLIENT_OPTS} -Doozie.connection.retry.count=5 \"\n\n# This is needed so that Oozie does not run into OOM or GC Overhead limit\n# exceeded exceptions. If the oozie server is handling large number of\n# workflows/coordinator jobs, the memory 
 settings may need to be revised\nexport CATALINA_OPTS=\"${CATALINA_OPTS} -Xmx2048m -XX:MaxPermSize=256m \"",
-          "oozie_heapsize": "2048m",
-          "oozie_permsize": "256m",
-          "oozie_ambari_database": "MySQL",
-          "oozie_existing_mysql_host": "",
-          "oozie_existing_oracle_host": "",
-          "oozie_existing_postgresql_host": ""
-        },
-        "yarn-log4j":{
-          "content": "\n#Relative to Yarn Log Dir Prefix\nyarn.log.dir=.\n#\n# Job Summary Appender\n#\n# Use following logger to send summary to separate file defined by\n# hadoop.mapreduce.jobsummary.log.file rolled daily:\n# hadoop.mapreduce.jobsummary.logger=INFO,JSA\n#\nhadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}\nhadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log\nlog4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender\n# Set the ResourceManager summary log filename\nyarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log\n# Set the ResourceManager summary log level and appender\nyarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}\n#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\n\n# To enable AppSummaryLogging for the RM,\n# set yarn.server.resourcemanager.appsummary.logger to\n# LEVEL,RMSUMMARY in hadoop-env.sh\n\n# Appender for ResourceManager Application Summary Log\n# Requires the fo
 llowing properties to be set\n#    - hadoop.log.dir (Hadoop Log directory)\n#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)\n#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)\nlog4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender\nlog4j.appender.RMSUMMARY.File=${yarn.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}\nlog4j.appender.RMSUMMARY.MaxFileSize=256MB\nlog4j.appender.RMSUMMARY.MaxBackupIndex=20\nlog4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.JSA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\nlog4j.appender.JSA.DatePattern=.yyyy-MM-dd\nlog4j.appender.JSA.layout=org.apache.log4j.PatternLayout\nlog4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$Applicat
 ionSummary=${yarn.server.resourcemanager.appsummary.logger}\nlog4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false"
-        },
-        "yarn-site": {
-          "hadoop.registry.rm.enabled": "false",
-          "hadoop.registry.zk.quorum": {
-            "value": "{ZOOKEEPER_QUORUM}",
-            "template": "yes"
-          },
-          "yarn.acl.enable": "false",
-          "yarn.admin.acl": "",
-          "yarn.application.classpath": "$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*",
-          "yarn.client.nodemanager-connect.max-wait-ms": "900000",
-          "yarn.client.nodemanager-connect.retry-interval-ms": "10000",
-          "yarn.log-aggregation-enable": "true",
-          "yarn.log-aggregation.retain-seconds": "2592000",
-          "yarn.log.server.url": {
-           "value": "http://{JOBHISTORY_HOST}:19888/jobhistory/logs",
-           "template": "yes"
-          },
-          "yarn.node-labels.fs-store.retry-policy-spec": "2000, 500",
-          "yarn.node-labels.fs-store.root-dir": "/system/yarn/node-labels",
-          "yarn.node-labels.manager-class": "org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager",
-          "yarn.nodemanager.address": "0.0.0.0:45454",
-          "yarn.nodemanager.admin-env": "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX",
-          "yarn.nodemanager.aux-services": "mapreduce_shuffle",
-          "yarn.nodemanager.aux-services.mapreduce_shuffle.class": "org.apache.hadoop.mapred.ShuffleHandler",
-          "yarn.nodemanager.bind-host": "0.0.0.0",
-          "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor",
-          "yarn.nodemanager.container-monitor.interval-ms": "3000",
-          "yarn.nodemanager.delete.debug-delay-sec": "0",
-          "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage": "90",
-          "yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb": "1000",
-          "yarn.nodemanager.disk-health-checker.min-healthy-disks": "0.25",
-          "yarn.nodemanager.health-checker.interval-ms": "135000",
-          "yarn.nodemanager.health-checker.script.timeout-ms": "60000",
-          "yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "hadoop-yarn",
-          "yarn.nodemanager.linux-container-executor.cgroups.mount": "false",
-          "yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage": "false",
-          "yarn.nodemanager.linux-container-executor.group": "hadoop",
-          "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler",
-          "yarn.nodemanager.local-dirs": "/hadoop/yarn/local",
-          "yarn.nodemanager.log-aggregation.compression-type": "gz",
-          "yarn.nodemanager.log-aggregation.debug-enabled": "false",
-          "yarn.nodemanager.log-aggregation.num-log-files-per-app": "30",
-          "yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds": "-1",
-          "yarn.nodemanager.log-dirs": "/hadoop/yarn/log",
-          "yarn.nodemanager.log.retain-seconds": "604800",
-          "yarn.nodemanager.recovery.dir": "/var/log/hadoop-yarn/nodemanager/recovery-state",
-          "yarn.nodemanager.recovery.enabled": "false",
-          "yarn.nodemanager.remote-app-log-dir": "/app-logs",
-          "yarn.nodemanager.remote-app-log-dir-suffix": "logs",
-          "yarn.nodemanager.resource.cpu-vcores": "1",
-          "yarn.nodemanager.resource.memory-mb": "2048",
-          "yarn.nodemanager.resource.percentage-physical-cpu-limit": "100",
-          "yarn.nodemanager.vmem-check-enabled": "false",
-          "yarn.nodemanager.vmem-pmem-ratio": "2.1",
-          "yarn.resourcemanager.address": {
-            "value": "{RESOURCEMANAGER_HOST}:8050",
-            "template": "yes"
-          },
-          "yarn.resourcemanager.admin.address": {
-            "value": "{RESOURCEMANAGER_HOST}:8141",
-            "template": "yes"
-          },
-          "yarn.resourcemanager.am.max-attempts": "2",
-          "yarn.resourcemanager.bind-host": "0.0.0.0",
-          "yarn.resourcemanager.connect.max-wait.ms": "900000",
-          "yarn.resourcemanager.connect.retry-interval.ms": "30000",
-          "yarn.resourcemanager.fs.state-store.retry-policy-spec": "2000, 500",
-          "yarn.resourcemanager.fs.state-store.uri": " ",
-          "yarn.resourcemanager.ha.enabled": "false",
-          "yarn.resourcemanager.hostname": {
-            "value": "{RESOURCEMANAGER_HOST}",
-            "template": "yes"
-          },
-          "yarn.resourcemanager.nodes.exclude-path": "/etc/hadoop/conf/yarn.exclude",
-          "yarn.resourcemanager.recovery.enabled": "false",
-          "yarn.resourcemanager.resource-tracker.address": {
-            "value": "{RESOURCEMANAGER_HOST}:8025",
-            "template": "yes"
-          },
-          "yarn.resourcemanager.scheduler.address": {
-            "value": "{RESOURCEMANAGER_HOST}:8030",
-            "template": "yes"
-          },
-          "yarn.resourcemanager.scheduler.class": "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler",
-          "yarn.resourcemanager.state-store.max-completed-applications": "${yarn.resourcemanager.max-completed-applications}",
-          "yarn.resourcemanager.store.class": "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore",
-          "yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size": "10",
-          "yarn.resourcemanager.system-metrics-publisher.enabled": "true",
-          "yarn.resourcemanager.webapp.address": {
-            "value": "{RESOURCEMANAGER_HOST}:8088",
-            "template": "yes"
-          },
-          "yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled": "false",
-          "yarn.resourcemanager.work-preserving-recovery.enabled": "false",
-          "yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms": "10000",
-          "yarn.resourcemanager.zk-acl": "world:anyone:rwcda",
-          "yarn.resourcemanager.zk-address": "localhost:2181",
-          "yarn.resourcemanager.zk-num-retries": "1000",
-          "yarn.resourcemanager.zk-retry-interval-ms": "1000",
-          "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore",
-          "yarn.resourcemanager.zk-timeout-ms": "10000",
-          "yarn.scheduler.maximum-allocation-mb": "2048",
-          "yarn.scheduler.minimum-allocation-mb": "682",
-          "yarn.timeline-service.address": {
-            "value": "{RESOURCEMANAGER_HOST}:10200",
-            "template": "yes"
-          },
-          "yarn.timeline-service.bind-host": "0.0.0.0",
-          "yarn.timeline-service.client.max-retries": "30",
-          "yarn.timeline-service.client.retry-interval-ms": "1000",
-          "yarn.timeline-service.enabled": "true",
-          "yarn.timeline-service.generic-application-history.store-class": "org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore",
-          "yarn.timeline-service.http-authentication.simple.anonymous.allowed": "true",
-          "yarn.timeline-service.http-authentication.type": "simple",
-          "yarn.timeline-service.leveldb-timeline-store.path": "/hadoop/yarn/timeline",
-          "yarn.timeline-service.leveldb-timeline-store.read-cache-size": "104857600",
-          "yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size": "10000",
-          "yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size": "10000",
-          "yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms": "300000",
-          "yarn.timeline-service.store-class": "org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore",
-          "yarn.timeline-service.ttl-enable": "true",
-          "yarn.timeline-service.ttl-ms": "2678400000",
-          "yarn.timeline-service.webapp.address": {
-            "value": "{RESOURCEMANAGER_HOST}:8188",
-            "template": "yes"
-          },
-          "yarn.timeline-service.webapp.https.address": {
-            "value": "{RESOURCEMANAGER_HOST}:8190",
-            "template": "yes"
-          }
-        },
-        "hdfs-site": {
-          "dfs.block.local-path-access.user": {"remove": "yes"},
-          "dfs.client.read.shortcircuit": "true",
-          "dfs.client.read.shortcircuit.streams.cache.size": "4096",
-          "dfs.datanode.du.pct": {"remove": "yes"},
-          "dfs.datanode.du.reserved": "1073741824",
-          "dfs.datanode.socket.write.timeout": {"remove": "yes"},
-          "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
-          "dfs.hosts": {"remove": "yes"},
-          "dfs.journalnode.http-address": "0.0.0.0:8480",
-          "dfs.secondary.https.port": {"remove": "yes"},
-          "dfs.web.ugi": {"remove": "yes"},
-          "fs.permissions.umask-mode": "022",
-          "ipc.server.max.response.size": {"remove": "yes"},
-          "ipc.server.read.threadpool.size": {"remove": "yes"},
-          "dfs.support.append": "true",
-          "dfs.namenode.checkpoint.txns": "1000000",
-          "dfs.namenode.checkpoint.period": "21600",
-          "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}",
-          "dfs.namenode.startup.delay.block.deletion.sec": "3600",
-          "dfs.http.policy": "HTTP_ONLY",
-          "dfs.namenode.name.dir.restore": "true",
-          "dfs.datanode.max.transfer.threads": "16384",
-          "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary",
-          "dfs.namenode.checkpoint.period": "21600"
-        },
-        "core-site": {
-          "fs.checkpoint.size": {"remove": "yes"},
-          "hadoop.security.auth_to_local": "\n        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n        RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n        RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n        DEFAULT\n    ",
-          "hadoop.security.authentication": "simple",
-          "hadoop.security.authorization": "false",
-          "io.compression.codec.lzo.class": {"remove": "yes"},
-          "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec"
-        },
-        "mapred-env": {
-          "jtnode_heapsize": {"remove": "yes"},
-          "jtnode_opt_maxnewsize": {"remove": "yes"},
-          "jtnode_opt_newsize": {"remove": "yes"},
-          "lzo_enabled": {"remove": "yes"},
-          "rca_enabled": {"remove": "yes"},
-          "rca_properties": {"remove": "yes"},
-          "snappy_enabled": {"remove": "yes"},
-          "ttnode_heapsize": {"remove": "yes"},
-          "content": "\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n\nexport HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n\nexport HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n\n#export HADOOP_JOB_HISTORYSERVER_OPTS=\n#export HADOOP_MAPRED_LOG_DIR=\"\" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.\n#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.\n#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.\n#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default\n#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"",
-          "jobhistory_heapsize": "900",
-          "mapred_log_dir_prefix": "/var/log/hadoop-mapreduce",
-          "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce",
-          "mapred_user": "mapred"
-        },
-        "hive-site": {
-          "fs.hdfs.impl.disable.cache": {"remove":"yes"},
-          "fs.file.impl.disable.cache": {"remove":"yes"},
-          "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory",
-          "hive.security.metastore.authorization.manager": {"remove":"yes"},
-          "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator",
-          "hive.fetch.task.aggr": "false",
-          "hive.execution.engine": "mr",
-          "hive.tez.java.opts": "-server -Xmx546m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps",
-          "hive.vectorized.groupby.maxentries": "100000",
-          "hive.server2.table.type.mapping": "CLASSIC",
-          "hive.tez.min.partition.factor": "0.25",
-          "hive.tez.cpu.vcores": "-1",
-          "hive.compute.query.using.stats": "true",
-          "hive.stats.dbclass": "fs",
-          "hive.tez.auto.reducer.parallelism": "false",
-          "hive.server2.thrift.http.path": "cliservice",
-          "hive.metastore.authorization.storage.checks": "false",
-          "hive.exec.post.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook",
-          "hive.zookeeper.namespace": "hive_zookeeper_namespace",
-          "hive.cbo.enable": "true",
-          "hive.optimize.index.filter": "true",
-          "hive.optimize.bucketmapjoin": "true",
-          "hive.mapjoin.bucket.cache.size": "10000",
-          "hive.limit.optimize.enable": "true",
-          "hive.fetch.task.conversion.threshold": "1073741824",
-          "hive.exec.max.dynamic.partitions": "5000",
-          "hive.metastore.sasl.enabled": "false",
-          "hive.txn.manager": "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager",
-          "hive.optimize.constant.propagation": "true",
-          "hive.exec.submitviachild": "false",
-          "hive.metastore.kerberos.principal": "hive/_HOST@EXAMPLE.COM",
-          "hive.txn.max.open.batch": "1000",
-          "hive.exec.compress.output": "false",
-          "hive.merge.size.per.task": "256000000",
-          "hive.heapsize": "1024",
-          "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator",
-          "hive.merge.mapfiles": "true",
-          "hive.exec.parallel.thread.number": "8",
-          "hive.mapjoin.optimized.hashtable": "true",
-          "hive.optimize.metadataonly": "true",
-          "hive.tez.dynamic.partition.pruning.max.event.size": "1048576",
-          "hive.server2.thrift.max.worker.threads": "500",
-          "hive.optimize.sort.dynamic.partition": "false",
-          "hive.server2.enable.doAs": "true",
-          "hive.metastore.pre.event.listeners": "org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener",
-          "hive.metastore.failure.retries": "24",
-          "hive.server2.enable.impersonation": "true",
-          "hive.merge.smallfiles.avgsize": "16000000",
-          "hive.tez.max.partition.factor": "2.0",
-          "hive.server2.transport.mode": "binary",
-          "hive.tez.container.size": "682",
-          "hive.zookeeper.client.port": "2181",
-          "hive.vectorized.groupby.checkinterval": "4096",
-          "hive.cluster.delegation.token.store.class": "org.apache.hadoop.hive.thrift.ZooKeeperTokenStore",
-          "hive.cluster.delegation.token.store.zookeeper.znode": "/hive/cluster/delegation",
-          "javax.jdo.option.ConnectionPassword": "1",
-          "hive.exec.max.created.files": "100000",
-          "hive.map.aggr.hash.min.reduction": "0.5",
-          "hive.server2.thrift.http.port": "10001",
-          "hive.orc.splits.include.file.footer": "false",
-          "hive.exec.pre.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook",
-          "hive.merge.orcfile.stripe.level": "true",
-          "hive.exec.failure.hooks": "org.apache.hadoop.hive.ql.hooks.ATSHook",
-          "hive.server2.allow.user.substitution": "true",
-          "hive.vectorized.execution.reduce.enabled": "false",
-          "hive.metastore.connect.retries": "24",
-          "hive.metastore.server.max.threads": "100000",
-          "hive.exec.orc.compression.strategy": "SPEED",
-          "hive.optimize.reducededuplication.min.reducer": "4",
-          "hive.enforce.sortmergebucketmapjoin": "true",
-          "hive.auto.convert.join.noconditionaltask.size": "238026752",
-          "javax.jdo.option.ConnectionUserName": "hive",
-          "hive.tez.log.level": "INFO",
-          "hive.compactor.delta.num.threshold": "10",
-          "hive.exec.dynamic.partition": "true",
-          "hive.server2.authentication": "NONE",
-          "hive.stats.fetch.column.stats": "false",
-          "hive.orc.compute.splits.num.threads": "10",
-          "hive.tez.smb.number.waves": "0.5",
-          "hive.convert.join.bucket.mapjoin.tez": "false",
-          "hive.server2.logging.operation.log.location": "/tmp/hive/operation_logs",
-          "hive.tez.input.format": "org.apache.hadoop.hive.ql.io.HiveInputFormat",
-          "hive.exec.orc.default.compress": "ZLIB",
-          "hive.support.concurrency": "false",
-          "hive.compactor.check.interval": "300L",
-          "hive.compactor.delta.pct.threshold": "0.1f",
-          "hive.auto.convert.sortmerge.join.noconditionaltask": "true",
-          "hive.optimize.mapjoin.mapreduce": "true",
-          "hive.metastore.client.connect.retry.delay": "5s",
-          "hive.prewarm.numcontainers": "10",
-          "hive.vectorized.groupby.flush.percent": "0.1",
-          "hive.server2.authentication.spnego.principal": "/etc/security/keytabs/spnego.service.keytab",
-          "hive.security.metastore.authenticator.manager": "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator",
-          "hive.smbjoin.cache.rows": "10000",
-          "hive.vectorized.groupby.maxentries": "100000",
-          "hive.metastore.authorization.storage.checks": "false",
-          "hive.exec.parallel.thread.number": "8",
-          "hive.auto.convert.sortmerge.join.to.mapjoin": "false",
-          "hive.cli.print.header": "false",
-          "hive.cluster.delegation.token.store.zookeeper.connectString": {
-            "value": "{ZOOKEEPER_QUORUM}",
-            "template": "yes"
-          },
-          "hive.compactor.abortedtxn.threshold": "1000",
-          "hive.compactor.initiator.on": "false",
-          "hive.compactor.worker.threads": "0",
-          "hive.compactor.worker.timeout": "86400L",
-          "hive.conf.restricted.list": "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role",
-          "hive.exec.compress.intermediate": "false",
-          "hive.exec.dynamic.partition.mode": "nonstrict",
-          "hive.exec.max.dynamic.partitions.pernode": "2000",
-          "hive.exec.orc.default.stripe.size": "67108864",
-          "hive.exec.parallel": "false",
-          "hive.exec.reducers.bytes.per.reducer": "67108864",
-          "hive.exec.reducers.max": "1009",
-          "hive.exec.scratchdir": "/tmp/hive",
-          "hive.exec.submit.local.task.via.child": "true",
-          "hive.fetch.task.conversion": "more",
-          "hive.limit.pushdown.memory.usage": "0.04",
-          "hive.map.aggr.hash.force.flush.memory.threshold": "0.9",
-          "hive.map.aggr.hash.percentmemory": "0.5",
-          "hive.merge.mapredfiles": "false",
-          "hive.merge.rcfile.block.level": "true",
-          "hive.merge.tezfiles": "false",
-          "hive.metastore.client.socket.timeout": "1800s",
-          "hive.metastore.kerberos.keytab.file": "/etc/security/keytabs/hive.service.keytab",
-          "hive.optimize.null.scan": "true",
-          "hive.optimize.reducededuplication": "true",
-          "hive.prewarm.enabled": "false",
-          "hive.security.metastore.authorization.auth.reads": "true",
-          "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider,org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly",
-          "hive.server2.authentication.spnego.keytab": "HTTP/_HOST@EXAMPLE.COM",
-          "hive.server2.logging.operation.enabled": "true",
-          "hive.server2.tez.default.queues": "default",
-          "hive.server2.tez.initialize.default.sessions": "false",
-          "hive.server2.tez.sessions.per.default.queue": "1",
-          "hive.server2.thrift.sasl.qop": "auth",
-          "hive.server2.use.SSL": "false",
-          "hive.stats.autogather": "true",
-          "hive.stats.fetch.partition.stats": "true",
-          "hive.tez.dynamic.partition.pruning": "true",
-          "hive.tez.dynamic.partition.pruning.max.data.size": "104857600",
-          "hive.txn.timeout": "300",
-          "hive.user.install.directory": "/user/",
-          "hive.vectorized.execution.enabled": "true",
-          "hive.zookeeper.quorum": {
-            "value": "{ZOOKEEPER_QUORUM}",
-            "template": "yes"
-          }
-        },
-        "hbase-site": {
-          "dfs.support.append": {"remove": "yes"},
-          "hbase.defaults.for.version.skip": "true",
-          "hbase.hregion.majorcompaction": "604800000",
-          "hbase.hregion.max.filesize": "10737418240",
-          "hbase.hstore.blockingStoreFiles": "10",
-          "hbase.hstore.flush.retries.number": "120",
-          "hbase.hregion.majorcompaction.jitter": "0.50",
-          "hbase.regionserver.global.memstore.lowerLimit": "0.38",
-          "hbase.regionserver.handler.count": "60",
-          "hbase.rpc.engine": {"remove": "yes"},
-          "hfile.block.cache.size": "0.40",
-          "zookeeper.session.timeout": "30000",
-          "hbase.hregion.memstore.block.multiplier": "4",
-          "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket",
-          "hbase.local.dir": "${hbase.tmp.dir}/local",
-          "hbase.hstore.flush.retries.number": {"remove":"true"}
-        },
-        "webhcat-env": {
-          "content": "\n# The file containing the running pid\nPID_FILE={{webhcat_pid_file}}\n\nTEMPLETON_LOG_DIR={{templeton_log_dir}}/\n\n\nWEBHCAT_LOG_DIR={{templeton_log_dir}}/\n\n# The console error log\nERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log\n\n# The console log\nCONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log\n\n#TEMPLETON_JAR=templeton_jar_name\n\n#HADOOP_PREFIX=hadoop_prefix\n\n#HCAT_PREFIX=hive_prefix\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nexport HADOOP_HOME={{hadoop_home}}"
-        }
-      },
-      "property-mapping": {
-        "create.empty.dir.if.nonexist": "mapreduce.jobcontrol.createdir.ifnotexist",
-        "dfs.access.time.precision": "dfs.namenode.accesstime.precision",
-        "dfs.backup.address": "dfs.namenode.backup.address",
-        "dfs.backup.http.address": "dfs.namenode.backup.http-address",
-        "dfs.balance.bandwidthPerSec": "dfs.datanode.balance.bandwidthPerSec",
-        "dfs.block.size": "dfs.blocksize",
-        "dfs.data.dir": "dfs.datanode.data.dir",
-        "dfs.datanode.max.xcievers": "dfs.datanode.max.transfer.threads",
-        "dfs.df.interval": "fs.df.interval",
-        "dfs.federation.nameservice.id": "dfs.nameservice.id",
-        "dfs.federation.nameservices": "dfs.nameservices",
-        "dfs.http.address": "dfs.namenode.http-address",
-        "dfs.https.address": "dfs.namenode.https-address",
-        "dfs.https.client.keystore.resource": "dfs.client.https.keystore.resource",
-        "dfs.https.need.client.auth": "dfs.client.https.need-auth",
-        "dfs.max.objects": "dfs.namenode.max.objects",
-        "dfs.max-repl-streams": "dfs.namenode.replication.max-streams",
-        "dfs.name.dir": "dfs.namenode.name.dir",
-        "dfs.name.dir.restore": "dfs.namenode.name.dir.restore",
-        "dfs.name.edits.dir": "dfs.namenode.edits.dir",
-        "dfs.permissions": "dfs.permissions.enabled",
-        "dfs.permissions.supergroup": "dfs.permissions.superusergroup",
-        "dfs.read.prefetch.size": "dfs.client.read.prefetch.size",
-        "dfs.replication.considerLoad": "dfs.namenode.replication.considerLoad",
-        "dfs.replication.interval": "dfs.namenode.replication.interval",
-        "dfs.replication.min": "dfs.namenode.replication.min",
-        "dfs.replication.pending.timeout.sec": "dfs.namenode.replication.pending.timeout-sec",
-        "dfs.safemode.extension": "dfs.namenode.safemode.extension",
-        "dfs.safemode.threshold.pct": "dfs.namenode.safemode.threshold-pct",
-        "dfs.secondary.http.address": "dfs.namenode.secondary.http-address",
-        "dfs.socket.timeout": "dfs.client.socket-timeout",
-        "dfs.umaskmode": "fs.permissions.umask-mode",
-        "dfs.write.packet.size": "dfs.client-write-packet-size",
-        "fs.checkpoint.dir": "dfs.namenode.checkpoint.dir",
-        "fs.checkpoint.edits.dir": "dfs.namenode.checkpoint.edits.dir",
-        "fs.checkpoint.period": "dfs.namenode.checkpoint.period",
-        "fs.default.name": "fs.defaultFS",
-        "hadoop.configured.node.mapping": "net.topology.configured.node.mapping",
-        "hadoop.job.history.location": "mapreduce.jobtracker.jobhistory.location",
-        "hadoop.native.lib": "io.native.lib.available",
-        "hadoop.net.static.resolutions": "mapreduce.tasktracker.net.static.resolutions",
-        "hadoop.pipes.command-file.keep": "mapreduce.pipes.commandfile.preserve",
-        "hadoop.pipes.executable.interpretor": "mapreduce.pipes.executable.interpretor",
-        "hadoop.pipes.executable": "mapreduce.pipes.executable",
-        "hadoop.pipes.java.mapper": "mapreduce.pipes.isjavamapper",
-        "hadoop.pipes.java.recordreader": "mapreduce.pipes.isjavarecordreader",
-        "hadoop.pipes.java.recordwriter": "mapreduce.pipes.isjavarecordwriter",
-        "hadoop.pipes.java.reducer": "mapreduce.pipes.isjavareducer",
-        "hadoop.pipes.partitioner": "mapreduce.pipes.partitioner",
-        "heartbeat.recheck.interval": "dfs.namenode.heartbeat.recheck-interval",
-        "io.bytes.per.checksum": "dfs.bytes-per-checksum",
-        "io.sort.factor": "mapreduce.task.io.sort.factor",
-        "io.sort.mb": "mapreduce.task.io.sort.mb",
-        "io.sort.spill.percent": "mapreduce.map.sort.spill.percent",
-        "jobclient.completion.poll.interval": "mapreduce.client.completion.pollinterval",
-        "jobclient.output.filter": "mapreduce.client.output.filter",
-        "jobclient.progress.monitor.poll.interval": "mapreduce.client.progressmonitor.pollinterval",
-        "job.end.notification.url": "mapreduce.job.end-notification.url",
-        "job.end.retry.attempts": "mapreduce.job.end-notification.retry.attempts",
-        "job.end.retry.interval": "mapreduce.job.end-notification.retry.interval",
-        "job.local.dir": "mapreduce.job.local.dir",
-        "keep.failed.task.files": "mapreduce.task.files.preserve.failedtasks",
-        "keep.task.files.pattern": "mapreduce.task.files.preserve.filepattern",
-        "key.value.separator.in.input.line": "mapreduce.input.keyvaluelinerecordreader.key.value.separator",
-        "local.cache.size": "mapreduce.tasktracker.cache.local.size",
-        "map.input.file": "mapreduce.map.input.file",
-        "map.input.length": "mapreduce.map.input.length",
-        "map.input.start": "mapreduce.map.input.start",
-        "map.output.key.field.separator": "mapreduce.map.output.key.field.separator",
-        "map.output.key.value.fields.spec": "mapreduce.fieldsel.map.output.key.value.fields.spec",
-        "mapred.acls.enabled": "mapreduce.cluster.acls.enabled",
-        "mapred.binary.partitioner.left.offset": "mapreduce.partition.binarypartitioner.left.offset",
-        "mapred.binary.partitioner.right.offset": "mapreduce.partition.binarypartitioner.right.offset",
-        "mapred.cache.archives": "mapreduce.job.cache.archives",
-        "mapred.cache.archives.timestamps": "mapreduce.job.cache.archives.timestamps",
-        "mapred.cache.files": "mapreduce.job.cache.files",
-        "mapred.cache.files.timestamps": "mapreduce.job.cache.files.timestamps",
-        "mapred.cache.localArchives": "mapreduce.job.cache.local.archives",
-        "mapred.cache.localFiles": "mapreduce.job.cache.local.files",
-        "mapred.child.tmp": "mapreduce.task.tmp.dir",
-        "mapred.cluster.average.blacklist.threshold": "mapreduce.jobtracker.blacklist.average.threshold",
-        "mapred.cluster.map.memory.mb": "mapreduce.cluster.mapmemory.mb",
-        "mapred.cluster.max.map.memory.mb": "mapreduce.jobtracker.maxmapmemory.mb",
-        "mapred.cluster.max.reduce.memory.mb": "mapreduce.jobtracker.maxreducememory.mb",
-        "mapred.cluster.reduce.memory.mb": "mapreduce.cluster.reducememory.mb",
-        "mapred.committer.job.setup.cleanup.needed": "mapreduce.job.committer.setup.cleanup.needed",
-        "mapred.compress.map.output": "mapreduce.map.output.compress",
-        "mapred.data.field.separator": "mapreduce.fieldsel.data.field.separator",
-        "mapred.debug.out.lines": "mapreduce.task.debugout.lines",
-        "mapred.healthChecker.interval": "mapreduce.tasktracker.healthchecker.interval",
-        "mapred.healthChecker.script.args": "mapreduce.tasktracker.healthchecker.script.args",
-        "mapred.healthChecker.script.path": "mapreduce.tasktracker.healthchecker.script.path",
-        "mapred.healthChecker.script.timeout": "mapreduce.tasktracke

<TRUNCATED>
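For orientation: the removed catalog above pairs per-config edits (plain values, {"remove": "yes"} markers, templated host values) with a "property-mapping" table that renames deprecated Hadoop keys to their current names. A minimal, illustrative sketch of how such a rename table can be applied to a flat configuration map is shown below; the function and sample data are hypothetical and are not part of the Ambari code touched by this commit.

// Illustrative only: apply a property-mapping table (old key -> new key)
// to a flat config object, mirroring the shape of the JSON removed above.
function applyPropertyMapping(config, propertyMapping) {
  var result = {};
  Object.keys(config).forEach(function (key) {
    var newKey = propertyMapping[key] || key;   // rename when a mapping exists
    result[newKey] = config[key];
  });
  return result;
}

// Example using two mappings that appear in the table above.
var upgraded = applyPropertyMapping(
  { "fs.default.name": "hdfs://nn:8020", "io.sort.mb": "256" },
  { "fs.default.name": "fs.defaultFS", "io.sort.mb": "mapreduce.task.io.sort.mb" }
);
// upgraded -> { "fs.defaultFS": "hdfs://nn:8020", "mapreduce.task.io.sort.mb": "256" }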

[49/63] [abbrv] ambari git commit: AMBARI-21360: Ability to delete a view instance from view instance list (sangeetar)

Posted by ab...@apache.org.
AMBARI-21360: Ability to delete a view instance from view instance list (sangeetar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a3681c01
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a3681c01
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a3681c01

Branch: refs/heads/branch-feature-logsearch-ui
Commit: a3681c0199f31511170770d454697206cbeeeda3
Parents: 3446283
Author: Sangeeta Ravindran <sa...@apache.org>
Authored: Wed Jun 28 09:29:57 2017 -0700
Committer: Sangeeta Ravindran <sa...@apache.org>
Committed: Wed Jun 28 09:29:57 2017 -0700

----------------------------------------------------------------------
 AMBARI-21360.patch                              | 45 ++++++++++++++++++++
 .../controllers/ambariViews/ViewsListCtrl.js    | 20 +++++++++
 .../app/views/ambariViews/listTable.html        |  3 ++
 3 files changed, 68 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a3681c01/AMBARI-21360.patch
----------------------------------------------------------------------
diff --git a/AMBARI-21360.patch b/AMBARI-21360.patch
new file mode 100644
index 0000000..c26f3a0
--- /dev/null
+++ b/AMBARI-21360.patch
@@ -0,0 +1,45 @@
+diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js
+index c41e5d4..4e7bae3 100644
+--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js
++++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js
+@@ -132,6 +132,26 @@ angular.module('ambariAdminConsole')
+     }
+   };
+ 
++  $scope.deleteInstance = function(instance) {
++      ConfirmationModal.show(
++        $t('common.delete', {
++          term: $t('views.viewInstance')
++        }),
++        $t('common.deleteConfirmation', {
++          instanceType: $t('views.viewInstance'),
++          instanceName: instance.ViewInstanceInfo.label
++        })
++      ).then(function() {
++        View.deleteInstance(instance.ViewInstanceInfo.view_name, instance.ViewInstanceInfo.version, instance.ViewInstanceInfo.instance_name)
++          .then(function() {
++            loadViews();
++          })
++          .catch(function(data) {
++            Alert.error($t('views.alerts.cannotDeleteInstance'), data.data.message);
++          });
++      });
++    };
++
+   $scope.reloadViews = function () {
+     loadViews();
+   };
+diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html
+index 59c322f..91b9a93 100644
+--- a/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html
++++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html
+@@ -81,6 +81,9 @@
+                     <td class="col-sm-1">
+                         <a class="instance-link ng-scope ng-binding" href="#/views/{{view.view_name}}/versions/{{instance.ViewInstanceInfo.version}}/instances/{{instance.ViewInstanceInfo.instance_name}}/clone"><i class="fa fa-copy"></i></a>
+                     </td>
++                    <td class="col-sm-1">
++                        <a class="instance-link ng-scope ng-binding" href ng-click="deleteInstance(instance)"><i class="fa fa-trash-o"></i></a>
++                    </td>
+                 </tr>
+                 </tbody>
+                 <tfoot>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3681c01/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js
index c41e5d4..4e7bae3 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/ambariViews/ViewsListCtrl.js
@@ -132,6 +132,26 @@ angular.module('ambariAdminConsole')
     }
   };
 
+  $scope.deleteInstance = function(instance) {
+      ConfirmationModal.show(
+        $t('common.delete', {
+          term: $t('views.viewInstance')
+        }),
+        $t('common.deleteConfirmation', {
+          instanceType: $t('views.viewInstance'),
+          instanceName: instance.ViewInstanceInfo.label
+        })
+      ).then(function() {
+        View.deleteInstance(instance.ViewInstanceInfo.view_name, instance.ViewInstanceInfo.version, instance.ViewInstanceInfo.instance_name)
+          .then(function() {
+            loadViews();
+          })
+          .catch(function(data) {
+            Alert.error($t('views.alerts.cannotDeleteInstance'), data.data.message);
+          });
+      });
+    };
+
   $scope.reloadViews = function () {
     loadViews();
   };

http://git-wip-us.apache.org/repos/asf/ambari/blob/a3681c01/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html
index 59c322f..91b9a93 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/ambariViews/listTable.html
@@ -81,6 +81,9 @@
                     <td class="col-sm-1">
                         <a class="instance-link ng-scope ng-binding" href="#/views/{{view.view_name}}/versions/{{instance.ViewInstanceInfo.version}}/instances/{{instance.ViewInstanceInfo.instance_name}}/clone"><i class="fa fa-copy"></i></a>
                     </td>
+                    <td class="col-sm-1">
+                        <a class="instance-link ng-scope ng-binding" href ng-click="deleteInstance(instance)"><i class="fa fa-trash-o"></i></a>
+                    </td>
                 </tr>
                 </tbody>
                 <tfoot>
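
For context, the controller change above delegates the actual removal to the admin console's View.deleteInstance(view_name, version, instance_name) service and simply refreshes the list on success. A minimal, hedged sketch of the kind of REST call such a service is expected to wrap - a DELETE on the view instance resource - is shown below; the endpoint path, the X-Requested-By header value, and the credentials handling are assumptions for illustration, not the admin console's implementation.

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

/** Illustrative sketch: removes a view instance through an Ambari-style REST endpoint. */
public class DeleteViewInstanceSketch {

  public static int deleteInstance(String baseUrl, String user, String password,
                                   String viewName, String version, String instanceName)
      throws IOException {
    // Assumed resource layout, e.g. http://host:8080/api/v1/views/FILES/versions/1.0.0/instances/files_demo
    URL url = new URL(String.format("%s/api/v1/views/%s/versions/%s/instances/%s",
        baseUrl, viewName, version, instanceName));
    HttpURLConnection connection = (HttpURLConnection) url.openConnection();
    connection.setRequestMethod("DELETE");
    // State-changing requests are typically sent with an X-Requested-By header (assumed value).
    connection.setRequestProperty("X-Requested-By", "ambari");
    String token = Base64.getEncoder()
        .encodeToString((user + ":" + password).getBytes(StandardCharsets.UTF_8));
    connection.setRequestProperty("Authorization", "Basic " + token);
    int status = connection.getResponseCode(); // 2xx indicates the instance was removed
    connection.disconnect();
    return status;
  }
}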


[09/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
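
Removing the pre-2.5 catalogs (the UpgradeCatalog210 test deleted below among them) shrinks the set of catalogs a schema upgrade has to consider. As a hedged sketch only - the Catalog interface, the method names, and the version comparison below are assumptions, not Ambari's SchemaUpgradeHelper - an upgrade path can be assembled by keeping the registered catalogs whose target version lies in (source, target] and ordering them ascending.

import java.util.ArrayList;
import java.util.List;

/** Illustrative sketch: selects the catalogs needed to move a schema from one version to another. */
public class UpgradePathSketch {

  /** Assumed minimal contract: each catalog upgrades the schema up to a single target version. */
  interface Catalog {
    String getTargetVersion();
  }

  /** Compares dotted version strings such as "2.5.1" segment by segment (numeric segments assumed). */
  static int compareVersions(String a, String b) {
    String[] as = a.split("\\.");
    String[] bs = b.split("\\.");
    for (int i = 0; i < Math.max(as.length, bs.length); i++) {
      int ai = i < as.length ? Integer.parseInt(as[i]) : 0;
      int bi = i < bs.length ? Integer.parseInt(bs[i]) : 0;
      if (ai != bi) {
        return Integer.compare(ai, bi);
      }
    }
    return 0;
  }

  /** Keeps catalogs with a target version in (source, target], sorted ascending; a null source keeps everything up to target. */
  static List<Catalog> getUpgradePath(List<Catalog> registered, String sourceVersion, String targetVersion) {
    List<Catalog> path = new ArrayList<>();
    for (Catalog catalog : registered) {
      String version = catalog.getTargetVersion();
      boolean afterSource = sourceVersion == null || compareVersions(version, sourceVersion) > 0;
      if (afterSource && compareVersions(version, targetVersion) <= 0) {
        path.add(catalog);
      }
    }
    path.sort((a, b) -> compareVersions(a.getTargetVersion(), b.getTargetVersion()));
    return path;
  }
}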
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
deleted file mode 100644
index 7218578..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog210Test.java
+++ /dev/null
@@ -1,1360 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertNotNull;
-import static junit.framework.Assert.assertNull;
-import static junit.framework.Assert.assertTrue;
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.anyString;
-import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.createMockBuilder;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.createStrictMock;
-import static org.easymock.EasyMock.eq;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.verify;
-
-import java.io.File;
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-import java.net.URL;
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import javax.persistence.EntityManager;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.H2DatabaseCleaner;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.ServiceConfigVersionResponse;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.OrmTestHelper;
-import org.apache.ambari.server.orm.dao.ArtifactDAO;
-import org.apache.ambari.server.orm.dao.ClusterDAO;
-import org.apache.ambari.server.orm.dao.ClusterStateDAO;
-import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
-import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
-import org.apache.ambari.server.orm.dao.StackDAO;
-import org.apache.ambari.server.orm.entities.ArtifactEntity;
-import org.apache.ambari.server.orm.entities.ClusterEntity;
-import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
-import org.apache.ambari.server.orm.entities.ClusterStateEntity;
-import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.HostEntity;
-import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
-import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
-import org.apache.ambari.server.orm.entities.StackEntity;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.Host;
-import org.apache.ambari.server.state.HostComponentAdminState;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
-import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
-import org.apache.ambari.server.state.stack.OsFamily;
-import org.easymock.Capture;
-import org.easymock.CaptureType;
-import org.easymock.EasyMock;
-import org.easymock.EasyMockSupport;
-import org.junit.Assert;
-import org.junit.Test;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.Binder;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.Module;
-import com.google.inject.Provider;
-
-/**
- * {@link org.apache.ambari.server.upgrade.UpgradeCatalog210} unit tests.
- */
-public class UpgradeCatalog210Test {
-  private Injector injector;
-  private Provider<EntityManager> entityManagerProvider = createStrictMock(Provider.class);
-  private EntityManager entityManager = createNiceMock(EntityManager.class);
-  private UpgradeCatalogHelper upgradeCatalogHelper;
-  private StackEntity desiredStackEntity;
-  private String desiredRepositoryVersion = "2.2.0-1234";
-
-  public void initData() {
-    //reset(entityManagerProvider);
-    //expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
-    //replay(entityManagerProvider);
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-
-    upgradeCatalogHelper = injector.getInstance(UpgradeCatalogHelper.class);
-    // inject AmbariMetaInfo to ensure that stacks get populated in the DB
-    injector.getInstance(AmbariMetaInfo.class);
-    // load the stack entity
-    StackDAO stackDAO = injector.getInstance(StackDAO.class);
-    desiredStackEntity = stackDAO.find("HDP", "2.2.0");
-  }
-
-  public void tearDown() throws AmbariException, SQLException {
-    H2DatabaseCleaner.clearDatabaseAndStopPersistenceService(injector);
-  }
-
-  @Test
-  public void testExecuteDDLUpdates() throws Exception {
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    Configuration configuration = createNiceMock(Configuration.class);
-    Connection connection = createNiceMock(Connection.class);
-    Statement statement = createNiceMock(Statement.class);
-    ResultSet resultSet = createNiceMock(ResultSet.class);
-    expect(configuration.getDatabaseUrl()).andReturn(Configuration.JDBC_IN_MEMORY_URL).anyTimes();
-    dbAccessor.getConnection();
-    expectLastCall().andReturn(connection).anyTimes();
-    connection.createStatement();
-    expectLastCall().andReturn(statement).anyTimes();
-    statement.executeQuery(anyObject(String.class));
-    expectLastCall().andReturn(resultSet).anyTimes();
-
-    // Create DDL sections with their own capture groups
-    AlertSectionDDL alertSectionDDL = new AlertSectionDDL();
-    HostSectionDDL hostSectionDDL = new HostSectionDDL();
-    WidgetSectionDDL widgetSectionDDL = new WidgetSectionDDL();
-    ViewSectionDDL viewSectionDDL = new ViewSectionDDL();
-
-    // Execute any DDL schema changes
-    alertSectionDDL.execute(dbAccessor);
-    hostSectionDDL.execute(dbAccessor);
-    widgetSectionDDL.execute(dbAccessor);
-    viewSectionDDL.execute(dbAccessor);
-
-    // Replay sections
-    replay(dbAccessor, configuration, resultSet, connection, statement);
-
-    AbstractUpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor);
-    Class<?> c = AbstractUpgradeCatalog.class;
-    Field f = c.getDeclaredField("configuration");
-    f.setAccessible(true);
-    f.set(upgradeCatalog, configuration);
-
-    upgradeCatalog.executeDDLUpdates();
-    verify(dbAccessor, configuration, resultSet, connection, statement);
-
-    // Verify sections
-    alertSectionDDL.verify(dbAccessor);
-    hostSectionDDL.verify(dbAccessor);
-    widgetSectionDDL.verify(dbAccessor);
-    viewSectionDDL.verify(dbAccessor);
-  }
-
-  @Test
-  public void testExecutePreDMLUpdates() throws Exception {
-    Method executeStackPreDMLUpdates = UpgradeCatalog210.class.getDeclaredMethod("executeStackPreDMLUpdates");
-    Method cleanupStackUpdates = UpgradeCatalog210.class.getDeclaredMethod("cleanupStackUpdates");
-
-    final UpgradeCatalog210 upgradeCatalog210 = createMockBuilder(UpgradeCatalog210.class)
-        .addMockedMethod(executeStackPreDMLUpdates)
-        .addMockedMethod(cleanupStackUpdates).createMock();
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(UpgradeCatalog210.class).toInstance(upgradeCatalog210);
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    upgradeCatalog210.executeStackPreDMLUpdates();
-    expectLastCall().once();
-
-    replay(upgradeCatalog210);
-    mockInjector.getInstance(UpgradeCatalog210.class).executePreDMLUpdates();
-
-    verify(upgradeCatalog210);
-  }
-
-  @Test
-  public void testExecuteDMLUpdates() throws Exception {
-    Method addNewConfigurationsFromXml =
-      AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
-
-    Method initializeClusterAndServiceWidgets =
-      UpgradeCatalog210.class.getDeclaredMethod("initializeClusterAndServiceWidgets");
-
-    Method addMissingConfigs = UpgradeCatalog210.class.getDeclaredMethod("addMissingConfigs");
-
-    Method updateAlertDefinitions = UpgradeCatalog210.class.getDeclaredMethod("updateAlertDefinitions");
-
-    Method removeStormRestApiServiceComponent =
-      UpgradeCatalog210.class.getDeclaredMethod("removeStormRestApiServiceComponent");
-
-    Method updateKerberosDescriptorArtifacts =
-      AbstractUpgradeCatalog.class.getDeclaredMethod("updateKerberosDescriptorArtifacts");
-
-    UpgradeCatalog210 upgradeCatalog210 = createMockBuilder(UpgradeCatalog210.class)
-        .addMockedMethod(addNewConfigurationsFromXml)
-        .addMockedMethod(initializeClusterAndServiceWidgets)
-        .addMockedMethod(addMissingConfigs)
-        .addMockedMethod(updateAlertDefinitions)
-        .addMockedMethod(removeStormRestApiServiceComponent)
-        .addMockedMethod(updateKerberosDescriptorArtifacts)
-        .createMock();
-
-    upgradeCatalog210.addNewConfigurationsFromXml();
-    expectLastCall().once();
-
-    upgradeCatalog210.initializeClusterAndServiceWidgets();
-    expectLastCall().once();
-
-    upgradeCatalog210.addMissingConfigs();
-    expectLastCall().once();
-
-    upgradeCatalog210.updateAlertDefinitions();
-    expectLastCall().once();
-
-    upgradeCatalog210.removeStormRestApiServiceComponent();
-    expectLastCall().once();
-
-    upgradeCatalog210.updateKerberosDescriptorArtifacts();
-    expectLastCall().once();
-
-    replay(upgradeCatalog210);
-
-    upgradeCatalog210.executeDMLUpdates();
-
-    verify(upgradeCatalog210);
-  }
-
-  @Test
-  public void testUpdateRangerHiveConfigs() throws Exception{
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController  mockAmbariManagementController = easyMockSupport.createNiceMock(
-        AmbariManagementController.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-
-    final Config mockRangerPlugin = easyMockSupport.createNiceMock(Config.class);
-    final Config mockHiveEnv = easyMockSupport.createNiceMock(Config.class);
-    final Config mockHiveServer = easyMockSupport.createNiceMock(Config.class);
-
-    final Map<String, String> propertiesExpectedRangerPlugin = new HashMap<>();
-    propertiesExpectedRangerPlugin.put("ranger-hive-plugin-enabled", "yes");
-    final Map<String, String> propertiesExpectedHiveEnv = new HashMap<>();
-    final Map<String, String> propertiesExpectedHiveServer2 = new HashMap<>();
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        bind(Clusters.class).toInstance(mockClusters);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    expect(mockClusterExpected.getDesiredConfigByType("ranger-hive-plugin-properties")).andReturn(mockRangerPlugin).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("hive-env")).andReturn(mockHiveEnv).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("hiveserver2-site")).andReturn(mockHiveServer).atLeastOnce();
-    expect(mockRangerPlugin.getProperties()).andReturn(propertiesExpectedRangerPlugin).anyTimes();
-    expect(mockHiveEnv.getProperties()).andReturn(propertiesExpectedHiveEnv).anyTimes();
-    expect(mockHiveServer.getProperties()).andReturn(propertiesExpectedHiveServer2).anyTimes();
-
-    ServiceConfigVersionResponse r = null;
-    expect(mockClusterExpected.getConfig(anyObject(String.class), anyObject(String.class))).
-        andReturn(mockHiveServer).anyTimes();
-    expect(mockClusterExpected.addDesiredConfig("ambari-upgrade", Collections.singleton(mockHiveServer), "Updated hive-env during Ambari Upgrade from 2.0.0 to 2.1.0.")).
-        andReturn(r).times(1);
-    expect(mockClusterExpected.addDesiredConfig("ambari-upgrade", Collections.singleton(mockHiveServer), "Updated hiveserver2-site during Ambari Upgrade from 2.0.0 to 2.1.0.")).
-        andReturn(r).times(1);
-    expect(mockClusterExpected.addDesiredConfig("ambari-upgrade", Collections.singleton(mockHiveServer), "Updated ranger-hive-plugin-properties during Ambari Upgrade from 2.0.0 to 2.1.0.")).
-        andReturn(r).times(1);
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog210.class).updateRangerHiveConfigs();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testUpdateHiveConfigs() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController  mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final ServiceConfigVersionResponse mockServiceConfigVersionResponse = easyMockSupport.createNiceMock(ServiceConfigVersionResponse.class);
-    final Config mockHiveEnv = easyMockSupport.createNiceMock(Config.class);
-    final Config mockHiveServerSite = easyMockSupport.createNiceMock(Config.class);
-    final Config mockHiveSite = easyMockSupport.createNiceMock(Config.class);
-
-    final Map<String, String> propertiesExpectedHiveEnv = new HashMap<String, String>() {{
-      put("hive_security_authorization", "none");
-    }};
-    final Map<String, String> propertiesExpectedHiveSite = new HashMap<String, String>() {{
-      put("hive.server2.authentication", "pam");
-      put("hive.server2.custom.authentication.class", "");
-    }};
-    final Map<String, String> propertiesExpectedHiveServerSite = new HashMap<String, String>() {{
-      put("hive.security.authorization.manager", "");
-      put("hive.security.authenticator.manager", "");
-    }};
-    final Map<String, Service> servicesExpected = new HashMap<>();
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        bind(Clusters.class).toInstance(mockClusters);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    Capture<String> configTypeEnv = EasyMock.newCapture();
-    Capture<String> configTypeSite = EasyMock.newCapture();
-    Capture<String> configTypeServerSite = EasyMock.newCapture();
-
-    expect(mockClusterExpected.getDesiredConfigByType("hive-env")).andReturn(mockHiveEnv).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("hiveserver2-site")).andReturn(mockHiveServerSite).atLeastOnce();
-    expect(mockHiveEnv.getProperties()).andReturn(propertiesExpectedHiveEnv).anyTimes();
-    expect(mockHiveServerSite.getProperties()).andReturn(propertiesExpectedHiveServerSite).anyTimes();
-    expect(mockClusterExpected.getConfig(capture(configTypeEnv), anyObject(String.class))).andReturn(mockHiveEnv).once();
-    expect(mockClusterExpected.getConfig(capture(configTypeServerSite), anyObject(String.class))).andReturn(mockHiveServerSite).once();
-    expect(mockClusterExpected.addDesiredConfig("ambari-upgrade", Collections.singleton(mockHiveEnv), "Updated hive-env during Ambari Upgrade from 2.0.0 to 2.1.0.")).andReturn(mockServiceConfigVersionResponse).once();
-    expect(mockClusterExpected.addDesiredConfig("ambari-upgrade", Collections.singleton(mockHiveServerSite), "Updated hiveserver2-site during Ambari Upgrade from 2.0.0 to 2.1.0.")).andReturn(mockServiceConfigVersionResponse).once();
-
-    expect(mockClusterExpected.getDesiredConfigByType("hive-site")).andReturn(mockHiveSite).atLeastOnce();
-    expect(mockHiveSite.getProperties()).andReturn(propertiesExpectedHiveSite).anyTimes();
-    expect(mockClusterExpected.getServices()).andReturn(servicesExpected).once();
-    expect(mockClusterExpected.getConfig(capture(configTypeSite), anyObject(String.class))).andReturn(mockHiveSite).once();
-    expect(mockClusterExpected.addDesiredConfig("ambari-upgrade", Collections.singleton(mockHiveSite), "Updated hive-site during Ambari Upgrade from 2.0.0 to 2.1.0.")).andReturn(mockServiceConfigVersionResponse).once();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog210.class).updateHiveConfigs();
-    easyMockSupport.verifyAll();
-
-    assertEquals("hive-env", configTypeEnv.getValue());
-    assertEquals("hive-site", configTypeSite.getValue());
-    assertEquals("hiveserver2-site", configTypeServerSite.getValue());
-  }
-
-  @Test
-  public void TestRangerSitePropertyConversion() throws Exception{
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final String clusterName = "c1";
-    final AmbariManagementController  mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster cluster = easyMockSupport.createStrictMock(Cluster.class);
-    final Config config = easyMockSupport.createNiceMock(Config.class);
-    final Map<String,Cluster> clusters = new HashMap<String,Cluster>(){{
-      put(clusterName, cluster);
-    }};
-    final Map<String,String> properties = new HashMap<String, String>() {{
-      put("HTTPS_CLIENT_AUTH", "test123");
-      put("HTTPS_KEYSTORE_FILE", "test123");
-      put("HTTPS_KEYSTORE_PASS", "test123");
-      put("HTTPS_KEY_ALIAS", "test123");
-      put("HTTPS_SERVICE_PORT", "test123");
-      put("HTTP_ENABLED", "test123");
-      put("HTTP_SERVICE_PORT", "test123");
-    }};
-
-    final Map<String, String> expectedPropertyMap = new HashMap<String, String>() {{
-      put("HTTPS_CLIENT_AUTH", "https.attrib.clientAuth");
-      put("HTTPS_KEYSTORE_FILE", "https.attrib.keystoreFile");
-      put("HTTPS_KEYSTORE_PASS", "https.attrib.keystorePass");
-      put("HTTPS_KEY_ALIAS", "https.attrib.keyAlias");
-      put("HTTP_SERVICE_PORT", "http.service.port");
-      put("HTTPS_SERVICE_PORT", "https.service.port");
-      put("HTTP_ENABLED", "http.enabled");
-    }};
-
-    final Map<String,String> convertedProperties = new HashMap<>();
-    final Set<String> removedProperties = new HashSet<>();
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(ConfigHelper.class).toInstance(mockConfigHelper);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    UpgradeCatalog210 upgradeCatalog210 = new UpgradeCatalog210(mockInjector) {
-
-      @Override
-      protected void updateConfigurationPropertiesForCluster(Cluster cluster, String configType,
-        Map<String, String> properties, boolean updateIfExists, boolean createNewConfigType) throws AmbariException {
-        convertedProperties.putAll(properties);
-      }
-
-      @Override
-      protected void removeConfigurationPropertiesFromCluster(Cluster cluster, String configType, Set<String> removePropertiesList)
-        throws AmbariException {
-        removedProperties.addAll(removePropertiesList);
-      }
-    };
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).atLeastOnce();
-    expect(mockClusters.getClusters()).andReturn(clusters).atLeastOnce();
-    expect(config.getProperties()).andReturn(properties).atLeastOnce();
-    expect(cluster.getDesiredConfigByType("ranger-site")).andReturn(config).atLeastOnce();
-
-    replay(mockAmbariManagementController, mockClusters, cluster, config);
-
-    upgradeCatalog210.updateRangerSiteConfigs();
-
-
-    for (Map.Entry<String,String> propertyEntry: expectedPropertyMap.entrySet()){
-      String oldKey = propertyEntry.getKey();
-      String newKey = propertyEntry.getValue();
-      assertTrue(String.format("Old property %s doesn't migrated to new name %s", oldKey, newKey), convertedProperties.containsKey(newKey));
-      assertTrue(String.format("Property value %s doesn't preserved after renaming: %s",properties.get(oldKey), convertedProperties.get(newKey)),
-        convertedProperties.get(newKey).equals(properties.get(oldKey)));
-      assertTrue(String.format("Old property %s doesn't removed after renaming", oldKey), removedProperties.contains(oldKey));
-    }
-  }
-
-  @Test
-  public void testUpdateHiveConfigsWithKerberos() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-    final AmbariManagementController  mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Config mockHiveEnv = easyMockSupport.createNiceMock(Config.class);
-    final Config mockHiveSite = easyMockSupport.createNiceMock(Config.class);
-    final Config mockHiveServerSite = easyMockSupport.createNiceMock(Config.class);
-
-    final Map<String, String> propertiesExpectedHiveEnv = new HashMap<>();
-    final Map<String, String> propertiesExpectedHiveSite = new HashMap<String, String>() {{
-      put("hive.server2.authentication", "kerberos");
-    }};
-    final Map<String, String> propertiesExpectedHiveServerSite = new HashMap<>();
-    final Map<String, Service> servicesExpected = new HashMap<String, Service>(){{
-      put("KERBEROS", null);
-    }};
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-      }
-    });
-
-    final UpgradeCatalog210 upgradeCatalog210 =  mockInjector.getInstance(UpgradeCatalog210.class);
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    Capture<Map<String,String>> configCreation = Capture.newInstance(CaptureType.ALL);
-
-    expect(mockClusterExpected.getDesiredConfigByType("hive-env")).andReturn(mockHiveEnv).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("hiveserver2-site")).andReturn(mockHiveServerSite).atLeastOnce();
-    expect(mockHiveEnv.getProperties()).andReturn(propertiesExpectedHiveEnv).anyTimes();
-    expect(mockHiveServerSite.getProperties()).andReturn(propertiesExpectedHiveServerSite).anyTimes();
-
-    expect(mockClusterExpected.getDesiredConfigByType("hive-site")).andReturn(mockHiveSite).atLeastOnce();
-    expect(mockHiveSite.getProperties()).andReturn(propertiesExpectedHiveSite).anyTimes();
-    expect(mockClusterExpected.getServices()).andReturn(servicesExpected).atLeastOnce();
-    expect(mockAmbariManagementController.createConfig((Cluster)anyObject(), anyObject(StackId.class),
-      anyString(),
-      capture(configCreation),
-      anyString(),
-      EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(null).atLeastOnce();
-
-    easyMockSupport.replayAll();
-    upgradeCatalog210.updateHiveConfigs();
-    easyMockSupport.verifyAll();
-
-    Assert.assertEquals(2, configCreation.getValues().size());
-
-    boolean hiveSecFound = false;
-
-    for (Map<String, String> cfg: configCreation.getValues()){
-      if (cfg.containsKey("hive_security_authorization")) {
-        hiveSecFound = true;
-        Assert.assertTrue("sqlstdauth".equalsIgnoreCase(cfg.get("hive_security_authorization")));
-        break;
-      }
-    }
-
-    Assert.assertTrue(hiveSecFound);
-  }
-
-  @Test
-  public void testUpdateHiveConfigsWithRangerPlugin() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Config mockHiveEnv = easyMockSupport.createNiceMock(Config.class);
-    final Config mockHiveSite = easyMockSupport.createNiceMock(Config.class);
-    final Config mockHiveServerSite = easyMockSupport.createNiceMock(Config.class);
-    final Config mockHivePluginProperties = easyMockSupport.createNiceMock(Config.class);
-
-    final Map<String, String> propertiesExpectedHiveEnv = new HashMap<String, String>() {{
-      put("hive_security_authorization", "none");
-    }};
-    final Map<String, String> propertiesExpectedHiveSite = new HashMap<>();
-
-    final Map<String, String> propertiesExpectedPluginProperties = new HashMap<String, String>() {{
-      put("ranger-hive-plugin-enabled", "yes");
-    }};
-    final Map<String, String> propertiesExpectedHiveServerSite = new HashMap<String, String>() {{
-      put("hive.security.authorization.manager", "test");
-      put("hive.security.authenticator.manager", "test");
-    }};
-    final Map<String, Service> servicesExpected = new HashMap<String, Service>() {{
-      put("RANGER", null);
-    }};
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-      }
-    });
-
-    final UpgradeCatalog210 upgradeCatalog210 = mockInjector.getInstance(UpgradeCatalog210.class);
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    Capture<Map<String, String>> configCreation = Capture.newInstance(CaptureType.ALL);
-
-    expect(mockClusterExpected.getDesiredConfigByType("hive-env")).andReturn(mockHiveEnv).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("hiveserver2-site")).andReturn(mockHiveServerSite).atLeastOnce();
-    expect(mockHiveEnv.getProperties()).andReturn(propertiesExpectedHiveEnv).anyTimes();
-    expect(mockHiveServerSite.getProperties()).andReturn(propertiesExpectedHiveServerSite).anyTimes();
-
-    expect(mockClusterExpected.getDesiredConfigByType("ranger-hive-plugin-properties")).andReturn(mockHivePluginProperies).once();
-    expect(mockClusterExpected.getDesiredConfigByType("hive-site")).andReturn(mockHiveSite).atLeastOnce();
-    expect(mockHiveSite.getProperties()).andReturn(propertiesExpectedHiveSite).anyTimes();
-    expect(mockHivePluginProperties.getProperties()).andReturn(propertiesExpectedPluginProperties).anyTimes();
-    expect(mockClusterExpected.getServices()).andReturn(servicesExpected).atLeastOnce();
-    expect(mockAmbariManagementController.createConfig((Cluster) anyObject(), anyObject(StackId.class),
-        anyString(),
-        capture(configCreation),
-        anyString(),
-        EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(null).atLeastOnce();
-
-    easyMockSupport.replayAll();
-    upgradeCatalog210.updateHiveConfigs();
-    easyMockSupport.verifyAll();
-    Assert.assertEquals(1, configCreation.getValues().size());
-
-    boolean result = false;
-    for (Map<String, String> cfg : configCreation.getValues()) {
-      if (cfg.containsKey("hive.security.authorization.manager")) {
-        result = true;
-        break;
-      }
-    }
-    Assert.assertFalse(result);
-    result = false;
-    for (Map<String, String> cfg : configCreation.getValues()) {
-      if (cfg.containsKey("hive.security.authenticator.manager")) {
-        result = true;
-        break;
-      }
-    }
-    Assert.assertFalse(result);
-  }
-
-
-  @Test
-  public void TestUpdateHiveEnvContent() {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController  mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        bind(Clusters.class).toInstance(mockClusters);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-    String content = "# Start HIVE_AUX_JARS_PATH \n" +
-        "if [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n" +
-        "  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\n" +
-        "elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then \n" +
-        "  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog\n" +
-        "fi\n" +
-        "#End HIVE_AUX_JARS_PATH";
-    String expectedContent = "# Start HIVE_AUX_JARS_PATH \n" +
-        "if [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n" +
-        "  if [ -f \"${HIVE_AUX_JARS_PATH}\" ]; then    \n" +
-        "    export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\n" +
-        "  elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n" +
-        "    export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n" +
-        "  fi\n" +
-        "elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n" +
-        "  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n" +
-        "fi\n" +
-        "#End HIVE_AUX_JARS_PATH";
-
-    String modifiedContent = mockInjector.getInstance(UpgradeCatalog210.class).updateHiveEnvContent(content);
-    Assert.assertEquals(expectedContent, modifiedContent);
-  }
-
-  @Test
-  public void testInitializeClusterAndServiceWidgets() throws Exception {
-    final AmbariManagementController controller = createStrictMock(AmbariManagementController.class);
-    final Clusters clusters = createStrictMock(Clusters.class);
-    final Cluster cluster = createStrictMock(Cluster.class);
-    final Service service = createStrictMock(Service.class);
-    final Map<String, Cluster> clusterMap = Collections.singletonMap("c1", cluster);
-    final Map<String, Service> services = Collections.singletonMap("HBASE", service);
-
-
-    Module module = new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(AmbariManagementController.class).toInstance(controller);
-        binder.bind(Clusters.class).toInstance(clusters);
-        binder.bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    };
-
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(clusters.getClusters()).andReturn(clusterMap).anyTimes();
-    controller.initializeWidgetsAndLayouts(cluster, null);
-    expectLastCall().once();
-
-    expect(cluster.getServices()).andReturn(services).once();
-    controller.initializeWidgetsAndLayouts(cluster, service);
-    expectLastCall().once();
-
-    replay(controller, clusters, cluster);
-
-    Injector injector = Guice.createInjector(module);
-    injector.getInstance(UpgradeCatalog210.class).initializeClusterAndServiceWidgets();
-
-    verify(controller, clusters, cluster);
-  }
-
-  @Test
-  public void testUpdateStormConfiguration() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController  mockAmbariManagementController = easyMockSupport.createNiceMock(
-        AmbariManagementController.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-
-    final Config mockClusterEnv = easyMockSupport.createNiceMock(Config.class);
-    final Config mockStormSite = easyMockSupport.createNiceMock(Config.class);
-
-    final Map<String, String> propertiesExpectedClusterEnv = new HashMap<>();
-    propertiesExpectedClusterEnv.put("security_enabled", "true");
-    final Map<String, String> propertiesExpectedStormSite = new HashMap<>();
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        bind(Clusters.class).toInstance(mockClusters);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    expect(mockClusterExpected.getDesiredConfigByType("cluster-env")).andReturn(mockClusterEnv).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("storm-site")).andReturn(mockStormSite).atLeastOnce();
-    expect(mockClusterEnv.getProperties()).andReturn(propertiesExpectedClusterEnv).anyTimes();
-    expect(mockStormSite.getProperties()).andReturn(propertiesExpectedStormSite).anyTimes();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog210.class).updateStormConfigs();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testUpdateHBaseConfiguration() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController  mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Host mockHost = easyMockSupport.createNiceMock(Host.class);
-
-    final Config mockHBaseSite = easyMockSupport.createNiceMock(Config.class);
-    final Config mockHBaseEnv = easyMockSupport.createNiceMock(Config.class);
-
-    final Map<String, String> propertiesExpectedHBaseSite = new HashMap<>();
-    propertiesExpectedHBaseSite.put("hbase.region.server.rpc.scheduler.factory.class",
-        "org.apache.phoenix.hbase.index.ipc.PhoenixIndexRpcSchedulerFactory");
-    propertiesExpectedHBaseSite.put("hbase.security.authorization", "true");
-
-    final Map<String, String> propertiesExpectedHBaseEnv = new HashMap<>();
-    propertiesExpectedHBaseEnv.put("phoenix_sql_enabled", "false");
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        bind(Clusters.class).toInstance(mockClusters);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    expect(mockClusterExpected.getDesiredConfigByType("hbase-site")).andReturn(mockHBaseSite).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("hbase-env")).andReturn(mockHBaseEnv).atLeastOnce();
-    expect(mockHBaseSite.getProperties()).andReturn(propertiesExpectedHBaseSite).anyTimes();
-    expect(mockHBaseEnv.getProperties()).andReturn(propertiesExpectedHBaseEnv).anyTimes();
-
-    Capture<String> configType = EasyMock.newCapture();
-    Capture<String> configTag = EasyMock.newCapture();
-    expect(mockClusterExpected.getConfig(capture(configType), capture(configTag))).
-            andReturn(mockHBaseSite).atLeastOnce();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog210.class).updateHBaseConfigs();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testDeleteStormRestApiServiceComponent() throws Exception {
-    initData();
-
-    ClusterEntity clusterEntity = upgradeCatalogHelper.createCluster(injector,
-        "c1", desiredStackEntity, desiredRepositoryVersion);
-
-    OrmTestHelper helper = injector.getInstance(OrmTestHelper.class);
-    RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(
-        new StackId(desiredStackEntity.getStackName(), desiredStackEntity.getStackVersion()),
-        desiredRepositoryVersion);
-
-    ClusterServiceEntity clusterServiceEntity = upgradeCatalogHelper.createService(
-        injector, clusterEntity, "STORM");
-
-    HostEntity hostEntity = upgradeCatalogHelper.createHost(injector,
-        clusterEntity, "h1");
-
-    // Set current stack version
-    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
-    ClusterStateDAO clusterStateDAO = injector.getInstance(ClusterStateDAO.class);
-    ClusterStateEntity clusterStateEntity = new ClusterStateEntity();
-    clusterStateEntity.setClusterId(clusterEntity.getClusterId());
-    clusterStateEntity.setClusterEntity(clusterEntity);
-    clusterStateEntity.setCurrentStack(desiredStackEntity);
-    clusterStateDAO.create(clusterStateEntity);
-    clusterEntity.setClusterStateEntity(clusterStateEntity);
-    clusterDAO.merge(clusterEntity);
-
-    ServiceComponentDesiredStateEntity componentDesiredStateEntity = new ServiceComponentDesiredStateEntity();
-    componentDesiredStateEntity.setClusterId(clusterEntity.getClusterId());
-    componentDesiredStateEntity.setServiceName(clusterServiceEntity.getServiceName());
-    componentDesiredStateEntity.setClusterServiceEntity(clusterServiceEntity);
-    componentDesiredStateEntity.setComponentName("STORM_REST_API");
-    componentDesiredStateEntity.setDesiredRepositoryVersion(repositoryVersion);
-
-    ServiceComponentDesiredStateDAO componentDesiredStateDAO =
-      injector.getInstance(ServiceComponentDesiredStateDAO.class);
-
-    componentDesiredStateDAO.create(componentDesiredStateEntity);
-
-    HostComponentDesiredStateDAO hostComponentDesiredStateDAO =
-      injector.getInstance(HostComponentDesiredStateDAO.class);
-
-    HostComponentDesiredStateEntity hostComponentDesiredStateEntity = new HostComponentDesiredStateEntity();
-
-    hostComponentDesiredStateEntity.setClusterId(clusterEntity.getClusterId());
-    hostComponentDesiredStateEntity.setComponentName("STORM_REST_API");
-    hostComponentDesiredStateEntity.setAdminState(HostComponentAdminState.INSERVICE);
-    hostComponentDesiredStateEntity.setServiceName(clusterServiceEntity.getServiceName());
-    hostComponentDesiredStateEntity.setServiceComponentDesiredStateEntity(componentDesiredStateEntity);
-    hostComponentDesiredStateEntity.setHostEntity(hostEntity);
-
-    hostComponentDesiredStateDAO.create(hostComponentDesiredStateEntity);
-
-    HostComponentDesiredStateEntity entity = hostComponentDesiredStateDAO.findAll().get(0);
-
-    Assert.assertEquals(HostComponentAdminState.INSERVICE.name(), entity.getAdminState().name());
-
-    // ensure the desired state exists
-    Assert.assertNotNull(componentDesiredStateDAO.findByName(clusterEntity.getClusterId(), "STORM",
-        "STORM_REST_API"));
-
-    UpgradeCatalog210 upgradeCatalog210 = injector.getInstance(UpgradeCatalog210.class);
-    upgradeCatalog210.removeStormRestApiServiceComponent();
-
-    Assert.assertNull(componentDesiredStateDAO.findByName(clusterEntity.getClusterId(), "STORM",
-        "STORM_REST_API"));
-    tearDown();
-  }
-
-
-  @Test
-  public void testUpdateHDFSConfiguration() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController  mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-
-    final Config mockHdfsSite = easyMockSupport.createNiceMock(Config.class);
-    final Config mockCoreSite = easyMockSupport.createStrictMock(Config.class);
-
-    final Map<String, String> propertiesExpectedHdfs = new HashMap<>();
-    final Map<String, String> propertiesExpectedCoreSite = new HashMap<>();
-    propertiesExpectedHdfs.put("dfs.nameservices", "nncl1,nncl2");
-    propertiesExpectedHdfs.put("dfs.ha.namenodes.nncl2", "nn1,nn2");
-    propertiesExpectedCoreSite.put("fs.defaultFS", "hdfs://EXAMPLE.COM:8020");
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        bind(Clusters.class).toInstance(mockClusters);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    // Expected operation
-    expect(mockClusterExpected.getDesiredConfigByType("hadoop-env")).andReturn(null).once();
-
-    // Expected operation
-    expect(mockClusterExpected.getDesiredConfigByType("hdfs-site")).andReturn(mockHdfsSite).atLeastOnce();
-    expect(mockClusterExpected.getHosts("HDFS", "NAMENODE")).andReturn( new HashSet<String>() {{
-      add("host1");
-    }}).atLeastOnce();
-    expect(mockHdfsSite.getProperties()).andReturn(propertiesExpectedHdfs).anyTimes();
-
-    expect(mockClusterExpected.getDesiredConfigByType("core-site")).andReturn(mockCoreSite).anyTimes();
-    expect(mockCoreSite.getProperties()).andReturn(propertiesExpectedCoreSite).anyTimes();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog210.class).updateHdfsConfigs();
-    easyMockSupport.verifyAll();
-  }
-
-  /**
-   * Builds an upgrade catalog wired against the given database accessor and mocked dependencies.
-   *
-   * @param dbAccessor the database accessor to bind into the test injector
-   * @return an {@link UpgradeCatalog210} instance created by Guice
-   */
-  private AbstractUpgradeCatalog getUpgradeCatalog(final DBAccessor dbAccessor) {
-    Module module = new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(DBAccessor.class).toInstance(dbAccessor);
-        binder.bind(EntityManager.class).toInstance(entityManager);
-        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    };
-
-    Injector injector = Guice.createInjector(module);
-    return injector.getInstance(UpgradeCatalog210.class);
-  }
-
-  @Test
-  public void testGetSourceVersion() {
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    UpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor);
-    Assert.assertEquals("2.0.0", upgradeCatalog.getSourceVersion());
-  }
-
-  @Test
-  public void testGetTargetVersion() throws Exception {
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    UpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor);
-
-    Assert.assertEquals("2.1.0", upgradeCatalog.getTargetVersion());
-  }
-
-  @Test
-  public void testUpdateKerberosDescriptorArtifact_Simple() throws Exception {
-    final KerberosDescriptorFactory kerberosDescriptorFactory = new KerberosDescriptorFactory();
-
-    KerberosServiceDescriptor serviceDescriptor;
-
-    URL systemResourceURL = ClassLoader.getSystemResource("kerberos/test_kerberos_descriptor_simple.json");
-    assertNotNull(systemResourceURL);
-
-    final KerberosDescriptor kerberosDescriptorOrig = kerberosDescriptorFactory.createInstance(new File(systemResourceURL.getFile()));
-    assertNotNull(kerberosDescriptorOrig);
-    assertNotNull(kerberosDescriptorOrig.getIdentity("hdfs"));
-
-    serviceDescriptor = kerberosDescriptorOrig.getService("HDFS");
-    assertNotNull(serviceDescriptor);
-    assertNotNull(serviceDescriptor.getIdentity("/hdfs"));
-    assertNull(serviceDescriptor.getIdentity("hdfs"));
-
-    serviceDescriptor = kerberosDescriptorOrig.getService("OOZIE");
-    assertNotNull(serviceDescriptor);
-    assertNotNull(serviceDescriptor.getIdentity("/hdfs"));
-    assertNull(serviceDescriptor.getIdentity("/HDFS/hdfs"));
-
-    UpgradeCatalog210 upgradeMock = createMockBuilder(UpgradeCatalog210.class).createMock();
-
-    Capture<Map<String, Object>> updatedData = EasyMock.newCapture();
-
-    ArtifactEntity artifactEntity = createNiceMock(ArtifactEntity.class);
-    expect(artifactEntity.getArtifactData())
-        .andReturn(kerberosDescriptorOrig.toMap())
-        .once();
-
-    artifactEntity.setArtifactData(capture(updatedData));
-    expectLastCall().once();
-
-    replay(artifactEntity, upgradeMock);
-    upgradeMock.updateKerberosDescriptorArtifact(createNiceMock(ArtifactDAO.class), artifactEntity);
-    verify(artifactEntity, upgradeMock);
-
-    KerberosDescriptor kerberosDescriptorUpdated = new KerberosDescriptorFactory().createInstance(updatedData.getValue());
-    assertNotNull(kerberosDescriptorUpdated);
-    assertNull(kerberosDescriptorUpdated.getIdentity("/hdfs"));
-
-    serviceDescriptor = kerberosDescriptorUpdated.getService("HDFS");
-    assertNotNull(serviceDescriptor);
-    assertNull(serviceDescriptor.getIdentity("/hdfs"));
-    assertNotNull(serviceDescriptor.getIdentity("hdfs"));
-
-    serviceDescriptor = kerberosDescriptorUpdated.getService("OOZIE");
-    assertNotNull(serviceDescriptor);
-    assertNull(serviceDescriptor.getIdentity("/hdfs"));
-    assertNotNull(serviceDescriptor.getIdentity("/HDFS/hdfs"));
-  }
-
-  @Test
-  public void testUpdateKerberosDescriptorArtifact_NoHDFSService() throws Exception {
-    final KerberosDescriptorFactory kerberosDescriptorFactory = new KerberosDescriptorFactory();
-
-    KerberosServiceDescriptor serviceDescriptor;
-
-    URL systemResourceURL = ClassLoader.getSystemResource("kerberos/test_kerberos_descriptor_no_hdfs.json");
-    assertNotNull(systemResourceURL);
-
-    final KerberosDescriptor kerberosDescriptorOrig = kerberosDescriptorFactory.createInstance(new File(systemResourceURL.getFile()));
-    assertNotNull(kerberosDescriptorOrig);
-    assertNotNull(kerberosDescriptorOrig.getIdentity("hdfs"));
-
-    serviceDescriptor = kerberosDescriptorOrig.getService("HDFS");
-    assertNull(serviceDescriptor);
-
-    serviceDescriptor = kerberosDescriptorOrig.getService("OOZIE");
-    assertNotNull(serviceDescriptor);
-    assertNotNull(serviceDescriptor.getIdentity("/hdfs"));
-    assertNull(serviceDescriptor.getIdentity("/HDFS/hdfs"));
-
-    UpgradeCatalog210 upgradeMock = createMockBuilder(UpgradeCatalog210.class).createMock();
-
-    Capture<Map<String, Object>> updatedData = EasyMock.newCapture();
-
-    ArtifactEntity artifactEntity = createNiceMock(ArtifactEntity.class);
-    expect(artifactEntity.getArtifactData())
-        .andReturn(kerberosDescriptorOrig.toMap())
-        .once();
-
-    artifactEntity.setArtifactData(capture(updatedData));
-    expectLastCall().once();
-
-    replay(artifactEntity, upgradeMock);
-    upgradeMock.updateKerberosDescriptorArtifact(createNiceMock(ArtifactDAO.class), artifactEntity);
-    verify(artifactEntity, upgradeMock);
-
-    KerberosDescriptor kerberosDescriptorUpdated = new KerberosDescriptorFactory().createInstance(updatedData.getValue());
-    assertNotNull(kerberosDescriptorUpdated);
-    assertNull(kerberosDescriptorUpdated.getIdentity("/hdfs"));
-
-    serviceDescriptor = kerberosDescriptorUpdated.getService("HDFS");
-    assertNotNull(serviceDescriptor);
-    assertNull(serviceDescriptor.getIdentity("/hdfs"));
-    assertNotNull(serviceDescriptor.getIdentity("hdfs"));
-
-    serviceDescriptor = kerberosDescriptorUpdated.getService("OOZIE");
-    assertNotNull(serviceDescriptor);
-    assertNull(serviceDescriptor.getIdentity("/hdfs"));
-    assertNotNull(serviceDescriptor.getIdentity("/HDFS/hdfs"));
-  }
-
-  // *********** Inner Classes that represent sections of the DDL ***********
-  // ************************************************************************
-
-  /**
-   * Verify that all of the host-related tables added a column for the host_id
-   */
-  class HostSectionDDL implements SectionDDL {
-
-    HashMap<String, Capture<DBColumnInfo>> captures;
-
-    public HostSectionDDL() {
-      // Capture all tables that will have the host_id column added to it.
-      captures = new HashMap<>();
-
-      // Column Capture section
-      // Hosts
-      Capture<DBAccessor.DBColumnInfo> clusterHostMappingColumnCapture = EasyMock.newCapture();
-      Capture<DBAccessor.DBColumnInfo> configGroupHostMappingColumnCapture = EasyMock.newCapture();
-      Capture<DBAccessor.DBColumnInfo> hostConfigMappingColumnCapture = EasyMock.newCapture();
-      Capture<DBAccessor.DBColumnInfo> hostsColumnCapture = EasyMock.newCapture();
-      Capture<DBAccessor.DBColumnInfo> hostComponentStateColumnCapture = EasyMock.newCapture();
-      Capture<DBAccessor.DBColumnInfo> hostComponentDesiredStateColumnCapture = EasyMock.newCapture();
-      Capture<DBAccessor.DBColumnInfo> hostRoleCommandColumnCapture = EasyMock.newCapture();
-      Capture<DBAccessor.DBColumnInfo> hostStateColumnCapture = EasyMock.newCapture();
-      Capture<DBAccessor.DBColumnInfo> hostVersionColumnCapture = EasyMock.newCapture();
-      Capture<DBAccessor.DBColumnInfo> kerberosPrincipalHostColumnCapture = EasyMock.newCapture();
-      Capture<DBAccessor.DBColumnInfo> requestOperationLevelColumnCapture = EasyMock.newCapture();
-      Capture<DBAccessor.DBColumnInfo> serviceConfigHostsColumnCapture = EasyMock.newCapture();
-
-      captures.put("ClusterHostMapping", clusterHostMappingColumnCapture);
-      captures.put("configgrouphostmapping", configGroupHostMappingColumnCapture);
-      captures.put("hostconfigmapping", hostConfigMappingColumnCapture);
-      captures.put("hosts", hostsColumnCapture);
-      captures.put("hostcomponentstate", hostComponentStateColumnCapture);
-      captures.put("hostcomponentdesiredstate", hostComponentDesiredStateColumnCapture);
-      captures.put("host_role_command", hostRoleCommandColumnCapture);
-      captures.put("hoststate", hostStateColumnCapture);
-      captures.put("host_version", hostVersionColumnCapture);
-      captures.put("kerberos_principal_host", kerberosPrincipalHostColumnCapture);
-      captures.put("requestoperationlevel", requestOperationLevelColumnCapture);
-      captures.put("serviceconfighosts", serviceConfigHostsColumnCapture);
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void execute(DBAccessor dbAccessor) throws SQLException {
-      // Add columns and alter table section
-      dbAccessor.addColumn(eq("ClusterHostMapping"), capture(captures.get("ClusterHostMapping")));
-      dbAccessor.addColumn(eq("configgrouphostmapping"), capture(captures.get("configgrouphostmapping")));
-      dbAccessor.addColumn(eq("hostconfigmapping"), capture(captures.get("hostconfigmapping")));
-      dbAccessor.addColumn(eq("hosts"), capture(captures.get("hosts")));
-      dbAccessor.addColumn(eq("hostcomponentstate"), capture(captures.get("hostcomponentstate")));
-      dbAccessor.addColumn(eq("hostcomponentdesiredstate"), capture(captures.get("hostcomponentdesiredstate")));
-      dbAccessor.addColumn(eq("host_role_command"), capture(captures.get("host_role_command")));
-      dbAccessor.addColumn(eq("hoststate"), capture(captures.get("hoststate")));
-      dbAccessor.addColumn(eq("host_version"), capture(captures.get("host_version")));
-      dbAccessor.addColumn(eq("kerberos_principal_host"), capture(captures.get("kerberos_principal_host")));
-      dbAccessor.addColumn(eq("requestoperationlevel"), capture(captures.get("requestoperationlevel")));
-      dbAccessor.addColumn(eq("serviceconfighosts"), capture(captures.get("serviceconfighosts")));
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void verify(DBAccessor dbAccessor) throws SQLException {
-      // Verification section
-      for (Capture<DBColumnInfo> columnCapture : captures.values()) {
-        verifyContainsHostIdColumn(columnCapture);
-      }
-    }
-
-    /**
-     * Verify that the column capture of the table contains a host_id column of type Long.
-     * This is needed for all of the host-related tables that are switching from the
-     * host_name to the host_id.
-     * @param columnCapture
-     */
-    private void verifyContainsHostIdColumn(Capture<DBAccessor.DBColumnInfo> columnCapture) {
-      DBColumnInfo idColumn = columnCapture.getValue();
-      Assert.assertEquals(Long.class, idColumn.getType());
-      Assert.assertEquals("host_id", idColumn.getName());
-    }
-  }
-
-  /**
-   * Verify that the widget, widget_layout, and widget_layout_user_widget tables are created correctly.
-   */
-  class WidgetSectionDDL implements SectionDDL {
-
-    HashMap<String, Capture<List<DBColumnInfo>>> captures;
-    Capture<DBColumnInfo> userActiveLayoutsColumnCapture;
-
-    public WidgetSectionDDL() {
-      captures = new HashMap<>();
-
-      Capture<List<DBColumnInfo>> userWidgetColumnsCapture = EasyMock.newCapture();
-      Capture<List<DBColumnInfo>> widgetLayoutColumnsCapture = EasyMock.newCapture();
-      Capture<List<DBColumnInfo>> widgetLayoutUserWidgetColumnsCapture = EasyMock.newCapture();
-
-      captures.put("widget", userWidgetColumnsCapture);
-      captures.put("widget_layout", widgetLayoutColumnsCapture);
-      captures.put("widget_layout_user_widget", widgetLayoutUserWidgetColumnsCapture);
-      userActiveLayoutsColumnCapture = EasyMock.newCapture();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void execute(DBAccessor dbAccessor) throws SQLException {
-      Capture<List<DBColumnInfo>> userWidgetColumnsCapture = captures.get("widget");
-      Capture<List<DBColumnInfo>> widgetLayoutColumnsCapture = captures.get("widget_layout");
-      Capture<List<DBColumnInfo>> widgetLayoutUserWidgetColumnsCapture = captures.get("widget_layout_user_widget");
-
-      // User Widget
-      dbAccessor.createTable(eq("widget"),
-          capture(userWidgetColumnsCapture), eq("id"));
-
-      // Widget Layout
-      dbAccessor.createTable(eq("widget_layout"),
-          capture(widgetLayoutColumnsCapture), eq("id"));
-
-      // Widget Layout User Widget
-      dbAccessor.createTable(eq("widget_layout_user_widget"),
-          capture(widgetLayoutUserWidgetColumnsCapture), eq("widget_layout_id"), eq("widget_id"));
-
-      dbAccessor.addColumn(eq("users"), capture(userActiveLayoutsColumnCapture));
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void verify(DBAccessor dbAccessor) throws SQLException {
-      Capture<List<DBColumnInfo>> widgetColumnsCapture = captures.get("widget");
-      Capture<List<DBColumnInfo>> widgetLayoutColumnsCapture = captures.get("widget_layout");
-      Capture<List<DBColumnInfo>> widgetLayoutUserWidgetColumnsCapture = captures.get("widget_layout_user_widget");
-
-      // Verify widget tables
-      assertEquals(12, widgetColumnsCapture.getValue().size());
-      assertEquals(7, widgetLayoutColumnsCapture.getValue().size());
-      assertEquals(3, widgetLayoutUserWidgetColumnsCapture.getValue().size());
-
-      DBColumnInfo idColumn = userActiveLayoutsColumnCapture.getValue();
-      Assert.assertEquals(String.class, idColumn.getType());
-      Assert.assertEquals("active_widget_layouts", idColumn.getName());
-    }
-  }
-
-  /**
-   * Verify view changes
-   */
-  class ViewSectionDDL implements SectionDDL {
-
-    HashMap<String, Capture<DBColumnInfo>> captures;
-
-    public ViewSectionDDL() {
-      captures = new HashMap<>();
-
-      Capture<DBAccessor.DBColumnInfo> viewInstanceColumnCapture = EasyMock.newCapture();
-      Capture<DBAccessor.DBColumnInfo> viewInstanceAlterNamesColumnCapture = EasyMock.newCapture();
-      Capture<DBAccessor.DBColumnInfo> viewParamColumnCapture = EasyMock.newCapture();
-      Capture<DBAccessor.DBColumnInfo> viewBuildColumnCapture = EasyMock.newCapture();
-
-      captures.put("viewinstance", viewInstanceColumnCapture);
-      captures.put("viewinstance_alter_names", viewInstanceAlterNamesColumnCapture);
-      captures.put("viewparameter", viewParamColumnCapture);
-      captures.put("viewmain", viewBuildColumnCapture);
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void execute(DBAccessor dbAccessor) throws SQLException {
-      Capture<DBColumnInfo> viewInstanceColumnCapture = captures.get("viewinstance");
-      Capture<DBColumnInfo> viewInstanceAlterNamesColumnCapture = captures.get("viewinstance_alter_names");
-      Capture<DBColumnInfo> viewParamColumnCapture = captures.get("viewparameter");
-      Capture<DBColumnInfo> viewBuildColumnCapture = captures.get("viewmain");
-
-      dbAccessor.addColumn(eq("viewinstance"), capture(viewInstanceColumnCapture));
-      dbAccessor.addColumn(eq("viewinstance"), capture(viewInstanceAlterNamesColumnCapture));
-      dbAccessor.addColumn(eq("viewparameter"), capture(viewParamColumnCapture));
-      dbAccessor.addColumn(eq("viewmain"), capture(viewBuildColumnCapture));
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void verify(DBAccessor dbAccessor) throws SQLException {
-      verifyViewInstance(captures.get("viewinstance"));
-      verifyViewInstanceAlterNames(captures.get("viewinstance_alter_names"));
-      verifyViewParameter(captures.get("viewparameter"));
-      verifyViewBuild(captures.get("viewmain"));
-    }
-
-    private void verifyViewInstance(Capture<DBAccessor.DBColumnInfo> viewInstanceColumnCapture) {
-      DBColumnInfo clusterIdColumn = viewInstanceColumnCapture.getValue();
-      Assert.assertEquals(String.class, clusterIdColumn.getType());
-      Assert.assertEquals("cluster_handle", clusterIdColumn.getName());
-    }
-
-    private void verifyViewInstanceAlterNames(Capture<DBAccessor.DBColumnInfo> viewInstanceAlterNamesColumnCapture) {
-      DBColumnInfo clusterIdColumn = viewInstanceAlterNamesColumnCapture.getValue();
-      Assert.assertEquals(Integer.class, clusterIdColumn.getType());
-      Assert.assertEquals("alter_names", clusterIdColumn.getName());
-    }
-
-    private void verifyViewParameter(Capture<DBAccessor.DBColumnInfo> viewParamColumnCapture) {
-      DBColumnInfo clusterConfigColumn = viewParamColumnCapture.getValue();
-      Assert.assertEquals(String.class, clusterConfigColumn.getType());
-      Assert.assertEquals("cluster_config", clusterConfigColumn.getName());
-    }
-
-    private void verifyViewBuild(Capture<DBAccessor.DBColumnInfo> viewBuildColumnCapture) {
-      DBColumnInfo clusterConfigColumn = viewBuildColumnCapture.getValue();
-      Assert.assertEquals(String.class, clusterConfigColumn.getType());
-      Assert.assertEquals("build", clusterConfigColumn.getName());
-    }
-  }
-
-  /**
-   * Verify alert changes
-   */
-  class AlertSectionDDL implements SectionDDL {
-    HashMap<String, Capture<String>> stringCaptures;
-    HashMap<String, Capture<Class>> classCaptures;
-
-
-    public AlertSectionDDL() {
-      stringCaptures = new HashMap<>();
-      classCaptures = new HashMap<>();
-
-      Capture<String> textCaptureC = EasyMock.newCapture();
-      Capture<String> textCaptureH = EasyMock.newCapture();
-      Capture<Class>  classFromC = EasyMock.newCapture();
-      Capture<Class>  classFromH = EasyMock.newCapture();
-      Capture<Class>  classToC = EasyMock.newCapture();
-      Capture<Class>  classToH = EasyMock.newCapture();
-
-      stringCaptures.put("textCaptureC", textCaptureC);
-      stringCaptures.put("textCaptureH", textCaptureH);
-      classCaptures.put("classFromC", classFromC);
-      classCaptures.put("classFromH", classFromH);
-      classCaptures.put("classToC", classToC);
-      classCaptures.put("classToH", classToH);
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void execute(DBAccessor dbAccessor) throws SQLException {
-      Capture<String> textCaptureC = stringCaptures.get("textCaptureC");
-      Capture<String> textCaptureH = stringCaptures.get("textCaptureH");
-      Capture<Class>  classFromC = classCaptures.get("classFromC");
-      Capture<Class>  classFromH = classCaptures.get("classFromH");
-      Capture<Class>  classToC = classCaptures.get("classToC");
-      Capture<Class>  classToH = classCaptures.get("classToH");
-
-      dbAccessor.changeColumnType(eq("alert_current"), capture(textCaptureC), capture(classFromC), capture(classToC));
-      dbAccessor.changeColumnType(eq("alert_history"), capture(textCaptureH), capture(classFromH), capture(classToH));
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void verify(DBAccessor dbAccessor) throws SQLException {
-      Capture<String> textCaptureC = stringCaptures.get("textCaptureC");
-      Capture<String> textCaptureH = stringCaptures.get("textCaptureH");
-      Capture<Class>  classFromC = classCaptures.get("classFromC");
-      Capture<Class>  classFromH = classCaptures.get("classFromH");
-      Capture<Class>  classToC = classCaptures.get("classToC");
-      Capture<Class>  classToH = classCaptures.get("classToH");
-
-      Assert.assertEquals("latest_text", textCaptureC.getValue());
-      Assert.assertEquals(String.class, classFromC.getValue());
-      Assert.assertEquals(char[].class, classToC.getValue());
-
-      Assert.assertEquals("alert_text", textCaptureH.getValue());
-      Assert.assertEquals(String.class, classFromH.getValue());
-      Assert.assertEquals(char[].class, classToH.getValue());
-    }
-  }
-}
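
For context on the SectionDDL helpers removed above: they all lean on EasyMock's Capture idiom to record the DBColumnInfo handed to DBAccessor during an upgrade and to assert on it after verification. A minimal, self-contained sketch of that idiom follows; the table name "example_table" and column "example_id" are illustrative only and do not correspond to any remaining catalog.

import static org.easymock.EasyMock.capture;
import static org.easymock.EasyMock.createNiceMock;
import static org.easymock.EasyMock.eq;
import static org.easymock.EasyMock.newCapture;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;

import org.apache.ambari.server.orm.DBAccessor;
import org.easymock.Capture;
import org.junit.Assert;
import org.junit.Test;

public class CaptureIdiomSketchTest {

  @Test
  public void capturesColumnAddedDuringUpgrade() throws Exception {
    DBAccessor dbAccessor = createNiceMock(DBAccessor.class);

    // Record phase: expect addColumn on the hypothetical "example_table"
    // and capture whatever DBColumnInfo the code under test builds.
    Capture<DBAccessor.DBColumnInfo> columnCapture = newCapture();
    dbAccessor.addColumn(eq("example_table"), capture(columnCapture));

    replay(dbAccessor);

    // Stand-in for the upgrade code under test.
    dbAccessor.addColumn("example_table",
        new DBAccessor.DBColumnInfo("example_id", Long.class, null, null, false));

    verify(dbAccessor);

    // Assert on the captured column instead of on generated SQL.
    DBAccessor.DBColumnInfo added = columnCapture.getValue();
    Assert.assertEquals("example_id", added.getName());
    Assert.assertEquals(Long.class, added.getType());
  }
}

The same single-capture pattern is what the HostSectionDDL, WidgetSectionDDL, ViewSectionDDL and AlertSectionDDL classes above scale up into per-table maps of captures.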

http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog211Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog211Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog211Test.java
deleted file mode 100644
index 60efead..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog211Test.java
+++ /dev/null
@@ -1,446 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.newCapture;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.Statement;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicLong;
-
-import javax.persistence.EntityManager;
-
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.configuration.Configuration.DatabaseType;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.ConfigurationRequest;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.dao.StackDAO;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.stack.OsFamily;
-import org.easymock.Capture;
-import org.easymock.EasyMock;
-import org.easymock.EasyMockSupport;
-import org.junit.Assert;
-import org.junit.Test;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.Binder;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.Module;
-import com.google.inject.Provider;
-import com.google.inject.persist.PersistService;
-
-
-/**
- * {@link UpgradeCatalog211} unit tests.
- */
-public class UpgradeCatalog211Test extends EasyMockSupport {
-
-  @Test
-  public void testExecuteDDLUpdates() throws Exception {
-    Injector injector = initInjector();
-
-    try {
-      Provider<EntityManager> entityManagerProvider = initEntityManagerProvider();
-
-      final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-      final OsFamily osFamily = createNiceMock(OsFamily.class);
-      Configuration configuration = createNiceMock(Configuration.class);
-      Connection connection = createNiceMock(Connection.class);
-      Statement statement = createNiceMock(Statement.class);
-      ResultSet resultSet = createNiceMock(ResultSet.class);
-      expect(configuration.getDatabaseUrl()).andReturn(Configuration.JDBC_IN_MEMORY_URL).anyTimes();
-      expect(configuration.getDatabaseType()).andReturn(DatabaseType.DERBY).anyTimes();
-      dbAccessor.getConnection();
-      expectLastCall().andReturn(connection).anyTimes();
-      connection.createStatement();
-      expectLastCall().andReturn(statement).anyTimes();
-      statement.executeQuery("SELECT COUNT(*) from ambari_sequences where sequence_name='hostcomponentstate_id_seq'");
-      expectLastCall().andReturn(resultSet).atLeastOnce();
-
-      ResultSet rs1 = createNiceMock(ResultSet.class);
-      expect(rs1.next()).andReturn(Boolean.TRUE).once();
-
-      statement.executeQuery(anyObject(String.class));
-      expectLastCall().andReturn(rs1).anyTimes();
-
-      Capture<String> queryCapture = EasyMock.newCapture();
-      dbAccessor.executeQuery(capture(queryCapture));
-      expectLastCall().once();
-
-      dbAccessor.setColumnNullable("viewinstanceproperty", "value", true);
-      expectLastCall().once();
-      dbAccessor.setColumnNullable("viewinstancedata", "value", true);
-      expectLastCall().once();
-
-      // Create DDL sections with their own capture groups
-      // Example: AlertSectionDDL alertSectionDDL = new AlertSectionDDL();
-
-      // Execute any DDL schema changes
-      // Example: alertSectionDDL.execute(dbAccessor);
-
-      // Replay sections
-      replayAll();
-
-      AbstractUpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor, osFamily, entityManagerProvider.get());
-      Class<?> c = AbstractUpgradeCatalog.class;
-      Field f = c.getDeclaredField("configuration");
-      f.setAccessible(true);
-      f.set(upgradeCatalog, configuration);
-
-      f = UpgradeCatalog211.class.getDeclaredField("m_hcsId");
-      f.setAccessible(true);
-      f.set(upgradeCatalog, new AtomicLong(1001));
-
-      upgradeCatalog.executeDDLUpdates();
-      verifyAll();
-
-      Assert.assertTrue(queryCapture.hasCaptured());
-      Assert.assertTrue(queryCapture.getValue().contains("1001"));
-
-      // Verify sections
-      // Example: alertSectionDDL.verify(dbAccessor);
-    } finally {
-      destroyInjector(injector);
-    }
-  }
-
-  @Test
-  public void testExecutePreDMLUpdates() throws Exception {
-
-    final UpgradeCatalog211 upgradeCatalog211 = createMockBuilder(UpgradeCatalog211.class)
-        // Add mocked methods. Example: .addMockedMethod(cleanupStackUpdates)
-        .createMock();
-
-    final Injector injector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(UpgradeCatalog211.class).toInstance(upgradeCatalog211);
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    setInjector(upgradeCatalog211, injector);
-
-    replayAll();
-
-    injector.getInstance(UpgradeCatalog211.class).executePreDMLUpdates();
-
-    verifyAll();
-  }
-
-  @Test
-  public void testExecuteDMLUpdates() throws Exception {
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    final OsFamily osFamily = createNiceMock(OsFamily.class);
-
-    final Cluster cluster = createMock(Cluster.class);
-
-    final Clusters clusters = createMock(Clusters.class);
-    expect(clusters.getClusters())
-        .andReturn(Collections.singletonMap("c1", cluster));
-
-    final AmbariManagementController controller = createNiceMock(AmbariManagementController.class);
-    expect(controller.getClusters())
-        .andReturn(clusters)
-        .once();
-
-    final Injector injector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(controller);
-        bind(DBAccessor.class).toInstance(dbAccessor);
-        bind(OsFamily.class).toInstance(osFamily);
-      }
-    });
-
-    Method addNewConfigurationsFromXml =
-        AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
-
-    Method updateKerberosConfigurations =
-        UpgradeCatalog211.class.getDeclaredMethod("updateKerberosConfigurations", Cluster.class);
-
-    UpgradeCatalog211 upgradeCatalog211 = createMockBuilder(UpgradeCatalog211.class)
-        .addMockedMethod(addNewConfigurationsFromXml)
-        .addMockedMethod(updateKerberosConfigurations)
-        .createMock();
-
-    setInjector(upgradeCatalog211, injector);
-
-    upgradeCatalog211.addNewConfigurationsFromXml();
-    expectLastCall().once();
-
-    upgradeCatalog211.updateKerberosConfigurations(anyObject(Cluster.class));
-    expectLastCall().once();
-
-    replayAll();
-
-    upgradeCatalog211.executeDMLUpdates();
-
-    verifyAll();
-  }
-
-  @Test
-  public void testUpdateKerberosConfiguration() throws Exception {
-    final AmbariManagementController controller = createNiceMock(AmbariManagementController.class);
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    final OsFamily osFamily = createNiceMock(OsFamily.class);
-
-    final Map<String, String> propertiesKerberosEnv = new HashMap<String, String>() {
-      {
-        put("create_attributes_template", "create_attributes_template content");
-        put("realm", "EXAMPLE.COM");
-        put("container_dn", "");
-        put("ldap_url", "");
-        put("encryption_types", "aes des3-cbc-sha1 rc4 des-cbc-md5");
-        put("kdc_host", "c6407.ambari.apache.org");
-        put("admin_server_host", "c6407.ambari.apache.org");
-        put("kdc_type", "mit-kdc");
-      }
-    };
-
-    final Config configKerberosEnv = createNiceMock(Config.class);
-    expect(configKerberosEnv.getProperties()).andReturn(propertiesKerberosEnv).anyTimes();
-    expect(configKerberosEnv.getTag()).andReturn("tag1").anyTimes();
-
-    final Cluster cluster = createNiceMock(Cluster.class);
-    expect(cluster.getDesiredConfigByType("kerberos-env")).andReturn(configKerberosEnv).once();
-
-    final Injector injector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(controller);
-        bind(DBAccessor.class).toInstance(dbAccessor);
-        bind(OsFamily.class).toInstance(osFamily);
-      }
-    });
-
-    /* *********************************************************
-     * Expects for updateConfigurationPropertiesForCluster
-     * **** */
-    expect(cluster.getConfigsByType("kerberos-env"))
-        .andReturn(Collections.singletonMap("tag1", configKerberosEnv))
-        .once();
-
-    expect(cluster.getDesiredConfigByType("kerberos-env"))
-        .andReturn(configKerberosEnv)
-        .once();
-
-    Capture<ConfigurationRequest> captureCR = EasyMock.newCapture();
-    Capture<Cluster> clusterCapture = newCapture();
-    Capture<String> typeCapture = newCapture();
-    Capture<Map<String, String>> propertiesCapture = newCapture();
-    Capture<String> tagCapture = newCapture();
-    Capture<Map<String, Map<String, String>>> attributesCapture = newCapture();
-
-
-    expect(controller.createConfig(capture(clusterCapture), anyObject(StackId.class),capture(typeCapture),
-        capture(propertiesCapture), capture(tagCapture), capture(attributesCapture) ))
-        .andReturn(createNiceMock(Config.class))
-        .once();
-
-    /* ****
-     * Expects for updateConfigurationPropertiesForCluster (end)
-     * ********************************************************* */
-
-    replayAll();
-
-    injector.getInstance(UpgradeCatalog211.class).updateKerberosConfigurations(cluster);
-
-    verifyAll();
-
-    Map<String, String> capturedCRProperties = propertiesCapture.getValue();
-    Assert.assertNotNull(capturedCRProperties);
-    Assert.assertFalse(capturedCRProperties.containsKey("create_attributes_template"));
-    Assert.assertTrue(capturedCRProperties.containsKey("ad_create_attributes_template"));
-
-    for (String property : propertiesKerberosEnv.keySet()) {
-      if ("create_attributes_template".equals(property)) {
-        Assert.assertEquals("create_attributes_template/ad_create_attributes_template", propertiesKerberosEnv.get(property), capturedCRProperties.get("ad_create_attributes_template"));
-      } else {
-        Assert.assertEquals(property, propertiesKerberosEnv.get(property), capturedCRProperties.get(property));
-      }
-    }
-  }
-
-  @Test
-  public void testGetSourceVersion() {
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    final OsFamily osFamily = createNiceMock(OsFamily.class);
-    Provider<EntityManager> entityManagerProvider = initEntityManagerProvider();
-
-    replayAll();
-
-    UpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor, osFamily, entityManagerProvider.get());
-
-    Assert.assertEquals("2.1.0", upgradeCatalog.getSourceVersion());
-
-    verifyAll();
-  }
-
-  @Test
-  public void testGetTargetVersion() throws Exception {
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    final OsFamily osFamily = createNiceMock(OsFamily.class);
-    Provider<EntityManager> entityManagerProvider = initEntityManagerProvider();
-
-    replayAll();
-
-    UpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor, osFamily, entityManagerProvider.get());
-
-    Assert.assertEquals("2.1.1", upgradeCatalog.getTargetVersion());
-
-    verifyAll();
-  }
-
-  private Provider<EntityManager> initEntityManagerProvider() {
-    Provider<EntityManager> entityManagerProvider = createStrictMock(Provider.class);
-
-    EntityManager entityManager = createNiceMock(EntityManager.class);
-    expect(entityManagerProvider.get())
-        .andReturn(entityManager)
-        .anyTimes();
-
-    return entityManagerProvider;
-  }
-
-  private Injector initInjector() {
-    Injector injector;
-
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-
-    // inject AmbariMetaInfo to ensure that stacks get populated in the DB
-    injector.getInstance(AmbariMetaInfo.class);
-
-    // load the stack entity
-    StackDAO stackDAO = injector.getInstance(StackDAO.class);
-    stackDAO.find("HDP", "2.2.0");
-
-    return injector;
-  }
-
-  private void destroyInjector(Injector injector) {
-    injector.getInstance(PersistService.class).stop();
-  }
-
-  private AbstractUpgradeCatalog getUpgradeCatalog(final DBAccessor dbAccessor, final OsFamily osFamily, final EntityManager entityManager) {
-    Module module = new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(DBAccessor.class).toInstance(dbAccessor);
-        binder.bind(OsFamily.class).toInstance(osFamily);
-        binder.bind(EntityManager.class).toInstance(entityManager);
-      }
-    };
-
-    Injector injector = Guice.createInjector(module);
-    return injector.getInstance(UpgradeCatalog211.class);
-  }
-
-  private void setInjector(UpgradeCatalog211 upgradeCatalog211, Injector injector) throws NoSuchFieldException, IllegalAccessException {
-    Field fieldInjector = AbstractUpgradeCatalog.class.getDeclaredField("injector");
-    if (fieldInjector != null) {
-      fieldInjector.set(upgradeCatalog211, injector);
-    }
-  }
-
-  // *********** Inner Classes that represent sections of the DDL ***********
-  // ************************************************************************
-
-  /*
-   * Example *SectionDDL class
-   */
-  /*
-  class AlertSectionDDL implements SectionDDL {
-    HashMap<String, Capture<String>> stringCaptures;
-    HashMap<String, Capture<Class>> classCaptures;
-
-
-    public AlertSectionDDL() {
-      stringCaptures = new HashMap<String, Capture<String>>();
-      classCaptures = new HashMap<String, Capture<Class>>();
-
-      Capture<String> textCaptureC = EasyMock.newCapture();
-      Capture<String> textCaptureH = EasyMock.newCapture();
-      Capture<Class>  classFromC = EasyMock.newCapture();
-      Capture<Class>  classFromH = EasyMock.newCapture();
-      Capture<Class>  classToC = EasyMock.newCapture();
-      Capture<Class>  classToH = EasyMock.newCapture();
-
-      stringCaptures.put("textCaptureC", textCaptureC);
-      stringCaptures.put("textCaptureH", textCaptureH);
-      classCaptures.put("classFromC", classFromC);
-      classCaptures.put("classFromH", classFromH);
-      classCaptures.put("classToC", classToC);
-      classCaptures.put("classToH", classToH);
-    }
-
-    @Override
-    public void execute(DBAccessor dbAccessor) throws SQLException {
-      Capture<String> textCaptureC = stringCaptures.get("textCaptureC");
-      Capture<String> textCaptureH = stringCaptures.get("textCaptureH");
-      Capture<Class>  classFromC = classCaptures.get("classFromC");
-      Capture<Class>  classFromH = classCaptures.get("classFromH");
-      Capture<Class>  classToC = classCaptures.get("classToC");
-      Capture<Class>  classToH = classCaptures.get("classToH");
-
-      dbAccessor.changeColumnType(eq("alert_current"), capture(textCaptureC), capture(classFromC), capture(classToC));
-      dbAccessor.changeColumnType(eq("alert_history"), capture(textCaptureH), capture(classFromH), capture(classToH));
-    }
-
-    @Override
-    public void verify(DBAccessor dbAccessor) throws SQLException {
-      Capture<String> textCaptureC = stringCaptures.get("textCaptureC");
-      Capture<String> textCaptureH = stringCaptures.get("textCaptureH");
-      Capture<Class>  classFromC = classCaptures.get("classFromC");
-      Capture<Class>  classFromH = classCaptures.get("classFromH");
-      Capture<Class>  classToC = classCaptures.get("classToC");
-      Capture<Class>  classToH = classCaptures.get("classToH");
-
-      Assert.assertEquals("latest_text", textCaptureC.getValue());
-      Assert.assertEquals(String.class, classFromC.getValue());
-      Assert.assertEquals(char[].class, classToC.getValue());
-
-      Assert.assertEquals("alert_text", textCaptureH.getValue());
-      Assert.assertEquals(String.class, classFromH.getValue());
-      Assert.assertEquals(char[].class, classToH.getValue());
-    }
-  }
-  */
-}
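
The file removed above built every catalog instance through a small, hand-rolled Guice module rather than a full server context, binding only the three collaborators the catalogs need. The sketch below mirrors that removed getUpgradeCatalog helper; the generic wireCatalog method and its class name are an illustration of the pattern, not part of the Ambari test code, and any catalog class that survives this cleanup could be passed in.

import javax.persistence.EntityManager;

import org.apache.ambari.server.orm.DBAccessor;
import org.apache.ambari.server.state.stack.OsFamily;
import org.easymock.EasyMock;

import com.google.inject.Binder;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Module;

public class CatalogWiringSketch {

  /**
   * Wires an upgrade catalog the way the removed getUpgradeCatalog helper did:
   * bind DBAccessor, OsFamily and EntityManager to mocks, then let Guice
   * construct the catalog itself.
   */
  public static <T> T wireCatalog(Class<T> catalogClass) {
    final DBAccessor dbAccessor = EasyMock.createNiceMock(DBAccessor.class);
    final OsFamily osFamily = EasyMock.createNiceMock(OsFamily.class);
    final EntityManager entityManager = EasyMock.createNiceMock(EntityManager.class);

    Module module = new Module() {
      @Override
      public void configure(Binder binder) {
        binder.bind(DBAccessor.class).toInstance(dbAccessor);
        binder.bind(OsFamily.class).toInstance(osFamily);
        binder.bind(EntityManager.class).toInstance(entityManager);
      }
    };

    Injector injector = Guice.createInjector(module);
    return injector.getInstance(catalogClass);
  }
}

In the deleted test this wiring returned an UpgradeCatalog211; keeping the bindings this narrow is what let the DDL tests assert on captured column definitions rather than on a live schema.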


[28/63] [abbrv] ambari git commit: AMBARI-21306. Zeppelin JDBC interpreter gives error for 'doAs' (Prabhjyot Singh via Venkata Sairam)

Posted by ab...@apache.org.
AMBARI-21306. Zeppelin JDBC interpreter gives error for 'doAs' (Prabhjyot Singh via Venkata Sairam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/23cc628f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/23cc628f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/23cc628f

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 23cc628f4827913869da4917a9bd7b9c0c1abf73
Parents: 8da634c
Author: Venkata Sairam <ve...@gmail.com>
Authored: Mon Jun 26 12:31:26 2017 +0530
Committer: Venkata Sairam <ve...@gmail.com>
Committed: Mon Jun 26 12:31:26 2017 +0530

----------------------------------------------------------------------
 .../common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/23cc628f/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
index 8b5f821..0013ab0 100644
--- a/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
+++ b/ambari-server/src/main/resources/common-services/ZEPPELIN/0.6.0.2.5/package/scripts/master.py
@@ -366,7 +366,7 @@ class Master(Script):
           interpreter['properties'][hive_interactive_properties_key + '.driver'] = 'org.apache.hive.jdbc.HiveDriver'
           interpreter['properties'][hive_interactive_properties_key + '.user'] = 'hive'
           interpreter['properties'][hive_interactive_properties_key + '.password'] = ''
-          interpreter['properties'][hive_interactive_properties_key + '.property'] = 'hive.server2.proxy.user'
+          interpreter['properties'][hive_interactive_properties_key + '.proxy.user.property'] = 'hive.server2.proxy.user'
           if params.hive_server2_support_dynamic_service_discovery:
             interpreter['properties'][hive_interactive_properties_key + '.url'] = 'jdbc:hive2://' + \
                                                     params.hive_zookeeper_quorum + \


[44/63] [abbrv] ambari git commit: BUG-82124: As part of START_ALL, Ranger KMS starts after HBase and Hive, causing their start failure (Vishal Suvagia via mugdha)

Posted by ab...@apache.org.
BUG-82124: As part of START_ALL, Ranger KMS starts after HBase and Hive, causing their start failure (Vishal Suvagia via mugdha)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/39efba35
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/39efba35
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/39efba35

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 39efba35980642b832f79c6afb332716045d859f
Parents: 9aa786f
Author: Vishal Suvagia <vi...@yahoo.com>
Authored: Thu Jun 15 15:27:47 2017 +0530
Committer: Mugdha Varadkar <mu...@apache.org>
Committed: Wed Jun 28 13:32:37 2017 +0530

----------------------------------------------------------------------
 .../common-services/HBASE/0.96.0.2.0/role_command_order.json      | 3 +--
 .../common-services/HBASE/2.0.0.3.0/role_command_order.json       | 2 +-
 2 files changed, 2 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/39efba35/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
index 110b179..58d0c1c 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
@@ -4,7 +4,6 @@
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
     "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
-    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START"]
-
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START", "RANGER_KMS_SERVER-START"]
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/39efba35/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
index 44d0c61..69f4bf6 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
@@ -4,7 +4,7 @@
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
     "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
-    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START"],
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START", "RANGER_KMS_SERVER-START"],
     "PHOENIX_QUERY_SERVER-START": ["HBASE_MASTER-START"]
   }
 }


[46/63] [abbrv] ambari git commit: AMBARI-21256: As part of START_ALL, Ranger KMS starts after HBase and Hive, causing their start failure (Vishal Suvagia via mugdha)

Posted by ab...@apache.org.
AMBARI-21256: As part of START_ALL, Ranger KMS starts after HBase and Hive, causing their start failure (Vishal Suvagia via mugdha)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5e50042a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5e50042a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5e50042a

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 5e50042a78e4ddcb8e50bb28289ba67c50fb502c
Parents: 4c1ea4c
Author: Vishal Suvagia <vi...@yahoo.com>
Authored: Thu Jun 15 15:27:47 2017 +0530
Committer: Mugdha Varadkar <mu...@apache.org>
Committed: Wed Jun 28 14:00:01 2017 +0530

----------------------------------------------------------------------
 .../common-services/HBASE/0.96.0.2.0/role_command_order.json      | 3 +--
 .../common-services/HBASE/2.0.0.3.0/role_command_order.json       | 2 +-
 2 files changed, 2 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5e50042a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
index 110b179..58d0c1c 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/role_command_order.json
@@ -4,7 +4,6 @@
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
     "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
-    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START"]
-
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START", "RANGER_KMS_SERVER-START"]
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/5e50042a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
index 44d0c61..69f4bf6 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
+++ b/ambari-server/src/main/resources/common-services/HBASE/2.0.0.3.0/role_command_order.json
@@ -4,7 +4,7 @@
     "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
     "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
-    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START"],
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START", "ZOOKEEPER_SERVER-START", "RANGER_USERSYNC-START", "RANGER_KMS_SERVER-START"],
     "PHOENIX_QUERY_SERVER-START": ["HBASE_MASTER-START"]
   }
 }


[02/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
deleted file mode 100644
index 0663049..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog250Test.java
+++ /dev/null
@@ -1,2129 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.anyString;
-import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.createMockBuilder;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.eq;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.newCapture;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-import static org.easymock.EasyMock.verify;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.lang.reflect.Method;
-import java.net.URL;
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import javax.persistence.EntityManager;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.actionmanager.ActionManager;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.AmbariManagementControllerImpl;
-import org.apache.ambari.server.controller.KerberosHelper;
-import org.apache.ambari.server.controller.MaintenanceStateHelper;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
-import org.apache.ambari.server.orm.dao.ArtifactDAO;
-import org.apache.ambari.server.orm.dao.PermissionDAO;
-import org.apache.ambari.server.orm.dao.ResourceTypeDAO;
-import org.apache.ambari.server.orm.dao.RoleAuthorizationDAO;
-import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
-import org.apache.ambari.server.orm.entities.ArtifactEntity;
-import org.apache.ambari.server.orm.entities.PermissionEntity;
-import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
-import org.apache.ambari.server.orm.entities.RoleAuthorizationEntity;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.kerberos.AbstractKerberosDescriptorContainer;
-import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
-import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosKeytabDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosPrincipalDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
-import org.apache.ambari.server.state.stack.OsFamily;
-import org.easymock.Capture;
-import org.easymock.CaptureType;
-import org.easymock.EasyMock;
-import org.easymock.EasyMockRunner;
-import org.easymock.EasyMockSupport;
-import org.easymock.Mock;
-import org.easymock.MockType;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Maps;
-import com.google.gson.Gson;
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParser;
-import com.google.gson.JsonPrimitive;
-import com.google.inject.AbstractModule;
-import com.google.inject.Binder;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.Module;
-import com.google.inject.Provider;
-
-import junit.framework.AssertionFailedError;
-
-/**
- * {@link UpgradeCatalog250} unit tests.
- */
-@RunWith(EasyMockRunner.class)
-public class UpgradeCatalog250Test {
-
-  //  private Injector injector;
-  @Mock(type = MockType.STRICT)
-  private Provider<EntityManager> entityManagerProvider;
-
-  @Mock(type = MockType.NICE)
-  private EntityManager entityManager;
-
-  @Mock(type = MockType.NICE)
-  private DBAccessor dbAccessor;
-
-  @Mock(type = MockType.NICE)
-  private Configuration configuration;
-
-  @Mock(type = MockType.NICE)
-  private Connection connection;
-
-  @Mock(type = MockType.NICE)
-  private Statement statement;
-
-  @Mock(type = MockType.NICE)
-  private ResultSet resultSet;
-
-  @Mock(type = MockType.NICE)
-  private OsFamily osFamily;
-
-  @Mock(type = MockType.NICE)
-  private KerberosHelper kerberosHelper;
-
-  @Mock(type = MockType.NICE)
-  private ActionManager actionManager;
-
-  @Mock(type = MockType.NICE)
-  private Config config;
-
-  @Mock(type = MockType.STRICT)
-  private Service service;
-
-  @Mock(type = MockType.NICE)
-  private Clusters clusters;
-
-  @Mock(type = MockType.NICE)
-  private Cluster cluster;
-
-  @Mock(type = MockType.NICE)
-  private Injector injector;
-
-  private UpgradeCatalog250 upgradeCatalog250;
-
-  @Before
-  public void init() {
-    reset(entityManagerProvider, injector);
-
-    expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
-
-    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(KerberosHelper.class)).andReturn(kerberosHelper).anyTimes();
-
-    replay(entityManagerProvider, injector);
-
-    upgradeCatalog250 = new UpgradeCatalog250(injector);
-  }
-
-  @After
-  public void tearDown() {
-  }
-
-  @Test
-  public void testExecuteDDLUpdates() throws Exception {
-    // !!! setup capture for host_version
-    dbAccessor.addUniqueConstraint("host_version", "UQ_host_repo", "repo_version_id", "host_id");
-
-    Capture<DBAccessor.DBColumnInfo> groupGroupType = newCapture();
-    dbAccessor.addColumn(eq(UpgradeCatalog250.GROUPS_TABLE), capture(groupGroupType));
-    dbAccessor.addUniqueConstraint("groups", "UNQ_groups_0", "group_name", "group_type");
-
-    expectLastCall().once();
-
-    // !!! setup capture for servicecomponent_version
-    Capture<List<DBAccessor.DBColumnInfo>> capturedComponentVersionColumns = newCapture();
-
-    dbAccessor.createTable(eq(UpgradeCatalog250.COMPONENT_VERSION_TABLE), capture(capturedComponentVersionColumns),
-        eq((String[]) null));
-
-    dbAccessor.addPKConstraint(eq(UpgradeCatalog250.COMPONENT_VERSION_TABLE),
-        eq(UpgradeCatalog250.COMPONENT_VERSION_PK), eq("id"));
-    dbAccessor.addFKConstraint(eq(UpgradeCatalog250.COMPONENT_VERSION_TABLE),
-        eq(UpgradeCatalog250.COMPONENT_VERSION_FK_COMPONENT), eq("component_id"),
-        eq(UpgradeCatalog250.COMPONENT_TABLE), eq("id"), eq(false));
-    dbAccessor.addFKConstraint(eq(UpgradeCatalog250.COMPONENT_VERSION_TABLE),
-        eq(UpgradeCatalog250.COMPONENT_VERSION_FK_REPO_VERSION), eq("repo_version_id"),
-        eq("repo_version"), eq("repo_version_id"), eq(false));
-
-    // servicedesiredstate table
-    Capture<DBAccessor.DBColumnInfo> capturedCredentialStoreEnabledCol = newCapture();
-    dbAccessor.addColumn(eq(UpgradeCatalog250.SERVICE_DESIRED_STATE_TABLE), capture(capturedCredentialStoreEnabledCol));
-
-    expect(dbAccessor.getConnection()).andReturn(connection).anyTimes();
-    expect(connection.createStatement()).andReturn(statement).anyTimes();
-    expect(statement.executeQuery(anyObject(String.class))).andReturn(resultSet).anyTimes();
-    expect(configuration.getDatabaseType()).andReturn(Configuration.DatabaseType.POSTGRES).anyTimes();
-
-    replay(dbAccessor, configuration, connection, statement, resultSet);
-
-    Module module = new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(DBAccessor.class).toInstance(dbAccessor);
-        binder.bind(OsFamily.class).toInstance(osFamily);
-        binder.bind(EntityManager.class).toInstance(entityManager);
-        binder.bind(Configuration.class).toInstance(configuration);
-      }
-    };
-
-    Injector injector = Guice.createInjector(module);
-    UpgradeCatalog250 upgradeCatalog250 = injector.getInstance(UpgradeCatalog250.class);
-    upgradeCatalog250.executeDDLUpdates();
-
-    DBAccessor.DBColumnInfo capturedGroupTypeColumn = groupGroupType.getValue();
-    Assert.assertNotNull(capturedGroupTypeColumn);
-    Assert.assertEquals(UpgradeCatalog250.GROUP_TYPE_COL, capturedGroupTypeColumn.getName());
-    Assert.assertEquals(String.class, capturedGroupTypeColumn.getType());
-    Assert.assertEquals(null, capturedGroupTypeColumn.getLength());
-    Assert.assertEquals("LOCAL", capturedGroupTypeColumn.getDefaultValue());
-    Assert.assertEquals(false, capturedGroupTypeColumn.isNullable());
-
-    verify(dbAccessor);
-
-    // !!! check the captured for host_version
-    // (no checks)
-
-    // !!! check the captured for servicecomponent_version
-    Map<String, DBAccessor.DBColumnInfo> expected = new HashMap<>();
-    expected.put("id", new DBAccessor.DBColumnInfo("id", Long.class, null, null, false));
-    expected.put("component_id", new DBAccessor.DBColumnInfo("component_id", Long.class, null, null, false));
-    expected.put("repo_version_id", new DBAccessor.DBColumnInfo("repo_version_id", Long.class, null, null, false));
-    expected.put("state", new DBAccessor.DBColumnInfo("state", String.class, 32, null, false));
-    expected.put("user_name", new DBAccessor.DBColumnInfo("user_name", String.class, 255, null, false));
-
-    List<DBAccessor.DBColumnInfo> captured = capturedComponentVersionColumns.getValue();
-    Assert.assertEquals(5, captured.size());
-
-    for (DBAccessor.DBColumnInfo column : captured) {
-      DBAccessor.DBColumnInfo expectedColumn = expected.remove(column.getName());
-
-      Assert.assertNotNull(expectedColumn);
-      Assert.assertEquals(expectedColumn.getDefaultValue(), column.getDefaultValue());
-      Assert.assertEquals(expectedColumn.getName(), column.getName());
-      Assert.assertEquals(expectedColumn.getLength(), column.getLength());
-      Assert.assertEquals(expectedColumn.getType(), column.getType());
-      Assert.assertEquals(expectedColumn.getClass(), column.getClass());
-    }
-
-    // did we get them all?
-    Assert.assertEquals(0, expected.size());
-
-    // Verify if credential_store_enabled columns
-    // were added to servicedesiredstate table
-
-    DBAccessor.DBColumnInfo capturedCredentialStoreEnabledColValues = capturedCredentialStoreEnabledCol.getValue();
-    Assert.assertNotNull(capturedCredentialStoreEnabledColValues);
-
-    Assert.assertEquals(UpgradeCatalog250.CREDENTIAL_STORE_ENABLED_COL, capturedCredentialStoreEnabledColValues.getName());
-    Assert.assertEquals(null, capturedCredentialStoreEnabledColValues.getLength());
-    Assert.assertEquals(Short.class, capturedCredentialStoreEnabledColValues.getType());
-    Assert.assertEquals(0, capturedCredentialStoreEnabledColValues.getDefaultValue());
-    Assert.assertEquals(false, capturedCredentialStoreEnabledColValues.isNullable());
-  }
-
-  @Test
-  public void testUpdateAlerts_StormUIWebAlert() {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final AlertDefinitionDAO mockAlertDefinitionDAO = easyMockSupport.createNiceMock(AlertDefinitionDAO.class);
-    final AlertDefinitionEntity stormWebUIAlertMock = easyMockSupport.createNiceMock(AlertDefinitionEntity.class);
-
-    final Injector mockInjector = createInjector(mockAmbariManagementController, mockClusters, mockAlertDefinitionDAO);
-    long clusterId = 1;
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getClusterId()).andReturn(clusterId).anyTimes();
-    expect(mockAlertDefinitionDAO.findByName(eq(clusterId), eq("storm_webui")))
-        .andReturn(stormWebUIAlertMock).atLeastOnce();
-    expect(stormWebUIAlertMock.getSource()).andReturn("{\"uri\": {\n" +
-        "            \"http\": \"{{storm-site/ui.port}}\",\n" +
-        "            \"kerberos_keytab\": \"{{storm-env/storm_ui_keytab}}\",\n" +
-        "            \"kerberos_principal\": \"{{storm-env/storm_ui_principal_name}}\",\n" +
-        "            \"connection_timeout\": 5.0\n" +
-        "          } }");
-
-    stormWebUIAlertMock.setSource("{\"uri\":{\"http\":\"{{storm-site/ui.port}}\",\"kerberos_keytab\":\"{{storm-env/storm_ui_keytab}}\",\"kerberos_principal\":\"{{storm-env/storm_ui_principal_name}}\",\"connection_timeout\":5.0,\"https\":\"{{storm-site/ui.https.port}}\",\"https_property\":\"{{storm-site/ui.https.keystore.type}}\",\"https_property_value\":\"jks\"}}");
-
-    expectLastCall().once();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog250.class).updateStormAlerts();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testUpdateAlerts_StormUIPortAlert() {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final AlertDefinitionDAO mockAlertDefinitionDAO = easyMockSupport.createNiceMock(AlertDefinitionDAO.class);
-    final AlertDefinitionEntity stormUIPortAlertMock = easyMockSupport.createNiceMock(AlertDefinitionEntity.class);
-
-    final Injector mockInjector = createInjector(mockAmbariManagementController, mockClusters, mockAlertDefinitionDAO);
-    long clusterId = 1;
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getClusterId()).andReturn(clusterId).anyTimes();
-    expect(mockAlertDefinitionDAO.findByName(eq(clusterId), eq("storm_server_process")))
-        .andReturn(stormUIPortAlertMock).atLeastOnce();
-
-    mockAlertDefinitionDAO.remove(stormUIPortAlertMock);
-    expectLastCall().once();
-
-    easyMockSupport.replayAll();
-
-    mockInjector.getInstance(UpgradeCatalog250.class).updateStormAlerts();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testUpdateAlerts_LogSearchUIWebAlert() {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final AlertDefinitionDAO mockAlertDefinitionDAO = easyMockSupport.createNiceMock(AlertDefinitionDAO.class);
-    final AlertDefinitionEntity logSearchWebUIAlertMock = easyMockSupport.createNiceMock(AlertDefinitionEntity.class);
-
-    final Injector mockInjector = createInjector(mockAmbariManagementController, mockClusters, mockAlertDefinitionDAO);
-    long clusterId = 1;
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getClusterId()).andReturn(clusterId).anyTimes();
-    expect(mockAlertDefinitionDAO.findByName(eq(clusterId), eq("logsearch_ui")))
-      .andReturn(logSearchWebUIAlertMock).atLeastOnce();
-    expect(logSearchWebUIAlertMock.getSource()).andReturn("{\"uri\": {\n" +
-      "            \"http\": \"{{logsearch-env/logsearch_ui_port}}\",\n" +
-      "            \"https\": \"{{logsearch-env/logsearch_ui_port}}\"\n" +
-      "          } }");
-
-    logSearchWebUIAlertMock.setSource("{\"uri\":{\"http\":\"{{logsearch-env/logsearch_ui_port}}\",\"https\":\"{{logsearch-env/logsearch_ui_port}}\",\"https_property\":\"{{logsearch-env/logsearch_ui_protocol}}\",\"https_property_value\":\"https\"}}");
-
-    expectLastCall().once();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog250.class).updateLogSearchAlert();
-    easyMockSupport.verifyAll();
-  }
-
-
-  @Test
-  public void testExecuteDMLUpdates() throws Exception {
-    Method updateAmsConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateAMSConfigs");
-    Method updateHadoopEnvConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateHadoopEnvConfigs");
-    Method updateKafkaConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateKafkaConfigs");
-    Method updateTablesForZeppelinViewRemoval = UpgradeCatalog250.class.getDeclaredMethod("unInstallAllZeppelinViews");
-    Method updateHIVEInteractiveConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateHIVEInteractiveConfigs");
-    Method addManageServiceAutoStartPermissions = UpgradeCatalog250.class.getDeclaredMethod("addManageServiceAutoStartPermissions");
-    Method addManageAlertNotificationsPermissions = UpgradeCatalog250.class.getDeclaredMethod("addManageAlertNotificationsPermissions");
-    Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
-    Method updateZeppelinConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateZeppelinConfigs");
-    Method updateAtlasConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateAtlasConfigs");
-    Method updateLogSearchConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateLogSearchConfigs");
-    Method updateLogSearchAlert = UpgradeCatalog250.class.getDeclaredMethod("updateLogSearchAlert");
-    Method updateAmbariInfraConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateAmbariInfraConfigs");
-    Method updateRangerUrlConfigs = UpgradeCatalog250.class.getDeclaredMethod("updateRangerUrlConfigs");
-    Method updateTezHistoryUrlBase = UpgradeCatalog250.class.getDeclaredMethod("updateTezHistoryUrlBase");
-    Method updateYarnSite = UpgradeCatalog250.class.getDeclaredMethod("updateYarnSite");
-    Method updateAlerts = UpgradeCatalog250.class.getDeclaredMethod("updateStormAlerts");
-    Method removeAlertDuplicates = UpgradeCatalog250.class.getDeclaredMethod("removeAlertDuplicates");
-    Method updateKerberosDescriptorArtifacts = AbstractUpgradeCatalog.class.getDeclaredMethod("updateKerberosDescriptorArtifacts");
-    Method fixHBaseMasterCPUUtilizationAlertDefinition = UpgradeCatalog250.class.getDeclaredMethod("fixHBaseMasterCPUUtilizationAlertDefinition");
-
-    UpgradeCatalog250 upgradeCatalog250 = createMockBuilder(UpgradeCatalog250.class)
-        .addMockedMethod(updateAmsConfigs)
-        .addMockedMethod(updateHadoopEnvConfigs)
-        .addMockedMethod(updateKafkaConfigs)
-        .addMockedMethod(addNewConfigurationsFromXml)
-        .addMockedMethod(addManageServiceAutoStartPermissions)
-        .addMockedMethod(addManageAlertNotificationsPermissions)
-        .addMockedMethod(updateHIVEInteractiveConfigs)
-        .addMockedMethod(updateTablesForZeppelinViewRemoval)
-        .addMockedMethod(updateZeppelinConfigs)
-        .addMockedMethod(updateAtlasConfigs)
-        .addMockedMethod(updateLogSearchConfigs)
-        .addMockedMethod(updateAmbariInfraConfigs)
-        .addMockedMethod(updateRangerUrlConfigs)
-        .addMockedMethod(updateYarnSite)
-        .addMockedMethod(updateAlerts)
-        .addMockedMethod(updateLogSearchAlert)
-        .addMockedMethod(removeAlertDuplicates)
-        .addMockedMethod(updateKerberosDescriptorArtifacts)
-        .addMockedMethod(fixHBaseMasterCPUUtilizationAlertDefinition)
-        .addMockedMethod(updateTezHistoryUrlBase)
-        .createMock();
-
-    upgradeCatalog250.updateAMSConfigs();
-    expectLastCall().once();
-
-    upgradeCatalog250.updateHadoopEnvConfigs();
-    expectLastCall().once();
-
-    upgradeCatalog250.addNewConfigurationsFromXml();
-    expectLastCall().once();
-
-    upgradeCatalog250.updateKafkaConfigs();
-    expectLastCall().once();
-
-    upgradeCatalog250.updateHIVEInteractiveConfigs();
-    expectLastCall().once();
-
-    upgradeCatalog250.unInstallAllZeppelinViews();
-    expectLastCall().once();
-
-    upgradeCatalog250.updateZeppelinConfigs();
-    expectLastCall().once();
-
-    upgradeCatalog250.updateAtlasConfigs();
-    expectLastCall().once();
-
-    upgradeCatalog250.updateLogSearchConfigs();
-    expectLastCall().once();
-
-    upgradeCatalog250.updateAmbariInfraConfigs();
-    expectLastCall().once();
-
-    upgradeCatalog250.updateRangerUrlConfigs();
-    expectLastCall().once();
-
-    upgradeCatalog250.addManageServiceAutoStartPermissions();
-    expectLastCall().once();
-
-    upgradeCatalog250.addManageAlertNotificationsPermissions();
-    expectLastCall().once();
-
-    upgradeCatalog250.updateTezHistoryUrlBase();
-    expectLastCall().once();
-
-    upgradeCatalog250.updateYarnSite();
-    expectLastCall().once();
-
-    upgradeCatalog250.updateStormAlerts();
-    expectLastCall().once();
-
-    upgradeCatalog250.updateLogSearchAlert();
-    expectLastCall().once();
-
-    upgradeCatalog250.removeAlertDuplicates();
-    expectLastCall().once();
-
-    upgradeCatalog250.updateKerberosDescriptorArtifacts();
-    expectLastCall().once();
-
-    upgradeCatalog250.fixHBaseMasterCPUUtilizationAlertDefinition();
-    expectLastCall().once();
-
-    replay(upgradeCatalog250);
-
-    upgradeCatalog250.executeDMLUpdates();
-
-    verify(upgradeCatalog250);
-  }
-
-  @Test
-  public void testFixHBaseMasterCPUUtilizationAlertDefinition() {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final AlertDefinitionDAO mockAlertDefinitionDAO = easyMockSupport.createNiceMock(AlertDefinitionDAO.class);
-    final AlertDefinitionEntity hbaseMasterCPUAlertMock = easyMockSupport.createNiceMock(AlertDefinitionEntity.class);
-
-    String brokenSource = "{\"uri\":{\"http\":\"{{hbase-site/hbase.master.info.port}}\",\"kerberos_keytab\":\"{{hbase-site/hbase.security.authentication.spnego.kerberos.principal}}\",\"kerberos_principal\":\"{{hbase-site/hbase.security.authentication.spnego.kerberos.keytab}}\",\"default_port\":60010,\"connection_timeout\":5.0},\"jmx\":{\"property_list\":[\"java.lang:type\\u003dOperatingSystem/SystemCpuLoad\",\"java.lang:type\\u003dOperatingSystem/AvailableProcessors\"],\"value\":\"{0} * 100\"},\"type\":\"METRIC\",\"reporting\":{\"ok\":{\"text\":\"{1} CPU, load {0:.1%}\"},\"warning\":{\"text\":\"{1} CPU, load {0:.1%}\",\"value\":200.0},\"critical\":{\"text\":\"{1} CPU, load {0:.1%}\",\"value\":250.0},\"units\":\"%\",\"type\":\"PERCENT\"}}";
-
-    Capture<String> capturedFixedSource = newCapture();
-
-    final Injector mockInjector = createInjector(mockAmbariManagementController, mockClusters, mockAlertDefinitionDAO);
-    long clusterId = 1;
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(Collections.singletonMap("normal", mockClusterExpected)).atLeastOnce();
-    expect(mockClusterExpected.getClusterId()).andReturn(clusterId).anyTimes();
-    expect(mockAlertDefinitionDAO.findByName(eq(clusterId), eq("hbase_master_cpu"))).andReturn(hbaseMasterCPUAlertMock).atLeastOnce();
-    expect(hbaseMasterCPUAlertMock.getDefinitionName()).andReturn("hbase_master_cpu").once();
-    expect(hbaseMasterCPUAlertMock.getSource()).andReturn(brokenSource).once();
-
-    hbaseMasterCPUAlertMock.setSource(capture(capturedFixedSource));
-    expectLastCall().once();
-
-    hbaseMasterCPUAlertMock.setHash(anyString());
-    expectLastCall().once();
-
-    expect(mockAlertDefinitionDAO.merge(hbaseMasterCPUAlertMock)).andReturn(hbaseMasterCPUAlertMock).once();
-
-    easyMockSupport.replayAll();
-
-    mockInjector.getInstance(UpgradeCatalog250.class).fixHBaseMasterCPUUtilizationAlertDefinition();
-    easyMockSupport.verifyAll();
-
-    String fixedSource = capturedFixedSource.getValue();
-    Assert.assertNotNull(fixedSource);
-
-    JsonObject sourceJson = new JsonParser().parse(fixedSource).getAsJsonObject();
-    Assert.assertNotNull(sourceJson);
-
-    JsonObject uriJson = sourceJson.get("uri").getAsJsonObject();
-    Assert.assertNotNull(uriJson);
-
-    JsonPrimitive primitive;
-    primitive = uriJson.getAsJsonPrimitive("kerberos_keytab");
-    Assert.assertTrue(primitive.isString());
-    Assert.assertEquals("{{hbase-site/hbase.security.authentication.spnego.kerberos.keytab}}", primitive.getAsString());
-
-    primitive = uriJson.getAsJsonPrimitive("kerberos_principal");
-    Assert.assertTrue(primitive.isString());
-    Assert.assertEquals("{{hbase-site/hbase.security.authentication.spnego.kerberos.principal}}", primitive.getAsString());
-  }
-
-  @Test
-  public void testFixHBaseMasterCPUUtilizationAlertDefinitionMissingKerberosInfo() {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final AlertDefinitionDAO mockAlertDefinitionDAO = easyMockSupport.createNiceMock(AlertDefinitionDAO.class);
-    final AlertDefinitionEntity hbaseMasterCPUAlertMock = easyMockSupport.createNiceMock(AlertDefinitionEntity.class);
-
-    String brokenSource = "{\"uri\":{\"http\":\"{{hbase-site/hbase.master.info.port}}\",\"default_port\":60010,\"connection_timeout\":5.0},\"jmx\":{\"property_list\":[\"java.lang:type\\u003dOperatingSystem/SystemCpuLoad\",\"java.lang:type\\u003dOperatingSystem/AvailableProcessors\"],\"value\":\"{0} * 100\"},\"type\":\"METRIC\",\"reporting\":{\"ok\":{\"text\":\"{1} CPU, load {0:.1%}\"},\"warning\":{\"text\":\"{1} CPU, load {0:.1%}\",\"value\":200.0},\"critical\":{\"text\":\"{1} CPU, load {0:.1%}\",\"value\":250.0},\"units\":\"%\",\"type\":\"PERCENT\"}}";
-
-    Capture<String> capturedFixedSource = newCapture();
-
-    final Injector mockInjector = createInjector(mockAmbariManagementController, mockClusters, mockAlertDefinitionDAO);
-    long clusterId = 1;
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(Collections.singletonMap("normal", mockClusterExpected)).atLeastOnce();
-    expect(mockClusterExpected.getClusterId()).andReturn(clusterId).anyTimes();
-    expect(mockAlertDefinitionDAO.findByName(eq(clusterId), eq("hbase_master_cpu"))).andReturn(hbaseMasterCPUAlertMock).atLeastOnce();
-    expect(hbaseMasterCPUAlertMock.getDefinitionName()).andReturn("hbase_master_cpu").once();
-    expect(hbaseMasterCPUAlertMock.getSource()).andReturn(brokenSource).once();
-
-    expect(mockAlertDefinitionDAO.merge(hbaseMasterCPUAlertMock)).andReturn(hbaseMasterCPUAlertMock).anyTimes();
-
-    easyMockSupport.replayAll();
-
-    mockInjector.getInstance(UpgradeCatalog250.class).fixHBaseMasterCPUUtilizationAlertDefinition();
-    easyMockSupport.verifyAll();
-
-    Assert.assertFalse(capturedFixedSource.hasCaptured());
-  }
-
-  @Test
-  public void testUpdateYarnSite() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    final String propertyToRemove = "yarn.nodemanager.linux-container-executor.cgroups.mount-path";
-    final AmbariManagementController ambariManagementController = createNiceMock(AmbariManagementController.class);
-    Config mockYarnEnv = easyMockSupport.createNiceMock(Config.class);
-    Config mockYarnSite = easyMockSupport.createNiceMock(Config.class);
-
-    HashMap<String, String> yarnEnv = new HashMap<String, String>() {{
-      put("yarn_cgroups_enabled", "false");
-    }};
-
-    HashMap<String, String> yarnSite = new HashMap<String, String>() {{
-      put(propertyToRemove, "");
-    }};
-
-    reset(clusters, cluster, injector);
-
-    expect(injector.getInstance(AmbariManagementController.class)).andReturn(ambariManagementController).atLeastOnce();
-    expect(ambariManagementController.getClusters()).andReturn(clusters).atLeastOnce();
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).once();
-    expect(cluster.getDesiredConfigByType("yarn-env")).andReturn(mockYarnEnv).atLeastOnce();
-    expect(mockYarnEnv.getProperties()).andReturn(yarnEnv).anyTimes();
-    expect(cluster.getDesiredConfigByType("yarn-site")).andReturn(mockYarnSite).atLeastOnce();
-    expect(mockYarnSite.getProperties()).andReturn(yarnSite).anyTimes();
-
-    replay(clusters, cluster, injector, ambariManagementController, mockYarnEnv, mockYarnSite);
-
-    UpgradeCatalog250 upgradeCatalog250 = createMockBuilder(UpgradeCatalog250.class)
-        .addMockedMethod("removeConfigurationPropertiesFromCluster")
-        .withConstructor(injector)
-        .createNiceMock();
-
-    Capture<HashSet<String>> removeConfigName = EasyMock.newCapture();
-
-    upgradeCatalog250.removeConfigurationPropertiesFromCluster(anyObject(Cluster.class), eq("yarn-site"), capture(removeConfigName));
-    EasyMock.expectLastCall();
-
-    replay(upgradeCatalog250);
-
-    upgradeCatalog250.updateYarnSite();
-
-    easyMockSupport.verifyAll();
-
-    Set<String> updatedProperties = removeConfigName.getValue();
-    assertTrue(updatedProperties.contains(propertyToRemove));
-
-    reset(injector);
-  }
-
-  @Test
-  public void testUpdateYarnSiteWithEnabledCGroups() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    final String propertyToRemove = "yarn.nodemanager.linux-container-executor.cgroups.mount-path";
-    final AmbariManagementController ambariManagementController = createNiceMock(AmbariManagementController.class);
-    Config mockYarnEnv = easyMockSupport.createNiceMock(Config.class);
-    Config mockYarnSite = easyMockSupport.createNiceMock(Config.class);
-
-    HashMap<String, String> yarnEnv = new HashMap<String, String>() {{
-      put("yarn_cgroups_enabled", "true");
-    }};
-
-    HashMap<String, String> yarnSite = new HashMap<String, String>() {{
-      put(propertyToRemove, "");
-    }};
-
-    reset(clusters, cluster, injector);
-
-    expect(injector.getInstance(AmbariManagementController.class)).andReturn(ambariManagementController).atLeastOnce();
-    expect(ambariManagementController.getClusters()).andReturn(clusters).atLeastOnce();
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).once();
-    expect(cluster.getDesiredConfigByType("yarn-env")).andReturn(mockYarnEnv).atLeastOnce();
-    expect(mockYarnEnv.getProperties()).andReturn(yarnEnv).anyTimes();
-    expect(cluster.getDesiredConfigByType("yarn-site")).andReturn(mockYarnSite).atLeastOnce();
-    expect(mockYarnSite.getProperties()).andReturn(yarnSite).anyTimes();
-
-    replay(clusters, cluster, injector, ambariManagementController, mockYarnEnv, mockYarnSite);
-
-    UpgradeCatalog250 upgradeCatalog250 = createMockBuilder(UpgradeCatalog250.class)
-        .addMockedMethod("removeConfigurationPropertiesFromCluster")
-        .withConstructor(injector)
-        .createNiceMock();
-
-    Capture<HashSet<String>> removeConfigName = EasyMock.newCapture();
-
-    upgradeCatalog250.removeConfigurationPropertiesFromCluster(anyObject(Cluster.class), eq("yarn-site"), capture(removeConfigName));
-    EasyMock.expectLastCall().andThrow(new AssertionFailedError()).anyTimes();
-
-    replay(upgradeCatalog250);
-
-    upgradeCatalog250.updateYarnSite();
-
-    reset(injector);
-  }
-
-  @Test
-  public void testAmsEnvUpdateConfigs() throws Exception {
-
-    Map<String, String> oldPropertiesAmsEnv = new HashMap<String, String>() {
-      {
-        put("content", "\n" +
-            "# AMS Collector heapsize\n" +
-            "export AMS_COLLECTOR_HEAPSIZE={{metrics_collector_heapsize}}\n" +
-            "\n" +
-            "# HBase normalizer enabled\n" +
-            "export AMS_HBASE_NORMALIZER_ENABLED={{ams_hbase_normalizer_enabled}}\n" +
-            "\n" +
-            "# HBase compaction policy enabled\n" +
-            "export HBASE_FIFO_COMPACTION_POLICY_ENABLED={{ams_hbase_fifo_compaction_policy_enabled}}\n" +
-            "\n" +
-            "# HBase Tables Initialization check enabled\n" +
-            "export AMS_HBASE_INIT_CHECK_ENABLED={{ams_hbase_init_check_enabled}}\n");
-      }
-    };
-    Map<String, String> newPropertiesAmsEnv = new HashMap<String, String>() {
-      {
-        put("content", "\n" +
-            "# AMS Collector heapsize\n" +
-            "export AMS_COLLECTOR_HEAPSIZE={{metrics_collector_heapsize}}\n" +
-            "\n" +
-            "# HBase Tables Initialization check enabled\n" +
-            "export AMS_HBASE_INIT_CHECK_ENABLED={{ams_hbase_init_check_enabled}}\n");
-      }
-    };
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    Config mockAmsEnv = easyMockSupport.createNiceMock(Config.class);
-
-    reset(clusters, cluster);
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).once();
-    expect(cluster.getDesiredConfigByType("ams-env")).andReturn(mockAmsEnv).atLeastOnce();
-    expect(mockAmsEnv.getProperties()).andReturn(oldPropertiesAmsEnv).anyTimes();
-
-    replay(clusters, mockAmsEnv, cluster);
-
-    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
-        .addMockedMethod("createConfiguration")
-        .addMockedMethod("getClusters", new Class[]{})
-        .addMockedMethod("createConfig")
-        .withConstructor(actionManager, clusters, injector)
-        .createNiceMock();
-
-    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
-    Capture<Map<String, String>> propertiesCapture = EasyMock.newCapture();
-
-    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
-        EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
-
-    replay(controller, injector2);
-    new UpgradeCatalog250(injector2).updateAMSConfigs();
-    easyMockSupport.verifyAll();
-
-    Map<String, String> updatedProperties = propertiesCapture.getValue();
-    assertTrue(Maps.difference(newPropertiesAmsEnv, updatedProperties).areEqual());
-  }
-
-  @Test
-  public void testAmsGrafanaIniUpdateConfigs() throws Exception {
-
-    Map<String, String> oldProperties = new HashMap<String, String>() {
-      {
-        put("content", "[security]\n" +
-          "# default admin user, created on startup\n" +
-          "admin_user = {{ams_grafana_admin_user}}\n" +
-          "\n" +
-          "# default admin password, can be changed before first start of grafana,  or in profile settings\n" +
-          "admin_password = {{ams_grafana_admin_pwd}}\n" +
-          "\n" +
-          "# used for signing\n" +
-          ";secret_key = SW2YcwTIb9zpOOhoPsMm\n" +
-          "\n" +
-          "# Auto-login remember days\n" +
-          ";login_remember_days = 7\n" +
-          ";cookie_username = grafana_user\n" +
-          ";cookie_remember_name = grafana_remember\n" +
-          "\n" +
-          "# disable gravatar profile images\n" +
-          ";disable_gravatar = false\n" +
-          "\n" +
-          "# data source proxy whitelist (ip_or_domain:port seperated by spaces)\n" +
-          ";data_source_proxy_whitelist =\n");
-      }
-    };
-    Map<String, String> newProperties = new HashMap<String, String>() {
-      {
-        put("content", "[security]\n" +
-          "# default admin user, created on startup\n" +
-          "admin_user = {{ams_grafana_admin_user}}\n" +
-          "\n" +
-          "# default admin password, can be changed before first start of grafana,  or in profile settings\n" +
-          ";admin_password =\n" +
-          "\n" +
-          "# used for signing\n" +
-          ";secret_key = SW2YcwTIb9zpOOhoPsMm\n" +
-          "\n" +
-          "# Auto-login remember days\n" +
-          ";login_remember_days = 7\n" +
-          ";cookie_username = grafana_user\n" +
-          ";cookie_remember_name = grafana_remember\n" +
-          "\n" +
-          "# disable gravatar profile images\n" +
-          ";disable_gravatar = false\n" +
-          "\n" +
-          "# data source proxy whitelist (ip_or_domain:port seperated by spaces)\n" +
-          ";data_source_proxy_whitelist =\n");
-      }
-    };
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    Config mockAmsGrafanaIni = easyMockSupport.createNiceMock(Config.class);
-
-    reset(clusters, cluster);
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).once();
-    expect(cluster.getDesiredConfigByType("ams-grafana-ini")).andReturn(mockAmsGrafanaIni).atLeastOnce();
-    expect(mockAmsGrafanaIni.getProperties()).andReturn(oldProperties).anyTimes();
-
-    replay(clusters, mockAmsGrafanaIni, cluster);
-
-    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
-      .addMockedMethod("createConfiguration")
-      .addMockedMethod("getClusters", new Class[]{})
-      .addMockedMethod("createConfig")
-      .withConstructor(actionManager, clusters, injector)
-      .createNiceMock();
-
-    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
-    Capture<Map<String, String>> propertiesCapture = EasyMock.newCapture();
-
-    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
-      EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
-
-    replay(controller, injector2);
-    new UpgradeCatalog250(injector2).updateAMSConfigs();
-    easyMockSupport.verifyAll();
-
-    Map<String, String> updatedProperties = propertiesCapture.getValue();
-    assertTrue(Maps.difference(newProperties, updatedProperties).areEqual());
-  }
-
-  @Test
-  public void testAmsHbaseSiteUpdateConfigs() throws Exception {
-
-    Map<String, String> newProperties = new HashMap<String, String>() {
-      {
-        put("hbase.rootdir", "/user/ams/hbase");
-      }
-    };
-
-    Map<String, String> oldProperties = new HashMap<String, String>() {
-      {
-        put("hbase.rootdir", "hdfs://namenodehost.domain.com:8020/user/ams/hbase");
-      }
-    };
-
-    testAmsHbaseRootDir(oldProperties, newProperties);
-
-    oldProperties = new HashMap<String, String>() {
-      {
-        put("hbase.rootdir", "hdfs://nameservice/user/ams/hbase");
-      }
-    };
-
-    testAmsHbaseRootDir(oldProperties, newProperties);
-
-  }
-
-  private void testAmsHbaseRootDir(Map<String, String> oldProperties, Map<String, String> newProperties) throws AmbariException {
-    Map<String, String> amsSite = new HashMap<String, String>() {
-      {
-        put("timeline.metrics.service.operation.mode", "distributed");
-        put("timeline.metrics.hbase.fifo.compaction.enabled", "true");
-      }
-    };
-
-    Map<String, String> newAmsSite = new HashMap<String, String>() {
-      {
-        put("timeline.metrics.service.operation.mode", "distributed");
-      }
-    };
-
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    Config mockAmsHbaseSite = easyMockSupport.createNiceMock(Config.class);
-    Config mockAmsSite = easyMockSupport.createNiceMock(Config.class);
-
-    reset(clusters, cluster);
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).once();
-
-    expect(cluster.getDesiredConfigByType("ams-site")).andReturn(mockAmsSite).atLeastOnce();
-    expect(mockAmsSite.getProperties()).andReturn(amsSite).anyTimes();
-    expect(cluster.getDesiredConfigByType("ams-hbase-site")).andReturn(mockAmsHbaseSite).atLeastOnce();
-    expect(mockAmsHbaseSite.getProperties()).andReturn(oldProperties).anyTimes();
-
-    replay(clusters, mockAmsHbaseSite, mockAmsSite, cluster);
-
-    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
-        .addMockedMethod("createConfiguration")
-        .addMockedMethod("getClusters", new Class[]{})
-        .addMockedMethod("createConfig")
-        .withConstructor(actionManager, clusters, injector)
-        .createNiceMock();
-
-    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
-    Capture<Map<String, String>> propertiesCapture = EasyMock.newCapture(CaptureType.ALL);
-
-    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
-        EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).times(2);
-
-    replay(controller, injector2);
-    new UpgradeCatalog250(injector2).updateAMSConfigs();
-    easyMockSupport.verifyAll();
-
-    assertTrue(propertiesCapture.getValues().size() == 2);
-
-    Map<String, String> updatedProperties = propertiesCapture.getValues().get(0);
-    assertTrue(Maps.difference(newAmsSite, updatedProperties).areEqual());
-
-    updatedProperties = propertiesCapture.getValues().get(1);
-    assertTrue(Maps.difference(newProperties, updatedProperties).areEqual());
-  }
-
-  @Test
-  public void testKafkaUpdateConfigs() throws Exception {
-
-    Map<String, String> oldProperties = new HashMap<String, String>() {
-      {
-        put("kafka.timeline.metrics.host", "{{metric_collector_host}}");
-        put("kafka.timeline.metrics.port", "{{metric_collector_port}}");
-      }
-    };
-    Map<String, String> newProperties = new HashMap<String, String>() {
-      {
-        put("kafka.timeline.metrics.port", "{{metric_collector_port}}");
-      }
-    };
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    Config mockKafkaBroker = easyMockSupport.createNiceMock(Config.class);
-
-    reset(clusters, cluster);
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).once();
-    expect(cluster.getDesiredConfigByType("kafka-broker")).andReturn(mockKafkaBroker).atLeastOnce();
-    expect(mockKafkaBroker.getProperties()).andReturn(oldProperties).anyTimes();
-
-    replay(clusters, mockKafkaBroker, cluster);
-
-    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
-        .addMockedMethod("createConfiguration")
-        .addMockedMethod("getClusters", new Class[]{})
-        .addMockedMethod("createConfig")
-        .withConstructor(actionManager, clusters, injector)
-        .createNiceMock();
-
-    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
-    Capture<Map<String, String>> propertiesCapture = EasyMock.newCapture();
-
-    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
-        EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
-
-    replay(controller, injector2);
-    new UpgradeCatalog250(injector2).updateKafkaConfigs();
-    easyMockSupport.verifyAll();
-
-    Map<String, String> updatedProperties = propertiesCapture.getValue();
-    assertTrue(Maps.difference(newProperties, updatedProperties).areEqual());
-  }
-
-  @Test
-  public void testAmsLog4jUpdateConfigs() throws Exception {
-    reset(clusters, cluster);
-    expect(clusters.getClusters()).andReturn(ImmutableMap.of("normal", cluster)).once();
-
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
-    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
-        .addMockedMethod("createConfiguration")
-        .addMockedMethod("getClusters", new Class[]{})
-        .addMockedMethod("createConfig")
-        .withConstructor(actionManager, clusters, injector)
-        .createNiceMock();
-
-    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-
-    Map<String, String> oldAmsLog4j = ImmutableMap.of(
-        "content",
-        "#\n" +
-            "# Licensed to the Apache Software Foundation (ASF) under one\n" +
-            "# or more contributor license agreements.  See the NOTICE file\n" +
-            "# distributed with this work for additional information\n" +
-            "# regarding copyright ownership.  The ASF licenses this file\n" +
-            "# to you under the Apache License, Version 2.0 (the\n" +
-            "# \"License\"); you may not use this file except in compliance\n" +
-            "# with the License.  You may obtain a copy of the License at\n" +
-            "#\n" +
-            "#     http://www.apache.org/licenses/LICENSE-2.0\n" +
-            "#\n" +
-            "# Unless required by applicable law or agreed to in writing, software\n" +
-            "# distributed under the License is distributed on an \"AS IS\" BASIS,\n" +
-            "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" +
-            "# See the License for the specific language governing permissions and\n" +
-            "# limitations under the License.\n" +
-            "#\n" +
-            "\n" +
-            "# Define some default values that can be overridden by system properties\n" +
-            "ams.log.dir=.\n" +
-            "ams.log.file=ambari-metrics-collector.log\n" +
-            "\n" +
-            "# Root logger option\n" +
-            "log4j.rootLogger=INFO,file\n" +
-            "\n" +
-            "# Direct log messages to a log file\n" +
-            "log4j.appender.file=org.apache.log4j.RollingFileAppender\n" +
-            "log4j.appender.file.File=${ams.log.dir}/${ams.log.file}\n" +
-            "log4j.appender.file.MaxFileSize=10MB\n" +
-            "log4j.appender.file.MaxBackupIndex=12\n" +
-            "log4j.appender.file.layout=org.apache.log4j.PatternLayout\n" +
-            "log4j.appender.file.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n");
-
-    Map<String, String> expectedAmsLog4j = new HashMap<>();
-    expectedAmsLog4j.put("content", "#\n" +
-        "# Licensed to the Apache Software Foundation (ASF) under one\n" +
-        "# or more contributor license agreements.  See the NOTICE file\n" +
-        "# distributed with this work for additional information\n" +
-        "# regarding copyright ownership.  The ASF licenses this file\n" +
-        "# to you under the Apache License, Version 2.0 (the\n" +
-        "# \"License\"); you may not use this file except in compliance\n" +
-        "# with the License.  You may obtain a copy of the License at\n" +
-        "#\n" +
-        "#     http://www.apache.org/licenses/LICENSE-2.0\n" +
-        "#\n" +
-        "# Unless required by applicable law or agreed to in writing, software\n" +
-        "# distributed under the License is distributed on an \"AS IS\" BASIS,\n" +
-        "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" +
-        "# See the License for the specific language governing permissions and\n" +
-        "# limitations under the License.\n" +
-        "#\n" +
-        "\n" +
-        "# Define some default values that can be overridden by system properties\n" +
-        "ams.log.dir=.\n" +
-        "ams.log.file=ambari-metrics-collector.log\n" +
-        "\n" +
-        "# Root logger option\n" +
-        "log4j.rootLogger=INFO,file\n" +
-        "\n" +
-        "# Direct log messages to a log file\n" +
-        "log4j.appender.file=org.apache.log4j.RollingFileAppender\n" +
-        "log4j.appender.file.File=${ams.log.dir}/${ams.log.file}\n" +
-        "log4j.appender.file.MaxFileSize={{ams_log_max_backup_size}}MB\n" +
-        "log4j.appender.file.MaxBackupIndex={{ams_log_number_of_backup_files}}\n" +
-        "log4j.appender.file.layout=org.apache.log4j.PatternLayout\n" +
-        "log4j.appender.file.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n");
-    expectedAmsLog4j.put("ams_log_max_backup_size", "10");
-    expectedAmsLog4j.put("ams_log_number_of_backup_files", "12");
-
-
-    Config mockAmsLog4j = easyMockSupport.createNiceMock(Config.class);
-    expect(cluster.getDesiredConfigByType("ams-log4j")).andReturn(mockAmsLog4j).atLeastOnce();
-    expect(mockAmsLog4j.getProperties()).andReturn(oldAmsLog4j).anyTimes();
-    Capture<Map<String, String>> AmsLog4jCapture = EasyMock.newCapture();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(AmsLog4jCapture), anyString(),
-        anyObject(Map.class))).andReturn(config).once();
-
-    Map<String, String> oldAmsHbaseLog4j = ImmutableMap.of(
-        "content", "# Licensed to the Apache Software Foundation (ASF) under one\n" +
-            "# or more contributor license agreements.  See the NOTICE file\n" +
-            "# distributed with this work for additional information\n" +
-            "# regarding copyright ownership.  The ASF licenses this file\n" +
-            "# to you under the Apache License, Version 2.0 (the\n" +
-            "# \"License\"); you may not use this file except in compliance\n" +
-            "# with the License.  You may obtain a copy of the License at\n" +
-            "#\n" +
-            "#     http://www.apache.org/licenses/LICENSE-2.0\n" +
-            "#\n" +
-            "# Unless required by applicable law or agreed to in writing, software\n" +
-            "# distributed under the License is distributed on an \"AS IS\" BASIS,\n" +
-            "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" +
-            "# See the License for the specific language governing permissions and\n" +
-            "# limitations under the License.\n" +
-            "\n" +
-            "\n" +
-            "# Define some default values that can be overridden by system properties\n" +
-            "hbase.root.logger=INFO,console\n" +
-            "hbase.security.logger=INFO,console\n" +
-            "hbase.log.dir=.\n" +
-            "hbase.log.file=hbase.log\n" +
-            "\n" +
-            "# Define the root logger to the system property \"hbase.root.logger\".\n" +
-            "log4j.rootLogger=${hbase.root.logger}\n" +
-            "\n" +
-            "# Logging Threshold\n" +
-            "log4j.threshold=ALL\n" +
-            "\n" +
-            "#\n" +
-            "# Daily Rolling File Appender\n" +
-            "#\n" +
-            "log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\n" +
-            "log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}\n" +
-            "\n" +
-            "# Rollver at midnight\n" +
-            "log4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n" +
-            "\n" +
-            "# 30-day backup\n" +
-            "#log4j.appender.DRFA.MaxBackupIndex=30\n" +
-            "log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n" +
-            "\n" +
-            "# Pattern format: Date LogLevel LoggerName LogMessage\n" +
-            "log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n" +
-            "\n" +
-            "# Rolling File Appender properties\n" +
-            "hbase.log.maxfilesize=256MB\n" +
-            "hbase.log.maxbackupindex=20\n" +
-            "\n" +
-            "# Rolling File Appender\n" +
-            "log4j.appender.RFA=org.apache.log4j.RollingFileAppender\n" +
-            "log4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}\n" +
-            "\n" +
-            "log4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}\n" +
-            "log4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}\n" +
-            "\n" +
-            "log4j.appender.RFA.layout=org.apache.log4j.PatternLayout\n" +
-            "log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n" +
-            "\n" +
-            "#\n" +
-            "# Security audit appender\n" +
-            "#\n" +
-            "hbase.security.log.file=SecurityAuth.audit\n" +
-            "hbase.security.log.maxfilesize=256MB\n" +
-            "hbase.security.log.maxbackupindex=20\n" +
-            "log4j.appender.RFAS=org.apache.log4j.RollingFileAppender\n" +
-            "log4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}\n" +
-            "log4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}\n" +
-            "log4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}\n" +
-            "log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\n" +
-            "log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n" +
-            "log4j.category.SecurityLogger=${hbase.security.logger}\n" +
-            "log4j.additivity.SecurityLogger=false\n" +
-            "#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE\n" +
-            "\n" +
-            "#\n" +
-            "# Null Appender\n" +
-            "#\n" +
-            "log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n" +
-            "\n" +
-            "#\n" +
-            "# console\n" +
-            "# Add \"console\" to rootlogger above if you want to use this\n" +
-            "#\n" +
-            "log4j.appender.console=org.apache.log4j.ConsoleAppender\n" +
-            "log4j.appender.console.target=System.err\n" +
-            "log4j.appender.console.layout=org.apache.log4j.PatternLayout\n" +
-            "log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n" +
-            "\n" +
-            "# Custom Logging levels\n" +
-            "\n" +
-            "log4j.logger.org.apache.zookeeper=INFO\n" +
-            "#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\n" +
-            "log4j.logger.org.apache.hadoop.hbase=INFO\n" +
-            "# Make these two classes INFO-level. Make them DEBUG to see more zk debug.\n" +
-            "log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO\n" +
-            "log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO\n" +
-            "#log4j.logger.org.apache.hadoop.dfs=DEBUG\n" +
-            "# Set this class to log INFO only otherwise its OTT\n" +
-            "# Enable this to get detailed connection error/retry logging.\n" +
-            "# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE\n" +
-            "\n" +
-            "\n" +
-            "# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)\n" +
-            "#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG\n" +
-            "\n" +
-            "# Uncomment the below if you want to remove logging of client region caching'\n" +
-            "# and scan of .META. messages\n" +
-            "# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO\n" +
-            "# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO\n");
-
-    Map<String, String> expectedAmsHbaseLog4j = new HashMap<>();
-    expectedAmsHbaseLog4j.put("content", "# Licensed to the Apache Software Foundation (ASF) under one\n" +
-        "# or more contributor license agreements.  See the NOTICE file\n" +
-        "# distributed with this work for additional information\n" +
-        "# regarding copyright ownership.  The ASF licenses this file\n" +
-        "# to you under the Apache License, Version 2.0 (the\n" +
-        "# \"License\"); you may not use this file except in compliance\n" +
-        "# with the License.  You may obtain a copy of the License at\n" +
-        "#\n" +
-        "#     http://www.apache.org/licenses/LICENSE-2.0\n" +
-        "#\n" +
-        "# Unless required by applicable law or agreed to in writing, software\n" +
-        "# distributed under the License is distributed on an \"AS IS\" BASIS,\n" +
-        "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" +
-        "# See the License for the specific language governing permissions and\n" +
-        "# limitations under the License.\n" +
-        "\n" +
-        "\n" +
-        "# Define some default values that can be overridden by system properties\n" +
-        "hbase.root.logger=INFO,console\n" +
-        "hbase.security.logger=INFO,console\n" +
-        "hbase.log.dir=.\n" +
-        "hbase.log.file=hbase.log\n" +
-        "\n" +
-        "# Define the root logger to the system property \"hbase.root.logger\".\n" +
-        "log4j.rootLogger=${hbase.root.logger}\n" +
-        "\n" +
-        "# Logging Threshold\n" +
-        "log4j.threshold=ALL\n" +
-        "\n" +
-        "#\n" +
-        "# Daily Rolling File Appender\n" +
-        "#\n" +
-        "log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\n" +
-        "log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}\n" +
-        "\n" +
-        "# Rollver at midnight\n" +
-        "log4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n" +
-        "\n" +
-        "# 30-day backup\n" +
-        "#log4j.appender.DRFA.MaxBackupIndex=30\n" +
-        "log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n" +
-        "\n" +
-        "# Pattern format: Date LogLevel LoggerName LogMessage\n" +
-        "log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n" +
-        "\n" +
-        "# Rolling File Appender properties\n" +
-        "hbase.log.maxfilesize={{ams_hbase_log_maxfilesize}}MB\n" +
-        "hbase.log.maxbackupindex={{ams_hbase_log_maxbackupindex}}\n" +
-        "\n" +
-        "# Rolling File Appender\n" +
-        "log4j.appender.RFA=org.apache.log4j.RollingFileAppender\n" +
-        "log4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file}\n" +
-        "\n" +
-        "log4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize}\n" +
-        "log4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex}\n" +
-        "\n" +
-        "log4j.appender.RFA.layout=org.apache.log4j.PatternLayout\n" +
-        "log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n" +
-        "\n" +
-        "#\n" +
-        "# Security audit appender\n" +
-        "#\n" +
-        "hbase.security.log.file=SecurityAuth.audit\n" +
-        "hbase.security.log.maxfilesize={{ams_hbase_security_log_maxfilesize}}MB\n" +
-        "hbase.security.log.maxbackupindex={{ams_hbase_security_log_maxbackupindex}}\n" +
-        "log4j.appender.RFAS=org.apache.log4j.RollingFileAppender\n" +
-        "log4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file}\n" +
-        "log4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize}\n" +
-        "log4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex}\n" +
-        "log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\n" +
-        "log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n" +
-        "log4j.category.SecurityLogger=${hbase.security.logger}\n" +
-        "log4j.additivity.SecurityLogger=false\n" +
-        "#log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE\n" +
-        "\n" +
-        "#\n" +
-        "# Null Appender\n" +
-        "#\n" +
-        "log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n" +
-        "\n" +
-        "#\n" +
-        "# console\n" +
-        "# Add \"console\" to rootlogger above if you want to use this\n" +
-        "#\n" +
-        "log4j.appender.console=org.apache.log4j.ConsoleAppender\n" +
-        "log4j.appender.console.target=System.err\n" +
-        "log4j.appender.console.layout=org.apache.log4j.PatternLayout\n" +
-        "log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n\n" +
-        "\n" +
-        "# Custom Logging levels\n" +
-        "\n" +
-        "log4j.logger.org.apache.zookeeper=INFO\n" +
-        "#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\n" +
-        "log4j.logger.org.apache.hadoop.hbase=INFO\n" +
-        "# Make these two classes INFO-level. Make them DEBUG to see more zk debug.\n" +
-        "log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO\n" +
-        "log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO\n" +
-        "#log4j.logger.org.apache.hadoop.dfs=DEBUG\n" +
-        "# Set this class to log INFO only otherwise its OTT\n" +
-        "# Enable this to get detailed connection error/retry logging.\n" +
-        "# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE\n" +
-        "\n" +
-        "\n" +
-        "# Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output)\n" +
-        "#log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG\n" +
-        "\n" +
-        "# Uncomment the below if you want to remove logging of client region caching'\n" +
-        "# and scan of .META. messages\n" +
-        "# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO\n" +
-        "# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO\n");
-    expectedAmsHbaseLog4j.put("ams_hbase_log_maxfilesize", "256");
-    expectedAmsHbaseLog4j.put("ams_hbase_log_maxbackupindex", "20");
-    expectedAmsHbaseLog4j.put("ams_hbase_security_log_maxfilesize", "256");
-    expectedAmsHbaseLog4j.put("ams_hbase_security_log_maxbackupindex", "20");
-
-    Config mockAmsHbaseLog4j = easyMockSupport.createNiceMock(Config.class);
-    expect(cluster.getDesiredConfigByType("ams-hbase-log4j")).andReturn(mockAmsHbaseLog4j).atLeastOnce();
-    expect(mockAmsHbaseLog4j.getProperties()).andReturn(oldAmsHbaseLog4j).anyTimes();
-    Capture<Map<String, String>> AmsHbaseLog4jCapture = EasyMock.newCapture();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(AmsHbaseLog4jCapture), anyString(),
-        anyObject(Map.class))).andReturn(config).once();
-
-    replay(clusters, cluster);
-    replay(controller, injector2);
-    replay(mockAmsLog4j, mockAmsHbaseLog4j);
-    new UpgradeCatalog250(injector2).updateAMSConfigs();
-    easyMockSupport.verifyAll();
-
-    Map<String, String> updatedAmsLog4jProperties = AmsLog4jCapture.getValue();
-    assertTrue(Maps.difference(expectedAmsLog4j, updatedAmsLog4jProperties).areEqual());
-
-    Map<String, String> updatedAmsHbaseLog4jProperties = AmsHbaseLog4jCapture.getValue();
-    assertTrue(Maps.difference(expectedAmsHbaseLog4j, updatedAmsHbaseLog4jProperties).areEqual());
-
-  }
-
-  @Test
-  public void testLogSearchUpdateConfigs() throws Exception {
-    reset(clusters, cluster);
-    expect(clusters.getClusters()).andReturn(ImmutableMap.of("normal", cluster)).once();
-
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
-    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
-        .addMockedMethod("createConfiguration")
-        .addMockedMethod("getClusters", new Class[]{})
-        .addMockedMethod("createConfig")
-        .withConstructor(actionManager, clusters, injector)
-        .createNiceMock();
-
-    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-
-    Map<String, String> oldLogSearchProperties = ImmutableMap.of(
-        "logsearch.external.auth.enabled", "true",
-        "logsearch.external.auth.host_url", "host_url",
-        "logsearch.external.auth.login_url", "login_url");
-
-    Map<String, String> expectedLogSearchProperties = ImmutableMap.of(
-        "logsearch.auth.external_auth.enabled", "true",
-        "logsearch.auth.external_auth.host_url", "host_url",
-        "logsearch.auth.external_auth.login_url", "login_url");
-
-    Config mockLogSearchProperties = easyMockSupport.createNiceMock(Config.class);
-    expect(cluster.getDesiredConfigByType("logsearch-properties")).andReturn(mockLogSearchProperties).atLeastOnce();
-    expect(mockLogSearchProperties.getProperties()).andReturn(oldLogSearchProperties).anyTimes();
-    Capture<Map<String, String>> logSearchPropertiesCapture = EasyMock.newCapture();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(logSearchPropertiesCapture), anyString(),
-        EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
-
-    Map<String, String> oldLogFeederEnv = ImmutableMap.of(
-        "content", "infra_solr_ssl_enabled");
-
-    Map<String, String> expectedLogFeederEnv = ImmutableMap.of(
-        "content", "logfeeder_use_ssl");
-
-    Config mockLogFeederEnv = easyMockSupport.createNiceMock(Config.class);
-    expect(cluster.getDesiredConfigByType("logfeeder-env")).andReturn(mockLogFeederEnv).atLeastOnce();
-    expect(mockLogFeederEnv.getProperties()).andReturn(oldLogFeederEnv).anyTimes();
-    Capture<Map<String, String>> logFeederEnvCapture = EasyMock.newCapture();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(logFeederEnvCapture), anyString(),
-        EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
-
-    Map<String, String> oldLogSearchEnv = new HashMap<>();
-    oldLogSearchEnv.put("logsearch_solr_audit_logs_use_ranger", "false");
-    oldLogSearchEnv.put("logsearch_solr_audit_logs_zk_node", "zk_node");
-    oldLogSearchEnv.put("logsearch_solr_audit_logs_zk_quorum", "zk_quorum");
-    oldLogSearchEnv.put("logsearch_ui_protocol", "http");
-    oldLogSearchEnv.put("logsearch_truststore_location", "/etc/security/serverKeys/logsearch.trustStore.jks");
-    oldLogSearchEnv.put("logsearch_keystore_location", "/etc/security/serverKeys/logsearch.keyStore.jks");
-    oldLogSearchEnv.put("content", "infra_solr_ssl_enabled or logsearch_ui_protocol == 'https'");
-
-    Map<String, String> expectedLogSearchEnv = ImmutableMap.of(
-        "logsearch_ui_protocol", "http",
-        "logsearch_truststore_location", "/etc/ambari-logsearch-portal/conf/keys/logsearch.jks",
-        "logsearch_keystore_location", "/etc/ambari-logsearch-portal/conf/keys/logsearch.jks",
-        "content", "logsearch_use_ssl");
-
-    Config mockLogSearchEnv = easyMockSupport.createNiceMock(Config.class);
-    expect(cluster.getDesiredConfigByType("logsearch-env")).andReturn(mockLogSearchEnv).atLeastOnce();
-    expect(mockLogSearchEnv.getProperties()).andReturn(oldLogSearchEnv).anyTimes();
-    Capture<Map<String, String>> logSearchEnvCapture = EasyMock.newCapture();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(logSearchEnvCapture), anyString(),
-        EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
-
-    Map<String, String> oldLogFeederLog4j = ImmutableMap.of(
-        "content",
-        "    <appender name=\"rolling_file\" class=\"org.apache.log4j.RollingFileAppender\">\n" +
-            "    <param name=\"file\" value=\"{{logfeeder_log_dir}}/logfeeder.log\"/>\n" +
-            "    <param name=\"append\" value=\"true\"/>\n" +
-            "    <param name=\"maxFileSize\" value=\"11MB\"/>\n" +
-            "    <param name=\"maxBackupIndex\" value=\"12\"/>\n" +
-            "    <layout class=\"org.apache.log4j.PatternLayout\">\n" +
-            "      <param name=\"ConversionPattern\" value=\"%d [%t] %-5p %C{6} (%F:%L) - %m%n\"/>\n" +
-            "    </layout>\n" +
-            "  </appender>\n" +
-            "\n" +
-            "  <appender name=\"rolling_file_json\"\n" +
-            "    class=\"org.apache.ambari.logsearch.appender.LogsearchRollingFileAppender\">\n" +
-            "    <param name=\"file\" value=\"{{logfeeder_log_dir}}/logsearch-logfeeder.json\" />\n" +
-            "    <param name=\"append\" value=\"true\" />\n" +
-            "    <param name=\"maxFileSize\" value=\"13MB\" />\n" +
-            "    <param name=\"maxBackupIndex\" value=\"14\" />\n" +
-            "    <layout class=\"org.apache.ambari.logsearch.appender.LogsearchConversion\" />\n" +
-            "  </appender>");
-
-    Map<String, String> expectedLogFeederLog4j = ImmutableMap.of(
-        "content",
-        "    <appender name=\"rolling_file\" class=\"org.apache.log4j.RollingFileAppender\">\n" +
-            "    <param name=\"file\" value=\"{{logfeeder_log_dir}}/logfeeder.log\"/>\n" +
-            "    <param name=\"append\" value=\"true\"/>\n" +
-            "    <param name=\"maxFileSize\" value=\"{{logfeeder_log_maxfilesize}}MB\"/>\n" +
-            "    <param name=\"maxBackupIndex\" value=\"{{logfeeder_log_maxbackupindex}}\"/>\n" +
-            "    <layout class=\"org.apache.log4j.PatternLayout\">\n" +
-            "      <param name=\"ConversionPattern\" value=\"%d [%t] %-5p %C{6} (%F:%L) - %m%n\"/>\n" +
-            "    </layout>\n" +
-            "  </appender>\n" +
-            "\n" +
-            "  <appender name=\"rolling_file_json\"\n" +
-            "    class=\"org.apache.ambari.logsearch.appender.LogsearchRollingFileAppender\">\n" +
-            "    <param name=\"file\" value=\"{{logfeeder_log_dir}}/logsearch-logfeeder.json\" />\n" +
-            "    <param name=\"append\" value=\"true\" />\n" +
-            "    <param name=\"maxFileSize\" value=\"{{logfeeder_json_log_maxfilesize}}MB\" />\n" +
-            "    <param name=\"maxBackupIndex\" value=\"{{logfeeder_json_log_maxbackupindex}}\" />\n" +
-            "    <layout class=\"org.apache.ambari.logsearch.appender.LogsearchConversion\" />\n" +
-            "  </appender>",
-        "logfeeder_log_maxfilesize", "11",
-        "logfeeder_log_maxbackupindex", "12",
-        "logfeeder_json_log_maxfilesize", "13",
-        "logfeeder_json_log_maxbackupindex", "14");
-
-    Config mockLogFeederLog4j = easyMockSupport.createNiceMock(Config.class);
-    expect(cluster.getDesiredConfigByType("logfeeder-log4j")).andReturn(mockLogFeederLog4j).atLeastOnce();
-    expect(mockLogFeederLog4j.getProperties()).andReturn(oldLogFeederLog4j).anyTimes();
-    Capture<Map<String, String>> logFeederLog4jCapture = EasyMock.newCapture();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(logFeederLog4jCapture), anyString(),
-        EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
-
-    Map<String, String> oldLogSearchLog4j = ImmutableMap.of(
-        "content",
-        "  <appender name=\"rolling_file\" class=\"org.apache.log4j.RollingFileAppender\">\n" +
-            "    <param name=\"file\" value=\"{{logsearch_log_dir}}/logsearch.err\" />\n" +
-            "    <param name=\"Threshold\" value=\"info\" />\n" +
-            "    <param name=\"append\" value=\"true\" />\n" +
-            "    <param name=\"maxFileSize\" value=\"11MB\" />\n" +
-            "    <param name=\"maxBackupIndex\" value=\"12\" />\n" +
-            "    <layout class=\"org.apache.log4j.PatternLayout\">\n" +
-            "      <param name=\"ConversionPattern\" value=\"%d [%t] %-5p %C{6} (%F:%L) - %m%n\" />\n" +
-            "    </layout>\n" +
-            "  </appender>\n" +
-            "\n" +
-            "  <appender name=\"rolling_file_json\" class=\"org.apache.ambari.logsearch.appender.LogsearchRollingFileAppender\">\n" +
-            "    <param name=\"file\" value=\"{{logsearch_log_dir}}/logsearch.json\"/>\n" +
-            "    <param name=\"append\" value=\"true\"/>\n" +
-            "    <param name=\"maxFileSize\" value=\"13MB\"/>\n" +
-            "    <param name=\"maxBackupIndex\" value=\"14\"/>\n" +
-            "    <layout class=\"org.apache.ambari.logsearch.appender.LogsearchConversion\"/>\n" +
-            "  </appender>\n" +
-            "\n" +
-            "  <appender name=\"audit_rolling_file_json\" class=\"org.apache.ambari.logsearch.appender.LogsearchRollingFileAppender\">\n" +
-            "    <param name=\"file\" value=\"{{logsearch_log_dir}}/logsearch-audit.json\"/>\n" +
-            "    <param name=\"append\" value=\"true\"/>\n" +
-            "    <param name=\"maxFileSize\" value=\"15MB\"/>\n" +
-            "    <param name=\"maxBackupIndex\" value=\"16\"/>\n" +
-            "    <layout class=\"org.apache.ambari.logsearch.appender.LogsearchConversion\"/>\n" +
-            "  </appender>\n" +
-            "\n" +
-            "  <appender name=\"performance_analyzer_json\" class=\"org.apache.ambari.logsearch.appender.LogsearchRollingFileAppender\">\n" +
-            "    <param name=\"file\" value=\"{{logsearch_log_dir}}/logsearch-performance.json\"/>\n" +
-            "    <param name=\"Threshold\" value=\"info\"/>\n" +
-            "    <param name=\"append\" value=\"true\"/>\n" +
-            "    <param name=\"maxFileSize\" value=\"17MB\"/>\n" +
-            "    <param name=\"maxBackupIndex\" value=\"18\"/>\n" +
-            "    <layout class=\"org.apache.ambari.logsearch.appender.LogsearchConversion\"/>\n" +
-            "  </appender>\n" +
-            "\n" +
-            "  <logger name=\"org.apache.ambari.logsearch.audit\" additivity=\"true\">\n" +
-            "     <appender-ref ref=\"audit_rolling_file_json\"/>\n" +
-            "  </logger>\n" +
-            "\n" +
-            "  <logger name=\"org.apache.ambari.logsearch.performance\" additivity=\"false\">\n" +
-            "    <appender-ref ref=\"performance_analyzer_json\"/>\n" +
-            "  </logger>\n" +
-            "\n" +
-            "  <category name=\"org.apache.ambari.logsearch\" additivity=\"false\">\n" +
-            "    <priority value=\"warn\"/>\n" +
-            "    <appender-ref ref=\"rolling_file_json\"/>\n" +
-            "  </category>");
-
-    Map<String, String> expectedLogSearchLog4j = new HashMap<>();
-    expectedLogSearchLog4j.put("content",
-        "  <appender name=\"rolling_file\" class=\"org.apache.log4j.RollingFileAppender\">\n" +
-            "    <param name=\"file\" value=\"{{logsearch_log_dir}}/logsearch.log\" />\n" +
-            "    <param name=\"Threshold\" value=\"info\" />\n" +
-            "    <param name=\"append\" value=\"true\" />\n" +
-            "    <param name=\"maxFileSize\" value=\"{{logsearch_log_maxfilesize}}MB\" />\n" +
-            "    <param name=\"maxBackupIndex\" value=\"{{logsearch_log_maxbackupindex}}\" />\n" +
-            "    <layout class=\"org.apache.log4j.PatternLayout\">\n" +
-            "      <param name=\"ConversionPattern\" value=\"%d [%t] %-5p %C{6} (%F:%L) - %m%n\" />\n" +
-            "    </layout>\n" +
-            "  </appender>\n" +
-            "\n" +
-            "  <appender name=\"rolling_file_json\" class=\"org.apache.ambari.logsearch.appender.LogsearchRollingFileAppender\">\n" +
-            "    <param name=\"file\" value=\"{{logsearch_log_dir}}/logsearch.json\"/>\n" +
-            "    <param name=\"append\" value=\"true\"/>\n" +
-            "    <param name=\"maxFileSize\" value=\"{{logsearch_json_log_maxfilesize}}MB\"/>\n" +
-            "    <param name=\"maxBackupIndex\" value=\"{{logsearch_json_log_maxbackupindex}}\"/>\n" +
-            "    <layout class=\"org.apache.ambari.logsearch.appender.LogsearchConversion\"/>\n" +
-            "  </appender>\n" +
-            "\n" +
-            "  <appender name=\"audit_rolling_file_json\" class=\"org.apache.ambari.logsearch.appender.LogsearchRollingFileAppender\">\n" +
-            "    <param name=\"file\" value=\"{{logsearch_log_dir}}/logsearch-audit.json\"/>\n" +
-            "    <param name=\"append\" value=\"true\"/>\n" +
-            "    <param name=\"maxFileSize\" value=\"{{logsearch_audit_log_maxfilesize}}MB\"/>\n" +
-            "    <param name=\"maxBackupIndex\" value=\"{{logsearch_audit_log_maxbackupindex}}\"/>\n" +
-            "    <layout class=\"org.apache.ambari.logsearch.appender.LogsearchConversion\"/>\n" +
-            "  </appender>\n" +
-            "\n" +
-            "  <appender name=\"performance_analyzer_json\" class=\"org.apache.ambari.logsearch.appender.LogsearchRollingFileAppender\">\n" +
-            "    <param name=\"file\" value=\"{{logsearch_log_dir}}/logsearch-performance.json\"/>\n" +
-            "    <param name=\"Threshold\" value=\"info\"/>\n" +
-            "    <param name=\"append\" value=\"true\"/>\n" +
-            "    <param name=\"maxFileSize\" value=\"{{logsearch_perf_log_maxfilesize}}MB\"/>\n" +
-            "    <param name=\"maxBackupIndex\" value=\"{{logsearch_perf_log_maxbackupindex}}\"/>\n" +
-            "    <layout class=\"org.apache.ambari.logsearch.appender.LogsearchConversion\"/>\n" +
-            "  </appender>\n" +
-            "\n" +
-            "  <logger name=\"org.apache.ambari.logsearch.audit\" additivity=\"true\">\n" +
-            "     <appender-ref ref=\"audit_rolling_file_json\"/>\n" +
-            "  </logger>\n" +
-            "\n" +
-            "  <logger name=\"org.apache.ambari.logsearch.performance\" additivity=\"false\">\n" +
-            "    <appender-ref ref=\"performance_analyzer_json\"/>\n" +
-            "  </logger>\n" +
-            "\n" +
-            "  <category name=\"org.apache.ambari.logsearch\" additivity=\"false\">\n" +
-            "    <priority value=\"info\"/>\n" +
-            "    <appender-ref ref=\"rolling_file_json\"/>\n" +
-            "  </category>");
-
-    expectedLogSearchLog4j.put("logsearch_log_maxfilesize", "11");
-    expectedLogSearchLog4j.put("logsearch_log_maxbackupindex", "12");
-    expectedLogSearchLog4j.put("logsearch_json_log_maxfilesize", "13");
-    expectedLogSearchLog4j.put("logsearch_json_log_maxbackupindex", "14");
-    expectedLogSearchLog4j.put("logsearch_audit_log_maxfilesize", "15");
-    expectedLogSearchLog4j.put("logsearch_audit_log_maxbackupindex", "16");
-    expectedLogSearchLog4j.put("logsearch_perf_log_maxfilesize", "17");
-    expectedLogSearchLog4j.put("logsearch_perf_log_maxbackupindex", "18");
-
-    Config mockLogSearchLog4j = easyMockSupport.createNiceMock(Config.class);
-    expect(cluster.getDesiredConfigByType("logsearch-log4j")).andReturn(mockLogSearchLog4j).atLeastOnce();
-    expect(mockLogSearchLog4j.getProperties()).andReturn(oldLogSearchLog4j).anyTimes();
-    Capture<Map<String, String>> logSearchLog4jCapture = EasyMock.newCapture();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(logSearchLog4jCapture), anyString(),
-        EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
-
-    replay(clusters, cluster);
-    replay(controller, injector2);
-    replay(mockLogSearchProperties, mockLogFeederEnv, mockLogSearchEnv, mockLogFeederLog4j, mockLogSearchLog4j);
-    new UpgradeCatalog250(injector2).updateLogSearchConfigs();
-    easyMockSupport.verifyAll();
-
-    Map<String, String> updatedLogSearchProperties = logSearchPropertiesCapture.getValue();
-    assertTrue(Maps.difference(expectedLogSearchProperties, updatedLogSearchProperties).areEqual());
-
-    Map<String, String> updatedLogFeederEnv = logFeederEnvCapture.getValue();
-    assertTrue(Maps.difference(expectedLogFeederEnv, updatedLogFeederEnv).areEqual());
-
-    Map<String, String> updatedLogSearchEnv = logSearchEnvCapture.getValue();
-    assertTrue(Maps.difference(expectedLogSearchEnv, updatedLogSearchEnv).areEqual());
-
-    Map<String, String> updatedLogFeederLog4j = logFeederLog4jCapture.getValue();
-    assertTrue(Maps.difference(expectedLogFeederLog4j, updatedLogFeederLog4j).areEqual());
-
-    Map<String, String> updatedLogSearchLog4j = logSearchLog4jCapture.getValue();
-    assertTrue(Maps.difference(expectedLogSearchLog4j, updatedLogSearchLog4j).areEqual());
-  }
-
-  @Test
-  public void testAmbariInfraUpdateConfigs() throws Exception {
-    reset(clusters, cluster);
-    expect(clusters.getClusters()).andReturn(ImmutableMap.of("normal", cluster)).once();
-
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
-    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
-        .addMockedMethod("createConfiguration")
-        .addMockedMethod("getClusters", new Class[]{})
-        .addMockedMethod("createConfig")
-        .withConstructor(actionManager, clusters, injector)
-        .createNiceMock();
-
-    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-
-    Map<String, String> oldInfraSolrEnv = ImmutableMap.of(
-        "content", "SOLR_SSL_TRUST_STORE={{infra_solr_keystore_location}}\n" +
-            "SOLR_SSL_TRUST_STORE_PASSWORD={{infra_solr_keystore_password}}\n" +
-            "SOLR_KERB_NAME_RULES={{infra_solr_kerberos_name_rules}}\n" +
-            "SOLR_AUTHENTICATION_OPTS=\" -DauthenticationPlugin=org.apache.solr.security.KerberosPlugin -Djava.security.auth.login.config=$SOLR_JAAS_FILE -Dsolr.kerberos.principal=${SOLR_KERB_PRINCIPAL} -Dsolr.kerberos.keytab=${SOLR_KERB_KEYTAB} -Dsolr.kerberos.cookie.domain=${SOLR_HOST} -Dsolr.kerberos.name.rules=${SOLR_KERB_NAME_RULES}\"");
-
-    Map<String, String> expectedInfraSolrEnv = ImmutableMap.of(
-        "content", "SOLR_SSL_TRUST_STORE={{infra_solr_truststore_location}}\n" +
-            "SOLR_SSL_TRUST_STORE_PASSWORD={{infra_solr_truststore_password}}\n" +
-            "SOLR_KERB_NAME_RULES=\"{{infra_solr_kerberos_name_rules}}\"\n" +
-            "SOLR_AUTHENTICATION_OPTS=\" -DauthenticationPlugin=org.apache.solr.security.KerberosPlugin -Djava.security.auth.login.config=$SOLR_JAAS_FILE -Dsolr.kerberos.principal=${SOLR_KERB_PRINCIPAL} -Dsolr.kerberos.keytab=${SOLR_KERB_KEYTAB} -Dsolr.kerberos.cookie.domain=${SOLR_HOST}\"");
-
-    Config mockInfraSolrEnv = easyMockSupport.createNiceMock(Config.class);
-    expect(cluster.getDesiredConfigByType("infra-solr-env")).andReturn(mockInfraSolrEnv).atLeastOnce();
-    expect(mockInfraSolrEnv.getProperties()).andReturn(oldInfraSolrEnv).anyTimes();
-    Capture<Map<String, String>> infraSolrEnvCapture = EasyMock.newCapture();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(infraSolrEnvCapture), anyString(),
-        EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
-
-    Map<String, String> oldInfraSolrLog4j = ImmutableMap.of(
-        "content", "log4j.appender.file.MaxFileSize=15MB\n" +
-            "log4j.appender.file.MaxBackupIndex=5\n");
-
-    Map<String, String> expectedInfraSolrLog4j = ImmutableMap.of(
-        "content", "log4j.appender.file.MaxFileSize={{infra_log_maxfilesize}}MB\n" +
-            "log4j.appender.file.MaxBackupIndex={{infra_log_maxbackupindex}}\n",
-        "infra_log_maxfilesize", "15",
-        "infra_log_maxbackupindex", "5");
-
-    Config mockInfraSolrLog4j = easyMockSupport.createNiceMock(Config.class);
-    expect(cluster.getDesiredConfigByType("infra-solr-log4j")).andReturn(mockInfraSolrLog4j).atLeastOnce();
-    expect(mockInfraSolrLog4j.getProperties()).andReturn(oldInfraSolrLog4j).anyTimes();
-    Capture<Map<String, String>> infraSolrLog4jCapture = EasyMock.newCapture();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(infraSolrLog4jCapture), anyString(),
-        EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
-
-    Map<String, String> oldInfraSolrClientLog4j = ImmutableMap.of(
-        "content", "log4j.appender.file.File\u003d{{infra_client_log|default(\u0027/var/log/ambari-infra-solr-client/solr-client.log\u0027)}}\n" +
-            "log4j.appender.file.MaxFileSize=55MB\n" +
-            "log4j.appender.file.MaxBackupIndex=10\n");
-
-    Map<String, String> expectedInfraSolrClientLog4j = ImmutableMap.of(
-        "content", "log4j.appender.file.File\u003d{{solr_client_log|default(\u0027/var/log/ambari-infra-solr-client/solr-client.log\u0027)}}\n" +
-            "log4j.appender.file.MaxFileSize={{solr_client_log_maxfilesize}}MB\n" +
-            "log4j.appender.file.MaxBackupIndex={{solr_client_log_maxbackupindex}}\n",
-        "infra_client_log_maxfilesize", "55",
-        "infra_client_log_maxbackupindex", "10");
-
-    Config mockInfraSolrClientLog4j = easyMockSupport.createNiceMock(Config.class);
-    expect(cluster.getDesiredConfigByType("infra-solr-client-log4j")).andReturn(mockInfraSolrClientLog4j).atLeastOnce();
-    expect(mockInfraSolrClientLog4j.getProperties()).andReturn(oldInfraSolrClientLog4j).anyTimes();
-    Capture<Map<String, String>> infraSolrClientLog4jCapture = EasyMock.newCapture();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(infraSolrClientLog4jCapture), anyString(),
-        EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(config).once();
-
-    replay(clusters, cluster);
-    replay(controller, injector2);
-    replay(mockInfraSolrEnv, mockInfraSolrLog4j, mockInfraSolrClientLog4j);
-    new UpgradeCatalog250(injector2).updateAmbariInfraConfigs();
-    easyMockSupport.verifyAll();
-
-    Map<String, String> updatedInfraSolrEnv = infraSolrEnvCapture.getValue();
-    assertTrue(Maps.difference(expectedInfraSolrEnv, updatedInfraSolrEnv).areEqual());
-
-    Map<String, String> updatedInfraSolrLog4j = infraSolrLog4jCapture.getValue();
-    assertTrue(Maps.difference(expectedInfraSolrLog4j, updatedInfraSolrLog4j).areEqual());
-
-    Map<String, String> updatedInfraSolrClientLog4j = infraSolrClientLog4jCapture.getValue();
-    assertTrue(Maps.difference(expectedInfraSolrClientLog4j, updatedInfraSolrClientLog4j).areEqual());
-  }
-
-  @Test
-  public void testUpdateHiveConfigs() throws Exception {
-    reset(clusters, cluster);
-    expect(clusters.getClusters()).andReturn(

<TRUNCATED>
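The removed UpgradeCatalog250 tests above all follow the same EasyMock idiom: stub the old desired config, capture the properties map handed to createConfig, then diff the captured map against the expected post-upgrade properties with Maps.difference. Below is a minimal, self-contained sketch of that capture-and-compare idiom; the ConfigStore interface and the migrate() helper are hypothetical stand-ins for the Ambari controller API and are not code from this commit.

import static org.easymock.EasyMock.capture;
import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.eq;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.newCapture;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;
import static org.junit.Assert.assertTrue;

import java.util.Map;

import org.easymock.Capture;
import org.junit.Test;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;

public class ConfigMigrationSketchTest {

  /** Hypothetical stand-in for the controller API mocked by the real upgrade-catalog tests. */
  interface ConfigStore {
    Map<String, String> getProperties(String configType);
    boolean createConfig(String configType, Map<String, String> properties);
  }

  /** Toy migration under test: rewrites a single property value in the config content. */
  static void migrate(ConfigStore store) {
    Map<String, String> old = store.getProperties("logfeeder-env");
    Map<String, String> updated = ImmutableMap.of(
        "content", old.get("content").replace("infra_solr_ssl_enabled", "logfeeder_use_ssl"));
    store.createConfig("logfeeder-env", updated);
  }

  @Test
  public void migrationRewritesContent() {
    ConfigStore store = createMock(ConfigStore.class);

    // Stub the pre-upgrade config, as the removed tests do with getDesiredConfigByType().
    expect(store.getProperties("logfeeder-env"))
        .andReturn(ImmutableMap.of("content", "infra_solr_ssl_enabled"));

    // Capture whatever map the migration writes back so it can be inspected afterwards.
    Capture<Map<String, String>> written = newCapture();
    expect(store.createConfig(eq("logfeeder-env"), capture(written))).andReturn(true);

    replay(store);
    migrate(store);
    verify(store);

    // Same assertion style as the removed tests: an empty map difference means equality.
    Map<String, String> expected = ImmutableMap.of("content", "logfeeder_use_ssl");
    assertTrue(Maps.difference(expected, written.getValue()).areEqual());
  }
}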

[04/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
deleted file mode 100644
index f106658..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog240Test.java
+++ /dev/null
@@ -1,2688 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.anyString;
-import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.createMock;
-import static org.easymock.EasyMock.createMockBuilder;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.createStrictMock;
-import static org.easymock.EasyMock.eq;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.newCapture;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-import static org.easymock.EasyMock.verify;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.lang.reflect.Field;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-
-import javax.persistence.EntityManager;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.H2DatabaseCleaner;
-import org.apache.ambari.server.actionmanager.ActionManager;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.AmbariManagementControllerImpl;
-import org.apache.ambari.server.controller.KerberosHelper;
-import org.apache.ambari.server.controller.MaintenanceStateHelper;
-import org.apache.ambari.server.hooks.HookContextFactory;
-import org.apache.ambari.server.hooks.HookService;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
-import org.apache.ambari.server.orm.dao.ClusterDAO;
-import org.apache.ambari.server.orm.dao.PrivilegeDAO;
-import org.apache.ambari.server.orm.dao.RemoteAmbariClusterDAO;
-import org.apache.ambari.server.orm.dao.RequestScheduleDAO;
-import org.apache.ambari.server.orm.dao.StackDAO;
-import org.apache.ambari.server.orm.dao.UserDAO;
-import org.apache.ambari.server.orm.dao.ViewInstanceDAO;
-import org.apache.ambari.server.orm.dao.WidgetDAO;
-import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
-import org.apache.ambari.server.orm.entities.ClusterEntity;
-import org.apache.ambari.server.orm.entities.PermissionEntity;
-import org.apache.ambari.server.orm.entities.PrincipalEntity;
-import org.apache.ambari.server.orm.entities.PrivilegeEntity;
-import org.apache.ambari.server.orm.entities.RemoteAmbariClusterEntity;
-import org.apache.ambari.server.orm.entities.RequestScheduleEntity;
-import org.apache.ambari.server.orm.entities.ResourceEntity;
-import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
-import org.apache.ambari.server.orm.entities.UserEntity;
-import org.apache.ambari.server.orm.entities.ViewInstanceEntity;
-import org.apache.ambari.server.orm.entities.WidgetEntity;
-import org.apache.ambari.server.security.authorization.ResourceType;
-import org.apache.ambari.server.security.authorization.User;
-import org.apache.ambari.server.security.authorization.UserName;
-import org.apache.ambari.server.security.authorization.Users;
-import org.apache.ambari.server.stack.StackManagerFactory;
-import org.apache.ambari.server.state.AlertFirmness;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.PropertyInfo;
-import org.apache.ambari.server.state.SecurityType;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.ServiceInfo;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.StackInfo;
-import org.apache.ambari.server.state.alert.SourceType;
-import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosKeytabDescriptor;
-import org.apache.ambari.server.state.kerberos.KerberosPrincipalDescriptor;
-import org.apache.ambari.server.state.stack.OsFamily;
-import org.apache.ambari.server.view.DefaultMasker;
-import org.apache.ambari.view.ClusterType;
-import org.apache.ambari.view.MaskException;
-import org.apache.commons.io.FileUtils;
-import org.easymock.Capture;
-import org.easymock.CaptureType;
-import org.easymock.EasyMock;
-import org.easymock.EasyMockSupport;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.springframework.security.crypto.password.PasswordEncoder;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Maps;
-import com.google.gson.Gson;
-import com.google.inject.AbstractModule;
-import com.google.inject.Binder;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.Module;
-import com.google.inject.Provider;
-
-import junit.framework.Assert;
-
-public class UpgradeCatalog240Test {
-  private static final String CAPACITY_SCHEDULER_CONFIG_TYPE = "capacity-scheduler";
-  private static final String WEBHCAT_SITE_CONFIG_TYPE = "webhcat-site";
-  private static final String TEZ_SITE_CONFIG_TYPE = "tez-site";
-  private static final String MAPRED_SITE_CONFIG_TYPE = "mapred-site";
-  private static final String YARN_ENV_CONFIG_TYPE = "yarn-env";
-  private static Injector injector;
-  private Provider<EntityManager> entityManagerProvider = createStrictMock(Provider.class);
-  private EntityManager entityManager = createNiceMock(EntityManager.class);
-
-
-  @BeforeClass
-  public static void classSetUp() {
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-  }
-
-  @Before
-  public void init() {
-    reset(entityManagerProvider);
-    expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
-    replay(entityManagerProvider);
-
-    injector.getInstance(UpgradeCatalogHelper.class);
-    // inject AmbariMetaInfo to ensure that stacks get populated in the DB
-    injector.getInstance(AmbariMetaInfo.class);
-    // load the stack entity
-    StackDAO stackDAO = injector.getInstance(StackDAO.class);
-    stackDAO.find("HDP", "2.2.0");
-  }
-
-  @AfterClass
-  public static void tearDown() throws AmbariException, SQLException {
-    H2DatabaseCleaner.clearDatabaseAndStopPersistenceService(injector);
-  }
-
-  @Test
-  public void testExecuteDDLUpdates() throws SQLException, AmbariException, ClassNotFoundException {
-    Capture<DBAccessor.DBColumnInfo> capturedSortOrderColumnInfo = newCapture();
-    Capture<DBAccessor.DBColumnInfo> capturedPermissionIDColumnInfo = newCapture();
-    Capture<DBAccessor.DBColumnInfo> capturedScColumnInfo = newCapture();
-    Capture<DBAccessor.DBColumnInfo> capturedScDesiredVersionColumnInfo = newCapture();
-
-    final DBAccessor dbAccessor = createStrictMock(DBAccessor.class);
-    Configuration configuration = createNiceMock(Configuration.class);
-    Connection connection = createNiceMock(Connection.class);
-    Statement statement = createNiceMock(Statement.class);
-    ResultSet resultSet = createNiceMock(ResultSet.class);
-
-    Capture<List<DBAccessor.DBColumnInfo>> capturedExtensionColumns = EasyMock.newCapture();
-    Capture<List<DBAccessor.DBColumnInfo>> capturedExtensionLinkColumns = EasyMock.newCapture();
-    Capture<List<DBAccessor.DBColumnInfo>> capturedSettingColumns = EasyMock.newCapture();
-
-    dbAccessor.addColumn(eq("adminpermission"), capture(capturedSortOrderColumnInfo));
-    dbAccessor.addColumn(eq("adminpermission"), capture(capturedPermissionIDColumnInfo));
-    dbAccessor.addColumn(eq(UpgradeCatalog240.SERVICE_COMPONENT_DESIRED_STATE_TABLE), capture(capturedScColumnInfo));
-    dbAccessor.addColumn(eq(UpgradeCatalog240.SERVICE_COMPONENT_DESIRED_STATE_TABLE),
-        capture(capturedScDesiredVersionColumnInfo));
-
-    dbAccessor.createTable(eq("extension"), capture(capturedExtensionColumns), eq("extension_id"));
-    dbAccessor.addUniqueConstraint("extension", "UQ_extension", "extension_name", "extension_version");
-
-    expect(dbAccessor.getConnection()).andReturn(connection);
-    dbAccessor.createTable(eq("extensionlink"), capture(capturedExtensionLinkColumns), eq("link_id"));
-    dbAccessor.addUniqueConstraint("extensionlink", "UQ_extension_link", "stack_id", "extension_id");
-    dbAccessor.addFKConstraint("extensionlink", "FK_extensionlink_extension_id", "extension_id", "extension",
-                               "extension_id", false);
-    dbAccessor.addFKConstraint("extensionlink", "FK_extensionlink_stack_id", "stack_id", "stack",
-                               "stack_id", false);
-
-    expect(dbAccessor.getConnection()).andReturn(connection);
-    dbAccessor.createTable(eq("setting"), capture(capturedSettingColumns), eq("id"));
-
-    expect(configuration.getDatabaseUrl()).andReturn(Configuration.JDBC_IN_MEMORY_URL).anyTimes();
-    expect(dbAccessor.getConnection()).andReturn(connection);
-    expect(connection.createStatement()).andReturn(statement);
-    expect(statement.executeQuery(anyObject(String.class))).andReturn(resultSet);
-
-    Capture<DBAccessor.DBColumnInfo> repoVersionRepoTypeColumnCapture = newCapture();
-    Capture<DBAccessor.DBColumnInfo> repoVersionUrlColumnCapture = newCapture();
-    Capture<DBAccessor.DBColumnInfo> repoVersionXmlColumnCapture = newCapture();
-    Capture<DBAccessor.DBColumnInfo> repoVersionXsdColumnCapture = newCapture();
-    Capture<DBAccessor.DBColumnInfo> repoVersionParentIdColumnCapture = newCapture();
-
-    dbAccessor.addColumn(eq("repo_version"), capture(repoVersionRepoTypeColumnCapture));
-    dbAccessor.addColumn(eq("repo_version"), capture(repoVersionUrlColumnCapture));
-    dbAccessor.addColumn(eq("repo_version"), capture(repoVersionXmlColumnCapture));
-    dbAccessor.addColumn(eq("repo_version"), capture(repoVersionXsdColumnCapture));
-    dbAccessor.addColumn(eq("repo_version"), capture(repoVersionParentIdColumnCapture));
-
-    // skip all of the drama of the servicecomponentdesiredstate table for now
-    expect(dbAccessor.tableHasPrimaryKey("servicecomponentdesiredstate", "id")).andReturn(true);
-
-    Capture<List<DBAccessor.DBColumnInfo>> capturedHistoryColumns = EasyMock.newCapture();
-    dbAccessor.createTable(eq("servicecomponent_history"), capture(capturedHistoryColumns),
-            eq((String[]) null));
-
-    dbAccessor.addPKConstraint("servicecomponent_history", "PK_sc_history", "id");
-    dbAccessor.addFKConstraint("servicecomponent_history", "FK_sc_history_component_id",
-        "component_id", "servicecomponentdesiredstate", "id", false);
-
-    dbAccessor.addFKConstraint("servicecomponent_history", "FK_sc_history_upgrade_id", "upgrade_id",
-        "upgrade", "upgrade_id", false);
-
-    dbAccessor.addFKConstraint("servicecomponent_history", "FK_sc_history_from_stack_id",
-            "from_stack_id", "stack", "stack_id", false);
-
-    dbAccessor.addFKConstraint("servicecomponent_history", "FK_sc_history_to_stack_id",
-            "to_stack_id", "stack", "stack_id", false);
-
-
-    expect(dbAccessor.getConnection()).andReturn(connection);
-    expect(connection.createStatement()).andReturn(statement);
-    expect(statement.executeQuery(anyObject(String.class))).andReturn(resultSet);
-
-    Capture<DBAccessor.DBColumnInfo> capturedClusterUpgradeColumnInfo = newCapture();
-    dbAccessor.addColumn(eq(UpgradeCatalog240.CLUSTER_TABLE), capture(capturedClusterUpgradeColumnInfo));
-    dbAccessor.addFKConstraint(UpgradeCatalog240.CLUSTER_TABLE, "FK_clusters_upgrade_id",
-            UpgradeCatalog240.CLUSTER_UPGRADE_ID_COLUMN, UpgradeCatalog240.UPGRADE_TABLE, "upgrade_id", false);
-
-    Capture<DBAccessor.DBColumnInfo> capturedHelpURLColumnInfo = newCapture();
-    Capture<DBAccessor.DBColumnInfo> capturedRepeatToleranceColumnInfo = newCapture();
-    Capture<DBAccessor.DBColumnInfo> capturedRepeatToleranceEnabledColumnInfo = newCapture();
-    Capture<DBAccessor.DBColumnInfo> capturedOccurrencesColumnInfo = newCapture();
-    Capture<DBAccessor.DBColumnInfo> capturedFirmnessColumnInfo = newCapture();
-    Capture<DBAccessor.DBColumnInfo> capturedTargetEnabledColumnInfo = newCapture();
-
-    dbAccessor.addColumn(eq(UpgradeCatalog240.ALERT_DEFINITION_TABLE), capture(capturedHelpURLColumnInfo));
-    dbAccessor.addColumn(eq(UpgradeCatalog240.ALERT_DEFINITION_TABLE), capture(capturedRepeatToleranceColumnInfo));
-    dbAccessor.addColumn(eq(UpgradeCatalog240.ALERT_DEFINITION_TABLE), capture(capturedRepeatToleranceEnabledColumnInfo));
-    dbAccessor.addColumn(eq(UpgradeCatalog240.ALERT_CURRENT_TABLE), capture(capturedOccurrencesColumnInfo));
-    dbAccessor.addColumn(eq(UpgradeCatalog240.ALERT_CURRENT_TABLE), capture(capturedFirmnessColumnInfo));
-    dbAccessor.addColumn(eq(UpgradeCatalog240.ALERT_TARGET_TABLE), capture(capturedTargetEnabledColumnInfo));
-
-    // Test creation of blueprint_setting table
-    Capture<List<DBAccessor.DBColumnInfo>> capturedBlueprintSettingColumns = EasyMock.newCapture();
-    dbAccessor.createTable(eq(UpgradeCatalog240.BLUEPRINT_SETTING_TABLE), capture(capturedBlueprintSettingColumns));
-    dbAccessor.addPKConstraint(UpgradeCatalog240.BLUEPRINT_SETTING_TABLE, "PK_blueprint_setting", UpgradeCatalog240.ID);
-    dbAccessor.addUniqueConstraint(UpgradeCatalog240.BLUEPRINT_SETTING_TABLE, "UQ_blueprint_setting_name",
-            UpgradeCatalog240.BLUEPRINT_NAME_COL, UpgradeCatalog240.SETTING_NAME_COL);
-    dbAccessor.addFKConstraint(UpgradeCatalog240.BLUEPRINT_SETTING_TABLE, "FK_blueprint_setting_name",
-            UpgradeCatalog240.BLUEPRINT_NAME_COL, UpgradeCatalog240.BLUEPRINT_TABLE,
-            UpgradeCatalog240.BLUEPRINT_NAME_COL, false);
-    expect(dbAccessor.getConnection()).andReturn(connection);
-    expect(connection.createStatement()).andReturn(statement);
-    expect(statement.executeQuery(anyObject(String.class))).andReturn(resultSet);
-
-    // Test host_role_command adds a column called original_start_time
-    Capture<DBAccessor.DBColumnInfo> hostRoleCommandOriginalStartTimeColumnInfo = newCapture();
-    dbAccessor.addColumn(eq(UpgradeCatalog240.HOST_ROLE_COMMAND_TABLE), capture(hostRoleCommandOriginalStartTimeColumnInfo));
-
-    Capture<List<DBAccessor.DBColumnInfo>> capturedViewUrlColums = EasyMock.newCapture();
-    dbAccessor.createTable(eq(UpgradeCatalog240.VIEWURL_TABLE), capture(capturedViewUrlColums),eq("url_id"));
-    expect(dbAccessor.getConnection()).andReturn(connection);
-    expect(connection.createStatement()).andReturn(statement);
-
-    Capture<DBAccessor.DBColumnInfo> viewInstanceShortUrlInfo = newCapture();
-    dbAccessor.addColumn(eq(UpgradeCatalog240.VIEWINSTANCE_TABLE), capture(viewInstanceShortUrlInfo));
-
-    dbAccessor.addFKConstraint(UpgradeCatalog240.VIEWINSTANCE_TABLE, "FK_instance_url_id",
-            UpgradeCatalog240.SHORT_URL_COLUMN, UpgradeCatalog240.VIEWURL_TABLE, "url_id", false);
-
-    Capture<DBAccessor.DBColumnInfo> viewInstanceClusterType = newCapture();
-    dbAccessor.addColumn(eq(UpgradeCatalog240.VIEWINSTANCE_TABLE), capture(viewInstanceClusterType));
-
-    // Test remote Cluster Tables
-    Capture<List<DBAccessor.DBColumnInfo>> capturedRemoteAmbariClusterColumns = EasyMock.newCapture();
-    dbAccessor.createTable(eq(UpgradeCatalog240.REMOTE_AMBARI_CLUSTER_TABLE), capture(capturedRemoteAmbariClusterColumns),anyString());
-    dbAccessor.addUniqueConstraint(UpgradeCatalog240.REMOTE_AMBARI_CLUSTER_TABLE , "UQ_remote_ambari_cluster" , UpgradeCatalog240.CLUSTER_NAME);
-    expect(dbAccessor.getConnection()).andReturn(connection);
-    expect(connection.createStatement()).andReturn(statement);
-
-    Capture<List<DBAccessor.DBColumnInfo>> capturedRemoteClusterServiceColumns = EasyMock.newCapture();
-    dbAccessor.createTable(eq(UpgradeCatalog240.REMOTE_AMBARI_CLUSTER_SERVICE_TABLE), capture(capturedRemoteClusterServiceColumns),anyString());
-    dbAccessor.addFKConstraint(UpgradeCatalog240.REMOTE_AMBARI_CLUSTER_SERVICE_TABLE, "FK_remote_ambari_cluster_id",
-      UpgradeCatalog240.CLUSTER_ID, UpgradeCatalog240.REMOTE_AMBARI_CLUSTER_TABLE, UpgradeCatalog240.CLUSTER_ID, false);
-    expect(dbAccessor.getConnection()).andReturn(connection);
-    expect(connection.createStatement()).andReturn(statement);
-
-    // Test viewInstance update
-    expect(dbAccessor.getColumnClass(UpgradeCatalog240.VIEWINSTANCE_TABLE, UpgradeCatalog240.CLUSTER_HANDLE_COLUMN)).andReturn(String.class);
-    dbAccessor.addColumn(eq(UpgradeCatalog240.VIEWINSTANCE_TABLE), anyObject(DBAccessor.DBColumnInfo.class));
-
-    expect(dbAccessor.getConnection()).andReturn(connection);
-    expect(connection.createStatement()).andReturn(statement);
-
-    dbAccessor.dropColumn(UpgradeCatalog240.VIEWINSTANCE_TABLE, UpgradeCatalog240.CLUSTER_HANDLE_COLUMN);
-
-    Capture<DBAccessor.DBColumnInfo> capturedClusterHandleColumn = EasyMock.newCapture();
-    dbAccessor.renameColumn(eq(UpgradeCatalog240.VIEWINSTANCE_TABLE), anyString(), capture(capturedClusterHandleColumn));
-    Capture<DBAccessor.DBColumnInfo> requestScheduleUserIdInfo = newCapture();
-    dbAccessor.addColumn(eq(UpgradeCatalog240.REQUESTSCHEDULE_TABLE), capture(requestScheduleUserIdInfo));
-
-    Capture<DBAccessor.DBColumnInfo> provisionActionColumnInfo = newCapture();
-    dbAccessor.addColumn(eq(UpgradeCatalog240.TOPOLOGY_REQUEST_TABLE), capture(provisionActionColumnInfo));
-
-    replay(dbAccessor, configuration, connection, statement, resultSet);
-
-    Module module = new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(DBAccessor.class).toInstance(dbAccessor);
-        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        binder.bind(EntityManager.class).toInstance(entityManager);
-        binder.bind(PasswordEncoder.class).toInstance(createNiceMock(PasswordEncoder.class));
-        binder.bind(HookContextFactory.class).toInstance(createMock(HookContextFactory.class));
-        binder.bind(HookService.class).toInstance(createMock(HookService.class));
-      }
-      };
-
-    Injector injector = Guice.createInjector(module);
-    UpgradeCatalog240 upgradeCatalog240 = injector.getInstance(UpgradeCatalog240.class);
-    upgradeCatalog240.executeDDLUpdates();
-
-    DBAccessor.DBColumnInfo columnSortOrderInfo = capturedSortOrderColumnInfo.getValue();
-    Assert.assertNotNull(columnSortOrderInfo);
-    Assert.assertEquals(UpgradeCatalog240.SORT_ORDER_COL, columnSortOrderInfo.getName());
-    Assert.assertEquals(null, columnSortOrderInfo.getLength());
-    Assert.assertEquals(Short.class, columnSortOrderInfo.getType());
-    Assert.assertEquals(1, columnSortOrderInfo.getDefaultValue());
-    Assert.assertEquals(false, columnSortOrderInfo.isNullable());
-
-    DBAccessor.DBColumnInfo columnPrincipalIDInfo = capturedPermissionIDColumnInfo.getValue();
-    Assert.assertNotNull(columnPrincipalIDInfo);
-    Assert.assertEquals(UpgradeCatalog240.PRINCIPAL_ID_COL, columnPrincipalIDInfo.getName());
-    Assert.assertEquals(null, columnPrincipalIDInfo.getLength());
-    Assert.assertEquals(Long.class, columnPrincipalIDInfo.getType());
-    Assert.assertEquals(null, columnPrincipalIDInfo.getDefaultValue());
-    Assert.assertEquals(true, columnPrincipalIDInfo.isNullable());
-
-    // Verify if recovery_enabled column was added to servicecomponentdesiredstate table
-    DBAccessor.DBColumnInfo columnScInfo = capturedScColumnInfo.getValue();
-    Assert.assertNotNull(columnScInfo);
-    Assert.assertEquals(UpgradeCatalog240.RECOVERY_ENABLED_COL, columnScInfo.getName());
-    Assert.assertEquals(null, columnScInfo.getLength());
-    Assert.assertEquals(Short.class, columnScInfo.getType());
-    Assert.assertEquals(0, columnScInfo.getDefaultValue());
-    Assert.assertEquals(false, columnScInfo.isNullable());
-
-    DBAccessor.DBColumnInfo columnScDesiredVersionInfo = capturedScDesiredVersionColumnInfo.getValue();
-    Assert.assertNotNull(columnScDesiredVersionInfo);
-    Assert.assertEquals(UpgradeCatalog240.DESIRED_VERSION_COLUMN_NAME, columnScDesiredVersionInfo.getName());
-    Assert.assertEquals(Integer.valueOf(255), columnScDesiredVersionInfo.getLength());
-    Assert.assertEquals(String.class, columnScDesiredVersionInfo.getType());
-    Assert.assertEquals("UNKNOWN", columnScDesiredVersionInfo.getDefaultValue());
-    Assert.assertEquals(false, columnScDesiredVersionInfo.isNullable());
-
-    // Verify if upgrade_id column was added to clusters table
-    DBAccessor.DBColumnInfo clusterUpgradeColumnInfo = capturedClusterUpgradeColumnInfo.getValue();
-    Assert.assertNotNull(clusterUpgradeColumnInfo);
-    Assert.assertEquals(UpgradeCatalog240.CLUSTER_UPGRADE_ID_COLUMN, clusterUpgradeColumnInfo.getName());
-    Assert.assertEquals(null, clusterUpgradeColumnInfo.getLength());
-    Assert.assertEquals(Long.class, clusterUpgradeColumnInfo.getType());
-    Assert.assertEquals(null, clusterUpgradeColumnInfo.getDefaultValue());
-    Assert.assertEquals(true, clusterUpgradeColumnInfo.isNullable());
-
-    Map<String, Class> expectedCaptures = new HashMap<>();
-    expectedCaptures.put("id", Long.class);
-    expectedCaptures.put("name", String.class);
-    expectedCaptures.put("setting_type", String.class);
-    expectedCaptures.put("content", String.class);
-    expectedCaptures.put("updated_by", String.class);
-    expectedCaptures.put("update_timestamp", Long.class);
-
-    Map<String, Class> actualCaptures = new HashMap<>();
-    for(DBAccessor.DBColumnInfo settingColumnInfo : capturedSettingColumns.getValue()) {
-      actualCaptures.put(settingColumnInfo.getName(), settingColumnInfo.getType());
-    }
-
-    assertEquals(expectedCaptures, actualCaptures);
-
-    expectedCaptures = new HashMap<>();
-    expectedCaptures.put("extension_id", Long.class);
-    expectedCaptures.put("extension_name", String.class);
-    expectedCaptures.put("extension_version", String.class);
-
-    actualCaptures = new HashMap<>();
-    for(DBAccessor.DBColumnInfo settingColumnInfo : capturedExtensionColumns.getValue()) {
-      actualCaptures.put(settingColumnInfo.getName(), settingColumnInfo.getType());
-    }
-
-    assertEquals(expectedCaptures, actualCaptures);
-
-
-    expectedCaptures = new HashMap<>();
-    expectedCaptures.put("link_id", Long.class);
-    expectedCaptures.put("stack_id", Long.class);
-    expectedCaptures.put("extension_id", Long.class);
-
-    actualCaptures = new HashMap<>();
-    for(DBAccessor.DBColumnInfo settingColumnInfo : capturedExtensionLinkColumns.getValue()) {
-      actualCaptures.put(settingColumnInfo.getName(), settingColumnInfo.getType());
-    }
-
-    assertEquals(expectedCaptures, actualCaptures);
-
-    expectedCaptures = new HashMap<>();
-    expectedCaptures.put("id", Long.class);
-    expectedCaptures.put("component_id", Long.class);
-    expectedCaptures.put("upgrade_id", Long.class);
-    expectedCaptures.put("from_stack_id", Long.class);
-    expectedCaptures.put("to_stack_id", Long.class);
-
-    actualCaptures = new HashMap<>();
-    for (DBAccessor.DBColumnInfo historyColumnInfo : capturedHistoryColumns.getValue()) {
-      actualCaptures.put(historyColumnInfo.getName(), historyColumnInfo.getType());
-    }
-
-    DBAccessor.DBColumnInfo columnHelpURLInfo = capturedHelpURLColumnInfo.getValue();
-    Assert.assertNotNull(columnHelpURLInfo);
-    Assert.assertEquals(UpgradeCatalog240.HELP_URL_COLUMN, columnHelpURLInfo.getName());
-    Assert.assertEquals(Integer.valueOf(512), columnHelpURLInfo.getLength());
-    Assert.assertEquals(String.class, columnHelpURLInfo.getType());
-    Assert.assertEquals(null, columnHelpURLInfo.getDefaultValue());
-    Assert.assertEquals(true, columnHelpURLInfo.isNullable());
-
-    DBAccessor.DBColumnInfo columnRepeatToleranceInfo = capturedRepeatToleranceColumnInfo.getValue();
-    Assert.assertNotNull(columnRepeatToleranceInfo);
-    Assert.assertEquals(UpgradeCatalog240.REPEAT_TOLERANCE_COLUMN, columnRepeatToleranceInfo.getName());
-    Assert.assertEquals(Integer.class, columnRepeatToleranceInfo.getType());
-    Assert.assertEquals(1, columnRepeatToleranceInfo.getDefaultValue());
-    Assert.assertEquals(false, columnRepeatToleranceInfo.isNullable());
-
-    DBAccessor.DBColumnInfo columnRepeatToleranceEnabledInfo = capturedRepeatToleranceEnabledColumnInfo.getValue();
-    Assert.assertNotNull(columnRepeatToleranceEnabledInfo);
-    Assert.assertEquals(UpgradeCatalog240.REPEAT_TOLERANCE_ENABLED_COLUMN, columnRepeatToleranceEnabledInfo.getName());
-    Assert.assertEquals(Short.class, columnRepeatToleranceEnabledInfo.getType());
-    Assert.assertEquals(0, columnRepeatToleranceEnabledInfo.getDefaultValue());
-    Assert.assertEquals(false, columnRepeatToleranceEnabledInfo.isNullable());
-
-    DBAccessor.DBColumnInfo columnOccurrencesInfo = capturedOccurrencesColumnInfo.getValue();
-    Assert.assertNotNull(columnOccurrencesInfo);
-    Assert.assertEquals(UpgradeCatalog240.ALERT_CURRENT_OCCURRENCES_COLUMN, columnOccurrencesInfo.getName());
-    Assert.assertEquals(Long.class, columnOccurrencesInfo.getType());
-    Assert.assertEquals(1, columnOccurrencesInfo.getDefaultValue());
-    Assert.assertEquals(false, columnOccurrencesInfo.isNullable());
-
-    DBAccessor.DBColumnInfo columnFirmnessInfo = capturedFirmnessColumnInfo.getValue();
-    Assert.assertNotNull(columnFirmnessInfo);
-    Assert.assertEquals(UpgradeCatalog240.ALERT_CURRENT_FIRMNESS_COLUMN, columnFirmnessInfo.getName());
-    Assert.assertEquals(String.class, columnFirmnessInfo.getType());
-    Assert.assertEquals(AlertFirmness.HARD.name(), columnFirmnessInfo.getDefaultValue());
-    Assert.assertEquals(false, columnFirmnessInfo.isNullable());
-
-    DBAccessor.DBColumnInfo targetEnabledColumnInfo = capturedTargetEnabledColumnInfo.getValue();
-    Assert.assertNotNull(targetEnabledColumnInfo);
-    Assert.assertEquals(UpgradeCatalog240.ALERT_TARGET_ENABLED_COLUMN, targetEnabledColumnInfo.getName());
-    Assert.assertEquals(Short.class, targetEnabledColumnInfo.getType());
-    Assert.assertEquals(1, targetEnabledColumnInfo.getDefaultValue());
-    Assert.assertEquals(false, targetEnabledColumnInfo.isNullable());
-
-    assertEquals(expectedCaptures, actualCaptures);
-
-    // Verify blueprint_setting columns
-    expectedCaptures = new HashMap<>();
-    expectedCaptures.put(UpgradeCatalog240.ID, Long.class);
-    expectedCaptures.put(UpgradeCatalog240.BLUEPRINT_NAME_COL, String.class);
-    expectedCaptures.put(UpgradeCatalog240.SETTING_NAME_COL, String.class);
-    expectedCaptures.put(UpgradeCatalog240.SETTING_DATA_COL, char[].class);
-
-    actualCaptures = new HashMap<>();
-    for(DBAccessor.DBColumnInfo blueprintSettingsColumnInfo : capturedBlueprintSettingColumns.getValue()) {
-      actualCaptures.put(blueprintSettingsColumnInfo.getName(), blueprintSettingsColumnInfo.getType());
-    }
-
-    assertEquals(expectedCaptures, actualCaptures);
-
-    // Verify host_role_command column
-    DBAccessor.DBColumnInfo originalStartTimeInfo = hostRoleCommandOriginalStartTimeColumnInfo.getValue();
-    Assert.assertNotNull(originalStartTimeInfo);
-    Assert.assertEquals("original_start_time", originalStartTimeInfo.getName());
-    Assert.assertEquals(Long.class, originalStartTimeInfo.getType());
-    Assert.assertEquals(-1L, originalStartTimeInfo.getDefaultValue());
-
-    DBAccessor.DBColumnInfo viewInstanceEntityUrlColInfoValue = viewInstanceShortUrlInfo.getValue();
-    Assert.assertNotNull(viewInstanceEntityUrlColInfoValue);
-    Assert.assertEquals("short_url", viewInstanceEntityUrlColInfoValue.getName());
-    Assert.assertEquals(Long.class, viewInstanceEntityUrlColInfoValue.getType());
-
-    List<DBAccessor.DBColumnInfo> capturedViewUrlColumsValue = capturedViewUrlColums.getValue();
-    Assert.assertNotNull(capturedViewUrlColumsValue);
-    Assert.assertEquals(3, capturedViewUrlColumsValue.size());
-
-    // Verify cluster_type column
-    DBAccessor.DBColumnInfo viewInstanceEntityClusterTypeValue = viewInstanceClusterType.getValue();
-    Assert.assertNotNull(viewInstanceClusterType);
-    Assert.assertEquals("cluster_type", viewInstanceEntityClusterTypeValue.getName());
-    Assert.assertEquals(String.class, viewInstanceEntityClusterTypeValue.getType());
-
-    List<DBAccessor.DBColumnInfo> capturedRemoteAmbariClusterColumnsValue = capturedRemoteAmbariClusterColumns.getValue();
-    Assert.assertNotNull(capturedRemoteAmbariClusterColumnsValue);
-    Assert.assertEquals(5, capturedRemoteAmbariClusterColumnsValue.size());
-
-    List<DBAccessor.DBColumnInfo> capturedRemoteClusterServiceColumnsValue = capturedRemoteClusterServiceColumns.getValue();
-    Assert.assertNotNull(capturedRemoteClusterServiceColumnsValue);
-    Assert.assertEquals(3, capturedRemoteClusterServiceColumnsValue.size());
-
-    DBAccessor.DBColumnInfo clusterHandleColumn = capturedClusterHandleColumn.getValue();
-    Assert.assertEquals(UpgradeCatalog240.CLUSTER_HANDLE_COLUMN, clusterHandleColumn.getName());
-    Assert.assertEquals(Long.class, clusterHandleColumn.getType());
-
-    // Verify authenticated_user_id column
-    DBAccessor.DBColumnInfo requestScheduleUserIdInfoValue = requestScheduleUserIdInfo.getValue();
-    Assert.assertNotNull(requestScheduleUserIdInfoValue);
-    Assert.assertEquals("authenticated_user_id", requestScheduleUserIdInfoValue.getName());
-    Assert.assertEquals(Integer.class, requestScheduleUserIdInfoValue.getType());
-    Assert.assertEquals(null, requestScheduleUserIdInfoValue.getDefaultValue());
-
-    DBAccessor.DBColumnInfo provisionActionColumnInfoValue = provisionActionColumnInfo.getValue();
-    Assert.assertNotNull(provisionActionColumnInfoValue);
-    Assert.assertEquals(UpgradeCatalog240.PROVISION_ACTION_COL, provisionActionColumnInfoValue.getName());
-    Assert.assertEquals(String.class, provisionActionColumnInfoValue.getType());
-    Assert.assertEquals(true, provisionActionColumnInfoValue.isNullable());
-
-    verify(dbAccessor);
-  }
-
-  @Test
-  public void testExecuteDMLUpdates() throws Exception {
-    Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
-    Method updateAlerts = UpgradeCatalog240.class.getDeclaredMethod("updateAlerts");
-    Method addManageUserPersistedDataPermission = UpgradeCatalog240.class.getDeclaredMethod("addManageUserPersistedDataPermission");
-    Method allowClusterOperatorToManageCredentials = UpgradeCatalog240.class.getDeclaredMethod("allowClusterOperatorToManageCredentials");
-    Method addSettingPermission = UpgradeCatalog240.class.getDeclaredMethod("addSettingPermission");
-    Method addViewOperationalLogsPermission = UpgradeCatalog240.class.getDeclaredMethod("addViewOperationalLogsPermission");
-    Method updateHDFSConfigs = UpgradeCatalog240.class.getDeclaredMethod("updateHDFSConfigs");
-    Method updateHIVEConfigs = UpgradeCatalog240.class.getDeclaredMethod("updateHIVEConfigs");
-    Method updateAmsConfigs = UpgradeCatalog240.class.getDeclaredMethod("updateAMSConfigs");
-    Method updateClusterEnv = UpgradeCatalog240.class.getDeclaredMethod("updateClusterEnv");
-    Method updateSequenceForView = UpgradeCatalog240.class.getDeclaredMethod("updateSequenceForView");
-    Method adjustHiveJobTimestamps = UpgradeCatalog240.class.getDeclaredMethod("adjustHiveJobTimestamps");
-    Method updateHostRoleCommandTableDML = UpgradeCatalog240.class.getDeclaredMethod("updateHostRoleCommandTableDML");
-    Method updateKerberosEnv = UpgradeCatalog240.class.getDeclaredMethod("updateKerberosConfigs");
-    Method updateYarnEnv = UpgradeCatalog240.class.getDeclaredMethod("updateYarnEnv");
-    Method removeHiveOozieDBConnectionConfigs = UpgradeCatalog240.class.getDeclaredMethod("removeHiveOozieDBConnectionConfigs");
-    Method updateClustersAndHostsVersionStateTableDML = UpgradeCatalog240.class.getDeclaredMethod("updateClustersAndHostsVersionStateTableDML");
-    Method removeStandardDeviationAlerts = UpgradeCatalog240.class.getDeclaredMethod("removeStandardDeviationAlerts");
-    Method consolidateUserRoles = UpgradeCatalog240.class.getDeclaredMethod("consolidateUserRoles");
-    Method updateClusterInheritedPermissionsConfig = UpgradeCatalog240.class.getDeclaredMethod("updateClusterInheritedPermissionsConfig");
-    Method createRolePrincipals = UpgradeCatalog240.class.getDeclaredMethod("createRolePrincipals");
-    Method updateHDFSWidget = UpgradeCatalog240.class.getDeclaredMethod("updateHDFSWidgetDefinition");
-    Method upgradeCapSchedulerView = UpgradeCatalog240.class.getDeclaredMethod("upgradeCapSchedulerView");
-    Method updatePhoenixConfigs = UpgradeCatalog240.class.getDeclaredMethod("updatePhoenixConfigs");
-    Method updateSparkConfigs = UpgradeCatalog240.class.getDeclaredMethod("updateSparkConfigs");
-    Method updateHBaseConfigs = UpgradeCatalog240.class.getDeclaredMethod("updateHBaseConfigs");
-    Method updateKerberosDescriptorArtifacts = AbstractUpgradeCatalog.class.getDeclaredMethod("updateKerberosDescriptorArtifacts");
-    Method updateFalconConfigs = UpgradeCatalog240.class.getDeclaredMethod("updateFalconConfigs");
-    Method fixAuthorizationDescriptions = UpgradeCatalog240.class.getDeclaredMethod("fixAuthorizationDescriptions");
-    Method removeAuthorizations = UpgradeCatalog240.class.getDeclaredMethod("removeAuthorizations");
-    Method addConnectionTimeoutParamForWebAndMetricAlerts = AbstractUpgradeCatalog.class.getDeclaredMethod("addConnectionTimeoutParamForWebAndMetricAlerts");
-    Method addSliderClientConfig = UpgradeCatalog240.class.getDeclaredMethod("addSliderClientConfig");
-    Method updateRequestScheduleEntityUserIds = UpgradeCatalog240.class.getDeclaredMethod("updateRequestScheduleEntityUserIds");
-    Method updateRecoveryConfigurationDML = UpgradeCatalog240.class.getDeclaredMethod("updateRecoveryConfigurationDML");
-    Method removeAtlasMetaserverAlert = UpgradeCatalog240.class.getDeclaredMethod("removeAtlasMetaserverAlert");
-    Method updateRangerHbasePluginProperties = UpgradeCatalog240.class.getDeclaredMethod("updateRangerHbasePluginProperties");
-    Method updateKAFKAConfigs = UpgradeCatalog240.class.getDeclaredMethod("updateKAFKAConfigs");
-
-    Capture<String> capturedStatements = newCapture(CaptureType.ALL);
-
-    DBAccessor dbAccessor = createStrictMock(DBAccessor.class);
-    expect(dbAccessor.executeUpdate(capture(capturedStatements))).andReturn(1).times(7);
-
-    Capture<String> capturedTezViewUpdate = newCapture();
-    expect(dbAccessor.executeUpdate(capture(capturedTezViewUpdate))).andReturn(1).once();
-
-    Capture<String> capturedPigSmokeTestEntityUpdate = newCapture();
-    expect(dbAccessor.executeUpdate(capture(capturedPigSmokeTestEntityUpdate))).andReturn(1).once();
-
-    UpgradeCatalog240 upgradeCatalog240 = createMockBuilder(UpgradeCatalog240.class)
-            .addMockedMethod(addNewConfigurationsFromXml)
-            .addMockedMethod(updateAlerts)
-            .addMockedMethod(addSettingPermission)
-            .addMockedMethod(addViewOperationalLogsPermission)
-            .addMockedMethod(addManageUserPersistedDataPermission)
-            .addMockedMethod(allowClusterOperatorToManageCredentials)
-            .addMockedMethod(updateHDFSConfigs)
-            .addMockedMethod(updateHIVEConfigs)
-            .addMockedMethod(updateAmsConfigs)
-            .addMockedMethod(updateClusterEnv)
-            .addMockedMethod(updateSequenceForView)
-            .addMockedMethod(adjustHiveJobTimestamps)
-            .addMockedMethod(updateHostRoleCommandTableDML)
-            .addMockedMethod(updateKerberosEnv)
-            .addMockedMethod(updateYarnEnv)
-            .addMockedMethod(removeHiveOozieDBConnectionConfigs)
-            .addMockedMethod(updateClustersAndHostsVersionStateTableDML)
-            .addMockedMethod(removeStandardDeviationAlerts)
-            .addMockedMethod(consolidateUserRoles)
-            .addMockedMethod(updateClusterInheritedPermissionsConfig)
-            .addMockedMethod(createRolePrincipals)
-            .addMockedMethod(updateHDFSWidget)
-            .addMockedMethod(upgradeCapSchedulerView)
-            .addMockedMethod(updatePhoenixConfigs)
-            .addMockedMethod(updateSparkConfigs)
-            .addMockedMethod(updateKerberosDescriptorArtifacts)
-            .addMockedMethod(updateFalconConfigs)
-            .addMockedMethod(fixAuthorizationDescriptions)
-            .addMockedMethod(removeAuthorizations)
-            .addMockedMethod(addConnectionTimeoutParamForWebAndMetricAlerts)
-            .addMockedMethod(updateHBaseConfigs)
-            .addMockedMethod(addSliderClientConfig)
-            .addMockedMethod(updateRequestScheduleEntityUserIds)
-            .addMockedMethod(updateRecoveryConfigurationDML)
-            .addMockedMethod(removeAtlasMetaserverAlert)
-            .addMockedMethod(updateRangerHbasePluginProperties)
-            .addMockedMethod(updateKAFKAConfigs)
-            .createMock();
-
-    Field field = AbstractUpgradeCatalog.class.getDeclaredField("dbAccessor");
-    field.set(upgradeCatalog240, dbAccessor);
-
-    upgradeCatalog240.addNewConfigurationsFromXml();
-    upgradeCatalog240.updateAlerts();
-    upgradeCatalog240.addSettingPermission();
-    upgradeCatalog240.addViewOperationalLogsPermission();
-    upgradeCatalog240.addManageUserPersistedDataPermission();
-    upgradeCatalog240.allowClusterOperatorToManageCredentials();
-    upgradeCatalog240.updateHDFSConfigs();
-    upgradeCatalog240.updateHIVEConfigs();
-    upgradeCatalog240.updateAMSConfigs();
-    upgradeCatalog240.updateClusterEnv();
-    upgradeCatalog240.updateSequenceForView();
-    upgradeCatalog240.updateHostRoleCommandTableDML();
-    upgradeCatalog240.updateKerberosConfigs();
-    upgradeCatalog240.updateYarnEnv();
-    upgradeCatalog240.removeHiveOozieDBConnectionConfigs();
-    upgradeCatalog240.updateClustersAndHostsVersionStateTableDML();
-    upgradeCatalog240.removeStandardDeviationAlerts();
-    upgradeCatalog240.consolidateUserRoles();
-    upgradeCatalog240.createRolePrincipals();
-    upgradeCatalog240.updateClusterInheritedPermissionsConfig();
-    upgradeCatalog240.updateHDFSWidgetDefinition();
-    upgradeCatalog240.upgradeCapSchedulerView();
-    upgradeCatalog240.updatePhoenixConfigs();
-    upgradeCatalog240.updateSparkConfigs();
-    upgradeCatalog240.updateKerberosDescriptorArtifacts();
-    upgradeCatalog240.updateFalconConfigs();
-    upgradeCatalog240.fixAuthorizationDescriptions();
-    upgradeCatalog240.removeAuthorizations();
-    upgradeCatalog240.addConnectionTimeoutParamForWebAndMetricAlerts();
-    upgradeCatalog240.updateHBaseConfigs();
-    upgradeCatalog240.addSliderClientConfig();
-    upgradeCatalog240.updateRequestScheduleEntityUserIds();
-    upgradeCatalog240.updateRecoveryConfigurationDML();
-    upgradeCatalog240.removeAtlasMetaserverAlert();
-    upgradeCatalog240.updateRangerHbasePluginProperties();
-    upgradeCatalog240.adjustHiveJobTimestamps();
-    upgradeCatalog240.updateKAFKAConfigs();
-
-    replay(upgradeCatalog240, dbAccessor);
-
-    upgradeCatalog240.executeDMLUpdates();
-
-    verify(upgradeCatalog240, dbAccessor);
-
-    List<String> statements = capturedStatements.getValues();
-    Assert.assertNotNull(statements);
-    Assert.assertEquals(7, statements.size());
-    Assert.assertTrue(statements.contains("UPDATE adminpermission SET sort_order=1 WHERE permission_name='AMBARI.ADMINISTRATOR'"));
-    Assert.assertTrue(statements.contains("UPDATE adminpermission SET sort_order=2 WHERE permission_name='CLUSTER.ADMINISTRATOR'"));
-    Assert.assertTrue(statements.contains("UPDATE adminpermission SET sort_order=3 WHERE permission_name='CLUSTER.OPERATOR'"));
-    Assert.assertTrue(statements.contains("UPDATE adminpermission SET sort_order=4 WHERE permission_name='SERVICE.ADMINISTRATOR'"));
-    Assert.assertTrue(statements.contains("UPDATE adminpermission SET sort_order=5 WHERE permission_name='SERVICE.OPERATOR'"));
-    Assert.assertTrue(statements.contains("UPDATE adminpermission SET sort_order=6 WHERE permission_name='CLUSTER.USER'"));
-    Assert.assertTrue(statements.contains("UPDATE adminpermission SET sort_order=7 WHERE permission_name='VIEW.USER'"));
-
-    Assert.assertNotNull(capturedTezViewUpdate.getValue());
-    Assert.assertEquals("UPDATE viewinstanceproperty SET name = 'yarn.ats.url' where name = 'yarn.timeline-server.url'",
-      capturedTezViewUpdate.getValue());
-
-    Assert.assertNotNull(capturedPigSmokeTestEntityUpdate.getValue());
-    Assert.assertEquals("UPDATE viewentity " +
-        "SET class_name = 'org.apache.ambari.view.pig.persistence.SmokeTestEntity' " +
-        "WHERE class_name = 'org.apache.ambari.view.pig.persistence.DataStoreStorage$SmokeTestEntity'",
-      capturedPigSmokeTestEntityUpdate.getValue());
-  }
-
-  @Test
-  public void testRemoveHiveOozieDBConnectionConfigs() throws Exception{
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(
-            AmbariManagementController.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-
-    final Config mockOozieEnv = easyMockSupport.createNiceMock(Config.class);
-    final Config mockHiveEnv = easyMockSupport.createNiceMock(Config.class);
-
-    final Map<String, String> propertiesExpectedOozieEnv = new HashMap<>();
-    propertiesExpectedOozieEnv.put("oozie_derby_database", "Derby");
-    propertiesExpectedOozieEnv.put("property", "value");
-    // Imitate missing property
-    // propertiesExpectedOozieEnv.put("oozie_hostname", "hostname");
-    final Map<String, String> propertiesExpectedHiveEnv = new HashMap<>();
-    propertiesExpectedHiveEnv.put("hive_hostname", "hostname");
-
-    final Injector mockInjector = Guice.createInjector(new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        binder.bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        binder.bind(Clusters.class).toInstance(mockClusters);
-        binder.bind(EntityManager.class).toInstance(entityManager);
-        binder.bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        binder.bind(PasswordEncoder.class).toInstance(createNiceMock(PasswordEncoder.class));
-        binder.bind(HookContextFactory.class).toInstance(createMock(HookContextFactory.class));
-        binder.bind(HookService.class).toInstance(createMock(HookService.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    expect(mockClusterExpected.getDesiredConfigByType("oozie-env")).andReturn(mockOozieEnv).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("hive-env")).andReturn(mockHiveEnv).atLeastOnce();
-    expect(mockOozieEnv.getProperties()).andReturn(propertiesExpectedOozieEnv).anyTimes();
-    expect(mockHiveEnv.getProperties()).andReturn(propertiesExpectedHiveEnv).anyTimes();
-
-    Capture<Map<String, String>> oozieCapture =  newCapture();
-    Capture<Map<String, String>> hiveCapture =  newCapture();
-    expect(mockAmbariManagementController.createConfig(eq(mockClusterExpected), anyObject(StackId.class), eq("oozie-env"),
-        capture(oozieCapture), anyString(), EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(null).once();
-    expect(mockAmbariManagementController.createConfig(eq(mockClusterExpected), anyObject(StackId.class), eq("hive-env"),
-            capture(hiveCapture), anyString(), EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(null).once();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog240.class).removeHiveOozieDBConnectionConfigs();
-    easyMockSupport.verifyAll();
-
-    assertEquals("value", oozieCapture.getValue().get("property"));
-    assertNull(oozieCapture.getValue().get("oozie_derby_database"));
-    assertNull(oozieCapture.getValue().get("oozie_hostname"));
-    assertNull(hiveCapture.getValue().get("hive_hostname"));
-  }
-
-  @Test
-  public void testUpdateFalconConfigs() throws Exception{
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(
-            AmbariManagementController.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-
-    final Config mockFalconEnv = easyMockSupport.createNiceMock(Config.class);
-
-    final Map<String, String> propertiesExpectedFalconEnv = new HashMap<>();
-    propertiesExpectedFalconEnv.put("falcon_store_uri", "file:///hadoop/falcon/store");
-    propertiesExpectedFalconEnv.put("content", "# content");
-    propertiesExpectedFalconEnv.put("property", "value");
-
-    final String applicationServicesOldPropertyValue =
-        "org.apache.falcon.security.AuthenticationInitializationService,\\\n" +
-        "      org.apache.falcon.workflow.WorkflowJobEndNotificationService, \\\n" +
-        "      org.apache.falcon.service.ProcessSubscriberService,\\\n" +
-        "      org.apache.falcon.entity.store.ConfigurationStore,\\\n" +
-        "      org.apache.falcon.rerun.service.RetryService,\\\n" +
-        "      org.apache.falcon.rerun.service.LateRunService,\\\n" +
-        "      org.apache.falcon.service.LogCleanupService,\\\n" +
-        "      org.apache.falcon.metadata.MetadataMappingService";
-
-    final String applicationServicesExpectedPropertyValue =
-        "org.apache.falcon.security.AuthenticationInitializationService,\\\n" +
-        "      org.apache.falcon.workflow.WorkflowJobEndNotificationService, \\\n" +
-        "      org.apache.falcon.service.ProcessSubscriberService,\\\n" +
-        "      org.apache.falcon.entity.store.ConfigurationStore,\\\n" +
-        "      org.apache.falcon.rerun.service.RetryService,\\\n" +
-        "      org.apache.falcon.rerun.service.LateRunService,\\\n" +
-        "      org.apache.falcon.service.LogCleanupService,\\\n" +
-        "      org.apache.falcon.metadata.MetadataMappingService{{atlas_application_class_addition}}";
-
-    final Config falconStartupConfig = easyMockSupport.createNiceMock(Config.class);
-
-    final Map<String, String> falconStartupConfigProperties= new HashMap<>();
-    falconStartupConfigProperties.put("*.application.services", applicationServicesOldPropertyValue);
-    falconStartupConfigProperties.put("property", "value");
-    final Injector mockInjector = Guice.createInjector(new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        binder.bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        binder.bind(Clusters.class).toInstance(mockClusters);
-        binder.bind(EntityManager.class).toInstance(entityManager);
-        binder.bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        binder.bind(PasswordEncoder.class).toInstance(createNiceMock(PasswordEncoder.class));
-        binder.bind(HookContextFactory.class).toInstance(createMock(HookContextFactory.class));
-        binder.bind(HookService.class).toInstance(createMock(HookService.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    expect(mockClusterExpected.getDesiredConfigByType("falcon-env")).andReturn(mockFalconEnv).atLeastOnce();
-    expect(mockFalconEnv.getProperties()).andReturn(propertiesExpectedFalconEnv).anyTimes();
-
-    expect(mockClusterExpected.getDesiredConfigByType("falcon-startup.properties")).andReturn(falconStartupConfig).atLeastOnce();
-    expect(falconStartupConfig.getProperties()).andReturn(falconStartupConfigProperties).anyTimes();
-
-    Capture<Map<String, String>> falconCapture =  newCapture();
-    expect(mockAmbariManagementController.createConfig(eq(mockClusterExpected), anyObject(StackId.class),  eq("falcon-env"),
-        capture(falconCapture), anyString(), EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(null).once();
-
-    Capture<Map<String, String>> falconCapture2 =  newCapture();
-    expect(mockAmbariManagementController.createConfig(eq(mockClusterExpected), anyObject(StackId.class), eq("falcon-env"),
-        capture(falconCapture2), anyString(), EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(null).once();
-
-    Capture<Map<String, String>> falconStartupCapture =  newCapture();
-    expect(mockAmbariManagementController.createConfig(eq(mockClusterExpected), anyObject(StackId.class), eq("falcon-startup.properties"),
-        capture(falconStartupCapture), anyString(), EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(null).once();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog240.class).updateFalconConfigs();
-    easyMockSupport.verifyAll();
-
-    final String expectedEnvContent = "# content\n" +
-                                       "\n" +
-                                       "{% if falcon_atlas_support %}\n" +
-                                       "# Add the Atlas Falcon hook to the Falcon classpath\n" +
-                                       "export FALCON_EXTRA_CLASS_PATH={{atlas_hook_cp}}${FALCON_EXTRA_CLASS_PATH}\n" +
-                                       "{% endif %}";
-
-    assertEquals("value", falconCapture.getValue().get("property"));
-    assertEquals("# content", falconCapture.getValue().get("content"));
-    assertNull(falconCapture.getValue().get("falcon_store_uri"));
-
-    assertEquals(expectedEnvContent, falconCapture2.getValue().get("content"));
-
-    assertEquals("value", falconStartupCapture.getValue().get("property"));
-    assertEquals(applicationServicesExpectedPropertyValue, falconStartupCapture.getValue().get("*.application.services"));
-  }
-
-  @Test
-  public void testUpdateHbaseConfigs() throws Exception{
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(
-        AmbariManagementController.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockCluster = easyMockSupport.createNiceMock(Cluster.class);
-
-    final Injector mockInjector = Guice.createInjector(new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        binder.bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        binder.bind(Clusters.class).toInstance(mockClusters);
-        binder.bind(EntityManager.class).toInstance(entityManager);
-        binder.bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        binder.bind(PasswordEncoder.class).toInstance(createNiceMock(PasswordEncoder.class));
-        binder.bind(HookContextFactory.class).toInstance(createMock(HookContextFactory.class));
-        binder.bind(HookService.class).toInstance(createMock(HookService.class));
-
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).anyTimes();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockCluster);
-    }}).anyTimes();
-
-    final Service hbaseService = createNiceMock(Service.class);
-    expect(hbaseService.getDesiredStackId()).andReturn(new StackId("HDP-2.4"));
-
-    expect(mockCluster.getServices()).andReturn(new HashMap<String, Service>(){{put("HBASE",hbaseService);}}).anyTimes();
-    expect(mockCluster.getSecurityType()).andReturn(SecurityType.KERBEROS).anyTimes();
-
-    final Config mockHbaseSiteConfigs = easyMockSupport.createNiceMock(Config.class);
-    expect(mockCluster.getDesiredConfigByType("hbase-site")).andReturn(mockHbaseSiteConfigs).atLeastOnce();
-    expect(mockHbaseSiteConfigs.getProperties()).andReturn(new HashMap<String, String>(){{
-      put("hbase.coprocessor.regionserver.classes","{{hbase_coprocessor_regionserver_classes}}");
-      put("hbase.coprocessor.region.classes","{{hbase_coprocessor_region_classes}}");
-    }}).anyTimes();
-
-    final Config mockRangerHbaseConfigs = easyMockSupport.createNiceMock(Config.class);
-    expect(mockCluster.getDesiredConfigByType("ranger-hbase-plugin-properties")).andReturn(mockRangerHbaseConfigs).atLeastOnce();
-    expect(mockRangerHbaseConfigs.getProperties()).andReturn(new HashMap<String, String>(){{
-      put("ranger-hbase-plugin-enabled", "yes");
-    }}).anyTimes();
-
-    final Config mockRangerEnvConfigs = easyMockSupport.createNiceMock(Config.class);
-    expect(mockCluster.getDesiredConfigByType("ranger-env")).andReturn(mockRangerEnvConfigs).atLeastOnce();
-    expect(mockRangerEnvConfigs.getProperties()).andReturn(new HashMap<String, String>(){{
-      put("xml_configurations_supported", "true");
-    }}).anyTimes();
-
-
-    Capture<Map<String, String>> hbaseCapture =  newCapture();
-    expect(mockAmbariManagementController.createConfig(eq(mockCluster), anyObject(StackId.class), eq("hbase-site"),
-        capture(hbaseCapture), anyString(), EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(null).once();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog240.class).updateHBaseConfigs();
-    easyMockSupport.verifyAll();
-
-    assertEquals("org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor", hbaseCapture.getValue().get("hbase.coprocessor.regionserver.classes"));
-    assertEquals("org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint,org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor", hbaseCapture.getValue().get("hbase.coprocessor.region.classes"));
-  }
-
-  @Test
-  public void test_addParam_ParamsNotAvailable() {
-
-    UpgradeCatalog240 upgradeCatalog240 = new UpgradeCatalog240(injector);
-    String inputSource = "{ \"path\" : \"test_path\", \"type\" : \"SCRIPT\"}";
-    List<String> params = Arrays.asList("connection.timeout", "checkpoint.time.warning.threshold", "checkpoint.time.critical.threshold");
-    String expectedSource = "{\"path\":\"test_path\",\"type\":\"SCRIPT\",\"parameters\":[{\"name\":\"connection.timeout\",\"display_name\":\"Connection Timeout\",\"value\":5.0,\"type\":\"NUMERIC\",\"description\":\"The maximum time before this alert is considered to be CRITICAL\",\"units\":\"seconds\",\"threshold\":\"CRITICAL\"},{\"name\":\"checkpoint.time.warning.threshold\",\"display_name\":\"Checkpoint Warning\",\"value\":2.0,\"type\":\"PERCENT\",\"description\":\"The percentage of the last checkpoint time greater than the interval in order to trigger a warning alert.\",\"units\":\"%\",\"threshold\":\"WARNING\"},{\"name\":\"checkpoint.time.critical.threshold\",\"display_name\":\"Checkpoint Critical\",\"value\":4.0,\"type\":\"PERCENT\",\"description\":\"The percentage of the last checkpoint time greater than the interval in order to trigger a critical alert.\",\"units\":\"%\",\"threshold\":\"CRITICAL\"}]}";
-
-    String result = upgradeCatalog240.addParam(inputSource, params);
-    Assert.assertEquals(result, expectedSource);
-  }
-
-  @Test
-  public void test_addParam_ParamsAvailableWithOneOFNeededItem() {
-
-    UpgradeCatalog240 upgradeCatalog240 = new UpgradeCatalog240(injector);
-    String inputSource = "{\"path\":\"test_path\",\"type\":\"SCRIPT\",\"parameters\":[{\"name\":\"connection.timeout\",\"display_name\":\"Connection Timeout\",\"value\":5.0,\"type\":\"NUMERIC\",\"description\":\"The maximum time before this alert is considered to be CRITICAL\",\"units\":\"seconds\",\"threshold\":\"CRITICAL\"}]}";
-    List<String> params = new ArrayList<>(Arrays.asList("connection.timeout", "checkpoint.time.warning.threshold", "checkpoint.time.critical.threshold"));
-    String expectedSource = "{\"path\":\"test_path\",\"type\":\"SCRIPT\",\"parameters\":[{\"name\":\"connection.timeout\",\"display_name\":\"Connection Timeout\",\"value\":5.0,\"type\":\"NUMERIC\",\"description\":\"The maximum time before this alert is considered to be CRITICAL\",\"units\":\"seconds\",\"threshold\":\"CRITICAL\"},{\"name\":\"checkpoint.time.warning.threshold\",\"display_name\":\"Checkpoint Warning\",\"value\":2.0,\"type\":\"PERCENT\",\"description\":\"The percentage of the last checkpoint time greater than the interval in order to trigger a warning alert.\",\"units\":\"%\",\"threshold\":\"WARNING\"},{\"name\":\"checkpoint.time.critical.threshold\",\"display_name\":\"Checkpoint Critical\",\"value\":4.0,\"type\":\"PERCENT\",\"description\":\"The percentage of the last checkpoint time greater than the interval in order to trigger a critical alert.\",\"units\":\"%\",\"threshold\":\"CRITICAL\"}]}";
-
-    String result = upgradeCatalog240.addParam(inputSource, params);
-    Assert.assertEquals(result, expectedSource);
-  }
-
-  /**
-   * Test that dfs.internal.nameservices is not affected
-   * Also, dfs.client.retry.policy.enabled is reset
-   * @throws Exception
-   */
-  @Test
-  public void testHdfsSiteUpdateConfigs2() throws Exception{
-    Map<String, String> oldPropertiesHdfsSite = new HashMap<String, String>() {
-      {
-        put("dfs.client.retry.policy.enabled", "true");
-      }
-    };
-    Map<String, String> newPropertiesHdfsSite = new HashMap<String, String>() {
-      {
-        put("dfs.client.retry.policy.enabled", "false");
-      }
-    };
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
-    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
-    final Service service = createStrictMock(Service.class);
-    final Map<String, Service> services = Collections.singletonMap("HDFS", service);
-    Config mockHdfsSite = easyMockSupport.createNiceMock(Config.class);
-
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).anyTimes();
-    expect(cluster.getDesiredConfigByType("hdfs-site")).andReturn(mockHdfsSite).atLeastOnce();
-    expect(mockHdfsSite.getProperties()).andReturn(oldPropertiesHdfsSite).anyTimes();
-    expect(cluster.getServices()).andReturn(services).once();
-
-    Injector injector = easyMockSupport.createNiceMock(Injector.class);
-    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
-
-    replay(injector, clusters, mockHdfsSite, cluster);
-
-    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
-        .addMockedMethod("createConfiguration")
-        .addMockedMethod("getClusters", new Class[] { })
-        .addMockedMethod("createConfig")
-        .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
-        .createNiceMock();
-
-    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
-    Capture<Map<String, String>> propertiesCapture = EasyMock.newCapture();
-
-    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
-                                   EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
-
-    replay(controller, injector2);
-    new UpgradeCatalog240(injector2).updateHDFSConfigs();
-    easyMockSupport.verifyAll();
-
-    Map<String, String> updatedProperties = propertiesCapture.getValue();
-    assertTrue(Maps.difference(newPropertiesHdfsSite, updatedProperties).areEqual());
-  }
-
-  /**
-   * Test that dfs.nameservices is copied over to dfs.internal.nameservices
-   * @throws Exception
-   */
-  @Test
-  public void testHdfsSiteUpdateConfigs() throws Exception{
-    Map<String, String> oldPropertiesHdfsSite = new HashMap<String, String>() {
-      {
-        put("dfs.nameservices", "nnha");
-      }
-    };
-    Map<String, String> newPropertiesHdfsSite = new HashMap<String, String>() {
-      {
-        put("dfs.nameservices", "nnha");
-        put("dfs.internal.nameservices", "nnha");
-      }
-    };
-    Map<String, String> oldPropertiesHadoopEnv = new HashMap<String, String>() {
-      {
-        put("keyserver_port", " ");
-      }
-    };
-    Map<String, String> newPropertiesHadoopEnv = new HashMap<String, String>() {
-      {
-        put("keyserver_port", "");
-      }
-    };
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
-    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
-    final Service service = createStrictMock(Service.class);
-    final Map<String, Service> services = Collections.singletonMap("HDFS", service);
-    Config mockHdfsSite = easyMockSupport.createNiceMock(Config.class);
-    Config mockHadoopEnv = easyMockSupport.createNiceMock(Config.class);
-
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).anyTimes();
-    expect(cluster.getDesiredConfigByType("hdfs-site")).andReturn(mockHdfsSite).atLeastOnce();
-    expect(cluster.getDesiredConfigByType("hadoop-env")).andReturn(mockHadoopEnv).atLeastOnce();
-    expect(mockHdfsSite.getProperties()).andReturn(oldPropertiesHdfsSite).anyTimes();
-    expect(mockHadoopEnv.getProperties()).andReturn(oldPropertiesHadoopEnv).anyTimes();
-    expect(cluster.getServices()).andReturn(services).once();
-
-    Injector injector = easyMockSupport.createNiceMock(Injector.class);
-    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
-
-    replay(injector, clusters, mockHdfsSite, mockHadoopEnv, cluster);
-
-    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
-        .addMockedMethod("createConfiguration")
-        .addMockedMethod("getClusters", new Class[] { })
-        .addMockedMethod("createConfig")
-        .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
-        .createNiceMock();
-
-    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
-    Capture<Map<String, String>> propertiesCaptureHdfsSite = EasyMock.newCapture();
-    Capture<Map<String, String>> propertiesCaptureHadoopEnv = EasyMock.newCapture();
-
-    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), eq("hdfs-site"), capture(propertiesCaptureHdfsSite), anyString(),
-                                   EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), eq("hadoop-env"), capture(propertiesCaptureHadoopEnv), anyString(),
-        EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
-
-    replay(controller, injector2);
-    new UpgradeCatalog240(injector2).updateHDFSConfigs();
-    easyMockSupport.verifyAll();
-
-    Map<String, String> updatedPropertiesHdfsSite = propertiesCaptureHdfsSite.getValue();
-    assertTrue(Maps.difference(newPropertiesHdfsSite, updatedPropertiesHdfsSite).areEqual());
-
-    Map<String, String> updatedPropertiesHadoopEnv = propertiesCaptureHadoopEnv.getValue();
-    assertTrue(Maps.difference(newPropertiesHadoopEnv, updatedPropertiesHadoopEnv).areEqual());
-  }
-
-  @Test
-  public void testYarnEnvUpdateConfigs() throws Exception{
-
-    Map<String, String> oldPropertiesYarnEnv = new HashMap<String, String>() {
-      {
-        put("content", "export YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}");
-      }
-    };
-    Map<String, String> newPropertiesYarnEnv = new HashMap<String, String>() {
-      {
-        put("content", "# export YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}}" +
-                "\n\n      # Specify the max Heapsize for the timeline server using a numerical value\n" +
-                "      # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set\n" +
-                "      # the value to 1024.\n" +
-                "      # This value will be overridden by an Xmx setting specified in either YARN_OPTS\n" +
-                "      # and/or YARN_TIMELINESERVER_OPTS.\n" +
-                "      # If not specified, the default value will be picked from either YARN_HEAPMAX\n" +
-                "      # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.\n" +
-                "      export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}}");
-      }
-    };
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
-    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
-    Config mockYarnEnv = easyMockSupport.createNiceMock(Config.class);
-
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).once();
-    expect(cluster.getDesiredConfigByType("yarn-env")).andReturn(mockYarnEnv).atLeastOnce();
-    expect(mockYarnEnv.getProperties()).andReturn(oldPropertiesYarnEnv).anyTimes();
-
-    Injector injector = easyMockSupport.createNiceMock(Injector.class);
-    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
-
-    replay(injector, clusters, mockYarnEnv, cluster);
-
-    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
-            .addMockedMethod("createConfiguration")
-            .addMockedMethod("getClusters", new Class[] { })
-            .addMockedMethod("createConfig")
-            .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
-            .createNiceMock();
-
-    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
-    Capture<Map<String, String>> propertiesCapture = EasyMock.newCapture();
-
-    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
-            EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
-
-    replay(controller, injector2);
-    new UpgradeCatalog240(injector2).updateYarnEnv();
-    easyMockSupport.verifyAll();
-
-    Map<String, String> updatedProperties = propertiesCapture.getValue();
-    assertTrue(Maps.difference(newPropertiesYarnEnv, updatedProperties).areEqual());
-  }
-
-  @Test
-  public void testUpdateKAFKAConfigs() throws Exception{
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-
-    final Config kafkaBroker = easyMockSupport.createNiceMock(Config.class);
-    expect(kafkaBroker.getProperties()).andReturn(new HashMap<String, String>(){{
-      put("listeners", "PLAINTEXT://localhost:6667,SSL://localhost:6666");
-    }}
-    ).anyTimes();
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-        bind(PasswordEncoder.class).toInstance(createNiceMock(PasswordEncoder.class));
-        bind(HookContextFactory.class).toInstance(createMock(HookContextFactory.class));
-        bind(HookService.class).toInstance(createMock(HookService.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("kafka-broker")).andReturn(kafkaBroker).atLeastOnce();
-    expect(mockClusterExpected.getSecurityType()).andReturn(SecurityType.KERBEROS);
-    expect(mockClusterExpected.getServices()).andReturn(new HashMap<String, Service>() {
-      {
-        put("KAFKA", null);
-      }
-    }).atLeastOnce();
-
-    UpgradeCatalog240 upgradeCatalog240 = createMockBuilder(UpgradeCatalog240.class)
-            .withConstructor(Injector.class)
-            .withArgs(mockInjector)
-            .addMockedMethod("updateConfigurationProperties", String.class,
-                    Map.class, boolean.class, boolean.class)
-            .createMock();
-
-    Map<String, String> expectedUpdates = new HashMap<>();
-    expectedUpdates.put("listeners", "PLAINTEXTSASL://localhost:6667,SSL://localhost:6666");
-
-    upgradeCatalog240.updateConfigurationProperties("kafka-broker", expectedUpdates,
-            true, false);
-    expectLastCall().once();
-
-    easyMockSupport.replayAll();
-    replay(upgradeCatalog240);
-    upgradeCatalog240.updateKAFKAConfigs();
-    easyMockSupport.verifyAll();
-  }
-
-
-  @Test
-  public void testSparkConfigUpdate() throws Exception{
-
-    Map<String, String> oldPropertiesSparkDefaults = new HashMap<String, String>() {
-      {
-        put("spark.driver.extraJavaOptions", "-Dhdp.version={{hdp_full_version}}");
-        put("spark.yarn.am.extraJavaOptions", "-Dhdp.version={{hdp_full_version}}");
-      }
-    };
-    Map<String, String> newPropertiesSparkDefaults = new HashMap<String, String>() {
-      {
-        put("spark.driver.extraJavaOptions", "-Dhdp.version={{full_stack_version}}");
-        put("spark.yarn.am.extraJavaOptions", "-Dhdp.version={{full_stack_version}}");
-      }
-    };
-
-    Map<String, String> oldPropertiesSparkJavaOpts = new HashMap<String, String>() {
-      {
-        put("content", "-Dhdp.version={{hdp_full_version}}");
-      }
-    };
-    Map<String, String> newPropertiesSparkJavaOpts = new HashMap<String, String>() {
-      {
-        put("content", "-Dhdp.version={{full_stack_version}}");
-      }
-    };
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
-    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
-    Config mockSparkDefaults = easyMockSupport.createNiceMock(Config.class);
-    Config mockSparkJavaOpts = easyMockSupport.createNiceMock(Config.class);
-
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).once();
-    expect(cluster.getDesiredConfigByType("spark-defaults")).andReturn(mockSparkDefaults).atLeastOnce();
-    expect(mockSparkDefaults.getProperties()).andReturn(oldPropertiesSparkDefaults).anyTimes();
-    expect(cluster.getDesiredConfigByType("spark-javaopts-properties")).andReturn(mockSparkJavaOpts).atLeastOnce();
-    expect(mockSparkJavaOpts.getProperties()).andReturn(oldPropertiesSparkJavaOpts).anyTimes();
-
-    Injector injector = easyMockSupport.createNiceMock(Injector.class);
-    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
-
-    replay(injector, clusters, mockSparkDefaults, mockSparkJavaOpts, cluster);
-
-    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
-        .addMockedMethod("createConfiguration")
-        .addMockedMethod("getClusters", new Class[] { })
-        .addMockedMethod("createConfig")
-        .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
-        .createNiceMock();
-
-    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
-    Capture<Map<String, String>> propertiesSparkDefaultsCapture = EasyMock.newCapture();
-    Capture<Map<String, String>> propertiesSparkJavaOptsCapture = EasyMock.newCapture();
-
-    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), eq("spark-defaults"), capture(propertiesSparkDefaultsCapture), anyString(),
-        EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), eq("spark-javaopts-properties"), capture(propertiesSparkJavaOptsCapture), anyString(),
-        EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
-
-    replay(controller, injector2);
-    new UpgradeCatalog240(injector2).updateSparkConfigs();
-    easyMockSupport.verifyAll();
-
-    Map<String, String> updatedSparkDefaultsProperties = propertiesSparkDefaultsCapture.getValue();
-    assertTrue(Maps.difference(newPropertiesSparkDefaults, updatedSparkDefaultsProperties).areEqual());
-
-    Map<String, String> updatedSparkJavaOptsProperties = propertiesSparkJavaOptsCapture.getValue();
-    assertTrue(Maps.difference(newPropertiesSparkJavaOpts, updatedSparkJavaOptsProperties).areEqual());
-  }
-
-  @Test
-  public void testAmsHbaseEnvUpdateConfigs() throws Exception{
-
-    Map<String, String> oldPropertiesAmsHbaseEnv = new HashMap<String, String>() {
-      {
-        put("content", "some_content");
-      }
-    };
-    Map<String, String> newPropertiesAmsHbaseEnv = new HashMap<String, String>() {
-      {
-        put("content", "some_content"+ "\n # Explicitly Setting HBASE_HOME for AMS HBase so that there is no conflict\n" +
-          "export HBASE_HOME={{ams_hbase_home_dir}}\n");
-      }
-    };
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
-    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
-    Config mockAmsHbaseEnv = easyMockSupport.createNiceMock(Config.class);
-
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).once();
-    expect(cluster.getDesiredConfigByType("ams-hbase-env")).andReturn(mockAmsHbaseEnv).atLeastOnce();
-    expect(mockAmsHbaseEnv.getProperties()).andReturn(oldPropertiesAmsHbaseEnv).anyTimes();
-
-    Injector injector = easyMockSupport.createNiceMock(Injector.class);
-    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
-
-    replay(injector, clusters, mockAmsHbaseEnv, cluster);
-
-    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
-      .addMockedMethod("createConfiguration")
-      .addMockedMethod("getClusters", new Class[] { })
-      .addMockedMethod("createConfig")
-      .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
-      .createNiceMock();
-
-    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
-    Capture<Map<String, String>> propertiesCapture = EasyMock.newCapture();
-
-    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
-      EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
-
-    replay(controller, injector2);
-    new UpgradeCatalog240(injector2).updateAMSConfigs();
-    easyMockSupport.verifyAll();
-
-    Map<String, String> updatedProperties = propertiesCapture.getValue();
-    assertTrue(Maps.difference(newPropertiesAmsHbaseEnv, updatedProperties).areEqual());
-  }
-
-  @Test
-  public void testAmsSiteUpdateConfigs() throws Exception{
-
-    Map<String, String> oldPropertiesAmsSite = new HashMap<String, String>() {
-      {
-        put("timeline.metrics.sink.collection.period", "60");
-        put("timeline.metrics.host.aggregator.ttl", "86400");
-        put("timeline.metrics.cluster.aggregator.second.ttl", "604800");
-      }
-    };
-    Map<String, String> newPropertiesAmsSite = new HashMap<String, String>() {
-      {
-        put("timeline.metrics.sink.collection.period", "10");
-        put("timeline.metrics.host.aggregator.ttl", "86400");
-        put("timeline.metrics.cluster.aggregator.second.ttl", "259200");
-      }
-    };
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
-    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
-    Config mockAmsSite = easyMockSupport.createNiceMock(Config.class);
-
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).once();
-    expect(cluster.getDesiredConfigByType("ams-site")).andReturn(mockAmsSite).atLeastOnce();
-    expect(mockAmsSite.getProperties()).andReturn(oldPropertiesAmsSite).anyTimes();
-
-    Injector injector = easyMockSupport.createNiceMock(Injector.class);
-    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
-
-    replay(injector, clusters, mockAmsSite, cluster);
-
-    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
-      .addMockedMethod("createConfiguration")
-      .addMockedMethod("getClusters", new Class[] { })
-      .addMockedMethod("createConfig")
-      .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
-      .createNiceMock();
-
-    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
-    Capture<Map<String, String>> propertiesCapture = EasyMock.newCapture();
-
-    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
-      EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
-
-    replay(controller, injector2);
-    new UpgradeCatalog240(injector2).updateAMSConfigs();
-    easyMockSupport.verifyAll();
-
-    Map<String, String> updatedProperties = propertiesCapture.getValue();
-    assertTrue(Maps.difference(newPropertiesAmsSite, updatedProperties).areEqual());
-  }
-
-  @Test
-  public void testAmsHbaseSiteUpdateConfigs() throws Exception{
-
-    Map<String, String> oldPropertiesAmsHbaseSite = new HashMap<String, String>() {
-      {
-        put("hbase.rpc.timeout", "30000");
-        put("hbase.normalizer.enabled", String.valueOf(true));
-      }
-    };
-    Map<String, String> newPropertiesAmsHbaseSite = new HashMap<String, String>() {
-      {
-        put("hbase.rpc.timeout", "300000");
-        put("hbase.normalizer.enabled", String.valueOf(false));
-      }
-    };
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-
-    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
-    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
-    Config mockAmsHbaseSite = easyMockSupport.createNiceMock(Config.class);
-
-    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", cluster);
-    }}).once();
-    expect(cluster.getDesiredConfigByType("ams-hbase-site")).andReturn(mockAmsHbaseSite).atLeastOnce();
-    expect(mockAmsHbaseSite.getProperties()).andReturn(oldPropertiesAmsHbaseSite).anyTimes();
-
-    Injector injector = easyMockSupport.createNiceMock(Injector.class);
-    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
-    expect(injector.getInstance(KerberosHelper.class)).andReturn(createNiceMock(KerberosHelper.class)).anyTimes();
-
-    replay(injector, clusters, mockAmsHbaseSite, cluster);
-
-    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
-      .addMockedMethod("createConfiguration")
-      .addMockedMethod("getClusters", new Class[] { })
-      .addMockedMethod("createConfig")
-      .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
-      .createNiceMock();
-
-    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
-    Capture<Map<String, String>> propertiesCapture = EasyMock.newCapture();
-
-    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
-    expect(controller.getClusters()).andReturn(clusters).anyTimes();
-    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
-      EasyMock.<Map<String, Map<String, String>>>anyObject())).andReturn(createNiceMock(Config.class)).once();
-
-    replay(controller, injector2);
-    new UpgradeCatalog240(injector2).updateAMSConfigs();
-    easyMockSupport.verifyAll();
-
-    Map<String, String> updatedProperties = propertiesCapture.getValue();
-    assertTrue(Maps.difference(newPropertiesAmsHbaseSite, updatedProperties).areEqual());
-  }
-
-  @Test
-  public void testUpdateKerberosConfiguration() throws Exception {
-    final AmbariManagementController controller = createMock(AmbariManagementController.class);
-    final AmbariMetaInfo metaInfo = createMock(AmbariMetaInfo.class);
-    final StackInfo stackInfo = createMock(StackInfo.class);
-    final ServiceInfo serviceInfo = createMock(ServiceInfo.class);
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    final OsFamily osFamily = createNiceMock(OsFamily.class);
-
-    final Map<String, String> propertiesKerberosEnv = new HashMap<String, String>() {
-      {
-        put("realm", "EXAMPLE.COM");
-        put("encryption_types", "aes des3-cbc-sha1 rc4 des-cbc-md5");
-        put("kdc_host", "c6407.ambari.apache.org");
-        put("admin_server_host", "c6407.ambari.apache.org");
-        put("kdc_type", "mit-kdc");
-      }
-    };
-
-    final Map<String, String> propertiesKrb5Conf = new HashMap<String, String>() {
-      {
-        put("content", "\n" +
-            "[libdefaults]\n" +
-            "  renew_lifetime = 7d\n" +
-            "  forwardable = true\n" +
-            "  default_realm = {{realm}}\n" +
-            "  ticket_lifetime = 24h\n" +
-            "  dns_lookup_realm = false\n" +
-            "  dns_lookup_kdc = false\n" +
-            "  #default_tgs_enctypes = {{encryption_types}}\n" +
-            "  #default_tkt_enctypes = {{encryption_types}}\n" +
-            "\n" +
-            "{% if domains %}\n" +
-            "[domain_realm]\n" +
-            "{% for domain in domains.split(',') %}\n" +
-            "  {{domain}} = {{realm}}\n" +
-            "{% endfor %}\n" +
-            "{% endif %}\n" +
-            "\n" +
-            "[logging]\n" +
-            "  default = FILE:/var/log/krb5kdc.log\n" +
-            "  admin_server = FILE:/var/log/kadmind.log\n" +
-            "  kdc = FILE:/var/log/krb5kdc.log\n" +
-            "\n" +
-            "[realms]\n" +
-            "  {{realm}} = {\n" +
-            "    admin_server = {{admin_server_host|default(kdc_host, True)}}\n" +
-            "    kdc = {{kdc_host}}\n" +
-            "  }\n" +
-            "\n" +
-            "{# Append additional realm declarations below #}");
-      }
-    };
-
-    final Config configKerberosEnv = createNiceMock(Config.class);
-    expect(configKerberosEnv.getProperties()).andReturn(propertiesKerberosEnv).anyTimes();
-    expect(configKerberosEnv.getTag()).andReturn("tag1").anyTimes();
-
-    final Config configKrb5Conf = createNiceMock(Config.class);
-    expect(configKrb5Conf.getProperties()).andReturn(propertiesKrb

<TRUNCATED>

[38/63] [abbrv] ambari git commit: AMBARI-21287. Cannot install Datanode/AppTimeLine server from ambari 3.0. Fix root mode sudo issue (dlysnichenko)

Posted by ab...@apache.org.
AMBARI-21287. Cannot install Datanode/AppTimeLine server from ambari 3.0. Fix root mode sudo issue (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c1eeafbf
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c1eeafbf
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c1eeafbf

Branch: refs/heads/branch-feature-logsearch-ui
Commit: c1eeafbfebd089496153d5f3e3665e6a3302bd4a
Parents: ebd79e9
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Tue Jun 27 15:56:52 2017 +0300
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Tue Jun 27 15:57:41 2017 +0300

----------------------------------------------------------------------
 .../libraries/functions/packages_analyzer.py         | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c1eeafbf/ambari-common/src/main/python/resource_management/libraries/functions/packages_analyzer.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/packages_analyzer.py b/ambari-common/src/main/python/resource_management/libraries/functions/packages_analyzer.py
index f4db3d2..5d67654 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/packages_analyzer.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/packages_analyzer.py
@@ -26,6 +26,7 @@ from threading import Thread
 import threading
 from ambari_commons import OSCheck, OSConst
 from ambari_commons import shell
+from ambari_commons.constants import AMBARI_SUDO_BINARY
 from resource_management.core.logger import Logger
 from resource_management.core import shell as rmf_shell
 from resource_management.core.exceptions import Fail
@@ -93,11 +94,11 @@ def allInstalledPackages(allInstalledPackages):
   """
   if OSCheck.is_suse_family():
     return _lookUpZypperPackages(
-      ["sudo", "zypper", "--no-gpg-checks", "search", "--installed-only", "--details"],
+      [AMBARI_SUDO_BINARY, "zypper", "--no-gpg-checks", "search", "--installed-only", "--details"],
       allInstalledPackages)
   elif OSCheck.is_redhat_family():
     return _lookUpYumPackages(
-      ["sudo", "yum", "list", "installed"],
+      [AMBARI_SUDO_BINARY, "yum", "list", "installed"],
       'Installed Packages',
       allInstalledPackages)
   elif OSCheck.is_ubuntu_family():
@@ -133,14 +134,14 @@ def get_available_packages_in_repos(repositories):
       available_packages_in_repos.append(package[0])
   elif OSCheck.is_suse_family():
     for repo in repo_ids:
-      _lookUpZypperPackages(["sudo", "zypper", "--no-gpg-checks", "search", "--details", "--repo", repo],
+      _lookUpZypperPackages([AMBARI_SUDO_BINARY, "zypper", "--no-gpg-checks", "search", "--details", "--repo", repo],
                             available_packages)
     available_packages_in_repos += [package[0] for package in available_packages]
   elif OSCheck.is_redhat_family():
     for repo in repo_ids:
-      _lookUpYumPackages(["sudo", "yum", "list", "available", "--disablerepo=*", "--enablerepo=" + repo],
+      _lookUpYumPackages([AMBARI_SUDO_BINARY, "yum", "list", "available", "--disablerepo=*", "--enablerepo=" + repo],
                          'Available Packages', available_packages)
-      _lookUpYumPackages(["sudo", "yum", "list", "installed", "--disablerepo=*", "--enablerepo=" + repo],
+      _lookUpYumPackages([AMBARI_SUDO_BINARY, "yum", "list", "installed", "--disablerepo=*", "--enablerepo=" + repo],
                          'Installed Packages', installed_packages)
     available_packages_in_repos += [package[0] for package in available_packages + installed_packages]
   return available_packages_in_repos
@@ -149,11 +150,11 @@ def get_available_packages_in_repos(repositories):
 def allAvailablePackages(allAvailablePackages):
   if OSCheck.is_suse_family():
     return _lookUpZypperPackages(
-      ["sudo", "zypper", "--no-gpg-checks", "search", "--uninstalled-only", "--details"],
+      [AMBARI_SUDO_BINARY, "zypper", "--no-gpg-checks", "search", "--uninstalled-only", "--details"],
       allAvailablePackages)
   elif OSCheck.is_redhat_family():
     return _lookUpYumPackages(
-      ["sudo", "yum", "list", "available"],
+      [AMBARI_SUDO_BINARY, "yum", "list", "available"],
       'Available Packages',
       allAvailablePackages)
   elif OSCheck.is_ubuntu_family():


[13/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.4.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.4.json b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.4.json
deleted file mode 100644
index 6dace0f..0000000
--- a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.4.json
+++ /dev/null
@@ -1,499 +0,0 @@
-{
-  "version": "1.0",
-  "stacks": [
-    {
-      "name": "HDP",
-      "old-version": "2.1",
-      "target-version": "2.2.4",
-      "options": {
-        "config-types": {
-          "capacity-scheduler": {
-            "merged-copy": "yes"
-          },
-		  "cluster-env": {
-            "merged-copy": "yes"
-          },
-		  "core-site": {
-            "merged-copy": "yes"
-          },
-		  "falcon-startup.properties": {
-            "merged-copy": "yes"
-          },
-		  "flume-env": {
-		    "merged-copy": "yes"
-		  },
-		  "hadoop-env": {
-            "merged-copy": "yes"
-          },
-		  "hbase-env": {
-            "merged-copy": "yes"
-          },
-		  "hbase-site": {
-            "merged-copy": "yes"
-          },
-		  "hdfs-log4j": {
-            "merged-copy": "yes"
-          },
-		  "hdfs-site": {
-            "merged-copy": "yes"
-          },
-		  "hive-env": {
-            "merged-copy": "yes"
-          },
-		  "hive-site": {
-            "merged-copy": "yes"
-          },
-		  "mapred-env": {
-            "merged-copy": "yes"
-          },
-		  "mapred-site": {
-            "merged-copy": "yes"
-          },
-		  "oozie-env": {
-            "merged-copy": "yes"
-          },
-          "oozie-site": {
-            "merged-copy": "yes"
-          },
-		  "storm-env": {
-            "merged-copy": "yes"
-          },
-		  "storm-site": {
-            "merged-copy": "yes"
-          },
-		  "tez-site": {
-            "merged-copy": "yes"
-          },
-		  "webhcat-log4j": {
-		    "merged-copy": "yes"
-		  },
-		  "webhcat-site": {
-            "merged-copy": "yes"
-          },
-		  "yarn-site": {
-            "merged-copy": "yes"
-          }
-        }
-      },
-      "properties": {
-        "capacity-scheduler": {
-          "yarn.scheduler.capacity.resource-calculator": "org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator",
-          "yarn.scheduler.capacity.root.accessible-node-labels": "*",
-          "yarn.scheduler.capacity.root.accessible-node-labels.default.capacity": {"remove": "yes"},
-          "yarn.scheduler.capacity.root.accessible-node-labels.default.maximum-capacity": {"remove": "yes"},
-          "yarn.scheduler.capacity.root.default-node-label-expression": " "
-        },
-		"cluster-env": {
-          "smokeuser_principal_name": {"remove": "yes"}
-        },
-		"core-site": {
-		  "hadoop.http.authentication.simple.anonymous.allowed": "true",
-          "hadoop.security.auth_to_local": "\n        DEFAULT"
-		},
-		"falcon-startup.properties": {
-		  "*.application.services": "org.apache.falcon.security.AuthenticationInitializationService,\\\n      org.apache.falcon.workflow.WorkflowJobEndNotificationService, \\\n      org.apache.falcon.service.ProcessSubscriberService,\\\n      org.apache.falcon.entity.store.ConfigurationStore,\\\n      org.apache.falcon.rerun.service.RetryService,\\\n      org.apache.falcon.rerun.service.LateRunService,\\\n      org.apache.falcon.service.LogCleanupService,\\\n      org.apache.falcon.metadata.MetadataMappingService",
-		  "*.falcon.enableTLS": "false",
-		  "*.falcon.graph.blueprints.graph": "com.thinkaurelius.titan.core.TitanFactory",
-		  "*.falcon.graph.storage.backend": "berkeleyje",
-		  "*.falcon.security.authorization.admin.groups": "falcon",
-		  "*.falcon.security.authorization.admin.users": "falcon,ambari-qa",
-		  "*.falcon.security.authorization.enabled": "false",
-		  "*.falcon.security.authorization.provider": "org.apache.falcon.security.DefaultAuthorizationProvider",
-		  "*.falcon.security.authorization.superusergroup": "falcon",
-		  "*.journal.impl": "org.apache.falcon.transaction.SharedFileSystemJournal",
-		  "prism.application.services": "org.apache.falcon.entity.store.ConfigurationStore",
-		  "prism.configstore.listeners": "org.apache.falcon.entity.v0.EntityGraph,\\\n      org.apache.falcon.entity.ColoClusterRelation,\\\n      org.apache.falcon.group.FeedGroupMap"
-		},
-		"flume-env": {
-		  "content": "\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements.  See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# If this file is placed at FLUME_CONF_DIR/flume-env.sh, it will be sourced\n# during Flume startup.\n\n# Enviroment variables can be set here.\n\nexport JAVA_HOME={{java_home}}\
 n\n# Give Flume more memory and pre-allocate, enable remote monitoring via JMX\n# export JAVA_OPTS=\"-Xms100m -Xmx2000m -Dcom.sun.management.jmxremote\"\n\n# Note that the Flume conf directory is always included in the classpath.\n# Add flume sink to classpath\nif [ -e \"/usr/lib/flume/lib/ambari-metrics-flume-sink.jar\" ]; then\n  export FLUME_CLASSPATH=$FLUME_CLASSPATH:/usr/lib/flume/lib/ambari-metrics-flume-sink.jar\nfi\n\nexport HIVE_HOME={{flume_hive_home}}\nexport HCAT_HOME={{flume_hcat_home}}"
-		},
-		"hadoop-env": {
-		  "content": "\n# Set Hadoop-specific environment variables here.\n\n# The only required environment variable is JAVA_HOME.  All others are\n# optional.  When running a distributed configuration it is best to\n# set JAVA_HOME in this file, so that it is correctly defined on\n# remote nodes.\n\n# The java implementation to use.  Required.\nexport JAVA_HOME={{java_home}}\nexport HADOOP_HOME_WARN_SUPPRESS=1\n\n# Hadoop home directory\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hadoop Configuration Directory\n\n{# this is different for HDP1 #}\n# Path to jsvc required by secure HDP 2.0 datanode\nexport JSVC_HOME={{jsvc_path}}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\nexport HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n\nexport HADOOP_NAMENODE_INIT_HEAPSIZE=\"-Xms{{namenode_heapsize}}\"\n\n# Extra Java runtime options.  Empty by default.\nexport HADOOP_OPTS=\"-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}\"\n\n# Command specific options appended to HAD
 OOP_OPTS when specified\nexport HADOOP_NAMENODE_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}\"\nHADOOP_JOBTRACKER_OPTS=\"-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{
 {jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}\"\n\nHADOOP_TASKTRACKER_OPTS=\"-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}\"\nexport HADOOP_DATANODE_OPTS=\"-server -XX:ParallelGCThreads=4 -XX:+UseConcMarkSweepGC -XX:ErrorFile=/var/log/hadoop/$USER/hs_err_pid%p.log -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m -XX:MaxPermSize=256m -Xloggc:/var/log/hadoop/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{dtnode_heapsize}} -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_DATANODE_OPTS}\"\nHADOOP_BALANCER_OPTS=\"-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}\"\n\nexport HADOOP_SECONDARYNAMENODE_OPTS=$HADOOP_NAMENODE_OPTS\n\n# The following applies
  to multiple commands (fs, dfs, fsck, distcp etc)\nexport HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS\"\n\n# On secure datanodes, user to run the datanode as after dropping privileges\nexport HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}\n\n# Extra ssh options.  Empty by default.\nexport HADOOP_SSH_OPTS=\"-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR\"\n\n# Where log files are stored.  $HADOOP_HOME/logs by default.\nexport HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n\n# History server logs\nexport HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER\n\n# Where log files are stored in the secure data environment.\nexport HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.\n# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves\n\n# host:path where hadoop code should be rsync'd from.  Unset by default.\n# export HADOOP_MAST
 ER=master:/home/$USER/src/hadoop\n\n# Seconds to sleep between slave commands.  Unset by default.  This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HADOOP_SLAVE_SLEEP=0.1\n\n# The directory where pid files are stored. /tmp by default.\nexport HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER\nexport HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER\n\n# History server pid\nexport HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER\n\nYARN_RESOURCEMANAGER_OPTS=\"-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY\"\n\n# A string representing this instance of hadoop. $USER by default.\nexport HADOOP_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes.  See 'man nice'.\n\n# export HADOOP_NICENESS=10\n\n# Use libraries from standard classpath\nJAVA_JDBC_LIBS=\"\"\n#Add libraries required by mysql connector\nfor jarFile in `ls /usr/share/java/*my
 sql* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by oracle connector\nfor jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`\ndo\n  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile\ndone\n# Add libraries required by nodemanager\nMAPREDUCE_LIBS={{mapreduce_libs_path}}\nexport HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}\n\n# added to the HADOOP_CLASSPATH\nif [ -d \"/usr/hdp/current/tez-client\" ]; then\n  if [ -d \"/etc/tez/conf/\" ]; then\n    # When using versioned RPMs, the tez-client will be a symlink to the current folder of tez in HDP.\n    export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}:/usr/hdp/current/tez-client/*:/usr/hdp/current/tez-client/lib/*:/etc/tez/conf/\n  fi\nfi\n\n\n# Setting path to hdfs command line\nexport HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}\n\n# Mostly required for hadoop 2.0\nexport JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}\n\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"",
-		  "namenode_opt_maxnewsize": "256m",
-		  "namenode_opt_newsize": "256m"
-		},
-		"hbase-env": {
-		  "content": "\n# Set environment variables here.\n\n# The java implementation to use. Java 1.6 required.\nexport JAVA_HOME={{java64_home}}\n\n# HBase Configuration directory\nexport HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{hbase_conf_dir}}}\n\n# Extra Java CLASSPATH elements. Optional.\nexport HBASE_CLASSPATH=${HBASE_CLASSPATH}\n\n\n# The maximum amount of heap to use, in MB. Default is 1000.\n# export HBASE_HEAPSIZE=1000\n\n# Extra Java runtime options.\n# Below are what we set by default. May only work with SUN JVM.\n# For more on why as well as other possible settings,\n# see http://wiki.apache.org/hadoop/PerformanceTuning\nexport SERVER_GC_OPTS=\"-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`\"\n# Uncomment below to enable java garbage collection logging.\n# export HBASE_OPTS=\"$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log\"\n\n# Uncomment and adjust to enable JMX expo
 rting\n# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.\n# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html\n#\n# export HBASE_JMX_BASE=\"-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false\"\n# If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size\n# export HBASE_THRIFT_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103\"\n# export HBASE_ZOOKEEPER_OPTS=\"$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104\"\n\n# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.\nexport HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers\n\n# Extra ssh options. Empty by default.\n# export HBASE_SSH_OPTS=\"-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR\"\n\n# Where log files are stored. $HBASE_HOME/logs by default.\nexport HBASE_LOG_DIR={{log_dir}}
 \n\n# A string representing this instance of hbase. $USER by default.\n# export HBASE_IDENT_STRING=$USER\n\n# The scheduling priority for daemon processes. See 'man nice'.\n# export HBASE_NICENESS=10\n\n# The directory where pid files are stored. /tmp by default.\nexport HBASE_PID_DIR={{pid_dir}}\n\n# Seconds to sleep between slave commands. Unset by default. This\n# can be useful in large clusters, where, e.g., slave rsyncs can\n# otherwise arrive faster than the master can service them.\n# export HBASE_SLAVE_SLEEP=0.1\n\n# Tell HBase whether it should manage it's own instance of Zookeeper or not.\nexport HBASE_MANAGES_ZK=false\n\n{% if security_enabled %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log -Djava.security.auth.login.config={{client_jaas_config_file}}\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}} -Djava.security.auth.login.config={{master_jaas_config_file}}\"\nexport HBASE_REGIONSERVER_OPTS=
 \"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}} -Djava.security.auth.login.config={{regionserver_jaas_config_file}}\"\n{% else %}\nexport HBASE_OPTS=\"$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log\"\nexport HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS -Xmx{{master_heapsize}}\"\nexport HBASE_REGIONSERVER_OPTS=\"$HBASE_REGIONSERVER_OPTS -Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}\"\n{% endif %}"
-		},
-		"hbase-site": {
-		  "hbase.hregion.majorcompaction": "604800000",
-		  "hbase.hregion.majorcompaction.jitter": "0.50",
-		  "hbase.hregion.memstore.block.multiplier": "4",
-		  "hbase.hstore.flush.retries.number": {"remove": "yes"}
-		},
-		"hdfs-log4j": {
-		  "content": "\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#  http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# Define some default values that can be overridden by system properties\n# To change daemon root logger use hadoop_root_logger in hadoop-env\nhadoop.root.logger=INF
 O,console\nhadoop.log.dir=.\nhadoop.log.file=hadoop.log\n\n\n# Define the root logger to the system property \"hadoop.root.logger\".\nlog4j.rootLogger=${hadoop.root.logger}, EventCounter\n\n# Logging Threshold\nlog4j.threshhold=ALL\n\n#\n# Daily Rolling File Appender\n#\n\nlog4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern=.yyyy-MM-dd\n\n# 30-day backup\n#log4j.appender.DRFA.MaxBackupIndex=30\nlog4j.appender.DRFA.layout=org.apache.log4j.PatternLayout\n\n# Pattern format: Date LogLevel LoggerName LogMessage\nlog4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n# Debugging Pattern format\n#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n#\n# console\n# Add \"console\" to rootlogger above if you want to use this\n#\n\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console
 .target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n#\n# TaskLog Appender\n#\n\n#Default values\nhadoop.tasklog.taskid=null\nhadoop.tasklog.iscleanup=false\nhadoop.tasklog.noKeepSplits=4\nhadoop.tasklog.totalLogFileSize=100\nhadoop.tasklog.purgeLogSplits=true\nhadoop.tasklog.logsRetainHours=12\n\nlog4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender\nlog4j.appender.TLA.taskId=${hadoop.tasklog.taskid}\nlog4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}\nlog4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}\n\nlog4j.appender.TLA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\n\n#\n#Security audit appender\n#\nhadoop.security.logger=INFO,console\nhadoop.security.log.maxfilesize=256MB\nhadoop.security.log.maxbackupindex=20\nlog4j.category.SecurityLogger=${hadoop.security.logger}\nhad
 oop.security.log.file=SecurityAuth.audit\nlog4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.DRFAS.DatePattern=.yyyy-MM-dd\n\nlog4j.appender.RFAS=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}\nlog4j.appender.RFAS.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n\nlog4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}\nlog4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}\n\n#\n# hdfs audit logging\n#\nhdfs.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}\nlog4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false\nl
 og4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log\nlog4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# mapred audit logging\n#\nmapred.audit.logger=INFO,console\nlog4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}\nlog4j.additivity.org.apache.hadoop.mapred.AuditLogger=false\nlog4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log\nlog4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout\nlog4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n\nlog4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd\n\n#\n# Rolling File Appender\n#\n\nlog4j.appender.RFA=org.apache.log4j.RollingFileAppender\nlog4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}\n\n#
  Logfile size and and 30-day backups\nlog4j.appender.RFA.MaxFileSize=256MB\nlog4j.appender.RFA.MaxBackupIndex=10\n\nlog4j.appender.RFA.layout=org.apache.log4j.PatternLayout\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n\nlog4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n\n# Custom Logging levels\n\nhadoop.metrics.log.level=INFO\n#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG\n#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG\n#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG\nlog4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}\n\n# Jets3t library\nlog4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR\n\n#\n# Null Appender\n# Trap security logger on the hadoop client side\n#\nlog4j.appender.NullAppender=org.apache.log4j.varia.NullAppender\n\n#\n# Event Counter Appender\n# Sends counts of logging messages at different severity levels to Hadoop Metrics.\n#\nlog4j.ap
 pender.EventCounter=org.apache.hadoop.log.metrics.EventCounter\n\n# Removes \"deprecated\" messages\nlog4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN\n\n#\n# HDFS block state change log from block manager\n#\n# Uncomment the following to suppress normal block state change\n# messages from BlockManager in NameNode.\n#log4j.logger.BlockStateChange=WARN"
-		},
-		"hdfs-site": {
-		  "dfs.datanode.max.transfer.threads": "16384",
-		  "dfs.namenode.handler.count": "100",
-		  "dfs.namenode.startup.delay.block.deletion.sec": "3600"
-		},
-		"hive-env": {
-		  "content": "\n if [ \"$SERVICE\" = \"cli\" ]; then\n   if [ -z \"$DEBUG\" ]; then\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit\"\n   else\n     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n   fi\n fi\n\n# The heap size of the jvm stared by hive shell script can be controlled via:\n\n# Larger heap size may be required when running queries over large number of files or partitions.\n# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be\n# appropriate for hive server (hwi etc).\n\n\n# Set HADOOP_HOME to point to a specific hadoop install directory\nHADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n\n# Hive Configuration Directory can be controlled by:\nexport HIVE_CONF_DIR={{hive_config_dir}}\n\n# Folder containing extra libraries required for hive compilat
 ion/execution can be controlled by:\nif [ \"${HIVE_AUX_JARS_PATH}\" != \"\" ]; then\n  if [ -f \"${HIVE_AUX_JARS_PATH}\" ]; then    \n    export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}\n  elif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n    export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\n  fi\nelif [ -d \"/usr/hdp/current/hive-webhcat/share/hcatalog\" ]; then\n  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar\nfi      \n\nexport METASTORE_PORT={{hive_metastore_port}}",
-		  "hive_dbroot": {"remove": "yes"}
-		},
-		"hive-site": {
-		  "fs.file.impl.disable.cache": {"remove": "yes"},
-		  "fs.hdfs.impl.disable.cache": {"remove": "yes"},
-		  "hive.auto.convert.sortmerge.join.to.mapjoin": "false",
-		  "hive.cbo.enable": "true",
-		  "hive.cli.print.header": "false",
-		  "hive.cluster.delegation.token.store.class": "org.apache.hadoop.hive.thrift.ZooKeeperTokenStore",
-		  "hive.cluster.delegation.token.store.zookeeper.znode": "/hive/cluster/delegation",
-		  "hive.conf.restricted.list": "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role",
-		  "hive.convert.join.bucket.mapjoin.tez": "false",
-		  "hive.exec.compress.intermediate": "false",
-		  "hive.exec.compress.output": "false",
-		  "hive.exec.dynamic.partition": "true",
-		  "hive.exec.dynamic.partition.mode": "nonstrict",
-		  "hive.exec.max.created.files": "100000",
-		  "hive.exec.max.dynamic.partitions": "5000",
-		  "hive.exec.max.dynamic.partitions.pernode": "2000",
-		  "hive.exec.orc.compression.strategy": "SPEED",
-		  "hive.exec.orc.default.compress": "ZLIB",
-		  "hive.exec.orc.default.stripe.size": "67108864",
-		  "hive.exec.parallel": "false",
-		  "hive.exec.parallel.thread.number": "8",
-		  "hive.exec.reducers.bytes.per.reducer": "67108864",
-		  "hive.exec.reducers.max": "1009",
-		  "hive.exec.scratchdir": "/tmp/hive",
-		  "hive.exec.submit.local.task.via.child": "true",
-		  "hive.exec.submitviachild": "false",
-		  "hive.execution.engine": "tez",
-		  "hive.heapsize": {"remove": "yes"},
-		  "hive.fetch.task.aggr": "false",
-		  "hive.fetch.task.conversion": "more",
-		  "hive.fetch.task.conversion.threshold": "1073741824",
-		  "hive.map.aggr.hash.force.flush.memory.threshold": "0.9",
-		  "hive.map.aggr.hash.min.reduction": "0.5",
-		  "hive.map.aggr.hash.percentmemory": "0.5",
-		  "hive.mapjoin.optimized.hashtable": "true",
-		  "hive.merge.mapfiles": "true",
-		  "hive.merge.mapredfiles": "false",
-		  "hive.merge.orcfile.stripe.level": "true",
-		  "hive.merge.rcfile.block.level": "true",
-		  "hive.merge.size.per.task": "256000000",
-		  "hive.merge.smallfiles.avgsize": "16000000",
-		  "hive.merge.tezfiles": "false",
-		  "hive.metastore.authorization.storage.checks": "false",
-		  "hive.metastore.client.connect.retry.delay": "5s",
-		  "hive.metastore.client.socket.timeout": "1800s",
-		  "hive.metastore.connect.retries": "24",
-		  "hive.metastore.failure.retries": "24",
-		  "hive.metastore.server.max.threads": "100000",
-		  "hive.optimize.constant.propagation": "true",
-		  "hive.optimize.mapjoin.mapreduce": {"remove": "yes"},
-		  "hive.optimize.metadataonly": "true",
-		  "hive.optimize.null.scan": "true",
-		  "hive.optimize.sort.dynamic.partition": "false",
-		  "hive.orc.compute.splits.num.threads": "10",
-		  "hive.prewarm.enabled": "false",
-		  "hive.prewarm.numcontainers": "10",
-		  "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory",
-		  "hive.security.metastore.authenticator.manager": "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator",
-		  "hive.security.metastore.authorization.auth.reads": "true",
-		  "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider,org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly",
-		  "hive.server2.allow.user.substitution": "true",
-		  "hive.server2.logging.operation.enabled": "true",
-		  "hive.server2.logging.operation.log.location": "/tmp/hive/operation_logs",
-		  "hive.server2.support.dynamic.service.discovery": "true",
-		  "hive.server2.table.type.mapping": "CLASSIC",
-		  "hive.server2.thrift.http.path": "cliservice",
-		  "hive.server2.thrift.http.port": "10001",
-		  "hive.server2.thrift.max.worker.threads": "500",
-		  "hive.server2.thrift.sasl.qop": "auth",
-		  "hive.server2.use.SSL": "false",
-		  "hive.smbjoin.cache.rows": "10000",
-		  "hive.stats.dbclass": "fs",
-		  "hive.stats.fetch.column.stats": "false",
-		  "hive.stats.fetch.partition.stats": "true",
-		  "hive.support.concurrency": "false",
-		  "hive.tez.auto.reducer.parallelism": "false",
-		  "hive.tez.cpu.vcores": "-1",
-		  "hive.tez.dynamic.partition.pruning": "true",
-		  "hive.tez.dynamic.partition.pruning.max.data.size": "104857600",
-		  "hive.tez.dynamic.partition.pruning.max.event.size": "1048576",
-		  "hive.tez.log.level": "INFO",
-		  "hive.tez.max.partition.factor": "2.0",
-		  "hive.tez.min.partition.factor": "0.25",
-		  "hive.tez.smb.number.waves": "0.5",
-		  "hive.user.install.directory": "/user/",
-		  "hive.vectorized.execution.reduce.enabled": "false",
-		  "hive.vectorized.groupby.checkinterval": "4096",
-		  "hive.zookeeper.client.port": "2181",
-		  "hive.zookeeper.namespace": "hive_zookeeper_namespace",
-		  "hive.auto.convert.sortmerge.join.noconditionaltask": {"remove": "yes"},
-          "hive.server2.enable.impersonation": {"remove": "yes"}
-		},
-		"mapred-env": {
-		  "content": "\n# export JAVA_HOME=/home/y/libexec/jdk1.6.0/\n\nexport HADOOP_JOB_HISTORYSERVER_HEAPSIZE={{jobhistory_heapsize}}\n\nexport HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA\n\n#export HADOOP_JOB_HISTORYSERVER_OPTS=\n#export HADOOP_MAPRED_LOG_DIR=\"\" # Where log files are stored.  $HADOOP_MAPRED_HOME/logs by default.\n#export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger.\n#export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default.\n#export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default\n#export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0.\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\""
-		},
-		"mapred-site": {
-		  "mapreduce.admin.map.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
-		  "mapreduce.admin.reduce.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
-		  "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
-		  "mapreduce.application.classpath": "$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
-		  "mapreduce.application.framework.path": "/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework",
-		  "mapreduce.job.emit-timeline-data": "false",
-		  "mapreduce.jobhistory.bind-host": "0.0.0.0",
-		  "mapreduce.reduce.shuffle.fetch.retry.enabled": "1",
-		  "mapreduce.reduce.shuffle.fetch.retry.interval-ms": "1000",
-		  "mapreduce.reduce.shuffle.fetch.retry.timeout-ms": "30000",
-		  "yarn.app.mapreduce.am.admin-command-opts": {"remove": "yes"},
-		  "yarn.app.mapreduce.am.admin-command-opts": "-Dhdp.version=${hdp.version}",
-		  "yarn.app.mapreduce.am.command-opts": "-Xmx546m -Dhdp.version=${hdp.version}"
-		},
-		"oozie-env": {
-		  "content": "\n#!/bin/bash\n\nif [ -d \"/usr/lib/bigtop-tomcat\" ]; then\n  export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}\n  export CATALINA_BASE=${CATALINA_BASE:-{{oozie_server_dir}}}\n  export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}\n  export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat\nfi\n\n#Set JAVA HOME\nexport JAVA_HOME={{java_home}}\n\nexport JRE_HOME=${JAVA_HOME}\n\n# Set Oozie specific environment variables here.\n\n# Settings for the Embedded Tomcat that runs Oozie\n# Java System properties for Oozie should be specified in this variable\n#\n# export CATALINA_OPTS=\n\n# Oozie configuration file to load from Oozie configuration directory\n#\n# export OOZIE_CONFIG_FILE=oozie-site.xml\n\n# Oozie logs directory\n#\nexport OOZIE_LOG={{oozie_log_dir}}\n\n# Oozie pid directory\n#\nexport CATALINA_PID={{pid_file}}\n\n#Location of the data for oozie\nexport OOZIE_DATA={{oozie_data_dir}}\n\n# Oozie Log4J configuration file to load from Oozie configuration directo
 ry\n#\n# export OOZIE_LOG4J_FILE=oozie-log4j.properties\n\n# Reload interval of the Log4J configuration file, in seconds\n#\n# export OOZIE_LOG4J_RELOAD=10\n\n# The port Oozie server runs\n#\nexport OOZIE_HTTP_PORT={{oozie_server_port}}\n\n# The admin port Oozie server runs\n#\nexport OOZIE_ADMIN_PORT={{oozie_server_admin_port}}\n\n# The host name Oozie server runs on\n#\n# export OOZIE_HTTP_HOSTNAME=`hostname -f`\n\n# The base URL for callback URLs to Oozie\n#\n# export OOZIE_BASE_URL=\"http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie\"\nexport JAVA_LIBRARY_PATH={{hadoop_lib_home}}/native/Linux-amd64-64\n\n# At least 1 minute of retry time to account for server downtime during\n# upgrade/downgrade\nexport OOZIE_CLIENT_OPTS=\"${OOZIE_CLIENT_OPTS} -Doozie.connection.retry.count=5 \"\n\n# This is needed so that Oozie does not run into OOM or GC Overhead limit\n# exceeded exceptions. If the oozie server is handling large number of\n# workflows/coordinator jobs, the memory setting
 s may need to be revised\nexport CATALINA_OPTS=\"${CATALINA_OPTS} -Xmx2048m -XX:MaxPermSize=256m \"",
-		  "oozie_ambari_database": {"remove": "yes"},
-		  "oozie_existing_mysql_database": {"remove": "yes"},
-		  "oozie_existing_mysql_host": {"remove": "yes"},
-		  "oozie_existing_oracle_database": {"remove": "yes"}
-		},
-		"oozie-site": {
-		  "oozie.authentication.simple.anonymous.allowed": "true",
-		  "oozie.service.SchemaService.wf.ext.schemas": "shell-action-0.1.xsd,shell-action-0.2.xsd,shell-action-0.3.xsd,email-action-0.1.xsd,email-action-0.2.xsd,hive-action-0.2.xsd,hive-action-0.3.xsd,hive-action-0.4.xsd,hive-action-0.5.xsd,sqoop-action-0.2.xsd,sqoop-action-0.3.xsd,sqoop-action-0.4.xsd,ssh-action-0.1.xsd,ssh-action-0.2.xsd,distcp-action-0.1.xsd,distcp-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd",
-		  "oozie.service.coord.check.maximum.frequency": "false",
-		  "oozie.services": "\n      org.apache.oozie.service.SchedulerService,\n      org.apache.oozie.service.InstrumentationService,\n      org.apache.oozie.service.MemoryLocksService,\n      org.apache.oozie.service.UUIDService,\n      org.apache.oozie.service.ELService,\n      org.apache.oozie.service.AuthorizationService,\n      org.apache.oozie.service.UserGroupInformationService,\n      org.apache.oozie.service.HadoopAccessorService,\n      org.apache.oozie.service.JobsConcurrencyService,\n      org.apache.oozie.service.URIHandlerService,\n      org.apache.oozie.service.DagXLogInfoService,\n      org.apache.oozie.service.SchemaService,\n      org.apache.oozie.service.LiteWorkflowAppService,\n      org.apache.oozie.service.JPAService,\n      org.apache.oozie.service.StoreService,\n      org.apache.oozie.service.CoordinatorStoreService,\n      org.apache.oozie.service.SLAStoreService,\n      org.apache.oozie.service.DBLiteWorkflowStoreService,\n      org.apache.oozie.service.Callbac
 kService,\n      org.apache.oozie.service.ShareLibService,\n      org.apache.oozie.service.CallableQueueService,\n      org.apache.oozie.service.ActionService,\n      org.apache.oozie.service.ActionCheckerService,\n      org.apache.oozie.service.RecoveryService,\n      org.apache.oozie.service.PurgeService,\n      org.apache.oozie.service.CoordinatorEngineService,\n      org.apache.oozie.service.BundleEngineService,\n      org.apache.oozie.service.DagEngineService,\n      org.apache.oozie.service.CoordMaterializeTriggerService,\n      org.apache.oozie.service.StatusTransitService,\n      org.apache.oozie.service.PauseTransitService,\n      org.apache.oozie.service.GroupsService,\n      org.apache.oozie.service.ProxyUserService,\n      org.apache.oozie.service.XLogStreamingService,\n      org.apache.oozie.service.JvmPauseMonitorService"
-		},
-		"storm-env": {
-		  "content": "\n#!/bin/bash\n\n# Set Storm specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\n\n# export STORM_CONF_DIR=\"\"\nexport STORM_HOME=/usr/hdp/current/storm-client"
-		},
-		"storm-site": {
-		  "_storm.min.ruid": "null",
-		  "_storm.thrift.nonsecure.transport": "backtype.storm.security.auth.SimpleTransportPlugin",
-		  "_storm.thrift.secure.transport": "backtype.storm.security.auth.kerberos.KerberosSaslTransportPlugin",
-		  "drpc.childopts": "-Xmx768m _JAAS_PLACEHOLDER",
-		  "java.library.path": "/usr/local/lib:/opt/local/lib:/usr/lib:/usr/hdp/current/storm-client/lib",
-		  "logviewer.childopts": "-Xmx128m _JAAS_PLACEHOLDER",
-		  "nimbus.childopts": "-Xmx1024m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm-nimbus/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-nimbus/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM",
-		  "storm.thrift.transport": {"remove": "yes"},
-		  "supervisor.childopts": "-Xmx256m _JAAS_PLACEHOLDER -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=56431 -javaagent:/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-supervisor/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM",
-		  "ui.childopts": "-Xmx768m _JAAS_PLACEHOLDER",
-		  "worker.childopts": "-Xmx768m _JAAS_PLACEHOLDER -javaagent:/usr/hdp/current/storm-client/contrib/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/hdp/current/storm-client/contrib/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM"
-		},
-		"tez-site": {
-		  "tez.am.container.idle.release-timeout-max.millis": "20000",
-		  "tez.am.container.idle.release-timeout-min.millis": "10000",
-		  "tez.am.container.reuse.non-local-fallback.enabled": "false",
-		  "tez.am.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
-		  "tez.am.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC",
-		  "tez.am.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
-		  "tez.am.max.app.attempts": "2",
-		  "tez.am.maxtaskfailures.per.node": "10",
-		  "tez.am.resource.memory.mb": "1364",
-		  "tez.am.tez-ui.history-url.template": "__HISTORY_URL_BASE__?viewPath=%2F%23%2Ftez-app%2F__APPLICATION_ID__",
-		  "tez.cluster.additional.classpath.prefix": "/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
-		  "tez.counters.max": "2000",
-		  "tez.counters.max.groups": "1000",
-		  "tez.generate.debug.artifacts": "false",
-		  "tez.grouping.max-size": "1073741824",
-		  "tez.grouping.min-size": "16777216",
-		  "tez.grouping.split-waves": "1.7",
-		  "tez.history.logging.service.class": "org.apache.tez.dag.history.logging.ats.ATSHistoryLoggingService",
-		  "tez.lib.uris": "/hdp/apps/${hdp.version}/tez/tez.tar.gz",
-		  "tez.runtime.compress": "true",
-		  "tez.runtime.compress.codec": "org.apache.hadoop.io.compress.SnappyCodec",
-		  "tez.runtime.convert.user-payload.to.history-text": "false",
-		  "tez.runtime.io.sort.mb": "272",
-		  "tez.runtime.unordered.output.buffer.size-mb": "51",
-		  "tez.session.client.timeout.secs": "-1",
-		  "tez.shuffle-vertex-manager.max-src-fraction": "0.4",
-		  "tez.shuffle-vertex-manager.min-src-fraction": "0.2",
-		  "tez.task.am.heartbeat.counter.interval-ms.max": "4000",
-		  "tez.task.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
-		  "tez.task.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC",
-		  "tez.task.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
-		  "tez.task.max-events-per-heartbeat": "500",
-		  "tez.task.resource.memory.mb": "682",
-		  "tez.use.cluster.hadoop-libs": "false",
-		  "tez.am.container.session.delay-allocation-millis": {"remove": "yes"},
-		  "tez.am.grouping.max-size": {"remove": "yes"},
-		  "tez.am.grouping.min-size": {"remove": "yes"},
-		  "tez.am.grouping.split-waves": {"remove": "yes"},
-		  "tez.am.java.opts": {"remove": "yes"},
-		  "tez.am.shuffle-vertex-manager.max-src-fraction": {"remove": "yes"},
-		  "tez.am.shuffle-vertex-manager.min-src-fraction": {"remove": "yes"},
-		  "tez.lib.uris": "/hdp/apps/${hdp.version}/tez/tez.tar.gz",
-		  "tez.runtime.intermediate-output.compress.codec": {"remove": "yes"},
-		  "tez.use.cluster.hadoop-libs": "false",
-		  "tez.yarn.ats.enabled": {"remove": "yes"},
-		  "tez.runtime.intermediate-input.compress.codec": {"remove": "yes"},
-          "tez.runtime.intermediate-input.is-compressed": {"remove": "yes"},
-          "tez.runtime.intermediate-output.should-compress": {"remove": "yes"},
-		  "tez.am.env": {"remove": "yes"}
-		},
-		"webhcat-log4j": {
-		  "content": "\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# Define some default values that can be overridden by system properties\nwebhcat.root.logger = INFO, standard\nwebhcat.log.dir = .\nwebhcat.log.file = webhcat.log\n\nlog
 4j.rootLogger = ${webhcat.root.logger}\n\n# Logging Threshold\nlog4j.threshhold = DEBUG\n\nlog4j.appender.standard  =  org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.standard.File = ${webhcat.log.dir}/${webhcat.log.file}\n\n# Rollver at midnight\nlog4j.appender.DRFA.DatePattern = .yyyy-MM-dd\n\nlog4j.appender.DRFA.layout = org.apache.log4j.PatternLayout\n\nlog4j.appender.standard.layout = org.apache.log4j.PatternLayout\nlog4j.appender.standard.layout.conversionPattern = %-5p | %d{DATE} | %c | %m%n\n\n# Class logging settings\nlog4j.logger.com.sun.jersey = DEBUG\nlog4j.logger.com.sun.jersey.spi.container.servlet.WebComponent = ERROR\nlog4j.logger.org.apache.hadoop = INFO\nlog4j.logger.org.apache.hadoop.conf = WARN\nlog4j.logger.org.apache.zookeeper = WARN\nlog4j.logger.org.eclipse.jetty = INFO"
-		},
-		"webhcat-site": {
-		  "templeton.hadoop": "/usr/hdp/current/hadoop-client/bin/hadoop",
-		  "templeton.hcat": "/usr/hdp/current/hive-client/bin/hcat",
-		  "templeton.hive.archive": "hdfs:///hdp/apps/${hdp.version}/hive/hive.tar.gz",
-		  "templeton.jar": "/usr/hdp/current/hive-webhcat/share/webhcat/svr/lib/hive-webhcat-*.jar",
-		  "templeton.libjars": "/usr/hdp/current/zookeeper-client/zookeeper.jar",
-		  "templeton.pig.archive": "hdfs:///hdp/apps/${hdp.version}/pig/pig.tar.gz",
-		  "templeton.sqoop.archive": "hdfs:///hdp/apps/${hdp.version}/sqoop/sqoop.tar.gz",
-		  "templeton.sqoop.home": "sqoop.tar.gz/sqoop",
-		  "templeton.sqoop.path": "sqoop.tar.gz/sqoop/bin/sqoop",
-		  "templeton.streaming.jar": "hdfs:///hdp/apps/${hdp.version}/mapreduce/hadoop-streaming.jar"
-		},
-		"yarn-site": {
-		  "hadoop.registry.rm.enabled": "false",
-		  "yarn.application.classpath": "$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*",
-		  "yarn.client.nodemanager-connect.max-wait-ms": "60000",
-		  "yarn.client.nodemanager-connect.retry-interval-ms": "10000",
-		  "yarn.node-labels.fs-store.retry-policy-spec": "2000, 500",
-		  "yarn.node-labels.fs-store.root-dir": "/system/yarn/node-labels",
-		  "yarn.node-labels.manager-class": "org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager",
-		  "yarn.nodemanager.bind-host": "0.0.0.0",
-		  "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage": "90",
-		  "yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb": "1000",
-		  "yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "hadoop-yarn",
-		  "yarn.nodemanager.linux-container-executor.cgroups.mount": "false",
-		  "yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage": "false",
-		  "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler",
-		  "yarn.nodemanager.log-aggregation.debug-enabled": "false",
-		  "yarn.nodemanager.log-aggregation.num-log-files-per-app": "30",
-		  "yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds": "-1",
-		  "yarn.nodemanager.recovery.dir": "{{yarn_log_dir_prefix}}/nodemanager/recovery-state",
-		  "yarn.nodemanager.recovery.enabled": "true",
-		  "yarn.nodemanager.resource.cpu-vcores": "1",
-		  "yarn.nodemanager.resource.percentage-physical-cpu-limit": "100",
-		  "yarn.resourcemanager.bind-host": "0.0.0.0",
-		  "yarn.resourcemanager.connect.max-wait.ms": "900000",
-		  "yarn.resourcemanager.connect.retry-interval.ms": "30000",
-		  "yarn.resourcemanager.fs.state-store.retry-policy-spec": "2000, 500",
-		  "yarn.resourcemanager.fs.state-store.uri": " ",
-		  "yarn.resourcemanager.ha.enabled": "false",
-		  "yarn.resourcemanager.recovery.enabled": "true",
-		  "yarn.resourcemanager.state-store.max-completed-applications": "${yarn.resourcemanager.max-completed-applications}",
-		  "yarn.resourcemanager.store.class": "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore",
-		  "yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size": "10",
-		  "yarn.resourcemanager.system-metrics-publisher.enabled": "true",
-		  "yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled": "false",
-		  "yarn.resourcemanager.work-preserving-recovery.enabled": "true",
-		  "yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms": "10000",
-		  "yarn.resourcemanager.zk-acl": "world:anyone:rwcda",
-		  "yarn.resourcemanager.zk-num-retries": "1000",
-		  "yarn.resourcemanager.zk-retry-interval-ms": "1000",
-		  "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore",
-		  "yarn.resourcemanager.zk-timeout-ms": "10000",
-		  "yarn.timeline-service.bind-host": "0.0.0.0",
-		  "yarn.timeline-service.client.max-retries": "30",
-		  "yarn.timeline-service.client.retry-interval-ms": "1000",
-		  "yarn.timeline-service.http-authentication.simple.anonymous.allowed": "true",
-		  "yarn.timeline-service.http-authentication.type": "simple",
-		  "yarn.timeline-service.leveldb-timeline-store.read-cache-size": "104857600",
-		  "yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size": "10000",
-		  "yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size": "10000"
-		},
-		"hiveserver2-site": {
-          "hive.security.authenticator.manager": "org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator",
-          "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"
-        },
-		"ranger-hbase-plugin-properties": {
-          "REPOSITORY_CONFIG_PASSWORD": "hbase",
-          "REPOSITORY_CONFIG_USERNAME": "hbase",
-          "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
-          "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
-          "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
-          "SSL_TRUSTSTORE_PASSWORD": "changeit",
-          "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true",
-          "XAAUDIT.DB.IS_ENABLED": "true",
-          "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
-          "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
-          "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
-          "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
-          "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
-          "XAAUDIT.HDFS.IS_ENABLED": "false",
-          "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
-          "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
-          "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
-          "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
-          "common.name.for.certificate": "-",
-          "policy_user": "ambari-qa",
-          "ranger-hbase-plugin-enabled": "No"
-        },
-		"ranger-hdfs-plugin-properties": {
-          "REPOSITORY_CONFIG_PASSWORD": "hadoop",
-          "REPOSITORY_CONFIG_USERNAME": "hadoop",
-          "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
-          "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
-          "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
-          "SSL_TRUSTSTORE_PASSWORD": "changeit",
-          "XAAUDIT.DB.IS_ENABLED": "true",
-          "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
-          "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
-          "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
-          "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
-          "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
-          "XAAUDIT.HDFS.IS_ENABLED": "false",
-          "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
-          "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
-          "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
-          "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
-          "common.name.for.certificate": "-",
-          "hadoop.rpc.protection": "-",
-          "policy_user": "ambari-qa",
-          "ranger-hdfs-plugin-enabled": "No"
-        },
-		"ranger-hive-plugin-properties": {
-          "REPOSITORY_CONFIG_PASSWORD": "hive",
-          "REPOSITORY_CONFIG_USERNAME": "hive",
-          "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
-          "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
-          "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
-          "SSL_TRUSTSTORE_PASSWORD": "changeit",
-          "UPDATE_XAPOLICIES_ON_GRANT_REVOKE": "true",
-          "XAAUDIT.DB.IS_ENABLED": "true",
-          "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
-          "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
-          "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
-          "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
-          "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
-          "XAAUDIT.HDFS.IS_ENABLED": "false",
-          "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
-          "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
-          "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
-          "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
-          "common.name.for.certificate": "-",
-          "jdbc.driverClassName": "org.apache.hive.jdbc.HiveDriver",
-          "policy_user": "ambari-qa",
-          "ranger-hive-plugin-enabled": "No"
-        },
-		"ranger-storm-plugin-properties": {
-          "REPOSITORY_CONFIG_PASSWORD": "stormtestuser",
-          "REPOSITORY_CONFIG_USERNAME": "stormtestuser@EXAMPLE.COM",
-          "SSL_KEYSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-keystore.jks",
-          "SSL_KEYSTORE_PASSWORD": "myKeyFilePassword",
-          "SSL_TRUSTSTORE_FILE_PATH": "/etc/hadoop/conf/ranger-plugin-truststore.jks",
-          "SSL_TRUSTSTORE_PASSWORD": "changeit",
-          "XAAUDIT.DB.IS_ENABLED": "true",
-          "XAAUDIT.HDFS.DESTINATION_DIRECTORY": "hdfs://__REPLACE__NAME_NODE_HOST:8020/ranger/audit/%app-type%/%time:yyyyMMdd%",
-          "XAAUDIT.HDFS.DESTINTATION_FILE": "%hostname%-audit.log",
-          "XAAUDIT.HDFS.DESTINTATION_FLUSH_INTERVAL_SECONDS": "900",
-          "XAAUDIT.HDFS.DESTINTATION_OPEN_RETRY_INTERVAL_SECONDS": "60",
-          "XAAUDIT.HDFS.DESTINTATION_ROLLOVER_INTERVAL_SECONDS": "86400",
-          "XAAUDIT.HDFS.IS_ENABLED": "false",
-          "XAAUDIT.HDFS.LOCAL_ARCHIVE_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit/archive",
-          "XAAUDIT.HDFS.LOCAL_ARCHIVE_MAX_FILE_COUNT": "10",
-          "XAAUDIT.HDFS.LOCAL_BUFFER_DIRECTORY": "__REPLACE__LOG_DIR/hadoop/%app-type%/audit",
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FILE": "%time:yyyyMMdd-HHmm.ss%.log",
-          "XAAUDIT.HDFS.LOCAL_BUFFER_FLUSH_INTERVAL_SECONDS": "60",
-          "XAAUDIT.HDFS.LOCAL_BUFFER_ROLLOVER_INTERVAL_SECONDS": "600",
-          "common.name.for.certificate": "-",
-          "policy_user": "storm",
-          "ranger-storm-plugin-enabled": "No"
-        }
-      }
-    }
-  ]
-}
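
The deleted catalog above is a declarative description of a stack config upgrade: each config-type can be flagged with "merged-copy": "yes", and each property entry is either a plain string (the value to set or override) or {"remove": "yes"} (the key is dropped during the upgrade). A minimal sketch of how such an entry could be interpreted follows; it is an illustration under those assumptions, not Ambari's actual upgrade helper, and the function name apply_catalog_properties is hypothetical.

# Illustrative sketch: applying one config-type's catalog properties to an
# existing site config. A plain string sets/overrides the property, while
# {"remove": "yes"} deletes it; merged_copy=True mirrors "merged-copy": "yes"
# by starting from the current config instead of an empty one.
def apply_catalog_properties(current_config, catalog_properties, merged_copy=True):
    """Return the upgraded config dict for one config-type."""
    upgraded = dict(current_config) if merged_copy else {}
    for key, value in catalog_properties.items():
        if isinstance(value, dict) and value.get("remove") == "yes":
            upgraded.pop(key, None)   # property is removed on upgrade
        else:
            upgraded[key] = value     # property is added or overridden
    return upgraded

# Example using two entries from the hbase-site section above:
site = {"hbase.hstore.flush.retries.number": "120"}
patch = {"hbase.hregion.majorcompaction": "604800000",
         "hbase.hstore.flush.retries.number": {"remove": "yes"}}
print(apply_catalog_properties(site, patch))
# -> {'hbase.hregion.majorcompaction': '604800000'}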

http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.json b/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.json
deleted file mode 100644
index 207bc58..0000000
--- a/ambari-server/src/main/resources/upgrade/catalog/UpgradeCatalog_2.1_to_2.2.json
+++ /dev/null
@@ -1,292 +0,0 @@
-{
-  "version": "1.0",
-  "stacks": [
-    {
-      "name": "HDP",
-      "old-version": "2.1",
-      "target-version": "2.2",
-      "options": {
-        "config-types": {
-          "core-site": {
-            "merged-copy": "yes"
-          },
-          "hdfs-site": {
-            "merged-copy": "yes"
-          },
-          "yarn-site": {
-            "merged-copy": "yes"
-          },
-          "mapred-site": {
-            "merged-copy": "yes"
-          },
-          "hbase-site": {
-            "merged-copy": "yes"
-          },
-          "hive-site": {
-            "merged-copy": "yes"
-          },
-          "oozie-site": {
-            "merged-copy": "yes"
-          },
-          "webhcat-site": {
-            "merged-copy": "yes"
-          },
-          "tez-site":{
-            "merged-copy": "yes"
-          },
-          "falcon-startup.properties": {
-            "merged-copy": "yes"
-          }
-        }
-      },
-      "properties": {
-        "falcon-startup.properties": {
-          "*.application.services": "org.apache.falcon.security.AuthenticationInitializationService, org.apache.falcon.workflow.WorkflowJobEndNotificationService, org.apache.falcon.service.ProcessSubscriberService, org.apache.falcon.entity.store.ConfigurationStore, org.apache.falcon.rerun.service.RetryService, org.apache.falcon.rerun.service.LateRunService, org.apache.falcon.service.LogCleanupService",
-          "*.dfs.namenode.kerberos.principal": "nn/_HOST@EXAMPLE.COM",
-          "*.falcon.enableTLS": "false",
-          "*.falcon.http.authentication.cookie.domain": "EXAMPLE.COM",
-          "*.falcon.http.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
-          "*.falcon.http.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
-          "*.falcon.security.authorization.admin.groups": "falcon",
-          "*.falcon.security.authorization.admin.users": "falcon,ambari-qa",
-          "*.falcon.security.authorization.enabled": "false",
-          "*.falcon.security.authorization.provider": "org.apache.falcon.security.DefaultAuthorizationProvider",
-          "*.falcon.security.authorization.superusergroup": "falcon",
-          "*.falcon.service.authentication.kerberos.keytab": "/etc/security/keytabs/falcon.service.keytab",
-          "*.falcon.service.authentication.kerberos.principal": "falcon/_HOST@EXAMPLE.COM",
-          "*.journal.impl": "org.apache.falcon.transaction.SharedFileSystemJournal",
-          "prism.application.services": "org.apache.falcon.entity.store.ConfigurationStore",
-          "prism.configstore.listeners": "org.apache.falcon.entity.v0.EntityGraph, org.apache.falcon.entity.ColoClusterRelation, org.apache.falcon.group.FeedGroupMap"
-        },
-        "tez-site":{
-          "tez.am.container.idle.release-timeout-max.millis": "20000",
-          "tez.am.container.idle.release-timeout-min.millis": "10000",
-          "tez.am.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
-          "tez.am.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC",
-          "tez.am.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
-          "tez.am.max.app.attempts": "2",
-          "tez.am.maxtaskfailures.per.node": "10",
-          "tez.cluster.additional.classpath.prefix": "/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
-          "tez.counters.max": "2000",
-          "tez.counters.max.groups": "1000",
-          "tez.generate.debug.artifacts": "false",
-          "tez.grouping.max-size": "1073741824",
-          "tez.grouping.min-size": "16777216",
-          "tez.grouping.split-waves": "1.7",
-          "tez.history.logging.service.class": "org.apache.tez.dag.history.logging.ats.ATSHistoryLoggingService",
-          "tez.runtime.compress": "true",
-          "tez.runtime.compress.codec": "org.apache.hadoop.io.compress.SnappyCodec",
-          "tez.runtime.io.sort.mb": "272",
-          "tez.runtime.unordered.output.buffer.size-mb": "51",
-          "tez.shuffle-vertex-manager.max-src-fraction": "0.4",
-          "tez.shuffle-vertex-manager.min-src-fraction": "0.2",
-          "tez.task.am.heartbeat.counter.interval-ms.max": "4000",
-          "tez.task.launch.cluster-default.cmd-opts": "-server -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
-          "tez.task.launch.cmd-opts": "-XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps -XX:+UseNUMA -XX:+UseParallelGC",
-          "tez.task.launch.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64",
-          "tez.task.max-events-per-heartbeat": "500",
-          "tez.task.resource.memory.mb": "682",
-          "tez.am.container.reuse.non-local-fallback.enabled": "false",
-          "tez.am.resource.memory.mb": "1364",
-          "tez.lib.uris": "/hdp/apps/${hdp.version}/tez/tez.tar.gz",
-          "tez.session.client.timeout.secs": "-1",
-          "tez.am.container.session.delay-allocation-millis": {"remove": "yes"},
-          "tez.am.env": {"remove": "yes"},
-          "tez.am.grouping.max-size": {"remove": "yes"},
-          "tez.am.grouping.min-size": {"remove": "yes"},
-          "tez.am.grouping.split-waves": {"remove": "yes"},
-          "tez.am.java.opt": {"remove": "yes"},
-          "tez.am.shuffle-vertex-manager.max-src-fraction": {"remove": "yes"},
-          "tez.am.shuffle-vertex-manager.min-src-fraction": {"remove": "yes"},
-          "tez.runtime.intermediate-input.compress.codec": {"remove": "yes"},
-          "tez.runtime.intermediate-input.is-compressed": {"remove": "yes"},
-          "tez.runtime.intermediate-output.compress.codec": {"remove": "yes"},
-          "tez.runtime.intermediate-output.should-compress": {"remove": "yes"},
-          "tez.yarn.ats.enabled": {"remove": "yes"}
-        },
-        "webhcat-site": {
-          "templeton.storage.class": "org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage",
-          "templeton.port": "50111"
-        },
-        "oozie-site": {
-          "oozie.authentication.simple.anonymous.allowed": "true",
-          "oozie.service.coord.check.maximum.frequency": "false",
-          "oozie.service.HadoopAccessorService.kerberos.enabled": "false",
-          "oozie.service.SchemaService.wf.ext.schemas": "shell-action-0.1.xsd,shell-action-0.2.xsd,shell-action-0.3.xsd,email-action-0.1.xsd,email-action-0.2.xsd,hive-action-0.2.xsd,hive-action-0.3.xsd,hive-action-0.4.xsd,hive-action-0.5.xsd,sqoop-action-0.2.xsd,sqoop-action-0.3.xsd,sqoop-action-0.4.xsd,ssh-action-0.1.xsd,ssh-action-0.2.xsd,distcp-action-0.1.xsd,distcp-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd",
-          "oozie.services.ext": "org.apache.oozie.service.JMSAccessorService,org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService"
-        },
-        "hive-site": {
-          "hive.auto.convert.sortmerge.join.to.mapjoin": "false",
-          "hive.cbo.enable": "true",
-          "hive.cli.print.header": "false",
-          "hive.cluster.delegation.token.store.class": "org.apache.hadoop.hive.thrift.ZooKeeperTokenStore",
-          "hive.cluster.delegation.token.store.zookeeper.znode": "/hive/cluster/delegation",
-          "hive.conf.restricted.list": "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role",
-          "hive.convert.join.bucket.mapjoin.tez": "false",
-          "hive.exec.compress.intermediate": "false",
-          "hive.exec.compress.output": "false",
-          "hive.exec.dynamic.partition": "true",
-          "hive.exec.dynamic.partition.mode": "nonstrict",
-          "hive.exec.max.created.files": "100000",
-          "hive.exec.max.dynamic.partitions": "5000",
-          "hive.exec.max.dynamic.partitions.pernode": "2000",
-          "hive.exec.orc.compression.strategy": "SPEED",
-          "hive.exec.orc.default.compress": "ZLIB",
-          "hive.exec.orc.default.stripe.size": "67108864",
-          "hive.exec.parallel": "false",
-          "hive.exec.parallel.thread.number": "8",
-          "hive.exec.reducers.bytes.per.reducer": "67108864",
-          "hive.exec.reducers.max": "1009",
-          "hive.exec.scratchdir": "/tmp/hive",
-          "hive.exec.submit.local.task.via.child": "true",
-          "hive.exec.submitviachild": "false",
-          "hive.fetch.task.aggr": "false",
-          "hive.fetch.task.conversion": "more",
-          "hive.fetch.task.conversion.threshold": "1073741824",
-          "hive.map.aggr.hash.force.flush.memory.threshold": "0.9",
-          "hive.map.aggr.hash.min.reduction": "0.5",
-          "hive.map.aggr.hash.percentmemory": "0.5",
-          "hive.mapjoin.optimized.hashtable": "true",
-          "hive.merge.mapfiles": "true",
-          "hive.merge.mapredfiles": "false",
-          "hive.merge.orcfile.stripe.level": "true",
-          "hive.merge.rcfile.block.level": "true",
-          "hive.merge.size.per.task": "256000000",
-          "hive.merge.smallfiles.avgsize": "16000000",
-          "hive.merge.tezfiles": "false",
-          "hive.metastore.authorization.storage.checks": "false",
-          "hive.metastore.client.connect.retry.delay": "5s",
-          "hive.metastore.connect.retries": "24",
-          "hive.metastore.failure.retries": "24",
-          "hive.metastore.server.max.threads": "100000",
-          "hive.optimize.constant.propagation": "true",
-          "hive.optimize.metadataonly": "true",
-          "hive.optimize.null.scan": "true",
-          "hive.optimize.sort.dynamic.partition": "false",
-          "hive.orc.compute.splits.num.threads": "10",
-          "hive.prewarm.enabled": "false",
-          "hive.prewarm.numcontainers": "10",
-          "hive.security.metastore.authenticator.manager": "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator",
-          "hive.security.metastore.authorization.auth.reads": "true",
-          "hive.server2.allow.user.substitution": "true",
-          "hive.server2.logging.operation.enabled": "true",
-          "hive.server2.logging.operation.log.location": "/tmp/hive/operation_logs",
-          "hive.server2.table.type.mapping": "CLASSIC",
-          "hive.server2.thrift.http.path": "cliservice",
-          "hive.server2.thrift.http.port": "10001",
-          "hive.server2.thrift.max.worker.threads": "500",
-          "hive.server2.thrift.sasl.qop": "auth",
-          "hive.server2.transport.mode": "binary",
-          "hive.server2.use.SSL": "false",
-          "hive.smbjoin.cache.rows": "10000",
-          "hive.stats.dbclass": "fs",
-          "hive.stats.fetch.column.stats": "false",
-          "hive.stats.fetch.partition.stats": "true",
-          "hive.support.concurrency": "false",
-          "hive.tez.auto.reducer.parallelism": "false",
-          "hive.tez.cpu.vcores": "-1",
-          "hive.tez.dynamic.partition.pruning": "true",
-          "hive.tez.dynamic.partition.pruning.max.data.size": "104857600",
-          "hive.tez.dynamic.partition.pruning.max.event.size": "1048576",
-          "hive.tez.log.level": "INFO",
-          "hive.tez.max.partition.factor": "2.0",
-          "hive.tez.min.partition.factor": "0.25",
-          "hive.tez.smb.number.waves": "0.5",
-          "hive.user.install.directory": "/user/",
-          "hive.vectorized.execution.reduce.enabled": "false",
-          "hive.zookeeper.client.port": "2181",
-          "hive.zookeeper.namespace": "hive_zookeeper_namespace",
-          "hive.metastore.client.socket.timeout": "1800s",
-          "hive.optimize.reducededuplication.min.reducer": "4",
-          "hive.security.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory",
-          "hive.security.metastore.authorization.manager": "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider,org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly",
-          "hive.server2.support.dynamic.service.discovery": "true",
-          "hive.vectorized.groupby.checkinterval": "4096",
-          "fs.file.impl.disable.cache": "true",
-          "fs.hdfs.impl.disable.cache": "true"
-        },
-        "hbase-site": {
-          "hbase.hregion.majorcompaction.jitter": "0.50",
-          "hbase.hregion.majorcompaction": "604800000",
-          "hbase.hregion.memstore.block.multiplier": "4",
-          "hbase.hstore.flush.retries.number": {"remove": "yes"}
-        },
-        "mapred-site": {
-          "mapreduce.job.emit-timeline-data": "false",
-          "mapreduce.jobhistory.bind-host": "0.0.0.0",
-          "mapreduce.reduce.shuffle.fetch.retry.enabled": "1",
-          "mapreduce.reduce.shuffle.fetch.retry.interval-ms": "1000",
-          "mapreduce.reduce.shuffle.fetch.retry.timeout-ms": "30000",
-          "mapreduce.application.framework.path": "/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework",
-          "mapreduce.admin.map.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
-          "mapreduce.admin.reduce.child.java.opts": "-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}",
-          "yarn.app.mapreduce.am.admin-command-opts": "-Dhdp.version=${hdp.version}",
-          "yarn.app.mapreduce.am.command-opts": "-Xmx546m -Dhdp.version=${hdp.version}",
-          "mapreduce.application.classpath": "$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:$PWD/mr-framework/hadoop/share/hadoop/tools/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure",
-          "mapreduce.admin.user.env": "LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64"
-        },
-        "core-site": {
-          "hadoop.http.authentication.simple.anonymous.allowed": "true"
-        },
-        "hdfs-site": {
-          "dfs.namenode.startup.delay.block.deletion.sec": "3600",
-          "dfs.datanode.max.transfer.threads": "4096"
-        },
-        "yarn-site": {
-          "yarn.application.classpath": "$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*",
-          "hadoop.registry.rm.enabled": "false",
-          "yarn.client.nodemanager-connect.max-wait-ms": "900000",
-          "yarn.client.nodemanager-connect.retry-interval-ms": "10000",
-          "yarn.node-labels.fs-store.retry-policy-spec": "2000, 500",
-          "yarn.node-labels.fs-store.root-dir": "/system/yarn/node-labels",
-          "yarn.node-labels.manager-class": "org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager",
-          "yarn.nodemanager.bind-host": "0.0.0.0",
-          "yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage": "90",
-          "yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb": "1000",
-          "yarn.nodemanager.linux-container-executor.cgroups.hierarchy": "hadoop-yarn",
-          "yarn.nodemanager.linux-container-executor.cgroups.mount": "false",
-          "yarn.nodemanager.linux-container-executor.cgroups.strictresource-usage": "false",
-          "yarn.nodemanager.linux-container-executor.resources-handler.class": "org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler",
-          "yarn.nodemanager.log-aggregation.debug-enabled": "false",
-          "yarn.nodemanager.log-aggregation.num-log-files-er-app": "30",
-          "yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds": "-1",
-          "yarn.nodemanager.recovery.dir": "/var/log/hadoop-yarn/nodemanager/recovery-state",
-          "yarn.nodemanager.recovery.enabled": "false",
-          "yarn.nodemanager.resource.cpu-vcores": "1",
-          "yarn.nodemanager.resource.percentage-physical-cpu-limit": "100",
-          "yarn.resourcemanager.bind-host": "0.0.0.0",
-          "yarn.resourcemanager.connect.max-wait.ms": "900000",
-          "yarn.resourcemanager.connect.retry-interval.ms": "30000",
-          "yarn.resourcemanager.fs.state-store.retry-policy-spec": "2000, 500",
-          "yarn.resourcemanager.fs.state-store.uri": " ",
-          "yarn.resourcemanager.ha.enabled": "false",
-          "yarn.resourcemanager.recovery.enabled": "false",
-          "yarn.resourcemanager.state-store.max-completed-applications": "${yarn.resourcemanager.max-completed-applications}",
-          "yarn.resourcemanager.store.class": "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore",
-          "yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size": "10",
-          "yarn.resourcemanager.system-metrics-publisher.enabled": "true",
-          "yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled": "false",
-          "yarn.resourcemanager.work-preserving-recovery.enabled": "false",
-          "yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms": "10000",
-          "yarn.resourcemanager.zk-acl": "world:anyone:rwcda",
-          "yarn.resourcemanager.zk-address": {
-            "value": "{ZOOKEEPER_QUORUM}",
-            "template": "yes"
-          },
-          "yarn.resourcemanager.zk-num-retries": "1000",
-          "yarn.resourcemanager.zk-retry-interval-ms": "1000",
-          "yarn.resourcemanager.zk-state-store.parent-path": "/rmstore",
-          "yarn.resourcemanager.zk-timeout-ms": "10000",
-          "yarn.timeline-service.bind-host": "0.0.0.0",
-          "yarn.timeline-service.client.max-retries": "30",
-          "yarn.timeline-service.client.retry-interval-ms": "1000",
-          "yarn.timeline-service.http-authentication.simple.anonymous.allowed": "true",
-          "yarn.timeline-service.http-authentication.type": "simple",
-          "yarn.timeline-service.leveldb-timeline-store.read-cache-size": "104857600",
-          "yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size": "10000",
-          "yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size": "10000"
-        }
-      }
-    }
-  ]
-}


[32/63] [abbrv] ambari git commit: AMBARI-21344. Add Services Using Repository ID (alexantonenko)

Posted by ab...@apache.org.
AMBARI-21344. Add Services Using Repository ID (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/08dd492e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/08dd492e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/08dd492e

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 08dd492e5fb801355be8e2dcd895d9cb09d3dd3c
Parents: 4522cf5
Author: Alex Antonenko <hi...@gmail.com>
Authored: Mon Jun 26 17:44:29 2017 +0300
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Tue Jun 27 11:05:14 2017 +0300

----------------------------------------------------------------------
 .../app/controllers/wizard/step8_controller.js      | 16 ++++++----------
 ambari-web/app/models/stack.js                      |  4 +++-
 2 files changed, 9 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/08dd492e/ambari-web/app/controllers/wizard/step8_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step8_controller.js b/ambari-web/app/controllers/wizard/step8_controller.js
index fa44a24..42519e0 100644
--- a/ambari-web/app/controllers/wizard/step8_controller.js
+++ b/ambari-web/app/controllers/wizard/step8_controller.js
@@ -913,6 +913,9 @@ App.WizardStep8Controller = Em.Controller.extend(App.AddSecurityConfigs, App.wiz
         installerController.postVersionDefinitionFileStep8(versionData.isXMLdata, versionData.data).done(function (versionInfo) {
           if (versionInfo.id && versionInfo.stackName && versionInfo.stackVersion) {
             var selectedStack = App.Stack.find().findProperty('isSelected', true);
+            if (selectedStack) {
+              selectedStack.set('versionInfoId', versionInfo.id);
+            }
             installerController.updateRepoOSInfo(versionInfo, selectedStack).done(function() {
               self._startDeploy();
             });
@@ -1011,20 +1014,13 @@ App.WizardStep8Controller = Em.Controller.extend(App.AddSecurityConfigs, App.wiz
    * @method createSelectedServicesData
    */
   createSelectedServicesData: function () {
-
-    var isInstaller = this.get('isInstaller')
     var selectedStack;
     if (this.get('isInstaller')) {
       selectedStack = App.Stack.find().findProperty('isSelected', true);
     }
-
-    return this.get('selectedServices').map(function (_service) {
-      if (selectedStack) {
-        return {"ServiceInfo": { "service_name": _service.get('serviceName'), "desired_repository_version": selectedStack.get('repositoryVersion') }};
-      } else {
-        return {"ServiceInfo": { "service_name": _service.get('serviceName') }};
-      }
-    });
+    return this.get('selectedServices').map(service => selectedStack ?
+      {"ServiceInfo": { "service_name": service.get('serviceName'), "desired_repository_version_id": selectedStack.get('versionInfoId') }} :
+      {"ServiceInfo": { "service_name": service.get('serviceName') }});
   },
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/08dd492e/ambari-web/app/models/stack.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/stack.js b/ambari-web/app/models/stack.js
index 47d1c44..657ee5c 100644
--- a/ambari-web/app/models/stack.js
+++ b/ambari-web/app/models/stack.js
@@ -34,6 +34,8 @@ App.Stack = DS.Model.extend({
   operatingSystems: DS.hasMany('App.OperatingSystem'),
   isSelected: DS.attr('boolean', {defaultValue: false}),
 
+  versionInfoId: null,
+
   stackNameVersion: Em.computed.concat('-', 'stackName', 'stackVersion'),
 
   isPatch: Em.computed.equal('type', 'PATCH'),
@@ -81,4 +83,4 @@ App.Stack = DS.Model.extend({
 });
 
 
-App.Stack.FIXTURES = [];
\ No newline at end of file
+App.Stack.FIXTURES = [];
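
As a reading aid (not part of the commit), a minimal sketch of the request body that the reworked createSelectedServicesData produces when a stack is selected; the service names and the id value are hypothetical placeholders, and versionInfoId is assumed to have been populated from the postVersionDefinitionFileStep8 response shown in the first hunk above:

  // Illustrative only: each selected service is now posted with the numeric
  // repository id instead of the repositoryVersion string.
  var body = [
    {"ServiceInfo": {"service_name": "HDFS",      "desired_repository_version_id": 1}},
    {"ServiceInfo": {"service_name": "ZOOKEEPER", "desired_repository_version_id": 1}}
  ];
  // Before this commit the payload carried "desired_repository_version" with the
  // stack's repositoryVersion string; the id comes from versionInfo.id and is cached
  // on the selected App.Stack record as versionInfoId.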


[36/63] [abbrv] ambari git commit: AMBARI-21277. Fail to create solr clients in Log Search / Log Feeder (oleewere)

Posted by ab...@apache.org.
AMBARI-21277. Fail to create solr clients in Log Search / Log Feeder (oleewere)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/535660bb
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/535660bb
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/535660bb

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 535660bb75efefd21692b0558dc7d74e420903f2
Parents: 1e29590
Author: oleewere <ol...@gmail.com>
Authored: Mon Jun 26 15:44:34 2017 +0200
Committer: oleewere <ol...@gmail.com>
Committed: Tue Jun 27 13:21:40 2017 +0200

----------------------------------------------------------------------
 ambari-metrics/ambari-metrics-common/pom.xml | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/535660bb/ambari-metrics/ambari-metrics-common/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-common/pom.xml b/ambari-metrics/ambari-metrics-common/pom.xml
index f0d3963..cae9734 100644
--- a/ambari-metrics/ambari-metrics-common/pom.xml
+++ b/ambari-metrics/ambari-metrics-common/pom.xml
@@ -108,6 +108,10 @@
                   <pattern>org.jboss</pattern>
                   <shadedPattern>org.apache.hadoop.metrics2.sink.relocated.jboss</shadedPattern>
                 </relocation>
+                <relocation>
+                  <pattern>org.apache.http</pattern>
+                  <shadedPattern>org.apache.hadoop.metrics2.sink.relocated.apache.http</shadedPattern>
+                </relocation>
               </relocations>
             </configuration>
           </execution>


[08/63] [abbrv] ambari git commit: AMBARI-21268 Remove Upgrade Catalogs For Every Version Before 2.5 (dgrinenko)

Posted by ab...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog2121Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog2121Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog2121Test.java
deleted file mode 100644
index 7ae3f42..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog2121Test.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import static org.easymock.EasyMock.createMockBuilder;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.createStrictMock;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-import static org.easymock.EasyMock.verify;
-
-import java.lang.reflect.Method;
-import java.sql.SQLException;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-import javax.persistence.EntityManager;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.H2DatabaseCleaner;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.orm.dao.StackDAO;
-import org.apache.ambari.server.orm.entities.StackEntity;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.stack.OsFamily;
-import org.easymock.EasyMockSupport;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.powermock.core.classloader.annotations.PrepareForTest;
-
-import com.google.inject.AbstractModule;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.Provider;
-
-@PrepareForTest(UpgradeCatalog2121.class)
-public class UpgradeCatalog2121Test {
-  private Injector injector;
-  private Provider<EntityManager> entityManagerProvider = createStrictMock(Provider.class);
-  private EntityManager entityManager = createNiceMock(EntityManager.class);
-  private UpgradeCatalogHelper upgradeCatalogHelper;
-  private StackEntity desiredStackEntity;
-
-  @Before
-  public void init() {
-    reset(entityManagerProvider);
-    expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
-    replay(entityManagerProvider);
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-
-    upgradeCatalogHelper = injector.getInstance(UpgradeCatalogHelper.class);
-    // inject AmbariMetaInfo to ensure that stacks get populated in the DB
-    injector.getInstance(AmbariMetaInfo.class);
-    // load the stack entity
-    StackDAO stackDAO = injector.getInstance(StackDAO.class);
-    desiredStackEntity = stackDAO.find("PHD", "3.0.0");
-  }
-
-  @After
-  public void tearDown() throws AmbariException, SQLException {
-    H2DatabaseCleaner.clearDatabaseAndStopPersistenceService(injector);
-  }
-
-  @Test
-  public void testExecuteDMLUpdates() throws Exception {
-    Method updatePHDConfigs = UpgradeCatalog2121.class.getDeclaredMethod("updatePHDConfigs");
-    Method updateOozieConfigs = UpgradeCatalog2121.class.getDeclaredMethod("updateOozieConfigs");
-
-    UpgradeCatalog2121 upgradeCatalog2121 = createMockBuilder(UpgradeCatalog2121.class)
-        .addMockedMethod(updatePHDConfigs)
-        .addMockedMethod(updateOozieConfigs)
-        .createMock();
-
-    upgradeCatalog2121.updatePHDConfigs();
-    expectLastCall().once();
-    upgradeCatalog2121.updateOozieConfigs();
-    expectLastCall().once();
-
-    replay(upgradeCatalog2121);
-
-    upgradeCatalog2121.executeDMLUpdates();
-
-    verify(upgradeCatalog2121);
-  }
-
-  @Test
-  public void testUpdateOozieConfigs() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Map<String, String> propertiesOozieSite = new HashMap<String, String>() {{
-      put("oozie.authentication.kerberos.name.rules", "\n ");
-    }};
-    final Config oozieSiteConf = easyMockSupport.createNiceMock(Config.class);
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(Clusters.class).toInstance(mockClusters);
-        bind(EntityManager.class).toInstance(entityManager);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("oozie-site")).andReturn(oozieSiteConf).atLeastOnce();
-
-    expect(oozieSiteConf.getProperties()).andReturn(propertiesOozieSite).once();
-
-    UpgradeCatalog2121 upgradeCatalog2121 = createMockBuilder(UpgradeCatalog2121.class)
-            .withConstructor(Injector.class)
-            .withArgs(mockInjector)
-            .addMockedMethod("updateConfigurationPropertiesForCluster", Cluster.class, String.class,
-                    Map.class, Set.class, boolean.class, boolean.class)
-            .createMock();
-    upgradeCatalog2121.updateConfigurationPropertiesForCluster(mockClusterExpected,
-            "oozie-site", new HashMap<String, String>(), Collections.singleton("oozie.authentication.kerberos.name.rules"),
-            true, false);
-    expectLastCall().once();
-
-    easyMockSupport.replayAll();
-    replay(upgradeCatalog2121);
-    upgradeCatalog2121.updateOozieConfigs();
-    easyMockSupport.verifyAll();
-
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/af1bf85c/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog212Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog212Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog212Test.java
deleted file mode 100644
index ed14a01..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog212Test.java
+++ /dev/null
@@ -1,694 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.upgrade;
-
-import static junit.framework.Assert.assertEquals;
-import static org.easymock.EasyMock.anyObject;
-import static org.easymock.EasyMock.anyString;
-import static org.easymock.EasyMock.capture;
-import static org.easymock.EasyMock.createMockBuilder;
-import static org.easymock.EasyMock.createNiceMock;
-import static org.easymock.EasyMock.eq;
-import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.expectLastCall;
-import static org.easymock.EasyMock.newCapture;
-import static org.easymock.EasyMock.replay;
-import static org.easymock.EasyMock.reset;
-import static org.easymock.EasyMock.verify;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-import java.sql.Connection;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.HashMap;
-import java.util.Map;
-
-import javax.persistence.EntityManager;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.H2DatabaseCleaner;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.orm.DBAccessor;
-import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
-import org.apache.ambari.server.orm.GuiceJpaInitializer;
-import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
-import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
-import org.apache.ambari.server.state.Config;
-import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.Service;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.stack.OsFamily;
-import org.easymock.Capture;
-import org.easymock.EasyMock;
-import org.easymock.EasyMockRule;
-import org.easymock.EasyMockSupport;
-import org.easymock.Mock;
-import org.easymock.MockType;
-import org.easymock.TestSubject;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.inject.AbstractModule;
-import com.google.inject.Binder;
-import com.google.inject.Guice;
-import com.google.inject.Injector;
-import com.google.inject.Module;
-import com.google.inject.Provider;
-
-/**
- * {@link org.apache.ambari.server.upgrade.UpgradeCatalog212} unit tests.
- */
-public class UpgradeCatalog212Test {
-
-  private static final String TOPOLOGY_REQUEST_TABLE = "topology_request";
-  private static final String TOPOLOGY_REQUEST_CLUSTER_NAME_COLUMN = "cluster_name";
-
-  private Injector injector;
-
-  @Rule
-  public EasyMockRule mocks = new EasyMockRule(this);
-
-  @Mock(type = MockType.STRICT)
-  private Provider<EntityManager> entityManagerProvider;
-
-  @Mock(type = MockType.NICE)
-  private EntityManager entityManager;
-
-  @Mock(type = MockType.NICE)
-  private DBAccessor dbAccessor;
-
-  @Mock
-  private Injector mockedInjector;
-
-  @Mock(type = MockType.NICE)
-  private Connection connection;
-
-  @Mock
-  private Statement statement;
-
-  @Mock
-  private ResultSet resultSet;
-
-  @TestSubject
-  private UpgradeCatalog212 testSubject = new UpgradeCatalog212(
-      EasyMock.createNiceMock(Injector.class));
-
-  @Before
-  public void setUp() {
-    reset(entityManagerProvider);
-    expect(entityManagerProvider.get()).andReturn(entityManager).anyTimes();
-    replay(entityManagerProvider);
-
-    injector = Guice.createInjector(new InMemoryDefaultTestModule());
-    injector.getInstance(GuiceJpaInitializer.class);
-
-    // inject AmbariMetaInfo to ensure that stacks get populated in the DB
-    injector.getInstance(AmbariMetaInfo.class);
-  }
-
-  @After
-  public void tearDown() throws AmbariException, SQLException {
-    if (injector != null) {
-      H2DatabaseCleaner.clearDatabaseAndStopPersistenceService(injector);
-    }
-  }
-
-
-  @Test
-  public void testFinilizeTopologyDDL() throws Exception {
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    dbAccessor.dropColumn(eq("topology_request"), eq("cluster_name"));
-    dbAccessor.setColumnNullable(eq("topology_request"), eq("cluster_id"), eq(false));
-    dbAccessor.addFKConstraint(eq("topology_request"), eq("FK_topology_request_cluster_id"), eq("cluster_id"),
-      eq("clusters"), eq("cluster_id"), eq(false));
-
-    replay(dbAccessor);
-    Module module = new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(DBAccessor.class).toInstance(dbAccessor);
-        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    };
-
-    Injector injector = Guice.createInjector(module);
-    UpgradeCatalog212 upgradeCatalog212 = injector.getInstance(UpgradeCatalog212.class);
-    upgradeCatalog212.finilizeTopologyDDL();
-
-    verify(dbAccessor);
-  }
-
-  @Test
-  public void testExecuteDDLUpdates() throws Exception {
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    Configuration configuration = createNiceMock(Configuration.class);
-    Connection connection = createNiceMock(Connection.class);
-    Statement statement = createNiceMock(Statement.class);
-    ResultSet resultSet = createNiceMock(ResultSet.class);
-    expect(configuration.getDatabaseUrl()).andReturn(Configuration.JDBC_IN_MEMORY_URL).anyTimes();
-    dbAccessor.getConnection();
-    expectLastCall().andReturn(connection).anyTimes();
-    connection.createStatement();
-    expectLastCall().andReturn(statement).anyTimes();
-    statement.executeQuery(anyObject(String.class));
-    expectLastCall().andReturn(resultSet).anyTimes();
-
-    // Create DDL sections with their own capture groups
-    HostRoleCommandDDL hostRoleCommandDDL = new HostRoleCommandDDL();
-
-    // Execute any DDL schema changes
-    hostRoleCommandDDL.execute(dbAccessor);
-
-    // Replay sections
-    replay(dbAccessor, configuration, resultSet, connection, statement);
-
-    AbstractUpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor);
-    Class<?> c = AbstractUpgradeCatalog.class;
-    Field f = c.getDeclaredField("configuration");
-    f.setAccessible(true);
-    f.set(upgradeCatalog, configuration);
-
-    upgradeCatalog.executeDDLUpdates();
-    verify(dbAccessor, configuration, resultSet, connection, statement);
-
-    // Verify sections
-    hostRoleCommandDDL.verify(dbAccessor);
-  }
-
-  @Test
-  public void testExecuteDMLUpdates() throws Exception {
-    Method addMissingConfigs = UpgradeCatalog212.class.getDeclaredMethod("addMissingConfigs");
-    Method addNewConfigurationsFromXml = AbstractUpgradeCatalog.class.getDeclaredMethod("addNewConfigurationsFromXml");
-
-    UpgradeCatalog212 upgradeCatalog212 = createMockBuilder(UpgradeCatalog212.class)
-            .addMockedMethod(addNewConfigurationsFromXml)
-            .addMockedMethod(addMissingConfigs)
-            .createMock();
-
-    upgradeCatalog212.addNewConfigurationsFromXml();
-    expectLastCall().once();
-
-    upgradeCatalog212.addMissingConfigs();
-    expectLastCall().once();
-
-    replay(upgradeCatalog212);
-
-    upgradeCatalog212.executeDMLUpdates();
-
-    verify(upgradeCatalog212);
-  }
-
-  @Test
-  public void testUpdateHBaseAdnClusterConfigs() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-
-
-    final Map<String, String> propertiesHbaseSite = new HashMap<String, String>() {
-      {
-        put("hbase.bucketcache.size", "1024m");
-      }
-    };
-
-    final Map<String, String> propertiesHbaseEnv = new HashMap<String, String>() {
-      {
-        put("override_hbase_uid", "false");
-      }
-    };
-
-    final Config mockHbaseEnv = easyMockSupport.createNiceMock(Config.class);
-    expect(mockHbaseEnv.getProperties()).andReturn(propertiesHbaseEnv).once();
-    final Config mockHbaseSite = easyMockSupport.createNiceMock(Config.class);
-    expect(mockHbaseSite.getProperties()).andReturn(propertiesHbaseSite).once();
-    final Config mockClusterEnv = easyMockSupport.createNiceMock(Config.class);
-
-    final Map<String, String> propertiesExpectedHbaseEnv = new HashMap<>();
-    final Map<String, String> propertiesExpectedClusterEnv = new HashMap<String, String>() {{
-      put("override_uid", "false");
-    }};
-    final Map<String, String> propertiesExpectedHbaseSite = new HashMap<String, String>() {{
-      put("hbase.bucketcache.size", "1024");
-    }};
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        bind(Clusters.class).toInstance(mockClusters);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    expect(mockClusterExpected.getDesiredConfigByType("cluster-env")).andReturn(mockClusterEnv).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("hbase-env")).andReturn(mockHbaseEnv).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("hbase-site")).andReturn(mockHbaseSite).atLeastOnce();
-
-    expect(mockClusterEnv.getProperties()).andReturn(propertiesExpectedClusterEnv).atLeastOnce();
-    expect(mockHbaseEnv.getProperties()).andReturn(propertiesExpectedHbaseEnv).atLeastOnce();
-    expect(mockHbaseSite.getProperties()).andReturn(propertiesExpectedHbaseSite).atLeastOnce();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog212.class).updateHbaseAndClusterConfigurations();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testUpdateHBaseAdnClusterConfigsTrue() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-
-    final Map<String, String> propertiesHbaseEnv = new HashMap<String, String>() {
-      {
-        put("override_hbase_uid", "true");
-      }
-    };
-
-    final Config mockHbaseEnv = easyMockSupport.createNiceMock(Config.class);
-    expect(mockHbaseEnv.getProperties()).andReturn(propertiesHbaseEnv).once();
-    final Config mockClusterEnv = easyMockSupport.createNiceMock(Config.class);
-
-    final Map<String, String> propertiesExpectedHbaseEnv = new HashMap<>();
-    final Map<String, String> propertiesExpectedClusterEnv = new HashMap<String, String>() {{
-      put("override_uid", "true");
-    }};
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        bind(Clusters.class).toInstance(mockClusters);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    expect(mockClusterExpected.getDesiredConfigByType("cluster-env")).andReturn(mockClusterEnv).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("hbase-env")).andReturn(mockHbaseEnv).atLeastOnce();
-
-    expect(mockClusterEnv.getProperties()).andReturn(propertiesExpectedClusterEnv).atLeastOnce();
-    expect(mockHbaseEnv.getProperties()).andReturn(propertiesExpectedHbaseEnv).atLeastOnce();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog212.class).updateHbaseAndClusterConfigurations();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testUpdateHBaseAdnClusterConfigsNoHBaseEnv() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-
-    final Config mockClusterEnv = easyMockSupport.createNiceMock(Config.class);
-
-    final Map<String, String> propertiesExpectedClusterEnv = new HashMap<String, String>() {{
-      put("override_uid", "false");
-    }};
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        bind(Clusters.class).toInstance(mockClusters);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    expect(mockClusterExpected.getDesiredConfigByType("cluster-env")).andReturn(mockClusterEnv).atLeastOnce();
-
-    expect(mockClusterEnv.getProperties()).andReturn(propertiesExpectedClusterEnv).atLeastOnce();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog212.class).updateHbaseAndClusterConfigurations();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testUpdateHBaseAdnClusterConfigsNoOverrideHBaseUID() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-
-    final Map<String, String> propertiesHbaseEnv = new HashMap<String, String>() {
-      {
-        put("hbase_user", "hbase");
-      }
-    };
-
-    final Config mockHbaseEnv = easyMockSupport.createNiceMock(Config.class);
-    expect(mockHbaseEnv.getProperties()).andReturn(propertiesHbaseEnv).once();
-    final Config mockClusterEnv = easyMockSupport.createNiceMock(Config.class);
-
-    final Map<String, String> propertiesExpectedClusterEnv = new HashMap<String, String>() {{
-      put("override_uid", "false");
-    }};
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        bind(Clusters.class).toInstance(mockClusters);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    expect(mockClusterExpected.getDesiredConfigByType("cluster-env")).andReturn(mockClusterEnv).atLeastOnce();
-    expect(mockClusterExpected.getDesiredConfigByType("hbase-env")).andReturn(mockHbaseEnv).atLeastOnce();
-
-    expect(mockClusterEnv.getProperties()).andReturn(propertiesExpectedClusterEnv).atLeastOnce();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog212.class).updateHbaseAndClusterConfigurations();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testUpdateHiveConfigs() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController  mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Config mockHiveSite = easyMockSupport.createNiceMock(Config.class);
-
-    final Map<String, String> propertiesExpectedHiveSite = new HashMap<String, String>() {{
-      put("hive.heapsize", "512");
-      put("hive.server2.custom.authentication.class", "");
-    }};
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        bind(Clusters.class).toInstance(mockClusters);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    StackId stackId = new StackId("HDP-2.2");
-
-    Service hiveService = easyMockSupport.createNiceMock(Service.class);
-    expect(hiveService.getDesiredStackId()).andReturn(stackId);
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    expect(mockClusterExpected.getServices()).andReturn(ImmutableMap.<String, Service>builder()
-        .put("HIVE", hiveService)
-        .build());
-    expect(mockClusterExpected.getDesiredConfigByType("hive-site")).andReturn(mockHiveSite).atLeastOnce();
-    expect(mockHiveSite.getProperties()).andReturn(propertiesExpectedHiveSite).atLeastOnce();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog212.class).updateHiveConfigs();
-    easyMockSupport.verifyAll();
-
-  }
-
-  @Test
-  public void testUpdateOozieConfigs() throws Exception {
-    EasyMockSupport easyMockSupport = new EasyMockSupport();
-    final AmbariManagementController  mockAmbariManagementController = easyMockSupport.createNiceMock(AmbariManagementController.class);
-    final ConfigHelper mockConfigHelper = easyMockSupport.createMock(ConfigHelper.class);
-
-    final Clusters mockClusters = easyMockSupport.createStrictMock(Clusters.class);
-    final Cluster mockClusterExpected = easyMockSupport.createNiceMock(Cluster.class);
-    final Config mockOozieEnv = easyMockSupport.createNiceMock(Config.class);
-
-    final Map<String, String> propertiesExpectedOozieEnv = new HashMap<String, String>() {{
-      put("oozie_hostname", "");
-      put("oozie_database", "123");
-    }};
-
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(AmbariManagementController.class).toInstance(mockAmbariManagementController);
-        bind(ConfigHelper.class).toInstance(mockConfigHelper);
-        bind(Clusters.class).toInstance(mockClusters);
-
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-
-    expect(mockAmbariManagementController.getClusters()).andReturn(mockClusters).once();
-    expect(mockClusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
-      put("normal", mockClusterExpected);
-    }}).once();
-
-    expect(mockClusterExpected.getDesiredConfigByType("oozie-env")).andReturn(mockOozieEnv).atLeastOnce();
-    expect(mockOozieEnv.getProperties()).andReturn(propertiesExpectedOozieEnv).atLeastOnce();
-
-    easyMockSupport.replayAll();
-    mockInjector.getInstance(UpgradeCatalog212.class).updateOozieConfigs();
-    easyMockSupport.verifyAll();
-  }
-
-  @Test
-  public void testUpdateHiveEnvContent() throws Exception {
-    final Injector mockInjector = Guice.createInjector(new AbstractModule() {
-      @Override
-      protected void configure() {
-        bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));
-        bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    });
-    String testContent = " if [ \"$SERVICE\" = \"cli\" ]; then\n" +
-            "   if [ -z \"$DEBUG\" ]; then\n" +
-            "     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit\"\n" +
-            "   else\n" +
-            "     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n" +
-            "   fi\n" +
-            " fi\n" +
-            "\n" +
-            "export HADOOP_HEAPSIZE=\"{{hive_heapsize}}\"\n" +
-            "export HADOOP_CLIENT_OPTS=\"-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS\"\n" +
-            "\n" +
-            "# Set HADOOP_HOME to point to a specific hadoop install directory\n" +
-            "HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n";
-    String expectedResult = " if [ \"$SERVICE\" = \"cli\" ]; then\n" +
-            "   if [ -z \"$DEBUG\" ]; then\n" +
-            "     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit\"\n" +
-            "   else\n" +
-            "     export HADOOP_OPTS=\"$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit\"\n" +
-            "   fi\n" +
-            " fi\n" +
-            "\n" +
-            "\n" +
-            "\n" +
-            "# Set HADOOP_HOME to point to a specific hadoop install directory\n" +
-            "HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\n";
-    assertEquals(expectedResult, mockInjector.getInstance(UpgradeCatalog212.class).updateHiveEnvContent(testContent));
-  }
-  /**
-   * @param dbAccessor
-   * @return
-   */
-  private AbstractUpgradeCatalog getUpgradeCatalog(final DBAccessor dbAccessor) {
-    Module module = new Module() {
-      @Override
-      public void configure(Binder binder) {
-        binder.bind(DBAccessor.class).toInstance(dbAccessor);
-        binder.bind(EntityManager.class).toInstance(entityManager);
-        binder.bind(OsFamily.class).toInstance(createNiceMock(OsFamily.class));
-      }
-    };
-
-    Injector injector = Guice.createInjector(module);
-    return injector.getInstance(UpgradeCatalog212.class);
-  }
-
-  @Test
-  public void testGetSourceVersion() {
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    UpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor);
-    Assert.assertEquals("2.1.1", upgradeCatalog.getSourceVersion());
-  }
-
-  @Test
-  public void testGetTargetVersion() throws Exception {
-    final DBAccessor dbAccessor = createNiceMock(DBAccessor.class);
-    UpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor);
-
-    Assert.assertEquals("2.1.2", upgradeCatalog.getTargetVersion());
-  }
-
-  /**
-   * Verify alert changes
-   */
-  class HostRoleCommandDDL implements SectionDDL {
-    HashMap<String, Capture<DBColumnInfo>> captures;
-
-    public HostRoleCommandDDL() {
-      captures = new HashMap<>();
-
-      Capture<DBAccessor.DBColumnInfo> hostRoleCommandAutoSkipColumnCapture = EasyMock.newCapture();
-
-      captures.put("host_role_command", hostRoleCommandAutoSkipColumnCapture);
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void execute(DBAccessor dbAccessor) throws SQLException {
-      Capture<DBColumnInfo> hostRoleCommandAuotSkipColumnCapture = captures.get(
-          "host_role_command");
-
-      dbAccessor.addColumn(eq("host_role_command"),
-          capture(hostRoleCommandAuotSkipColumnCapture));
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override
-    public void verify(DBAccessor dbAccessor) throws SQLException {
-      verifyHostRoleCommandSkipCapture(captures.get("host_role_command"));
-    }
-
-    private void verifyHostRoleCommandSkipCapture(
-        Capture<DBAccessor.DBColumnInfo> hostRoleCommandAutoSkipColumnCapture) {
-      DBColumnInfo autoSkipColumn = hostRoleCommandAutoSkipColumnCapture.getValue();
-      Assert.assertEquals(Integer.class, autoSkipColumn.getType());
-      Assert.assertEquals("auto_skip_on_failure", autoSkipColumn.getName());
-    }
-  }
-
-  @Test
-  public void testShouldSkipPreDMLLogicIfClusterNameColumnDoesNotExist() throws Exception {
-    // GIVEN
-    reset(dbAccessor);
-    Capture<String> tableNameCaptor = newCapture();
-    Capture<String> columnNameCaptor = newCapture();
-
-    // the column used by the logic is already deleted
-    // this could happen as a result of previously running the update
-    expect(dbAccessor.tableHasColumn(capture(tableNameCaptor), capture(columnNameCaptor))).andReturn(false);
-    replay(dbAccessor);
-
-    // WHEN
-    testSubject.executePreDMLUpdates();
-
-    // THEN
-    Assert.assertNotNull("The table name hasn't been captured", tableNameCaptor.getValue());
-    Assert.assertEquals("The table name is not as expected", TOPOLOGY_REQUEST_TABLE, tableNameCaptor.getValue());
-
-    Assert.assertNotNull("The column name hasn't been captured", columnNameCaptor.getValue());
-    Assert.assertEquals("The column name is not as expected", TOPOLOGY_REQUEST_CLUSTER_NAME_COLUMN,
-        columnNameCaptor.getValue());
-  }
-
-
-  @Test
-  public void testShouldPerformPreDMLLogicIfClusterNameColumnExists() throws Exception {
-    // GIVEN
-    reset(dbAccessor);
-    expect(dbAccessor.getConnection()).andReturn(connection).anyTimes();
-    expect(connection.createStatement()).andReturn(statement);
-
-    Capture<String> tableNameCaptor = newCapture();
-    Capture<String> columnNameCaptor = newCapture();
-
-    expect(dbAccessor.tableHasColumn(capture(tableNameCaptor), capture(columnNameCaptor))).andReturn(true);
-
-    expect(statement.executeQuery(anyString())).andReturn(resultSet);
-    statement.close();
-
-    expect(resultSet.next()).andReturn(false);
-    resultSet.close();
-
-    replay(dbAccessor, connection, statement, resultSet);
-
-    // WHEN
-    testSubject.executePreDMLUpdates();
-
-    // THEN
-    Assert.assertNotNull("The table name hasn't been captured", tableNameCaptor.getValue());
-    Assert.assertEquals("The table name is not as expected", TOPOLOGY_REQUEST_TABLE, tableNameCaptor.getValue());
-
-    Assert.assertNotNull("The column name hasn't been captured", columnNameCaptor.getValue());
-    Assert.assertEquals("The column name is not as expected", TOPOLOGY_REQUEST_CLUSTER_NAME_COLUMN,
-        columnNameCaptor.getValue());
-
-    verify(dbAccessor, statement, resultSet);
-  }
-
-}


[59/63] [abbrv] ambari git commit: AMBARI-21376. Using URI naming format in dfs.datanode.data.dir causing datanodes startup failure (aonishuk)

Posted by ab...@apache.org.
AMBARI-21376. Using URI naming format in dfs.datanode.data.dir causing datanodes startup failure (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7554509f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7554509f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7554509f

Branch: refs/heads/branch-feature-logsearch-ui
Commit: 7554509f5ab1ddf262746ddab3c74f88c9f8154a
Parents: aa7a8c6
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Thu Jun 29 20:44:11 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Thu Jun 29 20:44:11 2017 +0300

----------------------------------------------------------------------
 .../resource_management/libraries/functions/mounted_dirs_helper.py  | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7554509f/ambari-common/src/main/python/resource_management/libraries/functions/mounted_dirs_helper.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/mounted_dirs_helper.py b/ambari-common/src/main/python/resource_management/libraries/functions/mounted_dirs_helper.py
index 0ebd7e2..712eacf 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/mounted_dirs_helper.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/mounted_dirs_helper.py
@@ -115,6 +115,7 @@ def handle_mounted_dirs(func, dirs_string, history_filename, update_cache=True):
   dirs_unmounted = set()         # set of dirs that have become unmounted
   valid_existing_dirs = []
 
+  dirs_string = dirs_string.replace("file:///","/")
   dirs_string = ",".join([re.sub(r'^\[.+\]', '', dfs_dir.strip()) for dfs_dir in dirs_string.split(",")])
   for dir in dirs_string.split(","):
     if dir is None or dir.strip() == "":
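
For reference, below is a minimal standalone sketch (not part of the commit) of the pre-processing the patched handle_mounted_dirs() now applies to the configured directory string: the file:/// URI scheme is stripped first, then any leading [DISK]/[SSD]-style storage-type tag is removed from each entry. The helper name and sample paths are illustrative assumptions, not code from the repository.

import re

def normalize_dfs_dirs(dirs_string):
    # Strip the URI scheme so an entry such as file:///grid/0/data becomes /grid/0/data,
    # mirroring the line added by this commit.
    dirs_string = dirs_string.replace("file:///", "/")
    # Drop a leading storage-type tag such as [DISK] or [SSD] from each entry,
    # mirroring the existing re.sub() call in handle_mounted_dirs().
    return [re.sub(r'^\[.+\]', '', d.strip())
            for d in dirs_string.split(",") if d.strip()]

print(normalize_dfs_dirs("[DISK]file:///grid/0/hdfs/data,/grid/1/hdfs/data"))
# -> ['/grid/0/hdfs/data', '/grid/1/hdfs/data']

With this change a dfs.datanode.data.dir value written in URI form should be handled the same way as a plain path, which addresses the DataNode startup failure described in the commit message.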