Posted to commits@ambari.apache.org by ad...@apache.org on 2018/04/26 13:43:55 UTC

[ambari] branch trunk updated: AMBARI-23683. dfs_ha_initial_* properties should be removed after installation (#1094)

This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 1e15cda  AMBARI-23683. dfs_ha_initial_* properties should be removed after installation (#1094)
1e15cda is described below

commit 1e15cda11647e2a6cce062654f9aede35d1782ca
Author: Doroszlai, Attila <64...@users.noreply.github.com>
AuthorDate: Thu Apr 26 15:43:52 2018 +0200

    AMBARI-23683. dfs_ha_initial_* properties should be removed after installation (#1094)
---
 .../internal/BlueprintConfigurationProcessor.java  |  60 ++++++++----
 .../apache/ambari/server/events/AmbariEvent.java   |   5 +
 .../server/events/ClusterProvisionedEvent.java     |  35 +++++++
 .../ambari/server/state/cluster/ClusterImpl.java   |  24 +++++
 .../ambari/server/topology/Configuration.java      |  37 ++++++++
 .../ambari/server/topology/TopologyManager.java    |   3 +
 .../HDFS/2.1.0.2.0/package/scripts/params_linux.py |  18 ++--
 .../YARN/2.1.0.2.0/package/scripts/params_linux.py |   4 +-
 .../BlueprintConfigurationProcessorTest.java       | 101 +++++++++------------
 .../ambari/server/topology/ConfigurationTest.java  |  47 ++++++++++
 .../server/topology/TopologyManagerTest.java       |   3 +
 .../2.0.6/configs/ha_bootstrap_active_node.json    |   6 +-
 .../2.0.6/configs/ha_bootstrap_standby_node.json   |   6 +-
 .../ha_bootstrap_standby_node_initial_start.json   |   6 +-
 ...tandby_node_initial_start_dfs_nameservices.json |   6 +-
 15 files changed, 262 insertions(+), 99 deletions(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index 24f49ad..c5399d0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -58,6 +58,8 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Predicates;
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 
@@ -89,10 +91,23 @@ public class BlueprintConfigurationProcessor {
   private final static String HAWQ_SITE_HAWQ_STANDBY_ADDRESS_HOST = "hawq_standby_address_host";
   private final static String HAWQSTANDBY = "HAWQSTANDBY";
 
+  private final static String HDFS_HA_INITIAL_CONFIG_TYPE = CLUSTER_ENV_CONFIG_TYPE_NAME;
+  private final static String HDFS_ACTIVE_NAMENODE_PROPERTY_NAME = "dfs_ha_initial_namenode_active";
+  private final static String HDFS_STANDBY_NAMENODE_PROPERTY_NAME = "dfs_ha_initial_namenode_standby";
   private final static String HDFS_ACTIVE_NAMENODE_SET_PROPERTY_NAME = "dfs_ha_initial_namenode_active_set";
   private final static String HDFS_STANDBY_NAMENODE_SET_PROPERTY_NAME = "dfs_ha_initial_namenode_standby_set";
-
   private final static String HDFS_HA_INITIAL_CLUSTER_ID_PROPERTY_NAME = "dfs_ha_initial_cluster_id";
+  private final static Set<String> HDFS_HA_INITIAL_PROPERTIES = ImmutableSet.of(
+      HDFS_ACTIVE_NAMENODE_PROPERTY_NAME, HDFS_ACTIVE_NAMENODE_SET_PROPERTY_NAME,
+      HDFS_STANDBY_NAMENODE_PROPERTY_NAME, HDFS_STANDBY_NAMENODE_SET_PROPERTY_NAME,
+      HDFS_HA_INITIAL_CLUSTER_ID_PROPERTY_NAME);
+
+  /**
+   * These properties are only required during deployment, and should be removed afterwards.
+   */
+  public static final Map<String, Set<String>> TEMPORARY_PROPERTIES_FOR_CLUSTER_DEPLOYMENT = ImmutableMap.of(
+    HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES
+  );
 
   private final static String HADOOP_ENV_CONFIG_TYPE_NAME = "hadoop-env";
 
@@ -189,10 +204,14 @@ public class BlueprintConfigurationProcessor {
         new SimplePropertyNameExportFilter("ldap-url", "kerberos-env"),
         new SimplePropertyNameExportFilter("container_dn", "kerberos-env"),
         new SimplePropertyNameExportFilter("domains", "krb5-conf"),
-        new SimplePropertyNameExportFilter("dfs_ha_initial_namenode_active", HADOOP_ENV_CONFIG_TYPE_NAME),
-        new SimplePropertyNameExportFilter("dfs_ha_initial_namenode_standby", HADOOP_ENV_CONFIG_TYPE_NAME),
+        new SimplePropertyNameExportFilter(HDFS_ACTIVE_NAMENODE_PROPERTY_NAME, HADOOP_ENV_CONFIG_TYPE_NAME),
+        new SimplePropertyNameExportFilter(HDFS_STANDBY_NAMENODE_PROPERTY_NAME, HADOOP_ENV_CONFIG_TYPE_NAME),
         new SimplePropertyNameExportFilter(HDFS_ACTIVE_NAMENODE_SET_PROPERTY_NAME, HADOOP_ENV_CONFIG_TYPE_NAME),
         new SimplePropertyNameExportFilter(HDFS_STANDBY_NAMENODE_SET_PROPERTY_NAME, HADOOP_ENV_CONFIG_TYPE_NAME),
+        new SimplePropertyNameExportFilter(HDFS_ACTIVE_NAMENODE_PROPERTY_NAME, HDFS_HA_INITIAL_CONFIG_TYPE),
+        new SimplePropertyNameExportFilter(HDFS_STANDBY_NAMENODE_PROPERTY_NAME, HDFS_HA_INITIAL_CONFIG_TYPE),
+        new SimplePropertyNameExportFilter(HDFS_ACTIVE_NAMENODE_SET_PROPERTY_NAME, HDFS_HA_INITIAL_CONFIG_TYPE),
+        new SimplePropertyNameExportFilter(HDFS_STANDBY_NAMENODE_SET_PROPERTY_NAME, HDFS_HA_INITIAL_CONFIG_TYPE),
         new StackPropertyTypeFilter(),
         new KerberosAuthToLocalRulesFilter(authToLocalPerClusterMap)};
     }
@@ -354,11 +373,18 @@ public class BlueprintConfigurationProcessor {
     // specified in the stacks, and the filters defined in this class
     doFilterPriorToClusterUpdate(clusterConfig, configTypesUpdated);
 
+    Set<String> propertiesMoved = clusterConfig.moveProperties(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_PROPERTIES);
+    if (!propertiesMoved.isEmpty()) {
+      configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
+      configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
+    }
+
     // this needs to be called after doFilterPriorToClusterUpdate() to ensure that the returned
     // set of properties (copy) doesn't include the removed properties.  If an updater
     // removes a property other than the property it is registered for then we will
     // have an issue as it won't be removed from the clusterProps map as it is a copy.
     Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
+
     for (Map<String, Map<String, PropertyUpdater>> updaterMap : createCollectionOfUpdaters()) {
       for (Map.Entry<String, Map<String, PropertyUpdater>> entry : updaterMap.entrySet()) {
         String type = entry.getKey();
@@ -433,13 +459,13 @@ public class BlueprintConfigurationProcessor {
           // set the properties that configure which namenode is active,
           // and which is a standby node in this HA deployment
           Iterator<String> nnHostIterator = nnHosts.iterator();
-          clusterConfig.setProperty(HADOOP_ENV_CONFIG_TYPE_NAME, "dfs_ha_initial_namenode_active", nnHostIterator.next());
-          clusterConfig.setProperty(HADOOP_ENV_CONFIG_TYPE_NAME, "dfs_ha_initial_namenode_standby", nnHostIterator.next());
+          clusterConfig.setProperty(HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_ACTIVE_NAMENODE_PROPERTY_NAME, nnHostIterator.next());
+          clusterConfig.setProperty(HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_STANDBY_NAMENODE_PROPERTY_NAME, nnHostIterator.next());
 
-          configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
+          configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
         }
       } else {
-        if (!isPropertySet(clusterProps, HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_ACTIVE_NAMENODE_SET_PROPERTY_NAME) && !isPropertySet(clusterProps, HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_STANDBY_NAMENODE_SET_PROPERTY_NAME)) {
+        if (!isPropertySet(clusterProps, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_ACTIVE_NAMENODE_SET_PROPERTY_NAME) && !isPropertySet(clusterProps, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_STANDBY_NAMENODE_SET_PROPERTY_NAME)) {
           // multiple nameservices indicates an HDFS NameNode Federation install
           // process each nameservice to determine the active/standby nodes
           LOG.info("Processing multiple HDFS NameService instances, which indicates a NameNode Federation deployment");
@@ -477,15 +503,15 @@ public class BlueprintConfigurationProcessor {
 
             // set the properties that configure the NameNode Active/Standby status for each nameservice
             if (!activeNameNodeHostnames.isEmpty() && !standbyNameNodeHostnames.isEmpty()) {
-              clusterConfig.setProperty(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_ACTIVE_NAMENODE_SET_PROPERTY_NAME, String.join(",", activeNameNodeHostnames));
-              clusterConfig.setProperty(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_STANDBY_NAMENODE_SET_PROPERTY_NAME, String.join(",", standbyNameNodeHostnames));
+              clusterConfig.setProperty(HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_ACTIVE_NAMENODE_SET_PROPERTY_NAME, String.join(",", activeNameNodeHostnames));
+              clusterConfig.setProperty(HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_STANDBY_NAMENODE_SET_PROPERTY_NAME, String.join(",", standbyNameNodeHostnames));
 
               // also set the clusterID property, required for Federation installs of HDFS
-              if (!isPropertySet(clusterProps, HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CLUSTER_ID_PROPERTY_NAME)) {
-                clusterConfig.setProperty(HADOOP_ENV_CONFIG_TYPE_NAME, HDFS_HA_INITIAL_CLUSTER_ID_PROPERTY_NAME, getClusterName());
+              if (!isPropertySet(clusterProps, HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_CLUSTER_ID_PROPERTY_NAME)) {
+                clusterConfig.setProperty(HDFS_HA_INITIAL_CONFIG_TYPE, HDFS_HA_INITIAL_CLUSTER_ID_PROPERTY_NAME, getClusterName());
               }
 
-              configTypesUpdated.add(HADOOP_ENV_CONFIG_TYPE_NAME);
+              configTypesUpdated.add(HDFS_HA_INITIAL_CONFIG_TYPE);
             } else {
               LOG.warn("Error in processing the set of active/standby namenodes in this federated cluster, please check hdfs-site configuration");
             }
@@ -1076,7 +1102,7 @@ public class BlueprintConfigurationProcessor {
    *         false if the initial active namenode property has not been configured
    */
   static boolean isNameNodeHAInitialActiveNodeSet(Map<String, Map<String, String>> configProperties) {
-    return configProperties.containsKey(HADOOP_ENV_CONFIG_TYPE_NAME) && configProperties.get(HADOOP_ENV_CONFIG_TYPE_NAME).containsKey("dfs_ha_initial_namenode_active");
+    return configProperties.containsKey(HDFS_HA_INITIAL_CONFIG_TYPE) && configProperties.get(HDFS_HA_INITIAL_CONFIG_TYPE).containsKey(HDFS_ACTIVE_NAMENODE_PROPERTY_NAME);
   }
 
 
@@ -1090,7 +1116,7 @@ public class BlueprintConfigurationProcessor {
    *         false if the initial standby namenode property has not been configured
    */
   static boolean isNameNodeHAInitialStandbyNodeSet(Map<String, Map<String, String>> configProperties) {
-    return configProperties.containsKey(HADOOP_ENV_CONFIG_TYPE_NAME) && configProperties.get(HADOOP_ENV_CONFIG_TYPE_NAME).containsKey("dfs_ha_initial_namenode_standby");
+    return configProperties.containsKey(HDFS_HA_INITIAL_CONFIG_TYPE) && configProperties.get(HDFS_HA_INITIAL_CONFIG_TYPE).containsKey(HDFS_STANDBY_NAMENODE_PROPERTY_NAME);
   }
 
   /**
@@ -2512,6 +2538,7 @@ public class BlueprintConfigurationProcessor {
     Map<String, PropertyUpdater> mapredEnvMap = new HashMap<>();
     Map<String, PropertyUpdater> mHadoopEnvMap = new HashMap<>();
     Map<String, PropertyUpdater> shHadoopEnvMap = new HashMap<>();
+    Map<String, PropertyUpdater> clusterEnvMap = new HashMap<>();
     Map<String, PropertyUpdater> hbaseEnvMap = new HashMap<>();
     Map<String, PropertyUpdater> hiveEnvMap = new HashMap<>();
     Map<String, PropertyUpdater> hiveInteractiveEnvMap = new HashMap<>();
@@ -2571,6 +2598,7 @@ public class BlueprintConfigurationProcessor {
     singleHostTopologyUpdaters.put("ranger-storm-audit", rangerStormAuditPropsMap);
     singleHostTopologyUpdaters.put("ranger-atlas-audit", rangerAtlasAuditPropsMap);
     singleHostTopologyUpdaters.put(HADOOP_ENV_CONFIG_TYPE_NAME, shHadoopEnvMap);
+    singleHostTopologyUpdaters.put(CLUSTER_ENV_CONFIG_TYPE_NAME, clusterEnvMap);
 
     singleHostTopologyUpdaters.put("hawq-site", hawqSiteMap);
     singleHostTopologyUpdaters.put("zookeeper-env", zookeeperEnvMap);
@@ -2619,8 +2647,8 @@ public class BlueprintConfigurationProcessor {
     multiHdfsSiteMap.put("dfs.namenode.shared.edits.dir", new MultipleHostTopologyUpdater("JOURNALNODE", ';', false, false, true));
     multiHdfsSiteMap.put("dfs.encryption.key.provider.uri", new MultipleHostTopologyUpdater("RANGER_KMS_SERVER", ';', false, false, false));
     // Explicit initial primary/secondary node assignment in HA
-    shHadoopEnvMap.put("dfs_ha_initial_namenode_active", new SingleHostTopologyUpdater("NAMENODE"));
-    shHadoopEnvMap.put("dfs_ha_initial_namenode_standby", new SingleHostTopologyUpdater("NAMENODE"));
+    clusterEnvMap.put(HDFS_ACTIVE_NAMENODE_PROPERTY_NAME, new SingleHostTopologyUpdater("NAMENODE"));
+    clusterEnvMap.put(HDFS_STANDBY_NAMENODE_PROPERTY_NAME, new SingleHostTopologyUpdater("NAMENODE"));
 
     // SECONDARY_NAMENODE
     hdfsSiteMap.put("dfs.secondary.http.address", new SingleHostTopologyUpdater("SECONDARY_NAMENODE"));
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/AmbariEvent.java b/ambari-server/src/main/java/org/apache/ambari/server/events/AmbariEvent.java
index fb5f298..fee12b1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/events/AmbariEvent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/AmbariEvent.java
@@ -113,6 +113,11 @@ public abstract class AmbariEvent {
     CLUSTER_RENAME,
 
     /**
+     * The cluster was successfully provisioned.
+     */
+    CLUSTER_PROVISIONED,
+
+    /**
      * The service component recovery enabled field changed.
      */
     SERVICE_COMPONENT_RECOVERY_CHANGED,
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/events/ClusterProvisionedEvent.java b/ambari-server/src/main/java/org/apache/ambari/server/events/ClusterProvisionedEvent.java
new file mode 100644
index 0000000..1dcd831
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/events/ClusterProvisionedEvent.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.events;
+
+/**
+ * Fired when cluster provisioning is finished (only for blueprints currently).
+ */
+public class ClusterProvisionedEvent extends AmbariEvent {
+
+  private final long clusterId;
+
+  public ClusterProvisionedEvent(long clusterId) {
+    super(AmbariEventType.CLUSTER_PROVISIONED);
+    this.clusterId = clusterId;
+  }
+
+  public long getClusterId() {
+    return clusterId;
+  }
+}
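
ClusterProvisionedEvent is a plain payload class; delivery happens through Ambari's event publisher, which follows the Guava EventBus model (note the @Subscribe handler added to ClusterImpl below and the ambariEventPublisher.publish(...) call added to TopologyManager). A standalone sketch of that flow using Guava's EventBus directly, with a hypothetical listener standing in for ClusterImpl:

    import com.google.common.eventbus.EventBus;
    import com.google.common.eventbus.Subscribe;

    import org.apache.ambari.server.events.ClusterProvisionedEvent;

    class ProvisionEventFlowSketch {
      static class Listener {
        private final long myClusterId = 1L;

        @Subscribe  // invoked for every ClusterProvisionedEvent posted on the bus
        public void onClusterProvisioned(ClusterProvisionedEvent event) {
          if (event.getClusterId() == myClusterId) {
            System.out.println("cluster " + event.getClusterId()
                + " provisioned; remove temporary configurations");
          }
        }
      }

      public static void main(String[] args) {
        EventBus bus = new EventBus();
        bus.register(new Listener());               // subscribe, as ClusterImpl does
        bus.post(new ClusterProvisionedEvent(1L));  // publish, as TopologyManager does on success
      }
    }
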
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
index a79c2da..ca4719f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
@@ -57,16 +57,19 @@ import org.apache.ambari.server.ServiceComponentNotFoundException;
 import org.apache.ambari.server.ServiceNotFoundException;
 import org.apache.ambari.server.agent.ExecutionCommand.KeyNames;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.AmbariSessionManager;
 import org.apache.ambari.server.controller.ClusterResponse;
 import org.apache.ambari.server.controller.ConfigurationResponse;
 import org.apache.ambari.server.controller.MaintenanceStateHelper;
 import org.apache.ambari.server.controller.RootService;
 import org.apache.ambari.server.controller.ServiceConfigVersionResponse;
+import org.apache.ambari.server.controller.internal.BlueprintConfigurationProcessor;
 import org.apache.ambari.server.controller.internal.DeleteHostComponentStatusMetaData;
 import org.apache.ambari.server.events.AmbariEvent.AmbariEventType;
 import org.apache.ambari.server.events.ClusterConfigChangedEvent;
 import org.apache.ambari.server.events.ClusterEvent;
+import org.apache.ambari.server.events.ClusterProvisionedEvent;
 import org.apache.ambari.server.events.ConfigsUpdateEvent;
 import org.apache.ambari.server.events.jpa.EntityManagerCacheInvalidationEvent;
 import org.apache.ambari.server.events.jpa.JPAEvent;
@@ -254,6 +257,9 @@ public class ClusterImpl implements Cluster {
   private AmbariMetaInfo ambariMetaInfo;
 
   @Inject
+  private AmbariManagementController controller;
+
+  @Inject
   private ServiceConfigDAO serviceConfigDAO;
 
   @Inject
@@ -2782,6 +2788,24 @@ List<ClusterConfigEntity> appliedConfigs = new ArrayList<>();    String serviceN
     m_clusterPropertyCache.clear();
   }
 
+  @Subscribe
+  public void onClusterProvisioned(ClusterProvisionedEvent event) {
+    if (event.getClusterId() == getClusterId()) {
+      LOG.info("Removing temporary configurations after successful deployment of cluster id={} name={}", getClusterId(), getClusterName());
+      for (Map.Entry<String, Set<String>> e : BlueprintConfigurationProcessor.TEMPORARY_PROPERTIES_FOR_CLUSTER_DEPLOYMENT.entrySet()) {
+        try {
+          configHelper.updateConfigType(this, getCurrentStackVersion(), controller,
+            e.getKey(), Collections.emptyMap(), e.getValue(),
+            "internal", "Removing temporary configurations after successful deployment"
+          );
+          LOG.info("Removed temporary configurations: {} / {}", e.getKey(), e.getValue());
+        } catch (AmbariException ex) {
+          LOG.warn("Failed to remove temporary configurations: {} / {}", e.getKey(), e.getValue(), ex);
+        }
+      }
+    }
+  }
+
   /**
    * {@inheritDoc}
    */
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/Configuration.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/Configuration.java
index 28b62bc..b709146 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/Configuration.java
@@ -23,6 +23,7 @@ import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
+import java.util.Set;
 
 /**
  * Configuration for a topology entity such as a blueprint, hostgroup or cluster.
@@ -279,6 +280,42 @@ public class Configuration {
   }
 
   /**
+   * Moves the given properties from {@code sourceConfigType} to {@code targetConfigType}.
+   * If a property is already present in the target, it will be removed from the source, but not overwritten in the target.
+   *
+   * @param sourceConfigType the config type to move properties from
+   * @param targetConfigType the config type to move properties to
+   * @param propertiesToMove names of properties to be moved
+   * @return property names that were removed from the source
+   */
+  public Set<String> moveProperties(String sourceConfigType, String targetConfigType, Set<String> propertiesToMove) {
+    Set<String> moved = new HashSet<>();
+    for (String property : propertiesToMove) {
+      if (isPropertySet(sourceConfigType, property)) {
+        String value = removeProperty(sourceConfigType, property);
+        if (!isPropertySet(targetConfigType, property)) {
+          setProperty(targetConfigType, property, value);
+        }
+        moved.add(property);
+      }
+    }
+    return moved;
+  }
+
+  /**
+   * General convenience method to determine if a given property has been set in the cluster configuration
+   *
+   * @param configType the config type to check
+   * @param propertyName the property name to check
+   * @return true if the named property has been set
+   *         false if the named property has not been set
+   */
+  public boolean isPropertySet(String configType, String propertyName) {
+    return properties.containsKey(configType) && properties.get(configType).containsKey(propertyName) ||
+      parentConfiguration != null && parentConfiguration.isPropertySet(configType, propertyName);
+  }
+
+  /**
    * Set an attribute on the hierarchy.
    * The attribute will be set on this instance so it will override any value specified in
    * the parent hierarchy.
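
Per the javadoc, moveProperties always removes a matched property from the source type but never overwrites a value already present in the target, and the returned set names everything removed from the source. A short usage sketch against the API added here (constructor and accessor shapes as exercised in ConfigurationTest below; assumes the usual HashMap/Set/ImmutableSet imports):

    Configuration config = new Configuration(new HashMap<>(), new HashMap<>());
    config.setProperty("hadoop-env", "dfs_ha_initial_namenode_active", "c6401.ambari.apache.org");
    config.setProperty("hadoop-env", "dfs_ha_initial_cluster_id", "fromBlueprint");
    config.setProperty("cluster-env", "dfs_ha_initial_cluster_id", "preset");  // already set in target

    Set<String> moved = config.moveProperties("hadoop-env", "cluster-env",
        ImmutableSet.of("dfs_ha_initial_namenode_active", "dfs_ha_initial_cluster_id"));

    // moved contains both names: both were removed from hadoop-env.
    // cluster-env now maps dfs_ha_initial_namenode_active -> "c6401.ambari.apache.org",
    // while dfs_ha_initial_cluster_id keeps its existing target value "preset".
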
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
index 6da7671..36f1ad0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
@@ -61,6 +61,7 @@ import org.apache.ambari.server.controller.spi.SystemException;
 import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
 import org.apache.ambari.server.events.AmbariEvent;
 import org.apache.ambari.server.events.ClusterConfigFinishedEvent;
+import org.apache.ambari.server.events.ClusterProvisionedEvent;
 import org.apache.ambari.server.events.HostsRemovedEvent;
 import org.apache.ambari.server.events.RequestFinishedEvent;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
@@ -234,6 +235,7 @@ public class TopologyManager {
                 clusterProvisionWithBlueprintCreateRequests.get(event.getClusterId()).getRequestId(),
                 clusterTopologyMap.get(event.getClusterId()).getBlueprint().getName(),
                 event.getClusterId());
+        ambariEventPublisher.publish(new ClusterProvisionedEvent(event.getClusterId()));
       } else {
         LOG.info("Cluster creation request id={} using Blueprint {} failed for cluster id={}",
                 clusterProvisionWithBlueprintCreateRequests.get(event.getClusterId()).getRequestId(),
@@ -1156,4 +1158,5 @@ public class TopologyManager {
       }
     }
   }
+
 }
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index a2e67b9..16bfc8e 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -266,7 +266,7 @@ smoke_hdfs_user_mode = 0770
 hdfs_namenode_format_disabled = default("/configurations/cluster-env/hdfs_namenode_format_disabled", False)
 hdfs_namenode_formatted_mark_suffix = "/namenode-formatted/"
 hdfs_namenode_bootstrapped_mark_suffix = "/namenode-bootstrapped/"
-namenode_formatted_old_mark_dirs = ["/var/run/hadoop/hdfs/namenode-formatted", 
+namenode_formatted_old_mark_dirs = ["/var/run/hadoop/hdfs/namenode-formatted",
   format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted"),
   "/var/lib/hdfs/namenode/formatted"]
 dfs_name_dirs = dfs_name_dir.split(",")
@@ -303,9 +303,9 @@ dfs_ha_namenode_ids_all_ns = get_properties_for_all_nameservices(hdfs_site, 'dfs
 dfs_ha_automatic_failover_enabled = default("/configurations/hdfs-site/dfs.ha.automatic-failover.enabled", False)
 
 # hostname of the active HDFS HA Namenode (only used when HA is enabled)
-dfs_ha_namenode_active = default("/configurations/hadoop-env/dfs_ha_initial_namenode_active", None)
+dfs_ha_namenode_active = default("/configurations/cluster-env/dfs_ha_initial_namenode_active", None)
 # hostname of the standby HDFS HA Namenode (only used when HA is enabled)
-dfs_ha_namenode_standby = default("/configurations/hadoop-env/dfs_ha_initial_namenode_standby", None)
+dfs_ha_namenode_standby = default("/configurations/cluster-env/dfs_ha_initial_namenode_standby", None)
 ha_zookeeper_quorum = config['configurations']['core-site']['ha.zookeeper.quorum']
 jaas_file = os.path.join(hadoop_conf_secure_dir, 'hdfs_jaas.conf')
 zk_namespace = default('/configurations/hdfs-site/ha.zookeeper.parent-znode', '/hadoop-ha')
@@ -351,19 +351,19 @@ else:
 
 if journalnode_address:
   journalnode_port = journalnode_address.split(":")[1]
-  
-  
+
+
 if security_enabled:
   dn_principal_name = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
   dn_keytab = config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
   dn_principal_name = dn_principal_name.replace('_HOST',hostname.lower())
-  
+
   dn_kinit_cmd = format("{kinit_path_local} -kt {dn_keytab} {dn_principal_name};")
-  
+
   nn_principal_name = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
   nn_keytab = config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
   nn_principal_name = nn_principal_name.replace('_HOST',hostname.lower())
-  
+
   nn_kinit_cmd = format("{kinit_path_local} -kt {nn_keytab} {nn_principal_name};")
 
   jn_principal_name = default("/configurations/hdfs-site/dfs.journalnode.kerberos.principal", None)
@@ -399,7 +399,7 @@ HdfsResource = functools.partial(
   immutable_paths = get_not_managed_resources(),
   dfs_type = dfs_type
 )
-  
+
 name_node_params = default("/commandParams/namenode", None)
 
 java_home = config['ambariLevelParams']['java_home']
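
The params_linux.py changes only switch the lookup path from /configurations/hadoop-env/... to /configurations/cluster-env/...; the default(path, fallback) helper walks the command JSON and yields the fallback when any path segment is missing, so the scripts keep working whether or not the property is present. A rough Java equivalent of that lookup over a nested-map stand-in for the command JSON (hypothetical helper, not Ambari code):

    @SuppressWarnings("unchecked")
    static Object defaultLookup(Map<String, Object> commandJson, String path, Object fallback) {
      Object node = commandJson;
      for (String key : path.substring(1).split("/")) {  // skip the leading '/'
        if (!(node instanceof Map) || !((Map<String, Object>) node).containsKey(key)) {
          return fallback;  // any missing segment yields the fallback
        }
        node = ((Map<String, Object>) node).get(key);
      }
      return node;
    }

    // defaultLookup(json, "/configurations/cluster-env/dfs_ha_initial_namenode_active", null)
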
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
index 8df14e1..6be8149 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
@@ -447,8 +447,8 @@ node_label_enable = config['configurations']['yarn-site']['yarn.node-labels.enab
 cgroups_dir = "/cgroups_test/cpu"
 
 # hostname of the active HDFS HA Namenode (only used when HA is enabled)
-dfs_ha_namenode_active = default("/configurations/hadoop-env/dfs_ha_initial_namenode_active", None)
-if dfs_ha_namenode_active is not None: 
+dfs_ha_namenode_active = default("/configurations/cluster-env/dfs_ha_initial_namenode_active", None)
+if dfs_ha_namenode_active is not None:
   namenode_hostname = dfs_ha_namenode_active
 else:
   namenode_hostname = config['clusterHostInfo']['namenode_hosts'][0]
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index 47a17e1..ba74853 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -1137,6 +1137,10 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
 
     assertNull("Initial NameNode HA property exported although should not have", hadoopEnvProperties.get("dfs_ha_initial_namenode_active"));
     assertNull("Initial NameNode HA property exported although should not have", hadoopEnvProperties.get("dfs_ha_initial_namenode_standby"));
+
+    Map<String, String> clusterEnv = clusterConfig.getProperties().get("cluster-env");
+    assertTrue("Initial NameNode HA property exported although should not have", clusterEnv == null || clusterEnv.get("dfs_ha_initial_namenode_active") == null);
+    assertTrue("Initial NameNode HA property exported although should not have", clusterEnv == null || clusterEnv.get("dfs_ha_initial_namenode_standby") == null);
   }
 
   @Test
@@ -2886,13 +2890,14 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     // verify that the Blueprint config processor has set the internal required properties
     // that determine the active and standby node hostnames for this HA setup
     // one of the two hosts should be set to active and the other to standby
-    String activeHost = hadoopEnvProperties.get("dfs_ha_initial_namenode_active");
+    Map<String, String> clusterEnv = clusterConfig.getProperties().get("cluster-env");
+    String activeHost = clusterEnv.get("dfs_ha_initial_namenode_active");
     if (activeHost.equals(expectedHostName)) {
       assertEquals("Standby Namenode hostname was not set correctly",
-        expectedHostNameTwo, hadoopEnvProperties.get("dfs_ha_initial_namenode_standby"));
+        expectedHostNameTwo, clusterEnv.get("dfs_ha_initial_namenode_standby"));
     } else if (activeHost.equals(expectedHostNameTwo)) {
       assertEquals("Standby Namenode hostname was not set correctly",
-        expectedHostName, hadoopEnvProperties.get("dfs_ha_initial_namenode_standby"));
+        expectedHostName, clusterEnv.get("dfs_ha_initial_namenode_standby"));
     } else {
       fail("Active Namenode hostname was not set correctly: " + activeHost);
     }
@@ -5284,7 +5289,8 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     // verify that the Blueprint config processor has set the internal required properties
     // that determine the active and standby node hostnames for this HA setup.
     // one host should be active and the other standby
-    String initialActiveHost = hadoopEnvProperties.get("dfs_ha_initial_namenode_active");
+    Map<String, String> clusterEnv = clusterConfig.getProperties().get("cluster-env");
+    String initialActiveHost = clusterEnv.get("dfs_ha_initial_namenode_active");
     String expectedStandbyHost = null;
     if (initialActiveHost.equals(expectedHostName)) {
       expectedStandbyHost = expectedHostNameTwo;
@@ -5294,7 +5300,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
       fail("Active Namenode hostname was not set correctly");
     }
     assertEquals("Standby Namenode hostname was not set correctly",
-      expectedStandbyHost, hadoopEnvProperties.get("dfs_ha_initial_namenode_standby"));
+      expectedStandbyHost, clusterEnv.get("dfs_ha_initial_namenode_standby"));
 
 
     assertEquals("fs.defaultFS should not be modified by cluster update when NameNode HA is enabled.",
@@ -5316,14 +5322,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
 
 
     // verify that correct configuration types were listed as updated in the returned set
-    assertEquals("Incorrect number of updated config types returned, set = " + updatedConfigTypes,
-      3, updatedConfigTypes.size());
-    assertTrue("Expected config type not found in updated set",
-      updatedConfigTypes.contains("cluster-env"));
-    assertTrue("Expected config type not found in updated set",
-      updatedConfigTypes.contains("hdfs-site"));
-    assertTrue("Expected config type not found in updated set",
-      updatedConfigTypes.contains("hadoop-env"));
+    assertEquals(ImmutableSet.of("cluster-env", "hdfs-site"), updatedConfigTypes);
   }
 
   @Test
@@ -5449,14 +5448,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
 
 
     // verify that correct configuration types were listed as updated in the returned set
-    assertEquals("Incorrect number of updated config types returned, set = " + updatedConfigTypes,
-      3, updatedConfigTypes.size());
-    assertTrue("Expected config type not found in updated set",
-      updatedConfigTypes.contains("cluster-env"));
-    assertTrue("Expected config type not found in updated set",
-      updatedConfigTypes.contains("hdfs-site"));
-    assertTrue("Expected config type not found in updated set",
-      updatedConfigTypes.contains("hadoop-env"));
+    assertEquals(ImmutableSet.of("cluster-env", "hdfs-site"), updatedConfigTypes);
   }
 
 
@@ -5632,37 +5624,32 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
 
 
     // verify that correct configuration types were listed as updated in the returned set
-    assertEquals("Incorrect number of updated config types returned, set = " + updatedConfigTypes,
-      3, updatedConfigTypes.size());
-    assertTrue("Expected config type not found in updated set",
-      updatedConfigTypes.contains("cluster-env"));
-    assertTrue("Expected config type not found in updated set",
-      updatedConfigTypes.contains("hdfs-site"));
-    assertTrue("Expected config type not found in updated set",
-      updatedConfigTypes.contains("hadoop-env"));
+    assertEquals(ImmutableSet.of("cluster-env", "hdfs-site"), updatedConfigTypes);
+
+    Map<String, String> clusterEnv = clusterConfig.getProperties().get("cluster-env");
 
     // verify that the standard, single-nameservice HA properties are
     // NOT set in this configuration
     assertFalse("Single-node nameservice config should not have been set",
-                hadoopEnvProperties.containsKey("dfs_ha_initial_namenode_active"));
+                clusterEnv.containsKey("dfs_ha_initial_namenode_active"));
     assertFalse("Single-node nameservice config should not have been set",
-      hadoopEnvProperties.containsKey("dfs_ha_initial_namenode_standby"));
+      clusterEnv.containsKey("dfs_ha_initial_namenode_standby"));
 
     // verify that the config processor sets the expected properties for
     // the sets of active and standby hostnames for NameNode deployment
     assertTrue("Expected active set not found in hadoop-env",
-      hadoopEnvProperties.containsKey("dfs_ha_initial_namenode_active_set"));
+      clusterEnv.containsKey("dfs_ha_initial_namenode_active_set"));
     assertTrue("Expected standby set not found in hadoop-env",
-      hadoopEnvProperties.containsKey("dfs_ha_initial_namenode_standby_set"));
+      clusterEnv.containsKey("dfs_ha_initial_namenode_standby_set"));
     assertTrue("Expected clusterId not found in hadoop-env",
-      hadoopEnvProperties.containsKey("dfs_ha_initial_cluster_id"));
+      clusterEnv.containsKey("dfs_ha_initial_cluster_id"));
 
     // verify that the clusterID is set by default to the cluster name
     assertEquals("Expected clusterId was not set to expected value",
-      "clusterName", hadoopEnvProperties.get("dfs_ha_initial_cluster_id"));
+      "clusterName", clusterEnv.get("dfs_ha_initial_cluster_id"));
 
     // verify that the expected hostnames are included in the active set
-    String[] activeHostNames = hadoopEnvProperties.get("dfs_ha_initial_namenode_active_set").split(",");
+    String[] activeHostNames = clusterEnv.get("dfs_ha_initial_namenode_active_set").split(",");
     assertEquals("NameNode active set did not contain the expected number of hosts",
                  2, activeHostNames.length);
     Set<String> setOfActiveHostNames = new HashSet<String>(Arrays.asList(activeHostNames));
@@ -5673,7 +5660,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
 
 
     // verify that the expected hostnames are included in the standby set
-    String[] standbyHostNames = hadoopEnvProperties.get("dfs_ha_initial_namenode_standby_set").split(",");
+    String[] standbyHostNames = clusterEnv.get("dfs_ha_initial_namenode_standby_set").split(",");
     assertEquals("NameNode standby set did not contain the expected number of hosts",
                  2, standbyHostNames.length);
     Set<String> setOfStandbyHostNames = new HashSet<String>(Arrays.asList(standbyHostNames));
@@ -5955,35 +5942,32 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
 
 
     // verify that correct configuration types were listed as updated in the returned set
-    assertEquals("Incorrect number of updated config types returned, set = " + updatedConfigTypes,
-      2, updatedConfigTypes.size());
-    assertTrue("Expected config type not found in updated set",
-      updatedConfigTypes.contains("cluster-env"));
-    assertTrue("Expected config type not found in updated set",
-      updatedConfigTypes.contains("hdfs-site"));
+    assertEquals(ImmutableSet.of("cluster-env", "hadoop-env", "hdfs-site"), updatedConfigTypes);
+
+    Map<String, String> clusterEnv = clusterConfig.getProperties().get("cluster-env");
 
     // verify that the standard, single-nameservice HA properties are
     // NOT set in this configuration
     assertFalse("Single-node nameservice config should not have been set",
-      hadoopEnvProperties.containsKey("dfs_ha_initial_namenode_active"));
+      clusterEnv.containsKey("dfs_ha_initial_namenode_active"));
     assertFalse("Single-node nameservice config should not have been set",
-      hadoopEnvProperties.containsKey("dfs_ha_initial_namenode_standby"));
+      clusterEnv.containsKey("dfs_ha_initial_namenode_standby"));
 
     // verify that the config processor sets the expected properties for
     // the sets of active and standby hostnames for NameNode deployment
     assertTrue("Expected active set not found in hadoop-env",
-      hadoopEnvProperties.containsKey("dfs_ha_initial_namenode_active_set"));
+      clusterEnv.containsKey("dfs_ha_initial_namenode_active_set"));
     assertTrue("Expected standby set not found in hadoop-env",
-      hadoopEnvProperties.containsKey("dfs_ha_initial_namenode_standby_set"));
+      clusterEnv.containsKey("dfs_ha_initial_namenode_standby_set"));
     assertTrue("Expected clusterId not found in hadoop-env",
-      hadoopEnvProperties.containsKey("dfs_ha_initial_cluster_id"));
+      clusterEnv.containsKey("dfs_ha_initial_cluster_id"));
 
     // verify that the clusterID is not set by the processor, since the user has already customized it
     assertEquals("Expected clusterId was not set to expected value",
-      "my-custom-cluster-name", hadoopEnvProperties.get("dfs_ha_initial_cluster_id"));
+      "my-custom-cluster-name", clusterEnv.get("dfs_ha_initial_cluster_id"));
 
     // verify that the expected hostnames are included in the active set
-    String[] activeHostNames = hadoopEnvProperties.get("dfs_ha_initial_namenode_active_set").split(",");
+    String[] activeHostNames = clusterEnv.get("dfs_ha_initial_namenode_active_set").split(",");
     assertEquals("NameNode active set did not contain the expected number of hosts",
       2, activeHostNames.length);
     Set<String> setOfActiveHostNames = new HashSet<String>(Arrays.asList(activeHostNames));
@@ -5994,7 +5978,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
 
 
     // verify that the expected hostnames are included in the standby set
-    String[] standbyHostNames = hadoopEnvProperties.get("dfs_ha_initial_namenode_standby_set").split(",");
+    String[] standbyHostNames = clusterEnv.get("dfs_ha_initial_namenode_standby_set").split(",");
     assertEquals("NameNode standby set did not contain the expected number of hosts",
       2, standbyHostNames.length);
     Set<String> setOfStandbyHostNames = new HashSet<String>(Arrays.asList(standbyHostNames));
@@ -6153,12 +6137,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
       hdfsSiteProperties.containsKey("dfs.namenode.rpc-address"));
 
     // verify that correct configuration types were listed as updated in the returned set
-    assertEquals("Incorrect number of updated config types returned, set = " + updatedConfigTypes,
-      2, updatedConfigTypes.size());
-    assertTrue("Expected config type 'cluster-env' not found in updated set",
-      updatedConfigTypes.contains("cluster-env"));
-    assertTrue("Expected config type 'hdfs-site' not found in updated set",
-      updatedConfigTypes.contains("hdfs-site"));
+    assertEquals(ImmutableSet.of("cluster-env", "hdfs-site"), updatedConfigTypes);
   }
 
   @Test
@@ -6236,11 +6215,13 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     // verify that the Blueprint config processor has not overridden
     // the user's configuration to determine the active and
     // standby nodes in this NameNode HA cluster
+    Map<String, String> clusterEnv = clusterConfig.getProperties().get("cluster-env");
+
     assertEquals("Active Namenode hostname was not set correctly",
-      expectedHostName, hadoopEnvProperties.get("dfs_ha_initial_namenode_active"));
+      expectedHostName, clusterEnv.get("dfs_ha_initial_namenode_active"));
 
     assertEquals("Standby Namenode hostname was not set correctly",
-      expectedHostNameTwo, hadoopEnvProperties.get("dfs_ha_initial_namenode_standby"));
+      expectedHostNameTwo, clusterEnv.get("dfs_ha_initial_namenode_standby"));
   }
 
   @Test
@@ -6411,7 +6392,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
   @Test
   public void testHadoopHaNameNode() throws Exception {
     // Given
-    final String configType = "hadoop-env";
+    final String configType = "cluster-env";
     Map<String, Map<String, String>> properties = new HashMap<>();
 
     // enable HA
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ConfigurationTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ConfigurationTest.java
index f566d95..23b7e81 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ConfigurationTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ConfigurationTest.java
@@ -19,6 +19,7 @@
 package org.apache.ambari.server.topology;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
@@ -26,9 +27,14 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Set;
 
 import org.junit.Test;
 
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
+
 /**
  * Configuration unit tests.
  */
@@ -497,4 +503,45 @@ public class ConfigurationTest {
     return new Configuration(EMPTY_PROPERTIES,
       new HashMap<>(attributes), parentConfiguration);
   }
+
+  @Test
+  public void moveProperties() {
+    // GIVEN
+    String sourceType = "source";
+    String targetType = "target";
+    String sourceValue = "source value";
+    String targetValue = "target value";
+    Map<String, String> keepers = ImmutableMap.of("keep1", "v1", "keep2", "v3");
+    Map<String, String> movers = ImmutableMap.of("move1", "v2", "move2", "v4");
+    Set<String> common = ImmutableSet.of("common1", "common2");
+    Configuration config = new Configuration(new HashMap<>(), new HashMap<>());
+    for (Map.Entry<String, String> e : keepers.entrySet()) {
+      config.setProperty(sourceType, e.getKey(), e.getValue());
+    }
+    for (Map.Entry<String, String> e : movers.entrySet()) {
+      config.setProperty(sourceType, e.getKey(), e.getValue());
+    }
+    for (String key : common) {
+      config.setProperty(sourceType, key, sourceValue);
+      config.setProperty(targetType, key, targetValue);
+    }
+
+    // WHEN
+    Sets.SetView<String> propertiesToMove = Sets.union(movers.keySet(), common);
+    Set<String> moved = config.moveProperties(sourceType, targetType, propertiesToMove);
+
+    // THEN
+    for (Map.Entry<String, String> e : keepers.entrySet()) {
+      assertEquals(e.getValue(), config.getPropertyValue(sourceType, e.getKey()));
+    }
+    for (Map.Entry<String, String> e : movers.entrySet()) {
+      assertEquals(e.getValue(), config.getPropertyValue(targetType, e.getKey()));
+      assertFalse(config.isPropertySet(sourceType, e.getKey()));
+    }
+    for (String key : common) {
+      assertEquals(targetValue, config.getPropertyValue(targetType, key));
+      assertFalse(config.isPropertySet(sourceType, key));
+    }
+    assertEquals(propertiesToMove, moved);
+  }
 }
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java
index c12965d..34b7828 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java
@@ -59,6 +59,7 @@ import org.apache.ambari.server.controller.spi.ClusterController;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.ResourceProvider;
 import org.apache.ambari.server.events.RequestFinishedEvent;
+import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
 import org.apache.ambari.server.orm.dao.SettingDAO;
 import org.apache.ambari.server.orm.entities.SettingEntity;
 import org.apache.ambari.server.security.authorization.AuthorizationHelper;
@@ -159,6 +160,8 @@ public class TopologyManagerTest {
   private ConfigureClusterTaskFactory configureClusterTaskFactory;
   @Mock(type = MockType.NICE)
   private ConfigureClusterTask configureClusterTask;
+  @Mock(type = MockType.NICE)
+  private AmbariEventPublisher eventPublisher;
 
   @Mock(type = MockType.STRICT)
   private Future mockFuture;
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json
index fa77f50..17b04de 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json
@@ -499,6 +499,8 @@
             "min_user_id": "1000"
         },
         "cluster-env": {
+            "dfs_ha_initial_namenode_active": "c6401.ambari.apache.org",
+            "dfs_ha_initial_namenode_standby": "c6402.ambari.apache.org",
             "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
@@ -519,9 +521,7 @@
             "dtnode_heapsize": "1024m", 
             "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "dfs_ha_initial_namenode_active" : "c6401.ambari.apache.org",
-            "dfs_ha_initial_namenode_standby" : "c6402.ambari.apache.org"
+            "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },
         "hive-env": {
             "hcat_pid_dir": "/var/run/webhcat", 
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
index 9739deb..1226702 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
@@ -498,6 +498,8 @@
             "min_user_id": "1000"
         },
         "cluster-env": {
+            "dfs_ha_initial_namenode_active": "c6401.ambari.apache.org",
+            "dfs_ha_initial_namenode_standby": "c6402.ambari.apache.org",
             "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
@@ -518,9 +520,7 @@
             "dtnode_heapsize": "1024m", 
             "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "dfs_ha_initial_namenode_active" : "c6401.ambari.apache.org",
-            "dfs_ha_initial_namenode_standby" : "c6402.ambari.apache.org"
+            "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },
         "hive-env": {
             "hcat_pid_dir": "/var/run/webhcat", 
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
index f875ca8..fe2d644 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
@@ -498,6 +498,8 @@
             "min_user_id": "1000"
         },
         "cluster-env": {
+            "dfs_ha_initial_namenode_active": "c6401.ambari.apache.org",
+            "dfs_ha_initial_namenode_standby": "c6402.ambari.apache.org",
             "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
@@ -518,9 +520,7 @@
             "dtnode_heapsize": "1024m", 
             "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "dfs_ha_initial_namenode_active" : "c6401.ambari.apache.org",
-            "dfs_ha_initial_namenode_standby" : "c6402.ambari.apache.org"
+            "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },
         "hive-env": {
             "hcat_pid_dir": "/var/run/webhcat", 
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start_dfs_nameservices.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start_dfs_nameservices.json
index 66467f9..56522b9 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start_dfs_nameservices.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start_dfs_nameservices.json
@@ -498,6 +498,8 @@
             "min_user_id": "1000"
         },
         "cluster-env": {
+            "dfs_ha_initial_namenode_active": "c6401.ambari.apache.org",
+            "dfs_ha_initial_namenode_standby": "c6402.ambari.apache.org",
             "managed_hdfs_resource_property_names": "",
             "security_enabled": "false",
             "ignore_groupsusers_create": "false",
@@ -518,9 +520,7 @@
             "dtnode_heapsize": "1024m", 
             "proxyuser_group": "users",
             "hadoop_heapsize": "1024", 
-            "hadoop_pid_dir_prefix": "/var/run/hadoop",
-            "dfs_ha_initial_namenode_active" : "c6401.ambari.apache.org",
-            "dfs_ha_initial_namenode_standby" : "c6402.ambari.apache.org"
+            "hadoop_pid_dir_prefix": "/var/run/hadoop"
         },
         "hive-env": {
             "hcat_pid_dir": "/var/run/webhcat", 
