You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by js...@apache.org on 2015/04/27 07:52:59 UTC

[05/13] ambari git commit: AMBARI-10750. Initial merge of advanced api provisioning work.

http://git-wip-us.apache.org/repos/asf/ambari/blob/c9f0dd0b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index 6c26b06..34b239b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -23,21 +23,33 @@ import static junit.framework.Assert.assertFalse;
 import static junit.framework.Assert.assertNotNull;
 import static junit.framework.Assert.assertTrue;
 import static junit.framework.Assert.fail;
+import static org.easymock.EasyMock.anyObject;
+import static org.easymock.EasyMock.createNiceMock;
 import static org.easymock.EasyMock.expect;
-import static org.easymock.EasyMock.isA;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.reset;
 
-import java.util.Arrays;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.LinkedHashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.StackServiceResponse;
-import org.easymock.EasyMockSupport;
+import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.topology.Blueprint;
+import org.apache.ambari.server.topology.Cardinality;
+import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.ClusterTopologyImpl;
+import org.apache.ambari.server.topology.Configuration;
+import org.apache.ambari.server.topology.HostGroup;
+import org.apache.ambari.server.topology.HostGroupImpl;
+import org.apache.ambari.server.topology.HostGroupInfo;
+import org.apache.ambari.server.topology.InvalidTopologyException;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 
 /**
@@ -45,100 +57,207 @@ import org.junit.Test;
  */
 public class BlueprintConfigurationProcessorTest {
 
+  private static final Configuration EMPTY_CONFIG = new Configuration(Collections.<String, Map<String, String>>emptyMap(),
+      Collections.<String, Map<String, Map<String, String>>>emptyMap());
+
+  private final Map<String, Collection<String>> serviceComponents = new HashMap<String, Collection<String>>();
+
+  private final Blueprint bp = createNiceMock(Blueprint.class);
+  //private final AmbariMetaInfo metaInfo = createNiceMock(AmbariMetaInfo.class);
+  private final ServiceInfo serviceInfo = createNiceMock(ServiceInfo.class);
+  private final Stack stack = createNiceMock(Stack.class);
+
+  @Before
+  public void init() throws Exception {
+    expect(bp.getStack()).andReturn(stack).anyTimes();
+    expect(bp.getName()).andReturn("test-bp").anyTimes();
+
+    expect(stack.getName()).andReturn("testStack").anyTimes();
+    expect(stack.getVersion()).andReturn("1").anyTimes();
+    // return false for all components since for this test we don't care about the value
+    expect(stack.isMasterComponent((String) anyObject())).andReturn(false).anyTimes();
+
+    expect(serviceInfo.getRequiredProperties()).andReturn(
+        Collections.<String, org.apache.ambari.server.state.PropertyInfo>emptyMap()).anyTimes();
+    expect(serviceInfo.getRequiredServices()).andReturn(Collections.<String>emptyList()).anyTimes();
+
+    Collection<String> hdfsComponents = new HashSet<String>();
+    hdfsComponents.add("NAMENODE");
+    hdfsComponents.add("SECONDARY_NAMENODE");
+    hdfsComponents.add("DATANODE");
+    hdfsComponents.add("HDFS_CLIENT");
+    serviceComponents.put("HDFS", hdfsComponents);
+
+    Collection<String> yarnComponents = new HashSet<String>();
+    yarnComponents.add("RESOURCEMANAGER");
+    yarnComponents.add("NODEMANAGER");
+    yarnComponents.add("YARN_CLIENT");
+    yarnComponents.add("APP_TIMELINE_SERVER");
+    serviceComponents.put("YARN", yarnComponents);
+
+    Collection<String> mrComponents = new HashSet<String>();
+    mrComponents.add("MAPREDUCE2_CLIENT");
+    mrComponents.add("HISTORY_SERVER");
+    serviceComponents.put("MAPREDUCE2", mrComponents);
+
+    Collection<String> zkComponents = new HashSet<String>();
+    zkComponents.add("ZOOKEEPER_SERVER");
+    zkComponents.add("ZOOKEEPER_CLIENT");
+    serviceComponents.put("ZOOKEEPER", zkComponents);
+
+    Collection<String> hiveComponents = new HashSet<String>();
+    hiveComponents.add("MYSQL_SERVER");
+    hiveComponents.add("HIVE_METASTORE");
+    serviceComponents.put("HIVE", hiveComponents);
+
+    Collection<String> falconComponents = new HashSet<String>();
+    falconComponents.add("FALCON_SERVER");
+    falconComponents.add("FALCON_CLIENT");
+    serviceComponents.put("FALCON", falconComponents);
+
+    Collection<String> gangliaComponents = new HashSet<String>();
+    gangliaComponents.add("GANGLIA_SERVER");
+    gangliaComponents.add("GANGLIA_CLIENT");
+    serviceComponents.put("GANGLIA", gangliaComponents);
+
+    Collection<String> kafkaComponents = new HashSet<String>();
+    kafkaComponents.add("KAFKA_BROKER");
+    serviceComponents.put("KAFKA", kafkaComponents);
+
+    Collection<String> knoxComponents = new HashSet<String>();
+    knoxComponents.add("KNOX_GATEWAY");
+    serviceComponents.put("KNOX", knoxComponents);
+
+    Collection<String> oozieComponents = new HashSet<String>();
+    oozieComponents.add("OOZIE_SERVER");
+    oozieComponents.add("OOZIE_CLIENT");
+    serviceComponents.put("OOZIE", oozieComponents);
+
+    for (Map.Entry<String, Collection<String>> entry : serviceComponents.entrySet()) {
+      String service = entry.getKey();
+      for (String component : entry.getValue()) {
+        expect(stack.getServiceForComponent(component)).andReturn(service).anyTimes();
+      }
+    }
+  }
+
+  @After
+  public void tearDown() {
+    reset(bp, serviceInfo, stack);
+  }
+
   @Test
-  public void testDoUpdateForBlueprintExport_SingleHostProperty() {
+  public void testDoUpdateForBlueprintExport_SingleHostProperty() throws Exception {
     Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
     Map<String, String> typeProps = new HashMap<String, String>();
     typeProps.put("yarn.resourcemanager.hostname", "testhost");
     properties.put("yarn-site", typeProps);
 
+    Configuration clusterConfig = new Configuration(properties,
+        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+
     Collection<String> hgComponents = new HashSet<String>();
     hgComponents.add("NAMENODE");
     hgComponents.add("SECONDARY_NAMENODE");
     hgComponents.add("RESOURCEMANAGER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+    TestHostGroup group1 = new TestHostGroup("group1", hgComponents, Collections.singleton("testhost"));
 
     Collection<String> hgComponents2 = new HashSet<String>();
     hgComponents2.add("DATANODE");
     hgComponents2.add("HDFS_CLIENT");
-    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+    TestHostGroup group2 = new TestHostGroup("group2", hgComponents2, Collections.singleton("testhost2"));
 
-    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
     hostGroups.add(group1);
     hostGroups.add(group2);
 
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForBlueprintExport(hostGroups);
-    String updatedVal = updatedProperties.get("yarn-site").get("yarn.resourcemanager.hostname");
+    ClusterTopology topology = createClusterTopology("c1", bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+    configProcessor.doUpdateForBlueprintExport();
+
+    String updatedVal = properties.get("yarn-site").get("yarn.resourcemanager.hostname");
     assertEquals("%HOSTGROUP::group1%", updatedVal);
   }
-  
+
   @Test
-  public void testDoUpdateForBlueprintExport_SingleHostProperty__withPort() {
+  public void testDoUpdateForBlueprintExport_SingleHostProperty__withPort() throws Exception {
     Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
     Map<String, String> typeProps = new HashMap<String, String>();
     typeProps.put("fs.defaultFS", "testhost:8020");
     properties.put("core-site", typeProps);
 
+    Configuration clusterConfig = new Configuration(properties,
+        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+
     Collection<String> hgComponents = new HashSet<String>();
     hgComponents.add("NAMENODE");
     hgComponents.add("SECONDARY_NAMENODE");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+    TestHostGroup group1 = new TestHostGroup("group1", hgComponents, Collections.singleton("testhost"));
 
     Collection<String> hgComponents2 = new HashSet<String>();
     hgComponents2.add("DATANODE");
     hgComponents2.add("HDFS_CLIENT");
-    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+    TestHostGroup group2 = new TestHostGroup("group2", hgComponents2, Collections.singleton("testhost2"));
 
-    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
     hostGroups.add(group1);
     hostGroups.add(group2);
 
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForBlueprintExport(hostGroups);
-    String updatedVal = updatedProperties.get("core-site").get("fs.defaultFS");
+    ClusterTopology topology = createClusterTopology("c1", bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+    configProcessor.doUpdateForBlueprintExport();
+
+    String updatedVal = properties.get("core-site").get("fs.defaultFS");
     assertEquals("%HOSTGROUP::group1%:8020", updatedVal);
   }
 
   @Test
-  public void testDoUpdateForBlueprintExport_SingleHostProperty__ExternalReference() {
+  public void testDoUpdateForBlueprintExport_SingleHostProperty__ExternalReference() throws Exception {
     Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
     Map<String, String> typeProps = new HashMap<String, String>();
     typeProps.put("yarn.resourcemanager.hostname", "external-host");
     properties.put("yarn-site", typeProps);
 
+    Configuration clusterConfig = new Configuration(properties,
+        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+
     Collection<String> hgComponents = new HashSet<String>();
     hgComponents.add("NAMENODE");
     hgComponents.add("SECONDARY_NAMENODE");
     hgComponents.add("RESOURCEMANAGER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+    TestHostGroup group1 = new TestHostGroup("group1", hgComponents, Collections.singleton("testhost"));
 
     Collection<String> hgComponents2 = new HashSet<String>();
     hgComponents2.add("DATANODE");
     hgComponents2.add("HDFS_CLIENT");
-    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+    TestHostGroup group2 = new TestHostGroup("group2", hgComponents2, Collections.singleton("testhost2"));
 
-    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
     hostGroups.add(group1);
     hostGroups.add(group2);
 
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForBlueprintExport(hostGroups);
-    assertFalse(updatedProperties.get("yarn-site").containsKey("yarn.resourcemanager.hostname"));
+    ClusterTopology topology = createClusterTopology("c1", bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+    configProcessor.doUpdateForBlueprintExport();
+
+    assertFalse(properties.get("yarn-site").containsKey("yarn.resourcemanager.hostname"));
   }
 
   @Test
-  public void testDoUpdateForBlueprintExport_MultiHostProperty() {
+  public void testDoUpdateForBlueprintExport_MultiHostProperty() throws Exception {
     Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
     Map<String, String> typeProps = new HashMap<String, String>();
     typeProps.put("hbase.zookeeper.quorum", "testhost,testhost2,testhost2a,testhost2b");
     properties.put("hbase-site", typeProps);
 
+    Configuration clusterConfig = new Configuration(properties,
+        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+
     Collection<String> hgComponents = new HashSet<String>();
     hgComponents.add("NAMENODE");
     hgComponents.add("SECONDARY_NAMENODE");
     hgComponents.add("ZOOKEEPER_SERVER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+    TestHostGroup group1 = new TestHostGroup("group1", hgComponents, Collections.singleton("testhost"));
 
     Collection<String> hgComponents2 = new HashSet<String>();
     hgComponents2.add("DATANODE");
@@ -148,7 +267,7 @@ public class BlueprintConfigurationProcessorTest {
     hosts2.add("testhost2");
     hosts2.add("testhost2a");
     hosts2.add("testhost2b");
-    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
+    TestHostGroup group2 = new TestHostGroup("group2", hgComponents2, hosts2);
 
     Collection<String> hgComponents3 = new HashSet<String>();
     hgComponents2.add("HDFS_CLIENT");
@@ -156,31 +275,36 @@ public class BlueprintConfigurationProcessorTest {
     Set<String> hosts3 = new HashSet<String>();
     hosts3.add("testhost3");
     hosts3.add("testhost3a");
-    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
+    TestHostGroup group3 = new TestHostGroup("group3", hgComponents3, hosts3);
 
-    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
     hostGroups.add(group1);
     hostGroups.add(group2);
     hostGroups.add(group3);
 
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForBlueprintExport(hostGroups);
-    String updatedVal = updatedProperties.get("hbase-site").get("hbase.zookeeper.quorum");
+    ClusterTopology topology = createClusterTopology("c1", bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+    configProcessor.doUpdateForBlueprintExport();
+
+    String updatedVal = properties.get("hbase-site").get("hbase.zookeeper.quorum");
     assertEquals("%HOSTGROUP::group1%,%HOSTGROUP::group2%", updatedVal);
   }
 
   @Test
-  public void testDoUpdateForBlueprintExport_MultiHostProperty__WithPorts() {
+  public void testDoUpdateForBlueprintExport_MultiHostProperty__WithPorts() throws Exception {
     Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
     Map<String, String> typeProps = new HashMap<String, String>();
     typeProps.put("templeton.zookeeper.hosts", "testhost:5050,testhost2:9090,testhost2a:9090,testhost2b:9090");
     properties.put("webhcat-site", typeProps);
 
+    Configuration clusterConfig = new Configuration(properties,
+        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+
     Collection<String> hgComponents = new HashSet<String>();
     hgComponents.add("NAMENODE");
     hgComponents.add("SECONDARY_NAMENODE");
     hgComponents.add("ZOOKEEPER_SERVER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+    TestHostGroup group1 = new TestHostGroup("group1", hgComponents, Collections.singleton("testhost"));
 
     Collection<String> hgComponents2 = new HashSet<String>();
     hgComponents2.add("DATANODE");
@@ -190,7 +314,7 @@ public class BlueprintConfigurationProcessorTest {
     hosts2.add("testhost2");
     hosts2.add("testhost2a");
     hosts2.add("testhost2b");
-    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
+    TestHostGroup group2 = new TestHostGroup("group2", hgComponents2, hosts2);
 
     Collection<String> hgComponents3 = new HashSet<String>();
     hgComponents2.add("HDFS_CLIENT");
@@ -198,31 +322,36 @@ public class BlueprintConfigurationProcessorTest {
     Set<String> hosts3 = new HashSet<String>();
     hosts3.add("testhost3");
     hosts3.add("testhost3a");
-    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
+    TestHostGroup group3 = new TestHostGroup("group3", hgComponents3, hosts3);
 
-    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
     hostGroups.add(group1);
     hostGroups.add(group2);
     hostGroups.add(group3);
 
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForBlueprintExport(hostGroups);
-    String updatedVal = updatedProperties.get("webhcat-site").get("templeton.zookeeper.hosts");
+    ClusterTopology topology = createClusterTopology("c1", bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+    configProcessor.doUpdateForBlueprintExport();
+
+    String updatedVal = properties.get("webhcat-site").get("templeton.zookeeper.hosts");
     assertEquals("%HOSTGROUP::group1%:5050,%HOSTGROUP::group2%:9090", updatedVal);
   }
 
   @Test
-  public void testDoUpdateForBlueprintExport_MultiHostProperty__YAML() {
+  public void testDoUpdateForBlueprintExport_MultiHostProperty__YAML() throws Exception {
     Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
     Map<String, String> typeProps = new HashMap<String, String>();
     typeProps.put("storm.zookeeper.servers", "['testhost:5050','testhost2:9090','testhost2a:9090','testhost2b:9090']");
     properties.put("storm-site", typeProps);
 
+    Configuration clusterConfig = new Configuration(properties,
+        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+
     Collection<String> hgComponents = new HashSet<String>();
     hgComponents.add("NAMENODE");
     hgComponents.add("SECONDARY_NAMENODE");
     hgComponents.add("ZOOKEEPER_SERVER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+    TestHostGroup group1 = new TestHostGroup("group1", hgComponents, Collections.singleton("testhost"));
 
     Collection<String> hgComponents2 = new HashSet<String>();
     hgComponents2.add("DATANODE");
@@ -232,7 +361,7 @@ public class BlueprintConfigurationProcessorTest {
     hosts2.add("testhost2");
     hosts2.add("testhost2a");
     hosts2.add("testhost2b");
-    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
+    TestHostGroup group2 = new TestHostGroup("group2", hgComponents2, hosts2);
 
     Collection<String> hgComponents3 = new HashSet<String>();
     hgComponents2.add("HDFS_CLIENT");
@@ -240,1803 +369,1398 @@ public class BlueprintConfigurationProcessorTest {
     Set<String> hosts3 = new HashSet<String>();
     hosts3.add("testhost3");
     hosts3.add("testhost3a");
-    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
+    TestHostGroup group3 = new TestHostGroup("group3", hgComponents3, hosts3);
 
-    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
     hostGroups.add(group1);
     hostGroups.add(group2);
     hostGroups.add(group3);
 
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForBlueprintExport(hostGroups);
-    String updatedVal = updatedProperties.get("storm-site").get("storm.zookeeper.servers");
+    ClusterTopology topology = createClusterTopology("c1", bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+    configProcessor.doUpdateForBlueprintExport();
+
+    String updatedVal = properties.get("storm-site").get("storm.zookeeper.servers");
     assertEquals("['%HOSTGROUP::group1%:5050','%HOSTGROUP::group2%:9090']", updatedVal);
   }
 
   @Test
-  public void testDoUpdateForBlueprintExport_DBHostProperty() {
+  public void testDoUpdateForBlueprintExport_DBHostProperty() throws Exception {
     Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
     Map<String, String> hiveSiteProps = new HashMap<String, String>();
     hiveSiteProps.put("javax.jdo.option.ConnectionURL", "jdbc:mysql://testhost/hive?createDatabaseIfNotExist=true");
     properties.put("hive-site", hiveSiteProps);
 
+    Configuration clusterConfig = new Configuration(properties,
+        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+
     Collection<String> hgComponents = new HashSet<String>();
     hgComponents.add("NAMENODE");
     hgComponents.add("SECONDARY_NAMENODE");
     hgComponents.add("RESOURCEMANAGER");
     hgComponents.add("MYSQL_SERVER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+    TestHostGroup group1 = new TestHostGroup("group1", hgComponents, Collections.singleton("testhost"));
 
     Collection<String> hgComponents2 = new HashSet<String>();
     hgComponents2.add("DATANODE");
     hgComponents2.add("HDFS_CLIENT");
-    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+    TestHostGroup group2 = new TestHostGroup("group2", hgComponents2, Collections.singleton("testhost2"));
 
-    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
     hostGroups.add(group1);
     hostGroups.add(group2);
 
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForBlueprintExport(hostGroups);
-    String updatedVal = updatedProperties.get("hive-site").get("javax.jdo.option.ConnectionURL");
+    ClusterTopology topology = createClusterTopology("c1", bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+    configProcessor.doUpdateForBlueprintExport();
+
+    String updatedVal = properties.get("hive-site").get("javax.jdo.option.ConnectionURL");
     assertEquals("jdbc:mysql://%HOSTGROUP::group1%/hive?createDatabaseIfNotExist=true", updatedVal);
   }
 
   @Test
-  public void testDoUpdateForBlueprintExport_DBHostProperty__External() {
+  public void testDoUpdateForBlueprintExport_DBHostProperty__External() throws Exception {
     Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
     Map<String, String> typeProps = new HashMap<String, String>();
     typeProps.put("javax.jdo.option.ConnectionURL", "jdbc:mysql://external-host/hive?createDatabaseIfNotExist=true");
     properties.put("hive-site", typeProps);
 
+    Configuration clusterConfig = new Configuration(properties,
+        Collections.<String, Map<String, Map<String, String>>>emptyMap());
+
     Collection<String> hgComponents = new HashSet<String>();
     hgComponents.add("NAMENODE");
     hgComponents.add("SECONDARY_NAMENODE");
     hgComponents.add("RESOURCEMANAGER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+    TestHostGroup group1 = new TestHostGroup("group1", hgComponents, Collections.singleton("testhost"));
 
     Collection<String> hgComponents2 = new HashSet<String>();
     hgComponents2.add("DATANODE");
     hgComponents2.add("HDFS_CLIENT");
-    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+    TestHostGroup group2 = new TestHostGroup("group2", hgComponents2, Collections.singleton("testhost2"));
 
-    Collection<HostGroup> hostGroups = new HashSet<HostGroup>();
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
     hostGroups.add(group1);
     hostGroups.add(group2);
 
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForBlueprintExport(hostGroups);
-    assertFalse(updatedProperties.get("hive-site").containsKey("javax.jdo.option.ConnectionURL"));
+    ClusterTopology topology = createClusterTopology("c1", bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+    configProcessor.doUpdateForBlueprintExport();
+
+    assertFalse(properties.get("hive-site").containsKey("javax.jdo.option.ConnectionURL"));
   }
 
   @Test
-  public void testDoUpdateForClusterCreate_SingleHostProperty__defaultValue() {
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> typeProps = new HashMap<String, String>();
-    typeProps.put("yarn.resourcemanager.hostname", "localhost");
-    properties.put("yarn-site", typeProps);
+  public void testFalconConfigExport() throws Exception {
+    final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedPortNum = "808080";
+    final String expectedHostGroupName = "host_group_1";
 
-    Collection<String> hgComponents = new HashSet<String>();
-    hgComponents.add("NAMENODE");
-    hgComponents.add("SECONDARY_NAMENODE");
-    hgComponents.add("RESOURCEMANAGER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+    Map<String, Map<String, String>> configProperties = new HashMap<String, Map<String, String>>();
+    Map<String, String> falconStartupProperties = new HashMap<String, String>();
+    configProperties.put("falcon-startup.properties", falconStartupProperties);
 
-    Collection<String> hgComponents2 = new HashSet<String>();
-    hgComponents2.add("DATANODE");
-    hgComponents2.add("HDFS_CLIENT");
-    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+    // setup properties that include host information
+    falconStartupProperties.put("*.broker.url", expectedHostName + ":" + expectedPortNum);
+    falconStartupProperties.put("*.falcon.service.authentication.kerberos.principal", "falcon/" + expectedHostName + "@EXAMPLE.COM");
+    falconStartupProperties.put("*.falcon.http.authentication.kerberos.principal", "HTTP/" + expectedHostName + "@EXAMPLE.COM");
 
-    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
-    hostGroups.put(group1.getName(), group1);
-    hostGroups.put(group2.getName(), group2);
+    Configuration clusterConfig = new Configuration(configProperties,
+        Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups, null);
-    String updatedVal = updatedProperties.get("yarn-site").get("yarn.resourcemanager.hostname");
-    assertEquals("testhost", updatedVal);
-  }
+    // note: test hostgroups may not accurately reflect the required components for the config properties
 +    // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
+    // are not validated
+    Collection<String> groupComponents = new HashSet<String>();
+    groupComponents.add("FALCON_SERVER");
+    Collection<String> hosts = new ArrayList<String>();
+    hosts.add(expectedHostName);
+    hosts.add("serverTwo");
+    TestHostGroup group = new TestHostGroup(expectedHostGroupName, groupComponents, hosts);
 
-  @Test
-  public void testDoUpdateForClusterCreate_SingleHostProperty__MissingComponent() throws Exception {
-    EasyMockSupport mockSupport = new EasyMockSupport();
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
+    hostGroups.add(group);
 
+    ClusterTopology topology = createClusterTopology("c1", bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+    configProcessor.doUpdateForBlueprintExport();
 
-    AmbariManagementController mockMgmtController =
-      mockSupport.createMock(AmbariManagementController.class);
+    assertEquals("Falcon Broker URL property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), falconStartupProperties.get("*.broker.url"));
 
-    expect(mockMgmtController.getStackServices(isA(Set.class))).andReturn(Collections.<StackServiceResponse>emptySet());
+    assertEquals("Falcon Kerberos Principal property not properly exported",
+        "falcon/" + "%HOSTGROUP::" + expectedHostGroupName + "%" + "@EXAMPLE.COM", falconStartupProperties.get("*.falcon.service.authentication.kerberos.principal"));
 
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> typeProps = new HashMap<String, String>();
-    typeProps.put("yarn.resourcemanager.hostname", "localhost");
-    typeProps.put("yarn.timeline-service.address", "localhost");
-    properties.put("yarn-site", typeProps);
+    assertEquals("Falcon Kerberos HTTP Principal property not properly exported",
+        "HTTP/" + "%HOSTGROUP::" + expectedHostGroupName + "%" + "@EXAMPLE.COM", falconStartupProperties.get("*.falcon.http.authentication.kerberos.principal"));
+  }
 
-    Collection<String> hgComponents = new HashSet<String>();
-    hgComponents.add("NAMENODE");
-    hgComponents.add("SECONDARY_NAMENODE");
-    hgComponents.add("RESOURCEMANAGER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+  @Test
+  public void testDoNameNodeHighAvailabilityExportWithHAEnabled() throws Exception {
+    final String expectedNameService = "mynameservice";
+    final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedPortNum = "808080";
+    final String expectedNodeOne = "nn1";
+    final String expectedNodeTwo = "nn2";
+    final String expectedHostGroupName = "host_group_1";
 
-    Collection<String> hgComponents2 = new HashSet<String>();
-    hgComponents2.add("DATANODE");
-    hgComponents2.add("HDFS_CLIENT");
-    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+    Map<String, Map<String, String>> configProperties = new HashMap<String, Map<String, String>>();
+    Map<String, String> hdfsSiteProperties = new HashMap<String, String>();
+    Map<String, String> coreSiteProperties = new HashMap<String, String>();
+    Map<String, String> hbaseSiteProperties = new HashMap<String, String>();
 
-    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
-    hostGroups.put(group1.getName(), group1);
-    hostGroups.put(group2.getName(), group2);
+    configProperties.put("hdfs-site", hdfsSiteProperties);
+    configProperties.put("core-site", coreSiteProperties);
+    configProperties.put("hbase-site", hbaseSiteProperties);
 
-    mockSupport.replayAll();
+    // setup hdfs config for test
+    hdfsSiteProperties.put("dfs.nameservices", expectedNameService);
+    hdfsSiteProperties.put("dfs.ha.namenodes.mynameservice", expectedNodeOne + ", " + expectedNodeTwo);
 
-    Stack testStackDefinition = new Stack("HDP", "2.1", mockMgmtController) {
-      @Override
-      public Cardinality getCardinality(String component) {
-        // simulate a stack that required the APP_TIMELINE_SERVER
-        if (component.equals("APP_TIMELINE_SERVER")) {
-          return new Cardinality("1");
-        }
+    // setup properties that include host information
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne, expectedHostName + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo, expectedHostName + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne, expectedHostName + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo, expectedHostName + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne, expectedHostName + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo, expectedHostName + ":" + expectedPortNum);
 
-        return null;
-      }
-    };
+    Configuration clusterConfig = new Configuration(configProperties,
+        Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    // note: test hostgroups may not accurately reflect the required components for the config properties
+    // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
+    // are not validated
+    Collection<String> groupComponents = new HashSet<String>();
+    groupComponents.add("NAMENODE");
+    Collection<String> hosts = new ArrayList<String>();
+    hosts.add(expectedHostName);
+    hosts.add("serverTwo");
+    TestHostGroup group = new TestHostGroup(expectedHostGroupName, groupComponents, hosts);
 
-    try {
-      updater.doUpdateForClusterCreate(hostGroups, testStackDefinition);
-      fail("IllegalArgumentException should have been thrown");
-    } catch (IllegalArgumentException illegalArgumentException) {
-      // expected exception
-    }
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
+    hostGroups.add(group);
+
+    ClusterTopology topology = createClusterTopology("c1", bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+    configProcessor.doUpdateForBlueprintExport();
+
+    assertEquals("HTTPS address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameService + "." + expectedNodeTwo));
 
-    mockSupport.verifyAll();
+    assertEquals("HTTP address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeOne));
+    assertEquals("HTTP address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameService + "." + expectedNodeTwo));
+
+    assertEquals("RPC address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeOne));
+    assertEquals("RPC address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameService + "." + expectedNodeTwo));
   }
 
   @Test
-  public void testDoUpdateForClusterCreate_SingleHostProperty__MultipleMatchingHostGroupsError() throws Exception {
-    EasyMockSupport mockSupport = new EasyMockSupport();
+  public void testDoNameNodeHighAvailabilityExportWithHAEnabledNameServicePropertiesIncluded() throws Exception {
+    final String expectedNameService = "mynameservice";
+    final String expectedHostName = "c6401.apache.ambari.org";
 
+    Map<String, Map<String, String>> configProperties = new HashMap<String, Map<String, String>>();
+    Map<String, String> coreSiteProperties = new HashMap<String, String>();
+    Map<String, String> hbaseSiteProperties = new HashMap<String, String>();
+    Map<String, String> accumuloSiteProperties = new HashMap<String, String>();
+
+    configProperties.put("core-site", coreSiteProperties);
+    configProperties.put("hbase-site", hbaseSiteProperties);
+    configProperties.put("accumulo-site", accumuloSiteProperties);
 
-    AmbariManagementController mockMgmtController =
-      mockSupport.createMock(AmbariManagementController.class);
+    // configure fs.defaultFS to include a nameservice name, rather than a host name
+    coreSiteProperties.put("fs.defaultFS", "hdfs://" + expectedNameService);
+    // configure hbase.rootdir to include a nameservice name, rather than a host name
+    hbaseSiteProperties.put("hbase.rootdir", "hdfs://" + expectedNameService + "/apps/hbase/data");
+    // configure instance.volumes to include a nameservice name, rather than a host name
+    accumuloSiteProperties.put("instance.volumes", "hdfs://" + expectedNameService + "/apps/accumulo/data");
 
-    expect(mockMgmtController.getStackServices(isA(Set.class))).andReturn(Collections.<StackServiceResponse>emptySet());
+    Configuration clusterConfig = new Configuration(configProperties,
+        Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> typeProps = new HashMap<String, String>();
-    typeProps.put("yarn.resourcemanager.hostname", "localhost");
-    typeProps.put("yarn.timeline-service.address", "localhost");
-    properties.put("yarn-site", typeProps);
+    // note: test hostgroups may not accurately reflect the required components for the config properties
+    // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
+    // are not validated
+    Collection<String> groupComponents = new HashSet<String>();
+    groupComponents.add("RESOURCEMANAGER");
+    Collection<String> hosts = new ArrayList<String>();
+    hosts.add(expectedHostName);
+    hosts.add("serverTwo");
+    TestHostGroup group = new TestHostGroup("group1", groupComponents, hosts);
 
-    Collection<String> hgComponents = new HashSet<String>();
-    hgComponents.add("NAMENODE");
-    hgComponents.add("SECONDARY_NAMENODE");
-    hgComponents.add("RESOURCEMANAGER");
-    hgComponents.add("APP_TIMELINE_SERVER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
+    hostGroups.add(group);
 
-    Collection<String> hgComponents2 = new HashSet<String>();
-    hgComponents2.add("DATANODE");
-    hgComponents2.add("HDFS_CLIENT");
-    hgComponents2.add("APP_TIMELINE_SERVER");
-    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+    ClusterTopology topology = createClusterTopology("c1", bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+    configProcessor.doUpdateForBlueprintExport();
 
-    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
-    hostGroups.put(group1.getName(), group1);
-    hostGroups.put(group2.getName(), group2);
+    // verify that any properties that include nameservices are not removed from the exported blueprint's configuration
+    assertEquals("Property containing an HA nameservice (fs.defaultFS), was not correctly exported by the processor",
+        "hdfs://" + expectedNameService, coreSiteProperties.get("fs.defaultFS"));
+    assertEquals("Property containing an HA nameservice (hbase.rootdir), was not correctly exported by the processor",
+        "hdfs://" + expectedNameService + "/apps/hbase/data", hbaseSiteProperties.get("hbase.rootdir"));
+    assertEquals("Property containing an HA nameservice (instance.volumes), was not correctly exported by the processor",
+        "hdfs://" + expectedNameService + "/apps/accumulo/data", accumuloSiteProperties.get("instance.volumes"));
+  }
 
-    mockSupport.replayAll();
+  @Test
+  public void testDoNameNodeHighAvailabilityExportWithHANotEnabled() throws Exception {
+    // hdfs-site config for this test will not include any HA values
+    Map<String, Map<String, String>> configProperties = new HashMap<String, Map<String, String>>();
+    Map<String, String> hdfsSiteProperties = new HashMap<String, String>();
+    configProperties.put("hdfs-site", hdfsSiteProperties);
 
-    Stack testStackDefinition = new Stack("HDP", "2.1", mockMgmtController) {
-      @Override
-      public Cardinality getCardinality(String component) {
-        // simulate a stack that required the APP_TIMELINE_SERVER
-        if (component.equals("APP_TIMELINE_SERVER")) {
-          return new Cardinality("0-1");
-        }
+    assertEquals("Incorrect initial state for hdfs-site config",
+        0, hdfsSiteProperties.size());
 
-        return null;
-      }
-    };
+    Configuration clusterConfig = new Configuration(configProperties,
+        Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    // note: test hostgroups may not accurately reflect the required components for the config properties
+    // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
+    // are not validated
+    Collection<String> groupComponents = new HashSet<String>();
+    groupComponents.add("NAMENODE");
+    TestHostGroup group = new TestHostGroup("group1", groupComponents, Collections.singleton("host1"));
 
-    try {
-      updater.doUpdateForClusterCreate(hostGroups, testStackDefinition);
-      fail("IllegalArgumentException should have been thrown");
-    } catch (IllegalArgumentException illegalArgumentException) {
-      // expected exception
-    }
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
+    hostGroups.add(group);
 
-    mockSupport.verifyAll();
+    ClusterTopology topology = createClusterTopology("c1", bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+    configProcessor.doUpdateForBlueprintExport();
+
+    assertEquals("Incorrect state for hdfs-site config after HA call in non-HA environment, should be zero",
+        0, hdfsSiteProperties.size());
   }
 
   @Test
-  public void testDoUpdateForClusterCreate_SingleHostProperty__MissingOptionalComponent() throws Exception {
-    final String expectedHostName = "localhost";
+  public void testDoNameNodeHighAvailabilityExportWithHAEnabledMultipleServices() throws Exception {
+    final String expectedNameServiceOne = "mynameserviceOne";
+    final String expectedNameServiceTwo = "mynameserviceTwo";
+    final String expectedHostNameOne = "c6401.apache.ambari.org";
+    final String expectedHostNameTwo = "c6402.apache.ambari.org";
 
-    EasyMockSupport mockSupport = new EasyMockSupport();
+    final String expectedPortNum = "808080";
+    final String expectedNodeOne = "nn1";
+    final String expectedNodeTwo = "nn2";
+    final String expectedHostGroupName = "host_group_1";
 
-    AmbariManagementController mockMgmtController =
-      mockSupport.createMock(AmbariManagementController.class);
+    Map<String, Map<String, String>> configProperties = new HashMap<String, Map<String, String>>();
+    Map<String, String> hdfsSiteProperties = new HashMap<String, String>();
+    configProperties.put("hdfs-site", hdfsSiteProperties);
 
-    expect(mockMgmtController.getStackServices(isA(Set.class))).andReturn(Collections.<StackServiceResponse>emptySet());
+    // setup hdfs config for test
+    hdfsSiteProperties.put("dfs.nameservices", expectedNameServiceOne + "," + expectedNameServiceTwo);
+    hdfsSiteProperties.put("dfs.ha.namenodes." + expectedNameServiceOne, expectedNodeOne + ", " + expectedNodeTwo);
+    hdfsSiteProperties.put("dfs.ha.namenodes." + expectedNameServiceTwo, expectedNodeOne + ", " + expectedNodeTwo);
 
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> typeProps = new HashMap<String, String>();
-    typeProps.put("yarn.timeline-service.address", expectedHostName);
-    properties.put("yarn-site", typeProps);
+    // setup properties that include host information for nameservice one
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameServiceOne + "." + expectedNodeOne, expectedHostNameOne + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameServiceOne + "." + expectedNodeTwo, expectedHostNameOne + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameServiceOne + "." + expectedNodeOne, expectedHostNameOne + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameServiceOne + "." + expectedNodeTwo, expectedHostNameOne + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameServiceOne + "." + expectedNodeOne, expectedHostNameOne + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameServiceOne + "." + expectedNodeTwo, expectedHostNameOne + ":" + expectedPortNum);
 
-    Collection<String> hgComponents = new HashSet<String>();
-    hgComponents.add("NAMENODE");
-    hgComponents.add("SECONDARY_NAMENODE");
-    hgComponents.add("RESOURCEMANAGER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+    // setup properties that include host information for nameservice two
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameServiceTwo + "." + expectedNodeOne, expectedHostNameTwo + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.https-address." + expectedNameServiceTwo + "." + expectedNodeTwo, expectedHostNameTwo + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameServiceTwo + "." + expectedNodeOne, expectedHostNameTwo + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.http-address." + expectedNameServiceTwo + "." + expectedNodeTwo, expectedHostNameTwo + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameServiceTwo + "." + expectedNodeOne, expectedHostNameTwo + ":" + expectedPortNum);
+    hdfsSiteProperties.put("dfs.namenode.rpc-address." + expectedNameServiceTwo + "." + expectedNodeTwo, expectedHostNameTwo + ":" + expectedPortNum);
 
-    Collection<String> hgComponents2 = new HashSet<String>();
-    hgComponents2.add("DATANODE");
-    hgComponents2.add("HDFS_CLIENT");
-    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+    Configuration clusterConfig = new Configuration(configProperties,
+        Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
-    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
-    hostGroups.put(group1.getName(), group1);
-    hostGroups.put(group2.getName(), group2);
+    // note: test hostgroups may not accurately reflect the required components for the config properties
+    // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
+    // are not validated
+    Collection<String> groupComponents = new HashSet<String>();
+    groupComponents.add("RESOURCEMANAGER");
+    Collection<String> hosts = new ArrayList<String>();
+    hosts.add(expectedHostNameOne);
+    hosts.add(expectedHostNameTwo);
+    hosts.add("serverTwo");
+    TestHostGroup group = new TestHostGroup(expectedHostGroupName, groupComponents, hosts);
 
-    mockSupport.replayAll();
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
+    hostGroups.add(group);
 
-    Stack testStackDefinition = new Stack("HDP", "2.1", mockMgmtController) {
-      @Override
-      public Cardinality getCardinality(String component) {
-        // simulate a stack that supports 0 or 1 instances of the APP_TIMELINE_SERVER
-        if (component.equals("APP_TIMELINE_SERVER")) {
-          return new Cardinality("0-1");
-        }
+    ClusterTopology topology = createClusterTopology("c1", bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
+    configProcessor.doUpdateForBlueprintExport();
+
+    // verify results for name service one
+    assertEquals("HTTPS address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceOne + "." + expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceOne + "." + expectedNodeTwo));
+
+    assertEquals("HTTP address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceOne + "." + expectedNodeOne));
+    assertEquals("HTTP address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceOne + "." + expectedNodeTwo));
+
+    assertEquals("RPC address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceOne + "." + expectedNodeOne));
+    assertEquals("RPC address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceOne + "." + expectedNodeTwo));
 
-        return null;
-      }
-    };
 
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
+    // verify results for name service two
+    assertEquals("HTTPS address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceTwo + "." + expectedNodeOne));
+    assertEquals("HTTPS address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.https-address." + expectedNameServiceTwo + "." + expectedNodeTwo));
 
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups, testStackDefinition);
-    String updatedVal = updatedProperties.get("yarn-site").get("yarn.timeline-service.address");
-    assertEquals("Timeline Server config property should not have been updated",
-      expectedHostName, updatedVal);
+    assertEquals("HTTP address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceTwo + "." + expectedNodeOne));
+    assertEquals("HTTP address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.http-address." + expectedNameServiceTwo + "." + expectedNodeTwo));
 
-    mockSupport.verifyAll();
+    assertEquals("RPC address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceTwo + "." + expectedNodeOne));
+    assertEquals("RPC address HA property not properly exported",
+        createExportedAddress(expectedPortNum, expectedHostGroupName), hdfsSiteProperties.get("dfs.namenode.rpc-address." + expectedNameServiceTwo + "." + expectedNodeTwo));
   }
 
   @Test
-  public void testDoUpdateForClusterCreate_SingleHostProperty__defaultValue__WithPort() {
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> typeProps = new HashMap<String, String>();
-    typeProps.put("fs.defaultFS", "localhost:5050");
-    properties.put("core-site", typeProps);
-
-    Collection<String> hgComponents = new HashSet<String>();
-    hgComponents.add("NAMENODE");
-    hgComponents.add("SECONDARY_NAMENODE");
-    hgComponents.add("RESOURCEMANAGER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+  public void testYarnConfigExported() throws Exception {
+    final String expectedHostName = "c6401.apache.ambari.org";
+    final String expectedPortNum = "808080";
+    final String expectedHostGroupName = "host_group_1";
 
-    Collection<String> hgComponents2 = new HashSet<String>();
-    hgComponents2.add("DATANODE");
-    hgComponents2.add("HDFS_CLIENT");
-    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
+    Map<String, Map<String, String>> configProperties = new HashMap<String, Map<String, String>>();
+    Map<String, String> yarnSiteProperties = new HashMap<String, String>();
+    configProperties.put("yarn-site", yarnSiteProperties);
 
-    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
-    hostGroups.put(group1.getName(), group1);
-    hostGroups.put(group2.getName(), group2);
+    // setup properties that include host information
+    yarnSiteProperties.put("yarn.log.server.url", "http://" + expectedHostName +":19888/jobhistory/logs");
+    yarnSiteProperties.put("yarn.resourcemanager.hostname", expectedHostName);
+    yarnSiteProperties.put("yarn.resourcemanager.resource-tracker.address", expectedHostName + ":" + expectedPortNum);
+    yarnSiteProperties.put("yarn.resourcemanager.webapp.address", expectedHostName + ":" + expectedPortNum);
+    yarnSiteProperties.put("yarn.resourcemanager.scheduler.address", expectedHostName + ":" + expectedPortNum);
+    yarnSiteProperties.put("yarn.resourcemanager.address", expectedHostName + ":" + expectedPortNum);
+    yarnSiteProperties.put("yarn.resourcemanager.admin.address", expectedHostName + ":" + expectedPortNum);
+    yarnSiteProperties.put("yarn.timeline-service.address", expectedHostName + ":" + expectedPortNum);
+    yarnSiteProperties.put("yarn.timeline-service.webapp.address", expectedHostName + ":" + expectedPortNum);
+    yarnSiteProperties.put("yarn.timeline-service.webapp.https.address", expectedHostName + ":" + expectedPortNum);
 
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups, null);
-    String updatedVal = updatedProperties.get("core-site").get("fs.defaultFS");
-    assertEquals("testhost:5050", updatedVal);
-  }
+    Configuration clusterConfig = new Configuration(configProperties,
+        Collections.<String, Map<String, Map<String, String>>>emptyMap());
 
-  @Test
-  public void testDoUpdateForClusterCreate_MultiHostProperty__defaultValues() {
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> typeProps = new HashMap<String, String>();
-    typeProps.put("hbase.zookeeper.quorum", "localhost");
-    properties.put("hbase-site", typeProps);
+    // note: test hostgroups may not accurately reflect the required components for the config properties
+    // which are mapped to them.  Only the hostgroup name is used for hostgroup resolution and the components
+    // are not validated
+    Collection<String> groupComponents = new HashSet<String>();
+    groupComponents.add("RESOURCEMANAGER");
+    Collection<String> hosts = new ArrayList<String>();
+    hosts.add(expectedHostName);
+    hosts.add("serverTwo");
+    TestHostGroup group = new TestHostGroup(expectedHostGroupName, groupComponents, hosts);
 
-    Collection<String> hgComponents = new HashSet<String>();
-    hgComponents.add("NAMENODE");
-    hgComponents.add("SECONDARY_NAMENODE");
-    hgComponents.add("ZOOKEEPER_SERVER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
+    hostGroups.add(group);
 
-    Collection<String> hgComponents2 = new HashSet<String>();
-    hgComponents2.add("DATANODE");
-    hgComponents2.add("HDFS_CLIENT");
-    hgComponents2.add("ZOOKEEPER_SERVER");
-    Set<String> hosts2 = new HashSet<String>();
-    hosts2.add("testhost2");
-    hosts2.add("testhost2a");
-    hosts2.add("testhost2b");
-    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
-
-    Collection<String> hgComponents3 = new HashSet<String>();
-    hgComponents2.add("HDFS_CLIENT");
-    hgComponents2.add("ZOOKEEPER_CLIENT");
-    Set<String> hosts3 = new HashSet<String>();
-    hosts3.add("testhost3");
-    hosts3.add("testhost3a");
-    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
-
-    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
-    hostGroups.put(group1.getName(), group1);
-    hostGroups.put(group2.getName(), group2);
-    hostGroups.put(group3.getName(), group3);
-
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups, null);
-    String updatedVal = updatedProperties.get("hbase-site").get("hbase.zookeeper.quorum");
-    String[] hosts = updatedVal.split(",");
-
-    Collection<String> expectedHosts = new HashSet<String>();
-    expectedHosts.add("testhost");
-    expectedHosts.add("testhost2");
-    expectedHosts.add("testhost2a");
-    expectedHosts.add("testhost2b");
-
-    assertEquals(4, hosts.length);
-    for (String host : hosts) {
-      assertTrue(expectedHosts.contains(host));
-      expectedHosts.remove(host);
-    }
-  }
-
-  @Test
-  public void testDoUpdateForClusterCreate_MultiHostProperty__defaultValues___withPorts() {
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> typeProps = new HashMap<String, String>();
-    typeProps.put("templeton.zookeeper.hosts", "localhost:9090");
-    properties.put("webhcat-site", typeProps);
-
-    Collection<String> hgComponents = new HashSet<String>();
-    hgComponents.add("NAMENODE");
-    hgComponents.add("SECONDARY_NAMENODE");
-    hgComponents.add("ZOOKEEPER_SERVER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
-
-    Collection<String> hgComponents2 = new HashSet<String>();
-    hgComponents2.add("DATANODE");
-    hgComponents2.add("HDFS_CLIENT");
-    hgComponents2.add("ZOOKEEPER_SERVER");
-    Set<String> hosts2 = new HashSet<String>();
-    hosts2.add("testhost2");
-    hosts2.add("testhost2a");
-    hosts2.add("testhost2b");
-    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
-
-    Collection<String> hgComponents3 = new HashSet<String>();
-    hgComponents2.add("HDFS_CLIENT");
-    hgComponents2.add("ZOOKEEPER_CLIENT");
-    Set<String> hosts3 = new HashSet<String>();
-    hosts3.add("testhost3");
-    hosts3.add("testhost3a");
-    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
-
-    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
-    hostGroups.put(group1.getName(), group1);
-    hostGroups.put(group2.getName(), group2);
-    hostGroups.put(group3.getName(), group3);
-
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups, null);
-    String updatedVal = updatedProperties.get("webhcat-site").get("templeton.zookeeper.hosts");
-    String[] hosts = updatedVal.split(",");
-
-    Collection<String> expectedHosts = new HashSet<String>();
-    expectedHosts.add("testhost:9090");
-    expectedHosts.add("testhost2:9090");
-    expectedHosts.add("testhost2a:9090");
-    expectedHosts.add("testhost2b:9090");
-
-    assertEquals(4, hosts.length);
-    for (String host : hosts) {
-      assertTrue(expectedHosts.contains(host));
-      expectedHosts.remove(host);
-    }
-  }
-
-  @Test
-  public void testDoUpdateForClusterCreate_MultiHostProperty__defaultValues___YAML() {
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> typeProps = new HashMap<String, String>();
-    typeProps.put("storm.zookeeper.servers", "['localhost']");
-    properties.put("storm-site", typeProps);
-
-    Collection<String> hgComponents = new HashSet<String>();
-    hgComponents.add("NAMENODE");
-    hgComponents.add("SECONDARY_NAMENODE");
-    hgComponents.add("ZOOKEEPER_SERVER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
-
-    Collection<String> hgComponents2 = new HashSet<String>();
-    hgComponents2.add("DATANODE");
-    hgComponents2.add("HDFS_CLIENT");
-    hgComponents2.add("ZOOKEEPER_SERVER");
-    Set<String> hosts2 = new HashSet<String>();
-    hosts2.add("testhost2");
-    hosts2.add("testhost2a");
-    hosts2.add("testhost2b");
-    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
-
-    Collection<String> hgComponents3 = new HashSet<String>();
-    hgComponents2.add("HDFS_CLIENT");
-    hgComponents2.add("ZOOKEEPER_CLIENT");
-    Set<String> hosts3 = new HashSet<String>();
-    hosts3.add("testhost3");
-    hosts3.add("testhost3a");
-    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
-
-    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
-    hostGroups.put(group1.getName(), group1);
-    hostGroups.put(group2.getName(), group2);
-    hostGroups.put(group3.getName(), group3);
-
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups, null);
-    String updatedVal = updatedProperties.get("storm-site").get("storm.zookeeper.servers");
-    assertTrue(updatedVal.startsWith("["));
-    assertTrue(updatedVal.endsWith("]"));
-    // remove the surrounding brackets
-    updatedVal = updatedVal.replaceAll("[\\[\\]]", "");
-
-    String[] hosts = updatedVal.split(",");
-
-    Collection<String> expectedHosts = new HashSet<String>();
-    expectedHosts.add("'testhost'");
-    expectedHosts.add("'testhost2'");
-    expectedHosts.add("'testhost2a'");
-    expectedHosts.add("'testhost2b'");
-
-    assertEquals(4, hosts.length);
-    for (String host : hosts) {
-      assertTrue(expectedHosts.contains(host));
-      expectedHosts.remove(host);
-    }
-  }
-
-  @Test
-  public void testDoUpdateForClusterCreate_MProperty__defaultValues() {
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> typeProps = new HashMap<String, String>();
-    typeProps.put("hbase_master_heapsize", "512m");
-    properties.put("hbase-env", typeProps);
-
-    Collection<String> hgComponents = new HashSet<String>();
-    hgComponents.add("NAMENODE");
-    hgComponents.add("SECONDARY_NAMENODE");
-    hgComponents.add("RESOURCEMANAGER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
-
-    Collection<String> hgComponents2 = new HashSet<String>();
-    hgComponents2.add("DATANODE");
-    hgComponents2.add("HDFS_CLIENT");
-    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
-
-    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
-    hostGroups.put(group1.getName(), group1);
-    hostGroups.put(group2.getName(), group2);
-
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups, null);
-    String updatedVal = updatedProperties.get("hbase-env").get("hbase_master_heapsize");
-    assertEquals("512m", updatedVal);
-  }
-
-  @Test
-  public void testDoUpdateForClusterCreate_MProperty__missingM() {
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> typeProps = new HashMap<String, String>();
-    typeProps.put("hbase_master_heapsize", "512");
-    properties.put("hbase-env", typeProps);
-
-    Collection<String> hgComponents = new HashSet<String>();
-    hgComponents.add("NAMENODE");
-    hgComponents.add("SECONDARY_NAMENODE");
-    hgComponents.add("RESOURCEMANAGER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
-
-    Collection<String> hgComponents2 = new HashSet<String>();
-    hgComponents2.add("DATANODE");
-    hgComponents2.add("HDFS_CLIENT");
-    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
-
-    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
-    hostGroups.put(group1.getName(), group1);
-    hostGroups.put(group2.getName(), group2);
-
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups, null);
-    String updatedVal = updatedProperties.get("hbase-env").get("hbase_master_heapsize");
-    assertEquals("512m", updatedVal);
-  }
-
-  @Test
-  public void testDoUpdateForClusterCreate_SingleHostProperty__exportedValue() {
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> typeProps = new HashMap<String, String>();
-    typeProps.put("yarn.resourcemanager.hostname", "%HOSTGROUP::group1%");
-    properties.put("yarn-site", typeProps);
-
-    Collection<String> hgComponents = new HashSet<String>();
-    hgComponents.add("NAMENODE");
-    hgComponents.add("SECONDARY_NAMENODE");
-    hgComponents.add("RESOURCEMANAGER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
-
-    Collection<String> hgComponents2 = new HashSet<String>();
-    hgComponents2.add("DATANODE");
-    hgComponents2.add("HDFS_CLIENT");
-    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
-
-    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
-    hostGroups.put(group1.getName(), group1);
-    hostGroups.put(group2.getName(), group2);
-
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups, null);
-    String updatedVal = updatedProperties.get("yarn-site").get("yarn.resourcemanager.hostname");
-    assertEquals("testhost", updatedVal);
-  }
-
-  @Test
-  public void testDoUpdateForClusterCreate_SingleHostProperty__exportedValue_UsingMinusSymbolInHostGroupName() {
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> typeProps = new HashMap<String, String>();
-    typeProps.put("yarn.resourcemanager.hostname", "%HOSTGROUP::os-amb-r6-secha-1427972156-hbaseha-3-6%");
-    properties.put("yarn-site", typeProps);
-
-    Collection<String> hgComponents = new HashSet<String>();
-    hgComponents.add("NAMENODE");
-    hgComponents.add("SECONDARY_NAMENODE");
-    hgComponents.add("RESOURCEMANAGER");
-    HostGroup group1 = new TestHostGroup("os-amb-r6-secha-1427972156-hbaseha-3-6", Collections.singleton("testhost"), hgComponents);
-
-    Collection<String> hgComponents2 = new HashSet<String>();
-    hgComponents2.add("DATANODE");
-    hgComponents2.add("HDFS_CLIENT");
-    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
-
-    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
-    hostGroups.put(group1.getName(), group1);
-    hostGroups.put(group2.getName(), group2);
-
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups, null);
-    String updatedVal = updatedProperties.get("yarn-site").get("yarn.resourcemanager.hostname");
-    assertEquals("testhost", updatedVal);
-  }
-
-  @Test
-  public void testDoUpdateForClusterCreate_SingleHostProperty__exportedValue_WithPort_UsingMinusSymbolInHostGroupName() {
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> typeProps = new HashMap<String, String>();
-    typeProps.put("yarn.resourcemanager.hostname", "%HOSTGROUP::os-amb-r6-secha-1427972156-hbaseha-3-6%:2180");
-    properties.put("yarn-site", typeProps);
-
-    Collection<String> hgComponents = new HashSet<String>();
-    hgComponents.add("NAMENODE");
-    hgComponents.add("SECONDARY_NAMENODE");
-    hgComponents.add("RESOURCEMANAGER");
-    HostGroup group1 = new TestHostGroup("os-amb-r6-secha-1427972156-hbaseha-3-6", Collections.singleton("testhost"), hgComponents);
-
-    Collection<String> hgComponents2 = new HashSet<String>();
-    hgComponents2.add("DATANODE");
-    hgComponents2.add("HDFS_CLIENT");
-    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
-
-    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
-    hostGroups.put(group1.getName(), group1);
-    hostGroups.put(group2.getName(), group2);
-
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups, null);
-    String updatedVal = updatedProperties.get("yarn-site").get("yarn.resourcemanager.hostname");
-    assertEquals("testhost:2180", updatedVal);
-  }
-
-  @Test
-  public void testDoUpdateForClusterCreate_SingleHostProperty__exportedValue__WithPort() {
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> typeProps = new HashMap<String, String>();
-    typeProps.put("fs.defaultFS", "%HOSTGROUP::group1%:5050");
-    properties.put("core-site", typeProps);
-
-    Collection<String> hgComponents = new HashSet<String>();
-    hgComponents.add("NAMENODE");
-    hgComponents.add("SECONDARY_NAMENODE");
-    hgComponents.add("RESOURCEMANAGER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
-
-    Collection<String> hgComponents2 = new HashSet<String>();
-    hgComponents2.add("DATANODE");
-    hgComponents2.add("HDFS_CLIENT");
-    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
-
-    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
-    hostGroups.put(group1.getName(), group1);
-    hostGroups.put(group2.getName(), group2);
-
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups, null);
-    String updatedVal = updatedProperties.get("core-site").get("fs.defaultFS");
-    assertEquals("testhost:5050", updatedVal);
-  }
-
-  @Test
-  public void testDoUpdateForClusterCreate_MultiHostProperty__exportedValues() {
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> typeProps = new HashMap<String, String>();
-    typeProps.put("hbase.zookeeper.quorum", "%HOSTGROUP::group1%,%HOSTGROUP::group2%");
-    properties.put("hbase-site", typeProps);
-
-    Collection<String> hgComponents = new HashSet<String>();
-    hgComponents.add("NAMENODE");
-    hgComponents.add("SECONDARY_NAMENODE");
-    hgComponents.add("ZOOKEEPER_SERVER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
-
-    Collection<String> hgComponents2 = new HashSet<String>();
-    hgComponents2.add("DATANODE");
-    hgComponents2.add("HDFS_CLIENT");
-    hgComponents2.add("ZOOKEEPER_SERVER");
-    Set<String> hosts2 = new HashSet<String>();
-    hosts2.add("testhost2");
-    hosts2.add("testhost2a");
-    hosts2.add("testhost2b");
-    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
-
-    Collection<String> hgComponents3 = new HashSet<String>();
-    hgComponents2.add("HDFS_CLIENT");
-    hgComponents2.add("ZOOKEEPER_CLIENT");
-    Set<String> hosts3 = new HashSet<String>();
-    hosts3.add("testhost3");
-    hosts3.add("testhost3a");
-    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
-
-    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
-    hostGroups.put(group1.getName(), group1);
-    hostGroups.put(group2.getName(), group2);
-    hostGroups.put(group3.getName(), group3);
-
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups, null);
-    String updatedVal = updatedProperties.get("hbase-site").get("hbase.zookeeper.quorum");
-    String[] hosts = updatedVal.split(",");
-
-    Collection<String> expectedHosts = new HashSet<String>();
-    expectedHosts.add("testhost");
-    expectedHosts.add("testhost2");
-    expectedHosts.add("testhost2a");
-    expectedHosts.add("testhost2b");
-
-    assertEquals(4, hosts.length);
-    for (String host : hosts) {
-      assertTrue(expectedHosts.contains(host));
-      expectedHosts.remove(host);
-    }
-  }
-
-  @Test
-  public void testDoUpdateForClusterCreate_MultiHostProperty__exportedValues___withPorts() {
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> typeProps = new HashMap<String, String>();
-    typeProps.put("templeton.zookeeper.hosts", "%HOSTGROUP::group1%:9090,%HOSTGROUP::group2%:9091");
-    properties.put("webhcat-site", typeProps);
-
-    Collection<String> hgComponents = new HashSet<String>();
-    hgComponents.add("NAMENODE");
-    hgComponents.add("SECONDARY_NAMENODE");
-    hgComponents.add("ZOOKEEPER_SERVER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
-
-    Collection<String> hgComponents2 = new HashSet<String>();
-    hgComponents2.add("DATANODE");
-    hgComponents2.add("HDFS_CLIENT");
-    hgComponents2.add("ZOOKEEPER_SERVER");
-    Set<String> hosts2 = new HashSet<String>();
-    hosts2.add("testhost2");
-    hosts2.add("testhost2a");
-    hosts2.add("testhost2b");
-    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
-
-    Collection<String> hgComponents3 = new HashSet<String>();
-    hgComponents2.add("HDFS_CLIENT");
-    hgComponents2.add("ZOOKEEPER_CLIENT");
-    Set<String> hosts3 = new HashSet<String>();
-    hosts3.add("testhost3");
-    hosts3.add("testhost3a");
-    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
-
-    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
-    hostGroups.put(group1.getName(), group1);
-    hostGroups.put(group2.getName(), group2);
-    hostGroups.put(group3.getName(), group3);
-
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups, null);
-    String updatedVal = updatedProperties.get("webhcat-site").get("templeton.zookeeper.hosts");
-    String[] hosts = updatedVal.split(",");
-
-    Collection<String> expectedHosts = new HashSet<String>();
-    expectedHosts.add("testhost:9090");
-    expectedHosts.add("testhost2:9091");
-    expectedHosts.add("testhost2a:9091");
-    expectedHosts.add("testhost2b:9091");
-
-    assertEquals(4, hosts.length);
-    for (String host : hosts) {
-      assertTrue(expectedHosts.contains(host));
-      expectedHosts.remove(host);
-    }
-  }
-
-  @Test
-  public void testDoUpdateForClusterCreate_MultiHostProperty__exportedValues___withPorts_UsingMinusSymbolInHostGroupName() {
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> typeProps = new HashMap<String, String>();
-    typeProps.put("ha.zookeeper.quorum", "%HOSTGROUP::os-amb-r6-secha-1427972156-hbaseha-3-6%:2181,%HOSTGROUP::os-amb-r6-secha-1427972156-hbaseha-3-5%:2181,%HOSTGROUP::os-amb-r6-secha-1427972156-hbaseha-3-7%:2181");
-    properties.put("core-site", typeProps);
-
-    Collection<String> hgComponents = new HashSet<String>();
-    hgComponents.add("NAMENODE");
-    hgComponents.add("SECONDARY_NAMENODE");
-    hgComponents.add("ZOOKEEPER_SERVER");
-    HostGroup group1 = new TestHostGroup("os-amb-r6-secha-1427972156-hbaseha-3-6", Collections.singleton("testhost"), hgComponents);
-
-    Collection<String> hgComponents2 = new HashSet<String>();
-    hgComponents2.add("DATANODE");
-    hgComponents2.add("HDFS_CLIENT");
-    hgComponents2.add("ZOOKEEPER_SERVER");
-    Set<String> hosts2 = new HashSet<String>();
-    hosts2.add("testhost2");
-    hosts2.add("testhost2a");
-    hosts2.add("testhost2b");
-    HostGroup group2 = new TestHostGroup("os-amb-r6-secha-1427972156-hbaseha-3-5", hosts2, hgComponents2);
-
-    Collection<String> hgComponents3 = new HashSet<String>();
-    hgComponents2.add("HDFS_CLIENT");
-    hgComponents2.add("ZOOKEEPER_CLIENT");
-    Set<String> hosts3 = new HashSet<String>();
-    hosts3.add("testhost3");
-    hosts3.add("testhost3a");
-    HostGroup group3 = new TestHostGroup("os-amb-r6-secha-1427972156-hbaseha-3-7", hosts3, hgComponents3);
-
-    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
-    hostGroups.put(group1.getName(), group1);
-    hostGroups.put(group2.getName(), group2);
-    hostGroups.put(group3.getName(), group3);
-
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups, null);
-    String updatedVal = updatedProperties.get("core-site").get("ha.zookeeper.quorum");
-    String[] hosts = updatedVal.split(",");
-
-    Collection<String> expectedHosts = new HashSet<String>();
-    expectedHosts.add("testhost:2181");
-    expectedHosts.add("testhost2:2181");
-    expectedHosts.add("testhost2a:2181");
-    expectedHosts.add("testhost2b:2181");
-    expectedHosts.add("testhost3:2181");
-    expectedHosts.add("testhost3a:2181");
-
-    assertEquals(6, hosts.length);
-    for (String host : hosts) {
-      assertTrue("Expected host :" + host + "was not included in the multi-server list in this property.", expectedHosts.contains(host));
-      expectedHosts.remove(host);
-    }
-  }
-
-  @Test
-  public void testDoUpdateForClusterCreate_MultiHostProperty_exportedValues_withPorts_singleHostValue() {
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> yarnSiteConfig = new HashMap<String, String>();
-
-    yarnSiteConfig.put("hadoop.registry.zk.quorum", "%HOSTGROUP::host_group_1%:2181");
-    properties.put("yarn-site", yarnSiteConfig);
-
-    Collection<String> hgComponents = new HashSet<String>();
-    hgComponents.add("NAMENODE");
-    hgComponents.add("SECONDARY_NAMENODE");
-    hgComponents.add("ZOOKEEPER_SERVER");
-    HostGroup group1 = new TestHostGroup("host_group_1", Collections.singleton("testhost"), hgComponents);
-
-    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
-    hostGroups.put(group1.getName(), group1);
-
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups, null);
-    assertEquals("Multi-host property with single host value was not correctly updated for cluster create.",
-      "testhost:2181", updatedProperties.get("yarn-site").get("hadoop.registry.zk.quorum"));
-  }
-
-  @Test
-  public void testDoUpdateForClusterCreate_MultiHostProperty__exportedValues___YAML() {
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> typeProps = new HashMap<String, String>();
-    typeProps.put("storm.zookeeper.servers", "['%HOSTGROUP::group1%:9090','%HOSTGROUP::group2%:9091']");
-    properties.put("storm-site", typeProps);
-
-    Collection<String> hgComponents = new HashSet<String>();
-    hgComponents.add("NAMENODE");
-    hgComponents.add("SECONDARY_NAMENODE");
-    hgComponents.add("ZOOKEEPER_SERVER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
-
-    Collection<String> hgComponents2 = new HashSet<String>();
-    hgComponents2.add("DATANODE");
-    hgComponents2.add("HDFS_CLIENT");
-    hgComponents2.add("ZOOKEEPER_SERVER");
-    Set<String> hosts2 = new HashSet<String>();
-    hosts2.add("testhost2");
-    hosts2.add("testhost2a");
-    hosts2.add("testhost2b");
-    HostGroup group2 = new TestHostGroup("group2", hosts2, hgComponents2);
-
-    Collection<String> hgComponents3 = new HashSet<String>();
-    hgComponents2.add("HDFS_CLIENT");
-    hgComponents2.add("ZOOKEEPER_CLIENT");
-    Set<String> hosts3 = new HashSet<String>();
-    hosts3.add("testhost3");
-    hosts3.add("testhost3a");
-    HostGroup group3 = new TestHostGroup("group3", hosts3, hgComponents3);
-
-    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
-    hostGroups.put(group1.getName(), group1);
-    hostGroups.put(group2.getName(), group2);
-    hostGroups.put(group3.getName(), group3);
-
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups, null);
-    String updatedVal = updatedProperties.get("storm-site").get("storm.zookeeper.servers");
-    assertTrue(updatedVal.startsWith("["));
-    assertTrue(updatedVal.endsWith("]"));
-    // remove the surrounding brackets
-    updatedVal = updatedVal.replaceAll("[\\[\\]]", "");
-
-    String[] hosts = updatedVal.split(",");
-
-    Collection<String> expectedHosts = new HashSet<String>();
-    expectedHosts.add("'testhost:9090'");
-    expectedHosts.add("'testhost2:9091'");
-    expectedHosts.add("'testhost2a:9091'");
-    expectedHosts.add("'testhost2b:9091'");
-
-    assertEquals(4, hosts.length);
-    for (String host : hosts) {
-      assertTrue(expectedHosts.contains(host));
-      expectedHosts.remove(host);
-    }
-  }
-
-  @Test
-  public void testDoUpdateForClusterCreate_DBHostProperty__defaultValue() {
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> hiveSiteProps = new HashMap<String, String>();
-    hiveSiteProps.put("javax.jdo.option.ConnectionURL", "jdbc:mysql://localhost/hive?createDatabaseIfNotExist=true");
-    Map<String, String> hiveEnvProps = new HashMap<String, String>();
-    hiveEnvProps.put("hive_database", "New MySQL Database");
-    properties.put("hive-site", hiveSiteProps);
-    properties.put("hive-env", hiveEnvProps);
-
-    Collection<String> hgComponents = new HashSet<String>();
-    hgComponents.add("NAMENODE");
-    hgComponents.add("SECONDARY_NAMENODE");
-    hgComponents.add("RESOURCEMANAGER");
-    hgComponents.add("MYSQL_SERVER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
-
-    Collection<String> hgComponents2 = new HashSet<String>();
-    hgComponents2.add("DATANODE");
-    hgComponents2.add("HDFS_CLIENT");
-    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
-
-    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
-    hostGroups.put(group1.getName(), group1);
-    hostGroups.put(group2.getName(), group2);
-
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups, null);
-    String updatedVal = updatedProperties.get("hive-site").get("javax.jdo.option.ConnectionURL");
-    assertEquals("jdbc:mysql://testhost/hive?createDatabaseIfNotExist=true", updatedVal);
-  }
-
-  @Test
-  public void testDoUpdateForClusterCreate_DBHostProperty__exportedValue() {
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> hiveSiteProps = new HashMap<String, String>();
-    hiveSiteProps.put("javax.jdo.option.ConnectionURL", "jdbc:mysql://%HOSTGROUP::group1%/hive?createDatabaseIfNotExist=true");
-    Map<String, String> hiveEnvProps = new HashMap<String, String>();
-    hiveEnvProps.put("hive_database", "New MySQL Database");
-    properties.put("hive-site", hiveSiteProps);
-    properties.put("hive-env", hiveEnvProps);
-
-    Collection<String> hgComponents = new HashSet<String>();
-    hgComponents.add("NAMENODE");
-    hgComponents.add("SECONDARY_NAMENODE");
-    hgComponents.add("RESOURCEMANAGER");
-    hgComponents.add("MYSQL_SERVER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
-
-    Collection<String> hgComponents2 = new HashSet<String>();
-    hgComponents2.add("DATANODE");
-    hgComponents2.add("HDFS_CLIENT");
-    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
-
-    Map<String, HostGroup> hostGroups = new HashMap<String, HostGroup>();
-    hostGroups.put(group1.getName(), group1);
-    hostGroups.put(group2.getName(), group2);
-
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(properties);
-    Map<String, Map<String, String>> updatedProperties = updater.doUpdateForClusterCreate(hostGroups, null);
-    String updatedVal = updatedProperties.get("hive-site").get("javax.jdo.option.ConnectionURL");
-    assertEquals("jdbc:mysql://testhost/hive?createDatabaseIfNotExist=true", updatedVal);
-  }
-
-  @Test
-  public void testDoUpdateForClusterCreate_DBHostProperty__external() {
-    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
-    Map<String, String> typeProps = new HashMap<String, String>();
-    typeProps.put("javax.jdo.option.ConnectionURL", "jdbc:mysql://myHost.com/hive?createDatabaseIfNotExist=true");
-    typeProps.put("hive_database", "Existing MySQL Database");
-    properties.put("hive-env", typeProps);
-
-    Collection<String> hgComponents = new HashSet<String>();
-    hgComponents.add("NAMENODE");
-    hgComponents.add("SECONDARY_NAMENODE");
-    hgComponents.add("RESOURCEMANAGER");
-    HostGroup group1 = new TestHostGroup("group1", Collections.singleton("testhost"), hgComponents);
-
-    Collection<String> hgComponents2 = new HashSet<String>();
-    hgComponents2.add("DATANODE");
-    hgComponents2.add("HDFS_CLIENT");
-    HostGroup group2 = new TestHostGroup("group2", Collections.singleton("testhost2"), hgComponents2);
-
-    Map<

<TRUNCATED>