Posted to commits@ambari.apache.org by ad...@apache.org on 2018/01/09 10:13:48 UTC

[ambari] branch branch-feature-AMBARI-14714 updated (98f68f9 -> 4ec01a1)

This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a change to branch branch-feature-AMBARI-14714
in repository https://gitbox.apache.org/repos/asf/ambari.git.


    from 98f68f9  AMBARI-22244. Use service type instead of service name - addendum (adoroszlai)
     new 3d536d6  AMBARI-22252. Revert Blueprint V2 changes
     new 4ec01a1  AMBARI-22253. Create default service group for blueprint (adoroszlai)

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../api/query/render/ClusterBlueprintRenderer.java |  21 +-
 .../ambari/server/api/services/ServiceService.java |   2 +-
 .../StackAdvisorBlueprintProcessor.java            |  14 +-
 .../services/users/ActiveWidgetLayoutService.java  |   2 +-
 .../api/services/views/ViewInstanceService.java    |   2 +-
 .../ambari/server/controller/AmbariServer.java     |   8 +-
 .../server/controller/ServiceGroupRequest.java     |  29 +-
 .../apache/ambari/server/controller/StackV2.java   | 605 ---------------------
 .../ambari/server/controller/StackV2Factory.java   | 251 ---------
 .../AbstractControllerResourceProvider.java        |   2 +-
 .../controller/internal/BaseClusterRequest.java    |  38 +-
 .../internal/BlueprintConfigurationProcessor.java  | 286 +++++-----
 .../internal/BlueprintV2ResourceProvider.java      | 398 --------------
 .../internal/ClusterResourceProvider.java          |   2 +-
 .../controller/internal/ConfigurationContext.java  |  60 --
 .../internal/ExportBlueprintRequest.java           |  34 +-
 .../internal/ProvisionClusterRequest.java          |  49 +-
 .../controller/internal/ScaleClusterRequest.java   |  18 +-
 .../server/controller/internal/UnitUpdater.java    |  15 +-
 .../ambari/server/orm/dao/BlueprintV2DAO.java      | 129 -----
 .../server/orm/entities/BlueprintEntity.java       |   1 +
 .../server/orm/entities/BlueprintV2Entity.java     |  90 ---
 .../orm/entities/HostGroupComponentEntity.java     |  16 +-
 .../orm/entities/HostGroupComponentEntityPK.java   |  36 --
 .../ambari/server/orm/entities/StackEntity.java    |   2 +-
 .../orm/entities/TopologyConfigurationsEntity.java | 148 -----
 .../server/orm/entities/TopologyRequestEntity.java |  40 +-
 .../ambari/server/topology/AmbariContext.java      | 235 +++++---
 .../ambari/server/topology/BlueprintImplV2.java    | 408 --------------
 .../apache/ambari/server/topology/BlueprintV2.java | 251 ---------
 .../ambari/server/topology/BlueprintV2Factory.java | 203 -------
 .../server/topology/BlueprintValidatorImpl.java    |  66 +--
 .../server/topology/BlueprintValidatorV2.java      |  43 --
 .../topology/ClusterConfigurationRequest.java      |  64 +--
 .../ambari/server/topology/ClusterTopology.java    |  15 +-
 .../server/topology/ClusterTopologyImpl.java       |  84 ++-
 .../apache/ambari/server/topology/ComponentV2.java | 138 -----
 .../ambari/server/topology/Configurable.java       |  40 --
 .../ambari/server/topology/Configuration.java      |   7 -
 .../ambari/server/topology/HostGroupImpl.java      |   2 +
 .../ambari/server/topology/HostGroupInfo.java      |  17 +-
 .../apache/ambari/server/topology/HostGroupV2.java | 137 -----
 .../ambari/server/topology/HostGroupV2Impl.java    | 169 ------
 .../apache/ambari/server/topology/HostRequest.java |  38 +-
 .../ambari/server/topology/LogicalRequest.java     |   4 +-
 .../ambari/server/topology/PersistedStateImpl.java |  58 +-
 .../ambari/server/topology/RepositoryVersion.java  |  71 ---
 .../server/topology/SecurityConfiguration.java     |   7 +-
 .../org/apache/ambari/server/topology/Service.java | 159 ------
 .../ambari/server/topology/ServiceGroup.java       |  92 ----
 .../apache/ambari/server/topology/ServiceId.java   |  79 ---
 .../org/apache/ambari/server/topology/Setting.java |  18 +-
 .../ambari/server/topology/TopologyManager.java    |  20 +-
 .../ambari/server/topology/TopologyRequest.java    |  10 +-
 .../topology/tasks/PersistHostResourcesTask.java   |  12 +-
 .../validators/ClusterConfigTypeValidator.java     |   8 +-
 .../topology/validators/HiveServiceValidator.java  |  47 +-
 .../RequiredConfigPropertiesValidator.java         |  91 ++--
 .../validators/RequiredPasswordValidator.java      |  28 +-
 .../validators/StackConfigTypeValidator.java       |  31 +-
 .../server/topology/validators/UnitValidator.java  |  34 +-
 .../src/main/resources/Ambari-DDL-Derby-CREATE.sql |  19 -
 .../src/main/resources/Ambari-DDL-MySQL-CREATE.sql |  23 +-
 .../main/resources/Ambari-DDL-Oracle-CREATE.sql    |  20 -
 .../main/resources/Ambari-DDL-Postgres-CREATE.sql  |  22 -
 .../resources/Ambari-DDL-SQLAnywhere-CREATE.sql    |  20 -
 .../main/resources/Ambari-DDL-SQLServer-CREATE.sql |  24 +-
 .../src/main/resources/META-INF/persistence.xml    |   2 -
 ambari-server/src/main/resources/properties.json   |   5 +-
 .../query/render/ClusterBlueprintRendererTest.java |   4 +-
 .../server/api/services/AmbariMetaInfoTest.java    |   2 +-
 .../api/services/RootServiceServiceTest.java       |   2 +-
 .../server/api/services/ServiceServiceTest.java    |   2 +-
 .../controller/AmbariManagementControllerTest.java |   6 +-
 .../BlueprintConfigurationProcessorTest.java       | 298 +++++-----
 .../internal/BlueprintResourceProviderTest.java    |   5 +-
 .../internal/ClusterResourceProviderTest.java      |   4 +-
 .../internal/ExportBlueprintRequestTest.java       |   2 +-
 .../internal/ProvisionClusterRequestTest.java      |   2 +-
 .../internal/RequestResourceProviderTest.java      |   2 +-
 .../internal/ScaleClusterRequestTest.java          |   2 +-
 .../controller/internal/UnitUpdaterTest.java       |   6 +-
 .../orm/dao/TopologyLogicalRequestDAOTest.java     |   4 +-
 .../server/orm/dao/TopologyRequestDAOTest.java     |   8 +-
 .../ambari/server/state/cluster/ClusterTest.java   |   2 +-
 .../ambari/server/topology/AmbariContextTest.java  |  60 +-
 .../ambari/server/topology/BlueprintImplTest.java  |  10 +-
 .../topology/ClusterConfigurationRequestTest.java  |   8 +-
 .../topology/ClusterDeployWithStartOnlyTest.java   |   4 +-
 ...terInstallWithoutStartOnComponentLevelTest.java |   4 +-
 .../topology/ClusterInstallWithoutStartTest.java   |   4 +-
 .../server/topology/ClusterTopologyImplTest.java   |  42 +-
 .../ambari/server/topology/LogicalRequestTest.java |   2 +-
 .../topology/RequiredPasswordValidatorTest.java    |   2 +-
 .../server/topology/TopologyManagerTest.java       |  31 +-
 .../topology/validators/BlueprintImplV2Test.java   |  79 ---
 .../validators/ClusterConfigTypeValidatorTest.java |  27 +-
 .../validators/HiveServiceValidatorTest.java       |  54 +-
 .../RequiredConfigPropertiesValidatorTest.java     |  46 +-
 .../validators/StackConfigTypeValidatorTest.java   |  24 +-
 .../topology/validators/UnitValidatorTest.java     |  29 +-
 .../test/resources/blueprintv2/blueprintv2.json    | 146 -----
 102 files changed, 1017 insertions(+), 5025 deletions(-)
 delete mode 100644 ambari-server/src/main/java/org/apache/ambari/server/controller/StackV2.java
 delete mode 100644 ambari-server/src/main/java/org/apache/ambari/server/controller/StackV2Factory.java
 delete mode 100644 ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintV2ResourceProvider.java
 delete mode 100644 ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigurationContext.java
 delete mode 100644 ambari-server/src/main/java/org/apache/ambari/server/orm/dao/BlueprintV2DAO.java
 delete mode 100644 ambari-server/src/main/java/org/apache/ambari/server/orm/entities/BlueprintV2Entity.java
 delete mode 100644 ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyConfigurationsEntity.java
 delete mode 100644 ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImplV2.java
 delete mode 100644 ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintV2.java
 delete mode 100644 ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintV2Factory.java
 delete mode 100644 ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorV2.java
 delete mode 100644 ambari-server/src/main/java/org/apache/ambari/server/topology/ComponentV2.java
 delete mode 100644 ambari-server/src/main/java/org/apache/ambari/server/topology/Configurable.java
 delete mode 100644 ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupV2.java
 delete mode 100644 ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupV2Impl.java
 delete mode 100644 ambari-server/src/main/java/org/apache/ambari/server/topology/RepositoryVersion.java
 delete mode 100644 ambari-server/src/main/java/org/apache/ambari/server/topology/Service.java
 delete mode 100644 ambari-server/src/main/java/org/apache/ambari/server/topology/ServiceGroup.java
 delete mode 100644 ambari-server/src/main/java/org/apache/ambari/server/topology/ServiceId.java
 delete mode 100644 ambari-server/src/test/java/org/apache/ambari/server/topology/validators/BlueprintImplV2Test.java
 delete mode 100644 ambari-server/src/test/resources/blueprintv2/blueprintv2.json

-- 
To stop receiving notification emails like this one, please contact
['"commits@ambari.apache.org" <co...@ambari.apache.org>'].

[ambari] 02/02: AMBARI-22253. Create default service group for blueprint (adoroszlai)

Posted by ad...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch branch-feature-AMBARI-14714
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit 4ec01a1d1758e4ab870d81d2b69fa61c244aca8c
Author: Doroszlai, Attila <ad...@hortonworks.com>
AuthorDate: Mon Jan 8 09:56:13 2018 +0100

    AMBARI-22253. Create default service group for blueprint (adoroszlai)
---
 .../server/controller/ServiceGroupRequest.java     | 29 ++++++++++++++++++----
 .../ambari/server/topology/AmbariContext.java      | 18 +++++++++++++-
 .../ambari/server/topology/AmbariContextTest.java  | 28 ++++++++++++++++-----
 3 files changed, 63 insertions(+), 12 deletions(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceGroupRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceGroupRequest.java
index 53c3d1e..38fba97 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceGroupRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceGroupRequest.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -17,6 +17,7 @@
  */
 package org.apache.ambari.server.controller;
 
+import java.util.Objects;
 
 public class ServiceGroupRequest {
 
@@ -56,10 +57,28 @@ public class ServiceGroupRequest {
     this.serviceGroupName = serviceGroupName;
   }
 
+  @Override
   public String toString() {
-    StringBuilder sb = new StringBuilder();
-    sb.append("clusterName=" + clusterName
-      + ", serviceGroupName=" + serviceGroupName);
-    return sb.toString();
+    return String.format("clusterName=%s, serviceGroupName=%s", clusterName, serviceGroupName);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == this) {
+      return true;
+    }
+    if (obj == null || getClass() != obj.getClass()) {
+      return false;
+    }
+
+    ServiceGroupRequest other = (ServiceGroupRequest) obj;
+
+    return Objects.equals(clusterName, other.clusterName) &&
+      Objects.equals(serviceGroupName, other.serviceGroupName);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(clusterName, serviceGroupName);
   }
 }
\ No newline at end of file
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index 8eb6166..4235f9f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -18,6 +18,8 @@
 
 package org.apache.ambari.server.topology;
 
+import static java.util.stream.Collectors.toSet;
+
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
@@ -52,6 +54,7 @@ import org.apache.ambari.server.controller.RequestStatusResponse;
 import org.apache.ambari.server.controller.RootComponent;
 import org.apache.ambari.server.controller.ServiceComponentHostRequest;
 import org.apache.ambari.server.controller.ServiceComponentRequest;
+import org.apache.ambari.server.controller.ServiceGroupRequest;
 import org.apache.ambari.server.controller.ServiceRequest;
 import org.apache.ambari.server.controller.internal.AbstractResourceProvider;
 import org.apache.ambari.server.controller.internal.ComponentResourceProvider;
@@ -131,7 +134,7 @@ public class AmbariContext {
   private static ClusterController clusterController;
   //todo: task id's.  Use existing mechanism for getting next task id sequence
   private final static AtomicLong nextTaskId = new AtomicLong(10000);
-  private static final String DEFAULT_SERVICE_GROUP_NAME = "default_service_group";
+  static final String DEFAULT_SERVICE_GROUP_NAME = "default_service_group"; // exposed for test
 
   private static HostRoleCommandFactory hostRoleCommandFactory;
   private static HostResourceProvider hostResourceProvider;
@@ -324,14 +327,22 @@ public class AmbariContext {
 
   public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName,
       StackId stackId, Long repositoryVersionId) {
+
+    Set<String> serviceGroups = Sets.newHashSet(DEFAULT_SERVICE_GROUP_NAME);
     Collection<String> services = topology.getBlueprint().getServices();
 
     try {
       Cluster cluster = getController().getClusters().getCluster(clusterName);
+      serviceGroups.removeAll(cluster.getServiceGroups().keySet());
       services.removeAll(cluster.getServices().keySet());
     } catch (AmbariException e) {
       throw new RuntimeException("Failed to persist service and component resources: " + e, e);
     }
+
+    Set<ServiceGroupRequest> serviceGroupRequests = serviceGroups.stream()
+      .map(serviceGroupName -> new ServiceGroupRequest(clusterName, serviceGroupName))
+      .collect(toSet());
+
     Set<ServiceRequest> serviceRequests = new HashSet<>();
     Set<ServiceComponentRequest> componentRequests = new HashSet<>();
     for (String service : services) {
@@ -344,12 +355,17 @@ public class AmbariContext {
         componentRequests.add(new ServiceComponentRequest(clusterName, DEFAULT_SERVICE_GROUP_NAME, service, component, null, recoveryEnabled));
       }
     }
+
     try {
+      if (!serviceGroupRequests.isEmpty()) {
+        getServiceGroupResourceProvider().createServiceGroups(serviceGroupRequests);
+      }
       getServiceResourceProvider().createServices(serviceRequests);
       getComponentResourceProvider().createComponents(componentRequests);
     } catch (AmbariException | AuthorizationException e) {
       throw new RuntimeException("Failed to persist service and component resources: " + e, e);
     }
+
     // set all services state to INSTALLED->STARTED
     // this is required so the user can start failed services at the service level
     Map<String, Object> installProps = new HashMap<>();
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java
index 67c8420..d36746f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java
@@ -48,11 +48,13 @@ import org.apache.ambari.server.controller.ClusterRequest;
 import org.apache.ambari.server.controller.ConfigGroupRequest;
 import org.apache.ambari.server.controller.ServiceComponentHostRequest;
 import org.apache.ambari.server.controller.ServiceComponentRequest;
+import org.apache.ambari.server.controller.ServiceGroupRequest;
 import org.apache.ambari.server.controller.ServiceRequest;
 import org.apache.ambari.server.controller.internal.ComponentResourceProvider;
 import org.apache.ambari.server.controller.internal.ConfigGroupResourceProvider;
 import org.apache.ambari.server.controller.internal.HostComponentResourceProvider;
 import org.apache.ambari.server.controller.internal.HostResourceProvider;
+import org.apache.ambari.server.controller.internal.ServiceGroupResourceProvider;
 import org.apache.ambari.server.controller.internal.ServiceResourceProvider;
 import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.controller.internal.VersionDefinitionResourceProvider;
@@ -106,6 +108,7 @@ public class AmbariContextTest {
   private static final AmbariManagementController controller = createNiceMock(AmbariManagementController.class);
   private static final ClusterController clusterController = createStrictMock(ClusterController.class);
   private static final HostResourceProvider hostResourceProvider = createStrictMock(HostResourceProvider.class);
+  private static final ServiceGroupResourceProvider serviceGroupResourceProvider = createStrictMock(ServiceGroupResourceProvider.class);
   private static final ServiceResourceProvider serviceResourceProvider = createStrictMock(ServiceResourceProvider.class);
   private static final ComponentResourceProvider componentResourceProvider = createStrictMock(ComponentResourceProvider.class);
   private static final HostComponentResourceProvider hostComponentResourceProvider = createStrictMock(HostComponentResourceProvider.class);
@@ -135,6 +138,10 @@ public class AmbariContextTest {
 
   @Before
   public void setUp() throws Exception {
+    reset(controller, clusterController, hostResourceProvider, serviceGroupResourceProvider, serviceResourceProvider, componentResourceProvider,
+      hostComponentResourceProvider, configGroupResourceProvider, topology, blueprint, stack, clusters,
+      cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2, configFactory);
+
     // "inject" context state
     Class<AmbariContext> clazz = AmbariContext.class;
     Field f = clazz.getDeclaredField("controller");
@@ -149,6 +156,10 @@ public class AmbariContextTest {
     f.setAccessible(true);
     f.set(null, hostResourceProvider);
 
+    f = clazz.getDeclaredField("serviceGroupResourceProvider");
+    f.setAccessible(true);
+    f.set(null, serviceGroupResourceProvider);
+
     f = clazz.getDeclaredField("serviceResourceProvider");
     f.setAccessible(true);
     f.set(null, serviceResourceProvider);
@@ -270,17 +281,13 @@ public class AmbariContextTest {
 
   @After
   public void tearDown() throws Exception {
-    verify(controller, clusterController, hostResourceProvider, serviceResourceProvider, componentResourceProvider,
-        hostComponentResourceProvider, configGroupResourceProvider, topology, blueprint, stack, clusters,
-        cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2, configFactory);
-
-    reset(controller, clusterController, hostResourceProvider, serviceResourceProvider, componentResourceProvider,
+    verify(controller, clusterController, hostResourceProvider, serviceGroupResourceProvider, serviceResourceProvider, componentResourceProvider,
         hostComponentResourceProvider, configGroupResourceProvider, topology, blueprint, stack, clusters,
         cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2, configFactory);
   }
 
   private void replayAll() {
-    replay(controller, clusterController, hostResourceProvider, serviceResourceProvider, componentResourceProvider,
+    replay(controller, clusterController, hostResourceProvider, serviceGroupResourceProvider, serviceResourceProvider, componentResourceProvider,
       hostComponentResourceProvider, configGroupResourceProvider, topology, blueprint, stack, clusters,
       cluster, group1Info, configHelper, configGroup1, configGroup2, host1, host2, configFactory);
   }
@@ -291,11 +298,14 @@ public class AmbariContextTest {
     Capture<ClusterRequest> clusterRequestCapture = EasyMock.newCapture();
     controller.createCluster(capture(clusterRequestCapture));
     expectLastCall().once();
+    expect(cluster.getServiceGroups()).andReturn(Collections.emptyMap()).anyTimes();
     expect(cluster.getServices()).andReturn(clusterServices).anyTimes();
 
+    Capture<Set<ServiceGroupRequest>> serviceGroupRequestCapture = EasyMock.newCapture();
     Capture<Set<ServiceRequest>> serviceRequestCapture = EasyMock.newCapture();
     Capture<Set<ServiceComponentRequest>> serviceComponentRequestCapture = EasyMock.newCapture();
 
+    expect(serviceGroupResourceProvider.createServiceGroups(capture(serviceGroupRequestCapture))).andReturn(null).once();
     expect(serviceResourceProvider.createServices(capture(serviceRequestCapture))).andReturn(null).once();
     expect(componentResourceProvider.createComponents(capture(serviceComponentRequestCapture))).andReturn(null).once();
 
@@ -320,6 +330,10 @@ public class AmbariContextTest {
     assertEquals(CLUSTER_NAME, clusterRequest.getClusterName());
     assertEquals(String.format("%s-%s", STACK_NAME, STACK_VERSION), clusterRequest.getStackVersion());
 
+    Set<ServiceGroupRequest> serviceGroupRequests = serviceGroupRequestCapture.getValue();
+    Set<ServiceGroupRequest> expectedServiceGroupRequests = Collections.singleton(new ServiceGroupRequest(cluster.getClusterName(), AmbariContext.DEFAULT_SERVICE_GROUP_NAME));
+    assertEquals(expectedServiceGroupRequests, serviceGroupRequests);
+
     Collection<ServiceRequest> serviceRequests = serviceRequestCapture.getValue();
     assertEquals(2, serviceRequests.size());
     Collection<String> servicesFound = new HashSet<>();
@@ -727,8 +741,10 @@ public class AmbariContextTest {
 
     controller.createCluster(capture(Capture.<ClusterRequest>newInstance()));
     expectLastCall().once();
+    expect(cluster.getServiceGroups()).andReturn(Collections.emptyMap()).anyTimes();
     expect(cluster.getServices()).andReturn(clusterServices).anyTimes();
 
+    expect(serviceGroupResourceProvider.createServiceGroups(anyObject())).andReturn(null).once();
     expect(serviceResourceProvider.createServices(anyObject())).andReturn(null).once();
     expect(componentResourceProvider.createComponents(anyObject())).andReturn(null).once();
 

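For readers following the change, here is a minimal, self-contained sketch (hypothetical class and cluster names, not Ambari code) of the pattern the commit relies on: value-based equals/hashCode on the request object lets the default service group be collected into a Set, deduplicated against any groups the cluster already has, and compared directly in test assertions.

import java.util.Collections;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;

// Simplified stand-in for ServiceGroupRequest (hypothetical names; the real
// class lives in org.apache.ambari.server.controller).
final class GroupRequest {
  private final String clusterName;
  private final String serviceGroupName;

  GroupRequest(String clusterName, String serviceGroupName) {
    this.clusterName = clusterName;
    this.serviceGroupName = serviceGroupName;
  }

  @Override
  public boolean equals(Object obj) {
    if (obj == this) {
      return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
      return false;
    }
    GroupRequest other = (GroupRequest) obj;
    return Objects.equals(clusterName, other.clusterName)
        && Objects.equals(serviceGroupName, other.serviceGroupName);
  }

  @Override
  public int hashCode() {
    return Objects.hash(clusterName, serviceGroupName);
  }
}

public class DefaultServiceGroupSketch {
  static final String DEFAULT_SERVICE_GROUP_NAME = "default_service_group";

  public static void main(String[] args) {
    // Start from the default group and drop any group the cluster already has,
    // mirroring the removeAll(cluster.getServiceGroups().keySet()) step above.
    Set<String> serviceGroups = new HashSet<>();
    serviceGroups.add(DEFAULT_SERVICE_GROUP_NAME);
    Set<String> alreadyInCluster = Collections.emptySet();
    serviceGroups.removeAll(alreadyInCluster);

    // Map the remaining names to request objects, as the stream in
    // createAmbariServiceAndComponentResources does.
    Set<GroupRequest> requests = serviceGroups.stream()
        .map(name -> new GroupRequest("c1", name))
        .collect(Collectors.toSet());

    // Because equality is value-based, a test can compare captured requests
    // against an expected singleton set directly.
    Set<GroupRequest> expected =
        Collections.singleton(new GroupRequest("c1", DEFAULT_SERVICE_GROUP_NAME));
    System.out.println(requests.equals(expected)); // prints: true
  }
}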
-- 
To stop receiving notification emails like this one, please contact
"commits@ambari.apache.org" <co...@ambari.apache.org>.

[ambari] 01/02: AMBARI-22252. Revert Blueprint V2 changes

Posted by ad...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch branch-feature-AMBARI-14714
in repository https://gitbox.apache.org/repos/asf/ambari.git

commit 3d536d6942f0de8b0e07265cb20fee5ca2abc5d9
Author: Doroszlai, Attila <ad...@hortonworks.com>
AuthorDate: Mon Jan 8 09:55:56 2018 +0100

    AMBARI-22252. Revert Blueprint V2 changes
---
 .../api/query/render/ClusterBlueprintRenderer.java |  21 +-
 .../ambari/server/api/services/ServiceService.java |   2 +-
 .../StackAdvisorBlueprintProcessor.java            |  14 +-
 .../services/users/ActiveWidgetLayoutService.java  |   2 +-
 .../api/services/views/ViewInstanceService.java    |   2 +-
 .../ambari/server/controller/AmbariServer.java     |   8 +-
 .../apache/ambari/server/controller/StackV2.java   | 605 ---------------------
 .../ambari/server/controller/StackV2Factory.java   | 251 ---------
 .../AbstractControllerResourceProvider.java        |   2 +-
 .../controller/internal/BaseClusterRequest.java    |  38 +-
 .../internal/BlueprintConfigurationProcessor.java  | 286 +++++-----
 .../internal/BlueprintV2ResourceProvider.java      | 398 --------------
 .../internal/ClusterResourceProvider.java          |   2 +-
 .../controller/internal/ConfigurationContext.java  |  60 --
 .../internal/ExportBlueprintRequest.java           |  34 +-
 .../internal/ProvisionClusterRequest.java          |  49 +-
 .../controller/internal/ScaleClusterRequest.java   |  18 +-
 .../server/controller/internal/UnitUpdater.java    |  15 +-
 .../ambari/server/orm/dao/BlueprintV2DAO.java      | 129 -----
 .../server/orm/entities/BlueprintEntity.java       |   1 +
 .../server/orm/entities/BlueprintV2Entity.java     |  90 ---
 .../orm/entities/HostGroupComponentEntity.java     |  16 +-
 .../orm/entities/HostGroupComponentEntityPK.java   |  36 --
 .../ambari/server/orm/entities/StackEntity.java    |   2 +-
 .../orm/entities/TopologyConfigurationsEntity.java | 148 -----
 .../server/orm/entities/TopologyRequestEntity.java |  40 +-
 .../ambari/server/topology/AmbariContext.java      | 225 +++++---
 .../ambari/server/topology/BlueprintImplV2.java    | 408 --------------
 .../apache/ambari/server/topology/BlueprintV2.java | 251 ---------
 .../ambari/server/topology/BlueprintV2Factory.java | 203 -------
 .../server/topology/BlueprintValidatorImpl.java    |  66 +--
 .../server/topology/BlueprintValidatorV2.java      |  43 --
 .../topology/ClusterConfigurationRequest.java      |  64 +--
 .../ambari/server/topology/ClusterTopology.java    |  15 +-
 .../server/topology/ClusterTopologyImpl.java       |  84 ++-
 .../apache/ambari/server/topology/ComponentV2.java | 138 -----
 .../ambari/server/topology/Configurable.java       |  40 --
 .../ambari/server/topology/Configuration.java      |   7 -
 .../ambari/server/topology/HostGroupImpl.java      |   2 +
 .../ambari/server/topology/HostGroupInfo.java      |  17 +-
 .../apache/ambari/server/topology/HostGroupV2.java | 137 -----
 .../ambari/server/topology/HostGroupV2Impl.java    | 169 ------
 .../apache/ambari/server/topology/HostRequest.java |  38 +-
 .../ambari/server/topology/LogicalRequest.java     |   4 +-
 .../ambari/server/topology/PersistedStateImpl.java |  58 +-
 .../ambari/server/topology/RepositoryVersion.java  |  71 ---
 .../server/topology/SecurityConfiguration.java     |   7 +-
 .../org/apache/ambari/server/topology/Service.java | 159 ------
 .../ambari/server/topology/ServiceGroup.java       |  92 ----
 .../apache/ambari/server/topology/ServiceId.java   |  79 ---
 .../org/apache/ambari/server/topology/Setting.java |  18 +-
 .../ambari/server/topology/TopologyManager.java    |  20 +-
 .../ambari/server/topology/TopologyRequest.java    |  10 +-
 .../topology/tasks/PersistHostResourcesTask.java   |  12 +-
 .../validators/ClusterConfigTypeValidator.java     |   8 +-
 .../topology/validators/HiveServiceValidator.java  |  47 +-
 .../RequiredConfigPropertiesValidator.java         |  91 ++--
 .../validators/RequiredPasswordValidator.java      |  28 +-
 .../validators/StackConfigTypeValidator.java       |  31 +-
 .../server/topology/validators/UnitValidator.java  |  34 +-
 .../src/main/resources/Ambari-DDL-Derby-CREATE.sql |  19 -
 .../src/main/resources/Ambari-DDL-MySQL-CREATE.sql |  23 +-
 .../main/resources/Ambari-DDL-Oracle-CREATE.sql    |  20 -
 .../main/resources/Ambari-DDL-Postgres-CREATE.sql  |  22 -
 .../resources/Ambari-DDL-SQLAnywhere-CREATE.sql    |  20 -
 .../main/resources/Ambari-DDL-SQLServer-CREATE.sql |  24 +-
 .../src/main/resources/META-INF/persistence.xml    |   2 -
 ambari-server/src/main/resources/properties.json   |   5 +-
 .../query/render/ClusterBlueprintRendererTest.java |   4 +-
 .../server/api/services/AmbariMetaInfoTest.java    |   2 +-
 .../api/services/RootServiceServiceTest.java       |   2 +-
 .../server/api/services/ServiceServiceTest.java    |   2 +-
 .../controller/AmbariManagementControllerTest.java |   6 +-
 .../BlueprintConfigurationProcessorTest.java       | 298 +++++-----
 .../internal/BlueprintResourceProviderTest.java    |   5 +-
 .../internal/ClusterResourceProviderTest.java      |   4 +-
 .../internal/ExportBlueprintRequestTest.java       |   2 +-
 .../internal/ProvisionClusterRequestTest.java      |   2 +-
 .../internal/RequestResourceProviderTest.java      |   2 +-
 .../internal/ScaleClusterRequestTest.java          |   2 +-
 .../controller/internal/UnitUpdaterTest.java       |   6 +-
 .../orm/dao/TopologyLogicalRequestDAOTest.java     |   4 +-
 .../server/orm/dao/TopologyRequestDAOTest.java     |   8 +-
 .../ambari/server/state/cluster/ClusterTest.java   |   2 +-
 .../ambari/server/topology/AmbariContextTest.java  |  32 +-
 .../ambari/server/topology/BlueprintImplTest.java  |  10 +-
 .../topology/ClusterConfigurationRequestTest.java  |   8 +-
 .../topology/ClusterDeployWithStartOnlyTest.java   |   4 +-
 ...terInstallWithoutStartOnComponentLevelTest.java |   4 +-
 .../topology/ClusterInstallWithoutStartTest.java   |   4 +-
 .../server/topology/ClusterTopologyImplTest.java   |  42 +-
 .../ambari/server/topology/LogicalRequestTest.java |   2 +-
 .../topology/RequiredPasswordValidatorTest.java    |   2 +-
 .../server/topology/TopologyManagerTest.java       |  31 +-
 .../topology/validators/BlueprintImplV2Test.java   |  79 ---
 .../validators/ClusterConfigTypeValidatorTest.java |  27 +-
 .../validators/HiveServiceValidatorTest.java       |  54 +-
 .../RequiredConfigPropertiesValidatorTest.java     |  46 +-
 .../validators/StackConfigTypeValidatorTest.java   |  24 +-
 .../topology/validators/UnitValidatorTest.java     |  29 +-
 .../test/resources/blueprintv2/blueprintv2.json    | 146 -----
 101 files changed, 958 insertions(+), 5017 deletions(-)

diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRenderer.java b/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRenderer.java
index 7171fc7..acdf9ed 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRenderer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRenderer.java
@@ -42,10 +42,10 @@ import org.apache.ambari.server.controller.AmbariServer;
 import org.apache.ambari.server.controller.internal.ArtifactResourceProvider;
 import org.apache.ambari.server.controller.internal.BlueprintConfigurationProcessor;
 import org.apache.ambari.server.controller.internal.BlueprintResourceProvider;
-import org.apache.ambari.server.controller.internal.ConfigurationContext;
 import org.apache.ambari.server.controller.internal.ExportBlueprintRequest;
 import org.apache.ambari.server.controller.internal.RequestImpl;
 import org.apache.ambari.server.controller.internal.ResourceImpl;
+import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.controller.spi.ClusterController;
 import org.apache.ambari.server.controller.spi.NoSuchParentResourceException;
 import org.apache.ambari.server.controller.spi.NoSuchResourceException;
@@ -59,10 +59,10 @@ import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.topology.AmbariContext;
 import org.apache.ambari.server.topology.ClusterTopology;
 import org.apache.ambari.server.topology.ClusterTopologyImpl;
-import org.apache.ambari.server.topology.ComponentV2;
+import org.apache.ambari.server.topology.Component;
 import org.apache.ambari.server.topology.Configuration;
+import org.apache.ambari.server.topology.HostGroup;
 import org.apache.ambari.server.topology.HostGroupInfo;
-import org.apache.ambari.server.topology.HostGroupV2;
 import org.apache.ambari.server.topology.InvalidTopologyException;
 import org.apache.ambari.server.topology.InvalidTopologyTemplateException;
 import org.apache.ambari.server.topology.SecurityConfigurationFactory;
@@ -193,15 +193,12 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer {
       throw new RuntimeException("Unable to process blueprint export request: " + e, e);
     }
 
-    ConfigurationContext configurationContext = new ConfigurationContext(topology.getBlueprint().getStacks().iterator().next(),
-      topology.getBlueprint().getConfiguration());
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, configurationContext);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
-    //TODO add service groups
-    //Stack stack = topology.getBlueprint().getStack();
-    //blueprintResource.setProperty("Blueprints/stack_name", stack.getName());
-    //blueprintResource.setProperty("Blueprints/stack_version", stack.getVersion());
+    Stack stack = topology.getBlueprint().getStack();
+    blueprintResource.setProperty("Blueprints/stack_name", stack.getName());
+    blueprintResource.setProperty("Blueprints/stack_version", stack.getVersion());
 
     if (topology.isClusterKerberosEnabled()) {
       Map<String, Object> securityConfigMap = new LinkedHashMap<>();
@@ -446,9 +443,9 @@ public class ClusterBlueprintRenderer extends BaseRenderer implements Renderer {
    *
    * @return list of component names for the host
    */
-  private List<Map<String, String>> processHostGroupComponents(HostGroupV2 group) {
+  private List<Map<String, String>> processHostGroupComponents(HostGroup group) {
     List<Map<String, String>> listHostGroupComponents = new ArrayList<>();
-    for (ComponentV2 component : group.getComponents()) {
+    for (Component component : group.getComponents()) {
       Map<String, String> mapComponentProperties = new HashMap<>();
       listHostGroupComponents.add(mapComponentProperties);
       mapComponentProperties.put("name", component.getName());
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/ServiceService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/ServiceService.java
index 76782d5..b43571c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/ServiceService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/ServiceService.java
@@ -122,7 +122,7 @@ public class ServiceService extends BaseService {
   @Path("") // This is needed if class level path is not present otherwise no Swagger docs will be generated for this method
   @Produces(MediaType.TEXT_PLAIN)
   @ApiOperation(value = "Get all services",
-      nickname = "ServiceService#getServiceConfigs",
+      nickname = "ServiceService#getServices",
       notes = "Returns all services.",
       response = ServiceResponse.ServiceResponseSwagger.class,
       responseContainer = RESPONSE_CONTAINER_LIST)
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java
index 9bbdc37..273c0ff 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorBlueprintProcessor.java
@@ -28,15 +28,15 @@ import java.util.Set;
 import org.apache.ambari.server.api.services.stackadvisor.StackAdvisorRequest.StackAdvisorRequestType;
 import org.apache.ambari.server.api.services.stackadvisor.recommendations.RecommendationResponse;
 import org.apache.ambari.server.api.services.stackadvisor.recommendations.RecommendationResponse.BlueprintConfigurations;
-import org.apache.ambari.server.controller.StackV2;
 import org.apache.ambari.server.controller.internal.ConfigurationTopologyException;
+import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.state.ValueAttributesInfo;
 import org.apache.ambari.server.topology.AdvisedConfiguration;
-import org.apache.ambari.server.topology.BlueprintV2;
+import org.apache.ambari.server.topology.Blueprint;
 import org.apache.ambari.server.topology.ClusterTopology;
 import org.apache.ambari.server.topology.ConfigRecommendationStrategy;
+import org.apache.ambari.server.topology.HostGroup;
 import org.apache.ambari.server.topology.HostGroupInfo;
-import org.apache.ambari.server.topology.HostGroupV2;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -89,14 +89,14 @@ public class StackAdvisorBlueprintProcessor {
   }
 
   private StackAdvisorRequest createStackAdvisorRequest(ClusterTopology clusterTopology, StackAdvisorRequestType requestType) {
-    StackV2 stack = clusterTopology.getBlueprint().getStacks().iterator().next();
+    Stack stack = clusterTopology.getBlueprint().getStack();
     Map<String, Set<String>> hgComponentsMap = gatherHostGroupComponents(clusterTopology);
     Map<String, Set<String>> hgHostsMap = gatherHostGroupBindings(clusterTopology);
     Map<String, Set<String>> componentHostsMap = gatherComponentsHostsMap(hgComponentsMap,
             hgHostsMap);
     return StackAdvisorRequest.StackAdvisorRequestBuilder
       .forStack(stack.getName(), stack.getVersion())
-      .forServices(new ArrayList<>(clusterTopology.getBlueprint().getAllServiceTypes()))
+      .forServices(new ArrayList<>(clusterTopology.getBlueprint().getServices()))
       .forHosts(gatherHosts(clusterTopology))
       .forHostsGroupBindings(gatherHostGroupBindings(clusterTopology))
       .forHostComponents(gatherHostGroupComponents(clusterTopology))
@@ -117,7 +117,7 @@ public class StackAdvisorBlueprintProcessor {
 
   private Map<String, Set<String>> gatherHostGroupComponents(ClusterTopology clusterTopology) {
     Map<String, Set<String>> hgComponentsMap = Maps.newHashMap();
-    for (Map.Entry<String, ? extends HostGroupV2> hgEnrty: clusterTopology.getBlueprint().getHostGroups().entrySet()) {
+    for (Map.Entry<String, HostGroup> hgEnrty: clusterTopology.getBlueprint().getHostGroups().entrySet()) {
       hgComponentsMap.put(hgEnrty.getKey(), Sets.newCopyOnWriteArraySet(hgEnrty.getValue().getComponentNames()));
     }
     return hgComponentsMap;
@@ -176,7 +176,7 @@ public class StackAdvisorBlueprintProcessor {
 
     Map<String, BlueprintConfigurations> recommendedConfigurations =
       response.getRecommendations().getBlueprint().getConfigurations();
-    BlueprintV2 blueprint = topology.getBlueprint();
+    Blueprint blueprint = topology.getBlueprint();
 
     for (Map.Entry<String, BlueprintConfigurations> configEntry : recommendedConfigurations.entrySet()) {
       String configType = configEntry.getKey();
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/users/ActiveWidgetLayoutService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/users/ActiveWidgetLayoutService.java
index 2667a0c..e7cdabb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/users/ActiveWidgetLayoutService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/users/ActiveWidgetLayoutService.java
@@ -64,7 +64,7 @@ public class ActiveWidgetLayoutService extends BaseService {
    */
   @GET
   @Produces("text/plain")
-  @ApiOperation(value = "Get user widget layouts", nickname = "ActiveWidgetLayoutService#getServiceConfigs", notes = "Returns all active widget layouts for user.", response = ActiveWidgetLayoutResponse.class, responseContainer = "List")
+  @ApiOperation(value = "Get user widget layouts", nickname = "ActiveWidgetLayoutService#getServices", notes = "Returns all active widget layouts for user.", response = ActiveWidgetLayoutResponse.class, responseContainer = "List")
   @ApiImplicitParams({
     @ApiImplicitParam(name = "fields", value = "Filter user layout details", defaultValue = "WidgetLayoutInfo/*", dataType = "string", paramType = "query"),
     @ApiImplicitParam(name = "sortBy", value = "Sort layouts (asc | desc)", defaultValue = "WidgetLayoutInfo/user_name.asc", dataType = "string", paramType = "query"),
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/views/ViewInstanceService.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/views/ViewInstanceService.java
index 036f1bd..e4ebedb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/views/ViewInstanceService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/views/ViewInstanceService.java
@@ -76,7 +76,7 @@ public class ViewInstanceService extends BaseService {
    */
   @GET
   @Produces("text/plain")
-  @ApiOperation(value = "Get all view instances", nickname = "ViewInstanceService#getServiceConfigs", notes = "Returns all instances for a view version.", response = ViewInstanceResponse.class, responseContainer = "List")
+  @ApiOperation(value = "Get all view instances", nickname = "ViewInstanceService#getServices", notes = "Returns all instances for a view version.", response = ViewInstanceResponse.class, responseContainer = "List")
   @ApiImplicitParams({
     @ApiImplicitParam(name = "fields", value = "Filter view instance details", defaultValue = "ViewInstanceInfo/*", dataType = "string", paramType = "query"),
     @ApiImplicitParam(name = "sortBy", value = "Sort users (asc | desc)", defaultValue = "ViewInstanceInfo/instance_name.desc", dataType = "string", paramType = "query"),
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
index c88b23c..95bc525 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariServer.java
@@ -64,7 +64,6 @@ import org.apache.ambari.server.controller.internal.AbstractControllerResourcePr
 import org.apache.ambari.server.controller.internal.AmbariPrivilegeResourceProvider;
 import org.apache.ambari.server.controller.internal.BaseClusterRequest;
 import org.apache.ambari.server.controller.internal.BlueprintResourceProvider;
-import org.apache.ambari.server.controller.internal.BlueprintV2ResourceProvider;
 import org.apache.ambari.server.controller.internal.ClusterPrivilegeResourceProvider;
 import org.apache.ambari.server.controller.internal.ClusterResourceProvider;
 import org.apache.ambari.server.controller.internal.HostResourceProvider;
@@ -84,7 +83,6 @@ import org.apache.ambari.server.metrics.system.MetricsService;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.PersistenceType;
 import org.apache.ambari.server.orm.dao.BlueprintDAO;
-import org.apache.ambari.server.orm.dao.BlueprintV2DAO;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
 import org.apache.ambari.server.orm.dao.GroupDAO;
 import org.apache.ambari.server.orm.dao.MetainfoDAO;
@@ -118,7 +116,6 @@ import org.apache.ambari.server.stack.UpdateActiveRepoVersionOnStartup;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.topology.AmbariContext;
 import org.apache.ambari.server.topology.BlueprintFactory;
-import org.apache.ambari.server.topology.BlueprintV2Factory;
 import org.apache.ambari.server.topology.SecurityConfigurationFactory;
 import org.apache.ambari.server.topology.TopologyManager;
 import org.apache.ambari.server.topology.TopologyRequestFactoryImpl;
@@ -926,16 +923,13 @@ public class AmbariServer {
     BlueprintResourceProvider.init(injector.getInstance(BlueprintFactory.class),
         injector.getInstance(BlueprintDAO.class), injector.getInstance(SecurityConfigurationFactory.class),
         injector.getInstance(Gson.class), ambariMetaInfo);
-    BlueprintV2ResourceProvider.init(injector.getInstance(BlueprintV2Factory.class),
-      injector.getInstance(BlueprintV2DAO.class), injector.getInstance(SecurityConfigurationFactory.class),
-      ambariMetaInfo);
     StackDependencyResourceProvider.init(ambariMetaInfo);
     ClusterResourceProvider.init(injector.getInstance(TopologyManager.class),
         injector.getInstance(TopologyRequestFactoryImpl.class), injector.getInstance(SecurityConfigurationFactory
             .class), injector.getInstance(Gson.class));
     HostResourceProvider.setTopologyManager(injector.getInstance(TopologyManager.class));
     BlueprintFactory.init(injector.getInstance(BlueprintDAO.class));
-    BaseClusterRequest.init(injector.getInstance(BlueprintV2Factory.class));
+    BaseClusterRequest.init(injector.getInstance(BlueprintFactory.class));
     AmbariContext.init(injector.getInstance(HostRoleCommandFactory.class));
 
     PermissionResourceProvider.init(injector.getInstance(PermissionDAO.class));
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/StackV2.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/StackV2.java
deleted file mode 100644
index 0b0329c..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/StackV2.java
+++ /dev/null
@@ -1,605 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.ambari.server.state.AutoDeployInfo;
-import org.apache.ambari.server.state.ComponentInfo;
-import org.apache.ambari.server.state.DependencyInfo;
-import org.apache.ambari.server.state.PropertyDependencyInfo;
-import org.apache.ambari.server.state.PropertyInfo;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.state.ValueAttributesInfo;
-import org.apache.ambari.server.topology.Cardinality;
-import org.apache.ambari.server.topology.Configuration;
-
-/**
- * Encapsulates stack information.
- */
-public class StackV2 {
-
-  /** Stack name */
-  private final String name;
-
-  /** Stack version */
-  private final String version;
-
-  /** Repo version */
-  private final String repoVersion;
-
-  /** Map of service name to components */
-  private final Map<String, Collection<String>> serviceComponents;
-
-  /** Map of component to service */
-  private final Map<String, String> componentService;
-
-  /** Map of component to dependencies */
-  private final Map<String, Collection<DependencyInfo>> dependencies;
-
-  /** Map of dependency to conditional service */
-  private final Map<DependencyInfo, String> dependencyConditionalServiceMap;
-
-  /**
-   * Map of database component name to configuration property which indicates whether
-   * the database in to be managed or if it is an external non-managed instance.
-   * If the value of the config property starts with 'New', the database is determined
-   * to be managed, otherwise it is non-managed.
-   */
-  private final Map<String, String> dbDependencyInfo;
-
-  /** Map of component to required cardinality */
-  private final Map<String, String> cardinalityRequirements = new HashMap<>();
-
-  //todo: instead of all these maps from component -> * ,
-  //todo: we should use a Component object with all of these attributes
-  private Set<String> masterComponents = new HashSet<>();
-
-  /** Map of component to auto-deploy information */
-  private final Map<String, AutoDeployInfo> componentAutoDeployInfo;
-
-  /** Map of service to config type properties */
-  private final Map<String, Map<String, Map<String, ConfigProperty>>> serviceConfigurations;
-
-  /** Map of service to required type properties */
-  private final Map<String, Map<String, Map<String, ConfigProperty>>> requiredServiceConfigurations;
-
-  /** Map of service to config type properties */
-  private final Map<String, Map<String, ConfigProperty>> stackConfigurations;
-
-  /** Map of service to set of excluded config types */
-  private final Map<String, Set<String>> excludedConfigurationTypes;
-
-  private final Map<String, ComponentInfo> componentInfos;
-
-  public StackV2(String name,
-           String version,
-           String repoVersion,
-           Map<String, Collection<String>> serviceComponents,
-           Map<String, Collection<DependencyInfo>> dependencies,
-           Map<String, String> dbDependencyInfo,
-           Map<String, AutoDeployInfo> componentAutoDeployInfo,
-           Map<String, Map<String, Map<String, ConfigProperty>>> serviceConfigurations,
-           Map<String, Map<String, Map<String, ConfigProperty>>> requiredServiceConfigurations,
-           Map<String, Map<String, ConfigProperty>> stackConfigurations,
-           Map<String, Set<String>> excludedConfigurationTypes,
-           Map<String, ComponentInfo> componentInfos) {
-    this.name = name;
-    this.version = version;
-    this.repoVersion = repoVersion;
-
-    this.serviceComponents = serviceComponents;
-    this.componentService = new HashMap<>();
-    for (Map.Entry<String, Collection<String>> entry: serviceComponents.entrySet()) {
-      for (String comp: entry.getValue()) {
-        componentService.put(comp, entry.getKey());
-      }
-    }
-
-    this.dependencies = dependencies;
-    this.dependencyConditionalServiceMap = new HashMap<>();
-    for (Map.Entry<String, Collection<DependencyInfo>> entry: dependencies.entrySet()) {
-      for (DependencyInfo di: entry.getValue()) {
-        dependencyConditionalServiceMap.put(di, entry.getKey());
-      }
-    }
-
-    this.dbDependencyInfo = dbDependencyInfo;
-    this.componentAutoDeployInfo = componentAutoDeployInfo;
-    this.serviceConfigurations = serviceConfigurations;
-    this.requiredServiceConfigurations = requiredServiceConfigurations;
-    this.stackConfigurations = stackConfigurations;
-    this.excludedConfigurationTypes = excludedConfigurationTypes;
-    this.componentInfos = componentInfos;
-  }
-
-  /** @return stack name */
-  public String getName() {
-    return name;
-  }
-
-  /** @return stack version */
-  public String getVersion() {
-    return version;
-  }
-
-  public StackId getStackId() {
-    return new StackId(name, version);
-  }
-
-  /** @return repo version */
-  public String getRepoVersion() { return repoVersion; }
-
-  Map<DependencyInfo, String> getDependencyConditionalServiceMap() {
-    return dependencyConditionalServiceMap;
-  }
-
-  /** @return collection of all services for the stack */
-  public Collection<String> getServices() {
-    return serviceComponents.keySet();
-  }
-
-  /**
-   * Get components contained in the stack for the specified service.
-   *
-   * @param service  service name
-   * @return collection of component names for the specified service
-   */
-  public Collection<String> getComponents(String service) {
-    return serviceComponents.get(service);
-  }
-
-  /** @return map of service to associated components */
-  public Map<String, Collection<String>> getComponents() {
-    return serviceComponents;
-  }
-
-    /**
-     * Get info for the specified component.
-     *
-     * @param component  component name
-     *
-     * @return component information for the requested component
-     *     or null if the component doesn't exist in the stack
-     */
-    @Deprecated
-    public ComponentInfo getComponentInfo(String component) {
-      return componentInfos.get(component);
-    }
-
-  /**
-   * Get all configuration types, including excluded types for the specified service.
-   *
-   * @param service  service name
-   *
-   * @return collection of all configuration types for the specified service
-   */
-  public Collection<String> getAllConfigurationTypes(String service) {
-    return serviceConfigurations.get(service).keySet();
-  }
-
-  /**
-   * Get configuration types for the specified service.
-   * This doesn't include any service excluded types.
-   *
-   * @param service  service name
-   *
-   * @return collection of all configuration types for the specified service
-   */
-  public Collection<String> getConfigurationTypes(String service) {
-    Set<String> serviceTypes = new HashSet<>(serviceConfigurations.get(service).keySet());
-    serviceTypes.removeAll(getExcludedConfigurationTypes(service));
-    return serviceTypes;
-  }
-
-  /**
-   * Get the set of excluded configuration types for this service.
-   *
-   * @param service service name
-   *
-   * @return Set of names of excluded config types. Will not return null.
-   */
-  public Set<String> getExcludedConfigurationTypes(String service) {
-    return excludedConfigurationTypes.containsKey(service) ?
-        excludedConfigurationTypes.get(service) :
-        Collections.emptySet();
-  }
-
-  /**
-   * Get config properties for the specified service and configuration type.
-   *
-   * @param service  service name
-   * @param type   configuration type
-   *
-   * @return map of property names to values for the specified service and configuration type
-   */
-  public Map<String, String> getConfigurationProperties(String service, String type) {
-    Map<String, String> configMap = new HashMap<>();
-    Map<String, ConfigProperty> configProperties = serviceConfigurations.get(service).get(type);
-    if (configProperties != null) {
-      for (Map.Entry<String, ConfigProperty> configProperty : configProperties.entrySet()) {
-        configMap.put(configProperty.getKey(), configProperty.getValue().getValue());
-      }
-    }
-    return configMap;
-  }
-
-  public Map<String, ConfigProperty> getConfigurationPropertiesWithMetadata(String service, String type) {
-    return serviceConfigurations.get(service).get(type);
-  }
-
-  /**
-   * Get all required config properties for the specified service.
-   *
-   * @param service  service name
-   *
-   * @return collection of all required properties for the given service
-   */
-  public Collection<ConfigProperty> getRequiredConfigurationProperties(String service) {
-    Collection<ConfigProperty> requiredConfigProperties = new HashSet<>();
-    Map<String, Map<String, ConfigProperty>> serviceProperties = requiredServiceConfigurations.get(service);
-    if (serviceProperties != null) {
-      for (Map.Entry<String, Map<String, ConfigProperty>> typePropertiesEntry : serviceProperties.entrySet()) {
-        requiredConfigProperties.addAll(typePropertiesEntry.getValue().values());
-      }
-    }
-    return requiredConfigProperties;
-  }
-
-  /**
-   * Get required config properties for the specified service which belong to the specified property type.
-   *
-   * @param service     service name
-   * @param propertyType  property type
-   *
-   * @return collection of required properties for the given service and property type
-   */
-  public Collection<ConfigProperty> getRequiredConfigurationProperties(String service, PropertyInfo.PropertyType propertyType) {
-    Collection<ConfigProperty> matchingProperties = new HashSet<>();
-    Map<String, Map<String, ConfigProperty>> requiredProperties = requiredServiceConfigurations.get(service);
-    if (requiredProperties != null) {
-      for (Map.Entry<String, Map<String, ConfigProperty>> typePropertiesEntry : requiredProperties.entrySet()) {
-        for (ConfigProperty configProperty : typePropertiesEntry.getValue().values()) {
-          if (configProperty.getPropertyTypes().contains(propertyType)) {
-            matchingProperties.add(configProperty);
-          }
-        }
-      }
-    }
-    return matchingProperties;
-  }
-
-  public boolean isPasswordProperty(String service, String type, String propertyName) {
-    return (serviceConfigurations.containsKey(service) &&
-        serviceConfigurations.get(service).containsKey(type) &&
-        serviceConfigurations.get(service).get(type).containsKey(propertyName) &&
-        serviceConfigurations.get(service).get(type).get(propertyName).getPropertyTypes().
-            contains(PropertyInfo.PropertyType.PASSWORD));
-  }
-
-  //todo
-  public Map<String, String> getStackConfigurationProperties(String type) {
-    Map<String, String> configMap = new HashMap<>();
-    Map<String, ConfigProperty> configProperties = stackConfigurations.get(type);
-    if (configProperties != null) {
-      for (Map.Entry<String, ConfigProperty> configProperty : configProperties.entrySet()) {
-        configMap.put(configProperty.getKey(), configProperty.getValue().getValue());
-      }
-    }
-    return configMap;
-  }
-
-  public boolean isKerberosPrincipalNameProperty(String service, String type, String propertyName) {
-    return (serviceConfigurations.containsKey(service) &&
-        serviceConfigurations.get(service).containsKey(type) &&
-        serviceConfigurations.get(service).get(type).containsKey(propertyName) &&
-        serviceConfigurations.get(service).get(type).get(propertyName).getPropertyTypes().
-            contains(PropertyInfo.PropertyType.KERBEROS_PRINCIPAL));
-  }
-  /**
-   * Get config attributes for the specified service and configuration type.
-   *
-   * @param service  service name
-   * @param type   configuration type
-   *
-   * @return  map of attribute names to map of property names to attribute values
-   *      for the specified service and configuration type
-   */
-  public Map<String, Map<String, String>> getConfigurationAttributes(String service, String type) {
-    Map<String, Map<String, String>> attributesMap = new HashMap<>();
-    Map<String, ConfigProperty> configProperties = serviceConfigurations.get(service).get(type);
-    if (configProperties != null) {
-      for (Map.Entry<String, ConfigProperty> configProperty : configProperties.entrySet()) {
-        String propertyName = configProperty.getKey();
-        Map<String, String> propertyAttributes = configProperty.getValue().getAttributes();
-        if (propertyAttributes != null) {
-          for (Map.Entry<String, String> propertyAttribute : propertyAttributes.entrySet()) {
-            String attributeName = propertyAttribute.getKey();
-            String attributeValue = propertyAttribute.getValue();
-            if (attributeValue != null) {
-              Map<String, String> attributes = getWithEmptyDefault(attributesMap, attributeName);
-              attributes.put(propertyName, attributeValue);
-            }
-          }
-        }
-      }
-    }
-    return attributesMap;
-  }
-
-  //todo:
-  public Map<String, Map<String, String>> getStackConfigurationAttributes(String type) {
-    Map<String, Map<String, String>> attributesMap = new HashMap<>();
-    Map<String, ConfigProperty> configProperties = stackConfigurations.get(type);
-    if (configProperties != null) {
-      for (Map.Entry<String, ConfigProperty> configProperty : configProperties.entrySet()) {
-        String propertyName = configProperty.getKey();
-        Map<String, String> propertyAttributes = configProperty.getValue().getAttributes();
-        if (propertyAttributes != null) {
-          for (Map.Entry<String, String> propertyAttribute : propertyAttributes.entrySet()) {
-            String attributeName = propertyAttribute.getKey();
-            String attributeValue = propertyAttribute.getValue();
-            Map<String, String> attributes = getWithEmptyDefault(attributesMap, attributeName);
-            attributes.put(propertyName, attributeValue);
-          }
-        }
-      }
-    }
-    return attributesMap;
-  }
-
-  /**
-   * Get the service for the specified component.
-   *
-   * @param component  component name
-   *
-   * @return service name that contains the specified component
-   */
-  public String getServiceForComponent(String component) {
-    return componentService.get(component);
-  }
-
-  /**
-   * Get the names of the services which contain the specified components.
-   *
-   * @param components collection of components
-   *
-   * @return collection of services which contain the specified components
-   */
-  public Collection<String> getServicesForComponents(Collection<String> components) {
-    Set<String> services = new HashSet<>();
-    for (String component : components) {
-      services.add(getServiceForComponent(component));
-    }
-
-    return services;
-  }
-
-  /**
-   * Obtain the service name which corresponds to the specified configuration.
-   *
-   * @param config  configuration type
-   *
-   * @return name of service which corresponds to the specified configuration type
-   */
-  public String getServiceForConfigType(String config) {
-    for (Map.Entry<String, Map<String, Map<String, ConfigProperty>>> entry : serviceConfigurations.entrySet()) {
-      Map<String, Map<String, ConfigProperty>> typeMap = entry.getValue();
-      String serviceName = entry.getKey();
-      if (typeMap.containsKey(config) && !getExcludedConfigurationTypes(serviceName).contains(config)) {
-        return serviceName;
-      }
-    }
-    throw new IllegalArgumentException(
-        "Specified configuration type is not associated with any service: " + config);
-  }
-
-  /**
-   * Return the dependencies specified for the given component.
-   *
-   * @param component  component to get dependency information for
-   *
-   * @return collection of dependency information for the specified component
-   */
-  //todo: full dependency graph
-  public Collection<DependencyInfo> getDependenciesForComponent(String component) {
-    return dependencies.containsKey(component) ? dependencies.get(component) :
-        Collections.emptySet();
-  }
-
-  /**
-   * Get the service, if any, that a component dependency is conditional on.
-   *
-   * @param dependency  dependency to get conditional service for
-   *
-   * @return conditional service for provided component or null if dependency
-   *     is not conditional on a service
-   */
-  public String getConditionalServiceForDependency(DependencyInfo dependency) {
-    return dependencyConditionalServiceMap.get(dependency);
-  }
-
-  public String getExternalComponentConfig(String component) {
-    return dbDependencyInfo.get(component);
-  }
-
-  /**
-   * Obtain the required cardinality for the specified component.
-   */
-  public Cardinality getCardinality(String component) {
-    return new Cardinality(cardinalityRequirements.get(component));
-  }
-
-  /**
-   * Obtain auto-deploy information for the specified component.
-   */
-  public AutoDeployInfo getAutoDeployInfo(String component) {
-    return componentAutoDeployInfo.get(component);
-  }
-
-  public boolean isMasterComponent(String component) {
-    return masterComponents.contains(component);
-  }
-
-  public Configuration getConfiguration(Collection<String> services) {
-    Map<String, Map<String, Map<String, String>>> attributes = new HashMap<>();
-    Map<String, Map<String, String>> properties = new HashMap<>();
-
-    for (String service : services) {
-      Collection<String> serviceConfigTypes = getConfigurationTypes(service);
-      for (String type : serviceConfigTypes) {
-        Map<String, String> typeProps = getWithEmptyDefault(properties, type);
-        typeProps.putAll(getConfigurationProperties(service, type));
-
-        Map<String, Map<String, String>> stackTypeAttributes = getConfigurationAttributes(service, type);
-        if (!stackTypeAttributes.isEmpty()) {
-          if (! attributes.containsKey(type)) {
-            attributes.put(type, new HashMap<>());
-          }
-          Map<String, Map<String, String>> typeAttributes = attributes.get(type);
-          for (Map.Entry<String, Map<String, String>> attribute : stackTypeAttributes.entrySet()) {
-            String attributeName = attribute.getKey();
-            Map<String, String> attributeProps = getWithEmptyDefault(typeAttributes, attributeName);
-            attributeProps.putAll(attribute.getValue());
-          }
-        }
-      }
-    }
-    return new Configuration(properties, attributes);
-  }
-
-  public Configuration getConfiguration() {
-    Map<String, Map<String, Map<String, String>>> stackAttributes = new HashMap<>();
-    Map<String, Map<String, String>> stackConfigs = new HashMap<>();
-
-    for (String service : getServices()) {
-      for (String type : getAllConfigurationTypes(service)) {
-        Map<String, String> typeProps = getWithEmptyDefault(stackConfigs, type);
-        typeProps.putAll(getConfigurationProperties(service, type));
-
-        Map<String, Map<String, String>> stackTypeAttributes = getConfigurationAttributes(service, type);
-        if (!stackTypeAttributes.isEmpty()) {
-          if (! stackAttributes.containsKey(type)) {
-            stackAttributes.put(type, new HashMap<>());
-          }
-          Map<String, Map<String, String>> typeAttrs = stackAttributes.get(type);
-          for (Map.Entry<String, Map<String, String>> attribute : stackTypeAttributes.entrySet()) {
-            String attributeName = attribute.getKey();
-            Map<String, String> attributes = getWithEmptyDefault(typeAttrs, attributeName);
-            attributes.putAll(attribute.getValue());
-          }
-        }
-      }
-    }
-    return new Configuration(stackConfigs, stackAttributes);
-  }
-
-  static <OK, IK, IV> Map<IK, IV> getWithEmptyDefault(Map<OK, Map<IK, IV>> outerMap, OK outerKey) {
-    Map<IK, IV> innerMap = outerMap.get(outerKey);
-    if (null == innerMap) {
-      innerMap = new HashMap<>();
-      outerMap.put(outerKey, innerMap);
-    }
-    return innerMap;
-  }
-
-
-  /**
-   * Contains a configuration property's value and attributes.
-   */
-  public static class ConfigProperty {
-    private ValueAttributesInfo propertyValueAttributes = null;
-    private String name;
-    private String value;
-    private Map<String, String> attributes;
-    private Set<PropertyInfo.PropertyType> propertyTypes;
-    private String type;
-    private Set<PropertyDependencyInfo> dependsOnProperties =
-        Collections.emptySet();
-
-    public ConfigProperty(ReadOnlyConfigurationResponse config) {
-      this.name = config.getPropertyName();
-      this.value = config.getPropertyValue();
-      this.attributes = config.getPropertyAttributes();
-      this.propertyTypes = config.getPropertyType();
-      this.type = normalizeType(config.getType());
-      this.dependsOnProperties = config.getDependsOnProperties();
-      this.propertyValueAttributes = config.getPropertyValueAttributes();
-    }
-
-    public ConfigProperty(String type, String name, String value) {
-      this.type = type;
-      this.name = name;
-      this.value = value;
-    }
-
-    public String getName() {
-      return name;
-    }
-
-    public String getValue() {
-      return value;
-    }
-
-    public void setValue(String value) {
-      this.value = value;
-    }
-
-    public String getType() {
-      return type;
-    }
-
-    public Set<PropertyInfo.PropertyType> getPropertyTypes() {
-      return propertyTypes;
-    }
-
-    public void setPropertyTypes(Set<PropertyInfo.PropertyType> propertyTypes) {
-      this.propertyTypes = propertyTypes;
-    }
-
-    public Map<String, String> getAttributes() {
-      return attributes;
-    }
-
-    public void setAttributes(Map<String, String> attributes) {
-      this.attributes = attributes;
-    }
-
-    public Set<PropertyDependencyInfo> getDependsOnProperties() {
-      return this.dependsOnProperties;
-    }
-
-    private String normalizeType(String type) {
-      //strip .xml from type
-      if (type.endsWith(".xml")) {
-        type = type.substring(0, type.length() - 4);
-      }
-      return type;
-    }
-
-    public ValueAttributesInfo getPropertyValueAttributes() {
-      return propertyValueAttributes;
-    }
-  }
-}
\ No newline at end of file
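Side note on the StackV2 class removed above: its getWithEmptyDefault helper lazily creates the inner map for a key in a nested map before returning it. A minimal standalone sketch of the same pattern, using computeIfAbsent as an equivalent; the class and property names below are illustrative only and not part of this change:

    import java.util.HashMap;
    import java.util.Map;

    public class NestedMapSketch {
      // Returns the inner map for outerKey, creating and registering an empty one on first access.
      static <OK, IK, IV> Map<IK, IV> getWithEmptyDefault(Map<OK, Map<IK, IV>> outerMap, OK outerKey) {
        return outerMap.computeIfAbsent(outerKey, k -> new HashMap<>());
      }

      public static void main(String[] args) {
        Map<String, Map<String, String>> properties = new HashMap<>();
        // First call creates the "core-site" bucket; later calls reuse it.
        getWithEmptyDefault(properties, "core-site").put("fs.defaultFS", "hdfs://localhost:8020");
        System.out.println(properties);   // {core-site={fs.defaultFS=hdfs://localhost:8020}}
      }
    }
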
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/StackV2Factory.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/StackV2Factory.java
deleted file mode 100644
index 784b368..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/StackV2Factory.java
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import static java.util.AbstractMap.SimpleImmutableEntry;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.orm.entities.StackEntity;
-import org.apache.ambari.server.state.AutoDeployInfo;
-import org.apache.ambari.server.state.ComponentInfo;
-import org.apache.ambari.server.state.DependencyInfo;
-import org.apache.ambari.server.state.StackId;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class StackV2Factory {
-  private final static Logger LOG = LoggerFactory.getLogger(StackV2Factory.class);
-
-
-  private AmbariManagementController controller;
-
-  public StackV2Factory(AmbariManagementController controller) {
-    this.controller = controller;
-  }
-
-  public StackV2 create(StackEntity stack) throws AmbariException {
-    return create(stack.getStackName(), stack.getStackVersion());
-  }
-
-  public StackV2 create(String stackId) throws AmbariException {
-    StackId id = new StackId(stackId);
-    return create(id.getStackName(), id.getStackVersion());
-  }
-
-  public StackV2 create(String name, String version) throws AmbariException {
-    Set<StackServiceResponse> stackServices = controller.getStackServices(
-      Collections.singleton(new StackServiceRequest(name, version, null)));
-
-    StackData stackData = new StackData(name, version);
-    for (StackServiceResponse stackService : stackServices) {
-      String serviceName = stackService.getServiceName();
-      parseComponents(stackData, serviceName);
-      parseExcludedConfigurations(stackData, stackService);
-      parseConfigurations(stackData, stackService);
-      registerConditionalDependencies(stackData);
-    }
-
-    //todo: already done for each service
-    parseStackConfigurations(stackData);
-
-    getComponentInfos(stackData);
-
-    return new StackV2(name, version, stackData.repoVersion /* TODO */, stackData.serviceComponents, stackData.dependencies,
-      stackData.dbDependencyInfo, stackData.componentAutoDeployInfo, stackData.serviceConfigurations,
-      stackData.requiredServiceConfigurations, stackData.stackConfigurations, stackData.excludedConfigurationTypes,
-      stackData.componentInfos);
-  }
-
-  private void getComponentInfos(StackData stackData) {
-    List<Map.Entry<String, String>> componentServices = stackData.serviceComponents.entrySet().stream().
-      flatMap(e -> e.getValue().stream().map( v -> new SimpleImmutableEntry<>(e.getKey(), v))).
-      collect(Collectors.toList());
-    componentServices.stream().forEach( componentService -> {
-      try {
-        ComponentInfo componentInfo = controller.getAmbariMetaInfo().getComponent(stackData.stackName,
-          stackData.stackVersion, componentService.getKey(), componentService.getValue());
-        if (null != componentInfo) {
-          stackData.componentInfos.put(componentService.getKey(), componentInfo);
-        }
-      } catch (AmbariException e) {
-        LOG.debug("No component info for service: {}, component: {}, stack name: {}, stack version: {}, Exception: {}",
-          componentService.getKey(), componentService.getValue(), stackData.stackName, stackData.stackVersion, e);
-      }
-    });
-  }
-
-  /**
-   * Parse configurations for the specified service from the stack definition.
-   *
-   * @param stackService  service to parse the stack configuration for
-   *
-   * @throws AmbariException if an error occurs while getting configurations from the stack definition
-   */
-  private void parseConfigurations(StackData stackData,
-                                   StackServiceResponse stackService) throws AmbariException {
-    String service = stackService.getServiceName();
-    Map<String, Map<String, StackV2.ConfigProperty>> mapServiceConfig = new HashMap<>();
-    Map<String, Map<String, StackV2.ConfigProperty>> mapRequiredServiceConfig = new HashMap<>();
-
-    stackData.serviceConfigurations.put(service, mapServiceConfig);
-    stackData.requiredServiceConfigurations.put(service, mapRequiredServiceConfig);
-
-    Set<ReadOnlyConfigurationResponse> serviceConfigs = controller.getStackConfigurations(
-      Collections.singleton(new StackConfigurationRequest(stackData.stackName, stackData.stackVersion, service, null)));
-    Set<ReadOnlyConfigurationResponse> stackLevelConfigs = controller.getStackLevelConfigurations(
-      Collections.singleton(new StackLevelConfigurationRequest(stackData.stackName, stackData.stackVersion, null)));
-    serviceConfigs.addAll(stackLevelConfigs);
-
-    // shouldn't have any required properties in stack level configuration
-    for (ReadOnlyConfigurationResponse config : serviceConfigs) {
-      StackV2.ConfigProperty configProperty = new StackV2.ConfigProperty(config);
-      String type = configProperty.getType();
-
-      Map<String, StackV2.ConfigProperty> mapTypeConfig = StackV2.getWithEmptyDefault(mapServiceConfig, type);
-
-      mapTypeConfig.put(config.getPropertyName(), configProperty);
-      if (config.isRequired()) {
-        Map<String, StackV2.ConfigProperty> requiredTypeConfig =
-          StackV2.getWithEmptyDefault(mapRequiredServiceConfig, type);
-        requiredTypeConfig.put(config.getPropertyName(), configProperty);
-      }
-    }
-
-    // So far we have added only config types that have properties defined
-    // in the stack service definition. Since there might be config types
-    // with no properties defined, we need to add those separately.
-    Set<String> configTypes = stackService.getConfigTypes().keySet();
-    for (String configType: configTypes) {
-      if (!mapServiceConfig.containsKey(configType)) {
-        mapServiceConfig.put(configType, Collections.emptyMap());
-      }
-    }
-  }
-
-  private void parseStackConfigurations(StackData stackData) throws AmbariException {
-    Set<ReadOnlyConfigurationResponse> stackLevelConfigs = controller.getStackLevelConfigurations(
-      Collections.singleton(new StackLevelConfigurationRequest(stackData.stackName, stackData.stackVersion, null)));
-
-    for (ReadOnlyConfigurationResponse config : stackLevelConfigs) {
-      StackV2.ConfigProperty configProperty = new StackV2.ConfigProperty(config);
-      String type = configProperty.getType();
-
-      Map<String, StackV2.ConfigProperty> mapTypeConfig =
-        StackV2.getWithEmptyDefault(stackData.stackConfigurations, type);
-
-      mapTypeConfig.put(config.getPropertyName(),
-        configProperty);
-    }
-  }
-
-  /**
-   * Parse components for the specified service from the stack definition.
-   *
-   * @param service  service name
-   *
-   * @throws AmbariException if an error occurs while getting components from the stack definition
-   */
-  private void parseComponents(StackData stackData, String service) throws AmbariException {
-    Collection<String> componentSet = new HashSet<>();
-
-    Set<StackServiceComponentResponse> components = controller.getStackComponents(
-      Collections.singleton(new StackServiceComponentRequest(stackData.stackName, stackData.stackVersion, service, null)));
-
-    // stack service components
-    for (StackServiceComponentResponse component : components) {
-      String componentName = component.getComponentName();
-      componentSet.add(componentName);
-      stackData.componentService.put(componentName, service);
-      String cardinality = component.getCardinality();
-      if (cardinality != null) {
-        stackData.cardinalityRequirements.put(componentName, cardinality);
-      }
-      AutoDeployInfo autoDeploy = component.getAutoDeploy();
-      if (autoDeploy != null) {
-        stackData.componentAutoDeployInfo.put(componentName, autoDeploy);
-      }
-
-      // populate component dependencies
-      //todo: remove usage of AmbariMetaInfo
-      Collection<DependencyInfo> componentDependencies = controller.getAmbariMetaInfo().getComponentDependencies(
-        stackData.stackName, stackData.stackVersion, service, componentName);
-
-      if (componentDependencies != null && !componentDependencies.isEmpty()) {
-        stackData.dependencies.put(componentName, componentDependencies);
-      }
-      if (component.isMaster()) {
-        stackData.masterComponents.add(componentName);
-      }
-    }
-
-    stackData.serviceComponents.put(service, componentSet);
-  }
-
-
-  /**
-   * Obtain the excluded configuration types from the StackServiceResponse.
-   *
-   * @param stackServiceResponse the response object associated with this stack service
-   */
-  private void parseExcludedConfigurations(StackData stackData, StackServiceResponse stackServiceResponse) {
-    stackData.excludedConfigurationTypes.put(stackServiceResponse.getServiceName(), stackServiceResponse.getExcludedConfigTypes());
-  }
-
-  /**
-   * Register conditional dependencies.
-   */
-  //todo: This information should be specified in the stack definition.
-  void registerConditionalDependencies(StackData stackData) {
-    stackData.dbDependencyInfo.put("MYSQL_SERVER", "global/hive_database");
-  }
-
-
-  private static final class StackData {
-    final String stackName;
-    final String stackVersion;
-
-    public StackData(String stackName, String stackVersion) {
-      this.stackName = stackName;
-      this.stackVersion = stackVersion;
-    }
-
-    String repoVersion;
-    final Map<String, String> componentService = new HashMap<>();
-    final Set<String> masterComponents = new HashSet<>();
-    final Map<String, AutoDeployInfo> componentAutoDeployInfo = new HashMap<>();
-    final Map<String, String> cardinalityRequirements = new HashMap<>();
-    final Map<String, Collection<DependencyInfo>> dependencies = new HashMap<>();
-    final Map<String, Collection<String>> serviceComponents = new HashMap<>();
-    final Map<String, Map<String, Map<String, StackV2.ConfigProperty>>> serviceConfigurations = new HashMap<>();
-    final Map<String, Map<String, Map<String, StackV2.ConfigProperty>>> requiredServiceConfigurations = new HashMap<>();
-    final Map<String, String> dbDependencyInfo = new HashMap<>();
-    final Map<String, Set<String>> excludedConfigurationTypes = new HashMap<>();
-    final Map<String, Map<String, StackV2.ConfigProperty>> stackConfigurations = new HashMap<>();
-    final Map<String, ComponentInfo> componentInfos = new HashMap<>();
-  }
-}
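For readers following the revert: the StackV2Factory removed above built a StackV2 by querying the management controller for each stack service (components, configurations, excluded config types). A hedged usage sketch of the removed API follows; the controller instance and the "HDP"/"2.6" values are assumptions for illustration only:

    import org.apache.ambari.server.AmbariException;
    import org.apache.ambari.server.controller.AmbariManagementController;
    import org.apache.ambari.server.controller.StackV2;
    import org.apache.ambari.server.controller.StackV2Factory;

    public class StackV2FactoryUsageSketch {
      // The factory offered create(name, version) and create("NAME-VERSION") overloads;
      // both resolved services, components and configurations through the controller.
      static StackV2 loadStack(AmbariManagementController controller) throws AmbariException {
        return new StackV2Factory(controller).create("HDP", "2.6");
      }
    }
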
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProvider.java
index 5104354..f98dbf2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProvider.java
@@ -243,7 +243,7 @@ public abstract class AbstractControllerResourceProvider extends AbstractAuthori
       case HostComponentProcess:
         return new HostComponentProcessResourceProvider(propertyIds, keyPropertyIds, managementController);
       case Blueprint:
-        return new BlueprintV2ResourceProvider(propertyIds, keyPropertyIds, managementController);
+        return new BlueprintResourceProvider(propertyIds, keyPropertyIds, managementController);
       case KerberosDescriptor:
         return resourceProviderFactory.getKerberosDescriptorResourceProvider(managementController, propertyIds, keyPropertyIds);
       case Recommendation:
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseClusterRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseClusterRequest.java
index a38f478..77eafeb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseClusterRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseClusterRequest.java
@@ -18,7 +18,6 @@
 
 package org.apache.ambari.server.controller.internal;
 
-import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
@@ -30,13 +29,12 @@ import org.apache.ambari.server.api.predicate.Token;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.ResourceProvider;
 import org.apache.ambari.server.controller.utilities.ClusterControllerHelper;
-import org.apache.ambari.server.topology.BlueprintV2;
-import org.apache.ambari.server.topology.BlueprintV2Factory;
+import org.apache.ambari.server.topology.Blueprint;
+import org.apache.ambari.server.topology.BlueprintFactory;
 import org.apache.ambari.server.topology.Configuration;
 import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.InvalidTopologyTemplateException;
 import org.apache.ambari.server.topology.SecurityConfiguration;
-import org.apache.ambari.server.topology.Service;
 import org.apache.ambari.server.topology.TopologyRequest;
 
 /**
@@ -64,22 +62,22 @@ public abstract class BaseClusterRequest implements TopologyRequest {
    * blueprint
    */
   //todo: change interface to only return blueprint name
-  protected BlueprintV2 blueprint;
+  protected Blueprint blueprint;
 
   /**
-   * security configuration
+   * configuration
    */
-  protected SecurityConfiguration securityConfiguration;
+  protected Configuration configuration;
 
   /**
-   * blueprint factory
+   * security configuration
    */
-  protected static BlueprintV2Factory blueprintFactory;
+  protected SecurityConfiguration securityConfiguration;
 
   /**
-   * List of services
+   * blueprint factory
    */
-  protected Collection<Service> serviceConfigs;
+  protected static BlueprintFactory blueprintFactory;
 
   /**
    * Lexer used to obtain property names from a predicate string
@@ -96,7 +94,7 @@ public abstract class BaseClusterRequest implements TopologyRequest {
    * inject blueprint factory
    * @param factory  blueprint factory
    */
-  public static void init(BlueprintV2Factory factory) {
+  public static void init(BlueprintFactory factory) {
     blueprintFactory = factory;
   }
 
@@ -106,19 +104,13 @@ public abstract class BaseClusterRequest implements TopologyRequest {
   }
 
   @Override
-  public BlueprintV2 getBlueprint() {
+  public Blueprint getBlueprint() {
     return blueprint;
   }
 
   @Override
-  public Collection<Service> getServiceConfigs() {
-    return serviceConfigs;
-  }
-
-  @Override
-  @Deprecated
   public Configuration getConfiguration() {
-    return null;
+    return configuration;
   }
 
   @Override
@@ -163,7 +155,7 @@ public abstract class BaseClusterRequest implements TopologyRequest {
    *
    * @param blueprint blueprint
    */
-  protected void setBlueprint(BlueprintV2 blueprint) {
+  protected void setBlueprint(Blueprint blueprint) {
     this.blueprint = blueprint;
   }
 
@@ -172,14 +164,14 @@ public abstract class BaseClusterRequest implements TopologyRequest {
    *
    * @param configuration  configuration
    */
-  @Deprecated
   protected void setConfiguration(Configuration configuration) {
+    this.configuration = configuration;
   }
 
   /**
    * Get the blueprint factory.
    */
-  protected BlueprintV2Factory getBlueprintFactory() {
+  protected BlueprintFactory getBlueprintFactory() {
     return blueprintFactory;
   }
 
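One note on the BaseClusterRequest revert above: it restores the static BlueprintFactory injection that topology requests rely on. A minimal sketch of the expected wiring, assuming the factory is produced by the server's injector during startup; the wrapper class and method names here are illustrative:

    import org.apache.ambari.server.controller.internal.BaseClusterRequest;
    import org.apache.ambari.server.topology.BlueprintFactory;

    public class BaseClusterRequestWiringSketch {
      // Must run before any provision/scale request tries to resolve its blueprint,
      // because BaseClusterRequest keeps the factory in a static field.
      static void wire(BlueprintFactory factory) {
        BaseClusterRequest.init(factory);
      }
    }
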
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
index d2f24d9..5b75532 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
@@ -38,27 +38,27 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.controller.StackV2;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.PropertyDependencyInfo;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.ValueAttributesInfo;
 import org.apache.ambari.server.topology.AdvisedConfiguration;
-import org.apache.ambari.server.topology.BlueprintV2;
+import org.apache.ambari.server.topology.Blueprint;
 import org.apache.ambari.server.topology.Cardinality;
 import org.apache.ambari.server.topology.ClusterTopology;
 import org.apache.ambari.server.topology.ConfigRecommendationStrategy;
 import org.apache.ambari.server.topology.Configuration;
 import org.apache.ambari.server.topology.HostGroup;
 import org.apache.ambari.server.topology.HostGroupInfo;
-import org.apache.ambari.server.topology.Service;
 import org.apache.ambari.server.topology.validators.UnitValidatedProperty;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Predicates;
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 
 /**
@@ -213,11 +213,10 @@ public class BlueprintConfigurationProcessor {
       new HawqHAFilter() };
 
   private ClusterTopology clusterTopology;
-  public final ConfigurationContext configurationContext;
 
-  public BlueprintConfigurationProcessor(ClusterTopology clusterTopology, ConfigurationContext configurationContext) {
+
+  public BlueprintConfigurationProcessor(ClusterTopology clusterTopology) {
     this.clusterTopology = clusterTopology;
-    this.configurationContext = configurationContext;
     initRemovePropertyUpdaters();
   }
 
@@ -310,7 +309,7 @@ public class BlueprintConfigurationProcessor {
           Map<String, String> typeMap = clusterProps.get(type);
           if (typeMap != null && typeMap.containsKey(propertyName) && typeMap.get(propertyName) != null) {
             requiredHostGroups.addAll(updater.getRequiredHostGroups(
-                propertyName, typeMap.get(propertyName), clusterProps, clusterTopology, configurationContext));
+                propertyName, typeMap.get(propertyName), clusterProps, clusterTopology));
           }
 
           // host group configs
@@ -319,7 +318,7 @@ public class BlueprintConfigurationProcessor {
             Map<String, String> hgTypeMap = hgConfigProps.get(type);
             if (hgTypeMap != null && hgTypeMap.containsKey(propertyName)) {
               requiredHostGroups.addAll(updater.getRequiredHostGroups(
-                  propertyName, hgTypeMap.get(propertyName), hgConfigProps, clusterTopology, configurationContext));
+                  propertyName, hgTypeMap.get(propertyName), hgConfigProps, clusterTopology));
             }
           }
         }
@@ -362,7 +361,7 @@ public class BlueprintConfigurationProcessor {
           if (typeMap != null && typeMap.containsKey(propertyName) && typeMap.get(propertyName) != null) {
             final String originalValue = typeMap.get(propertyName);
             final String updatedValue =
-              updater.updateForClusterCreate(propertyName, originalValue, clusterProps, clusterTopology, configurationContext);
+              updater.updateForClusterCreate(propertyName, originalValue, clusterProps, clusterTopology);
 
             if(updatedValue == null ) {
               continue;
@@ -383,7 +382,7 @@ public class BlueprintConfigurationProcessor {
             if (hgTypeMap != null && hgTypeMap.containsKey(propertyName)) {
               final String originalValue = hgTypeMap.get(propertyName);
               final String updatedValue =
-                updater.updateForClusterCreate(propertyName, originalValue, hgConfigProps, clusterTopology, configurationContext);
+                updater.updateForClusterCreate(propertyName, originalValue, hgConfigProps, clusterTopology);
 
               if (!updatedValue.equals(originalValue)) {
                 configTypesUpdated.add(type);
@@ -397,7 +396,7 @@ public class BlueprintConfigurationProcessor {
     }
 
     //todo: lots of hard coded HA rules included here
-    if (clusterTopology.isNameNodeHAEnabled(configurationContext)) {
+    if (clusterTopology.isNameNodeHAEnabled()) {
 
       // add "dfs.internal.nameservices" if it's not specified
       Map<String, String> hdfsSiteConfig = clusterConfig.getFullProperties().get("hdfs-site");
@@ -429,7 +428,7 @@ public class BlueprintConfigurationProcessor {
     setStackToolsAndFeatures(clusterConfig, configTypesUpdated);
     setRetryConfiguration(clusterConfig, configTypesUpdated);
     setupHDFSProxyUsers(clusterConfig, configTypesUpdated);
-    addExcludedConfigProperties(clusterConfig, configTypesUpdated);
+    addExcludedConfigProperties(clusterConfig, configTypesUpdated, clusterTopology.getBlueprint().getStack());
 
     trimProperties(clusterConfig, clusterTopology);
 
@@ -437,15 +436,15 @@ public class BlueprintConfigurationProcessor {
   }
 
   private void trimProperties(Configuration clusterConfig, ClusterTopology clusterTopology) {
-    BlueprintV2 blueprint = clusterTopology.getBlueprint();
-    //Stack stack = blueprint.getStack();
+    Blueprint blueprint = clusterTopology.getBlueprint();
+    Stack stack = blueprint.getStack();
 
     Map<String, Map<String, String>> configTypes = clusterConfig.getFullProperties();
     for (String configType : configTypes.keySet()) {
       Map<String,String> properties = configTypes.get(configType);
-//      for (String propertyName : properties.keySet()) {
-//        trimPropertyValue(clusterConfig, stack, configType, properties, propertyName);
-//      }
+      for (String propertyName : properties.keySet()) {
+        trimPropertyValue(clusterConfig, stack, configType, properties, propertyName);
+      }
     }
   }
 
@@ -486,11 +485,11 @@ public class BlueprintConfigurationProcessor {
    */
   public void doUpdateForBlueprintExport() {
     // HA configs are only processed in cluster configuration, not HG configurations
-    if (clusterTopology.isNameNodeHAEnabled(configurationContext)) {
+    if (clusterTopology.isNameNodeHAEnabled()) {
       doNameNodeHAUpdate();
     }
 
-    if (clusterTopology.isYarnResourceManagerHAEnabled(configurationContext)) {
+    if (clusterTopology.isYarnResourceManagerHAEnabled()) {
       doYarnResourceManagerHAUpdate();
     }
 
@@ -548,7 +547,7 @@ public class BlueprintConfigurationProcessor {
     for (Map.Entry<String, Map<String, String>> configEntry : properties.entrySet()) {
       String type = configEntry.getKey();
       try {
-        //  clusterTopology.getBlueprint().getStack().getServiceForConfigType(type);
+          clusterTopology.getBlueprint().getStack().getServiceForConfigType(type);
         } catch (IllegalArgumentException illegalArgumentException) {
             LOG.error(new StringBuilder(String.format("Error encountered while trying to obtain the service name for config type [%s]. ", type))
             .append("Further processing on this config type will be skipped. ")
@@ -577,7 +576,7 @@ public class BlueprintConfigurationProcessor {
 
       for (Map.Entry<String, String> propertyEntry : configPropertiesPerType.entrySet()) {
         String propName = propertyEntry.getKey();
-        if (shouldPropertyBeExcludedForClusterUpdate(propName, propertyEntry.getValue(), configType, clusterTopology, configurationContext)) {
+        if (shouldPropertyBeExcludedForClusterUpdate(propName, propertyEntry.getValue(), configType, clusterTopology)) {
           configuration.removeProperty(configType, propName);
           configTypesUpdated.add(configType);
         }
@@ -622,29 +621,29 @@ public class BlueprintConfigurationProcessor {
    * @param advisedConfigurations advised configuration instance
    */
   private void doFilterStackDefaults(Map<String, AdvisedConfiguration> advisedConfigurations) {
-//    BlueprintV2 blueprint = clusterTopology.getBlueprint();
-//    Configuration stackDefaults = blueprint.getStack().getConfiguration(blueprint.getServices());
-//    Map<String, Map<String, String>> stackDefaultProps = stackDefaults.getProperties();
-//    for (Map.Entry<String, AdvisedConfiguration> adConfEntry : advisedConfigurations.entrySet()) {
-//      AdvisedConfiguration advisedConfiguration = adConfEntry.getValue();
-//      if (stackDefaultProps.containsKey(adConfEntry.getKey())) {
-//        Map<String, String> defaultProps = stackDefaultProps.get(adConfEntry.getKey());
-//        if (advisedConfiguration.getProperties() != null) {
-//          Map<String, String> outFilteredProps = Maps.filterKeys(advisedConfiguration.getProperties(),
-//            Predicates.not(Predicates.in(defaultProps.keySet())));
-//          advisedConfiguration.getProperties().keySet().removeAll(Sets.newCopyOnWriteArraySet(outFilteredProps.keySet()));
-//        }
-//
-//        if (advisedConfiguration.getPropertyValueAttributes() != null) {
-//          Map<String, ValueAttributesInfo> outFilteredValueAttrs = Maps.filterKeys(advisedConfiguration.getPropertyValueAttributes(),
-//            Predicates.not(Predicates.in(defaultProps.keySet())));
-//          advisedConfiguration.getPropertyValueAttributes().keySet().removeAll(
-//            Sets.newCopyOnWriteArraySet(outFilteredValueAttrs.keySet()));
-//        }
-//      } else {
-//        advisedConfiguration.getProperties().clear();
-//      }
-//    }
+    Blueprint blueprint = clusterTopology.getBlueprint();
+    Configuration stackDefaults = blueprint.getStack().getConfiguration(blueprint.getServices());
+    Map<String, Map<String, String>> stackDefaultProps = stackDefaults.getProperties();
+    for (Map.Entry<String, AdvisedConfiguration> adConfEntry : advisedConfigurations.entrySet()) {
+      AdvisedConfiguration advisedConfiguration = adConfEntry.getValue();
+      if (stackDefaultProps.containsKey(adConfEntry.getKey())) {
+        Map<String, String> defaultProps = stackDefaultProps.get(adConfEntry.getKey());
+        if (advisedConfiguration.getProperties() != null) {
+          Map<String, String> outFilteredProps = Maps.filterKeys(advisedConfiguration.getProperties(),
+            Predicates.not(Predicates.in(defaultProps.keySet())));
+          advisedConfiguration.getProperties().keySet().removeAll(Sets.newCopyOnWriteArraySet(outFilteredProps.keySet()));
+        }
+
+        if (advisedConfiguration.getPropertyValueAttributes() != null) {
+          Map<String, ValueAttributesInfo> outFilteredValueAttrs = Maps.filterKeys(advisedConfiguration.getPropertyValueAttributes(),
+            Predicates.not(Predicates.in(defaultProps.keySet())));
+          advisedConfiguration.getPropertyValueAttributes().keySet().removeAll(
+            Sets.newCopyOnWriteArraySet(outFilteredValueAttrs.keySet()));
+        }
+      } else {
+        advisedConfiguration.getProperties().clear();
+      }
+    }
   }
 
   /**
@@ -701,11 +700,11 @@ public class BlueprintConfigurationProcessor {
   private Collection<Map<String, Map<String, PropertyUpdater>>> createCollectionOfUpdaters() {
     Collection<Map<String, Map<String, PropertyUpdater>>> updaters = allUpdaters;
 
-    if (clusterTopology.isNameNodeHAEnabled(configurationContext)) {
+    if (clusterTopology.isNameNodeHAEnabled()) {
       updaters = addNameNodeHAUpdaters(updaters);
     }
 
-    if (clusterTopology.isYarnResourceManagerHAEnabled(configurationContext)) {
+    if (clusterTopology.isYarnResourceManagerHAEnabled()) {
       updaters = addYarnResourceManagerHAUpdaters(updaters);
     }
 
@@ -1061,7 +1060,7 @@ public class BlueprintConfigurationProcessor {
    */
   private boolean shouldPropertyBeExcludedForBlueprintExport(String propertyName, String propertyValue, String propertyType, ClusterTopology topology, PropertyFilter [] exportPropertyFilters ) {
     for(PropertyFilter filter : exportPropertyFilters) {
-      if (!filter.isPropertyIncluded(propertyName, propertyValue, propertyType, topology, configurationContext)) {
+      if (!filter.isPropertyIncluded(propertyName, propertyValue, propertyType, topology)) {
         return true;
       }
     }
@@ -1085,12 +1084,11 @@ public class BlueprintConfigurationProcessor {
   private static boolean shouldPropertyBeExcludedForClusterUpdate(String propertyName,
                                                                   String propertyValue,
                                                                   String propertyType,
-                                                                  ClusterTopology topology,
-                                                                  ConfigurationContext configurationContext) {
+                                                                  ClusterTopology topology) {
 
     for(PropertyFilter filter : clusterUpdatePropertyFilters) {
       try {
-        if (!filter.isPropertyIncluded(propertyName, propertyValue, propertyType, topology, configurationContext)) {
+        if (!filter.isPropertyIncluded(propertyName, propertyValue, propertyType, topology)) {
           if (!shouldPropertyBeStoredWithDefault(propertyName)) {
             return true;
           }
@@ -1328,8 +1326,7 @@ public class BlueprintConfigurationProcessor {
         Map<String, String> typeProperties = properties.get(type);
 
         if (typeProperties != null && typeProperties.containsKey(propertyName)) {
-          String newValue = npu.updateForBlueprintExport(propertyName, typeProperties.get(propertyName),
-            properties, clusterTopology, configurationContext);
+          String newValue = npu.updateForBlueprintExport(propertyName, typeProperties.get(propertyName), properties, clusterTopology);
           configuration.setProperty(type, propertyName, newValue);
         }
       }
@@ -1353,8 +1350,7 @@ public class BlueprintConfigurationProcessor {
     String updateForClusterCreate(String propertyName,
                                          String origValue,
                                          Map<String, Map<String, String>> properties,
-                                         ClusterTopology topology,
-                                         ConfigurationContext configurationContext);
+                                         ClusterTopology topology);
 
     /**
      * Determine the required host groups for the provided property.
@@ -1369,8 +1365,7 @@ public class BlueprintConfigurationProcessor {
     Collection<String> getRequiredHostGroups(String propertyName,
                                              String origValue,
                                              Map<String, Map<String, String>> properties,
-                                             ClusterTopology topology,
-                                             ConfigurationContext configurationContext);
+                                             ClusterTopology topology);
   }
 
   private static class HostGroupUpdater implements PropertyUpdater {
@@ -1381,9 +1376,7 @@ public class BlueprintConfigurationProcessor {
     public String updateForClusterCreate(String propertyName,
       String origValue,
       Map<String, Map<String, String>> properties,
-      ClusterTopology topology,
-      ConfigurationContext configurationContext
-    ) {
+      ClusterTopology topology) {
 
       //todo: getHostStrings
       Matcher m = HostGroup.HOSTGROUP_REGEX.matcher(origValue);
@@ -1409,7 +1402,7 @@ public class BlueprintConfigurationProcessor {
     public Collection<String> getRequiredHostGroups(String propertyName,
       String origValue,
       Map<String, Map<String, String>> properties,
-      ClusterTopology topology, ConfigurationContext configurationContext) {
+      ClusterTopology topology) {
       //todo: getHostStrings
       Matcher m = HostGroup.HOSTGROUP_REGEX.matcher(origValue);
       if (m.find()) {
@@ -1453,10 +1446,9 @@ public class BlueprintConfigurationProcessor {
     public String updateForClusterCreate(String propertyName,
                                          String origValue,
                                          Map<String, Map<String, String>> properties,
-                                         ClusterTopology topology,
-                                         ConfigurationContext configurationContext)  {
+                                         ClusterTopology topology)  {
 
-      String replacedValue = super.updateForClusterCreate(propertyName, origValue, properties, topology, configurationContext);
+      String replacedValue = super.updateForClusterCreate(propertyName, origValue, properties, topology);
       if (!Objects.equals(origValue, replacedValue)) {
         return replacedValue;
       } else {
@@ -1467,8 +1459,7 @@ public class BlueprintConfigurationProcessor {
               topology.getHostAssignmentsForComponent(component).iterator().next(), properties);
         } else {
           //todo: extract all hard coded HA logic
-
-          Cardinality cardinality = configurationContext.getStack().getCardinality(component);
+          Cardinality cardinality = topology.getBlueprint().getStack().getCardinality(component);
           // if no matching host groups are found for a component whose configuration
           // is handled by this updater, check the stack first to determine if
           // zero is a valid cardinality for this component.  This is necessary
@@ -1477,7 +1468,7 @@ public class BlueprintConfigurationProcessor {
           if (matchingGroupCount == 0 && cardinality.isValidCount(0)) {
             return origValue;
           } else {
-            if (topology.isNameNodeHAEnabled(configurationContext) && isComponentNameNode() && (matchingGroupCount == 2)) {
+            if (topology.isNameNodeHAEnabled() && isComponentNameNode() && (matchingGroupCount == 2)) {
               // if this is the defaultFS property, it should reflect the nameservice name,
               // rather than a hostname (used in non-HA scenarios)
               if (properties.containsKey("core-site") && properties.get("core-site").get("fs.defaultFS").equals(origValue)) {
@@ -1503,13 +1494,13 @@ public class BlueprintConfigurationProcessor {
 
             }
 
-            if (topology.isNameNodeHAEnabled(configurationContext) && isComponentSecondaryNameNode() && (matchingGroupCount == 0)) {
+            if (topology.isNameNodeHAEnabled() && isComponentSecondaryNameNode() && (matchingGroupCount == 0)) {
               // if HDFS HA is enabled, then no replacement is necessary for properties that refer to the SECONDARY_NAMENODE
               // eventually this type of information should be encoded in the stacks
               return origValue;
             }
 
-            if (topology.isYarnResourceManagerHAEnabled(configurationContext) && isComponentResourceManager() && (matchingGroupCount == 2)) {
+            if (topology.isYarnResourceManagerHAEnabled() && isComponentResourceManager() && (matchingGroupCount == 2)) {
               if (!origValue.contains("localhost")) {
                 // if this Yarn property is a FQDN, then simply return it
                 return origValue;
@@ -1567,10 +1558,8 @@ public class BlueprintConfigurationProcessor {
     public Collection<String> getRequiredHostGroups(String propertyName,
                                                     String origValue,
                                                     Map<String, Map<String, String>> properties,
-                                                    ClusterTopology topology,
-                                                    ConfigurationContext configurationContext) {
-      Collection<String> result = super.getRequiredHostGroups(propertyName,
-        origValue, properties, topology, configurationContext);
+                                                    ClusterTopology topology) {
+      Collection<String> result = super.getRequiredHostGroups(propertyName, origValue, properties, topology);
       if (!result.isEmpty()) {
         return result;
       } else {
@@ -1579,7 +1568,7 @@ public class BlueprintConfigurationProcessor {
         if (matchingGroupCount != 0) {
           return new HashSet<>(matchingGroups);
         } else {
-          Cardinality cardinality = configurationContext.getStack().getCardinality(component);
+          Cardinality cardinality = topology.getBlueprint().getStack().getCardinality(component);
           // if no matching host groups are found for a component whose configuration
           // is handled by this updater, return an empty set
           if (! cardinality.isValidCount(0)) {
@@ -1726,10 +1715,9 @@ public class BlueprintConfigurationProcessor {
     public String updateForClusterCreate(String propertyName,
                                          String origValue,
                                          Map<String, Map<String, String>> properties,
-                                         ClusterTopology topology,
-                                         ConfigurationContext configurationContext) {
+                                         ClusterTopology topology) {
       try {
-        return super.updateForClusterCreate(propertyName, origValue, properties, topology, configurationContext);
+        return super.updateForClusterCreate(propertyName, origValue, properties, topology);
       } catch (IllegalArgumentException illegalArgumentException) {
         // return the original value, since the optional component is not available in this cluster
         return origValue;
@@ -1740,11 +1728,10 @@ public class BlueprintConfigurationProcessor {
     public Collection<String> getRequiredHostGroups(String propertyName,
                                                     String origValue,
                                                     Map<String, Map<String, String>> properties,
-                                                    ClusterTopology topology,
-                                                    ConfigurationContext configurationContext) {
+                                                    ClusterTopology topology) {
 
       try {
-        return super.getRequiredHostGroups(propertyName, origValue, properties, topology, configurationContext);
+        return super.getRequiredHostGroups(propertyName, origValue, properties, topology);
       } catch (IllegalArgumentException e) {
         return Collections.emptySet();
       }
@@ -1799,11 +1786,10 @@ public class BlueprintConfigurationProcessor {
     public String updateForClusterCreate(String propertyName,
                                          String origValue,
                                          Map<String, Map<String, String>> properties,
-                                         ClusterTopology topology,
-                                         ConfigurationContext configurationContext) {
+                                         ClusterTopology topology) {
 
       if (isDatabaseManaged(properties)) {
-        return super.updateForClusterCreate(propertyName, origValue, properties, topology, configurationContext);
+        return super.updateForClusterCreate(propertyName, origValue, properties, topology);
       } else {
         return origValue;
       }
@@ -1813,10 +1799,9 @@ public class BlueprintConfigurationProcessor {
     public Collection<String> getRequiredHostGroups(String propertyName,
                                                     String origValue,
                                                     Map<String, Map<String, String>> properties,
-                                                    ClusterTopology topology,
-                                                    ConfigurationContext configurationContext) {
+                                                    ClusterTopology topology) {
       if (isDatabaseManaged(properties)) {
-        return super.getRequiredHostGroups(propertyName, origValue, properties, topology, configurationContext);
+        return super.getRequiredHostGroups(propertyName, origValue, properties, topology);
       } else {
         return Collections.emptySet();
       }
@@ -1901,8 +1886,7 @@ public class BlueprintConfigurationProcessor {
     public String updateForClusterCreate(String propertyName,
                                          String origValue,
                                          Map<String, Map<String, String>> properties,
-                                         ClusterTopology topology,
-                                         ConfigurationContext configurationContext) {
+                                         ClusterTopology topology) {
 
       StringBuilder sb = new StringBuilder();
 
@@ -2069,8 +2053,7 @@ public class BlueprintConfigurationProcessor {
     public Collection<String> getRequiredHostGroups(String propertyName,
                                                     String origValue,
                                                     Map<String, Map<String, String>> properties,
-                                                    ClusterTopology topology,
-                                                    ConfigurationContext configurationContext) {
+                                                    ClusterTopology topology) {
 
       Collection<String> requiredHostGroups = new HashSet<>();
 
@@ -2125,15 +2108,14 @@ public class BlueprintConfigurationProcessor {
     public String updateForClusterCreate(String propertyName,
                                          String origValue,
                                          Map<String, Map<String, String>> properties,
-                                         ClusterTopology topology,
-                                         ConfigurationContext configurationContext) {
+                                         ClusterTopology topology) {
 
       // return customer-supplied properties without updating them
       if (isFQDNValue(origValue)) {
         return origValue;
       }
 
-      return doFormat(propertyUpdater.updateForClusterCreate(propertyName, origValue, properties, topology, configurationContext));
+      return doFormat(propertyUpdater.updateForClusterCreate(propertyName, origValue, properties, topology));
     }
 
     /**
@@ -2149,10 +2131,9 @@ public class BlueprintConfigurationProcessor {
     public Collection<String> getRequiredHostGroups(String propertyName,
                                                     String origValue,
                                                     Map<String, Map<String, String>> properties,
-                                                    ClusterTopology topology,
-                                                    ConfigurationContext configurationContext) {
+                                                    ClusterTopology topology) {
 
-      return propertyUpdater.getRequiredHostGroups(propertyName, origValue, properties, topology, configurationContext);
+      return propertyUpdater.getRequiredHostGroups(propertyName, origValue, properties, topology);
     }
 
     /**
@@ -2264,8 +2245,7 @@ public class BlueprintConfigurationProcessor {
     public String updateForClusterCreate(String propertyName,
                                          String origValue,
                                          Map<String, Map<String, String>> properties,
-                                         ClusterTopology topology,
-                                         ConfigurationContext configurationContext) {
+                                         ClusterTopology topology) {
       // always return the original value, since these properties do not require update handling
       return origValue;
     }
@@ -2274,8 +2254,7 @@ public class BlueprintConfigurationProcessor {
     public Collection<String> getRequiredHostGroups(String propertyName,
                                                     String origValue,
                                                     Map<String, Map<String, String>> properties,
-                                                    ClusterTopology topology,
-                                                    ConfigurationContext configurationContext) {
+                                                    ClusterTopology topology) {
 
       return Collections.emptySet();
     }
@@ -2305,8 +2284,7 @@ public class BlueprintConfigurationProcessor {
     public String updateForClusterCreate(String propertyName,
                                          String origValue,
                                          Map<String, Map<String, String>> properties,
-                                         ClusterTopology topology,
-                                         ConfigurationContext configurationContext) {
+                                         ClusterTopology topology) {
 
       // short-circuit out any custom property values defined by the deployer
       if (!origValue.contains("%HOSTGROUP") &&
@@ -2331,7 +2309,7 @@ public class BlueprintConfigurationProcessor {
         String key = keyValuePair.split("=")[0].trim();
         if (mapOfKeysToUpdaters.containsKey(key)) {
           String result = mapOfKeysToUpdaters.get(key).updateForClusterCreate(
-              key, keyValuePair.split("=")[1].trim(), properties, topology, configurationContext);
+              key, keyValuePair.split("=")[1].trim(), properties, topology);
           // append the internal property result, escape out any commas in the internal property,
           // this is required due to the specific syntax of templeton.hive.properties
           updatedResult.append(key);
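
The comment in the hunk above points out that commas inside an individual value have to be escaped, because templeton.hive.properties is itself a comma-separated list of key=value pairs. A small standalone sketch of that re-assembly step (the backslash-escaped comma is an assumption about the escape sequence, and the class name is made up for illustration):

    import java.util.Map;
    import java.util.stream.Collectors;

    final class TempletonHivePropertiesSketch {

      // Re-assemble "key=value" pairs into one comma-separated property,
      // escaping commas that occur inside a value so the outer separator
      // stays unambiguous.
      static String join(Map<String, String> pairs) {
        return pairs.entrySet().stream()
            .map(e -> e.getKey() + "=" + e.getValue().replace(",", "\\,"))
            .collect(Collectors.joining(","));
      }
    }

For example, a value of "thrift://h1:9083,thrift://h2:9083" for hive.metastore.uris would be emitted as "hive.metastore.uris=thrift://h1:9083\,thrift://h2:9083".
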
@@ -2349,8 +2327,7 @@ public class BlueprintConfigurationProcessor {
     public Collection<String> getRequiredHostGroups(String propertyName,
                                                     String origValue,
                                                     Map<String, Map<String, String>> properties,
-                                                    ClusterTopology topology,
-                                                    ConfigurationContext configurationContext) {
+                                                    ClusterTopology topology) {
 
       // short-circuit out any custom property values defined by the deployer
       if (!origValue.contains("%HOSTGROUP") &&
@@ -2367,7 +2344,7 @@ public class BlueprintConfigurationProcessor {
         String key = keyValuePair.split("=")[0];
         if (mapOfKeysToUpdaters.containsKey(key)) {
           requiredGroups.addAll(mapOfKeysToUpdaters.get(key).getRequiredHostGroups(
-              propertyName, keyValuePair.split("=")[1], properties, topology, configurationContext));
+              propertyName, keyValuePair.split("=")[1], properties, topology));
         }
       }
       return requiredGroups;
@@ -2383,16 +2360,14 @@ public class BlueprintConfigurationProcessor {
     public Collection<String> getRequiredHostGroups(String propertyName,
                                                     String origValue,
                                                     Map<String, Map<String, String>> properties,
-                                                    ClusterTopology topology,
-                                                    ConfigurationContext configurationContext) {
+                                                    ClusterTopology topology) {
       return Collections.emptyList();
     }
 
     public String updateForBlueprintExport(String propertyName,
                                            String origValue,
                                            Map<String, Map<String, String>> properties,
-                                           ClusterTopology topology,
-                                           ConfigurationContext configurationContext) {
+                                           ClusterTopology topology) {
       return origValue;
     }
   }
@@ -2594,8 +2569,7 @@ public class BlueprintConfigurationProcessor {
       public String updateForClusterCreate(String propertyName,
                                            String origValue,
                                            Map<String, Map<String, String>> properties,
-                                           ClusterTopology topology,
-                                           ConfigurationContext configurationContext) {
+                                           ClusterTopology topology) {
         String atlasHookClass = "org.apache.atlas.hive.hook.HiveHook";
         String[] hiveHooks = origValue.split(",");
 
@@ -2606,7 +2580,7 @@ public class BlueprintConfigurationProcessor {
           }
         }
 
-        boolean isAtlasInCluster = topology.getBlueprint().getAllServiceTypes().contains("ATLAS");
+        boolean isAtlasInCluster = topology.getBlueprint().getServices().contains("ATLAS");
         boolean isAtlasHiveHookEnabled = Boolean.parseBoolean(properties.get("hive-env").get("hive.atlas.hook"));
 
         // Append atlas hook if not already present.
@@ -2635,10 +2609,9 @@ public class BlueprintConfigurationProcessor {
       public String updateForClusterCreate(String propertyName,
                                            String origValue,
                                            Map<String, Map<String, String>> properties,
-                                           ClusterTopology topology,
-                                           ConfigurationContext configurationContext) {
+                                           ClusterTopology topology) {
 
-        if (topology.getBlueprint().getAllServiceTypes().contains("ATLAS")) {
+        if (topology.getBlueprint().getServices().contains("ATLAS")) {
           // if original value is not set or is the default "primary" set the cluster id
           if (origValue == null || origValue.trim().isEmpty() || origValue.equals("primary")) {
             //use cluster id because cluster name may change
@@ -2656,7 +2629,7 @@ public class BlueprintConfigurationProcessor {
       public String updateForBlueprintExport(String propertyName,
                                             String origValue,
                                             Map<String, Map<String, String>> properties,
-                                            ClusterTopology topology, ConfigurationContext configurationContext) {
+                                            ClusterTopology topology) {
 
         // if the value is the cluster id, then update to primary
         if (origValue.equals(String.valueOf(topology.getClusterId()))) {
@@ -2672,9 +2645,8 @@ public class BlueprintConfigurationProcessor {
       public String updateForClusterCreate(String propertyName,
                                            String origValue,
                                            Map<String, Map<String, String>> properties,
-                                           ClusterTopology topology,
-                                           ConfigurationContext configurationContext) {
-        if (topology.getBlueprint().getAllServiceTypes().contains("ATLAS")) {
+                                           ClusterTopology topology) {
+        if (topology.getBlueprint().getServices().contains("ATLAS")) {
           String host = topology.getHostAssignmentsForComponent("ATLAS_SERVER").iterator().next();
 
           boolean tlsEnabled = Boolean.parseBoolean(properties.get("application-properties").get("atlas.enableTLS"));
@@ -2730,10 +2702,9 @@ public class BlueprintConfigurationProcessor {
       public String updateForClusterCreate(String propertyName,
                                            String origValue,
                                            Map<String, Map<String, String>> properties,
-                                           ClusterTopology topology,
-                                           ConfigurationContext configurationContext) {
+                                           ClusterTopology topology) {
 
-        if (topology.getBlueprint().getAllServiceTypes().contains("AMBARI_METRICS")) {
+        if (topology.getBlueprint().getServices().contains("AMBARI_METRICS")) {
           final String amsReporterClass = "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter";
           if (origValue == null || origValue.isEmpty()) {
             return amsReporterClass;
@@ -2762,10 +2733,9 @@ public class BlueprintConfigurationProcessor {
       public String updateForClusterCreate(String propertyName,
                                            String origValue,
                                            Map<String, Map<String, String>> properties,
-                                           ClusterTopology topology,
-                                           ConfigurationContext configurationContext) {
+                                           ClusterTopology topology) {
 
-        if (topology.getBlueprint().getAllServiceTypes().contains("AMBARI_METRICS")) {
+        if (topology.getBlueprint().getServices().contains("AMBARI_METRICS")) {
           final String amsReportesClass = "org.apache.hadoop.metrics2.sink.kafka.KafkaTimelineMetricsReporter";
           if (origValue == null || origValue.isEmpty()) {
             return amsReportesClass;
@@ -2825,8 +2795,7 @@ public class BlueprintConfigurationProcessor {
     // AMS
     amsSiteMap.put("timeline.metrics.service.webapp.address", new SingleHostTopologyUpdater("METRICS_COLLECTOR") {
       @Override
-      public String updateForClusterCreate(String propertyName, String origValue, Map<String, Map<String, String>> properties,
-                                           ClusterTopology topology, ConfigurationContext configurationContext) {
+      public String updateForClusterCreate(String propertyName, String origValue, Map<String, Map<String, String>> properties, ClusterTopology topology) {
         if (!origValue.startsWith(BIND_ALL_IP_ADDRESS)) {
           return origValue.replace(origValue.split(":")[0], BIND_ALL_IP_ADDRESS);
         } else {
@@ -2858,7 +2827,7 @@ public class BlueprintConfigurationProcessor {
     // AMBARI-5206
     final Map<String , String> userProps = new HashMap<>();
 
-    Collection<String> services = clusterTopology.getBlueprint().getAllServiceTypes();
+    Collection<String> services = clusterTopology.getBlueprint().getServices();
     if (services.contains("HDFS")) {
       // only add user properties to the map for
       // services actually included in the blueprint definition
@@ -2909,18 +2878,17 @@ public class BlueprintConfigurationProcessor {
    * In case the excluded config-type related service is not present in the blueprint, excluded configs are ignored
    * @param configuration
    * @param configTypesUpdated
+   * @param stack
    */
-  private void addExcludedConfigProperties(Configuration configuration, Set<String> configTypesUpdated) {
-    Collection<Service> blueprintServices = clusterTopology.getBlueprint().getAllServices();
+  private void addExcludedConfigProperties(Configuration configuration, Set<String> configTypesUpdated, Stack stack) {
+    Collection<String> blueprintServices = clusterTopology.getBlueprint().getServices();
 
     LOG.debug("Handling excluded properties for blueprint services: {}", blueprintServices);
 
-    for (Service blueprintService : blueprintServices) {
+    for (String blueprintService : blueprintServices) {
 
       LOG.debug("Handling excluded properties for blueprint service: {}", blueprintService);
-      String stackId = blueprintService.getStackId();
-      StackV2 stack = clusterTopology.getBlueprint().getStackById(stackId);
-      Set<String> excludedConfigTypes = stack.getExcludedConfigurationTypes(blueprintService.getType());
+      Set<String> excludedConfigTypes = stack.getExcludedConfigurationTypes(blueprintService);
 
       if (excludedConfigTypes.isEmpty()) {
         LOG.debug("There are no excluded config types for blueprint service: {}", blueprintService);
@@ -2950,7 +2918,7 @@ public class BlueprintConfigurationProcessor {
           continue;
         }
 
-        Map<String, String> configProperties = stack.getConfigurationProperties(blueprintService.getType(), configType);
+        Map<String, String> configProperties = stack.getConfigurationProperties(blueprintService, configType);
         for(Map.Entry<String, String> entry: configProperties.entrySet()) {
           LOG.debug("ADD property {} {} {}", configType, entry.getKey(), entry.getValue());
           ensureProperty(configuration, configType, entry.getKey(), entry.getValue(), configTypesUpdated);
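
ensureProperty in the hunk above only needs to add an excluded stack default when the deployer has not already supplied a value, and to note which config types were touched. A minimal standalone sketch of that "set if absent, track the type" idea over plain JDK maps (this is not Ambari's Configuration API, just the behavior suggested by the call site):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Set;

    final class EnsurePropertySketch {

      // Write the stack default only if no value is present yet, and remember
      // which config types were modified so they can be versioned later.
      static void ensureProperty(Map<String, Map<String, String>> config,
                                 String configType, String name, String defaultValue,
                                 Set<String> configTypesUpdated) {
        Map<String, String> type = config.computeIfAbsent(configType, k -> new HashMap<>());
        if (!type.containsKey(name)) {
          type.put(name, defaultValue);
          configTypesUpdated.add(configType);
        }
      }
    }
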
@@ -3008,8 +2976,9 @@ public class BlueprintConfigurationProcessor {
   private void setStackToolsAndFeatures(Configuration configuration, Set<String> configTypesUpdated)
       throws ConfigurationTopologyException {
     ConfigHelper configHelper = clusterTopology.getAmbariContext().getConfigHelper();
-    String stackName = configurationContext.getStack().getName();
-    String stackVersion = configurationContext.getStack().getVersion();
+    Stack stack = clusterTopology.getBlueprint().getStack();
+    String stackName = stack.getName();
+    String stackVersion = stack.getVersion();
 
     StackId stackId = new StackId(stackName, stackVersion);
 
@@ -3072,8 +3041,7 @@ public class BlueprintConfigurationProcessor {
      * @return true if the property should be included
      *         false if the property should not be included
      */
-    boolean isPropertyIncluded(String propertyName, String propertyValue,
-                               String configType, ClusterTopology topology, ConfigurationContext configurationContext);
+    boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology);
   }
 
   /**
@@ -3107,8 +3075,7 @@ public class BlueprintConfigurationProcessor {
      *         false if the property should not be included
      */
     @Override
-    public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType,
-                                      ClusterTopology topology, ConfigurationContext configurationContext) {
+    public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) {
       return !PASSWORD_NAME_REGEX.matcher(propertyName).matches();
     }
   }
@@ -3133,9 +3100,8 @@ public class BlueprintConfigurationProcessor {
      *         false if the property should not be included
      */
     @Override
-    public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType,
-                                      ClusterTopology topology, ConfigurationContext configurationContext) {
-        StackV2 stack = configurationContext.getStack();
+    public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) {
+        Stack stack = topology.getBlueprint().getStack();
         final String serviceName = stack.getServiceForConfigType(configType);
         return !(stack.isPasswordProperty(serviceName, configType, propertyName) ||
                 stack.isKerberosPrincipalNameProperty(serviceName, configType, propertyName));
@@ -3166,10 +3132,8 @@ public class BlueprintConfigurationProcessor {
       this.authToLocalPerClusterMap = authToLocalPerClusterMap;
     }
     @Override
-    public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType,
-                                      ClusterTopology topology, ConfigurationContext configurationContext) {
-      return (authToLocalPerClusterMap == null || authToLocalPerClusterMap.get(topology.getClusterId()) == null ||
-        !authToLocalPerClusterMap.get(topology.getClusterId()).contains(String.format("%s/%s", configType, propertyName)));
+    public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) {
+      return (authToLocalPerClusterMap == null || authToLocalPerClusterMap.get(topology.getClusterId()) == null || !authToLocalPerClusterMap.get(topology.getClusterId()).contains(String.format("%s/%s", configType, propertyName)));
     }
   }
 
@@ -3191,8 +3155,7 @@ public class BlueprintConfigurationProcessor {
     }
 
     @Override
-    public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType,
-                                      ClusterTopology topology, ConfigurationContext configurationContext) {
+    public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) {
       return !(propertyConfigType.equals(configType) &&
              this.propertyName.equals(propertyName));
     }
@@ -3234,15 +3197,15 @@ public class BlueprintConfigurationProcessor {
      *         false if the property should not be included
      */
     @Override
-    public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType,
-                                      ClusterTopology topology, ConfigurationContext configurationContext) {
+    public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) {
+      Stack stack = topology.getBlueprint().getStack();
       Configuration configuration = topology.getConfiguration();
 
-      final String serviceName = configurationContext.getStack().getServiceForConfigType(configType);
-      Map<String, StackV2.ConfigProperty> typeProperties =
-        configurationContext.getStack().getConfigurationPropertiesWithMetadata(serviceName, configType);
+      final String serviceName = stack.getServiceForConfigType(configType);
+      Map<String, Stack.ConfigProperty> typeProperties =
+        stack.getConfigurationPropertiesWithMetadata(serviceName, configType);
 
-      StackV2.ConfigProperty configProperty = typeProperties.get(propertyName);
+      Stack.ConfigProperty configProperty = typeProperties.get(propertyName);
       if (configProperty != null) {
         Set<PropertyDependencyInfo> dependencyInfos = configProperty.getDependsOnProperties();
         if (dependencyInfos != null) {
@@ -3363,9 +3326,8 @@ public class BlueprintConfigurationProcessor {
      *         false if the property should not be included
      */
     @Override
-    public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType,
-                                      ClusterTopology topology, ConfigurationContext configurationContext) {
-      if (topology.isNameNodeHAEnabled(configurationContext)) {
+    public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) {
+      if (topology.isNameNodeHAEnabled()) {
         if (setOfHDFSPropertyNamesNonHA.contains(propertyName)) {
           return false;
         }
@@ -3401,8 +3363,7 @@ public class BlueprintConfigurationProcessor {
      *         false if the property should not be included
      */
     @Override
-    public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType,
-                                      ClusterTopology topology, ConfigurationContext configurationContext) {
+    public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) {
       if (configType.equals(this.configType) && propertyName.equals(this.propertyName) && propertyValue.equals(this
         .propertyValue)) {
         return false;
@@ -3435,8 +3396,7 @@ public class BlueprintConfigurationProcessor {
      *         false if the property should not be included
      */
     @Override
-    public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType,
-                                      ClusterTopology topology, ConfigurationContext configurationContext) {
+    public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) {
       int matchingGroupCount = topology.getHostGroupsForComponent(HAWQSTANDBY).size();
       if (matchingGroupCount == 0) {
         if (setOfHawqPropertyNamesNonHA.contains(propertyName)) {
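
For orientation on the signature change running through the hunks above: the updaters and filters are reverted to take only the ClusterTopology, with the stack reached through topology.getBlueprint().getStack() instead of a separate ConfigurationContext. A standalone sketch of the reverted filter shape (the types and the regex below are illustrative stand-ins, not the Ambari classes):

    import java.util.regex.Pattern;

    // Stand-in for the reverted PropertyFilter contract: no ConfigurationContext argument.
    interface PropertyFilterSketch {
      boolean isPropertyIncluded(String propertyName, String propertyValue,
                                 String configType, Object topology);
    }

    class PasswordFilterSketch implements PropertyFilterSketch {
      private static final Pattern PASSWORD_LIKE =
          Pattern.compile(".*(password|secret).*", Pattern.CASE_INSENSITIVE);

      @Override
      public boolean isPropertyIncluded(String propertyName, String propertyValue,
                                        String configType, Object topology) {
        // Drop password-like properties from blueprint export, keep everything else.
        return !PASSWORD_LIKE.matcher(propertyName).matches();
      }
    }
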
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintV2ResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintV2ResourceProvider.java
deleted file mode 100644
index 6d2c4f0..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintV2ResourceProvider.java
+++ /dev/null
@@ -1,398 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.DuplicateResourceException;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.spi.NoSuchParentResourceException;
-import org.apache.ambari.server.controller.spi.NoSuchResourceException;
-import org.apache.ambari.server.controller.spi.Predicate;
-import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.RequestStatus;
-import org.apache.ambari.server.controller.spi.Resource;
-import org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException;
-import org.apache.ambari.server.controller.spi.SystemException;
-import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.apache.ambari.server.orm.dao.BlueprintV2DAO;
-import org.apache.ambari.server.orm.entities.BlueprintV2Entity;
-import org.apache.ambari.server.topology.BlueprintV2;
-import org.apache.ambari.server.topology.BlueprintV2Factory;
-import org.apache.ambari.server.topology.InvalidTopologyException;
-import org.apache.ambari.server.topology.SecurityConfigurationFactory;
-
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Splitter;
-import com.google.common.base.Strings;
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.SetMultimap;
-
-
-/**
- * Resource Provider for Blueprint resources.
- */
-public class BlueprintV2ResourceProvider extends AbstractControllerResourceProvider {
-
-  // ----- Property ID constants ---------------------------------------------
-
-  // Blueprints
-  public static final String BLUEPRINT_NAME_PROPERTY_ID =
-    PropertyHelper.getPropertyId("Blueprints", "blueprint_name");
-
-  public static final String BLUEPRINT_SECURITY_PROPERTY_ID =
-    PropertyHelper.getPropertyId("Blueprints", "security");
-
-  public static final String BLUEPRINTS_PROPERTY_ID = "Blueprints";
-
-  // Host Groups
-  public static final String HOST_GROUP_PROPERTY_ID = "host_groups";
-  public static final String HOST_GROUP_NAME_PROPERTY_ID = "name";
-  public static final String HOST_GROUP_CARDINALITY_PROPERTY_ID = "cardinality";
-
-  // Host Group Components
-  public static final String COMPONENT_PROPERTY_ID ="components";
-  public static final String COMPONENT_NAME_PROPERTY_ID ="name";
-  public static final String COMPONENT_PROVISION_ACTION_PROPERTY_ID = "provision_action";
-
-  // Configurations
-  public static final String CONFIGURATION_PROPERTY_ID = "configurations";
-
-
-  // Setting
-  public static final String SETTING_PROPERTY_ID = "settings";
-  public static final String CLUSTER_SETTING_PROPERTY_ID = "cluster_settings";
-
-  public static final String PROPERTIES_PROPERTY_ID = "properties";
-  public static final String PROPERTIES_ATTRIBUTES_PROPERTY_ID = "properties_attributes";
-  public static final String SCHEMA_IS_NOT_SUPPORTED_MESSAGE =
-    "Configuration format provided in Blueprint is not supported";
-  public static final String REQUEST_BODY_EMPTY_ERROR_MESSAGE =
-    "Request body for Blueprint create request is empty";
-  public static final String CONFIGURATION_LIST_CHECK_ERROR_MESSAGE =
-    "Configurations property must be a List of Maps";
-  public static final String CONFIGURATION_MAP_CHECK_ERROR_MESSAGE =
-    "Configuration elements must be Maps";
-  public static final String CONFIGURATION_MAP_SIZE_CHECK_ERROR_MESSAGE =
-    "Configuration Maps must hold a single configuration type each";
-
-  // Primary Key Fields
-  private static Set<String> pkPropertyIds =
-    new HashSet<>(Arrays.asList(new String[]{
-      BLUEPRINT_NAME_PROPERTY_ID}));
-
-  /**
-   * Used to create Blueprint instances
-   */
-  private static BlueprintV2Factory blueprintFactory;
-
-  /**
-   * Used to create SecurityConfiguration instances
-   */
-  private static SecurityConfigurationFactory securityConfigurationFactory;
-
-  /**
-   * Blueprint Data Access Object
-   */
-  private static BlueprintV2DAO blueprintDAO;
-
-  // ----- Constructors ----------------------------------------------------
-
-  /**
-   * Create a  new resource provider for the given management controller.
-   *
-   * @param propertyIds     the property ids
-   * @param keyPropertyIds  the key property ids
-   * @param controller      management controller
-   */
-  BlueprintV2ResourceProvider(Set<String> propertyIds,
-                            Map<Resource.Type, String> keyPropertyIds,
-                            AmbariManagementController controller) {
-
-    super(propertyIds, keyPropertyIds, controller);
-    blueprintFactory = BlueprintV2Factory.create(controller);
-  }
-
-  /**
-   * Static initialization.
-   *
-   * @param factory   blueprint factory
-   * @param dao       blueprint data access object
-   * @param securityFactory
-   * @param metaInfo
-   */
-  public static void init(BlueprintV2Factory factory, BlueprintV2DAO dao, SecurityConfigurationFactory
-    securityFactory, AmbariMetaInfo metaInfo) {
-    blueprintFactory = factory;
-    blueprintDAO = dao;
-    securityConfigurationFactory = securityFactory;
-    ambariMetaInfo = metaInfo;
-  }
-
-  // ----- ResourceProvider ------------------------------------------------
-
-  @Override
-  protected Set<String> getPKPropertyIds() {
-    return pkPropertyIds;
-  }
-
-  @Override
-  public RequestStatus createResources(Request request)
-    throws SystemException, UnsupportedPropertyException,
-    ResourceAlreadyExistsException, NoSuchParentResourceException {
-
-    for (Map<String, Object> properties : request.getProperties()) {
-      try {
-        createResources(getCreateCommand(properties, request.getRequestInfoProperties()));
-      }catch(IllegalArgumentException e) {
-        LOG.error("Exception while creating blueprint", e);
-        throw e;
-      }
-    }
-    notifyCreate(Resource.Type.Blueprint, request);
-
-    return getRequestStatus(null);
-  }
-
-  @Override
-  //todo: continue to use dao/entity directly or use blueprint factory?
-  public Set<Resource> getResources(Request request, Predicate predicate)
-    throws SystemException, UnsupportedPropertyException,
-    NoSuchResourceException, NoSuchParentResourceException {
-
-    List<BlueprintV2Entity> results = null;
-    boolean applyPredicate = false;
-
-    if (predicate != null) {
-      Set<Map<String, Object>> requestProps = getPropertyMaps(predicate);
-      if (requestProps.size() == 1 ) {
-        String name = (String) requestProps.iterator().next().get(
-          BLUEPRINT_NAME_PROPERTY_ID);
-
-        if (name != null) {
-          BlueprintV2Entity entity = blueprintDAO.findByName(name);
-          results = entity == null ? Collections.emptyList() : Collections.singletonList(entity);
-        }
-      }
-    }
-
-    if (results == null) {
-      applyPredicate = true;
-      results = blueprintDAO.findAll();
-    }
-
-    Set<Resource> resources  = new HashSet<>();
-    for (BlueprintV2Entity entity : results) {
-      Resource resource = toResource(entity, getRequestPropertyIds(request, predicate));
-      if (predicate == null || ! applyPredicate || predicate.evaluate(resource)) {
-        resources.add(resource);
-      }
-    }
-
-    if (predicate != null && resources.isEmpty()) {
-      throw new NoSuchResourceException(
-        "The requested resource doesn't exist: Blueprint not found, " + predicate);
-    }
-
-    return resources;
-  }
-
-  @Override
-  public RequestStatus updateResources(Request request, Predicate predicate)
-    throws SystemException, UnsupportedPropertyException,
-    NoSuchResourceException, NoSuchParentResourceException {
-
-    // no-op, blueprints are immutable.  Service doesn't support PUT so should never get here.
-    return null;
-  }
-
-  @Override
-  public RequestStatus deleteResources(Request request, Predicate predicate)
-    throws SystemException, UnsupportedPropertyException,
-    NoSuchResourceException, NoSuchParentResourceException {
-
-    //TODO (jspeidel): Revisit concurrency control
-    Set<Resource> setResources = getResources(
-      new RequestImpl(null, null, null, null), predicate);
-
-    for (final Resource resource : setResources) {
-      final String blueprintName =
-        (String) resource.getPropertyValue(BLUEPRINT_NAME_PROPERTY_ID);
-
-      LOG.info("Deleting Blueprint, name = " + blueprintName);
-
-      modifyResources(new Command<Void>() {
-        @Override
-        public Void invoke() throws AmbariException {
-          blueprintDAO.removeByName(blueprintName);
-          return null;
-        }
-      });
-    }
-
-    notifyDelete(Resource.Type.Blueprint, predicate);
-    return getRequestStatus(null);
-  }
-
-  /**
-   * Used to get stack metainfo.
-   */
-  private static AmbariMetaInfo ambariMetaInfo;
-
-  // ----- Instance Methods ------------------------------------------------
-
-  /**
-   * Create a resource instance from a blueprint entity.
-   *
-   * @param entity        blueprint entity
-   * @param requestedIds  requested id's
-   *
-   * @return a new resource instance for the given blueprint entity
-   */
-  protected Resource toResource(BlueprintV2Entity entity, Set<String> requestedIds) throws NoSuchResourceException {
-    try {
-      Resource resource = new ResourceImpl(Resource.Type.Blueprint);
-      Map<String, Object> blueprintAsMap = blueprintFactory.convertToMap(entity);
-      if (!requestedIds.isEmpty()) {
-        Map<String, Object> filteredMap = new HashMap<>();
-        applySelectFilters(requestedIds, blueprintAsMap, filteredMap);
-        blueprintAsMap = filteredMap;
-      }
-      // flatten the Blueprint property category
-      Map<String, Object> blueprintPc = (Map<String, Object>)blueprintAsMap.remove(BLUEPRINTS_PROPERTY_ID);
-      for (Map.Entry<String, Object> entry: blueprintPc.entrySet()) {
-        blueprintAsMap.put(BLUEPRINTS_PROPERTY_ID + "/" + entry.getKey(), entry.getValue());
-      }
-      // set resources
-      blueprintAsMap.entrySet().forEach( entry -> resource.setProperty(entry.getKey(), entry.getValue()) );
-      return resource;
-    }
-    catch (IOException e) {
-      throw new NoSuchResourceException("Cannot convert blueprint entity to resource. name=" + entity.getBlueprintName(), e);
-    }
-  }
-
-  /**
-   * Recursively applies select filters on an input map. Only properties matching the filters will be preserved.
-   * @param filters list of filters. Each filter is a string that can contain subfilters separated by '/'
-   * @param startingMap The map to filter
-   * @param collectingMap The map to collect the results into
-   */
-  private void applySelectFilters(Set<String> filters, Map<String, Object> startingMap, Map<String, Object> collectingMap) {
-    // Identify filters that apply to this level and those that will be applied on lower levels of the recursion
-    Splitter splitter = Splitter.on('/').omitEmptyStrings().trimResults();
-    Joiner joiner = Joiner.on('/');
-    SetMultimap<String, String> lowerLevelFilters = HashMultimap.create();
-    List<String> currentLevelFilters = new ArrayList<>();
-    filters.forEach( filter -> {
-      List<String> filterParts = ImmutableList.copyOf(splitter.split(filter));
-      if (filterParts.size() == 1) {
-        currentLevelFilters.add(filter);
-      }
-      else {
-        lowerLevelFilters.put(filterParts.get(0), joiner.join(filterParts.subList(1, filterParts.size())));
-      }
-    });
-    startingMap.entrySet().forEach( entry -> {
-      if (currentLevelFilters.contains(entry.getKey())) {
-        collectingMap.put(entry.getKey(), entry.getValue());
-      }
-      else if (lowerLevelFilters.containsKey(entry.getKey()) && entry.getValue() instanceof Map) {
-        Map<String, Object> lowerLevelCollector = (Map<String, Object>)collectingMap.get(entry.getKey());
-        if (null == lowerLevelCollector) {
-          lowerLevelCollector = new HashMap<>();
-          collectingMap.put(entry.getKey(), lowerLevelCollector);
-        }
-        applySelectFilters(lowerLevelFilters.get(entry.getKey()), (Map<String, Object>)entry.getValue(), lowerLevelCollector);
-      }
-    });
-  }
-
-
-  /**
-   * Create a create command with all properties set.
-   *
-   * @param properties        properties to be applied to blueprint
-   * @param requestInfoProps  request info properties
-   *
-   * @return a new create command
-   */
-  private Command<Void> getCreateCommand(final Map<String, Object> properties, final Map<String, String> requestInfoProps) {
-    return new Command<Void>() {
-      @SuppressWarnings("rawtypes")
-      @Override
-      public Void invoke() throws AmbariException {
-        String rawRequestBody = requestInfoProps.get(Request.REQUEST_INFO_BODY_PROPERTY);
-        Preconditions.checkArgument(!Strings.isNullOrEmpty(rawRequestBody), REQUEST_BODY_EMPTY_ERROR_MESSAGE);
-
-        BlueprintV2 blueprint = null;
-        try {
-          blueprint = blueprintFactory.convertFromJson(rawRequestBody);
-        }
-        catch (IOException e) {
-            throw new AmbariException("Unable to parse blueprint", e);
-        }
-
-        if (blueprintDAO.findByName(blueprint.getName()) != null) {
-          throw new DuplicateResourceException(
-            "Attempted to create a Blueprint which already exists, blueprint_name=" +
-              blueprint.getName());
-        }
-
-        try {
-          blueprint.validateRequiredProperties();
-        } catch (InvalidTopologyException e) {
-          throw new IllegalArgumentException("Blueprint configuration validation failed: " + e.getMessage(), e);
-        }
-
-        String validateTopology =  requestInfoProps.get("validate_topology");
-        if (validateTopology == null || ! validateTopology.equalsIgnoreCase("false")) {
-          try {
-            blueprint.validateTopology();
-          } catch (InvalidTopologyException e) {
-            throw new IllegalArgumentException("Invalid blueprint topology", e);
-          }
-        }
-
-        // TODO: handle security descriptor
-
-        try {
-          BlueprintV2Entity entity = blueprintFactory.convertToEntity(blueprint);
-          blueprintDAO.create(entity);
-        } catch (Exception e) {
-          throw new RuntimeException(e);
-        }
-        return null;
-      }
-    };
-  }
-}
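
The applySelectFilters helper removed above recursively splits '/'-separated select filters into a current-level part and lower-level parts, keeping only matching map entries. A self-contained sketch of the same idea using only JDK collections (the deleted class uses Guava Splitter/Joiner/SetMultimap; the class name here is made up):

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class SelectFilterSketch {

      // Keep only entries of 'input' whose keys match one of the '/'-separated
      // filters, recursing into nested maps for multi-part filters such as
      // "Blueprints/blueprint_name".
      @SuppressWarnings("unchecked")
      static Map<String, Object> apply(Set<String> filters, Map<String, Object> input) {
        Set<String> currentLevel = new HashSet<>();
        Map<String, Set<String>> lowerLevels = new HashMap<>();
        for (String filter : filters) {
          int slash = filter.indexOf('/');
          if (slash < 0) {
            currentLevel.add(filter);
          } else {
            lowerLevels.computeIfAbsent(filter.substring(0, slash), k -> new HashSet<>())
                       .add(filter.substring(slash + 1));
          }
        }
        Map<String, Object> result = new HashMap<>();
        for (Map.Entry<String, Object> entry : input.entrySet()) {
          if (currentLevel.contains(entry.getKey())) {
            result.put(entry.getKey(), entry.getValue());
          } else if (lowerLevels.containsKey(entry.getKey()) && entry.getValue() instanceof Map) {
            result.put(entry.getKey(),
                apply(lowerLevels.get(entry.getKey()), (Map<String, Object>) entry.getValue()));
          }
        }
        return result;
      }
    }

With a filter set of {"Blueprints/blueprint_name"}, only the blueprint_name entry nested under Blueprints survives in the result.
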
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java
index cc0cea3..1581c72 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java
@@ -206,7 +206,7 @@ public class ClusterResourceProvider extends AbstractControllerResourceProvider
     baseUnsupported.remove("blueprint");
     baseUnsupported.remove("host_groups");
     baseUnsupported.remove("default_password");
-    baseUnsupported.remove("services");
+    baseUnsupported.remove("configurations");
     baseUnsupported.remove("credentials");
     baseUnsupported.remove("config_recommendation_strategy");
     baseUnsupported.remove("provision_action");
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigurationContext.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigurationContext.java
deleted file mode 100644
index aa63021..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ConfigurationContext.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller.internal;
-
-import java.util.Map;
-
-import org.apache.ambari.server.controller.StackV2;
-import org.apache.ambari.server.topology.Configuration;
-
-/**
- * Provides a context for configuration.
- */
-public class ConfigurationContext  {
-
-  private final Configuration configuration;
-
-  private final StackV2 stack;
-
-  public ConfigurationContext(StackV2 stack, Configuration configuration){
-    this.stack = stack;
-    this.configuration = configuration;
-  }
-
-  public Configuration getConfiguration() {
-    return configuration;
-  }
-
-  public StackV2 getStack() {
-    return stack;
-  }
-
-  public boolean isNameNodeHAEnabled() {
-    Map<String, Map<String, String>> configurationProperties = getConfiguration().getProperties();
-    return configurationProperties.containsKey("hdfs-site") &&
-      (configurationProperties.get("hdfs-site").containsKey("dfs.nameservices") ||
-        configurationProperties.get("hdfs-site").containsKey("dfs.internal.nameservices"));
-  }
-
-  public boolean isYarnResourceManagerHAEnabled() {
-    Map<String, Map<String, String>> configProperties = getConfiguration().getProperties();
-    return configProperties.containsKey("yarn-site") && configProperties.get("yarn-site").containsKey("yarn.resourcemanager.ha.enabled")
-      && configProperties.get("yarn-site").get("yarn.resourcemanager.ha.enabled").equals("true");
-  }
-}
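
The HA checks deleted along with ConfigurationContext (and, per the BlueprintConfigurationProcessor hunks earlier, answered again through ClusterTopology.isNameNodeHAEnabled()) reduce to inspecting hdfs-site and yarn-site properties. A minimal standalone sketch of the two predicates over a plain properties map (the class name is illustrative):

    import java.util.Map;

    final class HaChecksSketch {

      // NameNode HA is treated as enabled when hdfs-site declares nameservices.
      static boolean isNameNodeHaEnabled(Map<String, Map<String, String>> props) {
        Map<String, String> hdfsSite = props.get("hdfs-site");
        return hdfsSite != null
            && (hdfsSite.containsKey("dfs.nameservices")
                || hdfsSite.containsKey("dfs.internal.nameservices"));
      }

      // ResourceManager HA is treated as enabled when yarn-site sets the flag to "true".
      static boolean isResourceManagerHaEnabled(Map<String, Map<String, String>> props) {
        Map<String, String> yarnSite = props.get("yarn-site");
        return yarnSite != null
            && "true".equals(yarnSite.get("yarn.resourcemanager.ha.enabled"));
      }
    }
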
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequest.java
index 565369b..16d3114 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequest.java
@@ -38,14 +38,14 @@ import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.apache.ambari.server.state.DesiredConfig;
 import org.apache.ambari.server.state.HostConfig;
-import org.apache.ambari.server.topology.BlueprintV2;
+import org.apache.ambari.server.topology.Blueprint;
+import org.apache.ambari.server.topology.BlueprintImpl;
 import org.apache.ambari.server.topology.Component;
 import org.apache.ambari.server.topology.Configuration;
 import org.apache.ambari.server.topology.HostGroup;
 import org.apache.ambari.server.topology.HostGroupImpl;
 import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.InvalidTopologyTemplateException;
-import org.apache.ambari.server.topology.Service;
 import org.apache.ambari.server.topology.TopologyRequest;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -60,12 +60,8 @@ public class ExportBlueprintRequest implements TopologyRequest {
 
   private String clusterName;
   private Long clusterId;
-  private BlueprintV2 blueprint;
-  /**
-   * List of services
-   */
-  protected Collection<Service> services;
-
+  private Blueprint blueprint;
+  private Configuration configuration;
   //todo: Should this map be represented by a new class?
   private Map<String, HostGroupInfo> hostGroupInfo = new HashMap<>();
 
@@ -102,18 +98,13 @@ public class ExportBlueprintRequest implements TopologyRequest {
   }
 
   @Override
-  public BlueprintV2 getBlueprint() {
+  public Blueprint getBlueprint() {
     return blueprint;
   }
 
   @Override
-  public Collection<Service> getServiceConfigs() {
-    return services;
-  }
-
-  @Override
   public Configuration getConfiguration() {
-    return null;
+    return configuration;
   }
 
   @Override
@@ -144,7 +135,7 @@ public class ExportBlueprintRequest implements TopologyRequest {
       hostGroups.add(new HostGroupImpl(exportedHostGroup.getName(), bpName, stack, componentList,
           exportedHostGroup.getConfiguration(), String.valueOf(exportedHostGroup.getCardinality())));
     }
-    //blueprint = new BlueprintImplV2(bpName, hostGroups, stack, configuration, null);
+    blueprint = new BlueprintImpl(bpName, hostGroups, stack, configuration, null);
   }
 
   private void createHostGroupInfo(Collection<ExportedHostGroup> exportedHostGroups) {
@@ -192,12 +183,11 @@ public class ExportBlueprintRequest implements TopologyRequest {
         attributes.put(configuration.getType(), configuration.getPropertyAttributes());
       }
     }
-//    configuration = new Configuration(properties, attributes);
-//    // empty parent configuration when exporting as all properties are included in this configuration
-//    configuration.setParentConfiguration(new Configuration(
-//        Collections.emptyMap(),
-//        Collections.emptyMap()));
-
+    configuration = new Configuration(properties, attributes);
+    // empty parent configuration when exporting as all properties are included in this configuration
+    configuration.setParentConfiguration(new Configuration(
+        Collections.emptyMap(),
+        Collections.emptyMap()));
   }
 
   /**
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java
index f773fc8..1fd6091 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequest.java
@@ -17,8 +17,6 @@
  */
 package org.apache.ambari.server.controller.internal;
 
-import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
@@ -38,9 +36,6 @@ import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.InvalidTopologyTemplateException;
 import org.apache.ambari.server.topology.NoSuchBlueprintException;
 import org.apache.ambari.server.topology.SecurityConfiguration;
-import org.apache.ambari.server.topology.Service;
-import org.apache.ambari.server.topology.ServiceId;
-import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -95,11 +90,6 @@ public class ProvisionClusterRequest extends BaseClusterRequest {
   public static final String CONFIGURATIONS_PROPERTY = "configurations";
 
   /**
-   * services property name
-   */
-  public static final String SERVICES_PROPERTY = "services";
-
-  /**
    * default password property name
    */
   public static final String DEFAULT_PASSWORD_PROPERTY = "default_password";
@@ -128,8 +118,6 @@ public class ProvisionClusterRequest extends BaseClusterRequest {
    * The service and component level quick link filters property
    */
   public static final String QUICKLINKS_PROFILE_SERVICES_PROPERTY = "quicklinks_profile/services";
-  public static final String SERVICE_GROUP_NAME_PROPERETY = "service_group";
-  public static final String SERVICE_NAME_PROPERTY = "name";
 
 
   /**
@@ -195,34 +183,15 @@ public class ProvisionClusterRequest extends BaseClusterRequest {
 
     this.securityConfiguration = securityConfiguration;
 
-    // parse service configs and merge with BP service configs
-    serviceConfigs = new ArrayList<>();
-    Collection<Map> services = (Collection<Map>) properties.get(SERVICES_PROPERTY);
-    for (Map serviceMap : services) {
-      String serviceName = (String) serviceMap.get(SERVICE_NAME_PROPERTY);
-      if (StringUtils.isEmpty(serviceName)) {
-        throw new InvalidTopologyTemplateException("Service name must be specified.");
-      }
-      String serviceGroupName = (String) serviceMap.get(SERVICE_GROUP_NAME_PROPERETY);
-      if (StringUtils.isEmpty(serviceGroupName)) {
-        throw new InvalidTopologyTemplateException("Service group name must be specified for service: " + serviceName);
-      }
-      Configuration configuration = configurationFactory.getConfiguration((Collection<Map<String, String>>)
-              serviceMap.get(CONFIGURATIONS_PROPERTY));
-      ServiceId serviceId = ServiceId.of(serviceName, serviceGroupName);
-      Service service = blueprint.getServiceById(serviceId);
-      if (service == null) {
-        throw new InvalidTopologyTemplateException("Service: " + serviceName + " in service group: "
-                + serviceGroupName + " not found.");
-      }
-      service.getConfiguration().setParentConfiguration(service.getStack().getConfiguration());
-      configuration.setParentConfiguration(service.getConfiguration());
-      service.setConfiguration(configuration);
-      serviceConfigs.add(service);
-    }
+    Configuration configuration = configurationFactory.getConfiguration(
+      (Collection<Map<String, String>>) properties.get(CONFIGURATIONS_PROPERTY));
+    configuration.setParentConfiguration(blueprint.getConfiguration());
+    setConfiguration(configuration);
 
     parseHostGroupInfo(properties);
+
     this.credentialsMap = parseCredentials(properties);
+
     this.configRecommendationStrategy = parseConfigRecommendationStrategy(properties);
 
     setProvisionAction(parseProvisionAction(properties));
@@ -322,11 +291,7 @@ public class ProvisionClusterRequest extends BaseClusterRequest {
   private void parseBlueprint(Map<String, Object> properties) throws NoSuchStackException, NoSuchBlueprintException {
     String blueprintName = String.valueOf(properties.get(ClusterResourceProvider.BLUEPRINT));
     // set blueprint field
-    try {
-      setBlueprint(getBlueprintFactory().getBlueprint(blueprintName));
-    } catch (IOException e) {
-      throw new NoSuchBlueprintException(blueprintName);
-    }
+    setBlueprint(getBlueprintFactory().getBlueprint(blueprintName));
 
     if (blueprint == null) {
       throw new NoSuchBlueprintException(blueprintName);
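
The restored flow above parses the cluster-scoped "configurations" section and chains it to the blueprint's configuration as its parent, so request-level values override blueprint values and anything unset falls through. A standalone sketch of that parent-chained lookup (this is not Ambari's Configuration class, only the lookup idea):

    import java.util.HashMap;
    import java.util.Map;

    class LayeredConfigSketch {
      private final Map<String, Map<String, String>> properties = new HashMap<>();
      private LayeredConfigSketch parent;

      void setParentConfiguration(LayeredConfigSketch parent) { this.parent = parent; }

      void put(String configType, String name, String value) {
        properties.computeIfAbsent(configType, k -> new HashMap<>()).put(name, value);
      }

      // Values set on this layer win; anything missing falls through to the parent layer.
      String get(String configType, String name) {
        Map<String, String> type = properties.get(configType);
        if (type != null && type.containsKey(name)) {
          return type.get(name);
        }
        return parent == null ? null : parent.get(configType, name);
      }
    }

For example, a core-site value supplied in the cluster creation request shadows the blueprint's value for the same property, while untouched properties still resolve from the blueprint layer.
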
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java
index 1284c26..fe33f93 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ScaleClusterRequest.java
@@ -19,18 +19,16 @@
 
 package org.apache.ambari.server.controller.internal;
 
-import java.io.IOException;
 import java.util.Collections;
 import java.util.Map;
 import java.util.Set;
 
 import org.apache.ambari.server.api.predicate.InvalidQueryException;
 import org.apache.ambari.server.stack.NoSuchStackException;
-import org.apache.ambari.server.topology.BlueprintV2;
+import org.apache.ambari.server.topology.Blueprint;
 import org.apache.ambari.server.topology.Configuration;
 import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.InvalidTopologyTemplateException;
-import org.apache.ambari.server.topology.NoSuchBlueprintException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -60,8 +58,8 @@ public class ScaleClusterRequest extends BaseClusterRequest {
         setClusterName(String.valueOf(properties.get(HostResourceProvider.HOST_CLUSTER_NAME_PROPERTY_ID)));
       }
       // currently don't allow cluster scoped configuration in scaling operation
-//      setConfiguration(new Configuration(Collections.emptyMap(),
-//          Collections.emptyMap()));
+      setConfiguration(new Configuration(Collections.emptyMap(),
+          Collections.emptyMap()));
 
       parseHostGroups(properties);
     }
@@ -112,7 +110,7 @@ public class ScaleClusterRequest extends BaseClusterRequest {
       throw new InvalidTopologyTemplateException("A name must be specified for all host groups");
     }
 
-    BlueprintV2 blueprint = getBlueprint();
+    Blueprint blueprint = getBlueprint();
     if (getBlueprint() == null) {
       blueprint = parseBlueprint(blueprintName);
       setBlueprint(blueprint);
@@ -199,14 +197,10 @@ public class ScaleClusterRequest extends BaseClusterRequest {
    *
    * @throws InvalidTopologyTemplateException if specified blueprint or stack doesn't exist
    */
-  private BlueprintV2 parseBlueprint(String blueprintName) throws InvalidTopologyTemplateException  {
-    BlueprintV2 blueprint;
+  private Blueprint parseBlueprint(String blueprintName) throws InvalidTopologyTemplateException  {
+    Blueprint blueprint;
     try {
       blueprint = getBlueprintFactory().getBlueprint(blueprintName);
-    } catch (NoSuchBlueprintException e) {
-      throw new InvalidTopologyTemplateException("Invalid blueprint specified: " + blueprintName);
-    } catch (IOException e) {
-      throw new InvalidTopologyTemplateException("Error reading blueprint: " + blueprintName);
     } catch (NoSuchStackException e) {
       throw new InvalidTopologyTemplateException("Invalid stack specified in the blueprint: " + blueprintName);
     }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UnitUpdater.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UnitUpdater.java
index 3a70f81..8b7cb67 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UnitUpdater.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UnitUpdater.java
@@ -24,7 +24,6 @@ import java.util.Collections;
 import java.util.Map;
 import java.util.Optional;
 
-import org.apache.ambari.server.controller.StackV2;
 import org.apache.ambari.server.topology.ClusterTopology;
 import org.apache.ambari.server.topology.validators.UnitValidatedProperty;
 
@@ -49,9 +48,8 @@ public class UnitUpdater implements BlueprintConfigurationProcessor.PropertyUpda
   public String updateForClusterCreate(String propertyName,
                                        String origValue,
                                        Map<String, Map<String, String>> properties,
-                                       ClusterTopology topology,
-                                       ConfigurationContext configurationContext) {
-      PropertyUnit stackUnit = PropertyUnit.of(configurationContext.getStack(), serviceName, configType, propertyName);
+                                       ClusterTopology topology) {
+      PropertyUnit stackUnit = PropertyUnit.of(topology.getBlueprint().getStack(), serviceName, configType, propertyName);
       PropertyValue value = PropertyValue.of(propertyName, origValue);
       if (value.hasUnit(stackUnit)) {
         return value.toString();
@@ -63,8 +61,7 @@ public class UnitUpdater implements BlueprintConfigurationProcessor.PropertyUpda
   }
 
   @Override
-  public Collection<String> getRequiredHostGroups(String propertyName, String origValue, Map<String, Map<String, String>> properties,
-                                                  ClusterTopology topology, ConfigurationContext configurationContext) {
+  public Collection<String> getRequiredHostGroups(String propertyName, String origValue, Map<String, Map<String, String>> properties, ClusterTopology topology) {
     return Collections.emptySet();
   }
 
@@ -72,18 +69,18 @@ public class UnitUpdater implements BlueprintConfigurationProcessor.PropertyUpda
     private static final String DEFAULT_UNIT = "m";
     private final String unit;
 
-    public static PropertyUnit of(StackV2 stack, UnitValidatedProperty property) {
+    public static PropertyUnit of(Stack stack, UnitValidatedProperty property) {
       return PropertyUnit.of(stack, property.getServiceName(), property.getConfigType(), property.getPropertyName());
     }
 
-    public static PropertyUnit of(StackV2 stack, String serviceName, String configType, String propertyName) {
+    public static PropertyUnit of(Stack stack, String serviceName, String configType, String propertyName) {
       return new PropertyUnit(
         stackUnit(stack, serviceName, configType, propertyName)
           .map(PropertyUnit::toJvmUnit)
           .orElse(DEFAULT_UNIT));
     }
 
-    private static Optional<String> stackUnit(StackV2 stack, String serviceName, String configType, String propertyName) {
+    private static Optional<String> stackUnit(Stack stack, String serviceName, String configType, String propertyName) {
       try {
         return Optional.ofNullable(
           stack.getConfigurationPropertiesWithMetadata(serviceName, configType)
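
The reverted UnitUpdater normalizes a stack-declared unit to a JVM-style suffix and falls back to "m" when the stack defines none, then appends it unless the value already carries it. A small standalone sketch of that behavior (the toJvmUnit mapping below is a guess at the intent, e.g. "MB" -> "m", not Ambari's exact table):

    import java.util.Optional;

    final class UnitSketch {

      private static final String DEFAULT_UNIT = "m";

      // Map a stack unit string to a JVM-style suffix; illustrative values only.
      static String toJvmUnit(String stackUnit) {
        switch (stackUnit.toUpperCase()) {
          case "MB": return "m";
          case "GB": return "g";
          default:   return stackUnit;
        }
      }

      // Append the unit unless the value already ends with it, mirroring the
      // hasUnit check in updateForClusterCreate above.
      static String withUnit(String value, Optional<String> stackUnit) {
        String unit = stackUnit.map(UnitSketch::toJvmUnit).orElse(DEFAULT_UNIT);
        return value.endsWith(unit) ? value : value + unit;
      }
    }

For instance, withUnit("1024", Optional.empty()) yields "1024m", while withUnit("1024m", Optional.empty()) is returned unchanged.
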
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/BlueprintV2DAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/BlueprintV2DAO.java
deleted file mode 100644
index 6a054e8..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/BlueprintV2DAO.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.dao;
-
-import java.util.List;
-
-import javax.persistence.EntityManager;
-import javax.persistence.TypedQuery;
-
-import org.apache.ambari.server.orm.RequiresSession;
-import org.apache.ambari.server.orm.entities.BlueprintV2Entity;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.inject.Inject;
-import com.google.inject.Provider;
-import com.google.inject.Singleton;
-import com.google.inject.persist.Transactional;
-
-/**
- * Blueprint V2 Data Access Object.
- */
-@Singleton
-public class BlueprintV2DAO {
-
-  protected final static Logger LOG = LoggerFactory.getLogger(BlueprintV2DAO.class);
-
-  /**
-   * JPA entity manager
-   */
-  @Inject
-  Provider<EntityManager> entityManagerProvider;
-
-  @Inject
-  StackDAO stackDAO;
-
-  /**
-   * Find a blueprint with a given name.
-   *
-   * @param blueprint_name name of blueprint to find
-   *
-   * @return  a matching blueprint or null
-   */
-  @RequiresSession
-  public BlueprintV2Entity findByName(String blueprint_name) {
-    return entityManagerProvider.get().find(BlueprintV2Entity.class, blueprint_name);
-  }
-
-  /**
-   * Find all blueprints.
-   *
-   * @return all blueprints or an empty List
-   */
-  @RequiresSession
-  public List<BlueprintV2Entity> findAll() {
-    TypedQuery<BlueprintV2Entity> query = entityManagerProvider.get().
-      createNamedQuery("allBlueprintsv2", BlueprintV2Entity.class);
-    return query.getResultList();
-  }
-
-  /**
-   * Refresh the state of the instance from the database,
-   * overwriting changes made to the entity, if any.
-   *
-   * @param blueprintEntity  entity to refresh
-   */
-  @Transactional
-  public void refresh(BlueprintV2Entity blueprintEntity) {
-    entityManagerProvider.get().refresh(blueprintEntity);
-  }
-
-  /**
-   * Make an instance managed and persistent.
-   *
-   * @param blueprintEntity  entity to persist
-   */
-  @Transactional
-  public void create(BlueprintV2Entity blueprintEntity) {
-    entityManagerProvider.get().persist(blueprintEntity);
-  }
-
-  /**
-   * Merge the state of the given entity into the current persistence context.
-   *
-   * @param blueprintEntity  entity to merge
-   * @return the merged entity
-   */
-  @Transactional
-  public BlueprintV2Entity merge(BlueprintV2Entity blueprintEntity) {
-    return entityManagerProvider.get().merge(blueprintEntity);
-  }
-
-  /**
-   * Remove the entity instance.
-   *
-   * @param blueprintEntity  entity to remove
-   */
-  @Transactional
-  public void remove(BlueprintV2Entity blueprintEntity) {
-    entityManagerProvider.get().remove(merge(blueprintEntity));
-  }
-
-  /**
-   * Remove entity instance by primary key
-   * @param blueprintName Primary key: blueprint name
-   */
-  @Transactional
-  public void removeByName(String blueprintName) {
-    LOG.debug("Removing blueprintv2: {}", blueprintName);
-    entityManagerProvider.get().remove(findByName(blueprintName));
-  }
-
-}
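
For reference, the DAO removed above follows the standard Guice-injected JPA pattern used throughout ambari-server; a typical caller would have looked roughly like the sketch below (BlueprintV2Service is a hypothetical consumer used only for illustration, not a class in this patch):

    // Hypothetical caller of the removed BlueprintV2DAO; not part of this change.
    import com.google.inject.Inject;
    import org.apache.ambari.server.orm.dao.BlueprintV2DAO;
    import org.apache.ambari.server.orm.entities.BlueprintV2Entity;

    public class BlueprintV2Service {
      @Inject
      private BlueprintV2DAO blueprintDAO;

      public BlueprintV2Entity getOrFail(String name) {
        BlueprintV2Entity entity = blueprintDAO.findByName(name);  // primary-key lookup by blueprint_name
        if (entity == null) {
          throw new IllegalArgumentException("No such blueprint: " + name);
        }
        return entity;
      }
    }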
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/BlueprintEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/BlueprintEntity.java
index 8141f07..bab393a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/BlueprintEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/BlueprintEntity.java
@@ -75,6 +75,7 @@ public class BlueprintEntity {
   @OneToMany(cascade = CascadeType.ALL, mappedBy = "blueprint")
   private Collection<BlueprintSettingEntity> settings;
 
+
   /**
    * Get the blueprint name.
    *
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/BlueprintV2Entity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/BlueprintV2Entity.java
deleted file mode 100644
index de9bde4..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/BlueprintV2Entity.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.Basic;
-import javax.persistence.Column;
-import javax.persistence.Entity;
-import javax.persistence.EnumType;
-import javax.persistence.Enumerated;
-import javax.persistence.Id;
-import javax.persistence.NamedQuery;
-import javax.persistence.Table;
-
-import org.apache.ambari.server.state.SecurityType;
-
-/**
- * Entity representing a Blueprint.
- */
-@Table(name = "blueprintv2")
-@NamedQuery(name = "allBlueprintsv2",
-  query = "SELECT blueprint FROM BlueprintV2Entity blueprint")
-@Entity
-public class BlueprintV2Entity {
-
-  @Id
-  @Column(name = "blueprint_name", nullable = false, insertable = true,
-    updatable = false, unique = true, length = 100)
-  private String blueprintName;
-
-  @Basic
-  @Enumerated(value = EnumType.STRING)
-  @Column(name = "security_type", nullable = false, insertable = true, updatable = true)
-  private SecurityType securityType = SecurityType.NONE;
-
-  @Basic
-  @Column(name = "security_descriptor_reference", nullable = true, insertable = true, updatable = true)
-  private String securityDescriptorReference;
-
-  @Basic
-  @Column(name = "content", nullable = false, insertable = true, updatable = true)
-  private String content;
-
-  public String getBlueprintName() {
-    return blueprintName;
-  }
-
-  public void setBlueprintName(String blueprintName) {
-    this.blueprintName = blueprintName;
-  }
-
-  public SecurityType getSecurityType() {
-    return securityType;
-  }
-
-  public void setSecurityType(SecurityType securityType) {
-    this.securityType = securityType;
-  }
-
-  public String getSecurityDescriptorReference() {
-    return securityDescriptorReference;
-  }
-
-  public void setSecurityDescriptorReference(String securityDescriptorReference) {
-    this.securityDescriptorReference = securityDescriptorReference;
-  }
-
-  public String getContent() {
-    return content;
-  }
-
-  public void setContent(String content) {
-    this.content = content;
-  }
-}
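
The entity removed above is a plain JPA mapping keyed on blueprint_name; assembling one before handing it to the DAO's create() would have looked roughly like this (all values are placeholders):

    // Hypothetical population of the removed BlueprintV2Entity; values are illustrative only.
    BlueprintV2Entity entity = new BlueprintV2Entity();
    entity.setBlueprintName("my-blueprint");         // primary key, at most 100 characters
    entity.setSecurityType(SecurityType.NONE);       // persisted as a STRING enum in security_type
    entity.setContent("{\"host_groups\": []}");      // raw blueprint JSON stored in the content column
    blueprintV2DAO.create(entity);                   // persists via the DAO shown earlier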
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostGroupComponentEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostGroupComponentEntity.java
index bd34d29..6b75df7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostGroupComponentEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostGroupComponentEntity.java
@@ -44,19 +44,7 @@ public class HostGroupComponentEntity {
   private String blueprintName;
 
   @Id
-  @Column(name = "service_group", nullable = true, insertable = true, updatable = false)
-  private String serviceGroup;
-
-  @Id
-  @Column(name = "service_name", nullable = true, insertable = true, updatable = false)
-  private String serviceName;
-
-  @Id
-  @Column(name = "type", nullable = false, insertable = true, updatable = false)
-  private String type;
-
-  @Id
-  @Column(name = "instance_name", nullable = true, insertable = true, updatable = false)
+  @Column(name = "name", nullable = false, insertable = true, updatable = false)
   private String name;
 
   @Column(name = "provision_action", nullable = true, insertable = true, updatable = false)
@@ -142,8 +130,6 @@ public class HostGroupComponentEntity {
     this.blueprintName = blueprintName;
   }
 
-
-
   /**
    * Get the provision action associated with this
    *   component.
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostGroupComponentEntityPK.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostGroupComponentEntityPK.java
index 812f0d6..0898133 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostGroupComponentEntityPK.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostGroupComponentEntityPK.java
@@ -38,18 +38,6 @@ public class HostGroupComponentEntityPK {
   @Column(name = "name", nullable = false, insertable = true, updatable = false, length = 100)
   private String name;
 
-  @Id
-  @Column(name = "service_group", nullable = true, insertable = true, updatable = false)
-  private String serviceGroup;
-
-  @Id
-  @Column(name = "service_name", nullable = true, insertable = true, updatable = false)
-  private String serviceName;
-
-  @Id
-  @Column(name = "type", nullable = false, insertable = true, updatable = false)
-  private String type;
-
   /**
    * Get the name of the associated host group.
    *
@@ -104,30 +92,6 @@ public class HostGroupComponentEntityPK {
     this.name = name;
   }
 
-  public String getServiceGroup() {
-    return serviceGroup;
-  }
-
-  public void setServiceGroup(String serviceGroup) {
-    this.serviceGroup = serviceGroup;
-  }
-
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  public String getType() {
-    return type;
-  }
-
-  public void setType(String type) {
-    this.type = type;
-  }
-
   @Override
   public boolean equals(Object o) {
     if (this == o) return true;
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StackEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StackEntity.java
index e804797..95fa07c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StackEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/StackEntity.java
@@ -171,4 +171,4 @@ public class StackEntity {
     buffer.append("}");
     return buffer.toString();
   }
-}
\ No newline at end of file
+}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyConfigurationsEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyConfigurationsEntity.java
deleted file mode 100644
index a29db99..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyConfigurationsEntity.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.orm.entities;
-
-import javax.persistence.Basic;
-import javax.persistence.Column;
-import javax.persistence.Entity;
-import javax.persistence.FetchType;
-import javax.persistence.GeneratedValue;
-import javax.persistence.GenerationType;
-import javax.persistence.Id;
-import javax.persistence.JoinColumn;
-import javax.persistence.Lob;
-import javax.persistence.OneToOne;
-import javax.persistence.Table;
-import javax.persistence.TableGenerator;
-
-
-@Entity
-@Table(name = "topology_configurations")
-@TableGenerator(name = "topology_configurations_id_generator", table = "ambari_sequences",
-        pkColumnName = "sequence_name", valueColumnName = "sequence_value",
-        pkColumnValue = "topology_configurations_id_seq", initialValue = 0)
-public class TopologyConfigurationsEntity {
-
-  @Id
-  @GeneratedValue(strategy = GenerationType.TABLE, generator = "topology_configurations_id_generator")
-  private Long id;
-
-  @OneToOne
-  @JoinColumn(name = "request_id", referencedColumnName = "id", nullable = false)
-  private TopologyRequestEntity topologyRequestEntity;
-
-  @Column(name = "service_group_name", length = 100, nullable = false)
-  private String serviceGroupName;
-
-  @Column(name = "service_name", length = 100, nullable = false)
-  private String serviceName;
-
-  @Column(name = "component_name", length = 100, nullable = true)
-  private String componentName;
-
-  @Column(name = "host_group_name", length = 100, nullable = true)
-  private String hostGroupName;
-
-  @Column(name = "cluster_properties")
-  @Basic(fetch = FetchType.LAZY)
-  @Lob
-  private String configProperties;
-
-  @Column(name = "cluster_attributes")
-  @Basic(fetch = FetchType.LAZY)
-  @Lob
-  private String configAttributes;
-
-  public Long getId() {
-    return id;
-  }
-
-  public void setId(Long id) {
-    this.id = id;
-  }
-
-  public TopologyRequestEntity getTopologyRequestEntity() {
-    return topologyRequestEntity;
-  }
-
-  public void setTopologyRequestEntity(TopologyRequestEntity topologyRequestEntity) {
-    this.topologyRequestEntity = topologyRequestEntity;
-  }
-
-  public String getServiceGroupName() {
-    return serviceGroupName;
-  }
-
-  public void setServiceGroupName(String serviceGroupName) {
-    this.serviceGroupName = serviceGroupName;
-  }
-
-  public String getServiceName() {
-    return serviceName;
-  }
-
-  public void setServiceName(String serviceName) {
-    this.serviceName = serviceName;
-  }
-
-  public String getComponentName() {
-    return componentName;
-  }
-
-  public void setComponentName(String componentName) {
-    this.componentName = componentName;
-  }
-
-  public String getHostGroupName() {
-    return hostGroupName;
-  }
-
-  public void setHostGroupName(String hostGroupName) {
-    this.hostGroupName = hostGroupName;
-  }
-
-  public String getConfigProperties() {
-    return configProperties;
-  }
-
-  public void setConfigProperties(String configProperties) {
-    this.configProperties = configProperties;
-  }
-
-  public String getConfigAttributes() {
-    return configAttributes;
-  }
-
-  public void setConfigAttributes(String configAttributes) {
-    this.configAttributes = configAttributes;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-    TopologyConfigurationsEntity that = (TopologyConfigurationsEntity) o;
-    if (!id.equals(that.id)) return false;
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    return id.hashCode();
-  }
-}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyRequestEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyRequestEntity.java
index 4b431f1..d281838 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyRequestEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/TopologyRequestEntity.java
@@ -19,14 +19,17 @@ package org.apache.ambari.server.orm.entities;
 
 import java.util.Collection;
 
+import javax.persistence.Basic;
 import javax.persistence.CascadeType;
 import javax.persistence.Column;
 import javax.persistence.Entity;
 import javax.persistence.EnumType;
 import javax.persistence.Enumerated;
+import javax.persistence.FetchType;
 import javax.persistence.GeneratedValue;
 import javax.persistence.GenerationType;
 import javax.persistence.Id;
+import javax.persistence.Lob;
 import javax.persistence.NamedQueries;
 import javax.persistence.NamedQuery;
 import javax.persistence.OneToMany;
@@ -59,6 +62,16 @@ public class TopologyRequestEntity {
   @Column(name = "bp_name", length = 100, nullable = false)
   private String blueprintName;
 
+  @Column(name = "cluster_properties")
+  @Basic(fetch = FetchType.LAZY)
+  @Lob
+  private String clusterProperties;
+
+  @Column(name = "cluster_attributes")
+  @Basic(fetch = FetchType.LAZY)
+  @Lob
+  private String clusterAttributes;
+
   @Column(name = "description", length = 1024, nullable = false)
   private String description;
 
@@ -68,9 +81,6 @@ public class TopologyRequestEntity {
   @OneToOne(mappedBy = "topologyRequestEntity", cascade = CascadeType.ALL)
   private TopologyLogicalRequestEntity topologyLogicalRequestEntity;
 
-  @OneToMany(mappedBy = "topologyRequestEntity", cascade = CascadeType.ALL)
-  private Collection<TopologyConfigurationsEntity> topologyConfigurationsEntities;
-
   @Column(name = "provision_action", length = 255, nullable = true)
   @Enumerated(EnumType.STRING)
   private ProvisionAction provisionAction;
@@ -107,6 +117,22 @@ public class TopologyRequestEntity {
     this.blueprintName = blueprintName;
   }
 
+  public String getClusterProperties() {
+    return clusterProperties;
+  }
+
+  public void setClusterProperties(String clusterProperties) {
+    this.clusterProperties = clusterProperties;
+  }
+
+  public String getClusterAttributes() {
+    return clusterAttributes;
+  }
+
+  public void setClusterAttributes(String clusterAttributes) {
+    this.clusterAttributes = clusterAttributes;
+  }
+
   public String getDescription() {
     return description;
   }
@@ -131,14 +157,6 @@ public class TopologyRequestEntity {
     this.topologyLogicalRequestEntity = topologyLogicalRequestEntity;
   }
 
-  public Collection<TopologyConfigurationsEntity> getTopologyConfigurationsEntities() {
-    return topologyConfigurationsEntities;
-  }
-
-  public void setTopologyConfigurationsEntities(Collection<TopologyConfigurationsEntity> topologyConfigurationsEntity) {
-    this.topologyConfigurationsEntities = topologyConfigurationsEntity;
-  }
-
   public ProvisionAction getProvisionAction() {
     return provisionAction;
   }
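
The cluster_properties and cluster_attributes columns restored above hold the cluster-scoped configuration of a topology request as serialized text. A minimal sketch of populating them follows; the use of Jackson here is an assumption made for illustration, since the actual serialization lives elsewhere in the topology persistence code:

    // Illustrative only: storing cluster-scoped configuration on the restored entity fields.
    import java.util.Map;
    import com.fasterxml.jackson.core.JsonProcessingException;
    import com.fasterxml.jackson.databind.ObjectMapper;

    static void storeClusterConfig(TopologyRequestEntity request,
                                   Map<String, Map<String, String>> properties,
                                   Map<String, Map<String, Map<String, String>>> attributes)
        throws JsonProcessingException {
      ObjectMapper mapper = new ObjectMapper();
      request.setClusterProperties(mapper.writeValueAsString(properties));  // cluster_properties LOB
      request.setClusterAttributes(mapper.writeValueAsString(attributes));  // cluster_attributes LOB
    }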
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
index 3811848..8eb6166 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/AmbariContext.java
@@ -52,9 +52,7 @@ import org.apache.ambari.server.controller.RequestStatusResponse;
 import org.apache.ambari.server.controller.RootComponent;
 import org.apache.ambari.server.controller.ServiceComponentHostRequest;
 import org.apache.ambari.server.controller.ServiceComponentRequest;
-import org.apache.ambari.server.controller.ServiceGroupRequest;
 import org.apache.ambari.server.controller.ServiceRequest;
-import org.apache.ambari.server.controller.StackV2;
 import org.apache.ambari.server.controller.internal.AbstractResourceProvider;
 import org.apache.ambari.server.controller.internal.ComponentResourceProvider;
 import org.apache.ambari.server.controller.internal.ConfigGroupResourceProvider;
@@ -66,10 +64,13 @@ import org.apache.ambari.server.controller.internal.ServiceDependencyResourcePro
 import org.apache.ambari.server.controller.internal.ServiceGroupDependencyResourceProvider;
 import org.apache.ambari.server.controller.internal.ServiceGroupResourceProvider;
 import org.apache.ambari.server.controller.internal.ServiceResourceProvider;
+import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.controller.internal.VersionDefinitionResourceProvider;
 import org.apache.ambari.server.controller.predicate.EqualsPredicate;
 import org.apache.ambari.server.controller.spi.ClusterController;
 import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.Request;
+import org.apache.ambari.server.controller.spi.RequestStatus;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.utilities.ClusterControllerHelper;
 import org.apache.ambari.server.orm.dao.RepositoryVersionDAO;
@@ -87,9 +88,12 @@ import org.apache.ambari.server.state.SecurityType;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.utils.RetryHelper;
+import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Function;
+import com.google.common.collect.Collections2;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.Striped;
@@ -127,6 +131,7 @@ public class AmbariContext {
   private static ClusterController clusterController;
   //todo: task id's.  Use existing mechanism for getting next task id sequence
   private final static AtomicLong nextTaskId = new AtomicLong(10000);
+  private static final String DEFAULT_SERVICE_GROUP_NAME = "default_service_group";
 
   private static HostRoleCommandFactory hostRoleCommandFactory;
   private static HostResourceProvider hostResourceProvider;
@@ -203,12 +208,95 @@ public class AmbariContext {
     return getController().getActionManager().getTasks(ids);
   }
 
-  public void createAmbariResources(ClusterTopology topology, String clusterName, SecurityType securityType) {
+  public void createAmbariResources(ClusterTopology topology, String clusterName, SecurityType securityType,
+                                    String repoVersionString, Long repoVersionId) {
+    Stack stack = topology.getBlueprint().getStack();
+    StackId stackId = new StackId(stack.getName(), stack.getVersion());
 
-    StackV2 stack = topology.getBlueprint().getStacks().iterator().next();
+    RepositoryVersionEntity repoVersion = null;
+    if (StringUtils.isEmpty(repoVersionString) && null == repoVersionId) {
+      List<RepositoryVersionEntity> stackRepoVersions = repositoryVersionDAO.findByStack(stackId);
+
+      if (stackRepoVersions.isEmpty()) {
+        // !!! no repos, try to get the version for the stack
+        VersionDefinitionResourceProvider vdfProvider = getVersionDefinitionResourceProvider();
+
+        Map<String, Object> properties = new HashMap<>();
+        properties.put(VersionDefinitionResourceProvider.VERSION_DEF_AVAILABLE_DEFINITION, stackId.toString());
+
+        Request request = new RequestImpl(Collections.<String>emptySet(),
+            Collections.singleton(properties), Collections.<String, String>emptyMap(), null);
+
+        Long defaultRepoVersionId = null;
+
+        try {
+          RequestStatus requestStatus = vdfProvider.createResources(request);
+          if (!requestStatus.getAssociatedResources().isEmpty()) {
+            Resource resource = requestStatus.getAssociatedResources().iterator().next();
+            defaultRepoVersionId = (Long) resource.getPropertyValue(VersionDefinitionResourceProvider.VERSION_DEF_ID);
+          }
+        } catch (Exception e) {
+          throw new IllegalArgumentException(String.format(
+              "Failed to create a default repository version definition for stack %s. "
+              + "This typically is a result of not loading the stack correctly or being able "
+              + "to load information about released versions.  Create a repository version "
+              + " and try again.", stackId), e);
+        }
+
+        repoVersion = repositoryVersionDAO.findByPK(defaultRepoVersionId);
+        // !!! better not!
+        if (null == repoVersion) {
+          throw new IllegalArgumentException(String.format(
+              "Failed to load the default repository version definition for stack %s. "
+              + "Check for a valid repository version and try again.", stackId));
+        }
+
+      } else if (stackRepoVersions.size() > 1) {
+
+        Function<RepositoryVersionEntity, String> function = new Function<RepositoryVersionEntity, String>() {
+          @Override
+          public String apply(RepositoryVersionEntity input) {
+            return input.getVersion();
+          }
+        };
+
+        Collection<String> versions = Collections2.transform(stackRepoVersions, function);
+
+        throw new IllegalArgumentException(String.format("Several repositories were found for %s:  %s.  Specify the version"
+            + " with '%s'", stackId, StringUtils.join(versions, ", "), ProvisionClusterRequest.REPO_VERSION_PROPERTY));
+      } else {
+        repoVersion = stackRepoVersions.get(0);
+        LOG.warn("Cluster is being provisioned using the single matching repository version {}", repoVersion.getVersion());
+      }
+    } else if (null != repoVersionId){
+      repoVersion = repositoryVersionDAO.findByPK(repoVersionId);
+
+      if (null == repoVersion) {
+        throw new IllegalArgumentException(String.format(
+          "Could not identify repository version with repository version id %s for installing services. "
+            + "Specify a valid repository version id with '%s'",
+          repoVersionId, ProvisionClusterRequest.REPO_VERSION_ID_PROPERTY));
+      }
+    } else {
+      repoVersion = repositoryVersionDAO.findByStackAndVersion(stackId, repoVersionString);
+
+      if (null == repoVersion) {
+        throw new IllegalArgumentException(String.format(
+          "Could not identify repository version with stack %s and version %s for installing services. "
+            + "Specify a valid version with '%s'",
+          stackId, repoVersionString, ProvisionClusterRequest.REPO_VERSION_PROPERTY));
+      }
+    }
+
+    // only use a STANDARD repo when creating a new cluster
+    if (repoVersion.getType() != RepositoryType.STANDARD) {
+      throw new IllegalArgumentException(String.format(
+          "Unable to create a cluster using the following repository since it is not a STANDARD type: %s",
+          repoVersion));
+    }
 
     createAmbariClusterResource(clusterName, stack.getName(), stack.getVersion(), securityType);
-    createAmbariServiceAndComponentResources(topology, clusterName);
+    createAmbariServiceAndComponentResources(topology, clusterName, stackId, repoVersion.getId());
   }
 
   public void createAmbariClusterResource(String clusterName, String stackName, String stackVersion, SecurityType securityType) {
@@ -234,57 +322,34 @@ public class AmbariContext {
     }
   }
 
-  public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName) {
+  public void createAmbariServiceAndComponentResources(ClusterTopology topology, String clusterName,
+      StackId stackId, Long repositoryVersionId) {
+    Collection<String> services = topology.getBlueprint().getServices();
 
-    Collection<ServiceGroup> serviceGroups = topology.getBlueprint().getServiceGroups();
-    Set<ServiceGroupRequest> serviceGroupRequests = new HashSet<>();
+    try {
+      Cluster cluster = getController().getClusters().getCluster(clusterName);
+      services.removeAll(cluster.getServices().keySet());
+    } catch (AmbariException e) {
+      throw new RuntimeException("Failed to persist service and component resources: " + e, e);
+    }
     Set<ServiceRequest> serviceRequests = new HashSet<>();
     Set<ServiceComponentRequest> componentRequests = new HashSet<>();
-
-    for (ServiceGroup serviceGroup : serviceGroups) {
-      serviceGroupRequests.add(new ServiceGroupRequest(clusterName, serviceGroup.getName()));
-
-      for (Service service : serviceGroup.getServices()) {
-        String credentialStoreEnabled = topology.getBlueprint().getCredentialStoreEnabled(service.getType());
-
-        String stackIdStr = service.getStackId();
-        StackV2 stack = topology.getBlueprint().getStackById(stackIdStr);
-        StackId stackId = new StackId(stack.getName(), stack.getVersion());
-        RepositoryVersionEntity repoVersion = repositoryVersionDAO.findByStackAndVersion(stackId, stack.getRepoVersion());
-
-        if (null == repoVersion) {
-          throw new IllegalArgumentException(String.format(
-            "Could not identify repository version with stack %s and version %s for installing services. "
-              + "Specify a valid version with '%s'",
-            stackId, stack.getRepoVersion(), ProvisionClusterRequest.REPO_VERSION_PROPERTY));
-        }
-
-        // only use a STANDARD repo when creating a new cluster
-        if (repoVersion.getType() != RepositoryType.STANDARD) {
-          throw new IllegalArgumentException(String.format(
-            "Unable to create a cluster using the following repository since it is not a STANDARD type: %s",
-            repoVersion));
-        }
-
-        serviceRequests.add(new ServiceRequest(clusterName, serviceGroup.getName(), service.getType(), service.getName(),
-          repoVersion.getId(), null, credentialStoreEnabled, null));
-
-        for (ComponentV2 component : topology.getBlueprint().getComponents(service)) {
-          String recoveryEnabled = topology.getBlueprint().getRecoveryEnabled(component);
-          componentRequests.add(new ServiceComponentRequest(clusterName, serviceGroup.getName(), service.getName(),
-            component.getName(), null, recoveryEnabled));
-        }
+    for (String service : services) {
+      String credentialStoreEnabled = topology.getBlueprint().getCredentialStoreEnabled(service);
+      serviceRequests.add(new ServiceRequest(clusterName, DEFAULT_SERVICE_GROUP_NAME, service, service,
+              repositoryVersionId, null, credentialStoreEnabled, null));
+
+      for (String component : topology.getBlueprint().getComponents(service)) {
+        String recoveryEnabled = topology.getBlueprint().getRecoveryEnabled(service, component);
+        componentRequests.add(new ServiceComponentRequest(clusterName, DEFAULT_SERVICE_GROUP_NAME, service, component, null, recoveryEnabled));
       }
-
     }
     try {
-      getServiceGroupResourceProvider().createServiceGroups(serviceGroupRequests);
       getServiceResourceProvider().createServices(serviceRequests);
       getComponentResourceProvider().createComponents(componentRequests);
     } catch (AmbariException | AuthorizationException e) {
       throw new RuntimeException("Failed to persist service and component resources: " + e, e);
     }
-
     // set all services state to INSTALLED->STARTED
     // this is required so the user can start failed services at the service level
     Map<String, Object> installProps = new HashMap<>();
@@ -294,20 +359,20 @@ public class AmbariContext {
     startProps.put(ServiceResourceProvider.SERVICE_SERVICE_STATE_PROPERTY_ID, "STARTED");
     startProps.put(ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
     Predicate predicate = new EqualsPredicate<>(
-    ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
+      ServiceResourceProvider.SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
     try {
       getServiceResourceProvider().updateResources(
-      new RequestImpl(null, Collections.singleton(installProps), null, null), predicate);
+          new RequestImpl(null, Collections.singleton(installProps), null, null), predicate);
 
       getServiceResourceProvider().updateResources(
-      new RequestImpl(null, Collections.singleton(startProps), null, null), predicate);
+        new RequestImpl(null, Collections.singleton(startProps), null, null), predicate);
     } catch (Exception e) {
       // just log as this won't prevent cluster from being provisioned correctly
       LOG.error("Unable to update state of services during cluster provision: " + e, e);
     }
   }
 
-  public void createAmbariHostResources(long  clusterId, String hostName, Map<Service, Collection<ComponentV2>> components)  {
+  public void createAmbariHostResources(long  clusterId, String hostName, Map<String, Collection<String>> components)  {
     Host host;
     try {
       host = getController().getClusters().getHost(hostName);
@@ -341,15 +406,13 @@ public class AmbariContext {
 
     final Set<ServiceComponentHostRequest> requests = new HashSet<>();
 
-    for (Map.Entry<Service, Collection<ComponentV2>> entry : components.entrySet()) {
-      Service service = entry.getKey();
-      for (ComponentV2 component : entry.getValue()) {
+    for (Map.Entry<String, Collection<String>> entry : components.entrySet()) {
+      String service = entry.getKey();
+      for (String component : entry.getValue()) {
         //todo: handle this in a generic manner.  These checks are all over the code
         try {
-          if (cluster.getService(service.getName()) != null && !RootComponent.AMBARI_SERVER.name().equals("AMBARI_SERVER")) {
-            requests.add(new ServiceComponentHostRequest(clusterName, service.getServiceGroup().getName(),
-              service.getName(), component.getName(), hostName, null));
-
+          if (cluster.getService(service) != null && !component.equals(RootComponent.AMBARI_SERVER.name())) {
+            requests.add(new ServiceComponentHostRequest(clusterName, DEFAULT_SERVICE_GROUP_NAME, service, component, hostName, null));
           }
         } catch(AmbariException se) {
           LOG.warn("Service already deleted from cluster: {}", service);
@@ -663,32 +726,34 @@ public class AmbariContext {
    * and the hosts associated with the host group are assigned to the config group.
    */
   private void createConfigGroupsAndRegisterHost(ClusterTopology topology, String groupName) throws AmbariException {
-
-    Map<Service, Map<String, Config>> groupConfigs = new HashMap<>();
-
-
-    // only get user provided configuration for host group per service which includes only CCT/HG and BP/HG properties
-    Collection<Service> serviceConfigurations = topology.getHostGroupInfo().get(groupName).getServiceConfigs();
-    serviceConfigurations.forEach(service -> {
-      Map<String, Map<String, String>> userProvidedGroupProperties = service.getConfiguration().getProperties();
-
-      // iterate over topo host group configs which were defined in
-      for (Map.Entry<String, Map<String, String>> entry : userProvidedGroupProperties.entrySet()) {
-        String type = entry.getKey();
-        Config config = configFactory.createReadOnly(type, groupName, entry.getValue(), null);
-        //todo: attributes
-        Map<String, Config> serviceConfigs = groupConfigs.get(service);
-        if (serviceConfigs == null) {
-          serviceConfigs = new HashMap<>();
-          groupConfigs.put(service, serviceConfigs);
-        }
-        serviceConfigs.put(type, config);
+    Map<String, Map<String, Config>> groupConfigs = new HashMap<>();
+    Stack stack = topology.getBlueprint().getStack();
+
+    // get the host-group config with cluster creation template overrides
+    Configuration topologyHostGroupConfig = topology.
+        getHostGroupInfo().get(groupName).getConfiguration();
+
+    // only get user provided configuration for host group which includes only CCT/HG and BP/HG properties
+    Map<String, Map<String, String>> userProvidedGroupProperties =
+        topologyHostGroupConfig.getFullProperties(1);
+
+    // iterate over topo host group configs which were defined in
+    for (Map.Entry<String, Map<String, String>> entry : userProvidedGroupProperties.entrySet()) {
+      String type = entry.getKey();
+      String service = stack.getServiceForConfigType(type);
+      Config config = configFactory.createReadOnly(type, groupName, entry.getValue(), null);
+      //todo: attributes
+      Map<String, Config> serviceConfigs = groupConfigs.get(service);
+      if (serviceConfigs == null) {
+        serviceConfigs = new HashMap<>();
+        groupConfigs.put(service, serviceConfigs);
       }
-    });
+      serviceConfigs.put(type, config);
+    }
 
     String bpName = topology.getBlueprint().getName();
-    for (Map.Entry<Service, Map<String, Config>> entry : groupConfigs.entrySet()) {
-      Service service = entry.getKey();
+    for (Map.Entry<String, Map<String, Config>> entry : groupConfigs.entrySet()) {
+      String service = entry.getKey();
       Map<String, Config> serviceConfigs = entry.getValue();
       String absoluteGroupName = getConfigurationGroupName(bpName, groupName);
       Collection<String> groupHosts;
@@ -714,7 +779,7 @@ public class AmbariContext {
       });
 
       ConfigGroupRequest request = new ConfigGroupRequest(null, clusterName,
-        absoluteGroupName, service.getName(), service.getServiceGroupName(), service.getName(), "Host Group Configuration",
+        absoluteGroupName, service, DEFAULT_SERVICE_GROUP_NAME, service, "Host Group Configuration",
         Sets.newHashSet(filteredGroupHosts), serviceConfigs);
 
       // get the config group provider and create config group resource
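
The repository version handling restored in createAmbariResources above resolves the version to install in a fixed order of precedence. The helper below is an illustrative condensation of that order using the same DAO calls as the hunk; it is not part of the patch:

    // Condensed, illustrative view of the precedence implemented in createAmbariResources.
    private RepositoryVersionEntity resolveRepoVersion(StackId stackId, String repoVersionString, Long repoVersionId) {
      if (repoVersionId != null) {
        return repositoryVersionDAO.findByPK(repoVersionId);                           // 1. explicit id wins
      }
      if (!StringUtils.isEmpty(repoVersionString)) {
        return repositoryVersionDAO.findByStackAndVersion(stackId, repoVersionString); // 2. then an explicit version string
      }
      List<RepositoryVersionEntity> forStack = repositoryVersionDAO.findByStack(stackId);
      if (forStack.size() == 1) {
        return forStack.get(0);                                                        // 3. a single stack repo is unambiguous
      }
      return null;  // 4. otherwise the real code creates a default VDF (no repos) or fails (several repos)
    }

In every branch the restored code additionally rejects any repository that is not of RepositoryType.STANDARD when creating a new cluster.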
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImplV2.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImplV2.java
deleted file mode 100644
index d31e9d4..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintImplV2.java
+++ /dev/null
@@ -1,408 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed
- * on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.topology;
-
-import static java.util.stream.Collectors.toList;
-import static java.util.stream.Collectors.toMap;
-import static java.util.stream.Collectors.toSet;
-
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-
-import javax.annotation.Nonnull;
-
-import org.apache.ambari.server.controller.StackV2;
-import org.apache.ambari.server.orm.entities.BlueprintEntity;
-import org.apache.ambari.server.state.ConfigHelper;
-import org.apache.ambari.server.state.StackId;
-
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-/**
- * Blueprint implementation.
- */
-public class BlueprintImplV2 implements BlueprintV2 {
-
-  private String name;
-  private SecurityConfiguration securityConfiguration;
-  private Collection<RepositoryVersion> repositoryVersions;
-  private Map<String, ServiceGroup> serviceGroups;
-  private Setting setting;
-  private Configuration configuration;
-
-  // Transient fields
-  @JsonIgnore
-  private Map<String, HostGroupV2Impl> hostGroupMap = new HashMap<>();
-
-  @JsonIgnore
-  private Map<StackId, StackV2> stacks;
-
-  @JsonIgnore
-  private List<RepositorySetting> repoSettings;
-
-  @JsonIgnore
-  private Map<ServiceId, Service> services = new HashMap<>();
-
-  public void setStacks(Map<StackId, StackV2> stacks) {
-    this.stacks = stacks;
-    getAllServices().stream().forEach(s -> s.setStackFromBlueprint(this));
-  }
-
-  @JsonProperty("Blueprints")
-  public void setBlueprints(Blueprints blueprints) {
-    this.name = blueprints.name;
-    this.securityConfiguration = blueprints.securityConfiguration;
-  }
-
-  @JsonProperty("Blueprints")
-  public Blueprints getBlueprints() {
-    Blueprints blueprints = new Blueprints();
-    blueprints.name = this.name;
-    blueprints.securityConfiguration = this.securityConfiguration;
-    return blueprints;
-  }
-
-
-  public void setName(String name) {
-    this.name = name;
-  }
-
-  public void setSecurityConfiguration(SecurityConfiguration securityConfiguration) {
-    this.securityConfiguration = securityConfiguration;
-  }
-
-  @JsonProperty("repository_versions")
-  public void setRepositoryVersions(Collection<RepositoryVersion> repositoryVersions) {
-    this.repositoryVersions = repositoryVersions;
-  }
-
-  @JsonProperty("service_groups")
-  public void setServiceGroups(Collection<ServiceGroup> serviceGroups) {
-    this.serviceGroups = serviceGroups.stream().collect(toMap( sg -> sg.getName(), sg -> sg ));
-  }
-
-  @JsonProperty("host_groups")
-  public void setHostGroups(Collection<HostGroupV2Impl> hostGroups) {
-    this.hostGroupMap = hostGroups.stream().collect(toMap(
-      hg -> hg.getName(),
-      hg -> hg
-    ));
-  }
-
-  @JsonProperty("cluster_settings")
-  public void setClusterSettings(Map<String, Set<HashMap<String, String>>> properties) {
-    this.setting = new Setting(properties);
-  }
-
-  @Override
-  @JsonIgnore
-  public String getName() {
-    return name;
-  }
-
-  @Override
-  public HostGroupV2 getHostGroup(String name) {
-    return hostGroupMap.get(name);
-  }
-
-  @Override
-  @JsonProperty("host_groups")
-  public Map<String, ? extends HostGroupV2> getHostGroups() {
-    return hostGroupMap;
-  }
-
-  @Override
-  public Collection<StackV2> getStacks() {
-    return stacks.values();
-  }
-
-  @Override
-  public Collection<String> getStackIds() {
-    return repositoryVersions.stream().map(rv -> rv.getStackId()).collect(toList());
-  }
-
-  @Override
-  public Collection<ServiceGroup> getServiceGroups() {
-    return serviceGroups.values();
-  }
-
-  @Override
-  public ServiceGroup getServiceGroup(String name) {
-    return serviceGroups.get(name);
-  }
-
-  @Override
-  @JsonIgnore
-  public Collection<ServiceId> getAllServiceIds() {
-    return getHostGroups().values().stream().flatMap(hg -> hg.getServiceIds().stream()).collect(toSet());
-  }
-
-  @Override
-  public Service getServiceById(ServiceId serviceId) {
-    return null;
-  }
-
-  @Override
-  @JsonIgnore
-  public Collection<Service> getServicesFromServiceGroup(ServiceGroup serviceGroup, String serviceType) {
-    if (serviceType == null) {
-      return serviceGroup.getServices();
-    } else {
-      return serviceGroup.getServices().stream().filter(
-              service -> service.getType().equalsIgnoreCase(serviceType)).collect(toList());
-    }
-  }
-
-  @Override
-  @JsonIgnore
-  public StackV2 getStackById(String stackId) {
-    return stacks.get(new StackId(stackId));
-  }
-
-  @Override
-  @JsonIgnore
-  public Collection<Service> getAllServices() {
-    return services.values();
-  }
-
-  @Override
-  @JsonIgnore
-  public Service getService(ServiceId serviceId) {
-    return services.get(serviceId);
-  }
-
-  @Override
-  @JsonIgnore
-  public Collection<String> getAllServiceTypes() {
-    return getServiceGroups().stream().flatMap(sg -> sg.getServices().stream()).map(s -> s.getType()).collect(toSet());
-  }
-
-  @Override
-  @JsonIgnore
-  public Collection<Service> getServicesByType(String serviceType) {
-    return serviceGroups.values().stream().flatMap(sg -> sg.getServiceByType(serviceType).stream()).collect(toList());
-  }
-
-  @Override
-  @JsonIgnore
-  public Collection<ComponentV2> getComponents(Service service) {
-    return getHostGroupsForService(service.getId()).stream().flatMap(
-      hg -> hg.getComponents().stream()).
-      collect(toList());
-  }
-
-  @Override
-  @JsonIgnore
-  public Collection<ComponentV2> getComponentsByType(Service service, String componentType) {
-    return getComponents(service).stream().filter(
-            component -> component.getType().equalsIgnoreCase(componentType)).collect(toList());
-  }
-
-  @Override
-  @JsonIgnore
-  public Collection<ComponentV2> getComponents(ServiceId serviceId) {
-    return getHostGroupsForService(serviceId).stream().flatMap(hg -> hg.getComponents().stream()).collect(toSet());
-  }
-
-  @Override
-  @JsonIgnore
-  public Collection<HostGroupV2> getHostGroupsForService(ServiceId serviceId) {
-    return getHostGroups().values().stream().filter(hg -> !hg.getComponentsByServiceId(serviceId).isEmpty()).collect(toList());
-  }
-
-  @Override
-  @JsonIgnore
-  public Collection<HostGroupV2> getHostGroupsForComponent(ComponentV2 component) {
-    return hostGroupMap.values().stream().filter(hg -> hg.getComponents().contains(component)).collect(toList());
-  }
-
-  @Override
-  @JsonIgnore
-  public Configuration getConfiguration() {
-    if (null == configuration) {
-      configuration = new Configuration(new HashMap<>(), new HashMap<>());
-      getServiceGroups().stream().forEach( sg -> addChildConfiguration(configuration, sg.getConfiguration()) );
-      getHostGroups().values().stream().forEach(
-        hg -> hg.getComponents().stream().forEach(
-          c -> addChildConfiguration(configuration, c.getConfiguration())));
-    }
-    return configuration;
-  }
-
-  private void addChildConfiguration(Configuration parent, Configuration child) {
-    child.setParentConfiguration(parent);
-    parent.getProperties().putAll(child.getProperties());
-    parent.getAttributes().putAll(child.getAttributes());
-  }
-
-  @Override
-  @JsonProperty("cluster_settings")
-  public Setting getSetting() {
-    return this.setting;
-  }
-
-  @Nonnull
-  @Override
-  @JsonIgnore
-  public Collection<String> getAllServiceNames() {
-    return getAllServices().stream().map(s -> s.getName()).collect(toList());
-  }
-
-  @Nonnull
-  @Override
-  public Collection<String> getComponentNames(ServiceId serviceId) {
-    return getComponents(serviceId).stream().map(c -> c.getName()).collect(toList());
-  }
-
-  @Override
-  public String getRecoveryEnabled(ComponentV2 component) {
-    Optional<String> value =
-      setting.getSettingValue(Setting.SETTING_NAME_RECOVERY_SETTINGS, Setting.SETTING_NAME_RECOVERY_ENABLED);
-    // TODO: handle service and component level settings
-    return value.orElse(null);
-  }
-
-//  private Optional<String> getSettingValue(String settingCategory, String settingName, Optional<String> nameFilter) {
-//    if (this.setting != null) {
-//      Set<HashMap<String, String>> settingValue = this.setting.getSettingValue(settingCategory);
-//      for (Map<String, String> setting : settingValue) {
-//        String name = setting.get(Setting.SETTING_NAME_NAME);
-//        if (!nameFilter.isPresent() || StringUtils.equals(name, nameFilter.get())) {
-//          String value = setting.get(settingName);
-//          if (!StringUtils.isEmpty(value)) {
-//            return Optional.of(value);
-//          }
-//        }
-//      }
-//    }
-//    return Optional.empty();
-//  }
-
-  @Override
-  public String getCredentialStoreEnabled(String serviceName) {
-    // TODO: this is a service level setting, handle appropriately
-    return null;
-  }
-
-  @Override
-  public boolean shouldSkipFailure() {
-    Optional<String> shouldSkipFailure = setting.getSettingValue(
-      Setting.SETTING_NAME_DEPLOYMENT_SETTINGS,
-      Setting.SETTING_NAME_SKIP_FAILURE);
-    return shouldSkipFailure.isPresent() ? shouldSkipFailure.get().equalsIgnoreCase("true") : false;
-  }
-
-  @Override
-  @JsonIgnore
-  public SecurityConfiguration getSecurity() {
-    return this.securityConfiguration;
-  }
-
-  @Override
-  public void validateRequiredProperties() throws InvalidTopologyException {
-    // TODO implement
-  }
-
-  @Override
-  public void validateTopology() throws InvalidTopologyException {
-    // TODO implement
-  }
-
-
-  @Override
-  public boolean isValidConfigType(String configType) {
-    if (ConfigHelper.CLUSTER_ENV.equals(configType) || "global".equals(configType)) {
-      return true;
-    }
-    final Set<String> serviceNames =
-      getAllServices().stream().map(s -> s.getName()).collect(toSet());
-    return getStacks().stream().anyMatch(
-      stack -> {
-        String service = stack.getServiceForConfigType(configType);
-        return serviceNames.contains(service);
-      }
-    );
-  }
-
-  public void postDeserialization() {
-    // Maintain a ServiceId -> Service map
-    this.services = getAllServiceIds().stream().collect(toMap(
-      serviceId -> serviceId,
-      serviceId -> {
-        ServiceGroup sg = getServiceGroup(serviceId.getServiceGroup());
-        Service service = null != sg ? sg.getServiceByName(serviceId.getName()) : null;
-        if (null == service) {
-          throw new IllegalStateException("Cannot find service for service id: " + serviceId);
-        }
-        return service;
-      }
-    ));
-
-    // Set Service -> ServiceGroup references and Service -> Service dependencies
-    getAllServices().stream().forEach( s -> {
-      s.setServiceGroup(serviceGroups.get(s.getServiceGroupId()));
-      Map<ServiceId, Service> dependencies = s.getDependentServiceIds().stream().collect(toMap(
-        serviceId -> serviceId,
-        serviceId -> getService(serviceId)
-      ));
-      s.setDependencyMap(dependencies);
-    });
-
-
-    // Set HostGroup -> Services and Component -> Service references
-    for (HostGroupV2Impl hg: hostGroupMap.values()) {
-      hg.setServiceMap(hg.getServiceIds().stream().collect(toMap(
-        serviceId -> serviceId,
-        serviceId -> this.services.get(serviceId)
-      )));
-      for (ComponentV2 comp: hg.getComponents()) {
-        comp.setService(hg.getService(comp.getServiceId()));
-      }
-    }
-  }
-
-  @Override
-  public BlueprintEntity toEntity() {
-    throw new UnsupportedOperationException("This is not supported here and will be removed. Pls. use BlueprintV2Factory");
-  }
-
-  @Override
-  public List<RepositorySetting> getRepositorySettings() {
-    return repoSettings;
-  }
-
-  /**
-   * Class to support Jackson data binding. Instances are used only temporarily during serialization
-   */
-  public class Blueprints {
-    @JsonProperty("blueprint_name")
-    public String name;
-    @JsonProperty("security")
-    public SecurityConfiguration securityConfiguration;
-
-    public Blueprints() { }
-  }
-
-}
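
The implementation removed above was bound from the blueprint JSON with Jackson and then finished wiring its object graph in postDeserialization(). Reading one in would have looked roughly like this sketch (blueprintJson and stacksById are placeholder inputs; the real ObjectMapper configuration lived in BlueprintV2Factory, whose deletion follows below):

    // Hypothetical use of the removed BlueprintImplV2; exception handling omitted for brevity.
    ObjectMapper mapper = new ObjectMapper();
    BlueprintImplV2 blueprint = mapper.readValue(blueprintJson, BlueprintImplV2.class);  // throws IOException
    blueprint.postDeserialization();   // builds the ServiceId -> Service map and host-group references
    blueprint.setStacks(stacksById);   // Map<StackId, StackV2>; pushes resolved stacks onto each service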
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintV2.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintV2.java
deleted file mode 100644
index 9ca0248..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintV2.java
+++ /dev/null
@@ -1,251 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.topology;
-
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-
-import javax.annotation.Nonnull;
-
-import org.apache.ambari.server.controller.StackV2;
-import org.apache.ambari.server.orm.entities.BlueprintEntity;
-
-
-/**
- * Blueprint representation.
- */
-public interface BlueprintV2 {
-
-  /**
-   * Get the name of the blueprint.
-   *
-   * @return blueprint name
-   */
-  String getName();
-
-  /**
-   * Get a hostgroup specified by name.
-   *
-   * @param name  name of the host group to get
-   *
-   * @return the host group with the given name or null
-   */
-  HostGroupV2 getHostGroup(String name);
-
-  /**
-   * Get the host groups contained in the blueprint.
-   * @return map of host group name to host group
-   */
-  Map<String, ? extends HostGroupV2> getHostGroups();
-
-  /**
-   * Get the stacks associated with the blueprint.
-   *
-   * @return associated stacks
-   */
-  Collection<StackV2> getStacks();
-
-  /**
-   * @return associated stack ids
-   */
-  public Collection<String> getStackIds();
-
-  StackV2 getStackById(String stackId);
-
-  Collection<ServiceGroup> getServiceGroups();
-
-  ServiceGroup getServiceGroup(String name);
-
-  /**
-   * Get the ids of all services represented in the blueprint.
-   *
-   * @return collection of all represented service ids
-   */
-  Collection<ServiceId> getAllServiceIds();
-
-  /**
-   * Get service by Id
-   * @param serviceId
-   * @return
-   */
-  Service getServiceById(ServiceId serviceId);
-
-  /**
-   * Get all of the services represented in the blueprint.
-   *
-   * @return collection of all represented services
-   */
-  Collection<Service> getAllServices();
-
-  /**
-   * Get the names of all the services represented in the blueprint.
-   *
-   * @return collection of all represented service names
-   */
-  @Nonnull
-  Collection<String> getAllServiceNames();
-
-
-
-  /**
-   * Get all of the service types represented in the blueprint.
-   *
-   * @return collection of all represented service types
-   */
-  Collection<String> getAllServiceTypes();
-
-  /**
-   * Get all of the services represented in the blueprint with a given type.
-   *
-   * @return collection of all represented services represented in the blueprint with a given type.
-   */
-  Collection<Service> getServicesByType(String serviceType);
-
-  Service getService(ServiceId serviceId);
-
-  /**
-   * Get services by type from a service group.
-   * @param serviceGroup
-   * @param serviceType
-   * @return
-   */
-  Collection<Service> getServicesFromServiceGroup(ServiceGroup serviceGroup, String serviceType);
-
-  /**
-   * Get the names of the components that are included in the blueprint for the specified service.
-   *
-   * @param serviceId  serviceId
-   *
-   * @return collection of component names for the service.  Will not return null.
-   */
-  @Nonnull
-  Collection<String> getComponentNames(ServiceId serviceId);
-
-  /**
-   * Get the components that are included in the blueprint for the specified service.
-   *
-   * @param serviceId  serviceId
-   *
-   * @return collection of components for the service.  Will not return null.
-   */
-  Collection<ComponentV2> getComponents(ServiceId serviceId);
-
-  Collection<ComponentV2> getComponents(Service service);
-
-
-  /**
-   * Get components by type from a service.
-   * @param service
-   * @param componentType
-   * @return
-   */
-  Collection<ComponentV2> getComponentsByType(Service service, String componentType);
-
-
-  /**
-   * Get the host groups which contain components for the specified service.
-   *
-   * @param serviceId  service Id
-   *
-   * @return collection of host groups containing components for the specified service;
-   *         will not return null
-   */
-  Collection<HostGroupV2> getHostGroupsForService(ServiceId serviceId);
-
-  /**
-   * Get the host groups which contain the give component.
-   *
-   * @param component  component name
-   *
-   * @return collection of host groups containing the specified component; will not return null
-   */
-  Collection<HostGroupV2> getHostGroupsForComponent(ComponentV2 component);
-
-
-  /**
-   * Get the Blueprint cluster scoped configuration.
-   * The blueprint cluster scoped configuration has the stack
-   * configuration with the config types associated with the blueprint
-   * set as it's parent.
-   *
-   * @return blueprint cluster scoped configuration
-   */
-  @Deprecated
-  Configuration getConfiguration();
-
-
-  /**
-   * Get the Blueprint cluster scoped setting.
-   * The blueprint cluster scoped setting has the setting properties
-   * with the setting names associated with the blueprint.
-   *
-   * @return blueprint cluster scoped setting
-   */
-  Setting getSetting();
-
-
-  /**
-   * Get whether a component is enabled for auto start.
-   *
-   * @param component - Component.
-   *
-   * @return null if value is not specified; true or false if specified.
-   */
-  String getRecoveryEnabled(ComponentV2 component);
-
-  /**
-   * Get whether a service is enabled for credential store use.
-   *
-   * @param serviceName - Service name.
-   *
-   * @return null if value is not specified; true or false if specified.
-   */
-  String getCredentialStoreEnabled(String serviceName);
-
-  /**
-   * Check if auto skip failure is enabled.
-   * @return true if enabled, otherwise false.
-   */
-  boolean shouldSkipFailure();
-
-
-  SecurityConfiguration getSecurity();
-
-  void validateRequiredProperties() throws InvalidTopologyException;
-
-  void validateTopology() throws InvalidTopologyException;
-
-  /**
-   *
-   * A config type is valid if at least one service in the blueprint relates to it; cluster-env and global are always considered valid.
-   * @param configType
-   * @return
-   */
-  boolean isValidConfigType(String configType);
-
-  /**
-   * Obtain the blueprint as an entity.
-   *
-   * @return entity representation of the blueprint
-   */
-  BlueprintEntity toEntity();
-
-  List<RepositorySetting> getRepositorySettings();
-}
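For reference, a minimal sketch of how the isValidConfigType contract above can be used to prune configuration types that no longer map to any service. The helper name and the removeConfigType call are assumptions for illustration, not part of this commit; getAllConfigTypes appears in the Configuration diff further below.

    // Illustrative sketch only: drop config types the blueprint no longer considers valid.
    // Uses the BlueprintV2 and Configuration APIs shown in this commit; removeConfigType is assumed.
    static void pruneInvalidConfigTypes(BlueprintV2 blueprint, Configuration configuration) {
      for (String configType : new HashSet<>(configuration.getAllConfigTypes())) {
        if (!blueprint.isValidConfigType(configType)) {
          configuration.removeConfigType(configType);
        }
      }
    }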
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintV2Factory.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintV2Factory.java
deleted file mode 100644
index 7b228e5..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintV2Factory.java
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed
- * on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.topology;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Optional;
-import java.util.stream.Collectors;
-
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.ObjectNotFoundException;
-import org.apache.ambari.server.controller.AmbariManagementController;
-import org.apache.ambari.server.controller.StackV2;
-import org.apache.ambari.server.controller.StackV2Factory;
-import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.apache.ambari.server.orm.dao.BlueprintV2DAO;
-import org.apache.ambari.server.orm.dao.StackDAO;
-import org.apache.ambari.server.orm.entities.BlueprintV2Entity;
-import org.apache.ambari.server.orm.entities.StackEntity;
-import org.apache.ambari.server.stack.NoSuchStackException;
-import org.apache.ambari.server.state.StackId;
-
-import com.fasterxml.jackson.annotation.JsonInclude;
-import com.fasterxml.jackson.core.JsonProcessingException;
-import com.fasterxml.jackson.core.Version;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.module.SimpleAbstractTypeResolver;
-import com.fasterxml.jackson.databind.module.SimpleModule;
-import com.google.inject.Inject;
-
-public class BlueprintV2Factory {
-  // Blueprints
-  protected static final String BLUEPRINT_NAME_PROPERTY_ID =
-    PropertyHelper.getPropertyId("Blueprints", "blueprint_name");
-  protected static final String STACK_NAME_PROPERTY_ID =
-    PropertyHelper.getPropertyId("Blueprints", "stack_name");
-  protected static final String STACK_VERSION_PROPERTY_ID =
-    PropertyHelper.getPropertyId("Blueprints", "stack_version");
-
-  // Host Groups
-  protected static final String HOST_GROUP_PROPERTY_ID = "host_groups";
-  protected static final String HOST_GROUP_NAME_PROPERTY_ID = "name";
-  protected static final String HOST_GROUP_CARDINALITY_PROPERTY_ID = "cardinality";
-
-  // Host Group Components
-  protected static final String COMPONENT_PROPERTY_ID ="components";
-  protected static final String COMPONENT_NAME_PROPERTY_ID ="name";
-  protected static final String COMPONENT_PROVISION_ACTION_PROPERTY_ID = "provision_action";
-
-  // Configurations
-  protected static final String CONFIGURATION_PROPERTY_ID = "configurations";
-  protected static final String PROPERTIES_PROPERTY_ID = "properties";
-  protected static final String PROPERTIES_ATTRIBUTES_PROPERTY_ID = "properties_attributes";
-
-  protected static final String SETTINGS_PROPERTY_ID = "settings";
-
-  private static BlueprintV2DAO blueprintDAO;
-  private static StackDAO stackDao;
-  private ConfigurationFactory configFactory = new ConfigurationFactory();
-
-  private StackV2Factory stackFactory;
-
-  protected BlueprintV2Factory() {
-
-  }
-  protected BlueprintV2Factory(StackV2Factory stackFactory) {
-    this.stackFactory = stackFactory;
-  }
-
-  public static BlueprintV2Factory create(AmbariManagementController controller) {
-    return new BlueprintV2Factory(new StackV2Factory(controller));
-  }
-
-  public BlueprintV2 getBlueprint(String blueprintName) throws NoSuchStackException, NoSuchBlueprintException, IOException {
-    BlueprintV2Entity entity =
-      Optional.ofNullable(blueprintDAO.findByName(blueprintName)).orElseThrow(() -> new NoSuchBlueprintException(blueprintName));
-    return convertFromEntity(entity);
-  }
-
-  public BlueprintV2 convertFromJson(String json) throws IOException {
-    BlueprintImplV2 blueprintV2 = createObjectMapper().readValue(json, BlueprintImplV2.class);
-    blueprintV2.postDeserialization();
-    blueprintV2.setStacks(
-      blueprintV2.getStackIds().stream().collect(Collectors.toMap(
-        stackId -> new StackId(stackId),
-        stackId -> parseStack(new StackId(stackId))
-      ))
-    );
-    return blueprintV2;
-  }
-
-  public BlueprintV2 convertFromEntity(BlueprintV2Entity blueprintEntity) throws IOException {
-    return convertFromJson(blueprintEntity.getContent());
-  }
-
-  public Map<String, Object> convertToMap(BlueprintV2Entity entity) throws IOException {
-    return createObjectMapper().readValue(entity.getContent(), HashMap.class);
-  }
-
-  private StackV2 parseStack(StackId stackId) {
-    try {
-      return stackFactory.create(stackId.getStackName(), stackId.getStackVersion());
-    } catch (AmbariException e) {
-      throw new IllegalArgumentException(
-        String.format("Unable to parse stack. name=%s, version=%s", stackId.getStackName(), stackId.getStackVersion()),
-        e);
-    }
-  }
-
-  private StackV2 parseStack(StackEntity stackEntity) {
-    return parseStack(new StackId(stackEntity.getStackName(), stackEntity.getStackVersion()));
-  }
-
-  public BlueprintV2Entity convertToEntity(BlueprintV2 blueprint) throws JsonProcessingException {
-    BlueprintV2Entity entity = new BlueprintV2Entity();
-    String content = createObjectMapper().writeValueAsString(blueprint);
-    entity.setContent(content);
-    entity.setBlueprintName(blueprint.getName());
-    entity.setSecurityType(blueprint.getSecurity().getType());
-    entity.setSecurityDescriptorReference(blueprint.getSecurity().getDescriptorReference());
-    return entity;
-  }
-
-  /**
-   * Convert a map of properties to a blueprint entity.
-   *
-   * @param properties  property map
-   * @param securityConfiguration security related properties
-   * @return new blueprint entity
-   */
-  @SuppressWarnings("unchecked")
-  public BlueprintV2 createBlueprint(Map<String, Object> properties, SecurityConfiguration securityConfiguration) throws NoSuchStackException, IOException {
-    String name = String.valueOf(properties.get(BLUEPRINT_NAME_PROPERTY_ID));
-    // String.valueOf() will return "null" if value is null
-    if (name.equals("null") || name.isEmpty()) {
-      //todo: should throw a checked exception from here
-      throw new IllegalArgumentException("Blueprint name must be provided");
-    }
-    ObjectMapper om = createObjectMapper();
-    String json = om.writeValueAsString(properties);
-    BlueprintImplV2 blueprint = om.readValue(json, BlueprintImplV2.class);
-    blueprint.postDeserialization();
-    Map<String, StackV2> stacks = new HashMap<>();
-    for (String stackId: blueprint.getStackIds()) {
-      stacks.put(stackId, stackFactory.create(stackId));
-    }
-    blueprint.setSecurityConfiguration(securityConfiguration);
-    return blueprint;
-  }
-
-  protected StackV2 createStack(Map<String, Object> properties) throws NoSuchStackException {
-    String stackName = String.valueOf(properties.get(STACK_NAME_PROPERTY_ID));
-    String stackVersion = String.valueOf(properties.get(STACK_VERSION_PROPERTY_ID));
-    try {
-      //todo: don't pass in controller
-      return stackFactory.create(stackName, stackVersion);
-    } catch (ObjectNotFoundException e) {
-      throw new NoSuchStackException(stackName, stackVersion);
-    } catch (AmbariException e) {
-      //todo:
-      throw new RuntimeException("An error occurred parsing the stack information.", e);
-    }
-  }
-
-  static ObjectMapper createObjectMapper() {
-    ObjectMapper mapper = new ObjectMapper();
-    SimpleModule module = new SimpleModule("CustomModel", Version.unknownVersion());
-    SimpleAbstractTypeResolver resolver = new SimpleAbstractTypeResolver();
-    resolver.addMapping(HostGroupV2.class, HostGroupV2Impl.class);
-    module.setAbstractTypes(resolver);
-    mapper.registerModule(module);
-    mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL);
-    return mapper;
-  }
-
-  /**
-   * Static initialization.
-   *
-   * @param dao  blueprint data access object
-   */
-  @Inject
-  public static void init(BlueprintV2DAO dao) {
-    blueprintDAO = dao;
-  }
-}
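For reference, the createObjectMapper() removed above relies on Jackson's SimpleAbstractTypeResolver to bind the abstract HostGroupV2 type to HostGroupV2Impl so readValue() can instantiate host groups from blueprint JSON. A self-contained sketch of the same pattern follows; the Animal/Dog types are illustrative stand-ins, not anything from this codebase.

    import com.fasterxml.jackson.core.Version;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.module.SimpleAbstractTypeResolver;
    import com.fasterxml.jackson.databind.module.SimpleModule;

    public class AbstractTypeMappingExample {

      // Illustrative types only -- stand-ins for HostGroupV2 / HostGroupV2Impl above.
      public interface Animal { String getName(); }

      public static class Dog implements Animal {
        public String name;
        @Override public String getName() { return name; }
      }

      public static void main(String[] args) throws Exception {
        SimpleModule module = new SimpleModule("ExampleModule", Version.unknownVersion());
        SimpleAbstractTypeResolver resolver = new SimpleAbstractTypeResolver();
        resolver.addMapping(Animal.class, Dog.class);   // bind the abstract type to an implementation
        module.setAbstractTypes(resolver);

        ObjectMapper mapper = new ObjectMapper();
        mapper.registerModule(module);

        // Without the mapping, Jackson could not instantiate the Animal interface.
        Animal animal = mapper.readValue("{\"name\":\"Rex\"}", Animal.class);
        System.out.println(animal.getName());   // prints: Rex
      }
    }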
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorImpl.java
index 7155dcc..fbd0e4b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorImpl.java
@@ -18,10 +18,13 @@
 
 package org.apache.ambari.server.topology;
 
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.ambari.server.StaticallyInject;
 import org.apache.ambari.server.controller.internal.Stack;
@@ -149,31 +152,31 @@ public class BlueprintValidatorImpl implements BlueprintValidator {
               " using existing db!");
           }
         }
-//        if (configurationContext.isNameNodeHAEnabled(clusterConfigurations) && component.equals("NAMENODE")) {
-//            Map<String, String> hadoopEnvConfig = clusterConfigurations.get("hadoop-env");
-//            if(hadoopEnvConfig != null && !hadoopEnvConfig.isEmpty() && hadoopEnvConfig.containsKey("dfs_ha_initial_namenode_active") && hadoopEnvConfig.containsKey("dfs_ha_initial_namenode_standby")) {
-//              ArrayList<HostGroup> hostGroupsForComponent = new ArrayList<>(blueprint.getHostGroupsForComponent(component));
-//              Set<String> givenHostGroups = new HashSet<>();
-//              givenHostGroups.add(hadoopEnvConfig.get("dfs_ha_initial_namenode_active"));
-//              givenHostGroups.add(hadoopEnvConfig.get("dfs_ha_initial_namenode_standby"));
-//              if(givenHostGroups.size() != hostGroupsForComponent.size()) {
-//                 throw new IllegalArgumentException("NAMENODE HA host groups mapped incorrectly for properties 'dfs_ha_initial_namenode_active' and 'dfs_ha_initial_namenode_standby'. Expected Host groups are :" + hostGroupsForComponent);
-//              }
-//              if(HostGroup.HOSTGROUP_REGEX.matcher(hadoopEnvConfig.get("dfs_ha_initial_namenode_active")).matches() && HostGroup.HOSTGROUP_REGEX.matcher(hadoopEnvConfig.get("dfs_ha_initial_namenode_standby")).matches()){
-//                for (HostGroup hostGroupForComponent : hostGroupsForComponent) {
-//                   Iterator<String> itr = givenHostGroups.iterator();
-//                   while(itr.hasNext()){
-//                      if(itr.next().contains(hostGroupForComponent.getName())){
-//                         itr.remove();
-//                      }
-//                   }
-//                 }
-//                 if(!givenHostGroups.isEmpty()){
-//                    throw new IllegalArgumentException("NAMENODE HA host groups mapped incorrectly for properties 'dfs_ha_initial_namenode_active' and 'dfs_ha_initial_namenode_standby'. Expected Host groups are :" + hostGroupsForComponent);
-//                 }
-//                }
-//              }
-//        }
+        if (ClusterTopologyImpl.isNameNodeHAEnabled(clusterConfigurations) && component.equals("NAMENODE")) {
+            Map<String, String> hadoopEnvConfig = clusterConfigurations.get("hadoop-env");
+            if(hadoopEnvConfig != null && !hadoopEnvConfig.isEmpty() && hadoopEnvConfig.containsKey("dfs_ha_initial_namenode_active") && hadoopEnvConfig.containsKey("dfs_ha_initial_namenode_standby")) {
+              ArrayList<HostGroup> hostGroupsForComponent = new ArrayList<>(blueprint.getHostGroupsForComponent(component));
+              Set<String> givenHostGroups = new HashSet<>();
+              givenHostGroups.add(hadoopEnvConfig.get("dfs_ha_initial_namenode_active"));
+              givenHostGroups.add(hadoopEnvConfig.get("dfs_ha_initial_namenode_standby"));
+              if(givenHostGroups.size() != hostGroupsForComponent.size()) {
+                 throw new IllegalArgumentException("NAMENODE HA host groups mapped incorrectly for properties 'dfs_ha_initial_namenode_active' and 'dfs_ha_initial_namenode_standby'. Expected Host groups are :" + hostGroupsForComponent);
+              }
+              if(HostGroup.HOSTGROUP_REGEX.matcher(hadoopEnvConfig.get("dfs_ha_initial_namenode_active")).matches() && HostGroup.HOSTGROUP_REGEX.matcher(hadoopEnvConfig.get("dfs_ha_initial_namenode_standby")).matches()){
+                for (HostGroup hostGroupForComponent : hostGroupsForComponent) {
+                   Iterator<String> itr = givenHostGroups.iterator();
+                   while(itr.hasNext()){
+                      if(itr.next().contains(hostGroupForComponent.getName())){
+                         itr.remove();
+                      }
+                   }
+                 }
+                 if(!givenHostGroups.isEmpty()){
+                    throw new IllegalArgumentException("NAMENODE HA host groups mapped incorrectly for properties 'dfs_ha_initial_namenode_active' and 'dfs_ha_initial_namenode_standby'. Expected Host groups are :" + hostGroupsForComponent);
+                 }
+                }
+              }
+          }
 
         if (component.equals("HIVE_METASTORE")) {
           Map<String, String> hiveEnvConfig = clusterConfigurations.get("hive-env");
@@ -311,13 +314,12 @@ public class BlueprintValidatorImpl implements BlueprintValidator {
     Map<String, Map<String, String>> configProperties = blueprint.getConfiguration().getProperties();
     Collection<String> cardinalityFailures = new HashSet<>();
     //todo: don't hard code this HA logic here
-//TODO
-//    if (ClusterTopologyImpl.isNameNodeHAEnabled(configProperties) &&
-//        (component.equals("SECONDARY_NAMENODE"))) {
-//      // override the cardinality for this component in an HA deployment,
-//      // since the SECONDARY_NAMENODE should not be started in this scenario
-//      cardinality = new Cardinality("0");
-//    }
+    if (ClusterTopologyImpl.isNameNodeHAEnabled(configProperties) &&
+        (component.equals("SECONDARY_NAMENODE"))) {
+      // override the cardinality for this component in an HA deployment,
+      // since the SECONDARY_NAMENODE should not be started in this scenario
+      cardinality = new Cardinality("0");
+    }
 
     int actualCount = blueprint.getHostGroupsForComponent(component).size();
     if (! cardinality.isValidCount(actualCount)) {
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorV2.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorV2.java
deleted file mode 100644
index cfe083e..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/BlueprintValidatorV2.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed
- * on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.topology;
-
-public interface BlueprintValidatorV2 {
-
-  /**
-   * Validate blueprint topology.
-   *
-   * @param blueprint The blueprint to validate
-   *
-   * @throws InvalidTopologyException if the topology is invalid
-   */
-  void validateTopology(BlueprintV2 blueprint) throws InvalidTopologyException;
-
-  /**
-   * Validate that required properties are provided.
-   * This doesn't include password properties.
-   *
-   * @param blueprint The blueprint to validate
-   *
-   * @throws InvalidTopologyException if required properties are not set in blueprint
-   */
-  void validateRequiredProperties(BlueprintV2 blueprint) throws InvalidTopologyException;
-
-}
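For reference, a minimal illustrative implementation of the interface removed above; it simply delegates to the validation hooks declared on BlueprintV2 (validateTopology / validateRequiredProperties) and is not part of this commit.

    // Illustrative sketch only: a validator that defers to the blueprint's own checks.
    public class DelegatingBlueprintValidatorV2 implements BlueprintValidatorV2 {

      @Override
      public void validateTopology(BlueprintV2 blueprint) throws InvalidTopologyException {
        blueprint.validateTopology();
      }

      @Override
      public void validateRequiredProperties(BlueprintV2 blueprint) throws InvalidTopologyException {
        blueprint.validateRequiredProperties();
      }
    }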
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
index 3273a4e..740dd91 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
@@ -33,11 +33,10 @@ import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.api.services.stackadvisor.StackAdvisorBlueprintProcessor;
 import org.apache.ambari.server.controller.ClusterRequest;
 import org.apache.ambari.server.controller.ConfigurationRequest;
-import org.apache.ambari.server.controller.StackV2;
 import org.apache.ambari.server.controller.internal.BlueprintConfigurationProcessor;
 import org.apache.ambari.server.controller.internal.ClusterResourceProvider;
-import org.apache.ambari.server.controller.internal.ConfigurationContext;
 import org.apache.ambari.server.controller.internal.ConfigurationTopologyException;
+import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.serveraction.kerberos.KerberosInvalidConfigurationException;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.SecurityType;
@@ -64,6 +63,7 @@ public class ClusterConfigurationRequest {
   private ClusterTopology clusterTopology;
   private BlueprintConfigurationProcessor configurationProcessor;
   private StackAdvisorBlueprintProcessor stackAdvisorBlueprintProcessor;
+  private Stack stack;
   private boolean configureSecurity = false;
 
   public ClusterConfigurationRequest(AmbariContext ambariContext, ClusterTopology topology, boolean setInitial, StackAdvisorBlueprintProcessor stackAdvisorBlueprintProcessor, boolean configureSecurity) {
@@ -75,12 +75,10 @@ public class ClusterConfigurationRequest {
                                      StackAdvisorBlueprintProcessor stackAdvisorBlueprintProcessor) {
     this.ambariContext = ambariContext;
     this.clusterTopology = clusterTopology;
-    BlueprintV2 blueprint = clusterTopology.getBlueprint();
+    Blueprint blueprint = clusterTopology.getBlueprint();
+    this.stack = blueprint.getStack();
     // set initial configuration (not topology resolved)
-    //TODO set up proper ConfigurationContext
-    ConfigurationContext configurationContext = new ConfigurationContext(blueprint.getStacks().iterator().next(), clusterTopology
-            .getConfiguration());
-    this.configurationProcessor = new BlueprintConfigurationProcessor(clusterTopology, configurationContext);
+    this.configurationProcessor = new BlueprintConfigurationProcessor(clusterTopology);
     this.stackAdvisorBlueprintProcessor = stackAdvisorBlueprintProcessor;
     removeOrphanConfigTypes();
     if (setInitial) {
@@ -92,7 +90,7 @@ public class ClusterConfigurationRequest {
    * Remove config-types from the given configuration if no services are related to them (except cluster-env and global).
    */
   private void removeOrphanConfigTypes(Configuration configuration) {
-    BlueprintV2 blueprint = clusterTopology.getBlueprint();
+    Blueprint blueprint = clusterTopology.getBlueprint();
 
     Collection<String> configTypes = configuration.getAllConfigTypes();
     for (String configType : configTypes) {
@@ -164,11 +162,10 @@ public class ClusterConfigurationRequest {
     Set<String> updatedConfigTypes = new HashSet<>();
 
     Cluster cluster = getCluster();
-    BlueprintV2 blueprint = clusterTopology.getBlueprint();
+    Blueprint blueprint = clusterTopology.getBlueprint();
 
-    //Configuration stackDefaults = blueprint.getStack().getConfiguration(blueprint.getAllServices());
-    //Map<String, Map<String, String>> stackDefaultProps = stackDefaults.getProperties();
-    Map<String, Map<String, String>> stackDefaultProps = new Configuration(new HashMap<>(), new HashMap<>()).getProperties();
+    Configuration stackDefaults = blueprint.getStack().getConfiguration(blueprint.getServices());
+    Map<String, Map<String, String>> stackDefaultProps = stackDefaults.getProperties();
 
     // add clusterHostInfo containing components to hosts map, based on Topology, to use this one instead of
     // StageUtils.getClusterInfo()
@@ -179,7 +176,7 @@ public class ClusterConfigurationRequest {
       // generate principals & keytabs for headless identities
       AmbariContext.getController().getKerberosHelper()
         .ensureHeadlessIdentities(cluster, existingConfigurations,
-          new HashSet(blueprint.getAllServices()));
+          new HashSet<>(blueprint.getServices()));
 
       // apply Kerberos specific configurations
       Map<String, Map<String, String>> updatedConfigs = AmbariContext.getController().getKerberosHelper()
@@ -234,17 +231,17 @@ public class ClusterConfigurationRequest {
    * @param blueprint the blueprint
    * @return a map of service names to component names
    */
-  private Map<String, Set<String>> createServiceComponentMap(BlueprintV2 blueprint) {
+  private Map<String, Set<String>> createServiceComponentMap(Blueprint blueprint) {
     Map<String, Set<String>> serviceComponents = new HashMap<>();
-    Collection<Service> services = blueprint.getAllServices();
+    Collection<String> services = blueprint.getServices();
 
     if(services != null) {
-      for (Service service : services) {
-        Collection<ComponentV2> components = blueprint.getComponents(service);
-        serviceComponents.put(service.getType(),
+      for (String service : services) {
+        Collection<String> components = blueprint.getComponents(service);
+        serviceComponents.put(service,
             (components == null)
                 ? Collections.emptySet()
-                : new HashSet(blueprint.getComponents(service)));
+                : new HashSet<>(blueprint.getComponents(service)));
       }
     }
 
@@ -281,16 +278,16 @@ public class ClusterConfigurationRequest {
     return propertyHasCustomValue;
   }
 
-  private Map<String, String> createComponentHostMap(BlueprintV2 blueprint) {
+  private Map<String, String> createComponentHostMap(Blueprint blueprint) {
     Map<String, String> componentHostsMap = new HashMap<>();
-    for (Service service : blueprint.getAllServices()) {
-      Collection<ComponentV2> components = blueprint.getComponents(service);
-      for (ComponentV2 component : components) {
-        Collection<String> componentHost = clusterTopology.getHostAssignmentsForComponent(component.getType());
+    for (String service : blueprint.getServices()) {
+      Collection<String> components = blueprint.getComponents(service);
+      for (String component : components) {
+        Collection<String> componentHost = clusterTopology.getHostAssignmentsForComponent(component);
         // retrieve corresponding clusterInfoKey for component using StageUtils
-        String clusterInfoKey = StageUtils.getComponentToClusterInfoKeyMap().get(component.getType());
+        String clusterInfoKey = StageUtils.getComponentToClusterInfoKeyMap().get(component);
         if (clusterInfoKey == null) {
-          clusterInfoKey = component.getType().toLowerCase() + "_hosts";
+          clusterInfoKey = component.toLowerCase() + "_hosts";
         }
         componentHostsMap.put(clusterInfoKey, StringUtils.join(componentHost, ","));
       }
@@ -303,7 +300,7 @@ public class ClusterConfigurationRequest {
 
     try {
       Cluster cluster = getCluster();
-      BlueprintV2 blueprint = clusterTopology.getBlueprint();
+      Blueprint blueprint = clusterTopology.getBlueprint();
 
       Configuration clusterConfiguration = clusterTopology.getConfiguration();
       Map<String, Map<String, String>> existingConfigurations = clusterConfiguration.getFullProperties();
@@ -355,17 +352,16 @@ public class ClusterConfigurationRequest {
     //todo: also handle setting of host group scoped configuration which is updated by config processor
     List<BlueprintServiceConfigRequest> configurationRequests = new LinkedList<>();
 
-    BlueprintV2 blueprint = clusterTopology.getBlueprint();
+    Blueprint blueprint = clusterTopology.getBlueprint();
     Configuration clusterConfiguration = clusterTopology.getConfiguration();
 
-    for (Service service : blueprint.getAllServices()) {
+    for (String service : blueprint.getServices()) {
       //todo: remove intermediate request type
       // one bp config request per service
-      BlueprintServiceConfigRequest blueprintConfigRequest = new BlueprintServiceConfigRequest(service.getType());
-      String serviceStackId = service.getStackId();
-      StackV2 serviceStack = blueprint.getStackById(serviceStackId);
-      for (String serviceConfigType : serviceStack.getAllConfigurationTypes(service.getType())) {
-        Set<String> excludedConfigTypes = serviceStack.getExcludedConfigurationTypes(service.getType());
+      BlueprintServiceConfigRequest blueprintConfigRequest = new BlueprintServiceConfigRequest(service);
+
+      for (String serviceConfigType : stack.getAllConfigurationTypes(service)) {
+        Set<String> excludedConfigTypes = stack.getExcludedConfigurationTypes(service);
         if (!excludedConfigTypes.contains(serviceConfigType)) {
           // skip handling of cluster-env here
           if (! serviceConfigType.equals("cluster-env")) {
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java
index 289f053..69ccb61 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopology.java
@@ -22,7 +22,6 @@ import java.util.Collection;
 import java.util.Map;
 
 import org.apache.ambari.server.controller.RequestStatusResponse;
-import org.apache.ambari.server.controller.internal.ConfigurationContext;
 import org.apache.ambari.server.controller.internal.ProvisionAction;
 
 /**
@@ -50,7 +49,7 @@ public interface ClusterTopology {
    *
    * @return associated blueprint
    */
-  BlueprintV2 getBlueprint();
+  Blueprint getBlueprint();
 
   /**
    * Get the cluster scoped configuration for the cluster.
@@ -59,12 +58,8 @@ public interface ClusterTopology {
    *
    * @return cluster scoped configuration
    */
-  @Deprecated
   Configuration getConfiguration();
 
-
-  Collection<Service> getServiceConfigs();
-
   /**
    * Get host group information.
    *
@@ -123,18 +118,18 @@ public interface ClusterTopology {
   void addHostToTopology(String hostGroupName, String host) throws InvalidTopologyException, NoSuchHostGroupException;
 
   /**
-   * Determine if NameNode HA is enabled within ConfigurationContext.
+   * Determine if NameNode HA is enabled.
    *
    * @return true if NameNode HA is enabled; false otherwise
    */
-  boolean isNameNodeHAEnabled(ConfigurationContext configuration);
+  boolean isNameNodeHAEnabled();
 
   /**
-   * Determine if Yarn ResourceManager HA is enabled within ConfigurationContext.
+   * Determine if Yarn ResourceManager HA is enabled.
    *
    * @return true if Yarn ResourceManager HA is enabled; false otherwise
    */
-  boolean isYarnResourceManagerHAEnabled(ConfigurationContext configuration);
+  boolean isYarnResourceManagerHAEnabled();
 
   /**
    * Determine if the cluster is kerberos enabled.
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
index db3f29b..f50e60f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterTopologyImpl.java
@@ -31,7 +31,6 @@ import java.util.Set;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.RequestStatusResponse;
-import org.apache.ambari.server.controller.internal.ConfigurationContext;
 import org.apache.ambari.server.controller.internal.ProvisionAction;
 import org.apache.ambari.server.controller.internal.ProvisionClusterRequest;
 import org.slf4j.Logger;
@@ -43,16 +42,13 @@ import org.slf4j.LoggerFactory;
  */
 public class ClusterTopologyImpl implements ClusterTopology {
 
-  private final static Logger LOG = LoggerFactory.getLogger(ClusterTopologyImpl.class);
-
   private Long clusterId;
 
   //todo: currently topology is only associated with a single bp
   //todo: this will need to change to allow usage of multiple bp's for the same cluster
   //todo: for example: provision using bp1 and scale using bp2
-  private BlueprintV2 blueprint;
+  private Blueprint blueprint;
   private Configuration configuration;
-  private Collection<Service> serviceConfigs;
   private ConfigRecommendationStrategy configRecommendationStrategy;
   private ProvisionAction provisionAction = ProvisionAction.INSTALL_AND_START;
   private Map<String, AdvisedConfiguration> advisedConfigurations = new HashMap<>();
@@ -60,13 +56,16 @@ public class ClusterTopologyImpl implements ClusterTopology {
   private final AmbariContext ambariContext;
   private final String defaultPassword;
 
+  private final static Logger LOG = LoggerFactory.getLogger(ClusterTopologyImpl.class);
+
+
   //todo: will need to convert all usages of hostgroup name to use fully qualified name (BP/HG)
   //todo: for now, restrict scaling to the same BP
   public ClusterTopologyImpl(AmbariContext ambariContext, TopologyRequest topologyRequest) throws InvalidTopologyException {
     this.clusterId = topologyRequest.getClusterId();
     // provision cluster currently requires that all hostgroups have same BP so it is ok to use root level BP here
     this.blueprint = topologyRequest.getBlueprint();
-    this.serviceConfigs = topologyRequest.getServiceConfigs();
+    this.configuration = topologyRequest.getConfiguration();
     if (topologyRequest instanceof ProvisionClusterRequest) {
       this.defaultPassword = ((ProvisionClusterRequest) topologyRequest).getDefaultPassword();
     } else {
@@ -75,15 +74,6 @@ public class ClusterTopologyImpl implements ClusterTopology {
 
     registerHostGroupInfo(topologyRequest.getHostGroupInfo());
 
-    // merge service configs into global cluster configs
-    Map<String, Map<String, String>> properties = new HashMap<>();
-    Map<String, Map<String, Map<String, String>>> attributes = new HashMap<>();
-    serviceConfigs.forEach(service -> {
-      properties.putAll(service.getConfiguration().getProperties());
-      attributes.putAll(service.getConfiguration().getAttributes());
-    });
-    configuration = new Configuration(properties, attributes);
-
     // todo extract validation to specialized service
     validateTopology();
     this.ambariContext = ambariContext;
@@ -105,20 +95,15 @@ public class ClusterTopologyImpl implements ClusterTopology {
   }
 
   @Override
-  public BlueprintV2 getBlueprint() {
+  public Blueprint getBlueprint() {
     return blueprint;
   }
 
   @Override
-  @Deprecated
   public Configuration getConfiguration() {
     return configuration;
   }
 
-  public Collection<Service> getServiceConfigs() {
-    return serviceConfigs;
-  }
-
   @Override
   public Map<String, HostGroupInfo> getHostGroupInfo() {
     return hostGroupInfoMap;
@@ -128,7 +113,7 @@ public class ClusterTopologyImpl implements ClusterTopology {
   @Override
   public Collection<String> getHostGroupsForComponent(String component) {
     Collection<String> resultGroups = new ArrayList<>();
-    for (HostGroupV2 group : getBlueprint().getHostGroups().values() ) {
+    for (HostGroup group : getBlueprint().getHostGroups().values() ) {
       if (group.getComponentNames().contains(component)) {
         resultGroups.add(group.getName());
       }
@@ -193,48 +178,49 @@ public class ClusterTopologyImpl implements ClusterTopology {
   }
 
   @Override
-  public boolean isNameNodeHAEnabled(ConfigurationContext configurationContext) {
-    return configurationContext.isNameNodeHAEnabled();
+  public boolean isNameNodeHAEnabled() {
+    return isNameNodeHAEnabled(configuration.getFullProperties());
+  }
+
+  public static boolean isNameNodeHAEnabled(Map<String, Map<String, String>> configurationProperties) {
+    return configurationProperties.containsKey("hdfs-site") &&
+           (configurationProperties.get("hdfs-site").containsKey("dfs.nameservices") ||
+            configurationProperties.get("hdfs-site").containsKey("dfs.internal.nameservices"));
+  }
+
+  @Override
+  public boolean isYarnResourceManagerHAEnabled() {
+    return isYarnResourceManagerHAEnabled(configuration.getFullProperties());
   }
 
   /**
    * Static convenience function to determine if Yarn ResourceManager HA is enabled
-   * @param configurationContext configuration context
+   * @param configProperties configuration properties for this cluster
    * @return true if Yarn ResourceManager HA is enabled
    *         false if Yarn ResourceManager HA is not enabled
    */
-  @Override
-  public boolean isYarnResourceManagerHAEnabled(ConfigurationContext configurationContext) {
-    return configurationContext.isYarnResourceManagerHAEnabled();
+  static boolean isYarnResourceManagerHAEnabled(Map<String, Map<String, String>> configProperties) {
+    return configProperties.containsKey("yarn-site") && configProperties.get("yarn-site").containsKey("yarn.resourcemanager.ha.enabled")
+      && configProperties.get("yarn-site").get("yarn.resourcemanager.ha.enabled").equals("true");
   }
 
   private void validateTopology()
       throws InvalidTopologyException {
 
-    Collection<Service> hdfsServices = getBlueprint().getServicesByType("HDFS");
-    for (Service hdfsService : hdfsServices) {
-      ConfigurationContext configContext = new ConfigurationContext(hdfsService.getStack(), hdfsService.getConfiguration());
-      if(isNameNodeHAEnabled(configContext)) {
-
+    if(isNameNodeHAEnabled()){
         Collection<String> nnHosts = getHostAssignmentsForComponent("NAMENODE");
         if (nnHosts.size() != 2) {
-          throw new InvalidTopologyException("NAMENODE HA requires exactly 2 hosts running NAMENODE but there are: " +
-            nnHosts.size() + " Hosts: " + nnHosts);
+            throw new InvalidTopologyException("NAMENODE HA requires exactly 2 hosts running NAMENODE but there are: " +
+                nnHosts.size() + " Hosts: " + nnHosts);
         }
-
-        Map<String, String> hadoopEnvConfig = hdfsService.getConfiguration().getProperties().get("hadoop-env");
+        Map<String, String> hadoopEnvConfig = configuration.getFullProperties().get("hadoop-env");
         if(hadoopEnvConfig != null && !hadoopEnvConfig.isEmpty() && hadoopEnvConfig.containsKey("dfs_ha_initial_namenode_active") && hadoopEnvConfig.containsKey("dfs_ha_initial_namenode_standby")) {
-          if((!HostGroup.HOSTGROUP_REGEX.matcher(hadoopEnvConfig.get("dfs_ha_initial_namenode_active")).matches() && !nnHosts.contains(hadoopEnvConfig.get("dfs_ha_initial_namenode_active")))
-            || (!HostGroup.HOSTGROUP_REGEX.matcher(hadoopEnvConfig.get("dfs_ha_initial_namenode_standby")).matches() && !nnHosts.contains(hadoopEnvConfig.get("dfs_ha_initial_namenode_standby")))){
-            throw new IllegalArgumentException("NAMENODE HA hosts mapped incorrectly for properties 'dfs_ha_initial_namenode_active' and 'dfs_ha_initial_namenode_standby'. Expected hosts are: " + nnHosts);
-          }
+           if((!HostGroup.HOSTGROUP_REGEX.matcher(hadoopEnvConfig.get("dfs_ha_initial_namenode_active")).matches() && !nnHosts.contains(hadoopEnvConfig.get("dfs_ha_initial_namenode_active")))
+             || (!HostGroup.HOSTGROUP_REGEX.matcher(hadoopEnvConfig.get("dfs_ha_initial_namenode_standby")).matches() && !nnHosts.contains(hadoopEnvConfig.get("dfs_ha_initial_namenode_standby")))){
+              throw new IllegalArgumentException("NAMENODE HA hosts mapped incorrectly for properties 'dfs_ha_initial_namenode_active' and 'dfs_ha_initial_namenode_standby'. Expected hosts are: " + nnHosts);
+        }
         }
-
-      }
-
     }
-
-
   }
 
   @Override
@@ -246,7 +232,7 @@ public class ClusterTopologyImpl implements ClusterTopology {
   public RequestStatusResponse installHost(String hostName, boolean skipInstallTaskCreate, boolean skipFailure) {
     try {
       String hostGroupName = getHostGroupForHost(hostName);
-      HostGroupV2 hostGroup = this.blueprint.getHostGroup(hostGroupName);
+      HostGroup hostGroup = this.blueprint.getHostGroup(hostGroupName);
 
       Collection<String> skipInstallForComponents = new ArrayList<>();
       if (skipInstallTaskCreate) {
@@ -271,7 +257,7 @@ public class ClusterTopologyImpl implements ClusterTopology {
   public RequestStatusResponse startHost(String hostName, boolean skipFailure) {
     try {
       String hostGroupName = getHostGroupForHost(hostName);
-      HostGroupV2 hostGroup = this.blueprint.getHostGroup(hostGroupName);
+      HostGroup hostGroup = this.blueprint.getHostGroup(hostGroupName);
 
       // get the set of components that are marked as INSTALL_ONLY
       // for this hostgroup
@@ -335,7 +321,7 @@ public class ClusterTopologyImpl implements ClusterTopology {
       String hostGroupName = requestedHostGroupInfo.getHostGroupName();
 
       //todo: doesn't support using a different blueprint for update (scaling)
-      HostGroupV2 baseHostGroup = getBlueprint().getHostGroup(hostGroupName);
+      HostGroup baseHostGroup = getBlueprint().getHostGroup(hostGroupName);
 
       if (baseHostGroup == null) {
         throw new IllegalArgumentException("Invalid host_group specified: " + hostGroupName +
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ComponentV2.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ComponentV2.java
deleted file mode 100644
index 68744d4..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ComponentV2.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.topology;
-
-import org.apache.ambari.server.controller.StackV2;
-import org.apache.ambari.server.controller.internal.ProvisionAction;
-
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-public class ComponentV2 implements Configurable {
-
-  private String type;
-
-  private String name;
-
-  private ServiceId serviceId = new ServiceId();
-
-  private ProvisionAction provisionAction = ProvisionAction.INSTALL_AND_START;
-
-  private Configuration configuration;
-
-  private boolean masterComponent = false;
-
-  @JsonIgnore
-  private Service service;
-
-  public ComponentV2() { }
-
-
-  /**
-   * Gets the name of this component
-   *
-   * @return component name
-   */
-  public String getName() {
-    return this.name;
-  }
-
-  /** @return the masterComponent flag */
-  public boolean isMasterComponent() {
-    return masterComponent;
-  }
-
-  /**
-   * Gets the provision action associated with this component.
-   *
-   * @return the provision action for this component, which
-   *         may be null if the default action is to be used
-   */
-  public ProvisionAction getProvisionAction() {
-    return this.provisionAction;
-  }
-
-  public ServiceId getServiceId() {
-    return serviceId;
-  }
-
-  public Service getService() {
-    return service;
-  }
-
-  //TODO
-  public ServiceGroup getServiceGroup() {
-    return null;
-  }
-
-  public Configuration getConfiguration() {
-    return configuration;
-  }
-
-  public String getType() {
-    return type;
-  }
-
-  public void setType(String type) {
-    this.type = type;
-    if (null == this.name) {
-      this.name = type;
-    }
-  }
-
-  public void setName(String name) {
-    this.name = name;
-  }
-
-  public String getServiceGroupName() {
-    return serviceId.getServiceGroup();
-  }
-
-  @JsonProperty("service_group")
-  public void setServiceGroup(String serviceGroup) {
-    serviceId.setServiceGroup(serviceGroup);
-  }
-
-  @JsonProperty("service_name")
-  public void setServiceName(String serviceName) {
-    serviceId.setName(serviceName);
-  }
-
-  public String getServiceName() {
-    return serviceId.getName();
-  }
-
-  @JsonProperty("provision_action")
-  public void setProvisionAction(ProvisionAction provisionAction) {
-    this.provisionAction = provisionAction;
-  }
-
-  public void setConfiguration(Configuration configuration) {
-    this.configuration = configuration;
-  }
-
-  @JsonIgnore
-  public void setMasterComponent(StackV2 stack) {
-    this.masterComponent = stack.isMasterComponent(this.type);
-  }
-
-  public void setService(Service service) {
-    this.service = service;
-  }
-}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/Configurable.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/Configurable.java
deleted file mode 100644
index 74308ab..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/Configurable.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.topology;
-
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-public interface Configurable {
-  void setConfiguration(Configuration configuration);
-
-  @JsonProperty("configurations")
-  default void setConfigs(Collection<Map<String, Map<String, Map<String, String>>>> configs) {
-    Map<String, Map<String, String>> allProps = configs.stream().collect(Collectors.toMap(
-      config -> config.keySet().iterator().next(),
-      config -> config.values().iterator().next().get("properties")
-    ));
-    setConfiguration(new Configuration(allProps, new HashMap<>()));
-  }
-
-}
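For reference, the setConfigs(...) default method removed above consumes the blueprint-style "configurations" element: a collection of single-entry maps keyed by config type, each wrapping a "properties" map. A small sketch that builds such a structure; the config type and property values are illustrative only.

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.Collections;
    import java.util.Map;

    public class ConfigurableShapeExample {
      // Builds the structure that setConfigs(...) expects, i.e. the JSON form:
      //   "configurations": [ { "<config-type>": { "properties": { "<key>": "<value>" } } }, ... ]
      public static Collection<Map<String, Map<String, Map<String, String>>>> sample() {
        Collection<Map<String, Map<String, Map<String, String>>>> configs = new ArrayList<>();
        Map<String, String> properties = Collections.singletonMap("fs.defaultFS", "hdfs://example-nn:8020");
        Map<String, Map<String, String>> wrapper = Collections.singletonMap("properties", properties);
        configs.add(Collections.singletonMap("core-site", wrapper));
        return configs;
      }
    }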
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/Configuration.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/Configuration.java
index 28dbbaa..28b62bc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/Configuration.java
@@ -24,8 +24,6 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 
-import com.fasterxml.jackson.annotation.JsonIgnore;
-
 /**
  * Configuration for a topology entity such as a blueprint, hostgroup or cluster.
  */
@@ -100,7 +98,6 @@ public class Configuration {
    *
    * @return complete map of merged properties keyed by config type
    */
-  @JsonIgnore
   public Map<String, Map<String, String>> getFullProperties() {
     return getFullProperties(Integer.MAX_VALUE);
   }
@@ -116,7 +113,6 @@ public class Configuration {
    *
    * @return map of merged properties keyed by config type
    */
-  @JsonIgnore
   public Map<String, Map<String, String>> getFullProperties(int depthLimit) {
     if (depthLimit == 0) {
       HashMap<String, Map<String, String>> propertiesCopy = new HashMap<>();
@@ -160,7 +156,6 @@ public class Configuration {
    *
    * @return complete map of merged attributes {configType -> {attributeName -> {propName, attributeValue}}}
    */
-  @JsonIgnore
   public Map<String, Map<String, Map<String, String>>> getFullAttributes() {
     Map<String, Map<String, Map<String, String>>> mergedAttributeMap = parentConfiguration == null ?
       new HashMap<>() :
@@ -319,7 +314,6 @@ public class Configuration {
    *
    * @return collection of all represented configuration types
    */
-  @JsonIgnore
   public Collection<String> getAllConfigTypes() {
     Collection<String> allTypes = new HashSet<>();
     for (String type : getFullProperties().keySet()) {
@@ -338,7 +332,6 @@ public class Configuration {
    *
    * @return the parent configuration or null if no parent is set
    */
-  @JsonIgnore
   public Configuration getParentConfiguration() {
     return parentConfiguration;
   }
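For reference, a sketch of how getProperties() and getFullProperties() differ once a parent Configuration is attached. The parent-taking constructor used below is an assumption about the full class; this diff only shows the two-argument form and the getters.

    // Illustrative sketch only (not runnable standalone); values are made up.
    Map<String, Map<String, String>> parentProps = new HashMap<>();
    parentProps.put("core-site", new HashMap<>(Collections.singletonMap("fs.trash.interval", "360")));
    Configuration parent = new Configuration(parentProps, new HashMap<>());

    Map<String, Map<String, String>> childProps = new HashMap<>();
    childProps.put("hdfs-site", new HashMap<>(Collections.singletonMap("dfs.replication", "3")));
    Configuration child = new Configuration(childProps, new HashMap<>(), parent);  // parent-taking ctor assumed

    child.getProperties();      // the child's own properties only: {hdfs-site={dfs.replication=3}}
    child.getFullProperties();  // parent's core-site merged in beneath the child's own properties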
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupImpl.java
index 9d3a1b8..9aeadd1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupImpl.java
@@ -255,6 +255,8 @@ public class HostGroupImpl implements HostGroup {
       } else {
         addComponent(componentEntity.getName());
       }
+
+
     }
   }
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupInfo.java
index 7cbdd98..4648412 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupInfo.java
@@ -57,14 +57,14 @@ public class HostGroupInfo {
   private final Map<String, String> hostRackInfo = new HashMap<>();
 
   /**
-   * List of services
+   * explicitly specified host count
    */
-  protected Collection<Service> serviceConfigs;
+  private int requested_count = 0;
 
   /**
-   * explicitly specified host count
+   * host group scoped configuration
    */
-  private int requested_count = 0;
+  Configuration configuration;
 
   /**
    * explicitly specified host predicate string
@@ -110,10 +110,6 @@ public class HostGroupInfo {
     }
   }
 
-  public Collection<Service> getServiceConfigs() {
-    return serviceConfigs;
-  }
-
   /**
    * Get the requested host count.
    * This is either the user specified value or
@@ -170,7 +166,7 @@ public class HostGroupInfo {
    * @param configuration configuration instance
    */
   public void setConfiguration(Configuration configuration) {
-
+    this.configuration = configuration;
   }
 
   /**
@@ -179,9 +175,8 @@ public class HostGroupInfo {
    * @return associated host group scoped configuration or null if no configuration
    *         is specified for the host group
    */
-  @Deprecated
   public Configuration getConfiguration() {
-    return null;
+    return configuration;
   }
 
   /**
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupV2.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupV2.java
deleted file mode 100644
index 8da24bd..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupV2.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.topology;
-
-import java.util.Collection;
-import java.util.regex.Pattern;
-
-import org.apache.ambari.server.controller.internal.ProvisionAction;
-
-/**
- * Host Group representation.
- */
-public interface HostGroupV2 {
-
-  /**
-   * Compiled regex for hostgroup token.
-   */
-  Pattern HOSTGROUP_REGEX = Pattern.compile("%HOSTGROUP::(\\S+?)%");
-  /**
-   * Get the name of the host group.
-   *
-   * @return the host group name
-   */
-  String getName();
-
-  /**
-   * Get the name of the associated blueprint
-   *
-   * @return associated blueprint name
-   */
-  String getBlueprintName();
-
-  /**
-   * Get the fully qualified host group name in the form of
-   * blueprintName:hostgroupName
-   *
-   * @return fully qualified host group name
-   */
-  String getFullyQualifiedName();
-
-  /**
-   * Get all of the host group components.
-   *
-   * @return collection of component instances
-   */
-  Collection<ComponentV2> getComponents();
-
-  /**
-   * Get all of the host group component names
-   *
-   * @return collection of component names as String
-   */
-  Collection<String> getComponentNames();
-
-  /**
-   * Get all host group component names for instances
-   *   that have the specified provision action association.
-   *
-   * @param provisionAction the provision action that must be associated
-   *                          with the component names returned
-   *
-   * @return collection of component names as String that are associated with
-   *           the specified provision action
-   */
-  Collection<String> getComponentNames(ProvisionAction provisionAction);
-
-  /**
-   * Get the host group components which belong to the specified service.
-   *
-   * @param serviceId  service id
-   *
-   * @return collection of component names for the specified service; will not return null
-   */
-  Collection<ComponentV2> getComponentsByServiceId(ServiceId serviceId);
-
-  Collection<ComponentV2> getComponents(Service serviceId);
-
-  /**
-   * Determine if the host group contains a master component.
-   *
-   * @return true if the host group contains a master component; false otherwise
-   */
-  boolean containsMasterComponent();
-
-  /**
-   * @return collection of service ids associated with the host group components.
-   */
-  Collection<ServiceId> getServiceIds();
-
-  /**
-   * @return collection of services associated with the host group components.
-   */
-  Collection<Service> getServices();
-
-  Service getService(ServiceId serviceId);
-
-  /**
-   * @return collection of service names associated with the host group components.
-   */
-  Collection<String> getServiceNames();
-
-  /**
-   * Get the configuration associated with the host group.
-   * The host group configuration has the blueprint cluster scoped
-   * configuration set as its parent.
-   *
-   * @return host group configuration
-   */
-  @Deprecated
-  Configuration getConfiguration();
-
-  /**
-   * Get the cardinality value that was specified for the host group.
-   * This is simply metadata for the stack that a deployer can use;
-   * this information is not used by Ambari.
-   *
-   * @return the cardinality specified for the hostgroup
-   */
-  String getCardinality();
-}
-
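For reference, the HOSTGROUP_REGEX constant declared above is what lets properties such as dfs_ha_initial_namenode_active refer to a host group token instead of a concrete host name. A self-contained sketch of matching such a token; the sample value is illustrative.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class HostGroupTokenExample {
      // Same pattern as the HOSTGROUP_REGEX constant above.
      private static final Pattern HOSTGROUP_REGEX = Pattern.compile("%HOSTGROUP::(\\S+?)%");

      public static void main(String[] args) {
        String value = "%HOSTGROUP::master_host_group%";   // e.g. a dfs_ha_initial_namenode_active value
        Matcher m = HOSTGROUP_REGEX.matcher(value);
        if (m.matches()) {
          System.out.println("host group token refers to: " + m.group(1));  // master_host_group
        } else {
          System.out.println("plain host name: " + value);
        }
      }
    }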
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupV2Impl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupV2Impl.java
deleted file mode 100644
index 01f34da..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostGroupV2Impl.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.topology;
-
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import javax.annotation.Nullable;
-
-import org.apache.ambari.server.controller.internal.ProvisionAction;
-
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.google.common.base.Function;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicates;
-import com.google.common.collect.Collections2;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-
-public class HostGroupV2Impl implements HostGroupV2, Configurable {
-
-  private String name;
-  private String blueprintName;
-  private List<ComponentV2> components;
-  private Set<ServiceId> serviceIds;
-  private Configuration configuration;
-  private String cardinality;
-  private boolean containsMasterComponent;
-  @JsonIgnore
-  private Map<ServiceId, Service> serviceMap;
-
-  public HostGroupV2Impl() { }
-
-  @Override
-  public String getName() {
-    return name;
-  }
-
-  @Override
-  public String getBlueprintName() {
-    return blueprintName;
-  }
-
-  @Override
-  public String getFullyQualifiedName() {
-    return blueprintName + ":" + name;
-  }
-
-  @Override
-  public Collection<ComponentV2> getComponents() {
-    return components;
-  }
-
-  @Override
-  public Collection<String> getComponentNames() {
-    return getComponentNames(components);
-  }
-
-  private Collection<String> getComponentNames(List<ComponentV2> components) {
-    return Lists.transform(components,
-      new Function<ComponentV2, String>() {
-        @Override public String apply(@Nullable ComponentV2 input) { return input.getName(); }
-      });
-  }
-
-  @Override
-  public Collection<String> getComponentNames(ProvisionAction provisionAction) {
-    List<ComponentV2> filtered = ImmutableList.copyOf(
-      Collections2.filter(components, c -> provisionAction.equals(c.getProvisionAction())));
-    return getComponentNames(filtered);
-  }
-
-  @Override
-  public Collection<ComponentV2> getComponents(Service service) {
-    return getComponentsByServiceId(service.getId());
-  }
-
-  @Override
-  public Collection<ComponentV2> getComponentsByServiceId(ServiceId serviceId) {
-    return components.stream().filter(c -> c.getServiceId().equals(serviceId)).collect(Collectors.toList());
-  }
-
-  @Override
-  public boolean containsMasterComponent() {
-    return containsMasterComponent;
-  }
-
-  @Override
-  public Collection<ServiceId> getServiceIds() {
-    return serviceIds;
-  }
-
-  @Override
-  @JsonIgnore
-  public Collection<Service> getServices() {
-    return serviceMap.values();
-  }
-
-  @Override
-  @JsonIgnore
-  public Service getService(ServiceId serviceId) {
-    return serviceMap.get(serviceId);
-  }
-
-  @Override
-  @JsonIgnore
-  public Collection<String> getServiceNames() {
-    return serviceMap.values().stream().map(s -> s.getName()).collect(Collectors.toList());
-  }
-
-  @JsonIgnore
-  public void setServiceMap(Map<ServiceId, Service> serviceMap) {
-    Preconditions.checkArgument(serviceMap.keySet().equals(this.serviceIds),
-      "Maitained list of service ids doesn't match with received service map: %s vs %s", serviceIds, serviceMap.keySet());
-    this.serviceMap = serviceMap;
-  }
-
-  @Override
-  public Configuration getConfiguration() {
-    return configuration;
-  }
-
-  @Override
-  public String getCardinality() {
-    return cardinality;
-  }
-
-  public void setName(String name) {
-    this.name = name;
-  }
-
-  public void setBlueprintName(String blueprintName) {
-    this.blueprintName = blueprintName;
-  }
-
-  public void setComponents(List<ComponentV2> components) {
-    this.components = components;
-    this.containsMasterComponent = components.stream().anyMatch(c -> c.isMasterComponent());
-    this.serviceIds = components.stream().map(c -> c.getServiceId()).collect(Collectors.toSet());
-  }
-
-  public void setConfiguration(Configuration configuration) {
-    this.configuration = configuration;
-  }
-
-  public void setCardinality(String cardinality) {
-    this.cardinality = cardinality;
-  }
-
-}
-
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostRequest.java
index 685d208..7045912 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/HostRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/HostRequest.java
@@ -31,9 +31,9 @@ import java.util.Map;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.api.predicate.InvalidQueryException;
 import org.apache.ambari.server.api.predicate.PredicateCompiler;
-import org.apache.ambari.server.controller.StackV2;
 import org.apache.ambari.server.controller.internal.HostResourceProvider;
 import org.apache.ambari.server.controller.internal.ResourceImpl;
+import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.controller.spi.Predicate;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
@@ -60,7 +60,7 @@ public class HostRequest implements Comparable<HostRequest> {
 
   private long requestId;
   private String blueprint;
-  private HostGroupV2 hostGroup;
+  private HostGroup hostGroup;
   private String hostgroupName;
   private Predicate predicate;
   private String hostname = null;
@@ -84,7 +84,7 @@ public class HostRequest implements Comparable<HostRequest> {
   private static PredicateCompiler predicateCompiler = new PredicateCompiler();
 
   public HostRequest(long requestId, long id, long clusterId, String hostname, String blueprintName,
-                     HostGroupV2 hostGroup, Predicate predicate, ClusterTopology topology, boolean skipFailure) {
+                     HostGroup hostGroup, Predicate predicate, ClusterTopology topology, boolean skipFailure) {
     this.requestId = requestId;
     this.id = id;
     this.clusterId = clusterId;
@@ -164,7 +164,7 @@ public class HostRequest implements Comparable<HostRequest> {
     return blueprint;
   }
 
-  public HostGroupV2 getHostGroup() {
+  public HostGroup getHostGroup() {
     return hostGroup;
   }
 
@@ -206,13 +206,13 @@ public class HostRequest implements Comparable<HostRequest> {
     }
 
     // lower level logical component level tasks which get mapped to physical tasks
-    HostGroupV2 hostGroup = getHostGroup();
+    HostGroup hostGroup = getHostGroup();
     Collection<String> startOnlyComponents = hostGroup.getComponentNames(START_ONLY);
     Collection<String> installOnlyComponents = hostGroup.getComponentNames(INSTALL_ONLY);
     Collection<String> installAndStartComponents = hostGroup.getComponentNames(INSTALL_AND_START);
 
-    for (ComponentV2 component : hostGroup.getComponents()) {
-      if (component == null || component.getType().equals("AMBARI_SERVER")) {
+    for (String component : hostGroup.getComponentNames()) {
+      if (component == null || component.equals("AMBARI_SERVER")) {
         LOG.info("Skipping component {} when creating request\n", component);
         continue;
       }
@@ -222,31 +222,31 @@ public class HostRequest implements Comparable<HostRequest> {
           "PENDING HOST ASSIGNMENT : HOSTGROUP=" + getHostgroupName();
 
       AmbariContext context = topology.getAmbariContext();
-      StackV2 stack = component.getService().getStack();
+      Stack stack = hostGroup.getStack();
 
       // Skip INSTALL task in case server component is marked as START_ONLY, or the cluster provision_action is
       // START_ONLY, unless component is marked with INSTALL_ONLY or INSTALL_AND_START.
-      if (startOnlyComponents.contains(component.getName()) || (skipInstallTaskCreate &&
-        !installOnlyComponents.contains(component.getName()) && !installAndStartComponents.contains(component.getName()))
-          && stack != null && !stack.getComponentInfo(component.getType()).isClient()) {
-        LOG.info("Skipping create of INSTALL task for {} on {}.", component.getName(), hostName);
+      if (startOnlyComponents.contains(component) || (skipInstallTaskCreate &&
+        !installOnlyComponents.contains(component) && !installAndStartComponents.contains(component))
+          && stack != null && !stack.getComponentInfo(component).isClient()) {
+        LOG.info("Skipping create of INSTALL task for {} on {}.", component, hostName);
       } else {
         HostRoleCommand logicalInstallTask = context.createAmbariTask(
-          getRequestId(), id, component.getName(), hostName, AmbariContext.TaskType.INSTALL, skipFailure);
+          getRequestId(), id, component, hostName, AmbariContext.TaskType.INSTALL, skipFailure);
         logicalTasks.put(logicalInstallTask.getTaskId(), logicalInstallTask);
-        logicalTaskMap.get(installTask).put(component.getName(), logicalInstallTask.getTaskId());
+        logicalTaskMap.get(installTask).put(component, logicalInstallTask.getTaskId());
       }
 
       // Skip START task if component is a client, or is marked as INSTALL_ONLY or cluster provision_action is
       // INSTALL_ONLY
-      if (installOnlyComponents.contains(component.getName()) || skipStartTaskCreate ||
-        (stack != null && stack.getComponentInfo(component.getType()).isClient())) {
-        LOG.info("Skipping create of START task for {} on {}.", component.getName(), hostName);
+      if (installOnlyComponents.contains(component) || skipStartTaskCreate ||
+        (stack != null && stack.getComponentInfo(component).isClient())) {
+        LOG.info("Skipping create of START task for {} on {}.", component, hostName);
       } else {
         HostRoleCommand logicalStartTask = context.createAmbariTask(
-            getRequestId(), id, component.getName(), hostName, AmbariContext.TaskType.START, skipFailure);
+            getRequestId(), id, component, hostName, AmbariContext.TaskType.START, skipFailure);
         logicalTasks.put(logicalStartTask.getTaskId(), logicalStartTask);
-        logicalTaskMap.get(startTask).put(component.getName(), logicalStartTask.getTaskId());
+        logicalTaskMap.get(startTask).put(component, logicalStartTask.getTaskId());
       }
     }
   }
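
The reverted task-creation loop keys everything off plain component-name strings: the stack now comes
from the host group rather than from a per-component service, and the INSTALL/START decisions reduce to
two predicates. A condensed sketch of those predicates, restating only names visible in the hunk above:

    // Sketch only: restates the two skip conditions from the loop above.
    boolean skipInstallTask =
        startOnlyComponents.contains(component)
            || (skipInstallTaskCreate
                && !installOnlyComponents.contains(component)
                && !installAndStartComponents.contains(component)
                && stack != null
                && !stack.getComponentInfo(component).isClient());

    boolean skipStartTask =
        installOnlyComponents.contains(component)
            || skipStartTaskCreate
            || (stack != null && stack.getComponentInfo(component).isClient());
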
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/LogicalRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/LogicalRequest.java
index fc6101d..b63bbad 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/LogicalRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/LogicalRequest.java
@@ -222,7 +222,7 @@ public class LogicalRequest extends Request {
 
     //todo: synchronization
     for (HostRequest hostRequest : allHostRequests) {
-      HostGroupV2 hostGroup = hostRequest.getHostGroup();
+      HostGroup hostGroup = hostRequest.getHostGroup();
       for (String host : topology.getHostGroupInfo().get(hostGroup.getName()).getHostNames()) {
         Collection<String> hostComponents = hostComponentMap.get(host);
         if (hostComponents == null) {
@@ -407,7 +407,7 @@ public class LogicalRequest extends Request {
 
   private void createHostRequests(TopologyRequest request, ClusterTopology topology) {
     Map<String, HostGroupInfo> hostGroupInfoMap = request.getHostGroupInfo();
-    BlueprintV2 blueprint = topology.getBlueprint();
+    Blueprint blueprint = topology.getBlueprint();
     boolean skipFailure = topology.getBlueprint().shouldSkipFailure();
     for (HostGroupInfo hostGroupInfo : hostGroupInfoMap.values()) {
       String groupName = hostGroupInfo.getHostGroupName();
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java
index 1374a42..63898ba 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/PersistedStateImpl.java
@@ -18,7 +18,6 @@
 
 package org.apache.ambari.server.topology;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
@@ -40,7 +39,6 @@ import org.apache.ambari.server.orm.dao.TopologyLogicalRequestDAO;
 import org.apache.ambari.server.orm.dao.TopologyLogicalTaskDAO;
 import org.apache.ambari.server.orm.dao.TopologyRequestDAO;
 import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
-import org.apache.ambari.server.orm.entities.TopologyConfigurationsEntity;
 import org.apache.ambari.server.orm.entities.TopologyHostGroupEntity;
 import org.apache.ambari.server.orm.entities.TopologyHostInfoEntity;
 import org.apache.ambari.server.orm.entities.TopologyHostRequestEntity;
@@ -95,7 +93,7 @@ public class PersistedStateImpl implements PersistedState {
   private HostRoleCommandDAO physicalTaskDAO;
 
   @Inject
-  private BlueprintV2Factory blueprintFactory;
+  private BlueprintFactory blueprintFactory;
 
   @Inject
   private LogicalRequestFactory logicalRequestFactory;
@@ -256,18 +254,9 @@ public class PersistedStateImpl implements PersistedState {
       entity.setBlueprintName(request.getBlueprint().getName());
     }
 
-    Collection<TopologyConfigurationsEntity> serviceConfigurations = new ArrayList<>();
-    request.getServiceConfigs().forEach(service -> {
-      TopologyConfigurationsEntity topologyConfigurationsEntity = new TopologyConfigurationsEntity();
-      topologyConfigurationsEntity.setServiceGroupName(service.getServiceGroup().getName());
-      topologyConfigurationsEntity.setServiceName(service.getName());
-      topologyConfigurationsEntity.setConfigProperties(propertiesAsString(service.getConfiguration().getProperties()));
-      topologyConfigurationsEntity.setConfigAttributes(attributesAsString(service.getConfiguration().getAttributes()));
-      serviceConfigurations.add(topologyConfigurationsEntity);
-    });
-    entity.setTopologyConfigurationsEntities(serviceConfigurations);
-
+    entity.setClusterAttributes(attributesAsString(request.getConfiguration().getAttributes()));
     entity.setClusterId(request.getClusterId());
+    entity.setClusterProperties(propertiesAsString(request.getConfiguration().getProperties()));
     entity.setDescription(request.getDescription());
 
     if (request.getProvisionAction() != null) {
@@ -389,43 +378,22 @@ public class PersistedStateImpl implements PersistedState {
     private final Long clusterId;
     private final Type type;
     private final String description;
-    private final BlueprintV2 blueprint;
-    private final Collection<Service> services;
+    private final Blueprint blueprint;
+    private final Configuration configuration;
     private final Map<String, HostGroupInfo> hostGroupInfoMap = new HashMap<>();
 
-    public ReplayedTopologyRequest(TopologyRequestEntity entity, BlueprintV2Factory blueprintFactory) {
+    public ReplayedTopologyRequest(TopologyRequestEntity entity, BlueprintFactory blueprintFactory) {
       clusterId = entity.getClusterId();
       type = Type.valueOf(entity.getAction());
       description = entity.getDescription();
 
       try {
         blueprint = blueprintFactory.getBlueprint(entity.getBlueprintName());
-      } catch (NoSuchBlueprintException e) {
-        throw new RuntimeException("Unable to load blueprint while replaying topology request: " + e, e);
-      } catch (IOException e) {
-        throw new RuntimeException("Unable to load blueprint while replaying topology request: " + e, e);
       } catch (NoSuchStackException e) {
         throw new RuntimeException("Unable to load blueprint while replaying topology request: " + e, e);
       }
-      // load Service configurations from db, set Blueprint service config as parent for each
-      services = new ArrayList<>();
-      entity.getTopologyConfigurationsEntities().stream().filter(topologyConfigurationsEntity -> (
-        topologyConfigurationsEntity.getComponentName() == null
-                && topologyConfigurationsEntity.getHostGroupName() == null))
-              .forEach(topologyConfigurationsEntity -> {
-
-        ServiceId serviceId = ServiceId.of(topologyConfigurationsEntity.getServiceName(),
-                topologyConfigurationsEntity.getServiceGroupName());
-        Service service = blueprint.getServiceById(serviceId);
-        Configuration configuration = createConfiguration(topologyConfigurationsEntity.getConfigProperties(),
-                topologyConfigurationsEntity.getConfigAttributes());
-        service.getConfiguration().setParentConfiguration(service.getStack().getConfiguration());
-        configuration.setParentConfiguration(service.getConfiguration());
-
-        service.setConfiguration(configuration);
-        services.add(service);
-
-      });
+      configuration = createConfiguration(entity.getClusterProperties(), entity.getClusterAttributes());
+      configuration.setParentConfiguration(blueprint.getConfiguration());
 
       parseHostGroupInfo(entity);
     }
@@ -441,19 +409,13 @@ public class PersistedStateImpl implements PersistedState {
     }
 
     @Override
-    public BlueprintV2 getBlueprint() {
+    public Blueprint getBlueprint() {
       return blueprint;
     }
 
     @Override
-    @Deprecated
     public Configuration getConfiguration() {
-      return null;
-    }
-
-    @Override
-    public Collection<Service> getServiceConfigs() {
-      return services;
+      return configuration;
     }
 
     @Override
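
With the per-service TopologyConfigurationsEntity rows gone, a replayed request carries one
cluster-scoped Configuration whose parent is the blueprint configuration, so property lookups fall back
from the replayed request to the blueprint and from there to whatever parent the blueprint carries.
A minimal sketch of that chain, using only helpers already referenced in this patch:

    // Sketch: rebuild the replayed configuration and resolve its effective properties.
    Configuration replayed = createConfiguration(entity.getClusterProperties(),
                                                 entity.getClusterAttributes());
    replayed.setParentConfiguration(blueprint.getConfiguration());
    // getFullProperties() merges request-level values with the parent chain.
    Map<String, Map<String, String>> effective = replayed.getFullProperties();
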
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/RepositoryVersion.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/RepositoryVersion.java
deleted file mode 100644
index 8eb6663..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/RepositoryVersion.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.topology;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-public class RepositoryVersion {
-  @JsonProperty("stack_id")
-  private String stackId;
-
-  @JsonProperty("repository_version")
-  private String repositoryVersion;
-
-  public RepositoryVersion() { }
-
-  public RepositoryVersion(String stackId, String repositoryVersion) {
-    this.stackId = stackId;
-    this.repositoryVersion = repositoryVersion;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    RepositoryVersion that = (RepositoryVersion) o;
-
-    if (stackId != null ? !stackId.equals(that.stackId) : that.stackId != null) return false;
-    return repositoryVersion != null ? repositoryVersion.equals(that.repositoryVersion) : that.repositoryVersion == null;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = stackId != null ? stackId.hashCode() : 0;
-    result = 31 * result + (repositoryVersion != null ? repositoryVersion.hashCode() : 0);
-    return result;
-  }
-
-  public String getStackId() {
-    return stackId;
-  }
-
-  public void setStackId(String stackId) {
-    this.stackId = stackId;
-  }
-
-  public String getRepositoryVersion() {
-    return repositoryVersion;
-  }
-
-  public void setRepositoryVersion(String repositoryVersion) {
-    this.repositoryVersion = repositoryVersion;
-  }
-}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/SecurityConfiguration.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/SecurityConfiguration.java
index 7955169..4ff5504 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/SecurityConfiguration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/SecurityConfiguration.java
@@ -21,9 +21,6 @@ package org.apache.ambari.server.topology;
 
 import org.apache.ambari.server.state.SecurityType;
 
-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonProperty;
-
 /**
  * Holds security related properties, the securityType and security descriptor (in case of KERBEROS
  * kerberos_descriptor) either contains the whole descriptor or just the reference to it.
@@ -46,9 +43,7 @@ public class SecurityConfiguration {
    */
   private String descriptor;
 
-
-  @JsonCreator
-  public SecurityConfiguration(@JsonProperty("type") SecurityType type) {
+  public SecurityConfiguration(SecurityType type) {
     this.type = type;
   }
 
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/Service.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/Service.java
deleted file mode 100644
index 317e29f..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/Service.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.topology;
-
-
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.ambari.server.controller.StackV2;
-
-import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-
-
-public class Service implements Configurable {
-
-  private String type;
-
-  private ServiceId id = new ServiceId();
-
-  private String stackId;
-
-  private Configuration configuration;
-
-  private Set<ServiceId> dependencies = ImmutableSet.of();
-
-  @JsonIgnore
-  private Map<ServiceId, Service> dependencyMap = ImmutableMap.of();
-
-  @JsonIgnore
-  private ServiceGroup serviceGroup;
-
-  @JsonIgnore
-  private StackV2 stack;
-
-  /**
-   * Gets the name of this service
-   *
-   * @return component name
-   */
-  public String getName() {
-    return this.id.getName();
-  }
-
-  public String getServiceGroupId() {
-    return this.id.getServiceGroup();
-  }
-
-  public ServiceGroup getServiceGroup() {
-    return serviceGroup;
-  }
-
-  public String getType() {
-    return type;
-  }
-
-  public String getStackId() {
-    return stackId;
-  }
-
-  public StackV2 getStack() {
-    return stack;
-  }
-
-  public Set<ServiceId> getDependentServiceIds() {
-    return dependencies;
-  }
-
-  public Set<Service> getDependencies() {
-    return ImmutableSet.copyOf(dependencyMap.values());
-  }
-
-  public Configuration getConfiguration() {
-    return configuration;
-  }
-
-  public void setType(String type) {
-    this.type = type;
-    if (null == this.getName()) {
-      setName(type);
-    }
-  }
-
-  public void setName(String name) {
-    this.id.setName(name);
-  }
-
-  public void setServiceGroup(ServiceGroup serviceGroup) {
-    this.serviceGroup = serviceGroup;
-    this.id.setServiceGroup(serviceGroup.getName());
-  }
-
-  @JsonProperty("stack_id")
-  public void setStackId(String stackId) {
-    this.stackId = stackId;
-  }
-
-  public void setStackFromBlueprint(BlueprintV2 blueprint) {
-    this.stack = blueprint.getStackById(this.stackId);
-  }
-
-  public void setConfiguration(Configuration configuration) {
-    this.configuration = configuration;
-  }
-
-  public void setDependencies(Set<ServiceId> dependencies) {
-    this.dependencies = dependencies;
-  }
-
-  /**
-   * Called during post-deserialization
-   * @param dependencyMap
-   */
-  void setDependencyMap(Map<ServiceId, Service> dependencyMap) {
-    Preconditions.checkArgument(dependencyMap.keySet().equals(dependencies),
-      "Received dependency map is not consisted with persisted dependency references: %s vs. %s",
-      dependencyMap.keySet(), dependencies);
-    this.dependencyMap = dependencyMap;
-  }
-
-  public ServiceId getId() {
-    return id;
-  }
-
-  @Override
-  public String toString() {
-    return "Service{" +
-      "type='" + type + '\'' +
-      ", id=" + id +
-      ", stackId='" + stackId + '\'' +
-      '}';
-  }
-
-  public String getServiceGroupName() {
-    if (serviceGroup != null) {
-      return serviceGroup.getName();
-    }
-    return null;
-  }
-}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ServiceGroup.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ServiceGroup.java
deleted file mode 100644
index 7f38b2c..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ServiceGroup.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.topology;
-
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import com.google.common.collect.ListMultimap;
-import com.google.common.collect.Multimaps;
-
-public class ServiceGroup {
-
-  private String name = null;
-
-  private Map<String, Service> servicesByName;
-  private ListMultimap<String, Service> servicesByType;
-
-  private Configuration configuration = null;
-
-  private Set<String> dependencies = new HashSet<>();
-
-  public ServiceGroup() { }
-
-  /**
-   * Gets the name of this service group
-   *
-   * @return component name
-   */
-  public String getName() {
-    return this.name;
-  }
-
-  public Collection<Service> getServices() {
-    return servicesByName.values();
-  }
-
-  public Service getServiceByName(String name) {
-    return servicesByName.get(name);
-  }
-
-  public List<Service> getServiceByType(String name) {
-    return servicesByType.get(name);
-  }
-
-  public Configuration getConfiguration() {
-    return configuration;
-  }
-
-  public Set<String> getDependencies() {
-    return dependencies;
-  }
-
-  public void setName(String name) {
-    this.name = name;
-  }
-
-  public void setServices(Collection<Service> services) {
-    services.forEach(s -> s.setServiceGroup(this));
-    this.servicesByName = services.stream().collect(Collectors.toMap(Service::getName, s -> s));
-    this.servicesByType = Multimaps.index(services, Service::getType);
-    services.forEach(s -> s.setServiceGroup(this));
-  }
-
-  public void setConfiguration(Configuration configuration) {
-    this.configuration = configuration;
-  }
-
-  public void setDependencies(Set<String> dependencies) {
-    this.dependencies = dependencies;
-  }
-
-}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ServiceId.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ServiceId.java
deleted file mode 100644
index 2d81a07..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ServiceId.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.topology;
-
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-public class ServiceId {
-  private String serviceGroup;
-  private String name;
-
-  public ServiceId() { }
-
-  public static ServiceId of(String name, String serviceGroup) {
-    ServiceId id = new ServiceId();
-    id.name = name;
-    id.serviceGroup = serviceGroup;
-    return id;
-  }
-
-  public String getServiceGroup() {
-    return serviceGroup;
-  }
-
-  @JsonProperty("service_group")
-  public void setServiceGroup(String serviceGroup) {
-    this.serviceGroup = serviceGroup;
-  }
-
-  public String getName() {
-    return name;
-  }
-
-  @JsonProperty("service_name")
-  public void setName(String name) {
-    this.name = name;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    ServiceId serviceId = (ServiceId) o;
-
-    if (serviceGroup != null ? !serviceGroup.equals(serviceId.serviceGroup) : serviceId.serviceGroup != null)
-      return false;
-    return name != null ? name.equals(serviceId.name) : serviceId.name == null;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = serviceGroup != null ? serviceGroup.hashCode() : 0;
-    result = 31 * result + (name != null ? name.hashCode() : 0);
-    return result;
-  }
-
-  @Override
-  public String toString() {
-    return "ServiceId{" +
-      "serviceGroup='" + serviceGroup + '\'' +
-      ", name='" + name + '\'' +
-      '}';
-  }
-}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/Setting.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/Setting.java
index 34542e1..904c784 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/Setting.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/Setting.java
@@ -20,14 +20,8 @@ package org.apache.ambari.server.topology;
 
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
-import java.util.Objects;
-import java.util.Optional;
 import java.util.Set;
-import java.util.stream.Collectors;
-
-import com.google.common.base.Preconditions;
 
 public class Setting {
   /**
@@ -101,17 +95,7 @@ public class Setting {
     if (properties.containsKey(settingName)) {
       return properties.get(settingName);
     }
-    return Collections.emptySet();
-  }
 
-  public Optional<String> getSettingValue(String settingCategory, String propertyName) {
-    List<String> values = getSettingValue(settingCategory).stream().
-      flatMap(sv -> sv.entrySet().stream()).
-      filter(entry -> Objects.equals(entry.getKey(), propertyName)).
-      map(entry -> entry.getValue()).
-      collect(Collectors.toList());
-    Preconditions.checkState(values.size() < 2, "Ambiguous settings (%s) for category %s, property %s",
-      values.size(), settingCategory, propertyName);
-    return values.isEmpty() ? Optional.empty() : Optional.of(values.get(0));
+    return Collections.emptySet();
   }
 }
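
The removed Optional-based helper means callers now take the raw category value, a set of
name-to-value maps, and do their own filtering. A minimal caller-side sketch, assuming the
set-of-maps return type shown above:

    // Sketch: look up a single property within a settings category.
    String findSettingProperty(Setting setting, String category, String propertyName) {
      for (Map<String, String> entry : setting.getSettingValue(category)) {
        if (entry.containsKey(propertyName)) {
          return entry.get(propertyName);
        }
      }
      return null; // absent; the removed helper returned Optional.empty() here
    }
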
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
index 3b7dcc8..d07dec0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyManager.java
@@ -42,7 +42,6 @@ import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.AmbariServer;
 import org.apache.ambari.server.controller.RequestStatusResponse;
 import org.apache.ambari.server.controller.ShortTaskStatus;
-import org.apache.ambari.server.controller.StackV2;
 import org.apache.ambari.server.controller.internal.ArtifactResourceProvider;
 import org.apache.ambari.server.controller.internal.BaseClusterRequest;
 import org.apache.ambari.server.controller.internal.CalculatedStatus;
@@ -50,6 +49,7 @@ import org.apache.ambari.server.controller.internal.CredentialResourceProvider;
 import org.apache.ambari.server.controller.internal.ProvisionClusterRequest;
 import org.apache.ambari.server.controller.internal.RequestImpl;
 import org.apache.ambari.server.controller.internal.ScaleClusterRequest;
+import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.controller.spi.NoSuchParentResourceException;
 import org.apache.ambari.server.controller.spi.RequestStatus;
 import org.apache.ambari.server.controller.spi.Resource;
@@ -274,6 +274,7 @@ public class TopologyManager {
 
     final ClusterTopology topology = new ClusterTopologyImpl(ambariContext, request);
     final String clusterName = request.getClusterName();
+    final Stack stack = topology.getBlueprint().getStack();
     final String repoVersion = request.getRepositoryVersion();
     final Long repoVersionID = request.getRepositoryVersionId();
 
@@ -290,7 +291,7 @@ public class TopologyManager {
       addKerberosClient(topology);
 
       // refresh default stack config after adding KERBEROS_CLIENT component to topology
-      //topology.getBlueprint().getConfiguration().setParentConfiguration(stack.getConfiguration(topology.getBlueprint().getAllServices()));
+      topology.getBlueprint().getConfiguration().setParentConfiguration(stack.getConfiguration(topology.getBlueprint().getServices()));
 
       credential = request.getCredentialsMap().get(KDC_ADMIN_CREDENTIAL);
       if (credential == null) {
@@ -300,8 +301,9 @@ public class TopologyManager {
 
     topologyValidatorService.validateTopologyConfiguration(topology);
 
+
     // create resources
-    ambariContext.createAmbariResources(topology, clusterName, securityType);
+    ambariContext.createAmbariResources(topology, clusterName, securityType, repoVersion, repoVersionID);
 
     if (securityConfiguration != null && securityConfiguration.getDescriptor() != null) {
       submitKerberosDescriptorAsArtifact(clusterName, securityConfiguration.getDescriptor());
@@ -343,11 +345,8 @@ public class TopologyManager {
 
     //todo: this should be invoked as part of a generic lifecycle event which could possibly
     //todo: be tied to cluster state
-    //TODO add all stack or remove concrete stack version
-    Collection<StackV2> stackList = topology.getBlueprint().getStacks();
-    StackV2 stack = stackList.iterator().next();
-    ambariContext.persistInstallStateForUI(clusterName, stack.getName(), stack.getVersion());
 
+    ambariContext.persistInstallStateForUI(clusterName, stack.getName(), stack.getVersion());
     clusterProvisionWithBlueprintCreateRequests.put(clusterId, logicalRequest);
     return getRequestStatus(logicalRequest.getRequestId());
   }
@@ -1102,10 +1101,9 @@ public class TopologyManager {
    * @param topology  cluster topology
    */
   private void addKerberosClient(ClusterTopology topology) {
-    //TODO lookup KERBEROS_CLIENT
-//    for (HostGroupV2 group : topology.getBlueprint().getHostGroups().values()) {
-//      group.addComponent("KERBEROS_CLIENT");
-//    }
+    for (HostGroup group : topology.getBlueprint().getHostGroups().values()) {
+      group.addComponent("KERBEROS_CLIENT");
+    }
   }
 
   /**
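
Restoring the single-stack code path means the Kerberos branch again mutates every host group directly
and then re-points the blueprint configuration at the stack defaults, so KERBEROS_CLIENT and its config
types are visible downstream. A compressed sketch of that branch; the guard condition is a placeholder,
not the literal check used in this class:

    // Sketch of the restored Kerberos handling (clusterIsKerberized is a placeholder guard).
    if (clusterIsKerberized) {
      addKerberosClient(topology);  // adds KERBEROS_CLIENT to every host group
      // refresh stack defaults now that the new component is part of the topology
      topology.getBlueprint().getConfiguration()
          .setParentConfiguration(stack.getConfiguration(topology.getBlueprint().getServices()));
    }
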
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequest.java
index 632473a..bd5630b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/TopologyRequest.java
@@ -18,7 +18,6 @@
 
 package org.apache.ambari.server.topology;
 
-import java.util.Collection;
 import java.util.Map;
 
 /**
@@ -53,23 +52,16 @@ public interface TopologyRequest {
    *
    * @return associated blueprint instance
    */
-  BlueprintV2 getBlueprint();
+  Blueprint getBlueprint();
 
   /**
    * Get the cluster scoped configuration for the request.
    *
    * @return cluster scoped configuration
    */
-  @Deprecated
   Configuration getConfiguration();
 
   /**
-   * Returns services.
-   * @return
-   */
-  Collection<Service> getServiceConfigs();
-
-  /**
    * Get host group info.
    *
    * @return map of host group name to group info
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/tasks/PersistHostResourcesTask.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/tasks/PersistHostResourcesTask.java
index 00eb706..990aee7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/tasks/PersistHostResourcesTask.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/tasks/PersistHostResourcesTask.java
@@ -23,10 +23,8 @@ import java.util.HashSet;
 import java.util.Map;
 
 import org.apache.ambari.server.topology.ClusterTopology;
-import org.apache.ambari.server.topology.ComponentV2;
-import org.apache.ambari.server.topology.HostGroupV2;
+import org.apache.ambari.server.topology.HostGroup;
 import org.apache.ambari.server.topology.HostRequest;
-import org.apache.ambari.server.topology.Service;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -51,10 +49,10 @@ public class PersistHostResourcesTask extends TopologyHostTask  {
   public void runTask() {
     LOG.info("HostRequest: Executing RESOURCE_CREATION task for host: {}", hostRequest.getHostName());
 
-    HostGroupV2 group = hostRequest.getHostGroup();
-    Map<Service, Collection<ComponentV2>> serviceComponents = new HashMap<>();
-    for (Service service : group.getServices()) {
-      serviceComponents.put(service, new HashSet(group.getComponents(service)));
+    HostGroup group = hostRequest.getHostGroup();
+    Map<String, Collection<String>> serviceComponents = new HashMap<>();
+    for (String service : group.getServices()) {
+      serviceComponents.put(service, new HashSet<>(group.getComponents(service)));
     }
     clusterTopology.getAmbariContext().createAmbariHostResources(hostRequest.getClusterId(),
       hostRequest.getHostName(), serviceComponents);
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ClusterConfigTypeValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ClusterConfigTypeValidator.java
index 7ac75e9..0170186 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ClusterConfigTypeValidator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/ClusterConfigTypeValidator.java
@@ -16,10 +16,8 @@ package org.apache.ambari.server.topology.validators;
 import java.util.HashSet;
 import java.util.Set;
 
-import org.apache.ambari.server.controller.StackV2;
 import org.apache.ambari.server.topology.ClusterTopology;
 import org.apache.ambari.server.topology.InvalidTopologyException;
-import org.apache.ambari.server.topology.Service;
 import org.apache.ambari.server.topology.TopologyValidator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -46,10 +44,8 @@ public class ClusterConfigTypeValidator implements TopologyValidator {
 
     // collecting all config types for services in the blueprint (from the related stack)
     Set<String> stackServiceConfigTypes = new HashSet<>();
-    for (Service service : topology.getBlueprint().getAllServices()) {
-      String stackId = service.getStackId();
-      StackV2 stack = topology.getBlueprint().getStackById(stackId);
-      stackServiceConfigTypes.addAll(stack.getConfigurationTypes(service.getType()));
+    for (String serviceName : topology.getBlueprint().getServices()) {
+      stackServiceConfigTypes.addAll(topology.getBlueprint().getStack().getConfigurationTypes(serviceName));
     }
 
     // identifying invalid config types
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/HiveServiceValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/HiveServiceValidator.java
index 80b6560..80b2593 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/HiveServiceValidator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/HiveServiceValidator.java
@@ -14,13 +14,9 @@
 
 package org.apache.ambari.server.topology.validators;
 
-import java.util.Collection;
-
 import org.apache.ambari.server.topology.ClusterTopology;
-import org.apache.ambari.server.topology.ComponentV2;
 import org.apache.ambari.server.topology.Configuration;
 import org.apache.ambari.server.topology.InvalidTopologyException;
-import org.apache.ambari.server.topology.Service;
 import org.apache.ambari.server.topology.TopologyValidator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -41,41 +37,34 @@ public class HiveServiceValidator implements TopologyValidator {
   @Override
   public void validate(ClusterTopology topology) throws InvalidTopologyException {
 
-    Collection<Service> services = topology.getBlueprint().getServicesByType(HIVE_SERVICE);
     // there is no hive configured in the blueprint, nothing to do (does the validator apply?)
-    if (services.isEmpty()) {
+    if (!topology.getBlueprint().getServices().contains(HIVE_SERVICE)) {
       LOGGER.info(" [{}] service is not listed in the blueprint, skipping hive service validation.", HIVE_SERVICE);
       return;
     }
 
-    for (Service service : services) {
-
-      Configuration serviceConfiguration = service.getConfiguration();
+    Configuration clusterConfiguration = topology.getConfiguration();
 
-      // hive database settings are missing (this should never be the case, defaults come from the stack def.)
-      if (!serviceConfiguration.getAllConfigTypes().contains(HIVE_ENV)) {
-        String errorMessage = String.format(" [ %s ] config type is missing from the service [ %s ]. HIVE service validation failed.", HIVE_ENV, HIVE_SERVICE);
-        LOGGER.error(errorMessage);
-        throw new InvalidTopologyException(errorMessage);
-      }
-
-      // hive database has custom configuration, skipping validation
-      if (!HIVE_DB_DEFAULT.equals(serviceConfiguration.getPropertyValue(HIVE_ENV, HIVE_DB_PROPERTY))) {
-        LOGGER.info("Custom hive database settings detected. HIVE service validation succeeded.");
-        return;
-      }
+    // hive database settings are missing (this should never be the case, defaults come from the stack def.)
+    if (!clusterConfiguration.getAllConfigTypes().contains(HIVE_ENV)) {
+      String errorMessage = String.format(" [ %s ] config type is missing from the service [ %s ]. HIVE service validation failed.", HIVE_ENV, HIVE_SERVICE);
+      LOGGER.error(errorMessage);
+      throw new InvalidTopologyException(errorMessage);
+    }
 
-      Collection<ComponentV2> mySqlComponents = topology.getBlueprint().getComponentsByType(service, MYSQL_SERVER_COMPONENT);
+    // hive database has custom configuration, skipping validation
+    if (!HIVE_DB_DEFAULT.equals(clusterConfiguration.getPropertyValue(HIVE_ENV, HIVE_DB_PROPERTY))) {
+      LOGGER.info("Custom hive database settings detected. HIVE service validation succeeded.");
+      return;
+    }
 
-      // hive database settings need the mysql-server component in the blueprint
-      if (mySqlComponents.isEmpty()) {
-        String errorMessage = String.format("Component [%s] must explicitly be set in the blueprint when hive database " +
+    // hive database settings need the mysql-server component in the blueprint
+    if (!topology.getBlueprint().getComponents(HIVE_SERVICE).contains(MYSQL_SERVER_COMPONENT)) {
+      String errorMessage = String.format("Component [%s] must explicitly be set in the blueprint when hive database " +
         "is configured with the current settings. HIVE service validation failed.", MYSQL_SERVER_COMPONENT);
-        LOGGER.error(errorMessage);
-        throw new InvalidTopologyException(errorMessage);
-      }
+      LOGGER.error(errorMessage);
+      throw new InvalidTopologyException(errorMessage);
     }
-
   }
 
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidator.java
index fd07572..4022fcb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidator.java
@@ -21,12 +21,12 @@ import java.util.Map;
 import java.util.TreeMap;
 import java.util.TreeSet;
 
-import org.apache.ambari.server.controller.StackV2;
+import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.ambari.server.topology.Blueprint;
 import org.apache.ambari.server.topology.ClusterTopology;
-import org.apache.ambari.server.topology.HostGroupV2;
+import org.apache.ambari.server.topology.HostGroup;
 import org.apache.ambari.server.topology.InvalidTopologyException;
-import org.apache.ambari.server.topology.Service;
 import org.apache.ambari.server.topology.TopologyValidator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -52,11 +52,14 @@ public class RequiredConfigPropertiesValidator implements TopologyValidator {
   @Override
   public void validate(ClusterTopology topology) throws InvalidTopologyException {
 
+    // collect required properties
+    Map<String, Map<String, Collection<String>>> requiredPropertiesByService = getRequiredPropertiesByService(topology.getBlueprint());
+
     // find missing properties in the cluster configuration
     Map<String, Collection<String>> missingProperties = new TreeMap<>();
     Map<String, Map<String, String>> topologyConfiguration = new HashMap<>(topology.getConfiguration().getFullProperties(1));
 
-    for (HostGroupV2 hostGroup : topology.getBlueprint().getHostGroups().values()) {
+    for (HostGroup hostGroup : topology.getBlueprint().getHostGroups().values()) {
       LOGGER.debug("Processing hostgroup configurations for hostgroup: {}", hostGroup.getName());
 
       // copy of all configurations available in the topology hgConfig -> topologyConfig -> bpConfig
@@ -70,21 +73,21 @@ public class RequiredConfigPropertiesValidator implements TopologyValidator {
         }
       }
 
-      for (Service hostGroupService : hostGroup.getServices()) {
+      for (String hostGroupService : hostGroup.getServices()) {
 
-        // collect required properties
-        Map<String, Collection<String>> requiredPropertiesForService = getRequiredPropertiesForService(hostGroupService);
-        if (requiredPropertiesForService.isEmpty()) {
+        if (!requiredPropertiesByService.containsKey(hostGroupService)) {
           // there are no required properties for the service
           LOGGER.debug("There are no required properties found for hostgroup/service: [{}/{}]", hostGroup.getName(), hostGroupService);
           continue;
         }
 
-        for (String configType : requiredPropertiesForService.keySet()) {
+        Map<String, Collection<String>> requiredPropertiesByType = requiredPropertiesByService.get(hostGroupService);
+
+        for (String configType : requiredPropertiesByType.keySet()) {
 
           // We need a copy not to modify the original
           Collection<String> requiredPropertiesForType = new HashSet(
-            requiredPropertiesForService.get(configType));
+              requiredPropertiesByType.get(configType));
 
           if (!operationalConfigurations.containsKey(configType)) {
             // all required configuration is missing for the config type
@@ -113,48 +116,60 @@ public class RequiredConfigPropertiesValidator implements TopologyValidator {
 
 
   /**
-   * Collects required properties for a specified services in the blueprint. Configuration properties are returned
-   * by configuration type. configType -> properties
-   * @param service the blueprint from the cluster topology
+   * Collects required properties for services in the blueprint. Configuration properties are returned by configuration type.
+   * service -> configType -> properties
+   *
+   * @param blueprint the blueprint from the cluster topology
    * @return a map with configuration types mapped to collections of required property names
    */
 
-  private Map<String, Collection<String>> getRequiredPropertiesForService(Service service) {
+  private Map<String, Map<String, Collection<String>>> getRequiredPropertiesByService(Blueprint blueprint) {
 
-    LOGGER.debug("Collecting required properties for the service: {}", service.getName());
+    Map<String, Map<String, Collection<String>>> requiredPropertiesForServiceByType = new HashMap<>();
 
-    Collection<StackV2.ConfigProperty> requiredConfigsForService = service.getStack().
-      getRequiredConfigurationProperties(service.getType());
-    Map<String, Collection<String>> requiredPropertiesByConfigType = new HashMap<>();
+    for (String bpService : blueprint.getServices()) {
+      LOGGER.debug("Collecting required properties for the service: {}", bpService);
 
-    for (StackV2.ConfigProperty configProperty : requiredConfigsForService) {
+      Collection<Stack.ConfigProperty> requiredConfigsForService = blueprint.getStack().getRequiredConfigurationProperties(bpService);
+      Map<String, Collection<String>> requiredPropertiesByConfigType = new HashMap<>();
 
-      if (configProperty.getPropertyTypes() != null && configProperty.getPropertyTypes().contains(PropertyInfo.PropertyType.PASSWORD)) {
-        LOGGER.debug("Skipping required property validation for password type: {}", configProperty.getName());
-        // skip password types
-        continue;
-      }
+      for (Stack.ConfigProperty configProperty : requiredConfigsForService) {
 
-      // add collection of required properties
-      Collection<String> requiredPropsForType = new HashSet<>();
-      if (requiredPropertiesByConfigType.containsKey(configProperty.getType())) {
-        requiredPropsForType = requiredPropertiesByConfigType.get(configProperty.getType());
-      } else {
-        LOGGER.debug("Adding required properties entry for configuration type: {}", configProperty.getType());
-        requiredPropertiesByConfigType.put(configProperty.getType(), requiredPropsForType);
-      }
+        if (configProperty.getPropertyTypes() != null && configProperty.getPropertyTypes().contains(PropertyInfo.PropertyType.PASSWORD)) {
+          LOGGER.debug("Skipping required property validation for password type: {}", configProperty.getName());
+          // skip password types
+          continue;
+        }
 
-      requiredPropsForType.add(configProperty.getName());
-      LOGGER.debug("Added required property for service; {}, configuration type: {}, property: {}", service.getName(),
-        configProperty.getType(), configProperty.getName());
+        // add or get the per-service map of required properties keyed by config type
+        if (requiredPropertiesForServiceByType.containsKey(bpService)) {
+          requiredPropertiesByConfigType = requiredPropertiesForServiceByType.get(bpService);
+        } else {
+          LOGGER.debug("Adding required properties entry for service: {}", bpService);
+          requiredPropertiesForServiceByType.put(bpService, requiredPropertiesByConfigType);
+        }
+
+        // add collection of required properties
+        Collection<String> requiredPropsForType = new HashSet<>();
+        if (requiredPropertiesByConfigType.containsKey(configProperty.getType())) {
+          requiredPropsForType = requiredPropertiesByConfigType.get(configProperty.getType());
+        } else {
+          LOGGER.debug("Adding required properties entry for configuration type: {}", configProperty.getType());
+          requiredPropertiesByConfigType.put(configProperty.getType(), requiredPropsForType);
+        }
+
+        requiredPropsForType.add(configProperty.getName());
+        LOGGER.debug("Added required property for service; {}, configuration type: {}, property: {}", bpService,
+          configProperty.getType(), configProperty.getName());
+      }
     }
 
-    return requiredPropertiesByConfigType;
+    LOGGER.info("Identified required properties for blueprint services: {}", requiredPropertiesForServiceByType);
+    return requiredPropertiesForServiceByType;
 
   }
 
-  private Map<String, Collection<String>> addTomissingProperties(Map<String, Collection<String>> missingProperties,
-                                                                 String hostGroup, Collection<String> values) {
+  private Map<String, Collection<String>> addTomissingProperties(Map<String, Collection<String>> missingProperties, String hostGroup, Collection<String> values) {
     Map<String, Collection<String>> missing;
 
     if (missingProperties == null) {
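
getRequiredPropertiesByService produces a nested service -> config type -> property-name map that the
host-group loop above consumes. An illustration of the expected shape; the service, config type, and
property names below are examples only, not taken from any stack definition:

    // Example shape of requiredPropertiesForServiceByType (values are illustrative):
    Map<String, Map<String, Collection<String>>> required = new HashMap<>();
    Map<String, Collection<String>> hdfsByType = new HashMap<>();
    hdfsByType.put("hdfs-site",
        new HashSet<>(Arrays.asList("dfs.namenode.name.dir", "dfs.datanode.data.dir")));
    required.put("HDFS", hdfsByType);
    // For each host group, every service's entries are checked against the merged
    // configuration; any property still missing is collected per host group.
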
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidator.java
index bd21b08..38a5153 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/RequiredPasswordValidator.java
@@ -20,15 +20,13 @@ import java.util.HashSet;
 import java.util.Map;
 
 import org.apache.ambari.server.controller.RootComponent;
-import org.apache.ambari.server.controller.StackV2;
+import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.state.PropertyInfo;
-import org.apache.ambari.server.topology.BlueprintV2;
+import org.apache.ambari.server.topology.Blueprint;
 import org.apache.ambari.server.topology.ClusterTopology;
-import org.apache.ambari.server.topology.ComponentV2;
+import org.apache.ambari.server.topology.HostGroup;
 import org.apache.ambari.server.topology.HostGroupInfo;
-import org.apache.ambari.server.topology.HostGroupV2;
 import org.apache.ambari.server.topology.InvalidTopologyException;
-import org.apache.ambari.server.topology.Service;
 import org.apache.ambari.server.topology.TopologyValidator;
 
 /**
@@ -83,23 +81,23 @@ public class RequiredPasswordValidator implements TopologyValidator {
           groupEntry.getValue().getConfiguration().getFullProperties(3);
 
       Collection<String> processedServices = new HashSet<>();
-      BlueprintV2 blueprint = topology.getBlueprint();
+      Blueprint blueprint = topology.getBlueprint();
+      Stack stack = blueprint.getStack();
 
-      HostGroupV2 hostGroup = blueprint.getHostGroup(hostGroupName);
-      for (ComponentV2 component : hostGroup.getComponents()) {
+      HostGroup hostGroup = blueprint.getHostGroup(hostGroupName);
+      for (String component : hostGroup.getComponentNames()) {
         //for now, AMBARI is not recognized as a service in Stacks
-
         if (component.equals(RootComponent.AMBARI_SERVER.name())) {
           continue;
         }
 
-        Service service = component.getService();
-        if (processedServices.add(service.getName())) {
+        String serviceName = stack.getServiceForComponent(component);
+        if (processedServices.add(serviceName)) {
           //todo: do I need to subtract excluded configs?
-          Collection<StackV2.ConfigProperty> requiredProperties =
-          service.getStack().getRequiredConfigurationProperties(service.getType(), PropertyInfo.PropertyType.PASSWORD);
+          Collection<Stack.ConfigProperty> requiredProperties =
+              stack.getRequiredConfigurationProperties(serviceName, PropertyInfo.PropertyType.PASSWORD);
 
-          for (StackV2.ConfigProperty property : requiredProperties) {
+          for (Stack.ConfigProperty property : requiredProperties) {
             String category = property.getType();
             String name = property.getName();
             if (! propertyExists(topology, groupProperties, category, name)) {
@@ -159,4 +157,4 @@ public class RequiredPasswordValidator implements TopologyValidator {
   public int hashCode() {
     return defaultPassword != null ? defaultPassword.hashCode() : 0;
   }
-}
\ No newline at end of file
+}
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidator.java
index 2adda10..f028a31 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidator.java
@@ -19,7 +19,6 @@ import java.util.Set;
 
 import org.apache.ambari.server.topology.ClusterTopology;
 import org.apache.ambari.server.topology.InvalidTopologyException;
-import org.apache.ambari.server.topology.Service;
 import org.apache.ambari.server.topology.TopologyValidator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -37,27 +36,25 @@ public class StackConfigTypeValidator implements TopologyValidator {
 
   @Override
   public void validate(ClusterTopology topology) throws InvalidTopologyException {
-    for (Service service : topology.getServiceConfigs()) {
-      // get the config types form the request
-      Set<String> incomingConfigTypes = new HashSet<>(service.getConfiguration().getAllConfigTypes());
 
-      if (incomingConfigTypes.isEmpty()) {
-        LOGGER.debug("No config types to be checked.");
-        return;
-      }
+    // get the config types from the request
+    Set<String> incomingConfigTypes = new HashSet<>(topology.getConfiguration().getAllConfigTypes());
 
-      Set<String> stackConfigTypes = new HashSet<>(service.getStack().getConfiguration().getAllConfigTypes());
+    if (incomingConfigTypes.isEmpty()) {
+      LOGGER.debug("No config types to be checked.");
+      return;
+    }
 
-      // remove all "valid" config types from the incoming set
-      incomingConfigTypes.removeAll(stackConfigTypes);
+    Set<String> stackConfigTypes = new HashSet<>(topology.getBlueprint().getStack().getConfiguration().getAllConfigTypes());
 
-      if (!incomingConfigTypes.isEmpty()) {
-        // there are config types in the request that are not in the stack
-        String message = String.format("The following config types are not defined in the stack: %s ", incomingConfigTypes);
-        LOGGER.error(message);
-        throw new InvalidTopologyException(message);
-      }
+    // remove all "valid" config types from the incoming set
+    incomingConfigTypes.removeAll(stackConfigTypes);
 
+    if (!incomingConfigTypes.isEmpty()) {
+      // there are config types in the request that are not in the stack
+      String message = String.format("The following config types are not defined in the stack: %s ", incomingConfigTypes);
+      LOGGER.error(message);
+      throw new InvalidTopologyException(message);
     }
   }
 }
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidator.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidator.java
index 00b9838..e75ffa4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/validators/UnitValidator.java
@@ -22,9 +22,10 @@ import static org.apache.ambari.server.controller.internal.UnitUpdater.PropertyV
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.ambari.server.controller.StackV2;
+import org.apache.ambari.server.controller.internal.Stack;
 import org.apache.ambari.server.controller.internal.UnitUpdater.PropertyUnit;
 import org.apache.ambari.server.topology.ClusterTopology;
+import org.apache.ambari.server.topology.HostGroupInfo;
 import org.apache.ambari.server.topology.InvalidTopologyException;
 import org.apache.ambari.server.topology.TopologyValidator;
 
@@ -41,34 +42,33 @@ public class UnitValidator implements TopologyValidator {
 
   @Override
   public void validate(ClusterTopology topology) throws InvalidTopologyException {
-    topology.getServiceConfigs().forEach(service -> {
-      String stackId = service.getStackId();
-      validateConfig(service.getConfiguration().getFullProperties(), topology.getBlueprint().getStackById(stackId));
-      topology.getHostGroupInfo().values().forEach(hostGroup -> {
-        String stackId2 = service.getStackId();
-        validateConfig(hostGroup.getConfiguration().getFullProperties(), topology.getBlueprint().getStackById(stackId2));
-      });
-    });
+    Stack stack = topology.getBlueprint().getStack();
+    validateConfig(topology.getConfiguration().getFullProperties(), stack);
+    for (HostGroupInfo hostGroup : topology.getHostGroupInfo().values()) {
+      validateConfig(hostGroup.getConfiguration().getFullProperties(), stack);
+    }
   }
 
-  private void validateConfig(Map<String, Map<String, String>> configuration, StackV2 stack) {
-    configuration.entrySet().forEach(each ->
-      validateConfigType(each.getKey(), each.getValue(), stack)
-    );
+  private void validateConfig(Map<String, Map<String, String>> configuration, Stack stack) {
+    for (Map.Entry<String, Map<String, String>> each : configuration.entrySet()) {
+      validateConfigType(each.getKey(), each.getValue(), stack);
+    }
   }
 
-  private void validateConfigType(String configType, Map<String, String> config, StackV2 stack) {
-    config.keySet().forEach(propertyName -> validateProperty(configType, config, propertyName, stack));
+  private void validateConfigType(String configType, Map<String, String> config, Stack stack) {
+    for (String propertyName : config.keySet()) {
+      validateProperty(configType, config, propertyName, stack);
+    }
   }
 
-  private void validateProperty(String configType, Map<String, String> config, String propertyName, StackV2 stack) {
+  private void validateProperty(String configType, Map<String, String> config, String propertyName, Stack stack) {
     relevantProps.stream()
       .filter(each -> each.hasTypeAndName(configType, propertyName))
       .findFirst()
       .ifPresent(relevantProperty -> checkUnit(config, stack, relevantProperty));
   }
 
-  private void checkUnit(Map<String, String> configToBeValidated, StackV2 stack, UnitValidatedProperty prop) {
+  private void checkUnit(Map<String, String> configToBeValidated, Stack stack, UnitValidatedProperty prop) {
     PropertyUnit stackUnit = PropertyUnit.of(stack, prop);
     PropertyValue value = PropertyValue.of(prop.getPropertyName(), configToBeValidated.get(prop.getPropertyName()));
     if (value.hasAnyUnit() && !value.hasUnit(stackUnit)) {
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
index 8ca2359..b5ae330 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Derby-CREATE.sql
@@ -610,13 +610,6 @@ CREATE TABLE blueprint (
   CONSTRAINT PK_blueprint PRIMARY KEY (blueprint_name),
   CONSTRAINT FK_blueprint_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id));
 
-CREATE TABLE blueprintv2 (
-  blueprint_name VARCHAR(255) NOT NULL,
-  security_type VARCHAR(32) NOT NULL DEFAULT 'NONE',
-  security_descriptor_reference VARCHAR(255),
-  content VARCHAR(32000) NOT NULL,
-  CONSTRAINT PK_blueprintv2 PRIMARY KEY (blueprint_name));
-
 CREATE TABLE hostgroup (
   blueprint_name VARCHAR(255) NOT NULL,
   name VARCHAR(255) NOT NULL,
@@ -844,18 +837,6 @@ CREATE TABLE topology_request (
   CONSTRAINT PK_topology_request PRIMARY KEY (id),
   CONSTRAINT FK_topology_request_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id));
 
-CREATE TABLE topology_configurations (
-  id BIGINT NOT NULL,
-  request_id BIGINT NOT NULL,
-  service_group_name VARCHAR(100) NOT NULL,
-  service_name VARCHAR(100) NOT NULL,
-  component_name VARCHAR(100),
-  host_group_name VARCHAR(100),
-  cluster_properties VARCHAR(3000),
-  cluster_attributes VARCHAR(3000),
-  CONSTRAINT PK_topology_configurations PRIMARY KEY (id),
-  CONSTRAINT FK_hostgroup_req_id FOREIGN KEY (request_id) REFERENCES topology_request(id));
-
 CREATE TABLE topology_hostgroup (
   id BIGINT NOT NULL,
   name VARCHAR(255) NOT NULL,
diff --git a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
index 8bfc7eb..0d2d0dc 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
@@ -627,19 +627,12 @@ CREATE TABLE blueprint (
   CONSTRAINT PK_blueprint PRIMARY KEY (blueprint_name),
   CONSTRAINT FK_blueprint_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id));
 
-CREATE TABLE blueprintv2 (
-  blueprint_name VARCHAR(255) NOT NULL,
-  security_type VARCHAR(32) NOT NULL DEFAULT 'NONE',
-  security_descriptor_reference VARCHAR(255),
-  content LONGTEXT NOT NULL,
-  CONSTRAINT PK_blueprintv2 PRIMARY KEY (blueprint_name),
-  CONSTRAINT FK_blueprintv2_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id));
-
 CREATE TABLE hostgroup (
   blueprint_name VARCHAR(100) NOT NULL,
   name VARCHAR(100) NOT NULL,
   cardinality VARCHAR(255) NOT NULL,
-  CONSTRAINT PK_hostgroup PRIMARY KEY (blueprint_name, name));
+  CONSTRAINT PK_hostgroup PRIMARY KEY (blueprint_name, name),
+  CONSTRAINT FK_hg_blueprint_name FOREIGN KEY (blueprint_name) REFERENCES blueprint(blueprint_name));
 
 CREATE TABLE hostgroup_component (
   blueprint_name VARCHAR(100) NOT NULL,
@@ -860,18 +853,6 @@ CREATE TABLE topology_request (
   CONSTRAINT PK_topology_request PRIMARY KEY (id),
   CONSTRAINT FK_topology_request_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id));
 
-CREATE TABLE topology_configurations (
-  id BIGINT NOT NULL,
-  request_id BIGINT NOT NULL,
-  service_group_name VARCHAR(100) NOT NULL,
-  service_name VARCHAR(100) NOT NULL,
-  component_name VARCHAR(100),
-  host_group_name VARCHAR(100),
-  cluster_properties LONGTEXT,
-  cluster_attributes LONGTEXT,
-  CONSTRAINT PK_topology_configurations PRIMARY KEY (id),
-  CONSTRAINT FK_hostgroup_req_id FOREIGN KEY (request_id) REFERENCES topology_request(id));
-
 CREATE TABLE topology_hostgroup (
   id BIGINT NOT NULL,
   name VARCHAR(255) NOT NULL,
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
index 0168af8..c954cd3 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
@@ -607,14 +607,6 @@ CREATE TABLE blueprint (
   CONSTRAINT PK_blueprint PRIMARY KEY (blueprint_name),
   CONSTRAINT FK_blueprint_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id));
 
-CREATE TABLE blueprintv2 (
-  blueprint_name VARCHAR2(255) NOT NULL,
-  security_type VARCHAR2(32) NOT NULL DEFAULT 'NONE',
-  security_descriptor_reference VARCHAR2(255),
-  content CLOB NOT NULL,
-  CONSTRAINT PK_blueprintv2 PRIMARY KEY (blueprint_name));
-
-
 CREATE TABLE hostgroup (
   blueprint_name VARCHAR2(255) NOT NULL,
   name VARCHAR2(255) NOT NULL,
@@ -839,18 +831,6 @@ CREATE TABLE topology_request (
   CONSTRAINT PK_topology_request PRIMARY KEY (id),
   CONSTRAINT FK_topology_request_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id));
 
-CREATE TABLE topology_configurations (
-  id NUMBER(19) NOT NULL,
-  request_id NUMBER(19) NOT NULL,
-  service_group_name VARCHAR(100) NOT NULL,
-  service_name VARCHAR(100) NOT NULL,
-  component_name VARCHAR(100),
-  host_group_name VARCHAR(100),
-  cluster_properties CLOB,
-  cluster_attributes CLOB,
-  CONSTRAINT PK_topology_configurations PRIMARY KEY (id),
-  CONSTRAINT FK_hostgroup_req_id FOREIGN KEY (request_id) REFERENCES topology_request(id));
-
 CREATE TABLE topology_hostgroup (
   id NUMBER(19) NOT NULL,
   name VARCHAR(255) NOT NULL,
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
index 1ebc0ee..e8850e3 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
@@ -579,13 +579,6 @@ CREATE TABLE blueprint (
   CONSTRAINT PK_blueprint PRIMARY KEY (blueprint_name),
   CONSTRAINT FK_blueprint_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id));
 
-CREATE TABLE blueprintv2 (
-  blueprint_name VARCHAR(255) NOT NULL,
-  security_type VARCHAR(32) NOT NULL DEFAULT 'NONE',
-  security_descriptor_reference VARCHAR(255),
-  content VARCHAR(32000) NOT NULL,
-  CONSTRAINT PK_blueprintv2 PRIMARY KEY (blueprint_name));
-
 CREATE TABLE hostgroup (
   blueprint_name VARCHAR(255) NOT NULL,
   name VARCHAR(255) NOT NULL,
@@ -597,9 +590,6 @@ CREATE TABLE hostgroup_component (
   blueprint_name VARCHAR(255) NOT NULL,
   hostgroup_name VARCHAR(255) NOT NULL,
   name VARCHAR(255) NOT NULL,
-  service_group VARCHAR(255) NOT NULL,
-  service_name VARCHAR(255) NOT NULL,
-  type VARCHAR(255) NOT NULL,
   provision_action VARCHAR(255),
   CONSTRAINT PK_hostgroup_component PRIMARY KEY (blueprint_name, hostgroup_name, name),
   CONSTRAINT FK_hgc_blueprint_name FOREIGN KEY (blueprint_name, hostgroup_name) REFERENCES hostgroup (blueprint_name, name));
@@ -817,18 +807,6 @@ CREATE TABLE topology_request (
   CONSTRAINT PK_topology_request PRIMARY KEY (id),
   CONSTRAINT FK_topology_request_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id));
 
-CREATE TABLE topology_configurations (
-  id BIGINT NOT NULL,
-  request_id BIGINT NOT NULL,
-  service_group_name VARCHAR(100) NOT NULL,
-  service_name VARCHAR(100) NOT NULL,
-  component_name VARCHAR(100),
-  host_group_name VARCHAR(100),
-  cluster_properties TEXT,
-  cluster_attributes TEXT,
-  CONSTRAINT PK_topology_configurations PRIMARY KEY (id),
-  CONSTRAINT FK_hostgroup_req_id FOREIGN KEY (request_id) REFERENCES topology_request(id));
-
 CREATE TABLE topology_hostgroup (
   id BIGINT NOT NULL,
   name VARCHAR(255) NOT NULL,
diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
index dc4bafd..cbd5aaf 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-SQLAnywhere-CREATE.sql
@@ -604,14 +604,6 @@ CREATE TABLE blueprint (
   CONSTRAINT PK_blueprint PRIMARY KEY (blueprint_name),
   CONSTRAINT FK_blueprint_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id));
 
-CREATE TABLE blueprintv2 (
-  blueprint_name VARCHAR(255) NOT NULL,
-  security_type VARCHAR(32) NOT NULL DEFAULT 'NONE',
-  security_descriptor_reference VARCHAR(255),
-  content TEXT NOT NULL,
-  CONSTRAINT PK_blueprintv2 PRIMARY KEY (blueprint_name));
-
-
 CREATE TABLE hostgroup (
   blueprint_name VARCHAR(255) NOT NULL,
   name VARCHAR(255) NOT NULL,
@@ -838,18 +830,6 @@ CREATE TABLE topology_request (
   CONSTRAINT PK_topology_request PRIMARY KEY (id),
   CONSTRAINT FK_topology_request_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id));
 
-CREATE TABLE topology_configurations (
-  id NUMERIC(19) NOT NULL,
-  request_id NUMERIC(19) NOT NULL,
-  service_group_name VARCHAR(100) NOT NULL,
-  service_name VARCHAR(100) NOT NULL,
-  component_name VARCHAR(100),
-  host_group_name VARCHAR(100),
-  cluster_properties TEXT,
-  cluster_attributes TEXT,
-  CONSTRAINT PK_topology_configurations PRIMARY KEY (id),
-  CONSTRAINT FK_hostgroup_req_id FOREIGN KEY (request_id) REFERENCES topology_request(id));
-
 CREATE TABLE topology_hostgroup (
   id NUMERIC(19) NOT NULL,
   name VARCHAR(255) NOT NULL,
diff --git a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
index 8bef24f..ad83318 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-SQLServer-CREATE.sql
@@ -613,20 +613,12 @@ CREATE TABLE blueprint (
   CONSTRAINT PK_blueprint PRIMARY KEY CLUSTERED (blueprint_name),
   CONSTRAINT FK_blueprint_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id));
 
-
-CREATE TABLE blueprintv2 (
-  blueprint_name VARCHAR(255) NOT NULL,
-  security_type VARCHAR(32) NOT NULL DEFAULT 'NONE',
-  security_descriptor_reference VARCHAR(255),
-  content VARCHAR(MAX) NOT NULL,
-  CONSTRAINT PK_blueprintv2 PRIMARY KEY (blueprint_name),
-  CONSTRAINT FK_blueprintv2_stack_id FOREIGN KEY (stack_id) REFERENCES stack(stack_id));
-
 CREATE TABLE hostgroup (
   blueprint_name VARCHAR(255) NOT NULL,
   NAME VARCHAR(255) NOT NULL,
   cardinality VARCHAR(255) NOT NULL,
-  CONSTRAINT PK_hostgroup PRIMARY KEY CLUSTERED (blueprint_name, NAME));
+  CONSTRAINT PK_hostgroup PRIMARY KEY CLUSTERED (blueprint_name, NAME),
+  CONSTRAINT FK_hg_blueprint_name FOREIGN KEY (blueprint_name) REFERENCES blueprint(blueprint_name));
 
 CREATE TABLE hostgroup_component (
   blueprint_name VARCHAR(255) NOT NULL,
@@ -860,18 +852,6 @@ CREATE TABLE topology_request (
   CONSTRAINT PK_topology_request PRIMARY KEY CLUSTERED (id),
   CONSTRAINT FK_topology_request_cluster_id FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id));
 
-CREATE TABLE topology_configurations (
-  id BIGINT NOT NULL,
-  request_id BIGINT NOT NULL,
-  service_group_name VARCHAR(100) NOT NULL,
-  service_name VARCHAR(100) NOT NULL,
-  component_name VARCHAR(100),
-  host_group_name VARCHAR(100),
-  cluster_properties TEXT,
-  cluster_attributes TEXT,
-  CONSTRAINT PK_topology_configurations PRIMARY KEY CLUSTERED(id),
-  CONSTRAINT FK_hostgroup_req_id FOREIGN KEY (request_id) REFERENCES topology_request(id));
-
 CREATE TABLE topology_hostgroup (
   id BIGINT NOT NULL,
   name VARCHAR(255) NOT NULL,
diff --git a/ambari-server/src/main/resources/META-INF/persistence.xml b/ambari-server/src/main/resources/META-INF/persistence.xml
index 336dc7a..12a37b8 100644
--- a/ambari-server/src/main/resources/META-INF/persistence.xml
+++ b/ambari-server/src/main/resources/META-INF/persistence.xml
@@ -23,7 +23,6 @@
     <class>org.apache.ambari.server.orm.entities.BlueprintConfigEntity</class>
     <class>org.apache.ambari.server.orm.entities.BlueprintSettingEntity</class>
     <class>org.apache.ambari.server.orm.entities.BlueprintEntity</class>
-    <class>org.apache.ambari.server.orm.entities.BlueprintV2Entity</class>
     <class>org.apache.ambari.server.orm.entities.ClusterConfigEntity</class>
     <class>org.apache.ambari.server.orm.entities.ClusterEntity</class>
     <class>org.apache.ambari.server.orm.entities.ServiceGroupEntity</class>
@@ -92,7 +91,6 @@
     <class>org.apache.ambari.server.orm.entities.WidgetLayoutEntity</class>
     <class>org.apache.ambari.server.orm.entities.WidgetLayoutUserWidgetEntity</class>
     <class>org.apache.ambari.server.orm.entities.TopologyRequestEntity</class>
-    <class>org.apache.ambari.server.orm.entities.TopologyConfigurationsEntity</class>
     <class>org.apache.ambari.server.orm.entities.TopologyLogicalRequestEntity</class>
     <class>org.apache.ambari.server.orm.entities.TopologyHostRequestEntity</class>
     <class>org.apache.ambari.server.orm.entities.TopologyHostGroupEntity</class>
diff --git a/ambari-server/src/main/resources/properties.json b/ambari-server/src/main/resources/properties.json
index 225bd9e..a995049 100644
--- a/ambari-server/src/main/resources/properties.json
+++ b/ambari-server/src/main/resources/properties.json
@@ -375,10 +375,9 @@
         "host_groups",
         "host_groups/components",
         "host_groups/cardinality",
-        "service_groups",
-        "repository_versions",
+        "configurations",
         "validate_topology",
-        "cluster_settings"
+        "settings"
     ],
     "Recommendation":[
         "Recommendation/id",
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRendererTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRendererTest.java
index 3d44c55..dcb9ef0 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRendererTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/query/render/ClusterBlueprintRendererTest.java
@@ -155,7 +155,7 @@ public class ClusterBlueprintRendererTest {
     groupInfoMap.put("host_group_1", group1Info);
     groupInfoMap.put("host_group_2", group2Info);
 
-    expect(topology.isNameNodeHAEnabled(null)).andReturn(false).anyTimes();
+    expect(topology.isNameNodeHAEnabled()).andReturn(false).anyTimes();
     expect(topology.getConfiguration()).andReturn(clusterConfig).anyTimes();
     expect(topology.getBlueprint()).andReturn(null).anyTimes();
     expect(topology.getHostGroupInfo()).andReturn(groupInfoMap).anyTimes();
@@ -207,7 +207,7 @@ public class ClusterBlueprintRendererTest {
     groupInfoMap.put("host_group_1", group1Info);
     groupInfoMap.put("host_group_2", group2Info);
 
-    expect(topology.isNameNodeHAEnabled(null)).andReturn(false).anyTimes();
+    expect(topology.isNameNodeHAEnabled()).andReturn(false).anyTimes();
     expect(topology.getConfiguration()).andReturn(clusterConfig).anyTimes();
     expect(topology.getBlueprint()).andReturn(null).anyTimes();
     expect(topology.getHostGroupInfo()).andReturn(groupInfoMap).anyTimes();
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index bbd0fea..8c44632 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@ -322,7 +322,7 @@ public class AmbariMetaInfoTest {
   }
 
   /**
-   * Method: Map<String, ServiceInfo> getServiceConfigs(String stackName, String
+   * Method: Map<String, ServiceInfo> getServices(String stackName, String
    * version, String serviceName)
    * @throws AmbariException
    */
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/RootServiceServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/RootServiceServiceTest.java
index 490d2da..cbf2036 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/RootServiceServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/RootServiceServiceTest.java
@@ -40,7 +40,7 @@ public class RootServiceServiceTest extends BaseServiceTest {
   public List<ServiceTestInvocation> getTestInvocations() throws Exception {
     List<ServiceTestInvocation> listInvocations = new ArrayList<>();
     
-    //getServiceConfigs
+    //getServices
     RootServiceService service = new TestRootServiceService(null, null, null);
     Method m = service.getClass().getMethod("getRootServices", String.class, HttpHeaders.class, UriInfo.class);
     Object[] args = new Object[] {null, getHttpHeaders(), getUriInfo()};
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/ServiceServiceTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/ServiceServiceTest.java
index b8384a6..04eadb2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/ServiceServiceTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/ServiceServiceTest.java
@@ -45,7 +45,7 @@ public class ServiceServiceTest extends BaseServiceTest {
     Object[] args = new Object[] {null, getHttpHeaders(), getUriInfo(), "serviceName"};
     listInvocations.add(new ServiceTestInvocation(Request.Type.GET, service, m, args, null));
 
-    //getServiceConfigs
+    //getServices
     service = new TestServiceService("clusterName", null);
     m = service.getClass().getMethod("getServices", String.class, HttpHeaders.class, UriInfo.class);
     args = new Object[] {null, getHttpHeaders(), getUriInfo()};
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 34bbb8c..e664cae 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -8793,7 +8793,7 @@ public class AmbariManagementControllerTest {
     expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(maintHelper);
     expect(injector.getInstance(KerberosHelper.class)).andReturn(createStrictMock(KerberosHelper.class));
 
-    // getServiceConfigs
+    // getServices
     expect(clusters.getCluster("cluster1")).andReturn(cluster);
     expect(cluster.getService("service1")).andReturn(service);
 
@@ -8837,7 +8837,7 @@ public class AmbariManagementControllerTest {
     expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(maintHelper);
     expect(injector.getInstance(KerberosHelper.class)).andReturn(createStrictMock(KerberosHelper.class));
 
-    // getServiceConfigs
+    // getServices
     expect(clusters.getCluster("cluster1")).andReturn(cluster);
     expect(cluster.getService("service1")).andThrow(new ServiceNotFoundException("custer1", "service1"));
 
@@ -8896,7 +8896,7 @@ public class AmbariManagementControllerTest {
     expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(maintHelper);
     expect(injector.getInstance(KerberosHelper.class)).andReturn(createStrictMock(KerberosHelper.class));
 
-    // getServiceConfigs
+    // getServices
     expect(clusters.getCluster("cluster1")).andReturn(cluster).times(4);
     expect(cluster.getService("service1")).andReturn(service1);
     expect(cluster.getService("service2")).andThrow(new ServiceNotFoundException("cluster1", "service2"));
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
index f9765f2..3f6de7d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java
@@ -290,7 +290,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     String updatedVal = properties.get("yarn-site").get("yarn.resourcemanager.hostname");
@@ -338,7 +338,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals(properties.size(), 3);
@@ -372,7 +372,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
 
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // When
     configProcessor.doUpdateForBlueprintExport();
@@ -416,7 +416,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("%HOSTGROUP::group1%", clusterConfig.getPropertyValue("yarn-site", "yarn.resourcemanager.hostname"));
@@ -464,7 +464,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("%HOSTGROUP::group1%", properties.get("yarn-site").get("yarn.resourcemanager.hostname"));
@@ -499,7 +499,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     String updatedVal = properties.get("core-site").get("fs.defaultFS");
@@ -532,7 +532,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     assertFalse(properties.get("yarn-site").containsKey("yarn.resourcemanager.hostname"));
@@ -578,7 +578,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group3);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     String updatedVal = properties.get("hbase-site").get("hbase.zookeeper.quorum");
@@ -625,7 +625,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group3);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     String updatedVal = properties.get("webhcat-site").get("templeton.zookeeper.hosts");
@@ -680,7 +680,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group3);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     String updatedVal = properties.get("storm-site").get("storm.zookeeper.servers");
@@ -723,7 +723,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     String updatedVal = properties.get("hive-site").get("javax.jdo.option.ConnectionURL");
@@ -756,7 +756,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     assertFalse(properties.get("hive-site").containsKey("javax.jdo.option.ConnectionURL"));
@@ -808,7 +808,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
 
@@ -891,7 +891,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("Falcon Broker URL property not properly exported",
@@ -928,7 +928,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     assertFalse("tez.tez-ui.history-url.base should not be present in exported blueprint in tez-site",
@@ -979,7 +979,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     // verify that these properties are filtered out of the exported configuration
@@ -1043,7 +1043,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("HTTPS address HA property not properly exported",
@@ -1116,7 +1116,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("HTTPS address HA property not properly exported",
@@ -1176,7 +1176,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     // verify that any properties that include nameservices are not removed from the exported blueprint's configuration
@@ -1212,7 +1212,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("Incorrect state for hdfs-site config after HA call in non-HA environment, should be zero",
@@ -1275,7 +1275,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     // verify results for name service one
@@ -1352,7 +1352,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("Yarn Log Server URL was incorrectly exported",
@@ -1418,7 +1418,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("Yarn Log Server URL was incorrectly exported",
@@ -1494,7 +1494,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     assertEquals("hdfs config property not exported properly",
@@ -1581,7 +1581,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // call top-level export method
     configProcessor.doUpdateForBlueprintExport();
@@ -1674,7 +1674,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // call top-level export method
     configProcessor.doUpdateForBlueprintExport();
@@ -1770,7 +1770,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     }
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // call top-level export method
     configProcessor.doUpdateForBlueprintExport();
@@ -1833,7 +1833,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     expect(stack.getCardinality("OOZIE_SERVER")).andReturn(new Cardinality("1+")).anyTimes();
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor blueprintConfigurationProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor blueprintConfigurationProcessor = new BlueprintConfigurationProcessor(topology);
 
     assertTrue(BlueprintConfigurationProcessor.singleHostTopologyUpdaters.get("oozie-site").containsKey("oozie.service.JPAService.jdbc.url"));
     assertNull(blueprintConfigurationProcessor.getRemovePropertyUpdaters().get("oozie-site"));
@@ -1872,7 +1872,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     expect(stack.getCardinality("OOZIE_SERVER")).andReturn(new Cardinality("1+")).anyTimes();
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor blueprintConfigurationProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor blueprintConfigurationProcessor = new BlueprintConfigurationProcessor(topology);
 
     assertTrue(BlueprintConfigurationProcessor.singleHostTopologyUpdaters.get("oozie-site").containsKey("oozie.service.JPAService.jdbc.url"));
     assertNull(blueprintConfigurationProcessor.getRemovePropertyUpdaters().get("oozie-site"));
@@ -1935,7 +1935,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // call top-level export method
     configProcessor.doUpdateForBlueprintExport();
@@ -2015,7 +2015,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // call top-level export method
     configProcessor.doUpdateForBlueprintExport();
@@ -2064,7 +2064,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // call top-level export method
     configProcessor.doUpdateForBlueprintExport();
@@ -2103,7 +2103,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // call top-level export method
     configProcessor.doUpdateForBlueprintExport();
@@ -2145,7 +2145,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     Set<String> configTypesUpdated =
       updater.doUpdateForClusterCreate();
@@ -2201,7 +2201,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
 
@@ -2248,7 +2248,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
 
@@ -2299,7 +2299,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     // todo: set as BP hostgroup
     topology.getHostGroupInfo().get("group2").getConfiguration().setParentConfiguration(group2BPConfig);
 
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
 
@@ -2335,7 +2335,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     expect(stack.getCardinality("APP_TIMELINE_SERVER")).andReturn(new Cardinality("1")).anyTimes();
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     //todo: should throw a checked exception, not the exception expected by the api
     try {
@@ -2378,7 +2378,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
 
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     try {
       updater.doUpdateForClusterCreate();
@@ -2422,7 +2422,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
 
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
     String updatedVal = topology.getConfiguration().getFullProperties().get("yarn-site").get("yarn.timeline-service.address");
@@ -2458,7 +2458,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     expect(stack.getCardinality("APP_TIMELINE_SERVER")).andReturn(new Cardinality("0-1")).anyTimes();
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
     String updatedVal = topology.getConfiguration().getFullProperties().get("yarn-site").get("yarn.timeline-service.address");
@@ -2490,7 +2490,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
     updater.doUpdateForClusterCreate();
     String updatedVal = topology.getConfiguration().getFullProperties().get("core-site").get("fs.defaultFS");
     assertEquals("testhost:5050", updatedVal);
@@ -2535,7 +2535,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group3);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
     updater.doUpdateForClusterCreate();
     String updatedVal = topology.getConfiguration().getFullProperties().get("hbase-site").get("hbase.zookeeper.quorum");
     String[] hosts = updatedVal.split(",");
@@ -2592,7 +2592,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group3);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
     updater.doUpdateForClusterCreate();
     String updatedVal = topology.getConfiguration().getFullProperties().get("webhcat-site").get("templeton.zookeeper.hosts");
     String[] hosts = updatedVal.split(",");
@@ -2645,7 +2645,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
 
     BlueprintConfigurationProcessor.MultipleHostTopologyUpdater mhtu = new BlueprintConfigurationProcessor.MultipleHostTopologyUpdater(component1);
-    String newValue = mhtu.updateForClusterCreate(propertyName, originalValue, properties, topology, null);
+    String newValue = mhtu.updateForClusterCreate(propertyName, originalValue, properties, topology);
 
     assertEquals("testhost1a", newValue);
   }
@@ -2679,7 +2679,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
 
     BlueprintConfigurationProcessor.MultipleHostTopologyUpdater mhtu = new BlueprintConfigurationProcessor.MultipleHostTopologyUpdater(component1);
-    String newValue = mhtu.updateForClusterCreate(propertyName, originalValue, properties, topology, null);
+    String newValue = mhtu.updateForClusterCreate(propertyName, originalValue, properties, topology);
 
     List<String> hostArray = Arrays.asList(newValue.split(","));
     Assert.assertTrue(hostArray.containsAll(hosts1) && hosts1.containsAll(hostArray));
@@ -2714,7 +2714,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
 
     BlueprintConfigurationProcessor.MultipleHostTopologyUpdater mhtu = new BlueprintConfigurationProcessor.MultipleHostTopologyUpdater(component1);
-    String newValue = mhtu.updateForClusterCreate(propertyName, originalValue, properties, topology, null);
+    String newValue = mhtu.updateForClusterCreate(propertyName, originalValue, properties, topology);
 
     List<String> hostArray = Arrays.asList(newValue.split(","));
     Assert.assertTrue(hostArray.containsAll(hosts1) && hosts1.containsAll(hostArray));
@@ -2733,7 +2733,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     TestHostGroup testHostGroup = new TestHostGroup("test-host-group-one", Collections.emptySet(), Collections.emptySet());
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, Collections.singleton(testHostGroup));
 
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     Set<String> updatedConfigTypes =
       updater.doUpdateForClusterCreate();
@@ -2773,7 +2773,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     TestHostGroup testHostGroup = new TestHostGroup("test-host-group-one", Collections.emptySet(), Collections.emptySet());
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, Collections.singleton(testHostGroup));
 
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     Set<String> updatedConfigTypes =
       updater.doUpdateForClusterCreate();
@@ -2860,7 +2860,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     expect(stack.getCardinality("SECONDARY_NAMENODE")).andReturn(new Cardinality("1")).anyTimes();
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
     updater.doUpdateForClusterCreate();
 
     // verify that dfs.internal.nameservices was added
@@ -2935,7 +2935,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
     updater.doUpdateForClusterCreate();
 
     assertEquals("Unexpected config update for templeton.hive.properties",
@@ -2984,7 +2984,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     expect(stack.getCardinality("HIVE_SERVER")).andReturn(new Cardinality("1+")).anyTimes();
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
     updater.doUpdateForClusterCreate();
 
     assertEquals("Unexpected config update for hive.metastore.uris",
@@ -3048,7 +3048,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     expect(stack.getCardinality("HIVE_SERVER")).andReturn(new Cardinality("1+")).anyTimes();
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
     updater.doUpdateForClusterCreate();
 
     assertEquals("Unexpected config update for hive.metastore.uris",
@@ -3109,7 +3109,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     }
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
     updater.doUpdateForClusterCreate();
     String updatedValue = webHCatSiteProperties.get(propertyKey);
 
@@ -3163,7 +3163,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
     updater.doUpdateForBlueprintExport();
 
     final String expectedPropertyValue = createExportedAddress("2181", expectedHostGroupName) + "," + createExportedAddress("2181", expectedHostGroupNameTwo);
@@ -3217,7 +3217,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     expect(stack.getCardinality("OOZIE_SERVER")).andReturn(new Cardinality("1+")).anyTimes();
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
     updater.doUpdateForClusterCreate();
 
     assertEquals("oozie property not updated correctly",
@@ -3283,7 +3283,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     expect(stack.getCardinality("OOZIE_SERVER")).andReturn(new Cardinality("1+")).anyTimes();
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
     updater.doUpdateForBlueprintExport();
 
     assertEquals("oozie property not updated correctly",
@@ -3337,7 +3337,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     expect(stack.getCardinality("RESOURCEMANAGER")).andReturn(new Cardinality("1-2")).anyTimes();
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
     updater.doUpdateForClusterCreate();
 
     // verify that the properties with hostname information were correctly preserved
@@ -3424,7 +3424,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     expect(stack.getCardinality("RESOURCEMANAGER")).andReturn(new Cardinality("1-2")).anyTimes();
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
     updater.doUpdateForBlueprintExport();
 
     // verify that the properties with hostname information were correctly preserved
@@ -3507,7 +3507,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
     updater.doUpdateForClusterCreate();
 
     // expect that all servers are included in configuration property without changes, and that the qjournal URL format is preserved
@@ -3556,7 +3556,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
 
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
     String updatedVal = topology.getConfiguration().getFullProperties().get("storm-site").get("storm.zookeeper.servers");
@@ -3607,7 +3607,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
     String updatedVal = topology.getConfiguration().getFullProperties().get("storm-site").get("nimbus.seeds");
@@ -3657,7 +3657,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
     String updatedVal = topology.getConfiguration().getFullProperties().get("storm-site").get("nimbus.seeds");
@@ -3692,7 +3692,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
     String updatedVal = topology.getConfiguration().getFullProperties().get("hbase-env").get("hbase_master_heapsize");
@@ -3724,7 +3724,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
     String updatedVal = topology.getConfiguration().getFullProperties().get("hbase-env").get("hbase_master_heapsize");
@@ -3756,7 +3756,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
     String updatedVal = topology.getConfiguration().getFullProperties().get("yarn-site").get("yarn.resourcemanager.hostname");
@@ -3788,7 +3788,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
     String updatedVal = topology.getConfiguration().getFullProperties().get("yarn-site").get("yarn.resourcemanager.hostname");
@@ -3820,7 +3820,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
     String updatedVal = topology.getConfiguration().getFullProperties().get("yarn-site").get("yarn.resourcemanager.hostname");
@@ -3852,7 +3852,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
     String updatedVal = topology.getConfiguration().getFullProperties().get("core-site").get("fs.defaultFS");
@@ -3898,7 +3898,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group3);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
     String updatedVal = topology.getConfiguration().getFullProperties().get("hbase-site").get("hbase.zookeeper.quorum");
@@ -3956,7 +3956,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group3);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
     String updatedVal = topology.getConfiguration().getFullProperties().get("webhcat-site").get("templeton.zookeeper.hosts");
@@ -4014,7 +4014,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group3);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
     String updatedVal = topology.getConfiguration().getFullProperties().get("core-site").get("ha.zookeeper.quorum");
@@ -4055,7 +4055,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
     assertEquals("Multi-host property with single host value was not correctly updated for cluster create.",
@@ -4111,7 +4111,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group4);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
     String updatedVal = topology.getConfiguration().getFullProperties().get("storm-site").get("storm.zookeeper.servers");
@@ -4185,7 +4185,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
     String updatedVal = topology.getConfiguration().getFullProperties().get("hive-site").get("javax.jdo.option.ConnectionURL");
@@ -4221,7 +4221,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
     String updatedVal = topology.getConfiguration().getFullProperties().get("hive-site").get("javax.jdo.option.ConnectionURL");
@@ -4254,7 +4254,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
     String updatedVal = topology.getConfiguration().getFullProperties().get("hive-env").get("javax.jdo.option.ConnectionURL");
@@ -4293,7 +4293,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
 
@@ -4330,7 +4330,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
 
@@ -4370,7 +4370,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
 
@@ -4415,7 +4415,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
 
@@ -4453,7 +4453,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
 
@@ -4494,7 +4494,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     // call top-level export method
     updater.doUpdateForClusterCreate();
@@ -4530,7 +4530,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     // call top-level cluster config update method
     updater.doUpdateForClusterCreate();
@@ -4606,7 +4606,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     // call top-level cluster config update method
     updater.doUpdateForClusterCreate();
@@ -4684,7 +4684,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     // call top-level cluster config update method
     updater.doUpdateForClusterCreate();
@@ -4762,7 +4762,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     // call top-level cluster config update method
     updater.doUpdateForClusterCreate();
@@ -4850,7 +4850,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     // call top-level cluster config update method
     updater.doUpdateForClusterCreate();
@@ -4918,7 +4918,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     // call top-level cluster config update method
     updater.doUpdateForClusterCreate();
@@ -4956,7 +4956,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     // call top-level cluster config update method
     updater.doUpdateForClusterCreate();
@@ -5009,7 +5009,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     // call top-level cluster config update method
     updater.doUpdateForClusterCreate();
@@ -5073,7 +5073,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     // call top-level cluster config update method
     updater.doUpdateForClusterCreate();
@@ -5113,7 +5113,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     expect(stack.getCardinality("GANGLIA_SERVER")).andReturn(new Cardinality("1")).anyTimes();
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     // call top-level export method
     updater.doUpdateForClusterCreate();
@@ -5161,7 +5161,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     // call top-level export method
     updater.doUpdateForClusterCreate();
@@ -5256,7 +5256,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     expect(stack.getCardinality("SECONDARY_NAMENODE")).andReturn(new Cardinality("1")).anyTimes();
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     Set<String> updatedConfigTypes =
       updater.doUpdateForClusterCreate();
@@ -5375,7 +5375,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     expect(stack.getCardinality("SECONDARY_NAMENODE")).andReturn(new Cardinality("1")).anyTimes();
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     Set<String> updatedConfigTypes =
       updater.doUpdateForClusterCreate();
@@ -5444,7 +5444,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
 
@@ -5591,7 +5591,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     // call top-level export method
     updater.doUpdateForClusterCreate();
@@ -5633,7 +5633,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     // call top-level export method
     updater.doUpdateForClusterCreate();
@@ -5684,7 +5684,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // When
     configProcessor.doUpdateForClusterCreate();
@@ -5724,7 +5724,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     // call top-level export method
     Collection<String> requiredGroups = updater.getRequiredHostGroups();
@@ -5757,7 +5757,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     // call top-level export method
     Collection<String> requiredGroups = updater.getRequiredHostGroups();
@@ -5792,7 +5792,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     // call top-level export method
     Collection<String> requiredGroups = updater.getRequiredHostGroups();
@@ -5836,7 +5836,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Collections.singletonList(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     configProcessor.doUpdateForClusterCreate();
 
@@ -5881,7 +5881,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Collections.singletonList(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     configProcessor.doUpdateForClusterCreate();
 
@@ -5925,7 +5925,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Collections.singletonList(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     configProcessor.doUpdateForClusterCreate();
 
@@ -5968,7 +5968,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Collections.singletonList(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     configProcessor.doUpdateForClusterCreate();
 
@@ -6012,7 +6012,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Collections.singletonList(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     configProcessor.doUpdateForClusterCreate();
 
@@ -6103,7 +6103,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Collections.singletonList(group1);
 
     ClusterTopology topology1 = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(null, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology1);
 
     configProcessor.doUpdateForClusterCreate();
 
@@ -6151,7 +6151,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Collections.singletonList(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     configProcessor.doUpdateForClusterCreate();
 
@@ -6183,7 +6183,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Collections.singletonList(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     configProcessor.doUpdateForClusterCreate();
 
@@ -6214,7 +6214,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Collections.singletonList(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     configProcessor.doUpdateForClusterCreate();
 
@@ -6245,7 +6245,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Collections.singletonList(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     configProcessor.doUpdateForClusterCreate();
 
@@ -6277,7 +6277,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Collections.singletonList(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     configProcessor.doUpdateForClusterCreate();
 
@@ -6330,7 +6330,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
     topology.getAdvisedConfigurations().putAll(createAdvisedConfigMap());
     topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     reset(stack);
     expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
@@ -6390,7 +6390,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
     topology.getAdvisedConfigurations().putAll(createAdvisedConfigMap());
     topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.ONLY_STACK_DEFAULTS_APPLY);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     reset(stack);
     expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
@@ -6458,7 +6458,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
     topology.getAdvisedConfigurations().putAll(createAdvisedConfigMap());
     topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.ALWAYS_APPLY);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     // WHEN
     Set<String> configTypes = configProcessor.doUpdateForClusterCreate();
     // THEN
@@ -6513,7 +6513,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
     topology.getAdvisedConfigurations().putAll(createAdvisedConfigMap());
     topology.setConfigRecommendationStrategy(ConfigRecommendationStrategy.NEVER_APPLY);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     // WHEN
     configProcessor.doUpdateForClusterCreate();
     // THEN
@@ -6552,7 +6552,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Collections.singletonList(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // When
     configProcessor.doUpdateForClusterCreate();
@@ -6588,7 +6588,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Collections.singletonList(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // When
     configProcessor.doUpdateForClusterCreate();
@@ -6625,7 +6625,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // When
     configProcessor.doUpdateForClusterCreate();
@@ -6680,7 +6680,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // When
     configProcessor.doUpdateForClusterCreate();
@@ -6739,7 +6739,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1);//, group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // When
     configProcessor.doUpdateForClusterCreate();
@@ -6805,7 +6805,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // When
     configProcessor.doUpdateForClusterCreate();
@@ -6879,7 +6879,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // When
     configProcessor.doUpdateForClusterCreate();
@@ -6945,7 +6945,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // When
     configProcessor.doUpdateForBlueprintExport();
@@ -7017,7 +7017,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // When
     configProcessor.doUpdateForBlueprintExport();
@@ -7068,7 +7068,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // When
     configProcessor.doUpdateForClusterCreate();
@@ -7108,7 +7108,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Collections.singleton(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // When
     configProcessor.doUpdateForClusterCreate();
@@ -7153,7 +7153,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // When
     configProcessor.doUpdateForClusterCreate();
@@ -7207,7 +7207,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
 
     // When
@@ -7252,7 +7252,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // When
     configProcessor.doUpdateForClusterCreate();
@@ -7298,7 +7298,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // When
     configProcessor.doUpdateForClusterCreate();
@@ -7352,7 +7352,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // When
     configProcessor.doUpdateForClusterCreate();
@@ -7408,7 +7408,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // When
     configProcessor.doUpdateForClusterCreate();
@@ -7452,7 +7452,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // When
     configProcessor.doUpdateForClusterCreate();
@@ -7496,7 +7496,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
 
     // When
@@ -7541,7 +7541,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Lists.newArrayList(group1, group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     // When
     configProcessor.doUpdateForClusterCreate();
@@ -7796,7 +7796,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group3);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
 
@@ -7828,7 +7828,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
 
     updater.doUpdateForClusterCreate();
 
@@ -7856,7 +7856,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     typeProps.put("atlas.cluster.name", String.valueOf(clusterId));
     properties.put("hive-site", typeProps);
 
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     String updatedVal = properties.get("hive-site").get("atlas.cluster.name");
@@ -7895,7 +7895,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     stormSiteProps.put("metrics.reporter.register", someString);
     properties.put("storm-site", stormSiteProps);
 
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     String hiveExecPostHooks = properties.get("hive-site").get("hive.exec.post.hooks");
@@ -7930,7 +7930,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Arrays.asList(group1, group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     configProcessor.doUpdateForClusterCreate();
 
@@ -7960,7 +7960,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Collections.singletonList(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     configProcessor.doUpdateForClusterCreate();
 
@@ -7990,7 +7990,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Collections.singletonList(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     configProcessor.doUpdateForClusterCreate();
 
@@ -8025,7 +8025,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     hostGroups.add(group2);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     configProcessor.doUpdateForClusterCreate();
 
@@ -8061,7 +8061,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
 
     expect(stack.isPasswordProperty((String) anyObject(), (String) anyObject(), (String) anyObject())).andReturn(true).once();
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
     configProcessor.doUpdateForBlueprintExport();
 
     assertFalse(properties.get("ranger-admin-site").containsKey("ranger.service.https.attrib.keystore.pass"));
@@ -8140,7 +8140,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     Collection<TestHostGroup> hostGroups = Collections.singletonList(group1);
 
     ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
-    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology, null);
+    BlueprintConfigurationProcessor configProcessor = new BlueprintConfigurationProcessor(topology);
 
     configProcessor.doUpdateForClusterCreate();
 
@@ -8237,7 +8237,7 @@ public class BlueprintConfigurationProcessorTest extends EasyMockSupport {
     expect(bp.getHostGroups()).andReturn(allHostGroups).anyTimes();
 
     expect(topologyRequestMock.getClusterId()).andReturn(1L).anyTimes();
-    expect(topologyRequestMock.getBlueprint()).andReturn(null).anyTimes();
+    expect(topologyRequestMock.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(topologyRequestMock.getConfiguration()).andReturn(configuration).anyTimes();
     expect(topologyRequestMock.getHostGroupInfo()).andReturn(hostGroupInfo).anyTimes();
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintResourceProviderTest.java
index 48d6f20..f534411 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintResourceProviderTest.java
@@ -56,7 +56,6 @@ import org.apache.ambari.server.controller.spi.NoSuchParentResourceException;
 import org.apache.ambari.server.controller.spi.NoSuchResourceException;
 import org.apache.ambari.server.controller.spi.Predicate;
 import org.apache.ambari.server.controller.spi.Request;
-import org.apache.ambari.server.controller.spi.RequestStatus;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException;
 import org.apache.ambari.server.controller.spi.ResourceProvider;
@@ -342,8 +341,8 @@ public class BlueprintResourceProviderTest {
     AbstractResourceProviderTest.TestObserver observer = new AbstractResourceProviderTest.TestObserver();
     ((ObservableResourceProvider)provider).addObserver(observer);
 
-    RequestStatus resources = provider.createResources(request);
-    System.out.println(resources);
+    provider.createResources(request);
+
     ResourceProviderEvent lastEvent = observer.getLastEvent();
     assertNotNull(lastEvent);
     assertEquals(Resource.Type.Blueprint, lastEvent.getResourceType());
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java
index 6e503a6..4a80893 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterResourceProviderTest.java
@@ -104,7 +104,7 @@ public class ClusterResourceProviderTest {
   @Before
   public void setup() throws Exception{
     ClusterResourceProvider.init(topologyManager, topologyFactory, securityFactory, gson);
-    ProvisionClusterRequest.init(null);
+    ProvisionClusterRequest.init(blueprintFactory);
     provider = new ClusterResourceProvider(controller);
 
     expect(blueprintFactory.getBlueprint(BLUEPRINT_NAME)).andReturn(blueprint).anyTimes();
@@ -188,7 +188,7 @@ public class ClusterResourceProviderTest {
     expect(securityFactory.createSecurityConfigurationFromRequest(EasyMock.anyObject(), anyBoolean())).andReturn
       (securityConfiguration).once();
     expect(topologyFactory.createProvisionClusterRequest(properties, securityConfiguration)).andReturn(topologyRequest).once();
-    expect(topologyRequest.getBlueprint()).andReturn(null).anyTimes();
+    expect(topologyRequest.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(blueprint.getSecurity()).andReturn(blueprintSecurityConfiguration).anyTimes();
     expect(requestStatusResponse.getRequestId()).andReturn(5150L).anyTimes();
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequestTest.java
index 743d1d5..e1f5583 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequestTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ExportBlueprintRequestTest.java
@@ -120,7 +120,7 @@ public class ExportBlueprintRequestTest {
 
     // assertions
     assertEquals(CLUSTER_NAME, exportBlueprintRequest.getClusterName());
-    Blueprint bp = null; exportBlueprintRequest.getBlueprint();
+    Blueprint bp = exportBlueprintRequest.getBlueprint();
     assertEquals("exported-blueprint", bp.getName());
     Map<String, HostGroup> hostGroups = bp.getHostGroups();
     assertEquals(2, hostGroups.size());
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequestTest.java
index 01d69a5..5ed582f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequestTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ProvisionClusterRequestTest.java
@@ -82,7 +82,7 @@ public class ProvisionClusterRequestTest {
   @Before
   public void setUp() throws Exception {
     reset(blueprintFactory, blueprint, hostResourceProvider);
-    ProvisionClusterRequest.init(null);
+    ProvisionClusterRequest.init(blueprintFactory);
     // set host resource provider field
     Class clazz = BaseClusterRequest.class;
     Field f = clazz.getDeclaredField("hostResourceProvider");
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
index 4e52250..c0695b1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/RequestResourceProviderTest.java
@@ -1717,7 +1717,7 @@ public class RequestResourceProviderTest {
 
     TopologyRequest topologyRequest = createNiceMock(TopologyRequest.class);
     expect(topologyRequest.getHostGroupInfo()).andReturn(hostGroupInfoMap).anyTimes();
-    expect(topology.getBlueprint()).andReturn(null).anyTimes();
+    expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(blueprint.shouldSkipFailure()).andReturn(true).anyTimes();
 
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ScaleClusterRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ScaleClusterRequestTest.java
index b1e52cc..2a03b1f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ScaleClusterRequestTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ScaleClusterRequestTest.java
@@ -75,7 +75,7 @@ public class ScaleClusterRequestTest {
 
   @Before
   public void setUp() throws Exception {
-    ScaleClusterRequest.init(null);
+    ScaleClusterRequest.init(blueprintFactory);
     // set host resource provider field
     Class clazz = BaseClusterRequest.class;
     Field f = clazz.getDeclaredField("hostResourceProvider");
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UnitUpdaterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UnitUpdaterTest.java
index 320e167..a7d0d47 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UnitUpdaterTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UnitUpdaterTest.java
@@ -105,10 +105,10 @@ public class UnitUpdaterTest extends EasyMockSupport {
 
   private String updateUnit(String serviceName, String configType, String propName, String propValue) throws InvalidTopologyException, ConfigurationTopologyException {
     UnitUpdater updater = new UnitUpdater(serviceName, configType);
-    expect(clusterTopology.getBlueprint()).andReturn(null).anyTimes();
+    expect(clusterTopology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(blueprint.getStack()).andReturn(stack).anyTimes();
     expect(stack.getConfigurationPropertiesWithMetadata(serviceName, configType)).andReturn(stackConfigWithMetadata).anyTimes();
     replayAll();
-    return updater.updateForClusterCreate(propName, propValue, Collections.emptyMap(), clusterTopology, null);
+    return updater.updateForClusterCreate(propName, propValue, Collections.emptyMap(), clusterTopology);
   }
-}
\ No newline at end of file
+}
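
For readers unfamiliar with the EasyMock pattern restored in the UnitUpdaterTest hunk above — stubbing each hop of a getter chain (topology -> blueprint -> stack) on its own mock before replay — a minimal, self-contained sketch follows. The Topology/Blueprint/Stack interfaces here are hypothetical stand-ins for illustration only, not Ambari's real classes:

import static org.easymock.EasyMock.expect;

import org.easymock.EasyMockSupport;

public class GetterChainStubSketch extends EasyMockSupport {

  // Hypothetical stand-ins for the ClusterTopology -> Blueprint -> Stack chain.
  interface Stack { String getConfigType(String service); }
  interface Blueprint { Stack getStack(); }
  interface Topology { Blueprint getBlueprint(); }

  public void run() {
    Topology topology = createMock(Topology.class);
    Blueprint blueprint = createMock(Blueprint.class);
    Stack stack = createMock(Stack.class);

    // Stub each hop on its own mock; anyTimes() keeps the expectation valid
    // no matter how often the code under test walks the chain.
    expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
    expect(blueprint.getStack()).andReturn(stack).anyTimes();
    expect(stack.getConfigType("HDFS")).andReturn("hdfs-site").anyTimes();

    replayAll();
    // ... exercise the code under test here, e.g. an updater that reads stack config ...
    verifyAll();
  }
}
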
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/TopologyLogicalRequestDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/TopologyLogicalRequestDAOTest.java
index bd529c9..6d6d32a 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/TopologyLogicalRequestDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/TopologyLogicalRequestDAOTest.java
@@ -72,8 +72,8 @@ public class TopologyLogicalRequestDAOTest {
     TopologyRequestEntity requestEntity = new TopologyRequestEntity();
     requestEntity.setAction("a1");
     requestEntity.setBlueprintName("bp1");
-    //requestEntity.setClusterAttributes("attributes");
-    //requestEntity.setClusterProperties("properties");
+    requestEntity.setClusterAttributes("attributes");
+    requestEntity.setClusterProperties("properties");
     requestEntity.setClusterId(clusterId);
     requestEntity.setDescription("description");
     requestDAO.create(requestEntity);
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/TopologyRequestDAOTest.java b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/TopologyRequestDAOTest.java
index 3437d7d..68aef6c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/TopologyRequestDAOTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/orm/dao/TopologyRequestDAOTest.java
@@ -63,8 +63,8 @@ public class TopologyRequestDAOTest {
     TopologyRequestEntity requestEntity = new TopologyRequestEntity();
     requestEntity.setAction("a1");
     requestEntity.setBlueprintName("bp1");
-    //requestEntity.setClusterAttributes("attributes");
-    //requestEntity.setClusterProperties("properties");
+    requestEntity.setClusterAttributes("attributes");
+    requestEntity.setClusterProperties("properties");
     requestEntity.setClusterId(clusterId);
     requestEntity.setDescription("description");
     TopologyHostGroupEntity hostGroupEntity = new TopologyHostGroupEntity();
@@ -88,8 +88,8 @@ public class TopologyRequestDAOTest {
     TopologyRequestEntity requestEntity = requestEntities.iterator().next();
     Assert.assertEquals("a1", requestEntity.getAction());
     Assert.assertEquals("bp1", requestEntity.getBlueprintName());
-    //Assert.assertEquals("attributes", requestEntity.getClusterAttributes());
-    //Assert.assertEquals("properties", requestEntity.getClusterProperties());
+    Assert.assertEquals("attributes", requestEntity.getClusterAttributes());
+    Assert.assertEquals("properties", requestEntity.getClusterProperties());
     Assert.assertEquals("description", requestEntity.getDescription());
     Collection<TopologyHostGroupEntity> hostGroupEntities = requestEntity.getTopologyHostGroupEntities();
     Assert.assertEquals(1, hostGroupEntities.size());
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
index 87f6685..96348b5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
@@ -597,7 +597,7 @@ public class ClusterTest {
     // TODO write unit tests for
     // public void addService(Service service) throws AmbariException;
     // public Service getService(String serviceName) throws AmbariException;
-    // public Map<String, Service> getServiceConfigs();
+    // public Map<String, Service> getServices();
 
     RepositoryVersionEntity repositoryVersion = helper.getOrCreateRepositoryVersion(c1);
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java
index 16de535..67c8420 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/AmbariContextTest.java
@@ -225,7 +225,7 @@ public class AmbariContextTest {
     blueprintServices.add("service2");
 
     expect(topology.getClusterId()).andReturn(CLUSTER_ID).anyTimes();
-    expect(topology.getBlueprint()).andReturn(null).anyTimes();
+    expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(topology.getHostGroupInfo()).andReturn(Collections.singletonMap(HOST_GROUP_1, group1Info)).anyTimes();
 
     expect(blueprint.getName()).andReturn(BP_NAME).anyTimes();
@@ -296,10 +296,8 @@ public class AmbariContextTest {
     Capture<Set<ServiceRequest>> serviceRequestCapture = EasyMock.newCapture();
     Capture<Set<ServiceComponentRequest>> serviceComponentRequestCapture = EasyMock.newCapture();
 
-    serviceResourceProvider.createServices(capture(serviceRequestCapture));
-    expectLastCall().once();
-    componentResourceProvider.createComponents(capture(serviceComponentRequestCapture));
-    expectLastCall().once();
+    expect(serviceResourceProvider.createServices(capture(serviceRequestCapture))).andReturn(null).once();
+    expect(componentResourceProvider.createComponents(capture(serviceComponentRequestCapture))).andReturn(null).once();
 
     Capture<Request> serviceInstallRequestCapture = EasyMock.newCapture();
     Capture<Request> serviceStartRequestCapture = EasyMock.newCapture();
@@ -315,7 +313,7 @@ public class AmbariContextTest {
     replayAll();
 
     // test
-    context.createAmbariResources(topology, CLUSTER_NAME, null);
+    context.createAmbariResources(topology, CLUSTER_NAME, null, null, null);
 
     // assertions
     ClusterRequest clusterRequest = clusterRequestCapture.getValue();
@@ -385,8 +383,7 @@ public class AmbariContextTest {
     expect(cluster.getService("service2")).andReturn(mockService1).once();
     Capture<Set<ServiceComponentHostRequest>> requestsCapture = EasyMock.newCapture();
 
-    controller.createHostComponents(capture(requestsCapture));
-    expectLastCall().once();
+    expect(controller.createHostComponents(capture(requestsCapture))).andReturn(null).once();
 
     replayAll();
 
@@ -400,7 +397,7 @@ public class AmbariContextTest {
     components.add("component3");
     componentsMap.put("service2", components);
 
-    context.createAmbariHostResources(CLUSTER_ID, "host1", new HashMap<org.apache.ambari.server.topology.Service, Collection<ComponentV2>>());
+    context.createAmbariHostResources(CLUSTER_ID, "host1", componentsMap);
 
     assertEquals(requestsCapture.getValue().size(), 3);
   }
@@ -415,8 +412,7 @@ public class AmbariContextTest {
     expect(cluster.getService("service1")).andReturn(mockService1).times(2);
     Capture<Set<ServiceComponentHostRequest>> requestsCapture = EasyMock.newCapture();
 
-    controller.createHostComponents(capture(requestsCapture));
-    expectLastCall().once();
+    expect(controller.createHostComponents(capture(requestsCapture))).andReturn(null).once();
 
     replayAll();
 
@@ -430,7 +426,7 @@ public class AmbariContextTest {
     components.add("component3");
     componentsMap.put("service2", components);
 
-    context.createAmbariHostResources(CLUSTER_ID, "host1", new HashMap<org.apache.ambari.server.topology.Service, Collection<ComponentV2>>());
+    context.createAmbariHostResources(CLUSTER_ID, "host1", componentsMap);
 
     assertEquals(requestsCapture.getValue().size(), 2);
   }
@@ -733,10 +729,8 @@ public class AmbariContextTest {
     expectLastCall().once();
     expect(cluster.getServices()).andReturn(clusterServices).anyTimes();
 
-    serviceResourceProvider.createServices(capture(Capture.<Set<ServiceRequest>>newInstance()));
-    expectLastCall().once();
-    componentResourceProvider.createComponents(capture(Capture.<Set<ServiceComponentRequest>>newInstance()));
-    expectLastCall().once();
+    expect(serviceResourceProvider.createServices(anyObject())).andReturn(null).once();
+    expect(componentResourceProvider.createComponents(anyObject())).andReturn(null).once();
 
     expect(serviceResourceProvider.updateResources(
         capture(Capture.<Request>newInstance()), capture(Capture.<Predicate>newInstance()))).andReturn(null).atLeastOnce();
@@ -744,7 +738,7 @@ public class AmbariContextTest {
     replayAll();
 
     // test
-    context.createAmbariResources(topology, CLUSTER_NAME, null);
+    context.createAmbariResources(topology, CLUSTER_NAME, null, null, null);
   }
 
   @Test
@@ -769,7 +763,7 @@ public class AmbariContextTest {
 
     // test
     try {
-      context.createAmbariResources(topology, CLUSTER_NAME, null);
+      context.createAmbariResources(topology, CLUSTER_NAME, null, null, null);
       fail("Expected failure when several versions are found");
     } catch (IllegalArgumentException e) {
       assertEquals(
@@ -792,7 +786,7 @@ public class AmbariContextTest {
 
     // test
     try {
-      context.createAmbariResources(topology, CLUSTER_NAME, null);
+      context.createAmbariResources(topology, CLUSTER_NAME, null, "xyz", null);
       fail("Expected failure when a bad version is provided");
     } catch (IllegalArgumentException e) {
       assertEquals(
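
Several of the AmbariContextTest hunks above replace the void-method recording idiom (invoke the mock, then expectLastCall()) with the value-returning idiom (expect(...).andReturn(...)), which EasyMock requires once the stubbed method declares a return type. A minimal sketch of the two idioms, using a hypothetical ResourceApi interface rather than Ambari's resource providers:

import static org.easymock.EasyMock.capture;
import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.expectLastCall;
import static org.easymock.EasyMock.newCapture;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;

import java.util.Collections;
import java.util.Set;

import org.easymock.Capture;

public class EasyMockIdiomSketch {

  // Hypothetical API; stands in for the resource providers mocked above.
  interface ResourceApi {
    void createVoid(Set<String> requests);          // no return value
    Object createReturning(Set<String> requests);   // returns a response object
  }

  public static void main(String[] args) {
    ResourceApi api = createMock(ResourceApi.class);
    Capture<Set<String>> voidArg = newCapture();
    Capture<Set<String>> returningArg = newCapture();

    // Void method: record the call first, then attach the expectation to it.
    api.createVoid(capture(voidArg));
    expectLastCall().once();

    // Returning method: wrap the call in expect(...) and stub the result.
    expect(api.createReturning(capture(returningArg))).andReturn(null).once();

    replay(api);
    api.createVoid(Collections.singleton("request-a"));
    api.createReturning(Collections.singleton("request-b"));
    verify(api);

    System.out.println("captured: " + voidArg.getValue() + ", " + returningArg.getValue());
  }
}
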
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintImplTest.java
index dd8ea61..dd0adcc 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/BlueprintImplTest.java
@@ -532,7 +532,7 @@ public class BlueprintImplTest {
 //    expect(request.getProperties()).andReturn(setProperties);
 //    expect(request.getRequestInfoProperties()).andReturn(Collections.<String, String>emptyMap());
 //    expect(dao.findByName(BLUEPRINT_NAME)).andReturn(null);
-//    expect(metaInfo.getServiceConfigs("test-stack-name", "test-stack-version")).andReturn(services).anyTimes();
+//    expect(metaInfo.getServices("test-stack-name", "test-stack-version")).andReturn(services).anyTimes();
 //    expect(metaInfo.getComponentsByService("test-stack-name", "test-stack-version", "test-service")).
 //        andReturn(serviceComponents).anyTimes();
 //    expect(metaInfo.getComponentToService("test-stack-name", "test-stack-version", "component1")).
@@ -643,7 +643,7 @@ public class BlueprintImplTest {
 //    expect(request.getProperties()).andReturn(setProperties);
 //    expect(request.getRequestInfoProperties()).andReturn(Collections.<String, String>emptyMap());
 //    expect(dao.findByName(BLUEPRINT_NAME)).andReturn(null);
-//    expect(metaInfo.getServiceConfigs("test-stack-name", "test-stack-version")).andReturn(services).anyTimes();
+//    expect(metaInfo.getServices("test-stack-name", "test-stack-version")).andReturn(services).anyTimes();
 //    expect(metaInfo.getComponentsByService("test-stack-name", "test-stack-version", "test-service")).
 //        andReturn(serviceComponents).anyTimes();
 //    expect(metaInfo.getComponentToService("test-stack-name", "test-stack-version", "component1")).
@@ -762,7 +762,7 @@ public class BlueprintImplTest {
 //    expect(request.getProperties()).andReturn(setProperties);
 //    expect(request.getRequestInfoProperties()).andReturn(Collections.<String, String>emptyMap());
 //    expect(dao.findByName(BLUEPRINT_NAME)).andReturn(null);
-//    expect(metaInfo.getServiceConfigs("test-stack-name", "test-stack-version")).andReturn(services).anyTimes();
+//    expect(metaInfo.getServices("test-stack-name", "test-stack-version")).andReturn(services).anyTimes();
 //    expect(metaInfo.getComponentsByService("test-stack-name", "test-stack-version", "test-service")).
 //        andReturn(serviceComponents).anyTimes();
 //    expect(metaInfo.getComponentToService("test-stack-name", "test-stack-version", "component1")).
@@ -870,7 +870,7 @@ public class BlueprintImplTest {
 //    expect(request.getProperties()).andReturn(setProperties);
 //    expect(request.getRequestInfoProperties()).andReturn(Collections.<String, String>emptyMap());
 //    expect(dao.findByName(BLUEPRINT_NAME)).andReturn(null);
-//    expect(metaInfo.getServiceConfigs("test-stack-name", "test-stack-version")).andReturn(services).anyTimes();
+//    expect(metaInfo.getServices("test-stack-name", "test-stack-version")).andReturn(services).anyTimes();
 //    expect(metaInfo.getComponentsByService("test-stack-name", "test-stack-version", "test-service")).
 //        andReturn(serviceComponents).anyTimes();
 //    expect(metaInfo.getComponentToService("test-stack-name", "test-stack-version", "component1")).
@@ -938,7 +938,7 @@ public class BlueprintImplTest {
 //    expect(request.getProperties()).andReturn(setProperties);
 //    expect(request.getRequestInfoProperties()).andReturn(Collections.<String, String>emptyMap());
 //    expect(dao.findByName(BLUEPRINT_NAME)).andReturn(null);
-//    expect(metaInfo.getServiceConfigs("test-stack-name", "test-stack-version")).andReturn(services).anyTimes();
+//    expect(metaInfo.getServices("test-stack-name", "test-stack-version")).andReturn(services).anyTimes();
 //    expect(metaInfo.getComponentsByService("test-stack-name", "test-stack-version", "test-service")).
 //        andReturn(serviceComponents).anyTimes();
 //    expect(metaInfo.getComponentToService("test-stack-name", "test-stack-version", "component1")).
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
index ca74cf0..771b89f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
@@ -263,7 +263,7 @@ public class ClusterConfigurationRequestTest {
 
     expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes();
     expect(topology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY).anyTimes();
-    expect(topology.getBlueprint()).andReturn(null).anyTimes();
+    expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(blueprint.isValidConfigType("testConfigType")).andReturn(true).anyTimes();
 
     expect(topology.getConfiguration()).andReturn(blueprintConfig).anyTimes();
@@ -352,7 +352,7 @@ public class ClusterConfigurationRequestTest {
 
     expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes();
     expect(topology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY).anyTimes();
-    expect(topology.getBlueprint()).andReturn(null).anyTimes();
+    expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(topology.getConfiguration()).andReturn(stackConfig).anyTimes();
     expect(topology.getHostGroupInfo()).andReturn(Collections.emptyMap()).anyTimes();
     expect(topology.getClusterId()).andReturn(Long.valueOf(1)).anyTimes();
@@ -390,7 +390,7 @@ public class ClusterConfigurationRequestTest {
 
     expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes();
     expect(topology.getConfiguration()).andReturn(configuration).anyTimes();
-    expect(topology.getBlueprint()).andReturn(null).anyTimes();
+    expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(topology.getHostGroupInfo()).andReturn(hostGroupInfoMap);
     expect(blueprint.getStack()).andReturn(stack).anyTimes();
     expect(blueprint.getServices()).andReturn(services).anyTimes();
@@ -440,7 +440,7 @@ public class ClusterConfigurationRequestTest {
 
     expect(topology.getAmbariContext()).andReturn(ambariContext).anyTimes();
     expect(topology.getConfiguration()).andReturn(configuration).anyTimes();
-    expect(topology.getBlueprint()).andReturn(null).anyTimes();
+    expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(topology.getHostGroupInfo()).andReturn(hostGroupInfoMap);
     expect(blueprint.getStack()).andReturn(stack).anyTimes();
     expect(blueprint.getServices()).andReturn(services).anyTimes();
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterDeployWithStartOnlyTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterDeployWithStartOnlyTest.java
index a22cef3..aecc6cb 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterDeployWithStartOnlyTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterDeployWithStartOnlyTest.java
@@ -288,7 +288,7 @@ public class ClusterDeployWithStartOnlyTest extends EasyMockSupport {
     expect(stack.getExcludedConfigurationTypes("service1")).andReturn(Collections.emptySet()).anyTimes();
     expect(stack.getExcludedConfigurationTypes("service2")).andReturn(Collections.emptySet()).anyTimes();
 
-    expect(request.getBlueprint()).andReturn(null).anyTimes();
+    expect(request.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(request.getClusterId()).andReturn(CLUSTER_ID).anyTimes();
     expect(request.getClusterName()).andReturn(CLUSTER_NAME).anyTimes();
     expect(request.getDescription()).andReturn("Provision Cluster Test").anyTimes();
@@ -341,7 +341,7 @@ public class ClusterDeployWithStartOnlyTest extends EasyMockSupport {
 
     expect(ambariContext.getPersistedTopologyState()).andReturn(persistedState).anyTimes();
     //todo: don't ignore param
-    ambariContext.createAmbariResources(isA(ClusterTopology.class), eq(CLUSTER_NAME), (SecurityType) isNull());
+    ambariContext.createAmbariResources(isA(ClusterTopology.class), eq(CLUSTER_NAME), (SecurityType) isNull(), eq("1"), anyLong());
     expectLastCall().once();
     expect(ambariContext.getNextRequestId()).andReturn(1L).once();
     expect(ambariContext.isClusterKerberosEnabled(CLUSTER_ID)).andReturn(false).anyTimes();
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartOnComponentLevelTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartOnComponentLevelTest.java
index ba2118d..a4b2160 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartOnComponentLevelTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartOnComponentLevelTest.java
@@ -284,7 +284,7 @@ public class ClusterInstallWithoutStartOnComponentLevelTest extends EasyMockSupp
     expect(stack.getExcludedConfigurationTypes("service1")).andReturn(Collections.emptySet()).anyTimes();
     expect(stack.getExcludedConfigurationTypes("service2")).andReturn(Collections.emptySet()).anyTimes();
 
-    expect(request.getBlueprint()).andReturn(null).anyTimes();
+    expect(request.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(request.getClusterId()).andReturn(CLUSTER_ID).anyTimes();
     expect(request.getClusterName()).andReturn(CLUSTER_NAME).anyTimes();
     expect(request.getDescription()).andReturn("Provision Cluster Test").anyTimes();
@@ -343,7 +343,7 @@ public class ClusterInstallWithoutStartOnComponentLevelTest extends EasyMockSupp
 
     expect(ambariContext.getPersistedTopologyState()).andReturn(persistedState).anyTimes();
     //todo: don't ignore param
-    ambariContext.createAmbariResources(isA(ClusterTopology.class), eq(CLUSTER_NAME), (SecurityType) isNull());
+    ambariContext.createAmbariResources(isA(ClusterTopology.class), eq(CLUSTER_NAME), (SecurityType) isNull(), (String) eq("1"), anyLong());
     expectLastCall().once();
     expect(ambariContext.getNextRequestId()).andReturn(1L).once();
     expect(ambariContext.isClusterKerberosEnabled(CLUSTER_ID)).andReturn(false).anyTimes();
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java
index 8d58107..d89c8ca 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java
@@ -286,7 +286,7 @@ public class ClusterInstallWithoutStartTest extends EasyMockSupport {
     expect(stack.getExcludedConfigurationTypes("service1")).andReturn(Collections.emptySet()).anyTimes();
     expect(stack.getExcludedConfigurationTypes("service2")).andReturn(Collections.emptySet()).anyTimes();
 
-    expect(request.getBlueprint()).andReturn(null).anyTimes();
+    expect(request.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(request.getClusterId()).andReturn(CLUSTER_ID).anyTimes();
     expect(request.getClusterName()).andReturn(CLUSTER_NAME).anyTimes();
     expect(request.getDescription()).andReturn("Provision Cluster Test").anyTimes();
@@ -340,7 +340,7 @@ public class ClusterInstallWithoutStartTest extends EasyMockSupport {
 
     expect(ambariContext.getPersistedTopologyState()).andReturn(persistedState).anyTimes();
     //todo: don't ignore param
-    ambariContext.createAmbariResources(isA(ClusterTopology.class), eq(CLUSTER_NAME), (SecurityType) isNull());
+    ambariContext.createAmbariResources(isA(ClusterTopology.class), eq(CLUSTER_NAME), (SecurityType) isNull(), eq("1"), anyLong());
     expectLastCall().once();
     expect(ambariContext.getNextRequestId()).andReturn(1L).once();
     expect(ambariContext.isClusterKerberosEnabled(CLUSTER_ID)).andReturn(false).anyTimes();
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterTopologyImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterTopologyImplTest.java
index d8c266d..e51ce5f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterTopologyImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterTopologyImplTest.java
@@ -31,7 +31,6 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.ambari.server.controller.internal.Stack;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -45,14 +44,13 @@ public class ClusterTopologyImplTest {
   private static final String CLUSTER_NAME = "cluster_name";
   private static final long CLUSTER_ID = 1L;
   private static final String predicate = "Hosts/host_name=foo";
-  private static final BlueprintV2 blueprint = createNiceMock(BlueprintV2.class);
-  private static final HostGroupV2 group1 = createNiceMock(HostGroupV2.class);
-  private static final HostGroupV2 group2 = createNiceMock(HostGroupV2.class);
-  private static final HostGroupV2 group3 = createNiceMock(HostGroupV2.class);
-  private static final HostGroupV2 group4 = createNiceMock(HostGroupV2.class);
-  private static final Stack stack = createNiceMock(Stack.class);
+  private static final Blueprint blueprint = createNiceMock(Blueprint.class);
+  private static final HostGroup group1 = createNiceMock(HostGroup.class);
+  private static final HostGroup group2 = createNiceMock(HostGroup.class);
+  private static final HostGroup group3 = createNiceMock(HostGroup.class);
+  private static final HostGroup group4 = createNiceMock(HostGroup.class);
   private final Map<String, HostGroupInfo> hostGroupInfoMap = new HashMap<>();
-  private final Map<String, HostGroupV2> hostGroupMap = new HashMap<>();
+  private final Map<String, HostGroup> hostGroupMap = new HashMap<>();
 
   private Configuration configuration;
   private Configuration bpconfiguration;
@@ -103,22 +101,22 @@ public class ClusterTopologyImplTest {
     hostGroupMap.put("group3", group3);
     hostGroupMap.put("group4", group4);
 
-    Set<ComponentV2> group1Components = new HashSet<>();
-    group1Components.add(null);
-    group1Components.add(null);
+    Set<Component> group1Components = new HashSet<>();
+    group1Components.add(new Component("component1"));
+    group1Components.add(new Component("component2"));
 
     Set<String> group1ComponentNames = new HashSet<>();
     group1ComponentNames.add("component1");
     group1ComponentNames.add("component2");
 
-    Set<ComponentV2> group2Components = new HashSet<>();
-    group2Components.add(null);
-    Set<ComponentV2> group3Components = new HashSet<>();
-    group3Components.add(null);
-    Set<ComponentV2> group4Components = new HashSet<>();
-    group4Components.add(null);
+    Set<Component> group2Components = new HashSet<>();
+    group2Components.add(new Component("component3"));
+    Set<Component> group3Components = new HashSet<>();
+    group3Components.add(new Component("component4"));
+    Set<Component> group4Components = new HashSet<>();
+    group4Components.add(new Component("component5"));
 
-    expect(blueprint.getHostGroups()).andReturn(null).anyTimes();
+    expect(blueprint.getHostGroups()).andReturn(hostGroupMap).anyTimes();
     expect(blueprint.getHostGroup("group1")).andReturn(group1).anyTimes();
     expect(blueprint.getHostGroup("group2")).andReturn(group2).anyTimes();
     expect(blueprint.getHostGroup("group3")).andReturn(group3).anyTimes();
@@ -145,6 +143,7 @@ public class ClusterTopologyImplTest {
     verify(blueprint, group1, group2, group3, group4);
     reset(blueprint, group1, group2, group3, group4);
 
+
     hostGroupInfoMap.clear();
     hostGroupMap.clear();
   }
@@ -242,7 +241,7 @@ public class ClusterTopologyImplTest {
     }
 
     @Override
-    public BlueprintV2 getBlueprint() {
+    public Blueprint getBlueprint() {
       return blueprint;
     }
 
@@ -252,11 +251,6 @@ public class ClusterTopologyImplTest {
     }
 
     @Override
-    public Collection<Service> getServiceConfigs() {
-      return null;
-    }
-
-    @Override
     public Map<String, HostGroupInfo> getHostGroupInfo() {
       return hostGroupInfoMap;
     }
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/LogicalRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/LogicalRequestTest.java
index 02e4e75..a5265f6 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/LogicalRequestTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/LogicalRequestTest.java
@@ -112,7 +112,7 @@ public class LogicalRequestTest extends EasyMockSupport {
     expect(clusterTopology.getAmbariContext()).andReturn(ambariContext).anyTimes();
     expect(clusterTopology.getClusterId()).andReturn(clusterId).anyTimes();
     expect(clusterTopology.getProvisionAction()).andReturn(ProvisionAction.INSTALL_ONLY).anyTimes();
-    expect(clusterTopology.getBlueprint()).andReturn(null).anyTimes();
+    expect(clusterTopology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(blueprint.getName()).andReturn("blueprintDef").anyTimes();
     expect(blueprint.shouldSkipFailure()).andReturn(true).anyTimes();
 
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/RequiredPasswordValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/RequiredPasswordValidatorTest.java
index e2b3346..c474493 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/RequiredPasswordValidatorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/RequiredPasswordValidatorTest.java
@@ -139,7 +139,7 @@ public class RequiredPasswordValidatorTest extends EasyMockSupport {
     hostGroupInfo.put("group2", hostGroup2Info);
 
     expect(topology.getConfiguration()).andReturn(topoClusterConfig).anyTimes();
-    expect(topology.getBlueprint()).andReturn(null).anyTimes();
+    expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(topology.getHostGroupInfo()).andReturn(hostGroupInfo).anyTimes();
 
     expect(blueprint.getHostGroups()).andReturn(hostGroups).anyTimes();
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java
index 4eb29b7..5f61c85 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java
@@ -18,6 +18,7 @@
 
 package org.apache.ambari.server.topology;
 
+import static org.easymock.EasyMock.anyLong;
 import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.capture;
 import static org.easymock.EasyMock.eq;
@@ -58,7 +59,6 @@ import org.apache.ambari.server.controller.spi.ClusterController;
 import org.apache.ambari.server.controller.spi.Resource;
 import org.apache.ambari.server.controller.spi.ResourceProvider;
 import org.apache.ambari.server.events.RequestFinishedEvent;
-import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
 import org.apache.ambari.server.orm.dao.SettingDAO;
 import org.apache.ambari.server.orm.entities.SettingEntity;
 import org.apache.ambari.server.security.authorization.AuthorizationHelper;
@@ -278,10 +278,10 @@ public class TopologyManagerTest {
     expect(stack.getConfiguration()).andReturn(stackConfig).anyTimes();
     expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
     expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
-    expect(stack.getExcludedConfigurationTypes("service1")).andReturn(new HashSet<String>()).anyTimes();
-    expect(stack.getExcludedConfigurationTypes("service2")).andReturn(new HashSet<String>()).anyTimes();
+    expect(stack.getExcludedConfigurationTypes("service1")).andReturn(Collections.emptySet()).anyTimes();
+    expect(stack.getExcludedConfigurationTypes("service2")).andReturn(Collections.emptySet()).anyTimes();
 
-    expect(request.getBlueprint()).andReturn(null).anyTimes();
+    expect(request.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(request.getClusterId()).andReturn(CLUSTER_ID).anyTimes();
     expect(request.getClusterName()).andReturn(CLUSTER_NAME).anyTimes();
     expect(request.getDescription()).andReturn("Provision Cluster Test").anyTimes();
@@ -323,7 +323,7 @@ public class TopologyManagerTest {
 
     expect(ambariContext.getPersistedTopologyState()).andReturn(persistedState).anyTimes();
     //todo: don't ignore param
-    ambariContext.createAmbariResources(isA(ClusterTopology.class), eq(CLUSTER_NAME), (SecurityType) isNull());
+    ambariContext.createAmbariResources(isA(ClusterTopology.class), eq(CLUSTER_NAME), (SecurityType) isNull(), (String) isNull(), anyLong());
     expectLastCall().anyTimes();
     expect(ambariContext.getNextRequestId()).andReturn(1L).anyTimes();
     expect(ambariContext.isClusterKerberosEnabled(CLUSTER_ID)).andReturn(false).anyTimes();
@@ -345,8 +345,7 @@ public class TopologyManagerTest {
 
     expect(clusterController.ensureResourceProvider(anyObject(Resource.Type.class))).andReturn(resourceProvider);
 
-    expect(configureClusterTaskFactory.createConfigureClusterTask(anyObject(ClusterTopology.class), anyObject(ClusterConfigurationRequest.class),
-            anyObject(AmbariEventPublisher.class))).andReturn(configureClusterTask);
+    expect(configureClusterTaskFactory.createConfigureClusterTask(anyObject(), anyObject(), anyObject())).andReturn(configureClusterTask);
     expect(configureClusterTask.getTimeout()).andReturn(1000L);
     expect(configureClusterTask.getRepeatDelay()).andReturn(50L);
     expect(executor.submit(anyObject(AsyncCallableService.class))).andReturn(mockFuture).anyTimes();
@@ -400,10 +399,10 @@ public class TopologyManagerTest {
     requestList.add(logicalRequest);
     expect(logicalRequest.hasPendingHostRequests()).andReturn(false).anyTimes();
     allRequests.put(clusterTopologyMock, requestList);
-    expect(requestStatusResponse.getTasks()).andReturn(new ArrayList<ShortTaskStatus>()).anyTimes();
+    expect(requestStatusResponse.getTasks()).andReturn(Collections.emptyList()).anyTimes();
     expect(clusterTopologyMock.isClusterKerberosEnabled()).andReturn(true);
     expect(clusterTopologyMock.getClusterId()).andReturn(CLUSTER_ID).anyTimes();
-    expect(clusterTopologyMock.getBlueprint()).andReturn(null).anyTimes();
+    expect(clusterTopologyMock.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(persistedState.getAllRequests()).andReturn(allRequests).anyTimes();
     expect(persistedState.getProvisionRequest(CLUSTER_ID)).andReturn(logicalRequest).anyTimes();
     expect(ambariContext.isTopologyResolved(CLUSTER_ID)).andReturn(true).anyTimes();
@@ -430,7 +429,7 @@ public class TopologyManagerTest {
     tasks.add(t3);
 
     expect(requestStatusResponse.getTasks()).andReturn(tasks).anyTimes();
-    expect(persistedState.getAllRequests()).andReturn(new HashMap<ClusterTopology, List<LogicalRequest>>()).anyTimes();
+    expect(persistedState.getAllRequests()).andReturn(Collections.emptyMap()).anyTimes();
     expect(persistedState.getProvisionRequest(CLUSTER_ID)).andReturn(logicalRequest).anyTimes();
     replayAll();
     topologyManager.provisionCluster(request);
@@ -452,7 +451,7 @@ public class TopologyManagerTest {
     tasks.add(t3);
 
     expect(requestStatusResponse.getTasks()).andReturn(tasks).anyTimes();
-    expect(persistedState.getAllRequests()).andReturn(new HashMap<ClusterTopology, List<LogicalRequest>>()).anyTimes();
+    expect(persistedState.getAllRequests()).andReturn(Collections.emptyMap()).anyTimes();
     expect(persistedState.getProvisionRequest(CLUSTER_ID)).andReturn(logicalRequest).anyTimes();
     replayAll();
     topologyManager.provisionCluster(request);
@@ -474,7 +473,7 @@ public class TopologyManagerTest {
     tasks.add(t3);
 
     expect(requestStatusResponse.getTasks()).andReturn(tasks).anyTimes();
-    expect(persistedState.getAllRequests()).andReturn(new HashMap<ClusterTopology, List<LogicalRequest>>()).anyTimes();
+    expect(persistedState.getAllRequests()).andReturn(Collections.emptyMap()).anyTimes();
     expect(persistedState.getProvisionRequest(CLUSTER_ID)).andReturn(logicalRequest).anyTimes();
     replayAll();
     topologyManager.provisionCluster(request);
@@ -546,9 +545,9 @@ public class TopologyManagerTest {
     propertySet.add(properties);
     BlueprintFactory bpfMock = EasyMock.createNiceMock(BlueprintFactory.class);
     EasyMock.expect(bpfMock.getBlueprint(BLUEPRINT_NAME)).andReturn(blueprint).anyTimes();
-    ScaleClusterRequest.init(null);
+    ScaleClusterRequest.init(bpfMock);
     replay(bpfMock);
-    expect(persistedState.getAllRequests()).andReturn(new HashMap<ClusterTopology, List<LogicalRequest>>()).anyTimes();
+    expect(persistedState.getAllRequests()).andReturn(Collections.emptyMap()).anyTimes();
     replayAll();
     topologyManager.provisionCluster(request);
     topologyManager.scaleHosts(new ScaleClusterRequest(propertySet));
@@ -557,7 +556,7 @@ public class TopologyManagerTest {
 
   @Test
   public void testProvisionCluster_QuickLinkProfileIsSavedTheFirstTime() throws Exception {
-    expect(persistedState.getAllRequests()).andReturn(new HashMap<ClusterTopology, List<LogicalRequest>>()).anyTimes();
+    expect(persistedState.getAllRequests()).andReturn(Collections.emptyMap()).anyTimes();
 
     // request has a quicklinks profile
     expect(request.getQuickLinksProfileJson()).andReturn(SAMPLE_QUICKLINKS_PROFILE_1).anyTimes();
@@ -580,7 +579,7 @@ public class TopologyManagerTest {
 
   @Test
   public void testProvisionCluster_ExistingQuickLinkProfileIsOverwritten() throws Exception {
-    expect(persistedState.getAllRequests()).andReturn(new HashMap<ClusterTopology, List<LogicalRequest>>()).anyTimes();
+    expect(persistedState.getAllRequests()).andReturn(Collections.emptyMap()).anyTimes();
 
     // request has a quicklinks profile
     expect(request.getQuickLinksProfileJson()).andReturn(SAMPLE_QUICKLINKS_PROFILE_2).anyTimes();
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/BlueprintImplV2Test.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/BlueprintImplV2Test.java
deleted file mode 100644
index ad2cdbd..0000000
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/BlueprintImplV2Test.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ambari.server.topology.validators;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.ambari.server.controller.StackV2;
-import org.apache.ambari.server.state.StackId;
-import org.apache.ambari.server.topology.BlueprintImplV2;
-import org.apache.ambari.server.topology.HostGroupV2;
-import org.apache.ambari.server.topology.HostGroupV2Impl;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import com.fasterxml.jackson.core.Version;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.SerializationFeature;
-import com.fasterxml.jackson.databind.module.SimpleAbstractTypeResolver;
-import com.fasterxml.jackson.databind.module.SimpleModule;
-import com.google.common.base.Charsets;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.io.Resources;
-
-public class BlueprintImplV2Test {
-
-  static String BLUEPRINT_V2_JSON;
-
-  @BeforeClass
-  public static void setUpClass() throws Exception {
-    BLUEPRINT_V2_JSON = Resources.toString(Resources.getResource("blueprintv2/blueprintv2.json"), Charsets.UTF_8);
-  }
-
-  @Test
-  public void testSerialization() throws Exception {
-    ObjectMapper mapper = new ObjectMapper();
-    SimpleModule module = new SimpleModule("CustomModel", Version.unknownVersion());
-    SimpleAbstractTypeResolver resolver = new SimpleAbstractTypeResolver();
-    resolver.addMapping(HostGroupV2.class, HostGroupV2Impl.class);
-    module.setAbstractTypes(resolver);
-    mapper.registerModule(module);
-    mapper.enable(SerializationFeature.INDENT_OUTPUT);
-    BlueprintImplV2 bp = mapper.readValue(BLUEPRINT_V2_JSON, BlueprintImplV2.class);
-    bp.postDeserialization();
-    // -- add stack --
-    StackV2 hdpCore = new StackV2("HDPCORE", "3.0.0", "3.0.0.0-1", new HashMap<>(), new HashMap<>(), new HashMap<>(),
-      new HashMap<>(), new HashMap<>(), new HashMap<>(), new HashMap<>(), new HashMap<>(), new HashMap<>());
-    StackV2 analytics = new StackV2("ANALYTICS", "1.0.0", "1.0.0.0-1", new HashMap<>(), new HashMap<>(), new HashMap<>(),
-      new HashMap<>(), new HashMap<>(), new HashMap<>(), new HashMap<>(), new HashMap<>(), new HashMap<>());
-    bp.setStacks(ImmutableMap.of(new StackId("HDPCORE", "3.0.0"), hdpCore, new StackId("ANALYTICS", "1.0.0"), analytics));
-    // ---------------
-    String bpJson = mapper.writeValueAsString(bp);
-    System.out.println(bpJson);
-    System.out.println("\n\n====================================================================================\n\n");
-    Map<String, Object> map = mapper.readValue(BLUEPRINT_V2_JSON, HashMap.class);
-    System.out.println(map);
-    System.out.println("\n\n====================================================================================\n\n");
-    String bpJson2 = mapper.writeValueAsString(map);
-    System.out.println(bpJson2);
-    System.out.println("\n\n====================================================================================\n\n");
-    BlueprintImplV2 bp2 = mapper.readValue(bpJson2, BlueprintImplV2.class);
-    System.out.println(bp2);
-  }
-}
\ No newline at end of file
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/ClusterConfigTypeValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/ClusterConfigTypeValidatorTest.java
index e469576..c2fea1d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/ClusterConfigTypeValidatorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/ClusterConfigTypeValidatorTest.java
@@ -19,12 +19,11 @@ import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
 
-import org.apache.ambari.server.controller.StackV2;
-import org.apache.ambari.server.topology.BlueprintV2;
+import org.apache.ambari.server.controller.internal.Stack;
+import org.apache.ambari.server.topology.Blueprint;
 import org.apache.ambari.server.topology.ClusterTopology;
 import org.apache.ambari.server.topology.Configuration;
 import org.apache.ambari.server.topology.InvalidTopologyException;
-import org.apache.ambari.server.topology.Service;
 import org.easymock.EasyMock;
 import org.easymock.EasyMockRule;
 import org.easymock.EasyMockSupport;
@@ -46,16 +45,10 @@ public class ClusterConfigTypeValidatorTest extends EasyMockSupport {
   private Configuration clusterConfigurationMock;
 
   @Mock
-  private BlueprintV2 blueprintMock;
+  private Blueprint blueprintMock;
 
   @Mock
-  private StackV2 stackMock;
-
-  @Mock
-  private Service yarnMock;
-
-  @Mock
-  private Service hdfsMock;
+  private Stack stackMock;
 
   @Mock
   private ClusterTopology clusterTopologyMock;
@@ -70,11 +63,7 @@ public class ClusterConfigTypeValidatorTest extends EasyMockSupport {
     EasyMock.expect(clusterTopologyMock.getConfiguration()).andReturn(clusterConfigurationMock).anyTimes();
 
     EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock).anyTimes();
-    EasyMock.expect(blueprintMock.getStackById("1")).andReturn(stackMock).anyTimes();
-    EasyMock.expect(yarnMock.getStackId()).andReturn("1").anyTimes();
-    EasyMock.expect(yarnMock.getType()).andReturn("YARN").anyTimes();
-    EasyMock.expect(hdfsMock.getStackId()).andReturn("1").anyTimes();
-    EasyMock.expect(hdfsMock.getType()).andReturn("HDFS").anyTimes();
+    EasyMock.expect(blueprintMock.getStack()).andReturn(stackMock).anyTimes();
   }
 
   @After
@@ -105,7 +94,7 @@ public class ClusterConfigTypeValidatorTest extends EasyMockSupport {
     clusterRequestConfigTypes = new HashSet<>(Arrays.asList("core-site", "yarn-site"));
     EasyMock.expect(clusterConfigurationMock.getAllConfigTypes()).andReturn(clusterRequestConfigTypes).anyTimes();
 
-    EasyMock.expect(blueprintMock.getAllServices()).andReturn(new HashSet<>(Arrays.asList(yarnMock, hdfsMock)));
+    EasyMock.expect(blueprintMock.getServices()).andReturn(new HashSet<>(Arrays.asList("YARN", "HDFS")));
 
     EasyMock.expect(stackMock.getConfigurationTypes("HDFS")).andReturn(Arrays.asList("core-site"));
     EasyMock.expect(stackMock.getConfigurationTypes("YARN")).andReturn(Arrays.asList("yarn-site"));
@@ -128,7 +117,7 @@ public class ClusterConfigTypeValidatorTest extends EasyMockSupport {
     clusterRequestConfigTypes = new HashSet<>(Arrays.asList("oozie-site"));
     EasyMock.expect(clusterConfigurationMock.getAllConfigTypes()).andReturn(clusterRequestConfigTypes).anyTimes();
 
-    EasyMock.expect(blueprintMock.getAllServices()).andReturn(new HashSet<>(Arrays.asList(yarnMock, hdfsMock)));
+    EasyMock.expect(blueprintMock.getServices()).andReturn(new HashSet<>(Arrays.asList("YARN", "HDFS")));
     EasyMock.expect(stackMock.getConfigurationTypes("HDFS")).andReturn(Arrays.asList("core-site"));
     EasyMock.expect(stackMock.getConfigurationTypes("YARN")).andReturn(Arrays.asList("yarn-site"));
 
@@ -149,7 +138,7 @@ public class ClusterConfigTypeValidatorTest extends EasyMockSupport {
     clusterRequestConfigTypes = new HashSet<>(Arrays.asList("core-site", "yarn-site", "oozie-site"));
     EasyMock.expect(clusterConfigurationMock.getAllConfigTypes()).andReturn(clusterRequestConfigTypes).anyTimes();
 
-    EasyMock.expect(blueprintMock.getAllServices()).andReturn(new HashSet<>(Arrays.asList(yarnMock, hdfsMock)));
+    EasyMock.expect(blueprintMock.getServices()).andReturn(new HashSet<>(Arrays.asList("YARN", "HDFS")));
 
     EasyMock.expect(stackMock.getConfigurationTypes("HDFS")).andReturn(Arrays.asList("core-site"));
     EasyMock.expect(stackMock.getConfigurationTypes("YARN")).andReturn(Arrays.asList("yarn-site"));
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/HiveServiceValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/HiveServiceValidatorTest.java
index 45d0b3e..f4c5ca5 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/HiveServiceValidatorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/HiveServiceValidatorTest.java
@@ -18,12 +18,10 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 
-import org.apache.ambari.server.topology.BlueprintV2;
+import org.apache.ambari.server.topology.Blueprint;
 import org.apache.ambari.server.topology.ClusterTopology;
-import org.apache.ambari.server.topology.ComponentV2;
 import org.apache.ambari.server.topology.Configuration;
 import org.apache.ambari.server.topology.InvalidTopologyException;
-import org.apache.ambari.server.topology.Service;
 import org.easymock.EasyMock;
 import org.easymock.EasyMockRule;
 import org.easymock.EasyMockSupport;
@@ -43,13 +41,7 @@ public class HiveServiceValidatorTest extends EasyMockSupport {
   private ClusterTopology clusterTopologyMock;
 
   @Mock
-  private BlueprintV2 blueprintMock;
-
-  @Mock
-  private Service hiveMock;
-
-  @Mock
-  private ComponentV2 mysqlComponent;
+  private Blueprint blueprintMock;
 
   @Mock
   private Configuration configurationMock;
@@ -73,7 +65,7 @@ public class HiveServiceValidatorTest extends EasyMockSupport {
 
     // GIVEN
     EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock);
-    EasyMock.expect(blueprintMock.getServicesByType("HIVE")).andReturn(Collections.emptySet());
+    EasyMock.expect(blueprintMock.getServices()).andReturn(Collections.emptySet());
     replayAll();
 
     // WHEN
@@ -87,10 +79,10 @@ public class HiveServiceValidatorTest extends EasyMockSupport {
   public void testShouldValidationFailWhenHiveServiceIsMissingConfigType() throws Exception {
 
     // GIVEN
-    Collection<Service> blueprintServices = Arrays.asList(hiveMock);
+    Collection<String> blueprintServices = Arrays.asList("HIVE", "OOZIE");
     EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock);
-    EasyMock.expect(blueprintMock.getServicesByType("HIVE")).andReturn(blueprintServices);
-    EasyMock.expect(hiveMock.getConfiguration()).andReturn(configurationMock);
+    EasyMock.expect(blueprintMock.getServices()).andReturn(blueprintServices);
+    EasyMock.expect(clusterTopologyMock.getConfiguration()).andReturn(configurationMock);
     EasyMock.expect(configurationMock.getAllConfigTypes()).andReturn(Collections.emptySet());
 
     replayAll();
@@ -106,11 +98,11 @@ public class HiveServiceValidatorTest extends EasyMockSupport {
   public void testShouldValidationPassWhenCustomHiveDatabaseSettingsProvided() throws Exception {
 
     // GIVEN
-    Collection<Service> blueprintServices = Arrays.asList(hiveMock);
+    Collection<String> blueprintServices = Arrays.asList("HIVE", "OOZIE");
     Collection<String> configTypes = Arrays.asList("hive-env", "core-site", "hadoop-env");
     EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock);
-    EasyMock.expect(blueprintMock.getServicesByType("HIVE")).andReturn(blueprintServices);
-    EasyMock.expect(hiveMock.getConfiguration()).andReturn(configurationMock);
+    EasyMock.expect(blueprintMock.getServices()).andReturn(blueprintServices);
+    EasyMock.expect(clusterTopologyMock.getConfiguration()).andReturn(configurationMock);
     EasyMock.expect(configurationMock.getAllConfigTypes()).andReturn(configTypes);
 
     EasyMock.expect(configurationMock.getPropertyValue("hive-env", "hive_database")).andReturn("PSQL");
@@ -126,12 +118,12 @@ public class HiveServiceValidatorTest extends EasyMockSupport {
   @Test(expected = InvalidTopologyException.class)
   public void testShouldValidationFailWhenDefaultsAreUsedAndMysqlComponentIsMissing() throws Exception {
     // GIVEN
-    Collection<Service> blueprintServices = Arrays.asList(hiveMock);
+    Collection<String> blueprintServices = Arrays.asList("HIVE", "HDFS");
     Collection<String> configTypes = Arrays.asList("hive-env", "core-site", "hadoop-env");
-    EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock).atLeastOnce();
-    EasyMock.expect(blueprintMock.getServicesByType("HIVE")).andReturn(blueprintServices).atLeastOnce();
-    EasyMock.expect(blueprintMock.getComponentsByType(hiveMock, "MYSQL_SERVER")).andReturn(Collections.emptyList()).atLeastOnce();
-    EasyMock.expect(hiveMock.getConfiguration()).andReturn(configurationMock);
+    EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock).anyTimes();
+    EasyMock.expect(blueprintMock.getServices()).andReturn(blueprintServices).anyTimes();
+    EasyMock.expect(blueprintMock.getComponents("HIVE")).andReturn(Collections.emptyList()).anyTimes();
+    EasyMock.expect(clusterTopologyMock.getConfiguration()).andReturn(configurationMock);
     EasyMock.expect(configurationMock.getAllConfigTypes()).andReturn(configTypes);
 
     EasyMock.expect(configurationMock.getPropertyValue("hive-env", "hive_database")).andReturn("New MySQL Database");
@@ -148,16 +140,16 @@ public class HiveServiceValidatorTest extends EasyMockSupport {
   @Test
   public void testShouldValidationPassWhenDefaultsAreUsedAndMsqlComponentIsListed() throws Exception {
     // GIVEN
-    Collection<Service> blueprintServices = Arrays.asList(hiveMock);
-    Collection<ComponentV2> hiveComponents = Arrays.asList(mysqlComponent);
+    Collection<String> blueprintServices = Arrays.asList("HIVE", "HDFS", "MYSQL_SERVER");
+    Collection<String> hiveComponents = Arrays.asList("MYSQL_SERVER");
     Collection<String> configTypes = Arrays.asList("hive-env", "core-site", "hadoop-env");
-    EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock).atLeastOnce();
-    EasyMock.expect(blueprintMock.getServicesByType("HIVE")).andReturn(blueprintServices).atLeastOnce();
-    EasyMock.expect(blueprintMock.getComponentsByType(hiveMock, "MYSQL_SERVER")).andReturn(hiveComponents).atLeastOnce();
-    EasyMock.expect(hiveMock.getConfiguration()).andReturn(configurationMock).atLeastOnce();
-    EasyMock.expect(configurationMock.getAllConfigTypes()).andReturn(configTypes).atLeastOnce();
+    EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock).anyTimes();
+    EasyMock.expect(blueprintMock.getServices()).andReturn(blueprintServices).anyTimes();
+    EasyMock.expect(blueprintMock.getComponents("HIVE")).andReturn(hiveComponents).anyTimes();
+    EasyMock.expect(clusterTopologyMock.getConfiguration()).andReturn(configurationMock);
+    EasyMock.expect(configurationMock.getAllConfigTypes()).andReturn(configTypes);
 
-    EasyMock.expect(configurationMock.getPropertyValue("hive-env", "hive_database")).andReturn("New MySQL Database").atLeastOnce();
+    EasyMock.expect(configurationMock.getPropertyValue("hive-env", "hive_database")).andReturn("New MySQL Database");
     replayAll();
 
     // WHEN
@@ -166,4 +158,4 @@ public class HiveServiceValidatorTest extends EasyMockSupport {
     // THEN
 
   }
-}
\ No newline at end of file
+}
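
The validator tests above use EasyMock's annotation-driven setup: an EasyMockRule plus @Mock and @TestSubject fields on a class extending EasyMockSupport, so replayAll()/verifyAll() cover all created mocks. A minimal sketch of that wiring, with a hypothetical Topology interface and ServicePresenceValidator subject standing in for the Ambari types:

import static org.easymock.EasyMock.expect;
import static org.junit.Assert.assertFalse;

import java.util.Collections;
import java.util.Set;

import org.easymock.EasyMockRule;
import org.easymock.EasyMockSupport;
import org.easymock.Mock;
import org.easymock.TestSubject;
import org.junit.Rule;
import org.junit.Test;

public class AnnotationDrivenMockSketch extends EasyMockSupport {

  @Rule
  public EasyMockRule mocks = new EasyMockRule(this);

  // Hypothetical collaborator; stands in for ClusterTopology in the tests above.
  interface Topology {
    Set<String> getServices();
  }

  // Hypothetical subject under test; the @Mock field below is injected into it by type.
  static class ServicePresenceValidator {
    private Topology topology;

    boolean hasService(String name) {
      return topology.getServices().contains(name);
    }
  }

  @Mock
  private Topology topologyMock;

  @TestSubject
  private ServicePresenceValidator validator = new ServicePresenceValidator();

  @Test
  public void reportsMissingService() {
    expect(topologyMock.getServices()).andReturn(Collections.singleton("HDFS"));
    replayAll();

    assertFalse(validator.hasService("HIVE"));

    verifyAll();
  }
}
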
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidatorTest.java
index f6b4ec8..780ca53 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidatorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/RequiredConfigPropertiesValidatorTest.java
@@ -22,13 +22,12 @@ import java.util.Map;
 import java.util.TreeMap;
 import java.util.TreeSet;
 
-import org.apache.ambari.server.controller.StackV2;
-import org.apache.ambari.server.topology.BlueprintV2;
+import org.apache.ambari.server.controller.internal.Stack;
+import org.apache.ambari.server.topology.Blueprint;
 import org.apache.ambari.server.topology.ClusterTopology;
 import org.apache.ambari.server.topology.Configuration;
-import org.apache.ambari.server.topology.HostGroupV2Impl;
+import org.apache.ambari.server.topology.HostGroup;
 import org.apache.ambari.server.topology.InvalidTopologyException;
-import org.apache.ambari.server.topology.Service;
 import org.easymock.EasyMock;
 import org.easymock.EasyMockRule;
 import org.easymock.EasyMockSupport;
@@ -54,19 +53,16 @@ public class RequiredConfigPropertiesValidatorTest extends EasyMockSupport {
   private Configuration topologyConfigurationMock;
 
   @Mock
-  private BlueprintV2 blueprintMock;
+  private Blueprint blueprintMock;
 
   @Mock
-  private StackV2 stackMock;
+  private Stack stackMock;
 
   @Mock
-  private Service kerberosMock;
+  private HostGroup slaveHostGroupMock;
 
   @Mock
-  private HostGroupV2Impl slaveHostGroupMock;
-
-  @Mock
-  private HostGroupV2Impl masterHostGroupMock;
+  private HostGroup masterHostGroupMock;
 
   @Mock
   private Configuration slaveHostGroupConfigurationMock;
@@ -79,10 +75,9 @@ public class RequiredConfigPropertiesValidatorTest extends EasyMockSupport {
   private Map<String, Map<String, String>> masterHostGroupConfigurationMap = new HashMap<>();
   private Map<String, Map<String, String>> slaveHostGroupConfigurationMap = new HashMap<>();
   private Collection<String> bpServices = new HashSet<>();
-  private Collection<Service> slaveHostGroupServices = new HashSet<>();
-  private Collection<Service> masterHostGroupServices = new HashSet<>();
-  private Map<String, HostGroupV2Impl> hostGroups = new HashMap<>();
-
+  private Collection<String> slaveHostGroupServices = new HashSet<>();
+  private Collection<String> masterHostGroupServices = new HashSet<>();
+  private Map<String, HostGroup> hostGroups = new HashMap<>();
   private Map<String, Collection<String>> missingProps = new TreeMap<>();
 
   @TestSubject
@@ -103,20 +98,19 @@ public class RequiredConfigPropertiesValidatorTest extends EasyMockSupport {
 
     EasyMock.expect(clusterTopologyMock.getBlueprint()).andReturn(blueprintMock).anyTimes();
 
-    EasyMock.expect((Map<String, HostGroupV2Impl>)blueprintMock.getHostGroups()).andReturn(hostGroups);
+    EasyMock.expect(blueprintMock.getHostGroups()).andReturn(hostGroups);
+    EasyMock.expect(blueprintMock.getServices()).andReturn(bpServices);
+    EasyMock.expect(blueprintMock.getStack()).andReturn(stackMock).anyTimes();
 
     EasyMock.expect(masterHostGroupMock.getName()).andReturn("master").anyTimes();
     EasyMock.expect(masterHostGroupMock.getConfiguration()).andReturn(masterHostGroupConfigurationMock).anyTimes();
     EasyMock.expect(masterHostGroupMock.getServices()).andReturn(masterHostGroupServices);
 
+
     EasyMock.expect(slaveHostGroupMock.getName()).andReturn("slave").anyTimes();
     EasyMock.expect(slaveHostGroupMock.getConfiguration()).andReturn(slaveHostGroupConfigurationMock).anyTimes();
     EasyMock.expect(slaveHostGroupMock.getServices()).andReturn(slaveHostGroupServices);
 
-    EasyMock.expect(kerberosMock.getName()).andReturn("KERBEROS").anyTimes();
-    EasyMock.expect(kerberosMock.getType()).andReturn("KERBEROS").anyTimes();
-    EasyMock.expect(kerberosMock.getStack()).andReturn(stackMock).anyTimes();
-
     // there are 2 hostgroups to be considered by the test
     hostGroups.put("master", masterHostGroupMock);
     hostGroups.put("slave", slaveHostGroupMock);
@@ -125,8 +119,8 @@ public class RequiredConfigPropertiesValidatorTest extends EasyMockSupport {
     bpServices.addAll(Lists.newArrayList("KERBEROS", "OOZIE"));
 
     // host group services
-    masterHostGroupServices.addAll(Collections.singletonList(kerberosMock));
-    slaveHostGroupServices.addAll(Collections.singletonList(kerberosMock));
+    masterHostGroupServices.addAll(Collections.singletonList("KERBEROS"));
+    slaveHostGroupServices.addAll(Collections.singletonList("KERBEROS"));
 
     EasyMock.expect(masterHostGroupConfigurationMock.getProperties()).andReturn(masterHostGroupConfigurationMap);
     EasyMock.expect(slaveHostGroupConfigurationMock.getProperties()).andReturn(slaveHostGroupConfigurationMap);
@@ -137,9 +131,9 @@ public class RequiredConfigPropertiesValidatorTest extends EasyMockSupport {
     // required properties for listed services
     EasyMock.expect(stackMock.getRequiredConfigurationProperties("KERBEROS")).
       andReturn(Lists.newArrayList(
-        new StackV2.ConfigProperty("kerberos-env", "realm", "value"),
-        new StackV2.ConfigProperty("kerberos-env", "kdc_type", "value"), // this is missing!
-        new StackV2.ConfigProperty("krb5-conf", "domains", "smthg"))).anyTimes();
+        new Stack.ConfigProperty("kerberos-env", "realm", "value"),
+        new Stack.ConfigProperty("kerberos-env", "kdc_type", "value"), // this is missing!
+        new Stack.ConfigProperty("krb5-conf", "domains", "smthg")));
 
     EasyMock.expect(stackMock.getRequiredConfigurationProperties("OOZIE")).andReturn(Collections.EMPTY_LIST);
 
@@ -308,4 +302,4 @@ public class RequiredConfigPropertiesValidatorTest extends EasyMockSupport {
 
   }
 
-}
\ No newline at end of file
+}
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidatorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/validators/StackConfigTypeValidatorTest.java
index 28739dc..417c403 100644
... 301 lines suppressed ...

-- 
To stop receiving notification emails like this one, please contact
"commits@ambari.apache.org" <co...@ambari.apache.org>.