Posted to commits@ambari.apache.org by nc...@apache.org on 2016/02/24 16:07:51 UTC

[04/50] [abbrv] ambari git commit: AMBARI-14743. Update package installation for version definition file (ncole)

AMBARI-14743. Update package installation for version definition file (ncole)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/fc402ebc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/fc402ebc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/fc402ebc

Branch: refs/heads/trunk
Commit: fc402ebc50929f30224481d605325157cb302ec9
Parents: f31a06d
Author: Nate Cole <nc...@hortonworks.com>
Authored: Wed Jan 20 11:19:59 2016 -0500
Committer: Nate Cole <nc...@hortonworks.com>
Committed: Wed Jan 20 12:04:15 2016 -0500

----------------------------------------------------------------------
 .../ClusterStackVersionResourceProvider.java    |  28 ++-
 .../RepositoryVersionResourceProvider.java      |  17 +-
 .../orm/entities/RepositoryVersionEntity.java   |  15 ++
 .../state/repository/AvailableService.java      |   7 +
 ...ClusterStackVersionResourceProviderTest.java | 238 ++++++++++++++++++-
 .../src/test/resources/hbase_version_test.xml   |  59 +++++
 6 files changed, 348 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
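
In short, the change teaches ClusterStackVersionResourceProvider to read the repository's version definition XML (when one is attached and the repository type is not STANDARD) and to limit package installation to the services named there that also exist in the cluster. A condensed sketch of that flow, using the names from the diff below (repoVersionEnt, ami, stackId and cluster are existing locals in the provider; the real code wraps this in a try/catch and rethrows failures as SystemException):

    // keep only the services declared in the version definition that are part of this cluster
    Set<String> serviceNames = new HashSet<>();
    if (RepositoryType.STANDARD != repoVersionEnt.getType() && null != repoVersionEnt.getVersionXsd()) {
      VersionDefinitionXml xml = repoVersionEnt.getRepositoryXml();
      Collection<AvailableService> available = xml.getAvailableServices(
          ami.getStack(stackId.getStackName(), stackId.getStackVersion()));
      for (AvailableService as : available) {
        if (cluster.getServices().containsKey(as.getName())) {
          serviceNames.add(as.getName());
        }
      }
    }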


http://git-wip-us.apache.org/repos/asf/ambari/blob/fc402ebc/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
index b114ea5..ce5606e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java
@@ -73,11 +73,14 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.MaintenanceState;
+import org.apache.ambari.server.state.RepositoryType;
 import org.apache.ambari.server.state.RepositoryVersionState;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.ServiceOsSpecific;
 import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.repository.AvailableService;
+import org.apache.ambari.server.state.repository.VersionDefinitionXml;
 import org.apache.ambari.server.utils.StageUtils;
 import org.apache.commons.lang.StringUtils;
 
@@ -415,9 +418,27 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
 
       // determine services for the repo
       Set<String> serviceNames = new HashSet<>();
-//      for (RepositoryVersionEntity.Component component : repoVersionEnt.getComponents()) {
-//        serviceNames.add(component.getService());
-//      }
+      if (RepositoryType.STANDARD != repoVersionEnt.getType() && null != repoVersionEnt.getVersionXsd()) {
+        VersionDefinitionXml xml = null;
+        try {
+         xml = repoVersionEnt.getRepositoryXml();
+
+         Collection<AvailableService> available = xml.getAvailableServices(
+             ami.getStack(stackId.getStackName(), stackId.getStackVersion()));
+
+         // check if the service is part of the cluster
+         for (AvailableService as : available) {
+           if (cluster.getServices().containsKey(as.getName())) {
+             serviceNames.add(as.getName());
+           }
+         }
+
+        } catch (Exception e) {
+          String msg = String.format("Could not load repo xml for %s", repoVersionEnt.getDisplayName());
+          LOG.error(msg, e);
+          throw new SystemException (msg);
+        }
+      }
 
       // Populate with commands for host
       for (int i = 0; i < maxTasks && hostIterator.hasNext(); i++) {
@@ -573,6 +594,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
    */
   private boolean hostHasVersionableComponents(Cluster cluster, Set<String> serviceNames, AmbariMetaInfo ami, StackId stackId,
       Host host) throws SystemException {
+
     List<ServiceComponentHost> components = cluster.getServiceComponentHosts(host.getHostName());
 
     for (ServiceComponentHost component : components) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/fc402ebc/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryVersionResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryVersionResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryVersionResourceProvider.java
index 8b34cc8..92b14b7 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryVersionResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryVersionResourceProvider.java
@@ -263,15 +263,16 @@ public class RepositoryVersionResourceProvider extends AbstractAuthorizedResourc
       setResourceProperty(resource, REPOSITORY_VERSION_REPOSITORY_VERSION_PROPERTY_ID, entity.getVersion(), requestedIds);
       setResourceProperty(resource, REPOSITORY_VERSION_TYPE_PROPERTY_ID, entity.getType(), requestedIds);
 
-      if (null != entity.getVersionXsd()) {
-        final VersionDefinitionXml xml;
-        final StackInfo stack;
+      final VersionDefinitionXml xml;
 
-        try {
-          xml = VersionDefinitionXml.load(entity.getVersionXml());
-        } catch (Exception e) {
-          throw new SystemException(String.format("Could not load xml for Repository %s", entity.getId()), e);
-        }
+      try {
+        xml = entity.getRepositoryXml();
+      } catch (Exception e) {
+        throw new SystemException(String.format("Could not load xml for Repository %s", entity.getId()), e);
+      }
+
+      if (null != xml) {
+        final StackInfo stack;
 
         try {
           stack = ambariMetaInfo.getStack(entity.getStackName(), entity.getStackVersion());

http://git-wip-us.apache.org/repos/asf/ambari/blob/fc402ebc/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepositoryVersionEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepositoryVersionEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepositoryVersionEntity.java
index a31b135..e2e455b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepositoryVersionEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/RepositoryVersionEntity.java
@@ -44,6 +44,7 @@ import javax.persistence.UniqueConstraint;
 import org.apache.ambari.server.StaticallyInject;
 import org.apache.ambari.server.state.RepositoryType;
 import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.repository.VersionDefinitionXml;
 import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
@@ -322,6 +323,20 @@ public class RepositoryVersionEntity {
     versionXsd = xsdLocation;
   }
 
+  /**
+   * Parse the version XML into its object representation.  This causes the XML to be lazy-loaded
+   * from storage.
+   * @return {@code null} if the XSD is not available.
+   * @throws Exception
+   */
+  public VersionDefinitionXml getRepositoryXml() throws Exception {
+    if (null == versionXsd) {
+      return null;
+    }
+
+    return VersionDefinitionXml.load(getVersionXml());
+  }
+
   @Override
   public int hashCode() {
     int result = id != null ? id.hashCode() : 0;
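
The new accessor gives callers one place to go from the stored XML text to a parsed VersionDefinitionXml. A minimal caller-side sketch, mirroring how RepositoryVersionResourceProvider above consumes it (here entity is a RepositoryVersionEntity):

    VersionDefinitionXml xml;
    try {
      // parses the stored version XML, or returns null when no XSD is recorded
      xml = entity.getRepositoryXml();
    } catch (Exception e) {
      throw new SystemException(String.format("Could not load xml for Repository %s", entity.getId()), e);
    }
    if (null != xml) {
      // safe to ask for available services, release info, etc.
    }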

http://git-wip-us.apache.org/repos/asf/ambari/blob/fc402ebc/ambari-server/src/main/java/org/apache/ambari/server/state/repository/AvailableService.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/repository/AvailableService.java b/ambari-server/src/main/java/org/apache/ambari/server/state/repository/AvailableService.java
index ce42b69..70f3c40 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/repository/AvailableService.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/repository/AvailableService.java
@@ -45,6 +45,13 @@ public class AvailableService {
   }
 
   /**
+   * @return the service name
+   */
+  public String getName() {
+    return name;
+  }
+
+  /**
    * @return the list of versions to append additional versions.
    */
   public List<AvailableVersion> getVersions() {

http://git-wip-us.apache.org/repos/asf/ambari/blob/fc402ebc/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
index 5c21433..c6d0c57 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
@@ -29,8 +29,11 @@ import static org.easymock.EasyMock.expectLastCall;
 import static org.easymock.EasyMock.replay;
 import static org.easymock.EasyMock.verify;
 
+import java.io.File;
+import java.io.FileInputStream;
 import java.lang.reflect.Field;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
@@ -51,6 +54,7 @@ import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.controller.ExecuteActionRequest;
 import org.apache.ambari.server.controller.RequestStatusResponse;
 import org.apache.ambari.server.controller.ResourceProviderFactory;
 import org.apache.ambari.server.controller.spi.Request;
@@ -80,8 +84,10 @@ import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.Host;
 import org.apache.ambari.server.state.MaintenanceState;
+import org.apache.ambari.server.state.RepositoryType;
 import org.apache.ambari.server.state.RepositoryVersionState;
 import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.ServiceOsSpecific;
@@ -89,6 +95,7 @@ import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.cluster.ClusterImpl;
 import org.apache.ambari.server.topology.TopologyManager;
 import org.apache.ambari.server.utils.StageUtils;
+import org.apache.commons.io.IOUtils;
 import org.easymock.Capture;
 import org.easymock.EasyMock;
 import org.easymock.IAnswer;
@@ -123,7 +130,7 @@ public class ClusterStackVersionResourceProviderTest {
   private HostVersionDAO hostVersionDAO;
   private HostComponentStateDAO hostComponentStateDAO;
 
-  private String operatingSystemsJson = "[\n" +
+  private static final String OS_JSON = "[\n" +
           "   {\n" +
           "      \"repositories\":[\n" +
           "         {\n" +
@@ -141,7 +148,6 @@ public class ClusterStackVersionResourceProviderTest {
           "   }\n" +
           "]";
 
-
   @Before
   public void setup() throws Exception {
     // Create instances of mocks
@@ -182,7 +188,7 @@ public class ClusterStackVersionResourceProviderTest {
 
     RepositoryVersionEntity repoVersion = new RepositoryVersionEntity();
     repoVersion.setId(1l);
-    repoVersion.setOperatingSystems(operatingSystemsJson);
+    repoVersion.setOperatingSystems(OS_JSON);
 
     Map<String, Host> hostsForCluster = new HashMap<String, Host>();
     int hostCount = 10;
@@ -345,6 +351,228 @@ public class ClusterStackVersionResourceProviderTest {
     // check that the success factor was populated in the stage
     Float successFactor = successFactors.get(Role.INSTALL_PACKAGES);
     Assert.assertEquals(Float.valueOf(0.85f), successFactor);
+
+
+  }
+
+  @Test
+  public void testCreateResourcesForPatch() throws Exception {
+    Resource.Type type = Resource.Type.ClusterStackVersion;
+
+    AmbariManagementController managementController = createMock(AmbariManagementController.class);
+    Clusters clusters = createNiceMock(Clusters.class);
+    Cluster cluster = createNiceMock(Cluster.class);
+    StackId stackId = new StackId("HDP", "2.0.1");
+
+    File f = new File("src/test/resources/hbase_version_test.xml");
+
+    RepositoryVersionEntity repoVersion = new RepositoryVersionEntity();
+    repoVersion.setId(1l);
+    repoVersion.setOperatingSystems(OS_JSON);
+    repoVersion.setVersionXml(IOUtils.toString(new FileInputStream(f)));
+    repoVersion.setVersionXsd("version_definition.xsd");
+    repoVersion.setType(RepositoryType.PATCH);
+
+    ambariMetaInfo.getComponent("HDP", "2.1.1", "HBASE", "HBASE_MASTER").setVersionAdvertised(true);
+
+
+    Map<String, Host> hostsForCluster = new HashMap<String, Host>();
+    int hostCount = 10;
+    for (int i = 0; i < hostCount; i++) {
+      String hostname = "host" + i;
+      Host host = createNiceMock(hostname, Host.class);
+      expect(host.getHostName()).andReturn(hostname).anyTimes();
+      expect(host.getOsFamily()).andReturn("redhat6").anyTimes();
+      expect(host.getMaintenanceState(EasyMock.anyLong())).andReturn(
+          MaintenanceState.OFF).anyTimes();
+      expect(host.getAllHostVersions()).andReturn(
+          Collections.<HostVersionEntity>emptyList()).anyTimes();
+
+      replay(host);
+      hostsForCluster.put(hostname, host);
+    }
+
+    Service hdfsService = createNiceMock(Service.class);
+    Service hbaseService = createNiceMock(Service.class);
+    expect(hdfsService.getName()).andReturn("HDFS").anyTimes();
+    expect(hbaseService.getName()).andReturn("HBASE").anyTimes();
+//    Service metricsService = createNiceMock(Service.class);
+
+    ServiceComponent scNameNode = createNiceMock(ServiceComponent.class);
+    ServiceComponent scDataNode = createNiceMock(ServiceComponent.class);
+    ServiceComponent scHBaseMaster = createNiceMock(ServiceComponent.class);
+    ServiceComponent scMetricCollector = createNiceMock(ServiceComponent.class);
+
+    expect(hdfsService.getServiceComponents()).andReturn(new HashMap<String, ServiceComponent>());
+    expect(hbaseService.getServiceComponents()).andReturn(new HashMap<String, ServiceComponent>());
+//    expect(metricsService.getServiceComponents()).andReturn(new HashMap<String, ServiceComponent>());
+
+
+    Map<String, Service> serviceMap = new HashMap<>();
+    serviceMap.put("HDFS", hdfsService);
+    serviceMap.put("HBASE", hbaseService);
+
+
+    final ServiceComponentHost schDatanode = createMock(ServiceComponentHost.class);
+    expect(schDatanode.getServiceName()).andReturn("HDFS").anyTimes();
+    expect(schDatanode.getServiceComponentName()).andReturn("DATANODE").anyTimes();
+
+    final ServiceComponentHost schNamenode = createMock(ServiceComponentHost.class);
+    expect(schNamenode.getServiceName()).andReturn("HDFS").anyTimes();
+    expect(schNamenode.getServiceComponentName()).andReturn("NAMENODE").anyTimes();
+
+    final ServiceComponentHost schAMS = createMock(ServiceComponentHost.class);
+    expect(schAMS.getServiceName()).andReturn("AMBARI_METRICS").anyTimes();
+    expect(schAMS.getServiceComponentName()).andReturn("METRICS_COLLECTOR").anyTimes();
+
+    final ServiceComponentHost schHBM = createMock(ServiceComponentHost.class);
+    expect(schHBM.getServiceName()).andReturn("HBASE").anyTimes();
+    expect(schHBM.getServiceComponentName()).andReturn("HBASE_MASTER").anyTimes();
+
+    // First host contains versionable components
+    final List<ServiceComponentHost> schsH1 = Arrays.asList(schDatanode, schNamenode, schAMS);
+
+    // Second host does not contain versionable components
+    final List<ServiceComponentHost> schsH2 = Arrays.asList(schAMS);
+
+    // Third host only has hbase
+    final List<ServiceComponentHost> schsH3 = Arrays.asList(schHBM);
+
+    ServiceOsSpecific.Package hdfsPackage = new ServiceOsSpecific.Package();
+    hdfsPackage.setName("hdfs");
+
+//    ServiceOsSpecific.Package hbasePackage = new ServiceOsSpecific.Package();
+//    hbasePackage.setName("hbase");
+
+    List<ServiceOsSpecific.Package> packages = Collections.singletonList(hdfsPackage);
+
+    ActionManager actionManager = createNiceMock(ActionManager.class);
+
+    RequestStatusResponse response = createNiceMock(RequestStatusResponse.class);
+    ResourceProviderFactory resourceProviderFactory = createNiceMock(ResourceProviderFactory.class);
+    ResourceProvider csvResourceProvider = createNiceMock(ClusterStackVersionResourceProvider.class);
+
+    AbstractControllerResourceProvider.init(resourceProviderFactory);
+
+    Map<String, Map<String, String>> hostConfigTags = new HashMap<String, Map<String, String>>();
+    expect(configHelper.getEffectiveDesiredTags(anyObject(ClusterImpl.class), anyObject(String.class))).andReturn(hostConfigTags);
+
+    expect(managementController.getClusters()).andReturn(clusters).anyTimes();
+    expect(managementController.getAmbariMetaInfo()).andReturn(ambariMetaInfo).anyTimes();
+    expect(managementController.getAuthName()).andReturn("admin").anyTimes();
+    expect(managementController.getActionManager()).andReturn(actionManager).anyTimes();
+    expect(managementController.getJdkResourceUrl()).andReturn("/JdkResourceUrl").anyTimes();
+    expect(managementController.getPackagesForServiceHost(anyObject(ServiceInfo.class),
+            (Map<String, String>) anyObject(List.class), anyObject(String.class))).
+            andReturn(packages).times(1); // only one host has the versionable component
+
+    expect(resourceProviderFactory.getHostResourceProvider(anyObject(Set.class), anyObject(Map.class),
+            eq(managementController))).andReturn(csvResourceProvider).anyTimes();
+
+    expect(clusters.getCluster(anyObject(String.class))).andReturn(cluster);
+    expect(clusters.getHostsForCluster(anyObject(String.class))).andReturn(
+        hostsForCluster).anyTimes();
+
+    String clusterName = "Cluster100";
+    expect(cluster.getClusterId()).andReturn(1L).anyTimes();
+    expect(cluster.getHosts()).andReturn(hostsForCluster.values()).atLeastOnce();
+    expect(cluster.getServices()).andReturn(serviceMap).anyTimes();
+    expect(cluster.getCurrentStackVersion()).andReturn(stackId);
+    expect(cluster.getServiceComponentHosts(anyObject(String.class))).andAnswer(new IAnswer<List<ServiceComponentHost>>() {
+      @Override
+      public List<ServiceComponentHost> answer() throws Throwable {
+        String hostname = (String) EasyMock.getCurrentArguments()[0];
+        if (hostname.equals("host2")) {
+          return schsH2;
+        } else if (hostname.equals("host3")) {
+          return schsH3;
+        } else {
+          return schsH1;
+        }
+      }
+    }).anyTimes();
+
+    ExecutionCommand executionCommand = createNiceMock(ExecutionCommand.class);
+    ExecutionCommandWrapper executionCommandWrapper = createNiceMock(ExecutionCommandWrapper.class);
+
+    expect(executionCommandWrapper.getExecutionCommand()).andReturn(executionCommand).anyTimes();
+
+    Stage stage = createNiceMock(Stage.class);
+    expect(stage.getExecutionCommandWrapper(anyObject(String.class), anyObject(String.class))).
+            andReturn(executionCommandWrapper).anyTimes();
+
+    Map<Role, Float> successFactors = new HashMap<>();
+    expect(stage.getSuccessFactors()).andReturn(successFactors).atLeastOnce();
+
+    // Check that we create proper stage count
+    expect(stageFactory.createNew(anyLong(), anyObject(String.class),
+            anyObject(String.class), anyLong(),
+            anyObject(String.class), anyObject(String.class), anyObject(String.class),
+            anyObject(String.class))).andReturn(stage).
+            times((int) Math.ceil(hostCount / MAX_TASKS_PER_STAGE));
+
+    expect(
+            repositoryVersionDAOMock.findByStackAndVersion(
+                    anyObject(StackId.class),
+                    anyObject(String.class))).andReturn(repoVersion);
+
+    Capture<org.apache.ambari.server.actionmanager.Request> c = Capture.newInstance();
+    Capture<ExecuteActionRequest> ear = Capture.newInstance();
+
+    actionManager.sendActions(capture(c), capture(ear));
+    expectLastCall().atLeastOnce();
+    expect(actionManager.getRequestTasks(anyLong())).andReturn(Collections.<HostRoleCommand>emptyList()).anyTimes();
+
+    ClusterEntity clusterEntity = new ClusterEntity();
+    clusterEntity.setClusterId(1l);
+    clusterEntity.setClusterName(clusterName);
+    ClusterVersionEntity cve = new ClusterVersionEntity(clusterEntity,
+            repoVersion, RepositoryVersionState.INSTALL_FAILED, 0, "");
+    expect(clusterVersionDAO.findByClusterAndStackAndVersion(anyObject(String.class),
+            anyObject(StackId.class), anyObject(String.class))).andReturn(cve);
+
+    TopologyManager topologyManager = injector.getInstance(TopologyManager.class);
+    StageUtils.setTopologyManager(topologyManager);
+
+    // replay
+    replay(managementController, response, clusters, hdfsService, hbaseService, resourceProviderFactory, csvResourceProvider,
+            cluster, repositoryVersionDAOMock, configHelper, schDatanode, schNamenode, schAMS, schHBM, actionManager,
+            executionCommand, executionCommandWrapper,stage, stageFactory, clusterVersionDAO);
+
+    ResourceProvider provider = AbstractControllerResourceProvider.getResourceProvider(
+        type,
+        PropertyHelper.getPropertyIds(type),
+        PropertyHelper.getKeyPropertyIds(type),
+        managementController);
+
+    injector.injectMembers(provider);
+
+    // add the property map to a set for the request.  add more maps for multiple creates
+    Set<Map<String, Object>> propertySet = new LinkedHashSet<Map<String, Object>>();
+
+    Map<String, Object> properties = new LinkedHashMap<String, Object>();
+
+    // add properties to the request map
+    properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
+    properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_REPOSITORY_VERSION_PROPERTY_ID, "2.2.0.1-885");
+    properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_STACK_PROPERTY_ID, "HDP");
+    properties.put(ClusterStackVersionResourceProvider.CLUSTER_STACK_VERSION_VERSION_PROPERTY_ID, "2.1.1");
+
+    propertySet.add(properties);
+
+    // create the request
+    Request request = PropertyHelper.getCreateRequest(propertySet, null);
+
+    RequestStatus status = provider.createResources(request);
+    Assert.assertNotNull(status);
+
+    // verify
+    verify(managementController, response, clusters, stageFactory, stage);
+
+    // check that the success factor was populated in the stage
+    Float successFactor = successFactors.get(Role.INSTALL_PACKAGES);
+    Assert.assertEquals(Float.valueOf(0.85f), successFactor);
+
   }
 
 
@@ -395,7 +623,7 @@ public class ClusterStackVersionResourceProviderTest {
     expect(clusters.getCluster(anyObject(String.class))).andReturn(cluster);
 
     RepositoryVersionEntity repoVersion = new RepositoryVersionEntity();
-    repoVersion.setOperatingSystems(operatingSystemsJson);
+    repoVersion.setOperatingSystems(OS_JSON);
     StackEntity newDesiredStack = stackDAO.find("HDP", "2.0.1");
     repoVersion.setStack(newDesiredStack);
 
@@ -541,7 +769,7 @@ public class ClusterStackVersionResourceProviderTest {
     expect(clusters.getCluster(anyObject(String.class))).andReturn(cluster);
 
     RepositoryVersionEntity repoVersion = new RepositoryVersionEntity();
-    repoVersion.setOperatingSystems(operatingSystemsJson);
+    repoVersion.setOperatingSystems(OS_JSON);
     StackEntity newDesiredStack = stackDAO.find("HDP", "2.0.1");
     repoVersion.setStack(newDesiredStack);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/fc402ebc/ambari-server/src/test/resources/hbase_version_test.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/hbase_version_test.xml b/ambari-server/src/test/resources/hbase_version_test.xml
new file mode 100644
index 0000000..9df07ed
--- /dev/null
+++ b/ambari-server/src/test/resources/hbase_version_test.xml
@@ -0,0 +1,59 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<repository-version xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:noNamespaceSchemaLocation="version_definition.xsd">
+  
+  <release>
+    <type>PATCH</type>
+    <stack-id>HDP-2.3</stack-id>
+    <version>2.3.4.0</version>
+    <build>3396</build>
+    <compatible-with>2.3.2.[0-9]</compatible-with>
+    <release-notes>http://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.3.4/</release-notes>
+  </release>
+  
+  <manifest>
+    <service id="HBASE-112" name="HBASE" version="1.1.2" version-id="2_3_4_0-3396" />
+  </manifest>
+  
+  <available-services>
+    <service idref="HBASE-112" />
+  </available-services>
+  
+  <repository-info>
+    <os family="redhat6">
+      <repo>
+        <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.3.4.0</baseurl>
+        <repoid>HDP-2.3</repoid>
+        <reponame>HDP</reponame>
+      </repo>
+      <repo>
+        <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.20/repos/centos6</baseurl>
+        <repoid>HDP-UTILS-1.1.0.20</repoid>
+        <reponame>HDP-UTILS</reponame>
+      </repo>
+    </os>
+  </repository-info>
+  
+  <upgrade>
+    <configuration type="hdfs-site">
+      <set key="foo" value="bar" />
+    </configuration>
+  </upgrade>
+</repository-version>
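
For reference, testCreateResourcesForPatch above wires this file into a RepositoryVersionEntity so that getRepositoryXml() yields a PATCH definition whose <available-services> lists only HBASE; a condensed excerpt of that setup, taken from the test:

    File f = new File("src/test/resources/hbase_version_test.xml");
    RepositoryVersionEntity repoVersion = new RepositoryVersionEntity();
    repoVersion.setOperatingSystems(OS_JSON);
    repoVersion.setVersionXml(IOUtils.toString(new FileInputStream(f)));
    repoVersion.setVersionXsd("version_definition.xsd");
    repoVersion.setType(RepositoryType.PATCH);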