You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@cloudstack.apache.org by pr...@apache.org on 2014/02/05 00:39:30 UTC
[46/50] [abbrv] git commit: updated refs/heads/rbac to 9e92197
CLOUDSTACK-5995: change service offering is not honouring host tags
- Check host tag when the lastHostId is set.
Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/7d0472bd
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/7d0472bd
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/7d0472bd
Branch: refs/heads/rbac
Commit: 7d0472bdaa2d25f7a3658c44a11bebcfd258bc42
Parents: 1582801
Author: Prachi Damle <pr...@cloud.com>
Authored: Thu Jan 30 11:24:46 2014 -0800
Committer: Prachi Damle <pr...@cloud.com>
Committed: Fri Jan 31 12:02:44 2014 -0800
----------------------------------------------------------------------
.../deploy/DeploymentPlanningManagerImpl.java | 97 ++++++++++++--------
1 file changed, 58 insertions(+), 39 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/7d0472bd/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java
index 35a0b39..3c87b24 100644
--- a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java
+++ b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java
@@ -362,49 +362,68 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
" already has max Running VMs(count includes system VMs), skipping this and trying other available hosts");
} else {
if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) {
- long cluster_id = host.getClusterId();
- ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio");
- ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio");
- Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
- Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
- if (_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true)
- && _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed())) {
- s_logger.debug("The last host of this VM is UP and has enough capacity");
- s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " +
- host.getClusterId());
- // search for storage under the zone, pod, cluster of
- // the last host.
- DataCenterDeployment lastPlan =
- new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null);
- Pair<Map<Volume, List<StoragePool>>, List<Volume>> result =
- findSuitablePoolsForVolumes(vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
- Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
- List<Volume> readyAndReusedVolumes = result.second();
-
- // choose the potential pool for this VM for this host
- if (!suitableVolumeStoragePools.isEmpty()) {
- List<Host> suitableHosts = new ArrayList<Host>();
- suitableHosts.add(host);
- Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
- suitableHosts, suitableVolumeStoragePools, avoids,
- getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes);
- if (potentialResources != null) {
- Pod pod = _podDao.findById(host.getPodId());
- Cluster cluster = _clusterDao.findById(host.getClusterId());
- Map<Volume, StoragePool> storageVolMap = potentialResources.second();
- // remove the reused vol<->pool from
- // destination, since we don't have to prepare
- // this volume.
- for (Volume vol : readyAndReusedVolumes) {
- storageVolMap.remove(vol);
+ boolean hostTagsMatch = true;
+ if(offering.getHostTag() != null){
+ _hostDao.loadHostTags(host);
+ if (!(host.getHostTags() != null && host.getHostTags().contains(offering.getHostTag()))) {
+ hostTagsMatch = false;
+ }
+ }
+ if (hostTagsMatch) {
+ long cluster_id = host.getClusterId();
+ ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id,
+ "cpuOvercommitRatio");
+ ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id,
+ "memoryOvercommitRatio");
+ Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
+ Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
+ if (_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true,
+ cpuOvercommitRatio, memoryOvercommitRatio, true)
+ && _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(),
+ offering.getSpeed())) {
+ s_logger.debug("The last host of this VM is UP and has enough capacity");
+ s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId()
+ + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId());
+ // search for storage under the zone, pod, cluster
+ // of
+ // the last host.
+ DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(),
+ host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null);
+ Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(
+ vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
+ Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
+ List<Volume> readyAndReusedVolumes = result.second();
+
+ // choose the potential pool for this VM for this
+ // host
+ if (!suitableVolumeStoragePools.isEmpty()) {
+ List<Host> suitableHosts = new ArrayList<Host>();
+ suitableHosts.add(host);
+ Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
+ suitableHosts, suitableVolumeStoragePools, avoids,
+ getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes);
+ if (potentialResources != null) {
+ Pod pod = _podDao.findById(host.getPodId());
+ Cluster cluster = _clusterDao.findById(host.getClusterId());
+ Map<Volume, StoragePool> storageVolMap = potentialResources.second();
+ // remove the reused vol<->pool from
+ // destination, since we don't have to
+ // prepare
+ // this volume.
+ for (Volume vol : readyAndReusedVolumes) {
+ storageVolMap.remove(vol);
+ }
+ DeployDestination dest = new DeployDestination(dc, pod, cluster, host,
+ storageVolMap);
+ s_logger.debug("Returning Deployment Destination: " + dest);
+ return dest;
}
- DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap);
- s_logger.debug("Returning Deployment Destination: " + dest);
- return dest;
}
+ } else {
+ s_logger.debug("The last host of this VM does not have enough capacity");
}
} else {
- s_logger.debug("The last host of this VM does not have enough capacity");
+ s_logger.debug("Service Offering host tag does not match the last host of this VM");
}
} else {
s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: " + host.getStatus().name() + ", host resource state is: " +