Posted to commits@cloudstack.apache.org by pr...@apache.org on 2013/04/30 04:56:50 UTC

[1/3] Adding new interface DeploymentClusterPlanner.java and refactoring planners

Updated Branches:
  refs/heads/planner_reserve 1959377d3 -> fe8cd8de2
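
The shape of the new contract can be inferred from the methods FirstFitPlanner
gains below (orderClusters(), getResourceUsage() and the
PlannerResourceUsage.Shared constant). The following is a plausible
reconstruction, not the committed file; that it extends DeploymentPlanner, and
the existence of a Dedicated usage mode, are assumptions:

    import java.util.List;

    import com.cloud.exception.InsufficientServerCapacityException;
    import com.cloud.vm.VirtualMachine;
    import com.cloud.vm.VirtualMachineProfile;

    public interface DeploymentClusterPlanner extends DeploymentPlanner {

        // Shared appears in the diff below; Dedicated is an assumption
        enum PlannerResourceUsage {
            Shared, Dedicated
        }

        /**
         * Returns candidate cluster ids ordered by the planner's heuristic,
         * instead of a fully resolved DeployDestination.
         */
        List<Long> orderClusters(VirtualMachineProfile<? extends VirtualMachine> vmProfile,
                DeploymentPlan plan, ExcludeList avoid)
                throws InsufficientServerCapacityException;

        PlannerResourceUsage getResourceUsage();
    }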


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/25f8ae92/server/src/com/cloud/deploy/FirstFitPlanner.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/deploy/FirstFitPlanner.java b/server/src/com/cloud/deploy/FirstFitPlanner.java
index ee4d47f..8ac6633 100755
--- a/server/src/com/cloud/deploy/FirstFitPlanner.java
+++ b/server/src/com/cloud/deploy/FirstFitPlanner.java
@@ -49,6 +49,7 @@ import com.cloud.dc.Pod;
 import com.cloud.dc.dao.ClusterDao;
 import com.cloud.dc.dao.DataCenterDao;
 import com.cloud.dc.dao.HostPodDao;
+import com.cloud.deploy.DeploymentPlanner.ExcludeList;
 import com.cloud.exception.InsufficientServerCapacityException;
 import com.cloud.host.Host;
 import com.cloud.host.HostVO;
@@ -81,7 +82,7 @@ import com.cloud.vm.dao.UserVmDao;
 import com.cloud.vm.dao.VMInstanceDao;
 
 @Local(value=DeploymentPlanner.class)
-public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
+public class FirstFitPlanner extends PlannerBase implements DeploymentClusterPlanner {
     private static final Logger s_logger = Logger.getLogger(FirstFitPlanner.class);
     @Inject protected HostDao _hostDao;
     @Inject protected DataCenterDao _dcDao;
@@ -103,28 +104,12 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
     @Inject DataStoreManager dataStoreMgr;
     @Inject protected ClusterDetailsDao _clusterDetailsDao;
 
-    protected List<StoragePoolAllocator> _storagePoolAllocators;
-    public List<StoragePoolAllocator> getStoragePoolAllocators() {
-		return _storagePoolAllocators;
-	}
-	public void setStoragePoolAllocators(
-			List<StoragePoolAllocator> _storagePoolAllocators) {
-		this._storagePoolAllocators = _storagePoolAllocators;
-	}
-
-	protected List<HostAllocator> _hostAllocators;
-    public List<HostAllocator> getHostAllocators() {
-		return _hostAllocators;
-	}
-	public void setHostAllocators(List<HostAllocator> _hostAllocators) {
-		this._hostAllocators = _hostAllocators;
-	}
 
 	protected String _allocationAlgorithm = "random";
 
 
     @Override
-    public DeployDestination plan(VirtualMachineProfile<? extends VirtualMachine> vmProfile,
+    public List<Long> orderClusters(VirtualMachineProfile<? extends VirtualMachine> vmProfile,
             DeploymentPlan plan, ExcludeList avoid)
                     throws InsufficientServerCapacityException {
         VirtualMachine vm = vmProfile.getVirtualMachine();
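
With plan() reduced to a stub further down, the per-cluster host/pool search
presumably moves to a coordinating component that consumes the ordered list
returned above. A minimal sketch of such a consumer, assuming a hypothetical
checkClusterForDestination() helper (the name echoes the
checkClustersforDestination() method removed later in this diff):

    // Hypothetical consumer of the refactored planner; only orderClusters(),
    // ExcludeList and avoid.addCluster() come from the diff, the rest is
    // illustrative.
    DeployDestination findDestination(DeploymentClusterPlanner planner,
            VirtualMachineProfile<? extends VirtualMachine> vmProfile,
            DeploymentPlan plan, ExcludeList avoid)
            throws InsufficientServerCapacityException {
        List<Long> clusters = planner.orderClusters(vmProfile, plan, avoid);
        if (clusters == null || clusters.isEmpty()) {
            return null;
        }
        for (Long clusterId : clusters) {
            // try hosts and storage pools within this cluster; allocators are
            // expected to grow the avoid set on failure (see commit [3/3])
            DeployDestination dest = checkClusterForDestination(clusterId,
                    vmProfile, plan, avoid);
            if (dest != null) {
                return dest;
            }
            avoid.addCluster(clusterId);
        }
        return null;
    }
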
@@ -138,128 +123,6 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
             return null;
         }
 
-        ServiceOffering offering = vmProfile.getServiceOffering();
-        int cpu_requested = offering.getCpu() * offering.getSpeed();
-        long ram_requested = offering.getRamSize() * 1024L * 1024L;
-
-
-        if (s_logger.isDebugEnabled()) {
-            s_logger.debug("DeploymentPlanner allocation algorithm: "+_allocationAlgorithm);
-
-            s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:" + plan.getPodId() + ",cluster:" + plan.getClusterId() +
-                    ", requested cpu: " + cpu_requested + ", requested ram: " + ram_requested);
-
-            s_logger.debug("Is ROOT volume READY (pool already allocated)?: " + (plan.getPoolId()!=null ? "Yes": "No"));
-        }
-
-        String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag);
-
-        if(plan.getHostId() != null && haVmTag == null){
-            Long hostIdSpecified = plan.getHostId();
-            if (s_logger.isDebugEnabled()){
-                s_logger.debug("DeploymentPlan has host_id specified, choosing this host and making no checks on this host: "
-                        + hostIdSpecified);
-            }
-            HostVO host = _hostDao.findById(hostIdSpecified);
-            if (host == null) {
-                s_logger.debug("The specified host cannot be found");
-            } else if (avoid.shouldAvoid(host)) {
-                s_logger.debug("The specified host is in avoid set");
-            } else {
-                if (s_logger.isDebugEnabled()) {
-                    s_logger.debug("Looking for suitable pools for this host under zone: "+host.getDataCenterId() +", pod: "+ host.getPodId()+", cluster: "+ host.getClusterId());
-                }
-
-                // search for storage under the zone, pod, cluster of the host.
-                DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(),
-                        host.getClusterId(), hostIdSpecified, plan.getPoolId(), null, plan.getReservationContext());
-
-                Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile,
-                        lastPlan, avoid, HostAllocator.RETURN_UPTO_ALL);
-                Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
-                List<Volume> readyAndReusedVolumes = result.second();
-
-                // choose the potential pool for this VM for this host
-                if (!suitableVolumeStoragePools.isEmpty()) {
-                    List<Host> suitableHosts = new ArrayList<Host>();
-                    suitableHosts.add(host);
-
-                    Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
-                            suitableHosts, suitableVolumeStoragePools);
-                    if (potentialResources != null) {
-                        Pod pod = _podDao.findById(host.getPodId());
-                        Cluster cluster = _clusterDao.findById(host.getClusterId());
-                        Map<Volume, StoragePool> storageVolMap = potentialResources.second();
-                        // remove the reused vol<->pool from destination, since
-                        // we don't have to prepare this volume.
-                        for (Volume vol : readyAndReusedVolumes) {
-                            storageVolMap.remove(vol);
-                        }
-                        DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap);
-                        s_logger.debug("Returning Deployment Destination: " + dest);
-                        return dest;
-                    }
-                }
-            }
-            s_logger.debug("Cannnot deploy to specified host, returning.");
-            return null;
-        }
-
-        if (vm.getLastHostId() != null && haVmTag == null) {
-            s_logger.debug("This VM has last host_id specified, trying to choose the same host: " +vm.getLastHostId());
-
-            HostVO host = _hostDao.findById(vm.getLastHostId());
-            if(host == null){
-                s_logger.debug("The last host of this VM cannot be found");
-            }else if(avoid.shouldAvoid(host)){
-                s_logger.debug("The last host of this VM is in avoid set");
-            }else if(_capacityMgr.checkIfHostReachMaxGuestLimit(host)){
-                s_logger.debug("The last Host, hostId: "+ host.getId() +" already has max Running VMs(count includes system VMs), skipping this and trying other available hosts");
-            }else{
-                if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) {
-                    long cluster_id = host.getClusterId();
-                    ClusterDetailsVO cluster_detail_cpu =  _clusterDetailsDao.findDetail(cluster_id,"cpuOvercommitRatio");
-                    ClusterDetailsVO cluster_detail_ram =  _clusterDetailsDao.findDetail(cluster_id,"memoryOvercommitRatio");
-                    Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
-                    Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
-                    if(_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOvercommitRatio, memoryOvercommitRatio, true)){
-                        s_logger.debug("The last host of this VM is UP and has enough capacity");
-                        s_logger.debug("Now checking for suitable pools under zone: "+host.getDataCenterId() +", pod: "+ host.getPodId()+", cluster: "+ host.getClusterId());
-                        //search for storage under the zone, pod, cluster of the last host.
-                        DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null);
-                        Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoid, HostAllocator.RETURN_UPTO_ALL);
-                        Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
-                        List<Volume> readyAndReusedVolumes = result.second();
-                        //choose the potential pool for this VM for this host
-                        if(!suitableVolumeStoragePools.isEmpty()){
-                            List<Host> suitableHosts = new ArrayList<Host>();
-                            suitableHosts.add(host);
-
-                            Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools);
-                            if(potentialResources != null){
-                                Pod pod = _podDao.findById(host.getPodId());
-                                Cluster cluster = _clusterDao.findById(host.getClusterId());
-                                Map<Volume, StoragePool> storageVolMap = potentialResources.second();
-                                // remove the reused vol<->pool from destination, since we don't have to prepare this volume.
-                                for(Volume vol : readyAndReusedVolumes){
-                                    storageVolMap.remove(vol);
-                                }
-                                DeployDestination dest =  new DeployDestination(dc, pod, cluster, host, storageVolMap);
-                                s_logger.debug("Returning Deployment Destination: "+ dest);
-                                return dest;
-                            }
-                        }
-                    }else{
-                        s_logger.debug("The last host of this VM does not have enough capacity");
-                    }
-                }else{
-                    s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: "+host.getStatus().name() + ", host resource state is: "+host.getResourceState());
-                }
-            }
-            s_logger.debug("Cannot choose the last host to deploy this VM ");
-        }
-
-
         List<Long> clusterList = new ArrayList<Long>();
         if (plan.getClusterId() != null) {
             Long clusterIdSpecified = plan.getClusterId();
@@ -267,7 +130,8 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
             ClusterVO cluster = _clusterDao.findById(plan.getClusterId());
             if (cluster != null ){
                 clusterList.add(clusterIdSpecified);
-                return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc);
+                removeClustersCrossingThreshold(clusterList, avoid, vmProfile, plan);
+                return clusterList;
             }else{
                 s_logger.debug("The specified cluster cannot be found, returning.");
                 avoid.addCluster(plan.getClusterId());
@@ -280,11 +144,11 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
 
             HostPodVO pod = _podDao.findById(podIdSpecified);
             if (pod != null) {
-                DeployDestination dest = scanClustersForDestinationInZoneOrPod(podIdSpecified, false, vmProfile, plan, avoid);
-                if(dest == null){
+                clusterList = scanClustersForDestinationInZoneOrPod(podIdSpecified, false, vmProfile, plan, avoid);
+                if (clusterList == null) {
                     avoid.addPod(plan.getPodId());
                 }
-                return dest;
+                return clusterList;
             } else {
                 s_logger.debug("The specified Pod cannot be found, returning.");
                 avoid.addPod(plan.getPodId());
@@ -305,7 +169,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
 
     }
 
-    private DeployDestination scanPodsForDestination(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid){
+    private List<Long> scanPodsForDestination(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid){
 
         ServiceOffering offering = vmProfile.getServiceOffering();
         int requiredCpu = offering.getCpu() * offering.getSpeed();
@@ -341,20 +205,24 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
         if(!podsWithCapacity.isEmpty()){
 
             prioritizedPodIds = reorderPods(podCapacityInfo, vmProfile, plan);
+            if (prioritizedPodIds == null || prioritizedPodIds.isEmpty()) {
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug("No Pods found for destination, returning.");
+                }
+                return null;
+            }
 
+            List<Long> clusterList = new ArrayList<Long>();
             //loop over pods
             for(Long podId : prioritizedPodIds){
                 s_logger.debug("Checking resources under Pod: "+podId);
-                DeployDestination dest = scanClustersForDestinationInZoneOrPod(podId, false, vmProfile, plan, avoid);
-                if(dest != null){
-                    return dest;
+                List<Long> clustersUnderPod = scanClustersForDestinationInZoneOrPod(podId, false, vmProfile, plan,
+                        avoid);
+                if (clustersUnderPod != null) {
+                    clusterList.addAll(clustersUnderPod);
                 }
-                avoid.addPod(podId);
             }
-            if (s_logger.isDebugEnabled()) {
-                s_logger.debug("No Pods found for destination, returning.");
-            }
-            return null;
+            return clusterList;
         }else{
             if (s_logger.isDebugEnabled()) {
                 s_logger.debug("No Pods found after removing disabled pods and pods in avoid list, returning.");
@@ -363,7 +231,72 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
         }
     }
 
-    private DeployDestination scanClustersForDestinationInZoneOrPod(long id, boolean isZone, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid){
+    private Map<Short, Float> getCapacityThresholdMap() {
+        // Let's build this at call time so that the admin won't have to
+        // restart the MS after changing these values
+        Map<Short, Float> disableThresholdMap = new HashMap<Short, Float>();
+
+        String cpuDisableThresholdString = _configDao.getValue(Config.CPUCapacityDisableThreshold.key());
+        float cpuDisableThreshold = NumbersUtil.parseFloat(cpuDisableThresholdString, 0.85F);
+        disableThresholdMap.put(Capacity.CAPACITY_TYPE_CPU, cpuDisableThreshold);
+
+        String memoryDisableThresholdString = _configDao.getValue(Config.MemoryCapacityDisableThreshold.key());
+        float memoryDisableThreshold = NumbersUtil.parseFloat(memoryDisableThresholdString, 0.85F);
+        disableThresholdMap.put(Capacity.CAPACITY_TYPE_MEMORY, memoryDisableThreshold);
+
+        return disableThresholdMap;
+    }
+
+    private List<Short> getCapacitiesForCheckingThreshold() {
+        List<Short> capacityList = new ArrayList<Short>();
+        capacityList.add(Capacity.CAPACITY_TYPE_CPU);
+        capacityList.add(Capacity.CAPACITY_TYPE_MEMORY);
+        return capacityList;
+    }
+
+    private void removeClustersCrossingThreshold(List<Long> clusterListForVmAllocation, ExcludeList avoid,
+            VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan) {
+
+        Map<Short, Float> capacityThresholdMap = getCapacityThresholdMap();
+        List<Short> capacityList = getCapacitiesForCheckingThreshold();
+        List<Long> clustersCrossingThreshold = new ArrayList<Long>();
+
+        ServiceOffering offering = vmProfile.getServiceOffering();
+        int cpu_requested = offering.getCpu() * offering.getSpeed();
+        long ram_requested = offering.getRamSize() * 1024L * 1024L;
+
+        // For each capacity get the cluster list crossing the threshold and
+        // remove it from the clusterList that will be used for vm allocation.
+        for (short capacity : capacityList) {
+
+            if (clusterListForVmAllocation == null || clusterListForVmAllocation.size() == 0) {
+                return;
+            }
+            if (capacity == Capacity.CAPACITY_TYPE_CPU) {
+                clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity,
+                        plan.getDataCenterId(), capacityThresholdMap.get(capacity), cpu_requested);
+            } else if (capacity == Capacity.CAPACITY_TYPE_MEMORY) {
+                clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity,
+                        plan.getDataCenterId(), capacityThresholdMap.get(capacity), ram_requested);
+            }
+
+            if (clustersCrossingThreshold != null && clustersCrossingThreshold.size() != 0) {
+                // addToAvoid Set
+                avoid.addClusterList(clustersCrossingThreshold);
+                // Remove clusters crossing disabled threshold
+                clusterListForVmAllocation.removeAll(clustersCrossingThreshold);
+
+                s_logger.debug("Cannot allocate cluster list " + clustersCrossingThreshold.toString()
+                        + " for vm creation since their allocated percentage"
+                        + " crosses the disable capacity threshold: " + capacityThresholdMap.get(capacity)
+                        + " for capacity Type : " + capacity + ", skipping these clusters");
+            }
+
+        }
+    }
+
+    private List<Long> scanClustersForDestinationInZoneOrPod(long id, boolean isZone,
+            VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid) {
 
         VirtualMachine vm = vmProfile.getVirtualMachine();
         ServiceOffering offering = vmProfile.getServiceOffering();
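
The threshold logic added above rereads CPUCapacityDisableThreshold and
MemoryCapacityDisableThreshold on every call (defaulting to 0.85), so changed
values take effect without a management-server restart, and any cluster whose
projected usage would cross the configured fraction is moved to the avoid set.
A dependency-free sketch of the same filtering idea, with the CapacityDao
query replaced by a plain map; all names here are illustrative:

    // Simplified stand-in for removeClustersCrossingThreshold(); the real
    // method asks CapacityDao which clusters cross the threshold.
    static void removeCrossingClusters(List<Long> candidates, Set<Long> avoid,
            Map<Long, Float> projectedUsageByCluster, float disableThreshold) {
        List<Long> crossing = new ArrayList<Long>();
        for (Long clusterId : candidates) {
            Float projected = projectedUsageByCluster.get(clusterId);
            // a cluster whose usage after this allocation would exceed the
            // threshold fraction is excluded from further consideration
            if (projected != null && projected > disableThreshold) {
                crossing.add(clusterId);
            }
        }
        avoid.addAll(crossing);
        candidates.removeAll(crossing);
    }
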
@@ -396,6 +329,9 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
                     prioritizedClusterIds.removeAll(disabledClusters);
                 }
             }
+
+            removeClustersCrossingThreshold(prioritizedClusterIds, avoid, vmProfile, plan);
+
         }else{
             if (s_logger.isDebugEnabled()) {
                 s_logger.debug("No clusters found having a host with enough capacity, returning.");
@@ -404,7 +340,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
         }
         if(!prioritizedClusterIds.isEmpty()){
             List<Long> clusterList = reorderClusters(id, isZone, clusterCapacityInfo, vmProfile, plan);
-            return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc);
+            return clusterList; //return checkClustersforDestination(clusterList, vmProfile, plan, avoid, dc);
         }else{
             if (s_logger.isDebugEnabled()) {
                 s_logger.debug("No clusters found after removing disabled clusters and clusters in avoid list, returning.");
@@ -452,132 +388,6 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
         return disabledPods;
     }
 
-    private Map<Short,Float> getCapacityThresholdMap(){
-        // Let's build this at call time so that the admin won't have to restart the MS after changing these values
-        Map<Short,Float> disableThresholdMap = new HashMap<Short, Float>();
-
-        String cpuDisableThresholdString = _configDao.getValue(Config.CPUCapacityDisableThreshold.key());
-        float cpuDisableThreshold = NumbersUtil.parseFloat(cpuDisableThresholdString, 0.85F);
-        disableThresholdMap.put(Capacity.CAPACITY_TYPE_CPU, cpuDisableThreshold);
-
-        String memoryDisableThresholdString = _configDao.getValue(Config.MemoryCapacityDisableThreshold.key());
-        float memoryDisableThreshold = NumbersUtil.parseFloat(memoryDisableThresholdString, 0.85F);
-        disableThresholdMap.put(Capacity.CAPACITY_TYPE_MEMORY, memoryDisableThreshold);
-
-        return disableThresholdMap;
-    }
-
-    private List<Short> getCapacitiesForCheckingThreshold(){
-        List<Short> capacityList = new ArrayList<Short>();
-        capacityList.add(Capacity.CAPACITY_TYPE_CPU);
-        capacityList.add(Capacity.CAPACITY_TYPE_MEMORY);
-        return capacityList;
-    }
-
-    private void removeClustersCrossingThreshold(List<Long> clusterListForVmAllocation, ExcludeList avoid, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan){
-
-        Map<Short,Float> capacityThresholdMap = getCapacityThresholdMap();
-        List<Short> capacityList = getCapacitiesForCheckingThreshold();
-        List<Long> clustersCrossingThreshold = new ArrayList<Long>();
-
-        ServiceOffering offering = vmProfile.getServiceOffering();
-        int cpu_requested = offering.getCpu() * offering.getSpeed();
-        long ram_requested = offering.getRamSize() * 1024L * 1024L;
-
-        // 	For each capacity get the cluster list crossing the threshold and remove it from the clusterList that will be used for vm allocation.
-        for(short capacity : capacityList){
-
-        	if (clusterListForVmAllocation == null || clusterListForVmAllocation.size() == 0){
-           		return;
-           	}
-            if (capacity == Capacity.CAPACITY_TYPE_CPU) {
-           		clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, plan.getDataCenterId(),
-           				capacityThresholdMap.get(capacity), cpu_requested);
-            }
-            else if (capacity == Capacity.CAPACITY_TYPE_MEMORY ) {
-                clustersCrossingThreshold = _capacityDao.listClustersCrossingThreshold(capacity, plan.getDataCenterId(),
-                        capacityThresholdMap.get(capacity), ram_requested );
-            }
-
-
-           	if (clustersCrossingThreshold != null && clustersCrossingThreshold.size() != 0){
-               	// addToAvoid Set
-           		avoid.addClusterList(clustersCrossingThreshold);
-           		// Remove clusters crossing disabled threshold
-               	clusterListForVmAllocation.removeAll(clustersCrossingThreshold);
-
-           		s_logger.debug("Cannot allocate cluster list " + clustersCrossingThreshold.toString() + " for vm creation since their allocated percentage" +
-           				" crosses the disable capacity threshold: " + capacityThresholdMap.get(capacity) + " for capacity Type : " + capacity + ", skipping these clusters");
-           	}
-
-        }
-    }
-
-    private DeployDestination checkClustersforDestination(List<Long> clusterList, VirtualMachineProfile<? extends VirtualMachine> vmProfile,
-            DeploymentPlan plan, ExcludeList avoid, DataCenter dc){
-
-        if (s_logger.isTraceEnabled()) {
-            s_logger.trace("ClusterId List to consider: " + clusterList);
-        }
-
-        removeClustersCrossingThreshold(clusterList, avoid, vmProfile, plan);
-
-        for(Long clusterId : clusterList){
-            Cluster clusterVO = _clusterDao.findById(clusterId);
-
-            if (clusterVO.getHypervisorType() != vmProfile.getHypervisorType()) {
-                s_logger.debug("Cluster: "+clusterId + " has HyperVisorType that does not match the VM, skipping this cluster");
-                avoid.addCluster(clusterVO.getId());
-                continue;
-            }
-
-            s_logger.debug("Checking resources in Cluster: "+clusterId + " under Pod: "+clusterVO.getPodId());
-            //search for resources(hosts and storage) under this zone, pod, cluster.
-            DataCenterDeployment potentialPlan = new DataCenterDeployment(plan.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null, plan.getPoolId(), null, plan.getReservationContext());
-
-            //find suitable hosts under this cluster, need as many hosts as we get.
-            List<Host> suitableHosts = findSuitableHosts(vmProfile, potentialPlan, avoid, HostAllocator.RETURN_UPTO_ALL);
-            //if found suitable hosts in this cluster, find suitable storage pools for each volume of the VM
-            if(suitableHosts != null && !suitableHosts.isEmpty()){
-                if (vmProfile.getHypervisorType() == HypervisorType.BareMetal) {
-                    Pod pod = _podDao.findById(clusterVO.getPodId());
-                    DeployDestination dest =  new DeployDestination(dc, pod, clusterVO, suitableHosts.get(0));
-                    return dest;
-                }
-
-                Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, potentialPlan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
-                Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
-                List<Volume> readyAndReusedVolumes = result.second();
-
-                //choose the potential host and pool for the VM
-                if(!suitableVolumeStoragePools.isEmpty()){
-                    Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools);
-
-                    if(potentialResources != null){
-                        Pod pod = _podDao.findById(clusterVO.getPodId());
-                        Host host = _hostDao.findById(potentialResources.first().getId());
-                        Map<Volume, StoragePool> storageVolMap = potentialResources.second();
-                        // remove the reused vol<->pool from destination, since we don't have to prepare this volume.
-                        for(Volume vol : readyAndReusedVolumes){
-                            storageVolMap.remove(vol);
-                        }
-                        DeployDestination dest =  new DeployDestination(dc, pod, clusterVO, host, storageVolMap );
-                        s_logger.debug("Returning Deployment Destination: "+ dest);
-                        return dest;
-                    }
-                }else{
-                    s_logger.debug("No suitable storagePools found under this Cluster: "+clusterId);
-                }
-            }else{
-                s_logger.debug("No suitable hosts found under this Cluster: "+clusterId);
-            }
-            avoid.addCluster(clusterVO.getId());
-        }
-        s_logger.debug("Could not find suitable Deployment Destination for this VM under any clusters, returning. ");
-        return null;
-    }
-
-
     protected Pair<List<Long>, Map<Long, Double>> listClustersByCapacity(long id, int requiredCpu, long requiredRam, ExcludeList avoid, boolean isZone){
         //look at the aggregate available cpu and ram per cluster
         //although an aggregate value may be false indicator that a cluster can host a vm, it will at the least eliminate those clusters which definitely cannot
@@ -647,209 +457,6 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
 
     }
 
-
-    protected Pair<Host, Map<Volume, StoragePool>> findPotentialDeploymentResources(List<Host> suitableHosts, Map<Volume, List<StoragePool>> suitableVolumeStoragePools){
-        s_logger.debug("Trying to find a potenial host and associated storage pools from the suitable host/pool lists for this VM");
-
-        boolean hostCanAccessPool = false;
-        boolean haveEnoughSpace = false;
-        Map<Volume, StoragePool> storage = new HashMap<Volume, StoragePool>();
-        TreeSet<Volume> volumesOrderBySizeDesc = new TreeSet<Volume>(new Comparator<Volume>() {
-            @Override
-            public int compare(Volume v1, Volume v2) {
-                if(v1.getSize() < v2.getSize())
-                    return 1;
-                else
-                    return -1;
-            }
-        });
-        volumesOrderBySizeDesc.addAll(suitableVolumeStoragePools.keySet());
-        boolean multipleVolume = volumesOrderBySizeDesc.size() > 1;
-        for(Host potentialHost : suitableHosts){
-            Map<StoragePool,List<Volume>> volumeAllocationMap = new HashMap<StoragePool,List<Volume>>();
-            for(Volume vol : volumesOrderBySizeDesc){
-                haveEnoughSpace = false;
-                s_logger.debug("Checking if host: "+potentialHost.getId() +" can access any suitable storage pool for volume: "+ vol.getVolumeType());
-                List<StoragePool> volumePoolList = suitableVolumeStoragePools.get(vol);
-                hostCanAccessPool = false;
-                for(StoragePool potentialSPool : volumePoolList){
-                    if(hostCanAccessSPool(potentialHost, potentialSPool)){
-                        hostCanAccessPool = true;
-                        if(multipleVolume){
-                            List<Volume> requestVolumes  = null;
-                            if(volumeAllocationMap.containsKey(potentialSPool))
-                                requestVolumes = volumeAllocationMap.get(potentialSPool);
-                            else
-                                requestVolumes = new ArrayList<Volume>();
-                            requestVolumes.add(vol);
-
-                            if(!_storageMgr.storagePoolHasEnoughSpace(requestVolumes, potentialSPool))
-                                continue;
-                            volumeAllocationMap.put(potentialSPool,requestVolumes);
-                        }
-                        storage.put(vol, potentialSPool);
-                        haveEnoughSpace = true;
-                        break;
-                    }
-                }
-                if(!hostCanAccessPool){
-                    break;
-                }
-                if(!haveEnoughSpace) {
-                    s_logger.warn("insufficient capacity to allocate all volumes");
-                    break;
-                }
-            }
-            if(hostCanAccessPool && haveEnoughSpace){
-                s_logger.debug("Found a potential host " + "id: "+potentialHost.getId() + " name: " +potentialHost.getName() + " and associated storage pools for this VM");
-                return new Pair<Host, Map<Volume, StoragePool>>(potentialHost, storage);
-            }
-        }
-        s_logger.debug("Could not find a potential host that has associated storage pools from the suitable host/pool lists for this VM");
-        return null;
-    }
-
-    protected boolean hostCanAccessSPool(Host host, StoragePool pool){
-        boolean hostCanAccessSPool = false;
-
-        StoragePoolHostVO hostPoolLinkage = _poolHostDao.findByPoolHost(pool.getId(), host.getId());
-        if(hostPoolLinkage != null){
-            hostCanAccessSPool = true;
-        }
-
-        s_logger.debug("Host: "+ host.getId() + (hostCanAccessSPool ?" can" : " cannot") + " access pool: "+ pool.getId());
-        return hostCanAccessSPool;
-    }
-
-    protected List<Host> findSuitableHosts(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo){
-        List<Host> suitableHosts = new ArrayList<Host>();
-        for(HostAllocator allocator : _hostAllocators) {
-            suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, avoid, returnUpTo);
-            if (suitableHosts != null && !suitableHosts.isEmpty()) {
-                break;
-            }
-        }
-
-        if(suitableHosts.isEmpty()){
-            s_logger.debug("No suitable hosts found");
-        }
-        return suitableHosts;
-    }
-
-    protected Pair<Map<Volume, List<StoragePool>>, List<Volume>> findSuitablePoolsForVolumes(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo){
-        List<VolumeVO> volumesTobeCreated = _volsDao.findUsableVolumesForInstance(vmProfile.getId());
-        Map<Volume, List<StoragePool>> suitableVolumeStoragePools = new HashMap<Volume, List<StoragePool>>();
-        List<Volume> readyAndReusedVolumes = new ArrayList<Volume>();
-
-        //for each volume find list of suitable storage pools by calling the allocators
-        for (VolumeVO toBeCreated : volumesTobeCreated) {
-            s_logger.debug("Checking suitable pools for volume (Id, Type): ("+toBeCreated.getId() +"," +toBeCreated.getVolumeType().name() + ")");
-
-            //If the plan specifies a poolId, it means that this VM's ROOT volume is ready and the pool should be reused.
-            //In this case, also check if rest of the volumes are ready and can be reused.
-            if(plan.getPoolId() != null){
-                s_logger.debug("Volume has pool already allocated, checking if pool can be reused, poolId: "+toBeCreated.getPoolId());
-                List<StoragePool> suitablePools = new ArrayList<StoragePool>();
-                StoragePool pool = null;
-                if(toBeCreated.getPoolId() != null){
-                    pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(toBeCreated.getPoolId());
-                }else{
-                    pool = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(plan.getPoolId());
-                }
-
-                if(!pool.isInMaintenance()){
-                    if(!avoid.shouldAvoid(pool)){
-                        long exstPoolDcId = pool.getDataCenterId();
-
-                        long exstPoolPodId = pool.getPodId() != null ? pool.getPodId() : -1;
-                        long exstPoolClusterId = pool.getClusterId() != null ? pool.getClusterId() : -1;
-                        if(plan.getDataCenterId() == exstPoolDcId && plan.getPodId() == exstPoolPodId && plan.getClusterId() == exstPoolClusterId){
-                            s_logger.debug("Planner need not allocate a pool for this volume since its READY");
-                            suitablePools.add(pool);
-                            suitableVolumeStoragePools.put(toBeCreated, suitablePools);
-                            if (!(toBeCreated.getState() == Volume.State.Allocated || toBeCreated.getState() == Volume.State.Creating)) {
-                                readyAndReusedVolumes.add(toBeCreated);
-                            }
-                            continue;
-                        }else{
-                            s_logger.debug("Pool of the volume does not fit the specified plan, need to reallocate a pool for this volume");
-                        }
-                    }else{
-                        s_logger.debug("Pool of the volume is in avoid set, need to reallocate a pool for this volume");
-                    }
-                }else{
-                    s_logger.debug("Pool of the volume is in maintenance, need to reallocate a pool for this volume");
-                }
-            }
-
-            if(s_logger.isDebugEnabled()){
-                s_logger.debug("We need to allocate new storagepool for this volume");
-            }
-            if(!isRootAdmin(plan.getReservationContext())){
-                if(!isEnabledForAllocation(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId())){
-                    if(s_logger.isDebugEnabled()){
-                        s_logger.debug("Cannot allocate new storagepool for this volume in this cluster, allocation state is disabled");
-                        s_logger.debug("Cannot deploy to this specified plan, allocation state is disabled, returning.");
-                    }
-                    //Cannot find suitable storage pools under this cluster for this volume since allocation_state is disabled.
-                    //- remove any suitable pools found for other volumes.
-                    //All volumes should get suitable pools under this cluster; else we can't use this cluster.
-                    suitableVolumeStoragePools.clear();
-                    break;
-                }
-            }
-
-            s_logger.debug("Calling StoragePoolAllocators to find suitable pools");
-
-            DiskOfferingVO diskOffering = _diskOfferingDao.findById(toBeCreated.getDiskOfferingId());
-            DiskProfile diskProfile = new DiskProfile(toBeCreated, diskOffering, vmProfile.getHypervisorType());
-
-            boolean useLocalStorage = false;
-            if (vmProfile.getType() != VirtualMachine.Type.User) {
-                String ssvmUseLocalStorage = _configDao.getValue(Config.SystemVMUseLocalStorage.key());
-                if (ssvmUseLocalStorage.equalsIgnoreCase("true")) {
-                    useLocalStorage = true;
-                }
-            } else {
-                useLocalStorage = diskOffering.getUseLocalStorage();
-
-                // TODO: this is a temporary fix for deploying an ISO-based VM on local storage:
-                // when deploying a VM based on an ISO, we have a service offering and an additional disk offering; the
-                // use-local-storage flag is actually saved in the service offering, so override the flag from the service offering when it is a ROOT disk
-                if(!useLocalStorage && vmProfile.getServiceOffering().getUseLocalStorage()) {
-                    if(toBeCreated.getVolumeType() == Volume.Type.ROOT)
-                        useLocalStorage = true;
-                }
-            }
-            diskProfile.setUseLocalStorage(useLocalStorage);
-
-            boolean foundPotentialPools = false;
-            for(StoragePoolAllocator allocator : _storagePoolAllocators) {
-                final List<StoragePool> suitablePools = allocator.allocateToPool(diskProfile, vmProfile, plan, avoid, returnUpTo);
-                if (suitablePools != null && !suitablePools.isEmpty()) {
-                    suitableVolumeStoragePools.put(toBeCreated, suitablePools);
-                    foundPotentialPools = true;
-                    break;
-                }
-            }
-
-            if(!foundPotentialPools){
-                s_logger.debug("No suitable pools found for volume: "+toBeCreated +" under cluster: "+plan.getClusterId());
-                //No suitable storage pools found under this cluster for this volume. - remove any suitable pools found for other volumes.
-                //All volumes should get suitable pools under this cluster; else we can't use this cluster.
-                suitableVolumeStoragePools.clear();
-                break;
-            }
-        }
-
-        if(suitableVolumeStoragePools.isEmpty()){
-            s_logger.debug("No suitable pools found");
-        }
-
-        return new Pair<Map<Volume, List<StoragePool>>, List<Volume>>(suitableVolumeStoragePools, readyAndReusedVolumes);
-    }
-
-
     private boolean isRootAdmin(ReservationContext reservationContext) {
         if(reservationContext != null){
             if(reservationContext.getAccount() != null){
@@ -887,26 +494,16 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
         return true;
     }
 
-    private boolean isEnabledForAllocation(long zoneId, Long podId, Long clusterId){
-        // Check if the zone exists in the system
-        DataCenterVO zone = _dcDao.findById(zoneId);
-        if(zone != null && Grouping.AllocationState.Disabled == zone.getAllocationState()){
-            s_logger.info("Zone is currently disabled, cannot allocate to this zone: "+ zoneId);
-            return false;
-        }
-
-        Pod pod = _podDao.findById(podId);
-        if(pod != null && Grouping.AllocationState.Disabled == pod.getAllocationState()){
-            s_logger.info("Pod is currently disabled, cannot allocate to this pod: "+ podId);
-            return false;
-        }
 
-        Cluster cluster = _clusterDao.findById(clusterId);
-        if(cluster != null && Grouping.AllocationState.Disabled == cluster.getAllocationState()){
-            s_logger.info("Cluster is currently disabled, cannot allocate to this cluster: "+ clusterId);
-            return false;
-        }
+    @Override
+    public DeployDestination plan(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan,
+            ExcludeList avoid) throws InsufficientServerCapacityException {
+        // TODO Auto-generated method stub
+        return null;
+    }
 
-        return true;
+    @Override
+    public PlannerResourceUsage getResourceUsage() {
+        return PlannerResourceUsage.Shared;
     }
 }


[3/3] git commit: updated refs/heads/planner_reserve to fe8cd8d

Posted by pr...@apache.org.
Allocators need to add hosts/pools that were not considered to the avoid list


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/fe8cd8de
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/fe8cd8de
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/fe8cd8de

Branch: refs/heads/planner_reserve
Commit: fe8cd8de2651c0ed29512c8f51cb3a2300d3e736
Parents: 25f8ae9
Author: Prachi Damle <pr...@cloud.com>
Authored: Mon Apr 29 19:55:31 2013 -0700
Committer: Prachi Damle <pr...@cloud.com>
Committed: Mon Apr 29 19:55:31 2013 -0700

----------------------------------------------------------------------
 .../ClusterScopeStoragePoolAllocator.java          |   18 ++-
 .../allocator/LocalStoragePoolAllocator.java       |   21 +++-
 .../allocator/ZoneWideStoragePoolAllocator.java    |   25 +++-
 .../manager/allocator/impl/FirstFitAllocator.java  |  110 ++++++++-------
 4 files changed, 107 insertions(+), 67 deletions(-)
----------------------------------------------------------------------
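
All four files in this commit apply the same rule: whatever an allocator
filters out (tag mismatch, capacity, maintenance) must be recorded in the
ExcludeList, so that later planning passes do not retry resources that were
merely skipped. Reduced to its core, the recurring idiom looks roughly like
this (a generic sketch, not CloudStack API):

    // Generic form of the "not considered => avoid" idiom used in each
    // allocator below; Filter stands in for the per-allocator checks.
    interface Filter<T> {
        boolean suitable(T candidate);
    }

    static <T> List<T> selectAndAvoidRest(List<T> candidates, Filter<T> filter,
            Set<T> avoid, int returnUpTo) {
        List<T> selected = new ArrayList<T>();
        for (T candidate : candidates) {
            if (selected.size() == returnUpTo) {
                break;  // enough found; the rest is left untouched
            }
            if (filter.suitable(candidate)) {
                selected.add(candidate);
            } else {
                avoid.add(candidate);  // skipped, so explicitly avoided
            }
        }
        return selected;
    }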


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/fe8cd8de/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java
----------------------------------------------------------------------
diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java
index 0dd55d1..5b1f8cd 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java
@@ -50,7 +50,7 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
 
     @Override
 	protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
-	    
+
         s_logger.debug("ClusterScopeStoragePoolAllocator looking for storage pool");
     	List<StoragePool> suitablePools = new ArrayList<StoragePool>();
 
@@ -65,6 +65,14 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
         }
 
         List<StoragePoolVO> pools = _storagePoolDao.findPoolsByTags(dcId, podId, clusterId, dskCh.getTags());
+
+        // add the remaining pools in the cluster, which did not match the tags, to the avoid set
+        List<StoragePoolVO> allPools = _storagePoolDao.findPoolsByTags(dcId, podId, clusterId, null);
+        allPools.removeAll(pools);
+        for (StoragePoolVO pool : allPools) {
+            avoid.addPool(pool.getId());
+        }
+
         if (pools.size() == 0) {
             if (s_logger.isDebugEnabled()) {
                 String storageType = dskCh.useLocalStorage() ? ServiceOffering.StorageType.local.toString() : ServiceOffering.StorageType.shared.toString();
@@ -72,7 +80,7 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
             }
             return suitablePools;
         }
-    	
+
         for (StoragePoolVO pool: pools) {
         	if(suitablePools.size() == returnUpTo){
         		break;
@@ -80,13 +88,15 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
         	StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
         	if (filter(avoid, pol, dskCh, plan)) {
         		suitablePools.add(pol);
+            } else {
+                avoid.addPool(pool.getId());
         	}
         }
-        
+
         if (s_logger.isDebugEnabled()) {
             s_logger.debug("FirstFitStoragePoolAllocator returning "+suitablePools.size() +" suitable storage pools");
         }
-        
+
         return suitablePools;
 	}
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/fe8cd8de/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java
----------------------------------------------------------------------
diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java
index 7447d98..632ba43 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java
@@ -74,7 +74,7 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
         if (!dskCh.useLocalStorage()) {
             return suitablePools;
         }
-        
+
         // data disk and host identified from deploying vm (attach volume case)
         if (dskCh.getType() == Volume.Type.DATADISK && plan.getHostId() != null) {
             List<StoragePoolHostVO> hostPools = _poolHostDao.listByHostId(plan.getHostId());
@@ -85,7 +85,9 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
                 	if (filter(avoid, pol, dskCh, plan)) {
                 		s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list");
                 		suitablePools.add(pol);
-                	}
+                    } else {
+                        avoid.addPool(pool.getId());
+                    }
                 }
 
                 if (suitablePools.size() == returnUpTo) {
@@ -101,8 +103,19 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
         		StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
         		if (filter(avoid, pol, dskCh, plan)) {
         			suitablePools.add(pol);
-        		}
+                } else {
+                    avoid.addPool(pool.getId());
+                }
         	}
+
+            // add the remaining pools in the cluster, which did not match
+            // the tags, to the avoid set
+            List<StoragePoolVO> allPools = _storagePoolDao.findLocalStoragePoolsByTags(plan.getDataCenterId(),
+                    plan.getPodId(), plan.getClusterId(), null);
+            allPools.removeAll(availablePools);
+            for (StoragePoolVO pool : allPools) {
+                avoid.addPool(pool.getId());
+            }
         }
 
         if (s_logger.isDebugEnabled()) {
@@ -111,7 +124,7 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
 
         return suitablePools;
     }
-   
+
     @Override
     public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
         super.configure(name, params);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/fe8cd8de/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
----------------------------------------------------------------------
diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
index 1d3cd81..e976980 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
@@ -39,18 +39,18 @@ import com.cloud.vm.VirtualMachineProfile;
 @Component
 public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
 	private static final Logger s_logger = Logger.getLogger(ZoneWideStoragePoolAllocator.class);
-	@Inject PrimaryDataStoreDao _storagePoolDao; 
-	@Inject DataStoreManager dataStoreMgr; 
-	
+	@Inject PrimaryDataStoreDao _storagePoolDao;
+	@Inject DataStoreManager dataStoreMgr;
+
 	@Override
-	protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, 
+	protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh,
 			 DeploymentPlan plan) {
         Volume volume =  _volumeDao.findById(dskCh.getVolumeId());
         List<Volume> requestVolumes = new ArrayList<Volume>();
         requestVolumes.add(volume);
         return storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool);
 	}
-	
+
 	@Override
 	protected List<StoragePool> select(DiskProfile dskCh,
 			VirtualMachineProfile<? extends VirtualMachine> vmProfile,
@@ -64,9 +64,16 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
 				return suitablePools;
 			}
 		}
-		
+
 		List<StoragePoolVO> storagePools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), dskCh.getTags());
-	
+
+        // add the remaining pools in the zone, which did not match the tags, to the avoid set
+        List<StoragePoolVO> allPools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), null);
+        allPools.removeAll(storagePools);
+        for (StoragePoolVO pool : allPools) {
+            avoid.addPool(pool.getId());
+        }
+
 		for (StoragePoolVO storage : storagePools) {
 			if (suitablePools.size() == returnUpTo) {
         		break;
@@ -74,7 +81,9 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
 			StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(storage.getId());
 			if (filter(avoid, pol, dskCh, plan)) {
 				suitablePools.add(pol);
-			}
+            } else {
+                avoid.addPool(pol.getId());
+            }
 		}
 		return suitablePools;
 	}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/fe8cd8de/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java b/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java
index 0091e43..713a14f 100755
--- a/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java
+++ b/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java
@@ -78,7 +78,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
     @Inject ConsoleProxyDao _consoleProxyDao = null;
     @Inject SecondaryStorageVmDao _secStorgaeVmDao = null;
     @Inject ConfigurationDao _configDao = null;
-    @Inject GuestOSDao _guestOSDao = null; 
+    @Inject GuestOSDao _guestOSDao = null;
     @Inject GuestOSCategoryDao _guestOSCategoryDao = null;
     @Inject VMInstanceDao _vmInstanceDao = null;
     @Inject ResourceManager _resourceMgr;
@@ -88,17 +88,17 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
     boolean _checkHvm = true;
     protected String _allocationAlgorithm = "random";
     @Inject CapacityManager _capacityMgr;
-    
-    
+
+
 	@Override
 	public List<Host> allocateTo(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, Type type,
 			ExcludeList avoid, int returnUpTo) {
 	    return allocateTo(vmProfile, plan, type, avoid, returnUpTo, true);
 	}
-	
+
     @Override
     public List<Host> allocateTo(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, Type type, ExcludeList avoid, int returnUpTo, boolean considerReservedCapacity) {
-	
+
 	    long dcId = plan.getDataCenterId();
 		Long podId = plan.getPodId();
 		Long clusterId = plan.getClusterId();
@@ -110,19 +110,19 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
             // FirstFitAllocator should be used for user VMs only since it won't care whether the host is capable of routing or not
         	return new ArrayList<Host>();
         }
-        
+
         if(s_logger.isDebugEnabled()){
             s_logger.debug("Looking for hosts in dc: " + dcId + "  pod:" + podId + "  cluster:" + clusterId );
         }
-        
+
         String hostTagOnOffering = offering.getHostTag();
         String hostTagOnTemplate = template.getTemplateTag();
-        
+
         boolean hasSvcOfferingTag = hostTagOnOffering != null ? true : false;
         boolean hasTemplateTag = hostTagOnTemplate != null ? true : false;
-        
+
         List<HostVO> clusterHosts = new ArrayList<HostVO>();
-        
+
         String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag);
         if (haVmTag != null) {
             clusterHosts = _hostDao.listByHostTag(type, clusterId, podId, dcId, haVmTag);
@@ -133,31 +133,31 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
                 List<HostVO> hostsMatchingOfferingTag = new ArrayList<HostVO>();
                 List<HostVO> hostsMatchingTemplateTag = new ArrayList<HostVO>();
                 if (hasSvcOfferingTag){
-                    if (s_logger.isDebugEnabled()){            
+                    if (s_logger.isDebugEnabled()){
                         s_logger.debug("Looking for hosts having tag specified on SvcOffering:" + hostTagOnOffering);
                     }
                     hostsMatchingOfferingTag = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnOffering);
-                    if (s_logger.isDebugEnabled()){            
+                    if (s_logger.isDebugEnabled()){
                         s_logger.debug("Hosts with tag '" + hostTagOnOffering + "' are:" + hostsMatchingOfferingTag);
-                    }                
+                    }
                 }
                 if (hasTemplateTag){
-                    if (s_logger.isDebugEnabled()){            
+                    if (s_logger.isDebugEnabled()){
                         s_logger.debug("Looking for hosts having tag specified on Template:" + hostTagOnTemplate);
                     }
-                    hostsMatchingTemplateTag = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnTemplate);    
-                    if (s_logger.isDebugEnabled()){            
+                    hostsMatchingTemplateTag = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnTemplate);
+                    if (s_logger.isDebugEnabled()){
                         s_logger.debug("Hosts with tag '" + hostTagOnTemplate+"' are:" + hostsMatchingTemplateTag);
-                    }                  
+                    }
                 }
-                
+
                 if (hasSvcOfferingTag && hasTemplateTag){
                     hostsMatchingOfferingTag.retainAll(hostsMatchingTemplateTag);
-                    clusterHosts = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnTemplate);    
-                    if (s_logger.isDebugEnabled()){            
+                    clusterHosts = _hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnTemplate);
+                    if (s_logger.isDebugEnabled()){
                         s_logger.debug("Found "+ hostsMatchingOfferingTag.size() +" Hosts satisfying both tags, host ids are:" + hostsMatchingOfferingTag);
                     }
-                    
+
                     clusterHosts = hostsMatchingOfferingTag;
                 } else {
                     if (hasSvcOfferingTag){
@@ -168,7 +168,14 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
                 }
             }
         }
-        
+
+        // add all hosts that we are not considering to the avoid list
+        List<HostVO> allhostsInCluster = _hostDao.listAllUpAndEnabledNonHAHosts(type, clusterId, podId, dcId, null);
+        allhostsInCluster.removeAll(clusterHosts);
+        for (HostVO host : allhostsInCluster) {
+            avoid.addHost(host.getId());
+        }
+
         return allocateTo(plan, offering, template, avoid, clusterHosts, returnUpTo, considerReservedCapacity, account);
     }
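
A note on the hunk above: the allocator now records every host it chose not to shortlist into the ExcludeList, so the planner layer can tell an exhausted cluster apart from one that was never fully checked. A minimal sketch of this shortlist-then-avoid pattern, using only the dao and ExcludeList calls visible in this diff; note that the retainAll intersection already yields the combined-tag shortlist, so the extra listByHostTag call kept above is redundant work:

    // Sketch: shortlist hosts by tag, then mark every other host as 'avoid'.
    List<HostVO> shortlist;
    if (hasSvcOfferingTag && hasTemplateTag) {
        hostsMatchingOfferingTag.retainAll(hostsMatchingTemplateTag);
        shortlist = hostsMatchingOfferingTag;      // hosts carrying both tags
    } else if (hasSvcOfferingTag) {
        shortlist = hostsMatchingOfferingTag;
    } else {
        shortlist = hostsMatchingTemplateTag;
    }

    // Anything up, enabled and non-HA that we are not considering goes into
    // the avoid set, so callers can detect an exhausted cluster.
    List<HostVO> allHostsInCluster =
            _hostDao.listAllUpAndEnabledNonHAHosts(type, clusterId, podId, dcId, null);
    allHostsInCluster.removeAll(shortlist);
    for (HostVO notConsidered : allHostsInCluster) {
        avoid.addHost(notConsidered.getId());
    }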
 
@@ -179,11 +186,11 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
         }else if(_allocationAlgorithm.equals("userdispersing")){
             hosts = reorderHostsByNumberOfVms(plan, hosts, account);
         }
-    	
+
     	if (s_logger.isDebugEnabled()) {
             s_logger.debug("FirstFitAllocator has " + hosts.size() + " hosts to check for allocation: "+hosts);
         }
-        
+
         // We will try to reorder the host lists such that we give priority to hosts that have
         // the minimums to support a VM's requirements
         hosts = prioritizeHosts(template, hosts);
@@ -195,7 +202,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
         if (s_logger.isDebugEnabled()) {
             s_logger.debug("Looking for speed=" + (offering.getCpu() * offering.getSpeed()) + "Mhz, Ram=" + offering.getRamSize());
         }
-        
+
         List<Host> suitableHosts = new ArrayList<Host>();
 
         for (HostVO host : hosts) {
@@ -208,7 +215,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
                 }
                 continue;
             }
-                        
+
             //find number of guest VMs occupying capacity on this host.
             if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)){
                 if (s_logger.isDebugEnabled()) {
@@ -238,13 +245,14 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
                 if (s_logger.isDebugEnabled()) {
                     s_logger.debug("Not using host " + host.getId() + "; numCpusGood: " + numCpusGood + "; cpuFreqGood: " + cpuFreqGood + ", host has capacity?" + hostHasCapacity);
                 }
+                avoid.addHost(host.getId());
             }
         }
-        
+
         if (s_logger.isDebugEnabled()) {
             s_logger.debug("Host Allocator returning "+suitableHosts.size() +" suitable hosts");
         }
-        
+
         return suitableHosts;
     }
 
@@ -255,26 +263,26 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
         long dcId = plan.getDataCenterId();
         Long podId = plan.getPodId();
         Long clusterId = plan.getClusterId();
-        
+
         List<Long> hostIdsByVmCount = _vmInstanceDao.listHostIdsByVmCount(dcId, podId, clusterId, account.getAccountId());
         if (s_logger.isDebugEnabled()) {
             s_logger.debug("List of hosts in ascending order of number of VMs: "+ hostIdsByVmCount);
         }
-        
+
         //now filter the given list of Hosts by this ordered list
-        Map<Long, HostVO> hostMap = new HashMap<Long, HostVO>();        
+        Map<Long, HostVO> hostMap = new HashMap<Long, HostVO>();
         for (HostVO host : hosts) {
             hostMap.put(host.getId(), host);
         }
         List<Long> matchingHostIds = new ArrayList<Long>(hostMap.keySet());
-        
+
         hostIdsByVmCount.retainAll(matchingHostIds);
-        
+
         List<HostVO> reorderedHosts = new ArrayList<HostVO>();
         for(Long id: hostIdsByVmCount){
             reorderedHosts.add(hostMap.get(id));
         }
-        
+
         return reorderedHosts;
     }
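
reorderHostsByNumberOfVms above is a reorder-one-list-by-another routine: filter the VM-count ordering down to the candidate set, then rebuild the candidates in that order. The same idea in self-contained form (standard collections only; the generic helper name is hypothetical):

    // Reorder candidate objects to follow an externally supplied id ordering,
    // e.g. host ids in ascending order of the account's VM count. Sketch only.
    static <T> List<T> reorderByIds(List<Long> orderedIds, Map<Long, T> candidatesById) {
        List<Long> ids = new ArrayList<Long>(orderedIds);
        ids.retainAll(candidatesById.keySet());    // keep only actual candidates
        List<T> reordered = new ArrayList<T>();
        for (Long id : ids) {
            reordered.add(candidatesById.get(id));
        }
        return reordered;
    }

As in the method above, candidates whose ids do not appear in the ordering are dropped, so the ordering query needs to cover every candidate host.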
 
@@ -289,13 +297,13 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
     	if (template == null) {
     		return hosts;
     	}
-    	
+
     	// Determine the guest OS category of the template
     	String templateGuestOSCategory = getTemplateGuestOSCategory(template);
-    	
+
     	List<HostVO> prioritizedHosts = new ArrayList<HostVO>();
 	List<HostVO> noHvmHosts = new ArrayList<HostVO>();
-    	
+
     	// If a template requires HVM and a host doesn't support HVM, remove it from consideration
     	List<HostVO> hostsToCheck = new ArrayList<HostVO>();
     	if (template.isRequiresHvm()) {
@@ -309,7 +317,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
     	} else {
     		hostsToCheck.addAll(hosts);
     	}
-    	
+
 	if (s_logger.isDebugEnabled()) {
 		if (noHvmHosts.size() > 0) {
 			s_logger.debug("Not considering hosts: "  + noHvmHosts + "  to deploy template: " + template +" as they are not HVM enabled");
@@ -329,10 +337,10 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
     			lowPriorityHosts.add(host);
     		}
     	}
-    	
+
     	hostsToCheck.removeAll(highPriorityHosts);
     	hostsToCheck.removeAll(lowPriorityHosts);
-    	
+
     	// Prioritize the remaining hosts by HVM capability
     	for (HostVO host : hostsToCheck) {
     		if (!template.isRequiresHvm() && !hostSupportsHVM(host)) {
@@ -343,21 +351,21 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
     			prioritizedHosts.add(host);
     		}
     	}
-    	
+
     	// Merge the lists
     	prioritizedHosts.addAll(0, highPriorityHosts);
     	prioritizedHosts.addAll(lowPriorityHosts);
-    	
+
     	return prioritizedHosts;
     }
-    
+
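
prioritizeHosts above boils down to a three-bucket partition on guest OS category, with an HVM filter in front. Condensed below, with accessor names as used in this file; the handling of non-HVM hosts for templates that do not require HVM is elided by the hunk boundary above, so this is an illustrative reduction, not the full method:

    // Condensed sketch of the prioritizeHosts partitioning.
    List<HostVO> high = new ArrayList<HostVO>();     // host category matches the template
    List<HostVO> neutral = new ArrayList<HostVO>();  // host declares no category
    List<HostVO> low = new ArrayList<HostVO>();      // host prefers another category
    for (HostVO host : hostsToCheck) {
        if (template.isRequiresHvm() && !hostSupportsHVM(host)) {
            continue;                                // HVM template on a non-HVM host: skip
        }
        String hostCategory = getHostGuestOSCategory(host);
        if (templateGuestOSCategory != null && templateGuestOSCategory.equals(hostCategory)) {
            high.add(host);
        } else if (hostCategory == null) {
            neutral.add(host);
        } else {
            low.add(host);
        }
    }
    List<HostVO> prioritized = new ArrayList<HostVO>(high);
    prioritized.addAll(neutral);
    prioritized.addAll(low);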
     protected boolean hostSupportsHVM(HostVO host) {
         if ( !_checkHvm ) {
             return true;
         }
     	// Determine host capabilities
 		String caps = host.getCapabilities();
-		
+
 		if (caps != null) {
             String[] tokens = caps.split(",");
             for (String token : tokens) {
@@ -366,24 +374,24 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
             	}
             }
 		}
-		
+
 		return false;
     }
-    
+
     protected String getHostGuestOSCategory(HostVO host) {
 		DetailVO hostDetail = _hostDetailsDao.findDetail(host.getId(), "guest.os.category.id");
 		if (hostDetail != null) {
 			String guestOSCategoryIdString = hostDetail.getValue();
 			long guestOSCategoryId;
-			
+
 			try {
 				guestOSCategoryId = Long.parseLong(guestOSCategoryIdString);
 			} catch (Exception e) {
 				return null;
 			}
-			
+
 			GuestOSCategoryVO guestOSCategory = _guestOSCategoryDao.findById(guestOSCategoryId);
-			
+
 			if (guestOSCategory != null) {
 				return guestOSCategory.getName();
 			} else {
@@ -393,7 +401,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
 			return null;
 		}
     }
-    
+
     protected String getTemplateGuestOSCategory(VMTemplateVO template) {
     	long guestOSId = template.getGuestOSId();
     	GuestOSVO guestOS = _guestOSDao.findById(guestOSId);
@@ -408,7 +416,7 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
     		Map<String, String> configs = _configDao.getConfiguration(params);
             String opFactor = configs.get("cpu.overprovisioning.factor");
             _factor = NumbersUtil.parseFloat(opFactor, 1);
-            
+
             String allocationAlgorithm = configs.get("vm.allocation.algorithm");
             if (allocationAlgorithm != null) {
             	_allocationAlgorithm = allocationAlgorithm;


[2/3] git commit: updated refs/heads/planner_reserve to fe8cd8d

Posted by pr...@apache.org.
Adding new interface DeploymentClusterPlanner.java and refactoring planners


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/25f8ae92
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/25f8ae92
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/25f8ae92

Branch: refs/heads/planner_reserve
Commit: 25f8ae92c7be27b604ff40544c2a6b6bddb08b98
Parents: 1959377
Author: Prachi Damle <pr...@cloud.com>
Authored: Mon Apr 29 19:54:44 2013 -0700
Committer: Prachi Damle <pr...@cloud.com>
Committed: Mon Apr 29 19:54:44 2013 -0700

----------------------------------------------------------------------
 .../com/cloud/deploy/DeploymentClusterPlanner.java |    8 +-
 .../cloud/deploy/UserConcentratedPodPlanner.java   |   24 +-
 .../com/cloud/deploy/UserDispersingPlanner.java    |    2 +-
 .../deploy/DeploymentPlanningManagerImpl.java      |  682 ++++++++++++++-
 server/src/com/cloud/deploy/FirstFitPlanner.java   |  603 +++-----------
 5 files changed, 774 insertions(+), 545 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/25f8ae92/api/src/com/cloud/deploy/DeploymentClusterPlanner.java
----------------------------------------------------------------------
diff --git a/api/src/com/cloud/deploy/DeploymentClusterPlanner.java b/api/src/com/cloud/deploy/DeploymentClusterPlanner.java
index 4d8ee74..1a19c71 100644
--- a/api/src/com/cloud/deploy/DeploymentClusterPlanner.java
+++ b/api/src/com/cloud/deploy/DeploymentClusterPlanner.java
@@ -17,6 +17,7 @@
 package com.cloud.deploy;
 
 import java.util.List;
+
 import com.cloud.exception.InsufficientServerCapacityException;
 import com.cloud.vm.VirtualMachine;
 import com.cloud.vm.VirtualMachineProfile;
@@ -36,10 +37,9 @@ public interface DeploymentClusterPlanner extends DeploymentPlanner {
      *            avoid these data centers, pods, clusters, or hosts.
      * @return ordered list of prioritized cluster ids for that virtual machine.
      */
-    List<Long> orderClusters(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan,
-            ExcludeList avoid) throws InsufficientServerCapacityException;
-
+    List<Long> orderClusters(VirtualMachineProfile<? extends VirtualMachine> vm, DeploymentPlan plan, ExcludeList avoid)
+            throws InsufficientServerCapacityException;
 
-    PlannerResourceUsage getResourceType();
+    PlannerResourceUsage getResourceUsage();
 
 }
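
With this change a cluster planner only ranks clusters and declares its resource-usage mode; host and storage selection move into the deployment planning manager (see the DeploymentPlanningManagerImpl diff below). A skeletal implementation against the interface as diffed here, with the class and helper names hypothetical:

    // Hypothetical skeleton of a DeploymentClusterPlanner implementation.
    @Local(value=DeploymentPlanner.class)
    public class ExampleClusterPlanner extends FirstFitPlanner implements DeploymentClusterPlanner {

        @Override
        public List<Long> orderClusters(VirtualMachineProfile<? extends VirtualMachine> vm,
                DeploymentPlan plan, ExcludeList avoid) throws InsufficientServerCapacityException {
            // start from the capacity-ordered list, then apply this planner's heuristic
            List<Long> clusters = super.orderClusters(vm, plan, avoid);
            return reorderByHeuristic(clusters, vm);
        }

        @Override
        public PlannerResourceUsage getResourceUsage() {
            // non-Shared values reserve a host for that usage; see
            // checkIfHostFitsPlannerUsage in the manager diff below
            return PlannerResourceUsage.Shared;
        }

        private List<Long> reorderByHeuristic(List<Long> clusters,
                VirtualMachineProfile<? extends VirtualMachine> vm) {
            return clusters;                       // placeholder heuristic
        }
    }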

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/25f8ae92/plugins/deployment-planners/user-concentrated-pod/src/com/cloud/deploy/UserConcentratedPodPlanner.java
----------------------------------------------------------------------
diff --git a/plugins/deployment-planners/user-concentrated-pod/src/com/cloud/deploy/UserConcentratedPodPlanner.java b/plugins/deployment-planners/user-concentrated-pod/src/com/cloud/deploy/UserConcentratedPodPlanner.java
index 57d2bc4..d917893 100644
--- a/plugins/deployment-planners/user-concentrated-pod/src/com/cloud/deploy/UserConcentratedPodPlanner.java
+++ b/plugins/deployment-planners/user-concentrated-pod/src/com/cloud/deploy/UserConcentratedPodPlanner.java
@@ -11,7 +11,7 @@
 // Unless required by applicable law or agreed to in writing,
 // software distributed under the License is distributed on an
 // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the 
+// KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
 package com.cloud.deploy;
@@ -29,12 +29,12 @@ import com.cloud.vm.VirtualMachine;
 import com.cloud.vm.VirtualMachineProfile;
 
 @Local(value=DeploymentPlanner.class)
-public class UserConcentratedPodPlanner extends FirstFitPlanner implements DeploymentPlanner {
+public class UserConcentratedPodPlanner extends FirstFitPlanner implements DeploymentClusterPlanner {
 
     private static final Logger s_logger = Logger.getLogger(UserConcentratedPodPlanner.class);
-    
+
     /**
-     * This method should reorder the given list of Cluster Ids by applying any necessary heuristic 
+     * This method should reorder the given list of Cluster Ids by applying any necessary heuristic
      * for this planner
     * For UserConcentratedPodPlanner we need to order the clusters in a zone across pods, considering first those pods that have more of this account's VMs
     * This reordering is not done in case the clusters within a single pod are passed, i.e. when the allocation is applied at pod level.
@@ -48,7 +48,7 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo
         }
         return applyUserConcentrationPodHeuristicToClusters(id, clusterIdsByCapacity, vmProfile.getOwner().getAccountId());
     }
-    
+
     private List<Long> applyUserConcentrationPodHeuristicToClusters(long zoneId, List<Long> prioritizedClusterIds, long accountId){
         //user has VMs in certain pods. - prioritize those pods first
         //UserConcentratedPod strategy
@@ -60,8 +60,8 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo
             clusterList = prioritizedClusterIds;
         }
         return clusterList;
-    }    
-    
+    }
+
     private List<Long> reorderClustersByPods(List<Long> clusterIds, List<Long> podIds) {
 
         if (s_logger.isDebugEnabled()) {
@@ -110,11 +110,11 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo
 
         return prioritizedPods;
     }
-    
+
     /**
-     * This method should reorder the given list of Pod Ids by applying any necessary heuristic 
+     * This method should reorder the given list of Pod Ids by applying any necessary heuristic
      * for this planner
-     * For UserConcentratedPodPlanner we need to order the pods by considering those pods first which have more number of VMs for this account 
+     * For UserConcentratedPodPlanner we need to order the pods considering first those pods that have more of this account's VMs
      * @return List<Long> ordered list of Pod Ids
      */
     @Override
@@ -123,7 +123,7 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo
         if(vmProfile.getOwner() == null){
             return podIdsByCapacity;
         }
-        long accountId = vmProfile.getOwner().getAccountId(); 
+        long accountId = vmProfile.getOwner().getAccountId();
 
         //user has VMs in certain pods. - prioritize those pods first
         //UserConcentratedPod strategy
@@ -137,7 +137,7 @@ public class UserConcentratedPodPlanner extends FirstFitPlanner implements Deplo
         }else{
             return podIdsByCapacity;
         }
-        
+
     }
 
 }
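
Stripped of the dao lookups, the heuristic above is: group the capacity-ordered cluster list by pod, then emit pods in account-VM-count order while preserving the capacity order inside each pod. A self-contained sketch; the map parameter stands in for the cluster-to-pod lookup the planner does through the dao:

    // Reorder clusters pod-by-pod; prioritizedPodIds lists pods hosting more of
    // the account's VMs first. Illustrative sketch only.
    static List<Long> reorderClustersByPods(List<Long> clusterIds,
            List<Long> prioritizedPodIds, Map<Long, Long> podOfCluster) {
        Map<Long, List<Long>> clustersByPod = new LinkedHashMap<Long, List<Long>>();
        for (Long clusterId : clusterIds) {
            Long podId = podOfCluster.get(clusterId);
            List<Long> inPod = clustersByPod.get(podId);
            if (inPod == null) {
                inPod = new ArrayList<Long>();
                clustersByPod.put(podId, inPod);
            }
            inPod.add(clusterId);                  // keeps capacity order within a pod
        }
        List<Long> reordered = new ArrayList<Long>();
        for (Long podId : prioritizedPodIds) {
            List<Long> inPod = clustersByPod.remove(podId);
            if (inPod != null) {
                reordered.addAll(inPod);
            }
        }
        for (List<Long> remaining : clustersByPod.values()) {
            reordered.addAll(remaining);           // pods with none of the account's VMs
        }
        return reordered;
    }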

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/25f8ae92/plugins/deployment-planners/user-dispersing/src/com/cloud/deploy/UserDispersingPlanner.java
----------------------------------------------------------------------
diff --git a/plugins/deployment-planners/user-dispersing/src/com/cloud/deploy/UserDispersingPlanner.java b/plugins/deployment-planners/user-dispersing/src/com/cloud/deploy/UserDispersingPlanner.java
index 5bdaa71..2b0b158 100755
--- a/plugins/deployment-planners/user-dispersing/src/com/cloud/deploy/UserDispersingPlanner.java
+++ b/plugins/deployment-planners/user-dispersing/src/com/cloud/deploy/UserDispersingPlanner.java
@@ -35,7 +35,7 @@ import com.cloud.vm.VirtualMachine;
 import com.cloud.vm.VirtualMachineProfile;
 
 @Local(value=DeploymentPlanner.class)
-public class UserDispersingPlanner extends FirstFitPlanner implements DeploymentPlanner {
+public class UserDispersingPlanner extends FirstFitPlanner implements DeploymentClusterPlanner {
 
     private static final Logger s_logger = Logger.getLogger(UserDispersingPlanner.class);
     

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/25f8ae92/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java
index e03ffb1..e380adc 100644
--- a/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java
+++ b/server/src/com/cloud/deploy/DeploymentPlanningManagerImpl.java
@@ -16,8 +16,13 @@
 // under the License.
 package com.cloud.deploy;
 
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
 
 import javax.ejb.Local;
 import javax.inject.Inject;
@@ -27,26 +32,63 @@ import org.apache.cloudstack.affinity.AffinityGroupProcessor;
 import org.apache.cloudstack.affinity.AffinityGroupVMMapVO;
 import org.apache.cloudstack.affinity.dao.AffinityGroupDao;
 import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
+
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 import org.apache.log4j.Logger;
 
+import com.cloud.capacity.Capacity;
+import com.cloud.capacity.CapacityManager;
+import com.cloud.capacity.dao.CapacityDao;
+import com.cloud.configuration.Config;
+import com.cloud.configuration.dao.ConfigurationDao;
+import com.cloud.dc.ClusterDetailsDao;
+import com.cloud.dc.ClusterDetailsVO;
+import com.cloud.dc.DataCenter;
 import com.cloud.dc.DataCenterVO;
+import com.cloud.dc.Pod;
+import com.cloud.dc.dao.ClusterDao;
 import com.cloud.dc.dao.DataCenterDao;
+import com.cloud.dc.dao.HostPodDao;
 import com.cloud.deploy.DeploymentPlanner.ExcludeList;
 import com.cloud.deploy.DeploymentPlanner.PlannerResourceUsage;
 import com.cloud.deploy.dao.PlannerHostReservationDao;
 import com.cloud.exception.AffinityConflictException;
 import com.cloud.exception.ConnectionException;
 import com.cloud.exception.InsufficientServerCapacityException;
+import com.cloud.host.Host;
 import com.cloud.host.HostVO;
 import com.cloud.host.Status;
+import com.cloud.host.dao.HostDao;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.network.security.SecurityGroupVO;
 import com.cloud.offering.ServiceOffering;
+import com.cloud.org.Cluster;
+import com.cloud.org.Grouping;
+import com.cloud.resource.ResourceState;
+import com.cloud.storage.DiskOfferingVO;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.Volume;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.DiskOfferingDao;
+import com.cloud.storage.dao.GuestOSCategoryDao;
+import com.cloud.storage.dao.GuestOSDao;
+import com.cloud.storage.dao.StoragePoolHostDao;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.user.AccountManager;
+import com.cloud.utils.NumbersUtil;
+import com.cloud.utils.Pair;
 import com.cloud.utils.component.ComponentContext;
 import com.cloud.utils.component.Manager;
 import com.cloud.utils.component.ManagerBase;
 import com.cloud.utils.db.DB;
 import com.cloud.utils.db.Transaction;
+import com.cloud.vm.DiskProfile;
+import com.cloud.vm.ReservationContext;
 import com.cloud.vm.VirtualMachine;
 import com.cloud.vm.VirtualMachineProfile;
 import com.cloud.vm.dao.UserVmDao;
@@ -59,6 +101,7 @@ import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.Command;
 import com.cloud.agent.api.StartupCommand;
 import com.cloud.agent.api.StartupRoutingCommand;
+import com.cloud.agent.manager.allocator.HostAllocator;
 
 @Local(value = { DeploymentPlanningManager.class })
 public class DeploymentPlanningManagerImpl extends ManagerBase implements DeploymentPlanningManager, Manager, Listener {
@@ -79,6 +122,41 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
     @Inject
     PlannerHostReservationDao _plannerHostReserveDao;
 
+    protected List<StoragePoolAllocator> _storagePoolAllocators;
+    public List<StoragePoolAllocator> getStoragePoolAllocators() {
+        return _storagePoolAllocators;
+    }
+    public void setStoragePoolAllocators(
+            List<StoragePoolAllocator> _storagePoolAllocators) {
+        this._storagePoolAllocators = _storagePoolAllocators;
+    }
+
+    protected List<HostAllocator> _hostAllocators;
+    public List<HostAllocator> getHostAllocators() {
+        return _hostAllocators;
+    }
+    public void setHostAllocators(List<HostAllocator> _hostAllocators) {
+        this._hostAllocators = _hostAllocators;
+    }
+
+    @Inject protected HostDao _hostDao;
+    @Inject protected HostPodDao _podDao;
+    @Inject protected ClusterDao _clusterDao;
+    @Inject protected GuestOSDao _guestOSDao = null;
+    @Inject protected GuestOSCategoryDao _guestOSCategoryDao = null;
+    @Inject protected DiskOfferingDao _diskOfferingDao;
+    @Inject protected StoragePoolHostDao _poolHostDao;
+
+    @Inject protected VolumeDao _volsDao;
+    @Inject protected CapacityManager _capacityMgr;
+    @Inject protected ConfigurationDao _configDao;
+    @Inject protected PrimaryDataStoreDao _storagePoolDao;
+    @Inject protected CapacityDao _capacityDao;
+    @Inject protected AccountManager _accountMgr;
+    @Inject protected StorageManager _storageMgr;
+    @Inject DataStoreManager dataStoreMgr;
+    @Inject protected ClusterDetailsDao _clusterDetailsDao;
+
     protected List<DeploymentPlanner> _planners;
     public List<DeploymentPlanner> getPlanners() {
         return _planners;
@@ -116,47 +194,231 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
         }
 
         // call planners
-        DeployDestination dest = null;
-        List<Long> clusterIds = null;
+        DataCenter dc = _dcDao.findById(vm.getDataCenterId());
+        // check if datacenter is in avoid set
+        if (avoids.shouldAvoid(dc)) {
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("DataCenter id = '" + dc.getId()
+                        + "' provided is in avoid set, DeploymentPlanner cannot allocate the VM, returning.");
+            }
+            return null;
+        }
+
 
         ServiceOffering offering = vmProfile.getServiceOffering();
+        DeploymentPlanner planner = ComponentContext.getComponent(offering.getDeploymentPlanner());
+
+        int cpu_requested = offering.getCpu() * offering.getSpeed();
+        long ram_requested = offering.getRamSize() * 1024L * 1024L;
+
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("DeploymentPlanner allocation algorithm: " + planner);
+
+            s_logger.debug("Trying to allocate a host and storage pools from dc:" + plan.getDataCenterId() + ", pod:"
+                    + plan.getPodId() + ",cluster:" + plan.getClusterId() + ", requested cpu: " + cpu_requested
+                    + ", requested ram: " + ram_requested);
+
+            s_logger.debug("Is ROOT volume READY (pool already allocated)?: "
+                    + (plan.getPoolId() != null ? "Yes" : "No"));
+        }
+
+        String haVmTag = (String) vmProfile.getParameter(VirtualMachineProfile.Param.HaTag);
+
+        if (plan.getHostId() != null && haVmTag == null) {
+            Long hostIdSpecified = plan.getHostId();
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("DeploymentPlan has host_id specified, choosing this host and making no checks on this host: "
+                        + hostIdSpecified);
+            }
+            HostVO host = _hostDao.findById(hostIdSpecified);
+            if (host == null) {
+                s_logger.debug("The specified host cannot be found");
+            } else if (avoids.shouldAvoid(host)) {
+                s_logger.debug("The specified host is in avoid set");
+            } else {
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug("Looking for suitable pools for this host under zone: " + host.getDataCenterId()
+                            + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId());
+                }
+
+                // search for storage under the zone, pod, cluster of the host.
+                DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(),
+                        host.getClusterId(), hostIdSpecified, plan.getPoolId(), null, plan.getReservationContext());
+
+                Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile,
+                        lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
+                Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
+                List<Volume> readyAndReusedVolumes = result.second();
+
+                // choose the potential pool for this VM for this host
+                if (!suitableVolumeStoragePools.isEmpty()) {
+                    List<Host> suitableHosts = new ArrayList<Host>();
+                    suitableHosts.add(host);
+
+                    Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
+                            suitableHosts, suitableVolumeStoragePools, getPlannerUsage(planner));
+                    if (potentialResources != null) {
+                        Pod pod = _podDao.findById(host.getPodId());
+                        Cluster cluster = _clusterDao.findById(host.getClusterId());
+                        Map<Volume, StoragePool> storageVolMap = potentialResources.second();
+                        // remove the reused vol<->pool from destination, since
+                        // we don't have to prepare this volume.
+                        for (Volume vol : readyAndReusedVolumes) {
+                            storageVolMap.remove(vol);
+                        }
+                        DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap);
+                        s_logger.debug("Returning Deployment Destination: " + dest);
+                        return dest;
+                    }
+                }
+            }
+            s_logger.debug("Cannnot deploy to specified host, returning.");
+            return null;
+        }
+
+        if (vm.getLastHostId() != null && haVmTag == null) {
+            s_logger.debug("This VM has last host_id specified, trying to choose the same host: " + vm.getLastHostId());
+
+            HostVO host = _hostDao.findById(vm.getLastHostId());
+            if (host == null) {
+                s_logger.debug("The last host of this VM cannot be found");
+            } else if (avoids.shouldAvoid(host)) {
+                s_logger.debug("The last host of this VM is in avoid set");
+            } else if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) {
+                s_logger.debug("The last Host, hostId: "
+                        + host.getId()
+                        + " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts");
+            } else {
+                if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) {
+                    long cluster_id = host.getClusterId();
+                    ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id,
+                            "cpuOvercommitRatio");
+                    ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id,
+                            "memoryOvercommitRatio");
+                    Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
+                    Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
+                    if (_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true,
+                            cpuOvercommitRatio, memoryOvercommitRatio, true)) {
+                        s_logger.debug("The last host of this VM is UP and has enough capacity");
+                        s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId()
+                                + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId());
+                        // search for storage under the zone, pod, cluster of
+                        // the last host.
+                        DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(),
+                                host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null);
+                        Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(
+                                vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
+                        Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
+                        List<Volume> readyAndReusedVolumes = result.second();
+                        // choose the potential pool for this VM for this host
+                        if (!suitableVolumeStoragePools.isEmpty()) {
+                            List<Host> suitableHosts = new ArrayList<Host>();
+                            suitableHosts.add(host);
+
+                            Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
+                                    suitableHosts, suitableVolumeStoragePools, getPlannerUsage(planner));
+                            if (potentialResources != null) {
+                                Pod pod = _podDao.findById(host.getPodId());
+                                Cluster cluster = _clusterDao.findById(host.getClusterId());
+                                Map<Volume, StoragePool> storageVolMap = potentialResources.second();
+                                // remove the reused vol<->pool from
+                                // destination, since we don't have to prepare
+                                // this volume.
+                                for (Volume vol : readyAndReusedVolumes) {
+                                    storageVolMap.remove(vol);
+                                }
+                                DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap);
+                                s_logger.debug("Returning Deployment Destination: " + dest);
+                                return dest;
+                            }
+                        }
+                    } else {
+                        s_logger.debug("The last host of this VM does not have enough capacity");
+                    }
+                } else {
+                    s_logger.debug("The last host of this VM is not UP or is not enabled, host status is: "
+                            + host.getStatus().name() + ", host resource state is: " + host.getResourceState());
+                }
+            }
+            s_logger.debug("Cannot choose the last host to deploy this VM ");
+        }
+
+        DeployDestination dest = null;
+        List<Long> clusterList = null;
+
         if (offering != null && offering.getDeploymentPlanner() != null) {
-            DeploymentPlanner planner = ComponentContext.getComponent(offering.getDeploymentPlanner());
             if (planner != null && planner.canHandle(vmProfile, plan, avoids)) {
                 while (true) {
-                    if (planner instanceof DeploymentClusterPlanner) {
-                        clusterIds = ((DeploymentClusterPlanner) planner).orderClusters(vmProfile, plan, avoids);
-                    } else {
-                        dest = planner.plan(vmProfile, plan, avoids);
-                    }
 
-                    if (dest != null) {
-                        long hostId = dest.getHost().getId();
-                        avoids.addHost(dest.getHost().getId());
+                    if (planner instanceof DeploymentClusterPlanner) {
+                        clusterList = ((DeploymentClusterPlanner) planner).orderClusters(vmProfile, plan, avoids);
+                        ExcludeList plannerAvoidInput = new ExcludeList(avoids.getDataCentersToAvoid(),
+                                avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(),
+                                avoids.getPoolsToAvoid());
+
+                        if (clusterList != null && !clusterList.isEmpty()) {
+                            // planner refactoring: call allocators to list hosts
+                            ExcludeList plannerAvoidOutput = new ExcludeList(avoids.getDataCentersToAvoid(),
+                                    avoids.getPodsToAvoid(), avoids.getClustersToAvoid(), avoids.getHostsToAvoid(),
+                                    avoids.getPoolsToAvoid());
+
+                            plannerAvoidOutput.getDataCentersToAvoid().removeAll(plannerAvoidInput.getDataCentersToAvoid());
+                            plannerAvoidOutput.getPodsToAvoid().removeAll(plannerAvoidInput.getPodsToAvoid());
+                            plannerAvoidOutput.getClustersToAvoid().removeAll(plannerAvoidInput.getClustersToAvoid());
+                            plannerAvoidOutput.getHostsToAvoid().removeAll(plannerAvoidInput.getHostsToAvoid());
+                            plannerAvoidOutput.getPoolsToAvoid().removeAll(plannerAvoidInput.getPoolsToAvoid());
+
+                            dest = checkClustersForDestination(clusterList, vmProfile, plan, avoids, dc,
+                                    getPlannerUsage(planner), plannerAvoidOutput);
+                            if (dest != null) {
+                                return dest;
+                            }
+                            // reset the avoid input to the planners
+                            avoids.getDataCentersToAvoid().removeAll(plannerAvoidOutput.getDataCentersToAvoid());
+                            avoids.getPodsToAvoid().removeAll(plannerAvoidOutput.getPodsToAvoid());
+                            avoids.getClustersToAvoid().removeAll(plannerAvoidOutput.getClustersToAvoid());
+                            avoids.getHostsToAvoid().removeAll(plannerAvoidOutput.getHostsToAvoid());
+                            avoids.getPoolsToAvoid().removeAll(plannerAvoidOutput.getPoolsToAvoid());
 
-                        if (checkIfHostCanBeUsed(hostId, DeploymentPlanner.PlannerResourceUsage.Shared)) {
-                            // found destination
-                            return dest;
                         } else {
-                            // find another host - seems some concurrent deployment picked it up for dedicated access
-                            continue;
+                            return null;
                         }
-                    } else if (clusterIds != null && !clusterIds.isEmpty()) {
-                        // planner refactoring. call allocators to list hosts
-
                     } else {
-                        return null;
+                        dest = planner.plan(vmProfile, plan, avoids);
+                        if (dest != null) {
+                            long hostId = dest.getHost().getId();
+                            avoids.addHost(dest.getHost().getId());
+
+                            if (checkIfHostFitsPlannerUsage(hostId, DeploymentPlanner.PlannerResourceUsage.Shared)) {
+                                // found destination
+                                return dest;
+                            } else {
+                                // find another host - seems some concurrent deployment picked it up for dedicated access
+                                continue;
+                            }
+                        } else {
+                            return null;
+                        }
                     }
                 }
             }
         }
 
-
         return dest;
     }
 
+    private PlannerResourceUsage getPlannerUsage(DeploymentPlanner planner) {
+        if (planner instanceof DeploymentClusterPlanner) {
+            return ((DeploymentClusterPlanner) planner).getResourceUsage();
+        } else {
+            return DeploymentPlanner.PlannerResourceUsage.Shared;
+        }
+
+    }
+
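
Back in the last-host fast path above, the capacity check is gated on the cluster's overcommit ratios; pulled out on its own, the sequence is (dao and manager signatures as used in this patch, the meaning of the two boolean flags is an assumption):

    // Sketch: does the last host still fit this offering, given cluster overcommit?
    long clusterId = host.getClusterId();
    float cpuOvercommit = Float.parseFloat(
            _clusterDetailsDao.findDetail(clusterId, "cpuOvercommitRatio").getValue());
    float ramOvercommit = Float.parseFloat(
            _clusterDetailsDao.findDetail(clusterId, "memoryOvercommitRatio").getValue());
    boolean fits = _capacityMgr.checkIfHostHasCapacity(host.getId(),
            cpu_requested, ram_requested,
            true,                 // assumed: check against reserved capacity
            cpuOvercommit, ramOvercommit,
            true);                // assumed: consider reserved capacity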
     @DB
-    private boolean checkIfHostCanBeUsed(long hostId, PlannerResourceUsage resourceTypeRequired) {
+    private boolean checkIfHostFitsPlannerUsage(long hostId, PlannerResourceUsage resourceUsageRequired) {
         // TODO Auto-generated method stub
         // check if this host has been picked up by some other planner
         // exclusively
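
Stepping back to the avoid-set bookkeeping earlier in this hunk: the manager snapshots the ExcludeList, diffs it after an allocation pass to isolate what that pass added, and strips the delta before retrying so the planner sees its original input again. Reduced to its set operations (copyOf and tryOnePass are hypothetical stand-ins; only the ExcludeList constructor and getters come from this diff):

    // Hypothetical helper wrapping the five-set ExcludeList constructor used above.
    static ExcludeList copyOf(ExcludeList src) {
        return new ExcludeList(src.getDataCentersToAvoid(), src.getPodsToAvoid(),
                src.getClustersToAvoid(), src.getHostsToAvoid(), src.getPoolsToAvoid());
    }

    ExcludeList before = copyOf(avoids);           // snapshot before the pass
    DeployDestination dest = tryOnePass(avoids);   // the pass may add hosts/pools
    if (dest == null) {
        ExcludeList delta = copyOf(avoids);        // isolate what the pass added
        delta.getHostsToAvoid().removeAll(before.getHostsToAvoid());
        delta.getPoolsToAvoid().removeAll(before.getPoolsToAvoid());
        avoids.getHostsToAvoid().removeAll(delta.getHostsToAvoid());   // reset for retry
        avoids.getPoolsToAvoid().removeAll(delta.getPoolsToAvoid());
    }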
@@ -170,7 +432,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
             PlannerResourceUsage hostResourceType = reservationEntry.getResourceUsage();
 
             if (hostResourceType != null) {
-                if (hostResourceType == resourceTypeRequired) {
+                if (hostResourceType == resourceUsageRequired) {
                     return true;
                 } else {
                     return false;
@@ -190,12 +452,12 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
                     }
                     // check before updating
                     if (lockedEntry.getResourceUsage() == null) {
-                        lockedEntry.setResourceUsage(resourceTypeRequired);
+                        lockedEntry.setResourceUsage(resourceUsageRequired);
                         _plannerHostReserveDao.persist(lockedEntry);
                         return true;
                     } else {
                         // someone updated it earlier. check if we can still use it
-                        if (lockedEntry.getResourceUsage() == resourceTypeRequired) {
+                        if (lockedEntry.getResourceUsage() == resourceUsageRequired) {
                             return true;
                         } else {
                             return false;
@@ -275,4 +537,374 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy
         return super.configure(name, params);
     }
 
+    // planner refactoring: helpers that search the ordered clusters for a deploy destination
+    private DeployDestination checkClustersForDestination(List<Long> clusterList,
+            VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid,
+            DataCenter dc, DeploymentPlanner.PlannerResourceUsage resourceUsageRequired, ExcludeList plannerAvoidOutput) {
+
+        if (s_logger.isTraceEnabled()) {
+            s_logger.trace("ClusterId List to consider: " + clusterList);
+        }
+
+        for (Long clusterId : clusterList) {
+            Cluster clusterVO = _clusterDao.findById(clusterId);
+
+            if (clusterVO.getHypervisorType() != vmProfile.getHypervisorType()) {
+                s_logger.debug("Cluster: " + clusterId
+                        + " has HyperVisorType that does not match the VM, skipping this cluster");
+                avoid.addCluster(clusterVO.getId());
+                continue;
+            }
+
+            s_logger.debug("Checking resources in Cluster: " + clusterId + " under Pod: " + clusterVO.getPodId());
+            // search for resources(hosts and storage) under this zone, pod,
+            // cluster.
+            DataCenterDeployment potentialPlan = new DataCenterDeployment(plan.getDataCenterId(), clusterVO.getPodId(),
+                    clusterVO.getId(), null, plan.getPoolId(), null, plan.getReservationContext());
+
+            // find suitable hosts under this cluster, need as many hosts as we
+            // get.
+            List<Host> suitableHosts = findSuitableHosts(vmProfile, potentialPlan, avoid, HostAllocator.RETURN_UPTO_ALL);
+            // if found suitable hosts in this cluster, find suitable storage
+            // pools for each volume of the VM
+            if (suitableHosts != null && !suitableHosts.isEmpty()) {
+                if (vmProfile.getHypervisorType() == HypervisorType.BareMetal) {
+                    Pod pod = _podDao.findById(clusterVO.getPodId());
+                    DeployDestination dest = new DeployDestination(dc, pod, clusterVO, suitableHosts.get(0));
+                    return dest;
+                }
+
+                Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile,
+                        potentialPlan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
+                Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
+                List<Volume> readyAndReusedVolumes = result.second();
+
+                // choose the potential host and pool for the VM
+                if (!suitableVolumeStoragePools.isEmpty()) {
+                    Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
+                            suitableHosts, suitableVolumeStoragePools, resourceUsageRequired);
+
+                    if (potentialResources != null) {
+                        Pod pod = _podDao.findById(clusterVO.getPodId());
+                        Host host = _hostDao.findById(potentialResources.first().getId());
+                        Map<Volume, StoragePool> storageVolMap = potentialResources.second();
+                        // remove the reused vol<->pool from destination, since
+                        // we don't have to prepare this volume.
+                        for (Volume vol : readyAndReusedVolumes) {
+                            storageVolMap.remove(vol);
+                        }
+                        DeployDestination dest = new DeployDestination(dc, pod, clusterVO, host, storageVolMap);
+                        s_logger.debug("Returning Deployment Destination: " + dest);
+                        return dest;
+                    }
+                } else {
+                    s_logger.debug("No suitable storagePools found under this Cluster: " + clusterId);
+                }
+            } else {
+                s_logger.debug("No suitable hosts found under this Cluster: " + clusterId);
+            }
+
+            if (canAvoidCluster(clusterVO, avoid, plannerAvoidOutput)) {
+                avoid.addCluster(clusterVO.getId());
+            }
+        }
+        s_logger.debug("Could not find suitable Deployment Destination for this VM under any clusters, returning. ");
+        return null;
+    }
+
+    private boolean canAvoidCluster(Cluster clusterVO, ExcludeList avoids, ExcludeList plannerAvoidOutput) {
+
+        ExcludeList allocatorAvoidOutput = new ExcludeList(avoids.getDataCentersToAvoid(), avoids.getPodsToAvoid(),
+                avoids.getClustersToAvoid(), avoids.getHostsToAvoid(), avoids.getPoolsToAvoid());
+
+        // remove any hosts/pools that the planners might have added
+        // to get the list of hosts/pools that Allocators flagged as 'avoid'
+        allocatorAvoidOutput.getHostsToAvoid().removeAll(plannerAvoidOutput.getHostsToAvoid());
+        allocatorAvoidOutput.getPoolsToAvoid().removeAll(plannerAvoidOutput.getPoolsToAvoid());
+
+        // if all hosts or all pools in the cluster are in avoid set after this
+        // pass, then put the cluster in avoid set.
+
+        List<HostVO> allhostsInCluster = _hostDao.listAllUpAndEnabledNonHAHosts(Host.Type.Routing, clusterVO.getId(),
+                clusterVO.getPodId(), clusterVO.getDataCenterId(), null);
+
+        for (HostVO host : allhostsInCluster) {
+            if (!allocatorAvoidOutput.getHostsToAvoid().contains(host.getId())) {
+                // there's some host in the cluster that is not yet in avoid set
+                return false;
+            }
+        }
+
+        List<StoragePoolVO> allPoolsInCluster = _storagePoolDao.findPoolsByTags(clusterVO.getDataCenterId(),
+                clusterVO.getPodId(), clusterVO.getId(), null);
+        for (StoragePoolVO pool : allPoolsInCluster) {
+            if (!allocatorAvoidOutput.getPoolsToAvoid().contains(pool.getId())) {
+                // there's some pool in the cluster that is not yet in avoid set
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+    protected Pair<Host, Map<Volume, StoragePool>> findPotentialDeploymentResources(List<Host> suitableHosts,
+            Map<Volume, List<StoragePool>> suitableVolumeStoragePools, DeploymentPlanner.PlannerResourceUsage resourceUsageRequired) {
+        s_logger.debug("Trying to find a potenial host and associated storage pools from the suitable host/pool lists for this VM");
+
+        boolean hostCanAccessPool = false;
+        boolean haveEnoughSpace = false;
+        Map<Volume, StoragePool> storage = new HashMap<Volume, StoragePool>();
+        TreeSet<Volume> volumesOrderBySizeDesc = new TreeSet<Volume>(new Comparator<Volume>() {
+            @Override
+            public int compare(Volume v1, Volume v2) {
+                if (v1.getSize() < v2.getSize())
+                    return 1;
+                else
+                    return -1;
+            }
+        });
+        volumesOrderBySizeDesc.addAll(suitableVolumeStoragePools.keySet());
+        boolean multipleVolume = volumesOrderBySizeDesc.size() > 1;
+        for (Host potentialHost : suitableHosts) {
+            Map<StoragePool, List<Volume>> volumeAllocationMap = new HashMap<StoragePool, List<Volume>>();
+            for (Volume vol : volumesOrderBySizeDesc) {
+                haveEnoughSpace = false;
+                s_logger.debug("Checking if host: " + potentialHost.getId()
+                        + " can access any suitable storage pool for volume: " + vol.getVolumeType());
+                List<StoragePool> volumePoolList = suitableVolumeStoragePools.get(vol);
+                hostCanAccessPool = false;
+                for (StoragePool potentialSPool : volumePoolList) {
+                    if (hostCanAccessSPool(potentialHost, potentialSPool)) {
+                        hostCanAccessPool = true;
+                        if (multipleVolume) {
+                            List<Volume> requestVolumes = null;
+                            if (volumeAllocationMap.containsKey(potentialSPool))
+                                requestVolumes = volumeAllocationMap.get(potentialSPool);
+                            else
+                                requestVolumes = new ArrayList<Volume>();
+                            requestVolumes.add(vol);
+
+                            if (!_storageMgr.storagePoolHasEnoughSpace(requestVolumes, potentialSPool))
+                                continue;
+                            volumeAllocationMap.put(potentialSPool, requestVolumes);
+                        }
+                        storage.put(vol, potentialSPool);
+                        haveEnoughSpace = true;
+                        break;
+                    }
+                }
+                if (!hostCanAccessPool) {
+                    break;
+                }
+                if (!haveEnoughSpace) {
+                    s_logger.warn("insufficient capacity to allocate all volumes");
+                    break;
+                }
+            }
+            if (hostCanAccessPool && haveEnoughSpace) {
+                // check the planner host reservation
+                if (checkIfHostFitsPlannerUsage(potentialHost.getId(), resourceUsageRequired)) {
+                    s_logger.debug("Found a potential host " + "id: " + potentialHost.getId() + " name: "
+                            + potentialHost.getName() + " and associated storage pools for this VM");
+                    return new Pair<Host, Map<Volume, StoragePool>>(potentialHost, storage);
+                }
+            }
+        }
+        s_logger.debug("Could not find a potential host that has associated storage pools from the suitable host/pool lists for this VM");
+        return null;
+    }
+
+    protected boolean hostCanAccessSPool(Host host, StoragePool pool) {
+        boolean hostCanAccessSPool = false;
+
+        StoragePoolHostVO hostPoolLinkage = _poolHostDao.findByPoolHost(pool.getId(), host.getId());
+        if (hostPoolLinkage != null) {
+            hostCanAccessSPool = true;
+        }
+
+        s_logger.debug("Host: " + host.getId() + (hostCanAccessSPool ? " can" : " cannot") + " access pool: "
+                + pool.getId());
+        return hostCanAccessSPool;
+    }
+
+    protected List<Host> findSuitableHosts(VirtualMachineProfile<? extends VirtualMachine> vmProfile,
+            DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
+        List<Host> suitableHosts = new ArrayList<Host>();
+        for (HostAllocator allocator : _hostAllocators) {
+            suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, avoid, returnUpTo);
+            if (suitableHosts != null && !suitableHosts.isEmpty()) {
+                break;
+            }
+        }
+
+        if (suitableHosts.isEmpty()) {
+            s_logger.debug("No suitable hosts found");
+        }
+        return suitableHosts;
+    }
+
+    protected Pair<Map<Volume, List<StoragePool>>, List<Volume>> findSuitablePoolsForVolumes(
+            VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid,
+            int returnUpTo) {
+        List<VolumeVO> volumesTobeCreated = _volsDao.findUsableVolumesForInstance(vmProfile.getId());
+        Map<Volume, List<StoragePool>> suitableVolumeStoragePools = new HashMap<Volume, List<StoragePool>>();
+        List<Volume> readyAndReusedVolumes = new ArrayList<Volume>();
+
+        // for each volume find list of suitable storage pools by calling the
+        // allocators
+        for (VolumeVO toBeCreated : volumesTobeCreated) {
+            s_logger.debug("Checking suitable pools for volume (Id, Type): (" + toBeCreated.getId() + ","
+                    + toBeCreated.getVolumeType().name() + ")");
+
+            // If the plan specifies a poolId, it means that this VM's ROOT
+            // volume is ready and the pool should be reused.
+            // In this case, also check if rest of the volumes are ready and can
+            // be reused.
+            if (plan.getPoolId() != null) {
+                s_logger.debug("Volume has pool already allocated, checking if pool can be reused, poolId: "
+                        + toBeCreated.getPoolId());
+                List<StoragePool> suitablePools = new ArrayList<StoragePool>();
+                StoragePool pool = null;
+                if (toBeCreated.getPoolId() != null) {
+                    pool = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(toBeCreated.getPoolId());
+                } else {
+                    pool = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(plan.getPoolId());
+                }
+
+                if (!pool.isInMaintenance()) {
+                    if (!avoid.shouldAvoid(pool)) {
+                        long exstPoolDcId = pool.getDataCenterId();
+
+                        long exstPoolPodId = pool.getPodId() != null ? pool.getPodId() : -1;
+                        long exstPoolClusterId = pool.getClusterId() != null ? pool.getClusterId() : -1;
+                        if (plan.getDataCenterId() == exstPoolDcId && plan.getPodId() == exstPoolPodId
+                                && plan.getClusterId() == exstPoolClusterId) {
+                            s_logger.debug("Planner need not allocate a pool for this volume since its READY");
+                            suitablePools.add(pool);
+                            suitableVolumeStoragePools.put(toBeCreated, suitablePools);
+                            if (!(toBeCreated.getState() == Volume.State.Allocated || toBeCreated.getState() == Volume.State.Creating)) {
+                                readyAndReusedVolumes.add(toBeCreated);
+                            }
+                            continue;
+                        } else {
+                            s_logger.debug("Pool of the volume does not fit the specified plan, need to reallocate a pool for this volume");
+                        }
+                    } else {
+                        s_logger.debug("Pool of the volume is in avoid set, need to reallocate a pool for this volume");
+                    }
+                } else {
+                    s_logger.debug("Pool of the volume is in maintenance, need to reallocate a pool for this volume");
+                }
+            }
+
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("We need to allocate new storagepool for this volume");
+            }
+            if (!isRootAdmin(plan.getReservationContext())) {
+                if (!isEnabledForAllocation(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId())) {
+                    if (s_logger.isDebugEnabled()) {
+                        s_logger.debug("Cannot allocate new storagepool for this volume in this cluster, allocation state is disabled");
+                        s_logger.debug("Cannot deploy to this specified plan, allocation state is disabled, returning.");
+                    }
+                    // Cannot find suitable storage pools under this cluster for
+                    // this volume since allocation_state is disabled.
+                    // - remove any suitable pools found for other volumes.
+                    // All volumes should get suitable pools under this cluster;
+                    // else we can't use this cluster.
+                    suitableVolumeStoragePools.clear();
+                    break;
+                }
+            }
+
+            s_logger.debug("Calling StoragePoolAllocators to find suitable pools");
+
+            DiskOfferingVO diskOffering = _diskOfferingDao.findById(toBeCreated.getDiskOfferingId());
+            DiskProfile diskProfile = new DiskProfile(toBeCreated, diskOffering, vmProfile.getHypervisorType());
+
+            boolean useLocalStorage = false;
+            if (vmProfile.getType() != VirtualMachine.Type.User) {
+                String ssvmUseLocalStorage = _configDao.getValue(Config.SystemVMUseLocalStorage.key());
+                if (ssvmUseLocalStorage.equalsIgnoreCase("true")) {
+                    useLocalStorage = true;
+                }
+            } else {
+                useLocalStorage = diskOffering.getUseLocalStorage();
+
+                // TODO: this is a hack to fix deploying an ISO-based VM on
+                // local storage. When deploying a VM based on an ISO, we have
+                // a service offering and an additional disk offering; the
+                // use-local-storage flag is actually saved in the service
+                // offering, so override the flag from the service offering
+                // when it is a ROOT disk.
+                if (!useLocalStorage && vmProfile.getServiceOffering().getUseLocalStorage()) {
+                    if (toBeCreated.getVolumeType() == Volume.Type.ROOT)
+                        useLocalStorage = true;
+                }
+            }
+            diskProfile.setUseLocalStorage(useLocalStorage);
+
+            boolean foundPotentialPools = false;
+            for (StoragePoolAllocator allocator : _storagePoolAllocators) {
+                final List<StoragePool> suitablePools = allocator.allocateToPool(diskProfile, vmProfile, plan, avoid,
+                        returnUpTo);
+                if (suitablePools != null && !suitablePools.isEmpty()) {
+                    suitableVolumeStoragePools.put(toBeCreated, suitablePools);
+                    foundPotentialPools = true;
+                    break;
+                }
+            }
+
+            if (!foundPotentialPools) {
+                s_logger.debug("No suitable pools found for volume: " + toBeCreated + " under cluster: "
+                        + plan.getClusterId());
+                // No suitable storage pools found under this cluster for this
+                // volume. - remove any suitable pools found for other volumes.
+                // All volumes should get suitable pools under this cluster;
+                // else we can't use this cluster.
+                suitableVolumeStoragePools.clear();
+                break;
+            }
+        }
+
+        if (suitableVolumeStoragePools.isEmpty()) {
+            s_logger.debug("No suitable pools found");
+        }
+
+        return new Pair<Map<Volume, List<StoragePool>>, List<Volume>>(suitableVolumeStoragePools, readyAndReusedVolumes);
+    }
+
+    private boolean isEnabledForAllocation(long zoneId, Long podId, Long clusterId) {
+        // Check if the zone exists in the system
+        DataCenterVO zone = _dcDao.findById(zoneId);
+        if (zone != null && Grouping.AllocationState.Disabled == zone.getAllocationState()) {
+            s_logger.info("Zone is currently disabled, cannot allocate to this zone: " + zoneId);
+            return false;
+        }
+
+        Pod pod = _podDao.findById(podId);
+        if (pod != null && Grouping.AllocationState.Disabled == pod.getAllocationState()) {
+            s_logger.info("Pod is currently disabled, cannot allocate to this pod: " + podId);
+            return false;
+        }
+
+        Cluster cluster = _clusterDao.findById(clusterId);
+        if (cluster != null && Grouping.AllocationState.Disabled == cluster.getAllocationState()) {
+            s_logger.info("Cluster is currently disabled, cannot allocate to this cluster: " + clusterId);
+            return false;
+        }
+
+        return true;
+    }
+
+    private boolean isRootAdmin(ReservationContext reservationContext) {
+        if (reservationContext != null) {
+            if (reservationContext.getAccount() != null) {
+                return _accountMgr.isRootAdmin(reservationContext.getAccount().getType());
+            } else {
+                return false;
+            }
+        }
+        return false;
+    }
 }
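
Finally, a small caveat in findPotentialDeploymentResources: the size-descending comparator above never returns 0, which keeps equal-sized volumes from collapsing into one TreeSet entry but violates the Comparator contract (for equal sizes, compare(a,b) and compare(b,a) are both negative). A contract-consistent variant that preserves both the descending order and the distinctness, assuming Volume exposes getSize() and getId() as used elsewhere in this patch:

    // Order volumes by size descending, tie-breaking on id so the comparator is
    // consistent and distinct equal-sized volumes are still kept apart.
    TreeSet<Volume> volumesOrderBySizeDesc = new TreeSet<Volume>(new Comparator<Volume>() {
        @Override
        public int compare(Volume v1, Volume v2) {
            int bySize = Long.valueOf(v2.getSize()).compareTo(Long.valueOf(v1.getSize()));
            if (bySize != 0) {
                return bySize;
            }
            return Long.valueOf(v1.getId()).compareTo(Long.valueOf(v2.getId()));
        }
    });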