Posted to commits@cloudstack.apache.org by ed...@apache.org on 2013/06/20 09:19:53 UTC

[20/50] [abbrv] Merge branch 'master' (up to commit c30d9be3cea30339cfff40c1002906634291b373) into object_store.

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/18aeef3e/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
----------------------------------------------------------------------
diff --cc plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
index d8d7476,5944cc8..34766a0
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
@@@ -37,13 -37,9 +37,14 @@@ import java.util.Random
  import java.util.TimeZone;
  import java.util.UUID;
  
+ import javax.inject.Inject;
  import javax.naming.ConfigurationException;
  
 +import org.apache.cloudstack.storage.command.DeleteCommand;
 +import org.apache.cloudstack.storage.command.StorageSubSystemCommand;
 +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
 +import org.apache.cloudstack.storage.to.TemplateObjectTO;
 +import org.apache.cloudstack.storage.to.VolumeObjectTO;
  import org.apache.log4j.Logger;
  import org.apache.log4j.NDC;
  
@@@ -230,15 -226,14 +235,18 @@@ import com.cloud.serializer.GsonHelper
  import com.cloud.storage.Storage;
  import com.cloud.storage.Storage.StoragePoolType;
  import com.cloud.storage.Volume;
+ import com.cloud.storage.VolumeManager;
+ import com.cloud.storage.VolumeManagerImpl;
  import com.cloud.storage.resource.StoragePoolResource;
 -import com.cloud.storage.template.TemplateInfo;
 +import com.cloud.storage.resource.StorageSubsystemCommandHandler;
 +import com.cloud.storage.resource.StorageSubsystemCommandHandlerBase;
 +import com.cloud.storage.resource.VmwareStorageProcessor;
 +import com.cloud.storage.template.TemplateProp;
  import com.cloud.utils.DateUtil;
 +import com.cloud.utils.NumbersUtil;
  import com.cloud.utils.Pair;
  import com.cloud.utils.StringUtils;
+ import com.cloud.utils.component.ComponentContext;
  import com.cloud.utils.db.DB;
  import com.cloud.utils.exception.CloudRuntimeException;
  import com.cloud.utils.exception.ExceptionUtil;
@@@ -338,9 -341,7 +354,9 @@@ public class VmwareResource implements 
      protected Gson _gson;
  
      protected volatile long _cmdSequence = 1;
-     
+ 
 +    protected StorageSubsystemCommandHandler storageHandler;
 +
      protected static HashMap<VirtualMachinePowerState, State> s_statesTable;
      static {
          s_statesTable = new HashMap<VirtualMachinePowerState, State>();
@@@ -348,11 -349,7 +364,11 @@@
          s_statesTable.put(VirtualMachinePowerState.POWERED_OFF, State.Stopped);
          s_statesTable.put(VirtualMachinePowerState.SUSPENDED, State.Stopped);
      }
-     
+ 
 +    public Gson getGson() {
 +    	return _gson;
 +    }
 +
      public VmwareResource() {
          _gson = GsonHelper.getGsonLogger();
      }
@@@ -1385,7 -1384,7 +1405,7 @@@
              if(!isVMWareToolsInstalled(vmMo)){
                  String errMsg = "vmware tools is not installed or not running, cannot add nic to vm " + vmName;
                  s_logger.debug(errMsg);
--                return new PlugNicAnswer(cmd, false, "Unable to execute PlugNicCommand due to " + errMsg); 
++                return new PlugNicAnswer(cmd, false, "Unable to execute PlugNicCommand due to " + errMsg);
              }
  
              // TODO need a way to specify the control of NIC device type
@@@ -2469,41 -2463,28 +2492,40 @@@
                          s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first()));
                      deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT);
                  }
-                 i++;
              } else {
                  // we will always plugin a CDROM device
-             	
 -                if (volIso != null && volIso.getPath() != null && !volIso.getPath().isEmpty()) {
 -                    Pair<String, ManagedObjectReference> isoDatastoreInfo = getIsoDatastoreInfo(hyperHost, volIso.getPath());
 -                    assert (isoDatastoreInfo != null);
 -                    assert (isoDatastoreInfo.second() != null);
+ 
 -                    deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec();
 +                if (volIso != null) {
 +                	TemplateObjectTO iso = (TemplateObjectTO)volIso.getData();
 +
 +                	if (iso.getPath() != null && !iso.getPath().isEmpty()) {
 +                		DataStoreTO imageStore = iso.getDataStore();
 +                		if (!(imageStore instanceof NfsTO)) {
 +                			s_logger.debug("unsupported protocol");
 +                			throw new Exception("unsupported protocol");
 +                		}
 +                		NfsTO nfsImageStore = (NfsTO)imageStore;
 +                		String isoPath = nfsImageStore.getUrl() + File.separator + iso.getPath();
 +                		Pair<String, ManagedObjectReference> isoDatastoreInfo = getIsoDatastoreInfo(hyperHost, isoPath);
 +                		assert (isoDatastoreInfo != null);
 +                		assert (isoDatastoreInfo.second() != null);
 +
 +                		deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec();
-                 		Pair<VirtualDevice, Boolean> isoInfo = VmwareHelper.prepareIsoDevice(vmMo, isoDatastoreInfo.first(), isoDatastoreInfo.second(), true, true, i, i + 1);
+                     Pair<VirtualDevice, Boolean> isoInfo = VmwareHelper.prepareIsoDevice(vmMo, isoDatastoreInfo.first(), isoDatastoreInfo.second(), true, true, ideUnitNumber++, i + 1);
 -                    deviceConfigSpecArray[i].setDevice(isoInfo.first());
 -                    if (isoInfo.second()) {
 -                        if(s_logger.isDebugEnabled())
 -                            s_logger.debug("Prepare ISO volume at new device " + _gson.toJson(isoInfo.first()));
 -                        deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD);
 -                    } else {
 -                        if(s_logger.isDebugEnabled())
 -                            s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first()));
 -                        deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT);
 -                    }
 +                		deviceConfigSpecArray[i].setDevice(isoInfo.first());
 +                		if (isoInfo.second()) {
 +                			if(s_logger.isDebugEnabled())
 +                				s_logger.debug("Prepare ISO volume at new device " + _gson.toJson(isoInfo.first()));
 +                			deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD);
 +                		} else {
 +                			if(s_logger.isDebugEnabled())
 +                				s_logger.debug("Prepare ISO volume at existing device " + _gson.toJson(isoInfo.first()));
 +                			deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT);
 +                		}
 +                	}
                  } else {
                      deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec();
-                     Pair<VirtualDevice, Boolean> isoInfo = VmwareHelper.prepareIsoDevice(vmMo, null, null, true, true, i, i + 1);
+                     Pair<VirtualDevice, Boolean> isoInfo = VmwareHelper.prepareIsoDevice(vmMo, null, null, true, true, ideUnitNumber++, i + 1);
                      deviceConfigSpecArray[i].setDevice(isoInfo.first());
                      if (isoInfo.second()) {
                          if(s_logger.isDebugEnabled())
@@@ -2517,10 -2498,9 +2539,9 @@@
                          deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.EDIT);
                      }
                  }
-                 i++;
              }
- 
+             i++;
 -            for (VolumeTO vol : sortVolumesByDeviceId(disks)) {
 +            for (DiskTO vol : sortVolumesByDeviceId(disks)) {
                  deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec();
  
                  if (vol.getType() == Volume.Type.ISO) {
@@@ -2555,17 -2535,20 +2576,20 @@@
                          String[] diskChain = _gson.fromJson(chainInfo, String[].class);
                          if (diskChain == null || diskChain.length < 1) {
                              s_logger.warn("Empty previously-saved chain info, fall back to the original");
-                             device = VmwareHelper.prepareDiskDevice(vmMo, controllerKey, new String[] { datastoreDiskPath }, volumeDsDetails.first(), i, i + 1);
 -                            device = VmwareHelper.prepareDiskDevice(vmMo, controllerKey, new String[] { datastoreDiskPath }, volumeDsDetails.first(), 
++                            device = VmwareHelper.prepareDiskDevice(vmMo, controllerKey, new String[] { datastoreDiskPath }, volumeDsDetails.first(),
+                             		(controllerKey==ideControllerKey)?ideUnitNumber++:scsiUnitNumber++, i + 1);
                          } else {
                              s_logger.info("Attach the disk with stored chain info: " + chainInfo);
                              for (int j = 0; j < diskChain.length; j++) {
                                  diskChain[j] = String.format("[%s] %s", volumeDsDetails.second().getName(), diskChain[j]);
                              }
  
-                             device = VmwareHelper.prepareDiskDevice(vmMo, controllerKey, diskChain, volumeDsDetails.first(), i, i + 1);
 -                            device = VmwareHelper.prepareDiskDevice(vmMo, controllerKey, diskChain, volumeDsDetails.first(), 
++                            device = VmwareHelper.prepareDiskDevice(vmMo, controllerKey, diskChain, volumeDsDetails.first(),
+                             		(controllerKey==ideControllerKey)?ideUnitNumber++:scsiUnitNumber++, i + 1);
                          }
                      } else {
-                         device = VmwareHelper.prepareDiskDevice(vmMo, controllerKey, new String[] { datastoreDiskPath }, volumeDsDetails.first(), i, i + 1);
+                         device = VmwareHelper.prepareDiskDevice(vmMo, controllerKey, new String[] { datastoreDiskPath }, volumeDsDetails.first(),
+                         		(controllerKey==ideControllerKey)?ideUnitNumber++:scsiUnitNumber++, i + 1);
                      }
                      deviceConfigSpecArray[i].setDevice(device);
                      deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD);
@@@ -3412,6 -3393,254 +3436,254 @@@
          }
      }
  
+     protected Answer execute(MigrateWithStorageCommand cmd) {
+ 
+         if (s_logger.isInfoEnabled()) {
+             s_logger.info("Executing resource MigrateWithStorageCommand: " + _gson.toJson(cmd));
+         }
+ 
+         VirtualMachineTO vmTo = cmd.getVirtualMachine();
+         final String vmName = vmTo.getName();
+ 
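+         // Remember the VM's current state so it can be restored in the finally
+         // block, and mark the VM as Stopping for the duration of the migration.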
+         State state = null;
+         synchronized (_vms) {
+             state = _vms.get(vmName);
+             _vms.put(vmName, State.Stopping);
+         }
+ 
+         VmwareHypervisorHost srcHyperHost = null;
+         VmwareHypervisorHost tgtHyperHost = null;
+         VirtualMachineMO vmMo = null;
+ 
+         ManagedObjectReference morDsAtTarget = null;
+         ManagedObjectReference morDsAtSource = null;
+         ManagedObjectReference morDc = null;
+         ManagedObjectReference morDcOfTargetHost = null;
+         ManagedObjectReference morTgtHost = new ManagedObjectReference();
+         VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec();
+         List<VirtualMachineRelocateSpecDiskLocator> diskLocators = new ArrayList<VirtualMachineRelocateSpecDiskLocator>();
+         VirtualMachineRelocateSpecDiskLocator diskLocator = null;
+ 
+         boolean isFirstDs = true;
+         String srcDiskName = "";
+         String srcDsName = "";
+         String tgtDsName = "";
+         String tgtDsNfsHost;
+         String tgtDsNfsPath;
+         int tgtDsNfsPort;
+         VolumeTO volume;
+         StorageFilerTO filerTo;
+         Set<String> mountedDatastoresAtSource = new HashSet<String>();
+ 
+         Map<VolumeTO, StorageFilerTO> volToFiler = cmd.getVolumeToFiler();
+         String tgtHost = cmd.getTargetHost();
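+         // The target host is encoded as "<MOR type>:<MOR value>@<host address>";
+         // rebuild the managed object reference from the portion before the '@'.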
+         String tgtHostMorInfo = tgtHost.split("@")[0];
+         morTgtHost.setType(tgtHostMorInfo.split(":")[0]);
+         morTgtHost.setValue(tgtHostMorInfo.split(":")[1]);
+ 
+         try {
+             srcHyperHost = getHyperHost(getServiceContext());
+             tgtHyperHost = new HostMO(getServiceContext(), morTgtHost);
+             morDc = srcHyperHost.getHyperHostDatacenter();
+             morDcOfTargetHost = tgtHyperHost.getHyperHostDatacenter();
+             // Compare datacenters by MOR value; MOR reference equality is not reliable.
+             if (!morDc.getValue().equals(morDcOfTargetHost.getValue())) {
+                 String msg = "Source host & target host are in different datacenters";
+                 throw new CloudRuntimeException(msg);
+             }
+             VmwareManager mgr = tgtHyperHost.getContext().getStockObject(VmwareManager.CONTEXT_STOCK_NAME);
+ 
+             // find VM through datacenter (VM is not at the target host yet)
+             vmMo = srcHyperHost.findVmOnPeerHyperHost(vmName);
+             if (vmMo == null) {
+                 String msg = "VM " + vmName + " does not exist in VMware datacenter " + morDc.getValue();
+                 s_logger.error(msg);
+                 throw new Exception(msg);
+             }
+ 
+             // Get details of each target datastore & attach to source host.
+             for (Entry<VolumeTO, StorageFilerTO> entry : volToFiler.entrySet()) {
+                 volume = entry.getKey();
+                 filerTo = entry.getValue();
+ 
+                 srcDsName = volume.getPoolUuid().replace("-", "");
+                 tgtDsName = filerTo.getUuid().replace("-", "");
+                 tgtDsNfsHost = filerTo.getHost();
+                 tgtDsNfsPath = filerTo.getPath();
+                 tgtDsNfsPort = filerTo.getPort();
+ 
+                 s_logger.debug("Preparing spec for volume : " + volume.getName());
+                 morDsAtTarget = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(tgtHyperHost, filerTo.getUuid());
+                 if (morDsAtTarget == null) {
+                     String msg = "Unable to find the mounted datastore with uuid " + morDsAtTarget + " to execute MigrateWithStorageCommand";
+                     s_logger.error(msg);
+                     throw new Exception(msg);
+                 }
+                 morDsAtSource = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(srcHyperHost, filerTo.getUuid());
+                 if (morDsAtSource == null) {
+                     morDsAtSource = srcHyperHost.mountDatastore(false, tgtDsNfsHost, tgtDsNfsPort, tgtDsNfsPath, tgtDsName);
+                     if (morDsAtSource == null) {
+                         throw new Exception("Unable to mount datastore " + tgtDsNfsHost + ":/" + tgtDsNfsPath + " on " + _hostName);
+                     }
+                     mountedDatastoresAtSource.add(tgtDsName);
+                     s_logger.debug("Mounted datastore " + tgtDsNfsHost + ":/" + tgtDsNfsPath + " on " + _hostName);
+                 }
+ 
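+                 // The relocate spec's top-level datastore receives the VM's configuration
+                 // files; each disk is placed individually via a disk locator below.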
+                 if (isFirstDs) {
+                     relocateSpec.setDatastore(morDsAtSource);
+                     isFirstDs = false;
+                 }
+                 srcDiskName = String.format("[%s] %s.vmdk", srcDsName, volume.getPath());
+                 diskLocator = new VirtualMachineRelocateSpecDiskLocator();
+                 diskLocator.setDatastore(morDsAtSource);
+                 diskLocator.setDiskId(getVirtualDiskInfo(vmMo, srcDiskName));
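+                 // Each locator maps an existing virtual disk (looked up by its device key)
+                 // to the corresponding target datastore.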
+ 
+                 diskLocators.add(diskLocator);
+ 
+             }
+             relocateSpec.getDisk().addAll(diskLocators);
+ 
+             // Prepare network at target before migration
+             NicTO[] nics = vmTo.getNics();
+             for (NicTO nic : nics) {
+                 // prepare network on the host
+                 prepareNetworkFromNicInfo(new HostMO(getServiceContext(), morTgtHost), nic, false, vmTo.getType());
+             }
+ 
+             // Ensure secondary storage mounted on target host
+             String secStoreUrl = mgr.getSecondaryStorageStoreUrl(Long.parseLong(_dcId));
+             if(secStoreUrl == null) {
+                 String msg = "secondary storage for dc " + _dcId + " is not ready yet?";
+                 throw new Exception(msg);
+             }
+             mgr.prepareSecondaryStorageStore(secStoreUrl);
+             ManagedObjectReference morSecDs = prepareSecondaryDatastoreOnHost(secStoreUrl);
+             if (morSecDs == null) {
+                 String msg = "Failed to prepare secondary storage on host, secondary store url: " + secStoreUrl;
+                 throw new Exception(msg);
+             }
+ 
+             // Change datastore
+             if (!vmMo.changeDatastore(relocateSpec)) {
+                 throw new Exception("Change datastore operation failed during storage migration");
+             } else {
+                 s_logger.debug("Successfully migrated storage of VM " + vmName + " to target datastore(s)");
+             }
+ 
+             // Change host
+             ManagedObjectReference morPool = tgtHyperHost.getHyperHostOwnerResourcePool();
+             if (!vmMo.migrate(morPool, tgtHyperHost.getMor())) {
+                 throw new Exception("Change datastore operation failed during storage migration");
+             } else {
+                 s_logger.debug("Successfully relocated VM " + vmName + " from " + _hostName + " to " + tgtHyperHost.getHyperHostName());
+             }
+ 
+             state = State.Stopping;
 -            List<VolumeTO> volumeToList = null;
++            List<VolumeObjectTO> volumeToList = null;
+             return new MigrateWithStorageAnswer(cmd, volumeToList);
+         } catch (Throwable e) {
+             if (e instanceof RemoteException) {
+                 s_logger.warn("Encountered remote exception at vCenter, invalidating VMware session context");
+                 invalidateServiceContext();
+             }
+ 
+             String msg = "MigrationCommand failed due to " + VmwareHelper.getExceptionMessage(e);
+             s_logger.warn(msg, e);
+             return new MigrateWithStorageAnswer(cmd, (Exception) e);
+         } finally {
+             // Cleanup datastores mounted on source host
+             for(String mountedDatastore : mountedDatastoresAtSource) {
+                 s_logger.debug("Attempting to unmount datastore " + mountedDatastore + " at " + _hostName);
+                 try {
+                     srcHyperHost.unmountDatastore(mountedDatastore);
+                     s_logger.debug("Successfully unmounted datastore " + mountedDatastore + " at " + _hostName);
+                 } catch (Exception unmountEx) {
+                     s_logger.debug("Failed to unmount datastore " + mountedDatastore + " at " + _hostName +
+                             ". Seems the datastore is still being used by " + _hostName +
+                             ". Please unmount manually to cleanup.");
+                 }
+             }
+             synchronized (_vms) {
+                 _vms.put(vmName, state);
+             }
+         }
+     }
+ 
+     private Answer execute(MigrateVolumeCommand cmd) {
+         String volumePath = cmd.getVolumePath();
+         StorageFilerTO poolTo = cmd.getPool();
+ 
+         if (s_logger.isInfoEnabled()) {
+             s_logger.info("Executing resource MigrateVolumeCommand: " + _gson.toJson(cmd));
+         }
+ 
+         VmwareContext context = getServiceContext();
+         VmwareManager mgr = context.getStockObject(VmwareManager.CONTEXT_STOCK_NAME);
+         final String vmName = volMgr.getVmNameFromVolumeId(cmd.getVolumeId());
+ 
+         VirtualMachineMO vmMo = null;
+         VmwareHypervisorHost srcHyperHost = null;
+ 
+         ManagedObjectReference morDs = null;
+         ManagedObjectReference morDc = null;
+         VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec();
+         VirtualMachineRelocateSpecDiskLocator diskLocator = null;
+ 
+         String srcDiskName = "";
+         String srcDsName = "";
+         String tgtDsName = "";
+ 
+         try {
+             srcHyperHost = getHyperHost(getServiceContext());
+             morDc = srcHyperHost.getHyperHostDatacenter();
+             srcDsName = volMgr.getStoragePoolOfVolume(cmd.getVolumeId());
+             tgtDsName = poolTo.getUuid().replace("-", "");
+ 
+             // find VM through datacenter (VM is not at the target host yet)
+             vmMo = srcHyperHost.findVmOnPeerHyperHost(vmName);
+             if (vmMo == null) {
+                 String msg = "VM " + vmName + " does not exist in VMware datacenter " + morDc.getValue();
+                 s_logger.error(msg);
+                 throw new Exception(msg);
+             }
+             morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(srcHyperHost, tgtDsName);
+             if (morDs == null) {
+                 String msg = "Unable to find the mounted datastore with name " + tgtDsName + " to execute MigrateVolumeCommand";
+                 s_logger.error(msg);
+                 throw new Exception(msg);
+             }
+ 
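+             // vSphere addresses disks by datastore path, e.g. "[datastoreName] volumePath.vmdk".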
+             srcDiskName = String.format("[%s] %s.vmdk", srcDsName, volumePath);
+             diskLocator = new VirtualMachineRelocateSpecDiskLocator();
+             diskLocator.setDatastore(morDs);
+             diskLocator.setDiskId(getVirtualDiskInfo(vmMo, srcDiskName));
+ 
+             relocateSpec.getDisk().add(diskLocator);
+ 
+             // Change datastore
+             if (!vmMo.changeDatastore(relocateSpec)) {
+                 throw new Exception("Change datastore operation failed during volume migration");
+             } else {
+                 s_logger.debug("Successfully migrated volume " + volumePath + " to target datastore " + tgtDsName);
+             }
+ 
+             return new MigrateVolumeAnswer(cmd, true, null, volumePath);
+         } catch (Exception e) {
+             String msg = "Catch Exception " + e.getClass().getName() + " due to " + e.toString();
+             s_logger.error(msg, e);
+             return new MigrateVolumeAnswer(cmd, false, msg, null);
+         }
+     }
+ 
+     private int getVirtualDiskInfo(VirtualMachineMO vmMo, String srcDiskName) throws Exception {
+         Pair<VirtualDisk, String> deviceInfo = vmMo.getDiskDevice(srcDiskName, false);
+         if(deviceInfo == null) {
+             throw new Exception("No such disk device: " + srcDiskName);
+         }
+         return deviceInfo.first().getKey();
+     }
+ 
      private VmwareHypervisorHost getTargetHyperHost(DatacenterMO dcMo, String destIp) throws Exception {
  
          VmwareManager mgr = dcMo.getContext().getStockObject(VmwareManager.CONTEXT_STOCK_NAME);
@@@ -5537,5 -5756,5 +5811,5 @@@
      private boolean isVMWareToolsInstalled(VirtualMachineMO vmMo) throws Exception{
          GuestInfo guestInfo = vmMo.getVmGuestInfo();
          return (guestInfo != null && guestInfo.getGuestState() != null && guestInfo.getGuestState().equalsIgnoreCase("running"));
--    }	
++    }
  }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/18aeef3e/plugins/hypervisors/vmware/src/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java
----------------------------------------------------------------------
diff --cc plugins/hypervisors/vmware/src/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java
index 0000000,11be609..bdba61b
mode 000000,100644..100644
--- a/plugins/hypervisors/vmware/src/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java
+++ b/plugins/hypervisors/vmware/src/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java
@@@ -1,0 -1,212 +1,205 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *   http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing,
+  * software distributed under the License is distributed on an
+  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+  * KIND, either express or implied.  See the License for the
+  * specific language governing permissions and limitations
+  * under the License.
+  */
+ 
+ package org.apache.cloudstack.storage.motion;
+ 
+ import java.util.HashMap;
+ import java.util.Map;
 -import java.util.List;
 -
+ import javax.inject.Inject;
+ 
+ import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
++import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy;
+ import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
+ import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
+ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
+ import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
+ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+ import org.apache.log4j.Logger;
+ import org.springframework.stereotype.Component;
+ 
+ import com.cloud.agent.AgentManager;
+ import com.cloud.agent.api.Answer;
+ import com.cloud.agent.api.MigrateWithStorageAnswer;
+ import com.cloud.agent.api.MigrateWithStorageCommand;
 -import com.cloud.agent.api.MigrateWithStorageCompleteAnswer;
 -import com.cloud.agent.api.MigrateWithStorageCompleteCommand;
 -import com.cloud.agent.api.MigrateWithStorageReceiveAnswer;
 -import com.cloud.agent.api.MigrateWithStorageReceiveCommand;
 -import com.cloud.agent.api.MigrateWithStorageSendAnswer;
 -import com.cloud.agent.api.MigrateWithStorageSendCommand;
+ import com.cloud.agent.api.to.StorageFilerTO;
+ import com.cloud.agent.api.to.VirtualMachineTO;
+ import com.cloud.agent.api.to.VolumeTO;
+ import com.cloud.exception.AgentUnavailableException;
+ import com.cloud.exception.OperationTimedoutException;
+ import com.cloud.host.Host;
+ import com.cloud.hypervisor.Hypervisor.HypervisorType;
+ import com.cloud.storage.StoragePool;
+ import com.cloud.storage.VolumeVO;
+ import com.cloud.storage.dao.VolumeDao;
+ import com.cloud.utils.exception.CloudRuntimeException;
+ import com.cloud.vm.VMInstanceVO;
+ import com.cloud.vm.dao.VMInstanceDao;
+ 
+ @Component
+ public class VmwareStorageMotionStrategy implements DataMotionStrategy {
+     private static final Logger s_logger = Logger.getLogger(VmwareStorageMotionStrategy.class);
+     @Inject AgentManager agentMgr;
+     @Inject VolumeDao volDao;
+     @Inject VolumeDataFactory volFactory;
+     @Inject PrimaryDataStoreDao storagePoolDao;
+     @Inject VMInstanceDao instanceDao;
+ 
+     @Override
+     public boolean canHandle(DataObject srcData, DataObject destData) {
+         return false;
+     }
+ 
+     @Override
+     public boolean canHandle(Map<VolumeInfo, DataStore> volumeMap, Host srcHost, Host destHost) {
+         if (srcHost.getHypervisorType() == HypervisorType.VMware && destHost.getHypervisorType() == HypervisorType.VMware) {
+             s_logger.debug(this.getClass() + " can handle the request because the hosts have VMware hypervisor");
+             return true;
+         }
+         return false;
+     }
+ 
+     @Override
+     public Void copyAsync(DataObject srcData, DataObject destData,
+             AsyncCompletionCallback<CopyCommandResult> callback) {
+         CopyCommandResult result = new CopyCommandResult(null, null);
+         result.setResult("Unsupported operation requested for copying data.");
+         callback.complete(result);
+ 
+         return null;
+     }
+ 
+     @Override
+     public Void copyAsync(Map<VolumeInfo, DataStore> volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost,
+             AsyncCompletionCallback<CopyCommandResult> callback) {
+         Answer answer = null;
+         String errMsg = null;
+         try {
+             VMInstanceVO instance = instanceDao.findById(vmTo.getId());
+             if (instance != null) {
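+                 // Pick the migration path based on whether the source and destination
+                 // hosts share a cluster.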
+                 if (srcHost.getClusterId() == destHost.getClusterId()) {
+                     answer = migrateVmWithVolumesWithinCluster(instance, vmTo, srcHost, destHost, volumeMap);
+                 } else {
+                     answer = migrateVmWithVolumesAcrossCluster(instance, vmTo, srcHost, destHost, volumeMap);
+                 }
+             } else {
+                 throw new CloudRuntimeException("Unsupported operation requested for moving data.");
+             }
+         } catch (Exception e) {
+             s_logger.error("copy failed", e);
+             errMsg = e.toString();
+         }
+ 
+         CopyCommandResult result = new CopyCommandResult(null, answer);
+         result.setResult(errMsg);
+         callback.complete(result);
+         return null;
+     }
+ 
+     private Answer migrateVmWithVolumesAcrossCluster(VMInstanceVO vm, VirtualMachineTO to, Host srcHost,
+             Host destHost, Map<VolumeInfo, DataStore> volumeToPool) throws AgentUnavailableException {
+ 
+         // Initiate migration of a virtual machine with its volumes.
+         try {
+             Map<VolumeTO, StorageFilerTO> volumeToFilerto = new HashMap<VolumeTO, StorageFilerTO>();
+             for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
+                 VolumeInfo volume = entry.getKey();
+                 VolumeTO volumeTo = new VolumeTO(volume, storagePoolDao.findById(volume.getPoolId()));
+                 StorageFilerTO filerTo = new StorageFilerTO((StoragePool)entry.getValue());
+                 volumeToFilerto.put(volumeTo, filerTo);
+             }
+ 
+             // Migration across clusters is done in two phases:
+             // 1. Send a migrate command to the source resource to initiate the migration
+             //    (validations against the target run as part of this step).
+             // 2. Complete the process and update the volume details.
+             MigrateWithStorageCommand migrateWithStorageCmd = new MigrateWithStorageCommand(to, volumeToFilerto, destHost.getGuid());
+             MigrateWithStorageAnswer migrateWithStorageAnswer = (MigrateWithStorageAnswer) agentMgr.send(
+                     srcHost.getId(), migrateWithStorageCmd);
+             if (migrateWithStorageAnswer == null) {
+                 s_logger.error("Migration with storage of vm " + vm+ " to host " + destHost + " failed.");
+                 throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost);
+             } else if (!migrateWithStorageAnswer.getResult()) {
+                 s_logger.error("Migration with storage of vm " + vm+ " failed. Details: " + migrateWithStorageAnswer.getDetails());
+                 throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost +
+                         ". " + migrateWithStorageAnswer.getDetails());
+             } else {
+                 // Update the volume details after migration.
+                 updateVolumesAfterMigration(volumeToPool);
+             }
+             s_logger.debug("Storage migration of VM " + vm.getInstanceName() + " completed successfully. Migrated to host " + destHost.getName());
+ 
+             return migrateWithStorageAnswer;
+         } catch (OperationTimedoutException e) {
+             s_logger.error("Error while migrating vm " + vm + " to host " + destHost, e);
+             throw new AgentUnavailableException("Operation timed out on storage motion for " + vm, destHost.getId());
+         }
+     }
+ 
+     private Answer migrateVmWithVolumesWithinCluster(VMInstanceVO vm, VirtualMachineTO to, Host srcHost,
+             Host destHost, Map<VolumeInfo, DataStore> volumeToPool) throws AgentUnavailableException {
+ 
+         // Initiate migration of a virtual machine with its volumes.
+         try {
+             Map<VolumeTO, StorageFilerTO> volumeToFilerto = new HashMap<VolumeTO, StorageFilerTO>();
+             for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
+                 VolumeInfo volume = entry.getKey();
+                 VolumeTO volumeTo = new VolumeTO(volume, storagePoolDao.findById(volume.getPoolId()));
+                 StorageFilerTO filerTo = new StorageFilerTO((StoragePool)entry.getValue());
+                 volumeToFilerto.put(volumeTo, filerTo);
+             }
+ 
+             MigrateWithStorageCommand command = new MigrateWithStorageCommand(to, volumeToFilerto, destHost.getGuid());
+             MigrateWithStorageAnswer answer = (MigrateWithStorageAnswer) agentMgr.send(srcHost.getId(), command);
+             if (answer == null) {
+                 s_logger.error("Migration with storage of vm " + vm + " failed.");
+                 throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost);
+             } else if (!answer.getResult()) {
+                 s_logger.error("Migration with storage of vm " + vm+ " failed. Details: " + answer.getDetails());
+                 throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost +
+                         ". " + answer.getDetails());
+             } else {
+                 // Update the volume details after migration.
+                 updateVolumesAfterMigration(volumeToPool);
+             }
+ 
+             return answer;
+         } catch (OperationTimedoutException e) {
+             s_logger.error("Error while migrating vm " + vm + " to host " + destHost, e);
+             throw new AgentUnavailableException("Operation timed out on storage motion for " + vm, destHost.getId());
+         }
+     }
+ 
+     private void updateVolumesAfterMigration(Map<VolumeInfo, DataStore> volumeToPool) {
+         for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
+             VolumeInfo volume = entry.getKey();
+             StoragePool pool = (StoragePool)entry.getValue();
+ 
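+             // Re-point the volume at its new pool, remembering the previous pool id.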
+             VolumeVO volumeVO = volDao.findById(volume.getId());
+             Long oldPoolId = volumeVO.getPoolId();
+             volumeVO.setLastPoolId(oldPoolId);
+             volumeVO.setFolder(pool.getPath());
+             volumeVO.setPodId(pool.getPodId());
+             volumeVO.setPoolId(pool.getId());
+ 
+             volDao.update(volume.getId(), volumeVO);
+             s_logger.debug("Volume path was successfully updated for volume " + volume.getName() + " after it was migrated.");
+         }
+     }
+ }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/18aeef3e/plugins/hypervisors/vmware/test/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategyTest.java
----------------------------------------------------------------------
diff --cc plugins/hypervisors/vmware/test/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategyTest.java
index 0000000,ae4f41d..3d2ad57
mode 000000,100644..100644
--- a/plugins/hypervisors/vmware/test/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategyTest.java
+++ b/plugins/hypervisors/vmware/test/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategyTest.java
@@@ -1,0 -1,271 +1,271 @@@
+ // Licensed to the Apache Software Foundation (ASF) under one
+ // or more contributor license agreements.  See the NOTICE file
+ // distributed with this work for additional information
+ // regarding copyright ownership.  The ASF licenses this file
+ // to you under the Apache License, Version 2.0 (the
+ // "License"); you may not use this file except in compliance
+ // with the License.  You may obtain a copy of the License at
+ //
+ //   http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing,
+ // software distributed under the License is distributed on an
+ // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ // KIND, either express or implied.  See the License for the
+ // specific language governing permissions and limitations
+ // under the License.
+ package org.apache.cloudstack.storage.motion;
+ 
+ import static org.junit.Assert.assertFalse;
+ import static org.junit.Assert.assertTrue;
+ import static org.mockito.Matchers.anyLong;
+ import static org.mockito.Matchers.isA;
+ import static org.mockito.Mockito.mock;
+ import static org.mockito.Mockito.when;
+ 
+ import java.io.IOException;
+ import java.util.HashMap;
+ import java.util.Map;
+ 
+ import javax.inject.Inject;
+ import javax.naming.ConfigurationException;
+ 
 -import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult;
+ import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
+ import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
+ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
+ import org.apache.cloudstack.framework.async.AsyncCallFuture;
+ import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
+ import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
+ import org.apache.cloudstack.framework.async.AsyncRpcConext;
++import org.apache.cloudstack.storage.command.CommandResult;
+ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+ import org.apache.cloudstack.test.utils.SpringUtils;
+ import org.junit.Before;
+ import org.junit.BeforeClass;
+ import org.junit.Test;
+ import org.junit.runner.RunWith;
+ import org.mockito.Mockito;
+ import org.springframework.context.annotation.Bean;
+ import org.springframework.context.annotation.ComponentScan;
+ import org.springframework.context.annotation.ComponentScan.Filter;
+ import org.springframework.context.annotation.Configuration;
+ import org.springframework.context.annotation.FilterType;
+ import org.springframework.core.type.classreading.MetadataReader;
+ import org.springframework.core.type.classreading.MetadataReaderFactory;
+ import org.springframework.core.type.filter.TypeFilter;
+ import org.springframework.test.context.ContextConfiguration;
+ import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
+ import org.springframework.test.context.support.AnnotationConfigContextLoader;
+ 
+ import com.cloud.agent.AgentManager;
+ import com.cloud.agent.api.MigrateWithStorageAnswer;
+ import com.cloud.agent.api.MigrateWithStorageCommand;
+ import com.cloud.agent.api.to.VirtualMachineTO;
+ import com.cloud.host.Host;
+ import com.cloud.hypervisor.Hypervisor.HypervisorType;
+ import com.cloud.storage.dao.VolumeDao;
+ import com.cloud.utils.component.ComponentContext;
+ import com.cloud.vm.VMInstanceVO;
+ import com.cloud.vm.dao.VMInstanceDao;
+ 
+ @RunWith(SpringJUnit4ClassRunner.class)
+ @ContextConfiguration(loader = AnnotationConfigContextLoader.class)
+ public class VmwareStorageMotionStrategyTest {
+ 
+     @Inject VmwareStorageMotionStrategy strategy = new VmwareStorageMotionStrategy();
+     @Inject AgentManager agentMgr;
+     @Inject VolumeDao volDao;
+     @Inject VolumeDataFactory volFactory;
+     @Inject PrimaryDataStoreDao storagePoolDao;
+     @Inject VMInstanceDao instanceDao;
+ 
+     CopyCommandResult result;
+ 
+     @BeforeClass
+     public static void setUp() throws ConfigurationException {
+     }
+ 
+     @Before
+     public void testSetUp() {
+         ComponentContext.initComponentsLifeCycle();
+     }
+ 
+     @Test
+     public void testStrategyHandlesVmwareHosts() throws Exception {
+         Host srcHost = mock(Host.class);
+         Host destHost = mock(Host.class);
+         when(srcHost.getHypervisorType()).thenReturn(HypervisorType.VMware);
+         when(destHost.getHypervisorType()).thenReturn(HypervisorType.VMware);
+         Map<VolumeInfo, DataStore> volumeMap = new HashMap<VolumeInfo, DataStore>();
+         boolean canHandle = strategy.canHandle(volumeMap, srcHost, destHost);
+         assertTrue("The strategy is only supposed to handle vmware hosts", canHandle);
+     }
+ 
+     @Test
+     public void testStrategyDoesnotHandlesNonVmwareHosts() throws Exception {
+         Host srcHost = mock(Host.class);
+         Host destHost = mock(Host.class);
+         when(srcHost.getHypervisorType()).thenReturn(HypervisorType.XenServer);
+         when(destHost.getHypervisorType()).thenReturn(HypervisorType.XenServer);
+         Map<VolumeInfo, DataStore> volumeMap = new HashMap<VolumeInfo, DataStore>();
+         boolean canHandle = strategy.canHandle(volumeMap, srcHost, destHost);
+         assertFalse("The strategy is only supposed to handle vmware hosts", canHandle);
+     }
+ 
+     @Test
+     public void testMigrateWithinClusterSuccess() throws Exception {
+         Host srcHost = mock(Host.class);
+         Host destHost = mock(Host.class);
+         when(srcHost.getClusterId()).thenReturn(1L);
+         when(destHost.getClusterId()).thenReturn(1L);
+         Map<VolumeInfo, DataStore> volumeMap = new HashMap<VolumeInfo, DataStore>();
+         VirtualMachineTO to = mock(VirtualMachineTO.class);
+         when(to.getId()).thenReturn(6L);
+         VMInstanceVO instance = mock(VMInstanceVO.class);
+         when(instanceDao.findById(6L)).thenReturn(instance);
+ 
+         MockContext<CommandResult> context = new MockContext<CommandResult>(null, null, volumeMap);
+         AsyncCallbackDispatcher<VmwareStorageMotionStrategyTest, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
+         caller.setCallback(caller.getTarget().mockCallBack(null, null)).setContext(context);
+ 
+         MigrateWithStorageAnswer migAnswerMock = mock(MigrateWithStorageAnswer.class);
+         when(migAnswerMock.getResult()).thenReturn(true);
+         when(agentMgr.send(anyLong(), isA(MigrateWithStorageCommand.class))).thenReturn(migAnswerMock);
+ 
+         strategy.copyAsync(volumeMap, to, srcHost, destHost, caller);
+         assertTrue("Migration within cluster isn't successful.", this.result.isSuccess());
+     }
+ 
+     @Test
+     public void testMigrateWithinClusterFailure() throws Exception {
+         Host srcHost = mock(Host.class);
+         Host destHost = mock(Host.class);
+         when(srcHost.getClusterId()).thenReturn(1L);
+         when(destHost.getClusterId()).thenReturn(1L);
+         Map<VolumeInfo, DataStore> volumeMap = new HashMap<VolumeInfo, DataStore>();
+         VirtualMachineTO to = mock(VirtualMachineTO.class);
+         when(to.getId()).thenReturn(6L);
+         VMInstanceVO instance = mock(VMInstanceVO.class);
+         when(instanceDao.findById(6L)).thenReturn(instance);
+ 
+         MockContext<CommandResult> context = new MockContext<CommandResult>(null, null, volumeMap);
+         AsyncCallbackDispatcher<VmwareStorageMotionStrategyTest, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
+         caller.setCallback(caller.getTarget().mockCallBack(null, null)).setContext(context);
+ 
+         MigrateWithStorageAnswer migAnswerMock = mock(MigrateWithStorageAnswer.class);
+         when(migAnswerMock.getResult()).thenReturn(false);
+         when(agentMgr.send(anyLong(), isA(MigrateWithStorageCommand.class))).thenReturn(migAnswerMock);
+ 
+         strategy.copyAsync(volumeMap, to, srcHost, destHost, caller);
+         assertFalse("Migration within cluster didn't fail.", this.result.isSuccess());
+     }
+ 
+     @Test
+     public void testMigrateAcrossClusterSuccess() throws Exception {
+         Host srcHost = mock(Host.class);
+         Host destHost = mock(Host.class);
+         when(srcHost.getClusterId()).thenReturn(1L);
+         when(destHost.getClusterId()).thenReturn(2L);
+         Map<VolumeInfo, DataStore> volumeMap = new HashMap<VolumeInfo, DataStore>();
+         VirtualMachineTO to = mock(VirtualMachineTO.class);
+         when(to.getId()).thenReturn(6L);
+         VMInstanceVO instance = mock(VMInstanceVO.class);
+         when(instanceDao.findById(6L)).thenReturn(instance);
+ 
+         MockContext<CommandResult> context = new MockContext<CommandResult>(null, null, volumeMap);
+         AsyncCallbackDispatcher<VmwareStorageMotionStrategyTest, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
+         caller.setCallback(caller.getTarget().mockCallBack(null, null)).setContext(context);
+ 
+         MigrateWithStorageAnswer migAnswerMock = mock(MigrateWithStorageAnswer.class);
+         when(migAnswerMock.getResult()).thenReturn(true);
+         when(agentMgr.send(anyLong(), isA(MigrateWithStorageCommand.class))).thenReturn(migAnswerMock);
+ 
+         strategy.copyAsync(volumeMap, to, srcHost, destHost, caller);
+         assertTrue("Migration across cluster isn't successful.", this.result.isSuccess());
+     }
+ 
+     @Test
+     public void testMigrateAcrossClusterFailure() throws Exception {
+         Host srcHost = mock(Host.class);
+         Host destHost = mock(Host.class);
+         when(srcHost.getClusterId()).thenReturn(1L);
+         when(destHost.getClusterId()).thenReturn(2L);
+         Map<VolumeInfo, DataStore> volumeMap = new HashMap<VolumeInfo, DataStore>();
+         VirtualMachineTO to = mock(VirtualMachineTO.class);
+         when(to.getId()).thenReturn(6L);
+         VMInstanceVO instance = mock(VMInstanceVO.class);
+         when(instanceDao.findById(6L)).thenReturn(instance);
+ 
+         MockContext<CommandResult> context = new MockContext<CommandResult>(null, null, volumeMap);
+         AsyncCallbackDispatcher<VmwareStorageMotionStrategyTest, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
+         caller.setCallback(caller.getTarget().mockCallBack(null, null)).setContext(context);
+ 
+         MigrateWithStorageAnswer migAnswerMock = mock(MigrateWithStorageAnswer.class);
+         when(migAnswerMock.getResult()).thenReturn(false);
+         when(agentMgr.send(anyLong(), isA(MigrateWithStorageCommand.class))).thenReturn(migAnswerMock);
+ 
+         strategy.copyAsync(volumeMap, to, srcHost, destHost, caller);
+         assertFalse("Migration across cluster didn't fail.", this.result.isSuccess());
+     }
+ 
+     private class MockContext<T> extends AsyncRpcConext<T> {
+         final Map<VolumeInfo, DataStore> volumeToPool;
+         final AsyncCallFuture<CommandResult> future;
+         /**
+          * @param callback the completion callback to notify
+          * @param future future used to wait on the command result
+          * @param volumeToPool map of volumes to their destination data stores
+          */
+         public MockContext(AsyncCompletionCallback<T> callback, AsyncCallFuture<CommandResult> future,
+                 Map<VolumeInfo, DataStore> volumeToPool) {
+             super(callback);
+             this.volumeToPool = volumeToPool;
+             this.future = future;
+         }
+     }
+ 
+     protected Void mockCallBack(AsyncCallbackDispatcher<VmwareStorageMotionStrategyTest,
+             CopyCommandResult> callback, MockContext<CommandResult> context) {
+         this.result = callback.getResult();
+         return null;
+     }
+ 
+     @Configuration
+     @ComponentScan(basePackageClasses = { VmwareStorageMotionStrategy.class },
+             includeFilters = {@Filter(value = TestConfiguration.Library.class, type = FilterType.CUSTOM)},
+             useDefaultFilters = false)
+     public static class TestConfiguration extends SpringUtils.CloudStackTestConfiguration {
+ 
+         @Bean
+         public VolumeDao volumeDao() {
+             return Mockito.mock(VolumeDao.class);
+         }
+ 
+         @Bean
+         public VolumeDataFactory volumeDataFactory() {
+             return Mockito.mock(VolumeDataFactory.class);
+         }
+ 
+         @Bean
+         public PrimaryDataStoreDao primaryDataStoreDao() {
+             return Mockito.mock(PrimaryDataStoreDao.class);
+         }
+ 
+         @Bean
+         public VMInstanceDao vmInstanceDao() {
+             return Mockito.mock(VMInstanceDao.class);
+         }
+ 
+         @Bean
+         public AgentManager agentManager() {
+             return Mockito.mock(AgentManager.class);
+         }
+ 
+         public static class Library implements TypeFilter {
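+             // Limit Spring component scanning to the classes named in @ComponentScan above.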
+             @Override
+             public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException {
+                 ComponentScan cs = TestConfiguration.class.getAnnotation(ComponentScan.class);
+                 return SpringUtils.includedInBasePackageClasses(mdr.getClassMetadata().getClassName(), cs);
+             }
+         }
+     }
+ }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/18aeef3e/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/18aeef3e/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpOssResource.java
----------------------------------------------------------------------
diff --cc plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpOssResource.java
index e5a9559,870049c..5261ca0
--- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpOssResource.java
+++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpOssResource.java
@@@ -17,16 -17,6 +17,17 @@@
  
  package com.cloud.hypervisor.xen.resource;
  
 +import java.io.File;
 +import java.util.ArrayList;
 +import java.util.List;
 +import java.util.Set;
 +
 +import javax.ejb.Local;
 +
 +import org.apache.log4j.Logger;
 +import org.apache.xmlrpc.XmlRpcException;
 +
++
  import com.cloud.agent.api.Answer;
  import com.cloud.agent.api.Command;
  import com.cloud.agent.api.NetworkUsageAnswer;
@@@ -49,10 -39,20 +50,13 @@@ import com.xensource.xenapi.Types.XenAP
  import com.xensource.xenapi.VBD;
  import com.xensource.xenapi.VDI;
  import com.xensource.xenapi.VM;
 -import org.apache.log4j.Logger;
 -import org.apache.xmlrpc.XmlRpcException;
  
 -import javax.ejb.Local;
 -import java.io.File;
 -import java.util.ArrayList;
 -import java.util.List;
 -import java.util.Set;
+ 
  @Local(value=ServerResource.class)
  public class XcpOssResource extends CitrixResourceBase {
- 	 private final static Logger s_logger = Logger.getLogger(XcpServerResource.class);
+     private final static Logger s_logger = Logger.getLogger(XcpOssResource.class);
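+     // 32 MiB in bytes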
+     private static final long mem_32m = 33554432L;
+ 
      @Override
      protected List<File> getPatchFiles() {
          List<File> files = new ArrayList<File>();
@@@ -65,13 -65,13 +69,13 @@@
          files.add(file);
          return files;
      }
--    
++
      @Override
  	protected void fillHostInfo(Connection conn, StartupRoutingCommand cmd) {
      	super.fillHostInfo(conn, cmd);
      	cmd.setCaps(cmd.getCapabilities() + " , hvm");
      }
--    
++
      @Override
      protected String getGuestOsType(String stdType, boolean bootFromCD) {
      	if (stdType.equalsIgnoreCase("Debian GNU/Linux 6(64-bit)")) {
@@@ -80,7 -80,7 +84,7 @@@
      		return CitrixHelper.getXcpGuestOsType(stdType);
      	}
      }
--    
++
      protected VBD createPatchVbd(Connection conn, String vmName, VM vm) throws XmlRpcException, XenAPIException {
      	if (_host.localSRuuid != null) {
      		//create an iso vdi on it
@@@ -88,13 -88,13 +92,13 @@@
      		if (result == null || result.equalsIgnoreCase("Failed")) {
      			 throw new CloudRuntimeException("can not create systemvm vdi");
      		}
--    		
++
      		Set<VDI> vdis = VDI.getByNameLabel(conn, "systemvm-vdi");
      		if (vdis.size() != 1) {
      			throw new CloudRuntimeException("can not find systemvmiso");
      		}
      		VDI systemvmVDI = vdis.iterator().next();
--    		
++
      		VBD.Record cdromVBDR = new VBD.Record();
              cdromVBDR.VM = vm;
              cdromVBDR.empty = false;
@@@ -109,7 -109,7 +113,7 @@@
      		 throw new CloudRuntimeException("can not find local sr");
      	}
      }
--    
++
  
      protected NetworkUsageAnswer execute(NetworkUsageCommand cmd) {
          try {
@@@ -124,10 -124,10 +128,10 @@@
              return answer;
          } catch (Exception ex) {
              s_logger.warn("Failed to get network usage stats due to ", ex);
--            return new NetworkUsageAnswer(cmd, ex); 
++            return new NetworkUsageAnswer(cmd, ex);
          }
      }
--    
++
      @Override
      public Answer executeRequest(Command cmd) {
          if (cmd instanceof NetworkUsageCommand) {
@@@ -136,11 -136,11 +140,11 @@@
              return super.executeRequest(cmd);
          }
      }
--    
++
      @Override
      public StartAnswer execute(StartCommand cmd) {
      	StartAnswer answer = super.execute(cmd);
--    	
++
      	VirtualMachineTO vmSpec = cmd.getVirtualMachine();
      	if (vmSpec.getType() == VirtualMachine.Type.ConsoleProxy) {
      		Connection conn = getConnection();
@@@ -152,10 -152,10 +156,10 @@@
      		}
      		callHostPlugin(conn, "vmops", "setDNATRule", "ip", publicIp, "port", "8443", "add", "true");
      	}
--    	
++
      	return answer;
      }
--    
++
      @Override
      public StopAnswer execute(StopCommand cmd) {
      	StopAnswer answer = super.execute(cmd);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/18aeef3e/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer56FP1Resource.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/18aeef3e/plugins/pom.xml
----------------------------------------------------------------------
diff --cc plugins/pom.xml
index 8a5b598,eab4755..d8ae97a
--- a/plugins/pom.xml
+++ b/plugins/pom.xml
@@@ -35,7 -35,8 +35,8 @@@
      <module>api/rate-limit</module>
      <module>api/discovery</module>
      <module>acl/static-role-based</module>
 -    <module>affinity-group-processors/host-anti-affinity</module>
 +    <module>affinity-group-processors/host-anti-affinity</module>
+     <module>affinity-group-processors/explicit-dedication</module>
      <module>deployment-planners/user-concentrated-pod</module>
      <module>deployment-planners/user-dispersing</module>
      <module>deployment-planners/implicit-dedication</module>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/18aeef3e/plugins/storage/image/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackImageStoreLifeCycleImpl.java
----------------------------------------------------------------------
diff --cc plugins/storage/image/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackImageStoreLifeCycleImpl.java
index a0b64bf,0000000..7ff56f6
mode 100644,000000..100644
--- a/plugins/storage/image/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackImageStoreLifeCycleImpl.java
+++ b/plugins/storage/image/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackImageStoreLifeCycleImpl.java
@@@ -1,171 -1,0 +1,173 @@@
 +// Licensed to the Apache Software Foundation (ASF) under one
 +// or more contributor license agreements.  See the NOTICE file
 +// distributed with this work for additional information
 +// regarding copyright ownership.  The ASF licenses this file
 +// to you under the Apache License, Version 2.0 (the
 +// "License"); you may not use this file except in compliance
 +// with the License.  You may obtain a copy of the License at
 +//
 +//   http://www.apache.org/licenses/LICENSE-2.0
 +//
 +// Unless required by applicable law or agreed to in writing,
 +// software distributed under the License is distributed on an
 +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 +// KIND, either express or implied.  See the License for the
 +// specific language governing permissions and limitations
 +// under the License.
 +package org.apache.cloudstack.storage.datastore.lifecycle;
 +
 +import java.net.URI;
 +import java.net.URISyntaxException;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +import javax.inject.Inject;
 +
 +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
 +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
 +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
 +import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
 +import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
 +import org.apache.cloudstack.storage.image.datastore.ImageStoreHelper;
 +import org.apache.cloudstack.storage.image.datastore.ImageStoreProviderManager;
 +import org.apache.cloudstack.storage.image.store.lifecycle.ImageStoreLifeCycle;
 +import org.apache.log4j.Logger;
 +
 +import com.cloud.agent.api.StoragePoolInfo;
 +import com.cloud.exception.InvalidParameterValueException;
++import com.cloud.hypervisor.Hypervisor.HypervisorType;
 +import com.cloud.resource.Discoverer;
 +import com.cloud.resource.ResourceManager;
 +import com.cloud.storage.DataStoreRole;
 +import com.cloud.storage.ScopeType;
 +import com.cloud.utils.UriUtils;
 +
 +public class CloudStackImageStoreLifeCycleImpl implements ImageStoreLifeCycle {
 +
 +    private static final Logger s_logger = Logger.getLogger(CloudStackImageStoreLifeCycleImpl.class);
 +    @Inject
 +    protected ResourceManager _resourceMgr;
 +    @Inject
 +    protected ImageStoreDao imageStoreDao;
 +    @Inject
 +    ImageStoreHelper imageStoreHelper;
 +    @Inject
 +    ImageStoreProviderManager imageStoreMgr;
 +
 +    protected List<? extends Discoverer> _discoverers;
 +
 +    public List<? extends Discoverer> getDiscoverers() {
 +        return _discoverers;
 +    }
 +
 +    public void setDiscoverers(List<? extends Discoverer> _discoverers) {
 +        this._discoverers = _discoverers;
 +    }
 +
 +    public CloudStackImageStoreLifeCycleImpl() {
 +    }
 +
 +    @SuppressWarnings("unchecked")
 +    @Override
 +    public DataStore initialize(Map<String, Object> dsInfos) {
 +
 +        Long dcId = (Long) dsInfos.get("zoneId");
 +        String url = (String) dsInfos.get("url");
 +        String name = (String) dsInfos.get("name");
 +        if (name == null) {
 +            name = url;
 +        }
 +        String providerName = (String) dsInfos.get("providerName");
 +        DataStoreRole role = (DataStoreRole) dsInfos.get("role");
 +        Map<String, String> details = (Map<String, String>) dsInfos.get("details");
 +
 +        s_logger.info("Trying to add a new host at " + url + " in data center " + dcId);
 +
 +        URI uri = null;
 +        try {
 +            uri = new URI(UriUtils.encodeURIComponent(url));
 +            if (uri.getScheme() == null) {
 +                throw new InvalidParameterValueException("uri.scheme is null " + url + ", add nfs:// as a prefix");
 +            } else if (uri.getScheme().equalsIgnoreCase("nfs")) {
 +                if (uri.getHost() == null || uri.getHost().equalsIgnoreCase("") || uri.getPath() == null
 +                        || uri.getPath().equalsIgnoreCase("")) {
 +                    throw new InvalidParameterValueException(
 +                            "Your host and/or path is wrong.  Make sure it's of the format nfs://hostname/path");
 +                }
 +            }
 +        } catch (URISyntaxException e) {
 +            throw new InvalidParameterValueException(url + " is not a valid uri");
 +        }
 +
 +        if (dcId == null) {
 +            throw new InvalidParameterValueException(
 +                    "DataCenter id is null, and cloudstack default image storehas to be associated with a data center");
 +        }
 +
 +        Map<String, Object> imageStoreParameters = new HashMap<String, Object>();
 +        imageStoreParameters.put("name", name);
 +        imageStoreParameters.put("zoneId", dcId);
 +        imageStoreParameters.put("url", url);
 +        imageStoreParameters.put("protocol", uri.getScheme().toLowerCase());
 +        imageStoreParameters.put("scope", ScopeType.ZONE); // default cloudstack
 +                                                           // provider only
 +                                                           // supports zone-wide
 +                                                           // image store
 +        imageStoreParameters.put("providerName", providerName);
 +        imageStoreParameters.put("role", role);
 +
 +        ImageStoreVO ids = imageStoreHelper.createImageStore(imageStoreParameters, details);
 +        return imageStoreMgr.getImageStore(ids.getId());
 +    }
 +
 +    @Override
 +    public boolean attachCluster(DataStore store, ClusterScope scope) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
++
 +    @Override
-     public boolean attachZone(DataStore dataStore, ZoneScope scope) {
++    public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean dettach() {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean unmanaged() {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean maintain(DataStore store) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean cancelMaintain(DataStore store) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean deleteDataStore(DataStore store) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +}

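Across this merge every ImageStoreLifeCycle implementation picks up a HypervisorType parameter on attachZone. The fragment below is reconstructed from the @Override methods in the class above, only to make the changed signature visible in one place; the authoritative declaration is org.apache.cloudstack.storage.image.store.lifecycle.ImageStoreLifeCycle:

    // Reconstructed fragment; types match the imports in the class above.
    public interface ImageStoreLifeCycle {
        DataStore initialize(Map<String, Object> dsInfos);
        boolean attachCluster(DataStore store, ClusterScope scope);
        boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo);
        boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType); // new parameter
        boolean dettach();
        boolean unmanaged();
        boolean maintain(DataStore store);
        boolean cancelMaintain(DataStore store);
        boolean deleteDataStore(DataStore store);
    }
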
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/18aeef3e/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/lifecycle/S3ImageStoreLifeCycleImpl.java
----------------------------------------------------------------------
diff --cc plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/lifecycle/S3ImageStoreLifeCycleImpl.java
index 674a13d,0000000..6965a15
mode 100644,000000..100644
--- a/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/lifecycle/S3ImageStoreLifeCycleImpl.java
+++ b/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/lifecycle/S3ImageStoreLifeCycleImpl.java
@@@ -1,160 -1,0 +1,163 @@@
 +// Licensed to the Apache Software Foundation (ASF) under one
 +// or more contributor license agreements.  See the NOTICE file
 +// distributed with this work for additional information
 +// regarding copyright ownership.  The ASF licenses this file
 +// to you under the Apache License, Version 2.0 (the
 +// "License"); you may not use this file except in compliance
 +// with the License.  You may obtain a copy of the License at
 +//
 +//   http://www.apache.org/licenses/LICENSE-2.0
 +//
 +// Unless required by applicable law or agreed to in writing,
 +// software distributed under the License is distributed on an
 +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 +// KIND, either express or implied.  See the License for the
 +// specific language governing permissions and limitations
 +// under the License.
 +package org.apache.cloudstack.storage.datastore.lifecycle;
 +
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +import javax.inject.Inject;
 +
 +import org.apache.cloudstack.api.ApiConstants;
 +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
 +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
 +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
 +import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
 +import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
 +import org.apache.cloudstack.storage.image.datastore.ImageStoreHelper;
 +import org.apache.cloudstack.storage.image.datastore.ImageStoreProviderManager;
 +import org.apache.cloudstack.storage.image.store.lifecycle.ImageStoreLifeCycle;
 +import org.apache.log4j.Logger;
 +
 +import com.cloud.agent.api.StoragePoolInfo;
++import com.cloud.hypervisor.Hypervisor.HypervisorType;
 +import com.cloud.resource.Discoverer;
 +import com.cloud.resource.ResourceManager;
 +import com.cloud.storage.DataStoreRole;
 +import com.cloud.storage.ScopeType;
 +import com.cloud.storage.s3.S3Manager;
 +
 +public class S3ImageStoreLifeCycleImpl implements ImageStoreLifeCycle {
 +
 +    private static final Logger s_logger = Logger.getLogger(S3ImageStoreLifeCycleImpl.class);
 +    @Inject
 +    protected ResourceManager _resourceMgr;
 +    @Inject
 +    protected ImageStoreDao imageStoreDao;
 +    @Inject
 +    ImageStoreHelper imageStoreHelper;
 +    @Inject
 +    ImageStoreProviderManager imageStoreMgr;
 +    @Inject
 +    S3Manager _s3Mgr;
 +
 +    protected List<? extends Discoverer> _discoverers;
 +
 +    public List<? extends Discoverer> getDiscoverers() {
 +        return _discoverers;
 +    }
 +
 +    public void setDiscoverers(List<? extends Discoverer> _discoverers) {
 +        this._discoverers = _discoverers;
 +    }
 +
 +    public S3ImageStoreLifeCycleImpl() {
 +    }
 +
 +    @SuppressWarnings("unchecked")
 +    @Override
 +    public DataStore initialize(Map<String, Object> dsInfos) {
 +
 +        Long dcId = (Long) dsInfos.get("zoneId");
 +        String url = (String) dsInfos.get("url");
 +        String name = (String) dsInfos.get("name");
 +        String providerName = (String) dsInfos.get("providerName");
 +        ScopeType scope = (ScopeType) dsInfos.get("scope");
 +        DataStoreRole role = (DataStoreRole) dsInfos.get("role");
 +        Map<String, String> details = (Map<String, String>) dsInfos.get("details");
 +
 +        s_logger.info("Trying to add a S3 store in data center " + dcId);
 +
 +        /*
 +         * try{ // verify S3 parameters _s3Mgr.verifyS3Fields(details); } catch
 +         * (DiscoveryException ex){ throw new
 +         * InvalidParameterValueException("failed to verify S3 parameters!"); }
 +         */
 +
 +        Map<String, Object> imageStoreParameters = new HashMap<String, Object>();
 +        imageStoreParameters.put("name", name);
 +        imageStoreParameters.put("zoneId", dcId);
 +        imageStoreParameters.put("url", url);
 +        String protocol = "http";
 +        String useHttps = details.get(ApiConstants.S3_HTTPS_FLAG);
 +        if (useHttps != null && Boolean.parseBoolean(useHttps)) {
 +            protocol = "https";
 +        }
 +        imageStoreParameters.put("protocol", protocol);
 +        if (scope != null) {
 +            imageStoreParameters.put("scope", scope);
 +        } else {
 +            imageStoreParameters.put("scope", ScopeType.REGION);
 +        }
 +        imageStoreParameters.put("providerName", providerName);
 +        imageStoreParameters.put("role", role);
 +
 +        ImageStoreVO ids = imageStoreHelper.createImageStore(imageStoreParameters, details);
 +        return imageStoreMgr.getImageStore(ids.getId());
 +    }
 +
 +    @Override
 +    public boolean attachCluster(DataStore store, ClusterScope scope) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
++
++
 +    @Override
-     public boolean attachZone(DataStore dataStore, ZoneScope scope) {
++    public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean dettach() {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean unmanaged() {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean maintain(DataStore store) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean cancelMaintain(DataStore store) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean deleteDataStore(DataStore store) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +}

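The S3 flavor derives its endpoint protocol from the S3_HTTPS_FLAG entry in the details map, defaulting to plain http. A standalone sketch of that selection (the helper is hypothetical; ApiConstants.S3_HTTPS_FLAG is the key used above, passed in here to keep the sketch JDK-only):

    import java.util.Map;

    // Hypothetical helper mirroring the protocol selection above: http
    // unless the https flag is present and parses to true.
    static String resolveS3Protocol(Map<String, String> details, String httpsFlagKey) {
        String useHttps = details.get(httpsFlagKey);
        return (useHttps != null && Boolean.parseBoolean(useHttps)) ? "https" : "http";
    }
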
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/18aeef3e/plugins/storage/image/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SampleImageStoreLifeCycleImpl.java
----------------------------------------------------------------------
diff --cc plugins/storage/image/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SampleImageStoreLifeCycleImpl.java
index e001c0b,0000000..c7e4801
mode 100644,000000..100644
--- a/plugins/storage/image/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SampleImageStoreLifeCycleImpl.java
+++ b/plugins/storage/image/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SampleImageStoreLifeCycleImpl.java
@@@ -1,99 -1,0 +1,100 @@@
 +// Licensed to the Apache Software Foundation (ASF) under one
 +// or more contributor license agreements.  See the NOTICE file
 +// distributed with this work for additional information
 +// regarding copyright ownership.  The ASF licenses this file
 +// to you under the Apache License, Version 2.0 (the
 +// "License"); you may not use this file except in compliance
 +// with the License.  You may obtain a copy of the License at
 +//
 +//   http://www.apache.org/licenses/LICENSE-2.0
 +//
 +// Unless required by applicable law or agreed to in writing,
 +// software distributed under the License is distributed on an
 +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 +// KIND, either express or implied.  See the License for the
 +// specific language governing permissions and limitations
 +// under the License.
 +package org.apache.cloudstack.storage.datastore.lifecycle;
 +
 +import java.util.Map;
 +
 +import javax.inject.Inject;
 +
 +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
 +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
 +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
 +import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
 +import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
 +import org.apache.cloudstack.storage.image.datastore.ImageStoreHelper;
 +import org.apache.cloudstack.storage.image.datastore.ImageStoreProviderManager;
 +import org.apache.cloudstack.storage.image.store.lifecycle.ImageStoreLifeCycle;
 +
 +import com.cloud.agent.api.StoragePoolInfo;
++import com.cloud.hypervisor.Hypervisor.HypervisorType;
 +
 +public class SampleImageStoreLifeCycleImpl implements ImageStoreLifeCycle {
 +    @Inject
 +    protected ImageStoreDao imageStoreDao;
 +    @Inject
 +    ImageStoreHelper imageStoreHelper;
 +    @Inject
 +    ImageStoreProviderManager imageStoreMgr;
 +
 +    public SampleImageStoreLifeCycleImpl() {
 +    }
 +
 +    @Override
 +    public DataStore initialize(Map<String, Object> dsInfos) {
 +        ImageStoreVO ids = imageStoreHelper.createImageStore(dsInfos);
 +        return imageStoreMgr.getImageStore(ids.getId());
 +    }
 +
 +    @Override
 +    public boolean attachCluster(DataStore store, ClusterScope scope) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
-     public boolean attachZone(DataStore dataStore, ZoneScope scope) {
++    public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisor) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean dettach() {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean unmanaged() {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean maintain(DataStore store) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean cancelMaintain(DataStore store) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean deleteDataStore(DataStore store) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/18aeef3e/plugins/storage/image/swift/src/org/apache/cloudstack/storage/datastore/lifecycle/SwiftImageStoreLifeCycleImpl.java
----------------------------------------------------------------------
diff --cc plugins/storage/image/swift/src/org/apache/cloudstack/storage/datastore/lifecycle/SwiftImageStoreLifeCycleImpl.java
index 3ba3b31,0000000..38e2007
mode 100644,000000..100644
--- a/plugins/storage/image/swift/src/org/apache/cloudstack/storage/datastore/lifecycle/SwiftImageStoreLifeCycleImpl.java
+++ b/plugins/storage/image/swift/src/org/apache/cloudstack/storage/datastore/lifecycle/SwiftImageStoreLifeCycleImpl.java
@@@ -1,146 -1,0 +1,148 @@@
 +// Licensed to the Apache Software Foundation (ASF) under one
 +// or more contributor license agreements.  See the NOTICE file
 +// distributed with this work for additional information
 +// regarding copyright ownership.  The ASF licenses this file
 +// to you under the Apache License, Version 2.0 (the
 +// "License"); you may not use this file except in compliance
 +// with the License.  You may obtain a copy of the License at
 +//
 +//   http://www.apache.org/licenses/LICENSE-2.0
 +//
 +// Unless required by applicable law or agreed to in writing,
 +// software distributed under the License is distributed on an
 +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 +// KIND, either express or implied.  See the License for the
 +// specific language governing permissions and limitations
 +// under the License.
 +package org.apache.cloudstack.storage.datastore.lifecycle;
 +
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +import javax.inject.Inject;
 +
 +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
 +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
 +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
 +import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
 +import org.apache.cloudstack.storage.datastore.db.ImageStoreVO;
 +import org.apache.cloudstack.storage.image.datastore.ImageStoreHelper;
 +import org.apache.cloudstack.storage.image.datastore.ImageStoreProviderManager;
 +import org.apache.cloudstack.storage.image.store.lifecycle.ImageStoreLifeCycle;
 +import org.apache.log4j.Logger;
 +
 +import com.cloud.agent.api.StoragePoolInfo;
++import com.cloud.hypervisor.Hypervisor.HypervisorType;
 +import com.cloud.resource.Discoverer;
 +import com.cloud.resource.ResourceManager;
 +import com.cloud.storage.DataStoreRole;
 +import com.cloud.storage.ScopeType;
 +
 +public class SwiftImageStoreLifeCycleImpl implements ImageStoreLifeCycle {
 +
 +    private static final Logger s_logger = Logger.getLogger(SwiftImageStoreLifeCycleImpl.class);
 +    @Inject
 +    protected ResourceManager _resourceMgr;
 +    @Inject
 +    protected ImageStoreDao imageStoreDao;
 +    @Inject
 +    ImageStoreHelper imageStoreHelper;
 +    @Inject
 +    ImageStoreProviderManager imageStoreMgr;
 +
 +    protected List<? extends Discoverer> _discoverers;
 +
 +    public List<? extends Discoverer> getDiscoverers() {
 +        return _discoverers;
 +    }
 +
 +    public void setDiscoverers(List<? extends Discoverer> _discoverers) {
 +        this._discoverers = _discoverers;
 +    }
 +
 +    public SwiftImageStoreLifeCycleImpl() {
 +    }
 +
 +    @Override
 +    public DataStore initialize(Map<String, Object> dsInfos) {
 +
 +        Long dcId = (Long) dsInfos.get("zoneId");
 +        String url = (String) dsInfos.get("url");
 +        String name = (String) dsInfos.get("name");
 +        ScopeType scope = (ScopeType) dsInfos.get("scope");
 +        String providerName = (String) dsInfos.get("providerName");
 +        DataStoreRole role = (DataStoreRole) dsInfos.get("role");
 +
 +        Map<String, String> details = (Map<String, String>) dsInfos.get("details");
 +
 +        s_logger.info("Trying to add a swift store at " + url + " in data center " + dcId);
 +
 +        // just need to insert an entry in DB
 +        Map<String, Object> imageStoreParameters = new HashMap<String, Object>();
 +        imageStoreParameters.put("name", name);
 +        imageStoreParameters.put("zoneId", dcId);
 +        imageStoreParameters.put("url", url);
 +        imageStoreParameters.put("protocol", "http");
 +        if (scope != null) {
 +            imageStoreParameters.put("scope", scope);
 +        } else {
 +            imageStoreParameters.put("scope", ScopeType.REGION);
 +        }
 +        imageStoreParameters.put("providerName", providerName);
 +        imageStoreParameters.put("role", role);
 +
 +        ImageStoreVO ids = imageStoreHelper.createImageStore(imageStoreParameters, details);
 +        return imageStoreMgr.getImageStore(ids.getId());
 +    }
 +
 +    @Override
 +    public boolean attachCluster(DataStore store, ClusterScope scope) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
++
 +    @Override
-     public boolean attachZone(DataStore dataStore, ZoneScope scope) {
++    public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean dettach() {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean unmanaged() {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean maintain(DataStore store) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean cancelMaintain(DataStore store) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean deleteDataStore(DataStore store) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +}

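Swift stores, like S3 stores, are region-scoped by default: an explicit scope in dsInfos wins, otherwise ScopeType.REGION is persisted. The defaulting reduces to a one-liner (helper name hypothetical):

    // Hypothetical helper matching the scope defaulting above.
    static ScopeType effectiveScope(ScopeType requested) {
        return requested != null ? requested : ScopeType.REGION;
    }
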
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/18aeef3e/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java
----------------------------------------------------------------------
diff --cc plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java
index 693ab01,fb37e8f..38dd5a9
--- a/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java
+++ b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java
@@@ -404,17 -441,18 +404,16 @@@ public class CloudStackPrimaryDataStore
      }
  
      @Override
-     public boolean attachZone(DataStore dataStore, ZoneScope scope) {
-         List<HostVO> hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM,
-                 scope.getScopeId());
+     public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
+         List<HostVO> hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId());
          for (HostVO host : hosts) {
              try {
 -                this.storageMgr.connectHostToSharedPool(host.getId(),
 -                        dataStore.getId());
 +                this.storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId());
              } catch (Exception e) {
 -                s_logger.warn("Unable to establish a connection between " + host
 -                        + " and " + dataStore, e);
 +                s_logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
              }
          }
-         this.dataStoreHelper.attachZone(dataStore);
+         this.dataStoreHelper.attachZone(dataStore, hypervisorType);
          return true;
      }
  

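With the extra parameter, callers of the primary-store attachZone must now name the hypervisor whose up-and-enabled hosts get wired to the pool; per-host connect failures are logged and skipped rather than aborting the attach. A hypothetical call site, assuming ZoneScope exposes a zone-id constructor as used elsewhere in this codebase:

    // Hypothetical call site after the signature change; zoneId and
    // store are assumed to be in scope.
    ZoneScope scope = new ZoneScope(zoneId);
    lifeCycle.attachZone(store, scope, HypervisorType.KVM);
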
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/18aeef3e/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SamplePrimaryDataStoreLifeCycleImpl.java
----------------------------------------------------------------------
diff --cc plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SamplePrimaryDataStoreLifeCycleImpl.java
index 504cb9a,0000000..7ee8565
mode 100644,000000..100644
--- a/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SamplePrimaryDataStoreLifeCycleImpl.java
+++ b/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SamplePrimaryDataStoreLifeCycleImpl.java
@@@ -1,147 -1,0 +1,147 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *   http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing,
 + * software distributed under the License is distributed on an
 + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 + * KIND, either express or implied.  See the License for the
 + * specific language governing permissions and limitations
 + * under the License.
 + */
 +package org.apache.cloudstack.storage.datastore.lifecycle;
 +
 +import java.util.List;
 +import java.util.Map;
 +
 +import javax.inject.Inject;
 +
 +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
 +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 +import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
 +import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
 +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
 +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
 +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
 +import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd;
 +import org.apache.cloudstack.storage.command.CreatePrimaryDataStoreCmd;
 +import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager;
 +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
 +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 +import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
 +
 +import com.cloud.agent.api.StoragePoolInfo;
 +import com.cloud.host.HostVO;
 +import com.cloud.host.dao.HostDao;
 +import com.cloud.hypervisor.Hypervisor.HypervisorType;
 +import com.cloud.storage.StoragePoolStatus;
 +
 +public class SamplePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
 +    @Inject
 +    EndPointSelector selector;
 +    @Inject
 +    PrimaryDataStoreDao dataStoreDao;
 +    @Inject
 +    HostDao hostDao;
 +    @Inject
 +    PrimaryDataStoreHelper primaryStoreHelper;
 +    @Inject
 +    PrimaryDataStoreProviderManager providerMgr;
 +
 +    public SamplePrimaryDataStoreLifeCycleImpl() {
 +    }
 +
 +    @Override
 +    public DataStore initialize(Map<String, Object> dsInfos) {
 +
 +        DataStore store = primaryStoreHelper.createPrimaryDataStore(null);
 +        return providerMgr.getPrimaryDataStore(store.getId());
 +    }
 +
 +    protected void attachCluster(DataStore store) {
 +        // send down AttachPrimaryDataStoreCmd command to all the hosts in the
 +        // cluster
 +        List<EndPoint> endPoints = selector.selectAll(store);
 +        CreatePrimaryDataStoreCmd createCmd = new CreatePrimaryDataStoreCmd(store.getUri());
 +        EndPoint ep = endPoints.get(0);
 +        HostVO host = hostDao.findById(ep.getId());
 +        // demonstrate hypervisor-specific dispatch: send the create command
 +        // to the first endpoint once, and only when it is a XenServer host
 +        if (host.getHypervisorType() == HypervisorType.XenServer) {
 +            ep.sendMessage(createCmd);
 +        }
 +
 +        AttachPrimaryDataStoreCmd cmd = new AttachPrimaryDataStoreCmd(store.getUri());
 +        for (EndPoint endp : endPoints) {
 +            endp.sendMessage(cmd);
 +        }
 +    }
 +
 +    @Override
 +    public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
 +        StoragePoolVO dataStoreVO = dataStoreDao.findById(dataStore.getId());
 +        dataStoreVO.setDataCenterId(scope.getZoneId());
 +        dataStoreVO.setPodId(scope.getPodId());
 +        dataStoreVO.setClusterId(scope.getScopeId());
 +        dataStoreVO.setStatus(StoragePoolStatus.Attaching);
 +        dataStoreVO.setScope(scope.getScopeType());
 +        dataStoreDao.update(dataStoreVO.getId(), dataStoreVO);
 +
 +        attachCluster(dataStore);
 +
 +        dataStoreVO = dataStoreDao.findById(dataStore.getId());
 +        dataStoreVO.setStatus(StoragePoolStatus.Up);
 +        dataStoreDao.update(dataStoreVO.getId(), dataStoreVO);
 +
 +        return true;
 +    }
 +
 +    @Override
 +    public boolean dettach() {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean unmanaged() {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
-     public boolean attachZone(DataStore dataStore, ZoneScope scope) {
++    public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean maintain(DataStore store) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean cancelMaintain(DataStore store) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +    @Override
 +    public boolean deleteDataStore(DataStore store) {
 +        // TODO Auto-generated method stub
 +        return false;
 +    }
 +
 +}

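attachCluster in this sample walks the pool through an explicit status hand-off: persist Attaching, fan the create/attach commands out to the endpoints, then re-read and persist Up. A compact sketch of that hand-off, with the fan-out reduced to a placeholder (doAttach and storeId are assumptions, not CloudStack API):

    // Sketch of the Attaching -> Up hand-off used above.
    StoragePoolVO pool = dataStoreDao.findById(storeId);
    pool.setStatus(StoragePoolStatus.Attaching);
    dataStoreDao.update(pool.getId(), pool);

    doAttach();                            // hypothetical: send create/attach to endpoints

    pool = dataStoreDao.findById(storeId); // re-read before flipping state
    pool.setStatus(StoragePoolStatus.Up);
    dataStoreDao.update(pool.getId(), pool);
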
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/18aeef3e/server/src/com/cloud/api/ApiDBUtils.java
----------------------------------------------------------------------
diff --cc server/src/com/cloud/api/ApiDBUtils.java
index bf31b68,e5fa2e1..073e4c2
--- a/server/src/com/cloud/api/ApiDBUtils.java
+++ b/server/src/com/cloud/api/ApiDBUtils.java
@@@ -60,9 -59,7 +60,8 @@@ import org.apache.cloudstack.api.respon
  import org.apache.cloudstack.api.response.ResourceTagResponse;
  import org.apache.cloudstack.api.response.SecurityGroupResponse;
  import org.apache.cloudstack.api.response.ServiceOfferingResponse;
- import org.apache.cloudstack.api.response.StoragePoolForMigrationResponse;
  import org.apache.cloudstack.api.response.StoragePoolResponse;
 +import org.apache.cloudstack.api.response.TemplateResponse;
  import org.apache.cloudstack.api.response.UserResponse;
  import org.apache.cloudstack.api.response.UserVmResponse;
  import org.apache.cloudstack.api.response.VolumeResponse;