Posted to commits@cloudstack.apache.org by GitBox <gi...@apache.org> on 2018/12/24 00:33:56 UTC

[GitHub] GabrielBrascher closed pull request #1709: CLOUDSTACK-7982: KVM live migration with local storage

GabrielBrascher closed pull request #1709: CLOUDSTACK-7982: KVM live migration with local storage
URL: https://github.com/apache/cloudstack/pull/1709
 
This is a PR merged from a forked repository. Because GitHub hides the original
diff once a forked pull request is merged, it is reproduced below for the sake
of provenance:

diff --git a/api/src/main/java/com/cloud/vm/DiskProfile.java b/api/src/main/java/com/cloud/vm/DiskProfile.java
index d9097748363..dcd1928237c 100644
--- a/api/src/main/java/com/cloud/vm/DiskProfile.java
+++ b/api/src/main/java/com/cloud/vm/DiskProfile.java
@@ -72,6 +72,7 @@ public DiskProfile(Volume vol, DiskOffering offering, HypervisorType hyperType)
             offering.isCustomized(),
             null);
         this.hyperType = hyperType;
+        this.path = vol.getPath();
     }
 
     public DiskProfile(DiskProfile dp) {
diff --git a/core/src/main/java/com/cloud/agent/api/CancelMigrationAnswer.java b/core/src/main/java/com/cloud/agent/api/CancelMigrationAnswer.java
new file mode 100644
index 00000000000..7465a6ffe59
--- /dev/null
+++ b/core/src/main/java/com/cloud/agent/api/CancelMigrationAnswer.java
@@ -0,0 +1,27 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.agent.api;
+
+public class CancelMigrationAnswer extends Answer {
+
+    public CancelMigrationAnswer(Command cmd, Exception e) {
+        super(cmd, e);
+    }
+    public CancelMigrationAnswer(Command cmd, boolean success, String details) {
+        super(cmd, success, details);
+    }
+}
diff --git a/core/src/main/java/com/cloud/agent/api/CancelMigrationCommand.java b/core/src/main/java/com/cloud/agent/api/CancelMigrationCommand.java
new file mode 100644
index 00000000000..0db094f7414
--- /dev/null
+++ b/core/src/main/java/com/cloud/agent/api/CancelMigrationCommand.java
@@ -0,0 +1,35 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.agent.api;
+
+public class CancelMigrationCommand extends Command {
+    String vmName;
+    boolean executeInSequence = false;
+
+    public CancelMigrationCommand(String vmName) {
+        this.vmName = vmName;
+    }
+
+    public String getVmName() {
+        return vmName;
+    }
+
+    @Override
+    public boolean executeInSequence() {
+        return executeInSequence;
+    }
+}
diff --git a/core/src/main/java/com/cloud/agent/api/MigrateWithStorageAnswer.java b/core/src/main/java/com/cloud/agent/api/MigrateWithStorageAnswer.java
index d863cb05ea2..efbcae80093 100644
--- a/core/src/main/java/com/cloud/agent/api/MigrateWithStorageAnswer.java
+++ b/core/src/main/java/com/cloud/agent/api/MigrateWithStorageAnswer.java
@@ -25,19 +25,26 @@
 
 public class MigrateWithStorageAnswer extends Answer {
 
-    List<VolumeObjectTO> volumeTos;
+    final List<VolumeObjectTO> volumeTos;
+    boolean aborted = false;
 
-    public MigrateWithStorageAnswer(MigrateWithStorageCommand cmd, Exception ex) {
-        super(cmd, ex);
+    public MigrateWithStorageAnswer(MigrateWithStorageCommand cmd, boolean result, boolean aborted, Exception ex) {
+        super(cmd, result, ex.toString());
+        this.aborted = aborted;
         volumeTos = null;
     }
 
-    public MigrateWithStorageAnswer(MigrateWithStorageCommand cmd, List<VolumeObjectTO> volumeTos) {
-        super(cmd, true, null);
+    public MigrateWithStorageAnswer(MigrateWithStorageCommand cmd, List<VolumeObjectTO> volumeTos, boolean aborted, String details) {
+        super(cmd, !aborted, details);
         this.volumeTos = volumeTos;
+        this.aborted = aborted;
     }
 
     public List<VolumeObjectTO> getVolumeTos() {
         return volumeTos;
     }
+
+    public boolean isAborted() {
+        return aborted;
+    }
 }
diff --git a/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java b/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java
index 556ec86f060..618d0202bd7 100644
--- a/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java
+++ b/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java
@@ -21,6 +21,7 @@
 import java.util.List;
 import java.util.Map;
 
+import com.cloud.exception.VirtualMachineMigrationException;
 import org.apache.cloudstack.framework.config.ConfigKey;
 
 import com.cloud.agent.api.to.NicTO;
@@ -118,7 +119,7 @@ void orchestrateStart(String vmUuid, Map<VirtualMachineProfile.Param, Object> pa
 
     void migrate(String vmUuid, long srcHostId, DeployDestination dest) throws ResourceUnavailableException, ConcurrentOperationException;
 
-    void migrateWithStorage(String vmUuid, long srcId, long destId, Map<Long, Long> volumeToPool) throws ResourceUnavailableException, ConcurrentOperationException;
+    void migrateWithStorage(String vmUuid, long srcId, long destId, Map<Long, Long> volumeToPool) throws ResourceUnavailableException, ConcurrentOperationException, VirtualMachineMigrationException;
 
     void reboot(String vmUuid, Map<VirtualMachineProfile.Param, Object> params) throws InsufficientCapacityException, ResourceUnavailableException;
 
diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java
index fa6f2c6fb9d..e2dc508f7cb 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java
@@ -21,6 +21,7 @@
 import java.util.Map;
 import java.util.Set;
 
+import com.cloud.exception.VirtualMachineMigrationException;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
@@ -105,12 +106,14 @@ DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Lon
 
     void revokeAccess(long vmId, long hostId);
 
-    void migrateVolumes(VirtualMachine vm, VirtualMachineTO vmTo, Host srcHost, Host destHost, Map<Volume, StoragePool> volumeToPool);
+    void liveMigrateVolumes(VirtualMachine vm, VirtualMachineTO vmTo, Host srcHost, Host destHost, Map<Volume, StoragePool> volumeToPool) throws VirtualMachineMigrationException;
 
     boolean storageMigration(VirtualMachineProfile vm, StoragePool destPool) throws StorageUnavailableException;
 
     void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest);
 
+    void confirmMigration(VirtualMachineProfile vm, long srcHostId, long destHostId, boolean migrationSuccess);
+
     void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException;
 
     boolean canVmRestartOnAnotherServer(long vmId);
diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/CopyCommandResult.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/CopyCommandResult.java
index bb5ba14d25d..6608b7526eb 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/CopyCommandResult.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/CopyCommandResult.java
@@ -39,4 +39,10 @@ public String getPath() {
     public Answer getAnswer() {
         return this.answer;
     }
+
+    @Override
+    public String toString() {
+        String sup = super.toString();
+        return sup.substring(0, sup.length()-1) + ", path: " + (path == null ? "<null>" : path) + ", answer: " + (answer == null ? "<null>" : answer) + "}";
+    }
 }
diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java
index 04aefbec31f..b48ce262f9d 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java
@@ -109,4 +109,6 @@ public VolumeInfo getVolume() {
 
     VolumeInfo updateHypervisorSnapshotReserveForVolume(DiskOffering diskOffering, long volumeId, HypervisorType hyperType);
 
+    boolean deleteVolumeOnDataStore(DataStore store, long volumeId);
+
 }
diff --git a/engine/api/src/main/java/org/apache/cloudstack/storage/command/CommandResult.java b/engine/api/src/main/java/org/apache/cloudstack/storage/command/CommandResult.java
index 0a33c519f01..2e1d9ac16d2 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/storage/command/CommandResult.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/storage/command/CommandResult.java
@@ -20,10 +20,12 @@
 
 public class CommandResult {
     private boolean success;
+    private boolean aborted;
     private String result;
 
     public CommandResult() {
         this.success = true;
+        this.aborted = false;
         this.result = "";
     }
 
@@ -39,6 +41,14 @@ public void setSuccess(boolean success) {
         this.success = success;
     }
 
+    public boolean isAborted() {
+        return aborted;
+    }
+
+    public void setAborted(boolean aborted) {
+        this.aborted = aborted;
+    }
+
     public String getResult() {
         return this.result;
     }
@@ -49,4 +59,9 @@ public void setResult(String result) {
             this.success = false;
         }
     }
+
+    @Override
+    public String toString() {
+        return "CommandResult={success: " + success + ", aborted: " + aborted + ", result: " + (result == null ? "<null>" : result) + "}";
+    }
 }
diff --git a/engine/orchestration/pom.xml b/engine/orchestration/pom.xml
index 5cb2e6d8801..f86f05b0af6 100755
--- a/engine/orchestration/pom.xml
+++ b/engine/orchestration/pom.xml
@@ -68,6 +68,7 @@
       <artifactId>cloud-server</artifactId>
       <version>${project.version}</version>
     </dependency>
+
   </dependencies>
   <build>
     <plugins>
diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
index 1d430c6f253..cc64fef26ac 100755
--- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
+++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
@@ -74,6 +74,7 @@
 import org.apache.commons.collections.MapUtils;
 import org.apache.log4j.Logger;
 
+
 import com.cloud.agent.AgentManager;
 import com.cloud.agent.Listener;
 import com.cloud.agent.api.AgentControlAnswer;
@@ -147,6 +148,7 @@
 import com.cloud.exception.OperationTimedoutException;
 import com.cloud.exception.ResourceUnavailableException;
 import com.cloud.exception.StorageUnavailableException;
+import com.cloud.exception.VirtualMachineMigrationException;
 import com.cloud.gpu.dao.VGPUTypesDao;
 import com.cloud.ha.HighAvailabilityManager;
 import com.cloud.ha.HighAvailabilityManager.WorkType;
@@ -901,7 +903,7 @@ public void advanceStart(final String vmUuid, final Map<VirtualMachineProfile.Pa
             } catch (final InterruptedException e) {
                 throw new RuntimeException("Operation is interrupted", e);
             } catch (final java.util.concurrent.ExecutionException e) {
-                throw new RuntimeException("Execution excetion", e);
+                throw new RuntimeException("Execution exception", e);
             }
 
             final Object jobResult = _jobMgr.unmarshallResultObject(outcome.getJob());
@@ -2437,7 +2439,7 @@ private void createVolumeToStoragePoolMappingIfNeeded(VirtualMachineProfile prof
 
     @Override
     public void migrateWithStorage(final String vmUuid, final long srcHostId, final long destHostId, final Map<Long, Long> volumeToPool)
-            throws ResourceUnavailableException, ConcurrentOperationException {
+            throws ResourceUnavailableException, ConcurrentOperationException, VirtualMachineMigrationException {
 
         final AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext();
         if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) {
@@ -2481,7 +2483,7 @@ public void migrateWithStorage(final String vmUuid, final long srcHostId, final
     }
 
     private void orchestrateMigrateWithStorage(final String vmUuid, final long srcHostId, final long destHostId, final Map<Long, Long> volumeToPool) throws ResourceUnavailableException,
-    ConcurrentOperationException {
+    ConcurrentOperationException, VirtualMachineMigrationException {
 
         final VMInstanceVO vm = _vmDao.findByUuid(vmUuid);
 
@@ -2493,6 +2495,10 @@ private void orchestrateMigrateWithStorage(final String vmUuid, final long srcHo
         final HostPodVO pod = _podDao.findById(destHost.getPodId());
         final Cluster cluster = _clusterDao.findById(destHost.getClusterId());
         final DeployDestination destination = new DeployDestination(dc, pod, cluster, destHost);
+        final VirtualMachineProfile vmSrc = new VirtualMachineProfileImpl(vm);
+        for (NicProfile nic : _networkMgr.getNicProfiles(vm)) {
+            vmSrc.addNic(nic);
+        }
 
         // Create a map of which volume should go in which storage pool.
         final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
@@ -2573,47 +2579,50 @@ private void orchestrateMigrateWithStorage(final String vmUuid, final long srcHo
             }
 
             // Migrate the vm and its volume.
-            volumeMgr.migrateVolumes(vm, to, srcHost, destHost, volumeToPoolMap);
-
-            // Put the vm back to running state.
-            moveVmOutofMigratingStateOnSuccess(vm, destHost.getId(), work);
+            volumeMgr.liveMigrateVolumes(vm, to, srcHost, destHost, volumeToPoolMap);
+            migrated = true;
 
             try {
-                if (!checkVmOnHost(vm, destHostId)) {
+                if (migrated && !checkVmOnHost(vm, destHostId)) {
                     s_logger.error("Vm not found on destination host. Unable to complete migration for " + vm);
-                    try {
-                        _agentMgr.send(srcHostId, new Commands(cleanup(vm.getInstanceName())), null);
-                    } catch (final AgentUnavailableException e) {
-                        s_logger.error("AgentUnavailableException while cleanup on source host: " + srcHostId);
-                    }
-                    cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.AgentReportStopped, true);
-                    throw new CloudRuntimeException("VM not found on desintation host. Unable to complete migration for " + vm);
+                    migrated = false;
                 }
             } catch (final OperationTimedoutException e) {
                 s_logger.warn("Error while checking the vm " + vm + " is on host " + destHost, e);
             }
 
-            migrated = true;
         } finally {
+
             if (!migrated) {
-                s_logger.info("Migration was unsuccessful.  Cleaning up: " + vm);
+                s_logger.info("Migration was unsuccessful for " + vm);
                 _alertMgr.sendAlert(alertType, srcHost.getDataCenterId(), srcHost.getPodId(),
                         "Unable to migrate vm " + vm.getInstanceName() + " from host " + srcHost.getName() + " in zone " + dc.getName() + " and pod " + dc.getName(),
-                        "Migrate Command failed.  Please check logs.");
+                        "Migrate Command failed. Please check logs.");
+                vm.setHostId(srcHostId);
+                _vmDao.update(vm.getId(), vm);
                 try {
-                    _agentMgr.send(destHostId, new Commands(cleanup(vm.getInstanceName())), null);
-                    vm.setPodIdToDeployIn(srcHost.getPodId());
+                    _networkMgr.rollbackNicForMigration(vmSrc, profile);
                     stateTransitTo(vm, Event.OperationFailed, srcHostId);
-                } catch (final AgentUnavailableException e) {
-                    s_logger.warn("Looks like the destination Host is unavailable for cleanup.", e);
-                } catch (final NoTransitionException e) {
+                } catch (NoTransitionException e) {
                     s_logger.error("Error while transitioning vm from migrating to running state.", e);
                 }
+            } else {
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug("Migration was successful, committing it.");
+                }
+                // Put the vm back to running state.
+                moveVmOutofMigratingStateOnSuccess(vm, destHost.getId(), work);
             }
 
+            volumeMgr.confirmMigration(profile, srcHostId, destHostId, migrated);
+
             work.setStep(Step.Done);
             _workDao.update(work.getId(), work);
         }
+
+        if (!migrated) {
+            throw new VirtualMachineMigrationException("Migration failed");
+        }
     }
 
     @Override
diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
index 829a4d26fcb..7233880d0ae 100644
--- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
+++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
@@ -85,6 +85,7 @@
 import com.cloud.exception.InsufficientStorageCapacityException;
 import com.cloud.exception.InvalidParameterValueException;
 import com.cloud.exception.StorageUnavailableException;
+import com.cloud.exception.VirtualMachineMigrationException;
 import com.cloud.host.Host;
 import com.cloud.host.HostVO;
 import com.cloud.host.dao.HostDao;
@@ -99,11 +100,13 @@
 import com.cloud.storage.Storage.ImageFormat;
 import com.cloud.storage.StorageManager;
 import com.cloud.storage.StoragePool;
+import com.cloud.storage.VMTemplateVO;
 import com.cloud.storage.VMTemplateStorageResourceAssoc;
 import com.cloud.storage.Volume;
 import com.cloud.storage.Volume.Type;
 import com.cloud.storage.VolumeVO;
 import com.cloud.storage.dao.SnapshotDao;
+import com.cloud.storage.dao.VMTemplateDao;
 import com.cloud.storage.dao.VolumeDao;
 import com.cloud.storage.dao.VolumeDetailsDao;
 import com.cloud.template.TemplateManager;
@@ -157,6 +160,10 @@
     @Inject
     protected VolumeDao _volumeDao;
     @Inject
+    protected UserVmDao _userVmDao;
+    @Inject
+    private VMTemplateDao _tmpltDao;
+    @Inject
     protected SnapshotDao _snapshotDao;
     @Inject
     protected SnapshotDataStoreDao _snapshotDataStoreDao;
@@ -181,8 +188,6 @@
     @Inject
     SnapshotService _snapshotSrv;
     @Inject
-    protected UserVmDao _userVmDao;
-    @Inject
     protected AsyncJobManager _jobMgr;
     @Inject
     ClusterManager clusterManager;
@@ -982,28 +987,8 @@ public Volume migrateVolume(Volume volume, StoragePool destPool) throws StorageU
         }
     }
 
-    @DB
-    protected Volume liveMigrateVolume(Volume volume, StoragePool destPool) {
-        VolumeInfo vol = volFactory.getVolume(volume.getId());
-        AsyncCallFuture<VolumeApiResult> future = volService.migrateVolume(vol, (DataStore)destPool);
-        try {
-            VolumeApiResult result = future.get();
-            if (result.isFailed()) {
-                s_logger.debug("migrate volume failed:" + result.getResult());
-                return null;
-            }
-            return result.getVolume();
-        } catch (InterruptedException e) {
-            s_logger.debug("migrate volume failed", e);
-            return null;
-        } catch (ExecutionException e) {
-            s_logger.debug("migrate volume failed", e);
-            return null;
-        }
-    }
-
     @Override
-    public void migrateVolumes(VirtualMachine vm, VirtualMachineTO vmTo, Host srcHost, Host destHost, Map<Volume, StoragePool> volumeToPool) {
+    public void liveMigrateVolumes(VirtualMachine vm, VirtualMachineTO vmTo, Host srcHost, Host destHost, Map<Volume, StoragePool> volumeToPool) throws VirtualMachineMigrationException {
         // Check if all the vms being migrated belong to the vm.
         // Check if the storage pool is of the right type.
         // Create a VolumeInfo to DataStore map too.
@@ -1014,7 +999,7 @@ public void migrateVolumes(VirtualMachine vm, VirtualMachineTO vmTo, Host srcHos
             StoragePool destPool = (StoragePool)dataStoreMgr.getDataStore(storagePool.getId(), DataStoreRole.Primary);
 
             if (volume.getInstanceId() != vm.getId()) {
-                throw new CloudRuntimeException("Volume " + volume + " that has to be migrated doesn't belong to the" + " instance " + vm);
+                throw new CloudRuntimeException("Volume " + volume + " that has to be migrated doesn't belong to the instance " + vm);
             }
 
             if (destPool == null) {
@@ -1028,8 +1013,8 @@ public void migrateVolumes(VirtualMachine vm, VirtualMachineTO vmTo, Host srcHos
         try {
             CommandResult result = future.get();
             if (result.isFailed()) {
-                s_logger.debug("Failed to migrated vm " + vm + " along with its volumes. " + result.getResult());
-                throw new CloudRuntimeException("Failed to migrated vm " + vm + " along with its volumes. ");
+                s_logger.debug("Failed to migrate vm " + vm + " along with its volumes. " + result.getResult());
+                throw new VirtualMachineMigrationException("Failed to migrate vm " + vm + " along with its volumes.");
             }
         } catch (InterruptedException e) {
             s_logger.debug("Failed to migrated vm " + vm + " along with its volumes.", e);
@@ -1078,6 +1063,8 @@ public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest
             s_logger.debug("Preparing " + vols.size() + " volumes for " + vm);
         }
 
+
+
         for (VolumeVO vol : vols) {
             VolumeInfo volumeInfo = volFactory.getVolume(vol.getId());
             DataTO volTO = volumeInfo.getTO();
@@ -1087,6 +1074,14 @@ public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest
             disk.setDetails(getDetails(volumeInfo, dataStore));
 
             vm.addDisk(disk);
+
+            // Ensure that the template is available on the destination host
+            if (vm.getType() == VirtualMachine.Type.User && vol.getVolumeType().equals(Type.ROOT)) {
+                VMTemplateVO vmTemplate = _tmpltDao.findById(vol.getTemplateId());
+                StoragePool destStoragePool = storageMgr.findLocalStorageOnHost(dest.getHost().getId());
+                StoragePool destDataStore = (StoragePool)dataStoreMgr.getDataStore(destStoragePool.getId(), DataStoreRole.Primary);
+                _tmpltMgr.prepareTemplateForCreate(vmTemplate, destDataStore);
+            }
         }
 
         //if (vm.getType() == VirtualMachine.Type.User && vm.getTemplate().getFormat() == ImageFormat.ISO) {
@@ -1098,6 +1093,34 @@ public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest
         }
     }
 
+    @Override
+    public void confirmMigration(VirtualMachineProfile vm, final long srcHostId, final long destHostId, boolean migrationSuccessful) {
+        List<VolumeVO> vols = _volsDao.findUsableVolumesForInstance(vm.getId());
+        StoragePool poolToCleanup = storageMgr.findLocalStorageOnHost((migrationSuccessful ? srcHostId : destHostId));
+        StoragePool pool = storageMgr.findLocalStorageOnHost((migrationSuccessful ? destHostId : srcHostId));
+        Volume volume = null;
+        for (VolumeVO vol : vols) {
+            if (vm.getType() == VirtualMachine.Type.User && vol.getVolumeType().equals(Type.ROOT)) {
+                volume = vol;
+            }
+            if (vol.getPoolId() == poolToCleanup.getId()) {
+                s_logger.info("Volume " + volume.getName() + " is listed on the wrong pool [" + vol.getPoolId() + "] but should be on ["
+                        + pool.getId() + "] which should also be the last pool id[" + vol.getLastPoolId() + "]. Fixing it now.");
+                vol.setPoolId(pool.getId());
+                _volsDao.update(vol.getId(), vol);
+            }
+        }
+
+        if (migrationSuccessful) {
+            DataStore dataStoreToCleanup = dataStoreMgr.getDataStore(poolToCleanup.getId(), DataStoreRole.Primary);
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Will delete the volume " + volume.getName() + " on host " + dataStoreToCleanup.getName());
+            }
+            volService.deleteVolumeOnDataStore(dataStoreToCleanup, volume.getId());
+        }
+        // When the migration is not successful, we decide to delete or not the volume in the motion strategy.
+    }
+
     private Map<String, String> getDetails(VolumeInfo volumeInfo, DataStore dataStore) {
         Map<String, String> details = new HashMap<String, String>();
 
diff --git a/engine/schema/src/main/java/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java b/engine/schema/src/main/java/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java
index a4341380ddc..e673b81405e 100644
--- a/engine/schema/src/main/java/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java
+++ b/engine/schema/src/main/java/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java
@@ -71,7 +71,7 @@ public HypervisorCapabilitiesVO findByHypervisorTypeAndVersion(HypervisorType hy
         }
         SearchCriteria<HypervisorCapabilitiesVO> sc = HypervisorTypeAndVersionSearch.create();
         sc.setParameters("hypervisorType", hypervisorType);
-        sc.setParameters("hypervisorVersion", hypervisorVersion);
+        sc.setParameters("hypervisorVersion", (hypervisorVersion == null ? DEFAULT_VERSION : hypervisorVersion));
         return findOneBy(sc);
     }
 
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41110to41200.sql b/engine/schema/src/main/resources/META-INF/db/schema-41110to41200.sql
index d5e6d61ea71..e25d5fbab22 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-41110to41200.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41110to41200.sql
@@ -32,4 +32,7 @@ ALTER TABLE `vlan` CHANGE `description` `ip4_range` varchar(255);
 -- We are only adding the permission to the default rules. Any custom rule must be configured by the root admin.
 INSERT INTO `cloud`.`role_permissions` (`uuid`, `role_id`, `rule`, `permission`, `sort_order`) values (UUID(), 2, 'moveNetworkAclItem', 'ALLOW', 100) ON DUPLICATE KEY UPDATE rule=rule;
 INSERT INTO `cloud`.`role_permissions` (`uuid`, `role_id`, `rule`, `permission`, `sort_order`) values (UUID(), 3, 'moveNetworkAclItem', 'ALLOW', 302) ON DUPLICATE KEY UPDATE rule=rule;
-INSERT INTO `cloud`.`role_permissions` (`uuid`, `role_id`, `rule`, `permission`, `sort_order`) values (UUID(), 4, 'moveNetworkAclItem', 'ALLOW', 260) ON DUPLICATE KEY UPDATE rule=rule;
\ No newline at end of file
+INSERT INTO `cloud`.`role_permissions` (`uuid`, `role_id`, `rule`, `permission`, `sort_order`) values (UUID(), 4, 'moveNetworkAclItem', 'ALLOW', 260) ON DUPLICATE KEY UPDATE rule=rule;
+
+-- Enable storage motion for KVM hypervisor
+UPDATE  `cloud`.`hypervisor_capabilities` SET  `storage_motion_supported` =  1 WHERE  `hypervisor_capabilities`.`hypervisor_type` =  'KVM';
diff --git a/engine/storage/datamotion/src/main/resources/META-INF/cloudstack/storage/spring-engine-storage-datamotion-storage-context.xml b/engine/storage/datamotion/src/main/resources/META-INF/cloudstack/storage/spring-engine-storage-datamotion-storage-context.xml
index 1cefc517eed..4800642c389 100644
--- a/engine/storage/datamotion/src/main/resources/META-INF/cloudstack/storage/spring-engine-storage-datamotion-storage-context.xml
+++ b/engine/storage/datamotion/src/main/resources/META-INF/cloudstack/storage/spring-engine-storage-datamotion-storage-context.xml
@@ -34,4 +34,6 @@
         class="org.apache.cloudstack.storage.motion.HypervStorageMotionStrategy" />
     <bean id="storageSystemDataMotionStrategy"
         class="org.apache.cloudstack.storage.motion.StorageSystemDataMotionStrategy" />
+    <bean id="kvmStorageMotionStrategy"
+          class="org.apache.cloudstack.storage.motion.KVMStorageMotionStrategy" />
 </beans>
diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
index 36313058e92..ce5a8ec092e 100644
--- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
+++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
@@ -28,6 +28,7 @@
 
 import javax.inject.Inject;
 
+import com.cloud.agent.api.MigrateWithStorageAnswer;
 import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity;
 import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
@@ -1250,6 +1251,31 @@ public boolean destroyVolume(long volumeId) throws ConcurrentOperationException
         return true;
     }
 
+    @Override
+    public boolean deleteVolumeOnDataStore(DataStore store, long volumeId) {
+        VolumeInfo vol = volFactory.getVolume(volumeId);
+        VolumeObjectTO volTO = new VolumeObjectTO(vol);
+        volTO.setDataStore(store.getTO());
+        DeleteCommand dtCommand = new DeleteCommand(volTO);
+        EndPoint ep = _epSelector.select(store);
+        Answer answer = null;
+        if (ep == null) {
+            String errMsg = "No remote endpoint to send command, check if host or ssvm is down?";
+            s_logger.error(errMsg);
+            answer = new Answer(dtCommand, false, errMsg);
+        } else {
+            answer = ep.sendMessage(dtCommand);
+        }
+        if (answer == null || !answer.getResult()) {
+            s_logger.info("Failed to deleted volume at store: " + store.getName());
+        } else {
+            String description = "Deleted volume " + vol.getName() + " on storage " + store.getName();
+            s_logger.info(description);
+            return true;
+        }
+        return false;
+    }
+
     @Override
     public AsyncCallFuture<VolumeApiResult> createVolumeFromSnapshot(VolumeInfo volume, DataStore store, SnapshotInfo snapshot) {
         AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<VolumeApiResult>();
@@ -1646,7 +1672,7 @@ public MigrateVmWithVolumesContext(AsyncCompletionCallback<T> callback, AsyncCal
             motionSrv.copyAsync(volumeMap, vmTo, srcHost, destHost, caller);
 
         } catch (Exception e) {
-            s_logger.debug("Failed to copy volume", e);
+            s_logger.debug("Failed to migrate volume", e);
             res.setResult(e.toString());
             future.complete(res);
         }
@@ -1654,34 +1680,35 @@ public MigrateVmWithVolumesContext(AsyncCompletionCallback<T> callback, AsyncCal
         return future;
     }
 
-    protected Void
-    migrateVmWithVolumesCallBack(AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> callback, MigrateVmWithVolumesContext<CommandResult> context) {
+    protected Void migrateVmWithVolumesCallBack(AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> callback, MigrateVmWithVolumesContext<CommandResult> context) {
         Map<VolumeInfo, DataStore> volumeToPool = context.volumeToPool;
         CopyCommandResult result = callback.getResult();
         AsyncCallFuture<CommandResult> future = context.future;
         CommandResult res = new CommandResult();
-        try {
-            if (result.isFailed()) {
-                res.setResult(result.getResult());
-                for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
-                    VolumeInfo volume = entry.getKey();
-                    volume.processEvent(Event.OperationFailed);
-                }
-                future.complete(res);
+
+
+        if (result.isFailed()) {
+            res.setSuccess(false);
+            MigrateWithStorageAnswer answer = (MigrateWithStorageAnswer)result.getAnswer();
+            if (answer != null) {
+                res.setAborted(answer.isAborted());
+            }
+            res.setResult(result.getResult());
+        }
+
+        for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
+            VolumeInfo volume = entry.getKey();
+            if (result.isFailed() || result.isAborted()) {
+                volume.processEvent(Event.OperationFailed);
             } else {
-                for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
-                    VolumeInfo volume = entry.getKey();
-                    snapshotMgr.cleanupSnapshotsByVolume(volume.getId());
-                    volume.processEvent(Event.OperationSuccessed);
-                }
-                future.complete(res);
+                snapshotMgr.cleanupSnapshotsByVolume(volume.getId());
+                volume.processEvent(Event.OperationSuccessed);
             }
-        } catch (Exception e) {
-            s_logger.error("Failed to process copy volume callback", e);
-            res.setResult(e.toString());
-            future.complete(res);
+
         }
 
+        future.complete(res);
+
         return null;
     }
 
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
index f26d8ded0a4..e4ee2c65218 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
@@ -213,7 +213,9 @@
     private String _dcId;
     private String _pod;
     private String _clusterId;
-
+    private long _migrateWithStorageFlags;
+    private long _migrateFlags;
+    private MigrationBy _migrateBy;
     private long _hvVersion;
     private Duration _timeout;
     private static final int NUMMEMSTATS =2;
@@ -228,6 +230,7 @@
     private String _mountPoint = "/mnt";
     private StorageLayer _storage;
     private KVMStoragePoolManager _storagePoolMgr;
+    private String _libvirtConnectionProtocol = "qemu://";
 
     private VifDriver _defaultVifDriver;
     private Map<TrafficType, VifDriver> _trafficTypeVifDrivers;
@@ -421,6 +424,22 @@ public int getMigrateSpeed() {
         return _migrateSpeed;
     }
 
+    public MigrationBy getMigrateBy() {
+        return _migrateBy;
+    }
+
+    public long getMigrateFlags() {
+        return _migrateFlags;
+    }
+
+    public long getMigrateWithStorageFlags() {
+        return _migrateWithStorageFlags;
+    }
+
+    public String getLibvirtConnectionProtocol() {
+        return _libvirtConnectionProtocol;
+    }
+
     public String getPingTestPath() {
         return _pingTestPath;
     }
@@ -1022,6 +1041,11 @@ public boolean configure(final String name, final Map<String, Object> params) th
             _mountPoint = "/mnt";
         }
 
+        _libvirtConnectionProtocol = (String) params.get("libvirt.connection.protocol");
+        if (_libvirtConnectionProtocol == null) {
+            _libvirtConnectionProtocol = "qemu://";
+        }
+
         value = (String) params.get("vm.migrate.downtime");
         _migrateDowntime = NumbersUtil.parseInt(value, -1);
 
@@ -1048,6 +1072,17 @@ public boolean configure(final String name, final Map<String, Object> params) th
             params.put("vm.migrate.speed", String.valueOf(_migrateSpeed));
         }
 
+        value = (String)params.get("vm.migratewithstorage.flags");
+        _migrateWithStorageFlags = NumbersUtil.parseLong(value, 4481);
+
+        value = (String)params.get("vm.migrate.flags");
+        _migrateFlags = NumbersUtil.parseInt(value, 1);
+
+        _migrateBy = MigrationBy.fromString((String) params.get("vm.migrate.by"));
+        if (_migrateBy == null) {
+            _migrateBy = MigrationBy.IP;
+        }
+
         bridges.put("linklocal", _linkLocalBridgeName);
         bridges.put("public", _publicBridgeName);
         bridges.put("private", _privBridgeName);
@@ -1434,7 +1469,6 @@ public boolean stop() {
 
     @Override
     public Answer executeRequest(final Command cmd) {
-
         final LibvirtRequestWrapper wrapper = LibvirtRequestWrapper.getInstance();
         try {
             return wrapper.execute(cmd, this);
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/MigrateKVMAsync.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/MigrateKVMAsync.java
index 4b2afa6a59b..af81e725467 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/MigrateKVMAsync.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/MigrateKVMAsync.java
@@ -26,8 +26,6 @@
 
 public class MigrateKVMAsync implements Callable<Domain> {
 
-    private final LibvirtComputingResource libvirtComputingResource;
-
     private Domain dm = null;
     private Connect dconn = null;
     private String dxml = "";
@@ -35,18 +33,19 @@
     private String destIp = "";
     private boolean migrateStorage;
     private boolean autoConvergence;
+    private long flags = 0L;
+    private int migrationSpeed = 0;
 
     public MigrateKVMAsync(final LibvirtComputingResource libvirtComputingResource, final Domain dm, final Connect dconn, final String dxml,
-                           final boolean migrateStorage, final boolean autoConvergence, final String vmName, final String destIp) {
-        this.libvirtComputingResource = libvirtComputingResource;
-
+                           final boolean migrateStorage, final boolean autoConvergence, final String vmName, final long flags) {
         this.dm = dm;
         this.dconn = dconn;
         this.dxml = dxml;
         this.migrateStorage = migrateStorage;
         this.autoConvergence = autoConvergence;
         this.vmName = vmName;
-        this.destIp = destIp;
+        this.migrationSpeed = libvirtComputingResource.getMigrateSpeed();
+        this.flags = flags;
     }
 
     @Override
@@ -66,6 +65,6 @@ public Domain call() throws LibvirtException {
             flags |= 1 << 13;
         }
 
-        return dm.migrate(dconn, flags, dxml, vmName, "tcp:" + destIp, libvirtComputingResource.getMigrateSpeed());
+        return dm.migrate(dconn, flags, dxml, vmName, null, migrationSpeed);
     }
 }
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/MigrationBy.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/MigrationBy.java
new file mode 100644
index 00000000000..6cd851d8ce9
--- /dev/null
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/MigrationBy.java
@@ -0,0 +1,30 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.hypervisor.kvm.resource;
+
+public enum MigrationBy {
+    IP, HOSTNAME;
+
+    public static MigrationBy fromString(String value) {
+        for(MigrationBy mig : MigrationBy.values()) {
+            if (mig.name().equalsIgnoreCase(value)) {
+                return mig;
+            }
+        }
+        return null;
+    }
+}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCancelMigrationCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCancelMigrationCommandWrapper.java
new file mode 100644
index 00000000000..3879b7971b0
--- /dev/null
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCancelMigrationCommandWrapper.java
@@ -0,0 +1,71 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.hypervisor.kvm.resource.wrapper;
+
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.CancelMigrationAnswer;
+import com.cloud.agent.api.CancelMigrationCommand;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
+import com.cloud.hypervisor.kvm.resource.LibvirtConnection;
+import com.cloud.resource.CommandWrapper;
+import com.cloud.resource.ResourceWrapper;
+import org.apache.log4j.Logger;
+import org.libvirt.Connect;
+import org.libvirt.Domain;
+import org.libvirt.LibvirtException;
+
+@ResourceWrapper(handles = CancelMigrationCommand.class)
+public class LibvirtCancelMigrationCommandWrapper extends CommandWrapper<CancelMigrationCommand, Answer, LibvirtComputingResource> {
+
+    private static final Logger s_logger = Logger.getLogger(LibvirtCancelMigrationCommandWrapper.class);
+
+    @Override
+    public Answer execute(final CancelMigrationCommand cmd, final LibvirtComputingResource libvirtComputingResource) {
+        Connect conn = null;
+        Domain domain = null;
+        final String vmName = cmd.getVmName();
+        int result= 0;
+
+        try {
+            conn = LibvirtConnection.getConnectionByVmName(vmName);
+            domain = conn.domainLookupByName(vmName);
+
+            if (s_logger.isInfoEnabled()) {
+                s_logger.info("Cancel migration for vm " + vmName);
+            }
+            result = domain.abortJob();
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Canceled migration for vm " + vmName + " with result=" + result);
+            }
+
+
+        } catch (LibvirtException e) {
+            s_logger.error("Exception while cancelling the migration job, maybe it was finished just before trying to abort it. You must check the logs!");
+            return new CancelMigrationAnswer(cmd, e);
+        }
+
+        if (result == 0) {
+            return new CancelMigrationAnswer(cmd, true, null);
+        } else {
+            return new CancelMigrationAnswer(cmd, false, "Failed to abort vm migration");
+        }
+    }
+
+}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateCommandWrapper.java
index 1796dc50f31..e717a18b022 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateCommandWrapper.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateCommandWrapper.java
@@ -59,7 +59,10 @@ public Answer execute(final CreateCommand command, final LibvirtComputingResourc
                     vol = libvirtComputingResource.templateToPrimaryDownload(command.getTemplateUrl(), primaryPool, dskch.getPath());
                 } else {
                     baseVol = primaryPool.getPhysicalDisk(command.getTemplateUrl());
-                    vol = storagePoolMgr.createDiskFromTemplate(baseVol, dskch.getPath(), dskch.getProvisioningType(), primaryPool, 0);
+                    if (baseVol.getSize() > disksize) {
+                        disksize = baseVol.getSize();
+                    }
+                    vol = storagePoolMgr.createDiskFromTemplate(baseVol, dskch.getPath(), dskch.getProvisioningType(), primaryPool, disksize, 0);
                 }
                 if (vol == null) {
                     return new Answer(command, false, " Can't create storage volume on storage pool");
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java
index 67ec1b731af..2ef50989c17 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java
@@ -19,419 +19,20 @@
 
 package com.cloud.hypervisor.kvm.resource.wrapper;
 
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import javax.xml.parsers.DocumentBuilder;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.parsers.ParserConfigurationException;
-import javax.xml.transform.Transformer;
-import javax.xml.transform.TransformerConfigurationException;
-import javax.xml.transform.TransformerException;
-import javax.xml.transform.TransformerFactory;
-import javax.xml.transform.dom.DOMSource;
-import javax.xml.transform.stream.StreamResult;
-
-import org.apache.commons.collections.MapUtils;
-import org.apache.commons.io.IOUtils;
-import org.apache.log4j.Logger;
-import org.libvirt.Connect;
-import org.libvirt.Domain;
-import org.libvirt.DomainInfo.DomainState;
-import org.libvirt.LibvirtException;
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-import org.w3c.dom.NamedNodeMap;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-import org.xml.sax.SAXException;
-
 import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.MigrateAnswer;
 import com.cloud.agent.api.MigrateCommand;
 import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
-import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef;
-import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.InterfaceDef;
-import com.cloud.hypervisor.kvm.resource.MigrateKVMAsync;
-import com.cloud.hypervisor.kvm.resource.VifDriver;
 import com.cloud.resource.CommandWrapper;
 import com.cloud.resource.ResourceWrapper;
-import com.cloud.utils.Ternary;
-import com.cloud.utils.exception.CloudRuntimeException;
-import com.google.common.base.Strings;
 
 @ResourceWrapper(handles =  MigrateCommand.class)
 public final class LibvirtMigrateCommandWrapper extends CommandWrapper<MigrateCommand, Answer, LibvirtComputingResource> {
 
-    private static final String GRAPHICS_ELEM_END = "/graphics>";
-    private static final String GRAPHICS_ELEM_START = "<graphics";
-    private static final String CONTENTS_WILDCARD = "(?s).*";
-    private static final Logger s_logger = Logger.getLogger(LibvirtMigrateCommandWrapper.class);
-
-    protected String createMigrationURI(final String destinationIp, final LibvirtComputingResource libvirtComputingResource) {
-        if (Strings.isNullOrEmpty(destinationIp)) {
-            throw new CloudRuntimeException("Provided libvirt destination ip is invalid");
-        }
-        return String.format("%s://%s/system", libvirtComputingResource.isHostSecured() ? "qemu+tls" : "qemu+tcp", destinationIp);
-    }
-
     @Override
     public Answer execute(final MigrateCommand command, final LibvirtComputingResource libvirtComputingResource) {
-        final String vmName = command.getVmName();
-        final String destinationUri = createMigrationURI(command.getDestinationIp(), libvirtComputingResource);
-
-        String result = null;
-
-        List<InterfaceDef> ifaces = null;
-        List<DiskDef> disks;
-
-        Domain dm = null;
-        Connect dconn = null;
-        Domain destDomain = null;
-        Connect conn = null;
-        String xmlDesc = null;
-        List<Ternary<String, Boolean, String>> vmsnapshots = null;
-
-        try {
-            final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper();
-
-            conn = libvirtUtilitiesHelper.getConnectionByVmName(vmName);
-            ifaces = libvirtComputingResource.getInterfaces(conn, vmName);
-            disks = libvirtComputingResource.getDisks(conn, vmName);
-            dm = conn.domainLookupByName(vmName);
-            /*
-                We replace the private IP address with the address of the destination host.
-                This is because the VNC listens on the private IP address of the hypervisor,
-                but that address is of course different on the target host.
-
-                MigrateCommand.getDestinationIp() returns the private IP address of the target
-                hypervisor. So it's safe to use.
-
-                The Domain.migrate method from libvirt supports passing a different XML
-                description for the instance to be used on the target host.
-
-                This is supported by libvirt-java from version 0.50.0
-
-                CVE-2015-3252: Get XML with sensitive information suitable for migration by using
-                               VIR_DOMAIN_XML_MIGRATABLE flag (value = 8)
-                               https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainXMLFlags
-
-                               Use VIR_DOMAIN_XML_SECURE (value = 1) prior to v1.0.0.
-             */
-            final int xmlFlag = conn.getLibVirVersion() >= 1000000 ? 8 : 1; // 1000000 equals v1.0.0
-
-            final String target = command.getDestinationIp();
-            xmlDesc = dm.getXMLDesc(xmlFlag);
-            xmlDesc = replaceIpForVNCInDescFile(xmlDesc, target);
-
-            // delete the metadata of vm snapshots before migration
-            vmsnapshots = libvirtComputingResource.cleanVMSnapshotMetadata(dm);
-
-            Map<String, MigrateCommand.MigrateDiskInfo> mapMigrateStorage = command.getMigrateStorage();
-            // migrateStorage is declared as final because the replaceStorage method may mutate mapMigrateStorage, but
-            // migrateStorage's value should always only be associated with the initial state of mapMigrateStorage.
-            final boolean migrateStorage = MapUtils.isNotEmpty(mapMigrateStorage);
-
-            if (migrateStorage) {
-                xmlDesc = replaceStorage(xmlDesc, mapMigrateStorage);
-            }
-
-            dconn = libvirtUtilitiesHelper.retrieveQemuConnection(destinationUri);
-
-            //run migration in thread so we can monitor it
-            s_logger.info("Live migration of instance " + vmName + " initiated to destination host: " + dconn.getURI());
-            final ExecutorService executor = Executors.newFixedThreadPool(1);
-            final Callable<Domain> worker = new MigrateKVMAsync(libvirtComputingResource, dm, dconn, xmlDesc, migrateStorage,
-                    command.isAutoConvergence(), vmName, command.getDestinationIp());
-            final Future<Domain> migrateThread = executor.submit(worker);
-            executor.shutdown();
-            long sleeptime = 0;
-            while (!executor.isTerminated()) {
-                Thread.sleep(100);
-                sleeptime += 100;
-                if (sleeptime == 1000) { // wait 1s before attempting to set downtime on migration, since I don't know of a VIR_DOMAIN_MIGRATING state
-                    final int migrateDowntime = libvirtComputingResource.getMigrateDowntime();
-                    if (migrateDowntime > 0 ) {
-                        try {
-                            final int setDowntime = dm.migrateSetMaxDowntime(migrateDowntime);
-                            if (setDowntime == 0 ) {
-                                s_logger.debug("Set max downtime for migration of " + vmName + " to " + String.valueOf(migrateDowntime) + "ms");
-                            }
-                        } catch (final LibvirtException e) {
-                            s_logger.debug("Failed to set max downtime for migration, perhaps migration completed? Error: " + e.getMessage());
-                        }
-                    }
-                }
-                if (sleeptime % 1000 == 0) {
-                    s_logger.info("Waiting for migration of " + vmName + " to complete, waited " + sleeptime + "ms");
-                }
-
-                // pause vm if we meet the vm.migrate.pauseafter threshold and not already paused
-                final int migratePauseAfter = libvirtComputingResource.getMigratePauseAfter();
-                if (migratePauseAfter > 0 && sleeptime > migratePauseAfter) {
-                    DomainState state = null;
-                    try {
-                        state = dm.getInfo().state;
-                    } catch (final LibvirtException e) {
-                        s_logger.info("Couldn't get VM domain state after " + sleeptime + "ms: " + e.getMessage());
-                    }
-                    if (state != null && state == DomainState.VIR_DOMAIN_RUNNING) {
-                        try {
-                            s_logger.info("Pausing VM " + vmName + " due to property vm.migrate.pauseafter setting to " + migratePauseAfter + "ms to complete migration");
-                            dm.suspend();
-                        } catch (final LibvirtException e) {
-                            // pause could be racy if it attempts to pause right when vm is finished, simply warn
-                            s_logger.info("Failed to pause vm " + vmName + " : " + e.getMessage());
-                        }
-                    }
-                }
-            }
-            s_logger.info("Migration thread for " + vmName + " is done");
-
-            destDomain = migrateThread.get(10, TimeUnit.SECONDS);
-
-            if (destDomain != null) {
-                for (final DiskDef disk : disks) {
-                    libvirtComputingResource.cleanupDisk(disk);
-                }
-            }
-
-        } catch (final LibvirtException e) {
-            s_logger.debug("Can't migrate domain: " + e.getMessage());
-            result = e.getMessage();
-            if (result.startsWith("unable to connect to server") && result.endsWith("refused")) {
-                result = String.format("Migration was refused connection to destination: %s. Please check libvirt configuration compatibility and firewall rules on the source and destination hosts.", destinationUri);
-            }
-        } catch (final InterruptedException e) {
-            s_logger.debug("Interrupted while migrating domain: " + e.getMessage());
-            result = e.getMessage();
-        } catch (final ExecutionException e) {
-            s_logger.debug("Failed to execute while migrating domain: " + e.getMessage());
-            result = e.getMessage();
-        } catch (final TimeoutException e) {
-            s_logger.debug("Timed out while migrating domain: " + e.getMessage());
-            result = e.getMessage();
-        } catch (final IOException e) {
-            s_logger.debug("IOException: " + e.getMessage());
-            result = e.getMessage();
-        } catch (final ParserConfigurationException e) {
-            s_logger.debug("ParserConfigurationException: " + e.getMessage());
-            result = e.getMessage();
-        } catch (final SAXException e) {
-            s_logger.debug("SAXException: " + e.getMessage());
-            result = e.getMessage();
-        } catch (final TransformerConfigurationException e) {
-            s_logger.debug("TransformerConfigurationException: " + e.getMessage());
-            result = e.getMessage();
-        } catch (final TransformerException e) {
-            s_logger.debug("TransformerException: " + e.getMessage());
-            result = e.getMessage();
-        } finally {
-            try {
-                if (dm != null && result != null) {
-                    // restore vm snapshots in case of failed migration
-                    if (vmsnapshots != null) {
-                        libvirtComputingResource.restoreVMSnapshotMetadata(dm, vmName, vmsnapshots);
-                    }
-                }
-                if (dm != null) {
-                    if (dm.isPersistent() == 1) {
-                        dm.undefine();
-                    }
-                    dm.free();
-                }
-                if (dconn != null) {
-                    dconn.close();
-                }
-                if (destDomain != null) {
-                    destDomain.free();
-                }
-            } catch (final LibvirtException e) {
-                s_logger.trace("Ignoring libvirt error.", e);
-            }
-        }
-
-        if (result != null) {
-        } else {
-            libvirtComputingResource.destroyNetworkRulesForVM(conn, vmName);
-            for (final InterfaceDef iface : ifaces) {
-                // We don't know which "traffic type" is associated with
-                // each interface at this point, so inform all vif drivers
-                final List<VifDriver> allVifDrivers = libvirtComputingResource.getAllVifDrivers();
-                for (final VifDriver vifDriver : allVifDrivers) {
-                    vifDriver.unplug(iface);
-                }
-            }
-        }
-
+        final String result = LibvirtMigrationHelper.executeMigrationWithFlags(libvirtComputingResource, command.getVmName(), command.getDestinationIp(), libvirtComputingResource.getMigrateFlags(), command.getMigrateStorage(), command.isAutoConvergence());
         return new MigrateAnswer(command, result == null, result, null);
     }
 
-    /**
-     * This function assumes an qemu machine description containing a single graphics element like
-     *     <graphics type='vnc' port='5900' autoport='yes' listen='10.10.10.1'>
-     *       <listen type='address' address='10.10.10.1'/>
-     *     </graphics>
-     * @param xmlDesc the qemu xml description
-     * @param target the ip address to migrate to
-     * @return the new xmlDesc
-     */
-    String replaceIpForVNCInDescFile(String xmlDesc, final String target) {
-        final int begin = xmlDesc.indexOf(GRAPHICS_ELEM_START);
-        if (begin >= 0) {
-            final int end = xmlDesc.lastIndexOf(GRAPHICS_ELEM_END) + GRAPHICS_ELEM_END.length();
-            if (end > begin) {
-                String graphElem = xmlDesc.substring(begin, end);
-                graphElem = graphElem.replaceAll("listen='[a-zA-Z0-9\\.]*'", "listen='" + target + "'");
-                graphElem = graphElem.replaceAll("address='[a-zA-Z0-9\\.]*'", "address='" + target + "'");
-                xmlDesc = xmlDesc.replaceAll(GRAPHICS_ELEM_START + CONTENTS_WILDCARD + GRAPHICS_ELEM_END, graphElem);
-            }
-        }
-        return xmlDesc;
-    }
-
-    // Pass in a list of the disks to update in the XML (xmlDesc). Each disk passed in needs to have a serial number. If any disk's serial number in the
-    // list does not match a disk in the XML, an exception should be thrown.
-    // In addition to the serial number, each disk in the list needs the following info:
-    //   * The value of the 'type' of the disk (ex. file, block)
-    //   * The value of the 'type' of the driver of the disk (ex. qcow2, raw)
-    //   * The source of the disk needs an attribute that is either 'file' or 'dev' as well as its corresponding value.
-    private String replaceStorage(String xmlDesc, Map<String, MigrateCommand.MigrateDiskInfo> migrateStorage)
-            throws IOException, ParserConfigurationException, SAXException, TransformerException {
-        InputStream in = IOUtils.toInputStream(xmlDesc);
-
-        DocumentBuilderFactory docFactory = DocumentBuilderFactory.newInstance();
-        DocumentBuilder docBuilder = docFactory.newDocumentBuilder();
-        Document doc = docBuilder.parse(in);
-
-        // Get the root element
-        Node domainNode = doc.getFirstChild();
-
-        NodeList domainChildNodes = domainNode.getChildNodes();
-
-        for (int i = 0; i < domainChildNodes.getLength(); i++) {
-            Node domainChildNode = domainChildNodes.item(i);
-
-            if ("devices".equals(domainChildNode.getNodeName())) {
-                NodeList devicesChildNodes = domainChildNode.getChildNodes();
-
-                for (int x = 0; x < devicesChildNodes.getLength(); x++) {
-                    Node deviceChildNode = devicesChildNodes.item(x);
-
-                    if ("disk".equals(deviceChildNode.getNodeName())) {
-                        Node diskNode = deviceChildNode;
-
-                        String sourceFileDevText = getSourceFileDevText(diskNode);
-
-                        String path = getPathFromSourceFileDevText(migrateStorage.keySet(), sourceFileDevText);
-
-                        if (path != null) {
-                            MigrateCommand.MigrateDiskInfo migrateDiskInfo = migrateStorage.remove(path);
-
-                            NamedNodeMap diskNodeAttributes = diskNode.getAttributes();
-                            Node diskNodeAttribute = diskNodeAttributes.getNamedItem("type");
-
-                            diskNodeAttribute.setTextContent(migrateDiskInfo.getDiskType().toString());
-
-                            NodeList diskChildNodes = diskNode.getChildNodes();
-
-                            for (int z = 0; z < diskChildNodes.getLength(); z++) {
-                                Node diskChildNode = diskChildNodes.item(z);
-
-                                if ("driver".equals(diskChildNode.getNodeName())) {
-                                    Node driverNode = diskChildNode;
-
-                                    NamedNodeMap driverNodeAttributes = driverNode.getAttributes();
-                                    Node driverNodeAttribute = driverNodeAttributes.getNamedItem("type");
-
-                                    driverNodeAttribute.setTextContent(migrateDiskInfo.getDriverType().toString());
-                                } else if ("source".equals(diskChildNode.getNodeName())) {
-                                    diskNode.removeChild(diskChildNode);
-
-                                    Element newChildSourceNode = doc.createElement("source");
-
-                                    newChildSourceNode.setAttribute(migrateDiskInfo.getSource().toString(), migrateDiskInfo.getSourceText());
-
-                                    diskNode.appendChild(newChildSourceNode);
-                                } else if ("auth".equals(diskChildNode.getNodeName())) {
-                                    diskNode.removeChild(diskChildNode);
-                                } else if ("iotune".equals(diskChildNode.getNodeName())) {
-                                    diskNode.removeChild(diskChildNode);
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-        }
-
-        if (!migrateStorage.isEmpty()) {
-            throw new CloudRuntimeException("Disk info was passed into LibvirtMigrateCommandWrapper.replaceStorage that was not used.");
-        }
-
-        return getXml(doc);
-    }
-
-    private String getPathFromSourceFileDevText(Set<String> paths, String sourceFileDevText) {
-        if (paths != null && sourceFileDevText != null) {
-            for (String path : paths) {
-                if (sourceFileDevText.contains(path)) {
-                    return path;
-                }
-            }
-        }
-
-        return null;
-    }
-
-    private String getSourceFileDevText(Node diskNode) {
-        NodeList diskChildNodes = diskNode.getChildNodes();
-
-        for (int i = 0; i < diskChildNodes.getLength(); i++) {
-            Node diskChildNode = diskChildNodes.item(i);
-
-            if ("source".equals(diskChildNode.getNodeName())) {
-                NamedNodeMap diskNodeAttributes = diskChildNode.getAttributes();
-
-                Node diskNodeAttribute = diskNodeAttributes.getNamedItem("file");
-
-                if (diskNodeAttribute != null) {
-                    return diskNodeAttribute.getTextContent();
-                }
-
-                diskNodeAttribute = diskNodeAttributes.getNamedItem("dev");
-
-                if (diskNodeAttribute != null) {
-                    return diskNodeAttribute.getTextContent();
-                }
-            }
-        }
-
-        return null;
-    }
-
-    private String getXml(Document doc) throws TransformerException {
-        TransformerFactory transformerFactory = TransformerFactory.newInstance();
-        Transformer transformer = transformerFactory.newTransformer();
-
-        DOMSource source = new DOMSource(doc);
-
-        ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
-        StreamResult result = new StreamResult(byteArrayOutputStream);
-
-        transformer.transform(source, result);
-
-        return byteArrayOutputStream.toString();
-    }
 }
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateWithStorageCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateWithStorageCommandWrapper.java
new file mode 100644
index 00000000000..5b449c0cec4
--- /dev/null
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateWithStorageCommandWrapper.java
@@ -0,0 +1,56 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.hypervisor.kvm.resource.wrapper;
+
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.MigrateWithStorageAnswer;
+import com.cloud.agent.api.MigrateWithStorageCommand;
+import com.cloud.agent.api.to.DataTO;
+import com.cloud.agent.api.to.DiskTO;
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
+import com.cloud.resource.CommandWrapper;
+import com.cloud.resource.ResourceWrapper;
+import org.apache.cloudstack.storage.to.VolumeObjectTO;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+@ResourceWrapper(handles = MigrateWithStorageCommand.class)
+public final class LibvirtMigrateWithStorageCommandWrapper extends CommandWrapper<MigrateWithStorageCommand, Answer, LibvirtComputingResource> {
+
+    @Override
+    public Answer execute(final MigrateWithStorageCommand command, final LibvirtComputingResource libvirtComputingResource) {
+        List<VolumeObjectTO> volumes = new ArrayList<>();
+        VirtualMachineTO vm = command.getVirtualMachine();
+        List<DiskTO> disks = Arrays.asList(vm.getDisks());
+        for (DiskTO disk : disks) {
+            DataTO data = disk.getData();
+            if (data instanceof VolumeObjectTO) {
+                volumes.add((VolumeObjectTO) data);
+            }
+        }
+
+        final String result = LibvirtMigrationHelper.executeMigrationWithFlags(libvirtComputingResource, vm.getName(), command.getTargetHost(), libvirtComputingResource.getMigrateWithStorageFlags(), null, true);
+        return new MigrateWithStorageAnswer(command, volumes, result != null, result);
+
+    }
+}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrationHelper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrationHelper.java
new file mode 100644
index 00000000000..8fd05599325
--- /dev/null
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrationHelper.java
@@ -0,0 +1,441 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.hypervisor.kvm.resource.wrapper;
+
+import com.cloud.agent.api.MigrateCommand;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
+import com.cloud.hypervisor.kvm.resource.LibvirtVMDef;
+import com.cloud.hypervisor.kvm.resource.MigrateKVMAsync;
+import com.cloud.hypervisor.kvm.resource.MigrationBy;
+import com.cloud.hypervisor.kvm.resource.VifDriver;
+import com.cloud.utils.Ternary;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.google.common.base.Strings;
+import org.apache.commons.collections.MapUtils;
+import org.apache.commons.io.IOUtils;
+import org.apache.log4j.Logger;
+import org.libvirt.Connect;
+import org.libvirt.Domain;
+import org.libvirt.DomainInfo;
+import org.libvirt.LibvirtException;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.NamedNodeMap;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+import org.xml.sax.SAXException;
+
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+import javax.xml.transform.Transformer;
+import javax.xml.transform.TransformerConfigurationException;
+import javax.xml.transform.TransformerException;
+import javax.xml.transform.TransformerFactory;
+import javax.xml.transform.dom.DOMSource;
+import javax.xml.transform.stream.StreamResult;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Helper class for the migration tasks shared by MigrateCommand and MigrateWithStorageCommand.
+ */
+final class LibvirtMigrationHelper {
+
+    private static final String GRAPHICS_ELEM_END = "/graphics>";
+    private static final String GRAPHICS_ELEM_START = "<graphics";
+    private static final String CONTENTS_WILDCARD = "(?s).*";
+    private static final Logger s_logger = Logger.getLogger(LibvirtMigrationHelper.class);
+
+    protected static String executeMigrationWithFlags(final LibvirtComputingResource libvirtComputingResource, final String vmName, final String target, final long flags, Map<String, MigrateCommand.MigrateDiskInfo> mapMigrateStorage, boolean autoconverge) {
+        String result = null;
+
+        List<LibvirtVMDef.InterfaceDef> ifaces = null;
+        List<LibvirtVMDef.DiskDef> disks = null;
+
+        Domain dm = null;
+        Connect dconn = null;
+        Domain destDomain = null;
+        Connect conn = null;
+        String xmlDesc = null;
+        List<Ternary<String, Boolean, String>> vmsnapshots = null;
+        String migrationDestinationHost = null;
+        String destinationUri = null;
+        try {
+            final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper();
+
+            conn = libvirtUtilitiesHelper.getConnectionByVmName(vmName);
+            ifaces = libvirtComputingResource.getInterfaces(conn, vmName);
+            disks = libvirtComputingResource.getDisks(conn, vmName);
+            dm = conn.domainLookupByName(vmName);
+            /*
+                We replace the private IP address with the address of the destination host.
+                This is because the VNC listens on the private IP address of the hypervisor,
+                but that address is of course different on the target host.
+
+                MigrateCommand.getDestinationIp() returns the private IP address of the target
+                hypervisor. So it's safe to use.
+
+                The Domain.migrate method from libvirt supports passing a different XML
+                description for the instance to be used on the target host.
+
+                This is supported by libvirt-java from version 0.50.0
+
+                CVE-2015-3252: Get XML with sensitive information suitable for migration by using
+                               VIR_DOMAIN_XML_MIGRATABLE flag (value = 8)
+                               https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainXMLFlags
+
+                               Use VIR_DOMAIN_XML_SECURE (value = 1) prior to v1.0.0.
+             */
+            final int xmlFlag = conn.getLibVirVersion() >= 1000000 ? 8 : 1; // 1000000 equals v1.0.0
+
+            xmlDesc = dm.getXMLDesc(xmlFlag);
+            xmlDesc = replaceIpForVNCInDescFile(xmlDesc, target);
+
+            // delete the metadata of vm snapshots before migration
+            vmsnapshots = libvirtComputingResource.cleanVMSnapshotMetadata(dm);
+
+            // migrateStorage is declared as final because the replaceStorage method may mutate mapMigrateStorage, but
+            // migrateStorage's value should always only be associated with the initial state of mapMigrateStorage.
+            final boolean migrateStorage = MapUtils.isNotEmpty(mapMigrateStorage);
+
+            if (migrateStorage) {
+                xmlDesc = replaceStorage(xmlDesc, mapMigrateStorage);
+            }
+
+            if (libvirtComputingResource.getMigrateBy() == MigrationBy.HOSTNAME) {
+                InetAddress destAddress = null;
+                try {
+                    destAddress = InetAddress.getByName(target);
+                    migrationDestinationHost = destAddress.getCanonicalHostName();
+                } catch (UnknownHostException e) {
+                    s_logger.warn("Could not resolve the canonical hostname of migration destination " + target, e);
+                }
+            } else {
+                migrationDestinationHost = target;
+            }
+            destinationUri = createMigrationURI(migrationDestinationHost, libvirtComputingResource);
+
+            dconn = libvirtUtilitiesHelper.retrieveQemuConnection(destinationUri);
+
+            //run migration in thread so we can monitor it
+            s_logger.info("Live migration of instance " + vmName + " initiated to destination host " + migrationDestinationHost + " with flags " + flags + " and destination URI " + dconn.getURI());
+            final ExecutorService executor = Executors.newFixedThreadPool(1);
+            final Callable<Domain> worker = new MigrateKVMAsync(libvirtComputingResource, dm, dconn, xmlDesc, migrateStorage, autoconverge, vmName, flags);
+            final Future<Domain> migrateThread = executor.submit(worker);
+            executor.shutdown();
+            long sleeptime = 0;
+            while (!executor.isTerminated()) {
+                Thread.sleep(100);
+                sleeptime += 100;
+                if (sleeptime == 1000) { // wait 1s before attempting to set downtime on migration, since I don't know of a VIR_DOMAIN_MIGRATING state
+                    final int migrateDowntime = libvirtComputingResource.getMigrateDowntime();
+                    if (migrateDowntime > 0 ) {
+                        try {
+                            final int setDowntime = dm.migrateSetMaxDowntime(migrateDowntime);
+                            if (setDowntime == 0 ) {
+                                s_logger.debug("Set max downtime for migration of " + vmName + " to " + String.valueOf(migrateDowntime) + "ms");
+                            }
+                        } catch (final LibvirtException e) {
+                            s_logger.debug("Failed to set max downtime for migration, perhaps migration completed? Error: " + e.getMessage());
+                        }
+                    }
+                }
+                if (sleeptime % 1000 == 0) {
+                    s_logger.info("Waiting for migration of " + vmName + " to complete, waited " + sleeptime + "ms");
+                }
+
+                // pause vm if we meet the vm.migrate.pauseafter threshold and not already paused
+                final int migratePauseAfter = libvirtComputingResource.getMigratePauseAfter();
+                if (migratePauseAfter > 0 && sleeptime > migratePauseAfter) {
+                    DomainInfo.DomainState state = null;
+                    try {
+                        state = dm.getInfo().state;
+                    } catch (final LibvirtException e) {
+                        s_logger.info("Couldn't get VM domain state after " + sleeptime + "ms: " + e.getMessage());
+                    }
+                    if (state != null && state == DomainInfo.DomainState.VIR_DOMAIN_RUNNING) {
+                        try {
+                            s_logger.info("Pausing VM " + vmName + " because the vm.migrate.pauseafter threshold of " + migratePauseAfter + "ms was reached before migration completed");
+                            dm.suspend();
+                        } catch (final LibvirtException e) {
+                            // pause could be racy if it attempts to pause right when vm is finished, simply warn
+                            s_logger.info("Failed to pause vm " + vmName + " : " + e.getMessage());
+                        }
+                    }
+                }
+            }
+            s_logger.info("Migration thread for " + vmName + " is done");
+
+            destDomain = migrateThread.get(10, TimeUnit.SECONDS);
+
+            if (destDomain != null) {
+                for (final LibvirtVMDef.DiskDef disk : disks) {
+                    libvirtComputingResource.cleanupDisk(disk);
+                }
+            }
+        } catch (final LibvirtException e) {
+            s_logger.debug("Can't migrate domain: " + e.getMessage());
+            result = e.getMessage();
+            if (result.startsWith("unable to connect to server") && result.endsWith("refused")) {
+                result = String.format("Migration was refused connection to destination: %s. Please check libvirt configuration compatibility and firewall rules on the source and destination hosts.", destinationUri);
+            }
+        } catch (final InterruptedException e) {
+            s_logger.debug("Interrupted while migrating domain: " + e.getMessage());
+            result = e.getMessage();
+        } catch (final ExecutionException e) {
+            s_logger.debug("Failed to execute while migrating domain: " + e.getMessage());
+            result = e.getMessage();
+        } catch (final TimeoutException e) {
+            s_logger.debug("Timed out while migrating domain: " + e.getMessage());
+            result = e.getMessage();
+        } catch (final IOException e) {
+            s_logger.debug("IOException: " + e.getMessage());
+            result = e.getMessage();
+        } catch (final ParserConfigurationException e) {
+            s_logger.debug("ParserConfigurationException: " + e.getMessage());
+            result = e.getMessage();
+        } catch (final SAXException e) {
+            s_logger.debug("SAXException: " + e.getMessage());
+            result = e.getMessage();
+        } catch (final TransformerConfigurationException e) {
+            s_logger.debug("TransformerConfigurationException: " + e.getMessage());
+            result = e.getMessage();
+        } catch (final TransformerException e) {
+            s_logger.debug("TransformerException: " + e.getMessage());
+            result = e.getMessage();
+        } finally {
+            try {
+                if (dm != null && result != null) {
+                    // restore vm snapshots in case of failed migration
+                    if (vmsnapshots != null) {
+                        libvirtComputingResource.restoreVMSnapshotMetadata(dm, vmName, vmsnapshots);
+                    }
+                }
+                if (dm != null) {
+                    if (dm.isPersistent() == 1) {
+                        dm.undefine();
+                    }
+                    dm.free();
+                }
+                if (dconn != null) {
+                    dconn.close();
+                }
+                if (destDomain != null) {
+                    destDomain.free();
+                }
+            } catch (final LibvirtException e) {
+                s_logger.trace("Ignoring libvirt error.", e);
+            }
+        }
+
+        if (result == null) {
+            libvirtComputingResource.destroyNetworkRulesForVM(conn, vmName);
+            for (final LibvirtVMDef.InterfaceDef iface : ifaces) {
+                // We don't know which "traffic type" is associated with
+                // each interface at this point, so inform all vif drivers
+                final List<VifDriver> allVifDrivers = libvirtComputingResource.getAllVifDrivers();
+                for (final VifDriver vifDriver : allVifDrivers) {
+                    vifDriver.unplug(iface);
+                }
+            }
+        }
+
+        return result;
+    }
+
+    /**
+     * This function assumes a qemu machine description containing a single graphics element like
+     *     <graphics type='vnc' port='5900' autoport='yes' listen='10.10.10.1'>
+     *       <listen type='address' address='10.10.10.1'/>
+     *     </graphics>
+     * @param xmlDesc the qemu xml description
+     * @param target the ip address to migrate to
+     * @return the new xmlDesc
+     */
+    protected static String replaceIpForVNCInDescFile(String xmlDesc, final String target) {
+        final int begin = xmlDesc.indexOf(GRAPHICS_ELEM_START);
+        if (begin >= 0) {
+            final int end = xmlDesc.lastIndexOf(GRAPHICS_ELEM_END) + GRAPHICS_ELEM_END.length();
+            if (end > begin) {
+                String graphElem = xmlDesc.substring(begin, end);
+                graphElem = graphElem.replaceAll("listen='[a-zA-Z0-9\\.]*'", "listen='" + target + "'");
+                graphElem = graphElem.replaceAll("address='[a-zA-Z0-9\\.]*'", "address='" + target + "'");
+                xmlDesc = xmlDesc.replaceAll(GRAPHICS_ELEM_START + CONTENTS_WILDCARD + GRAPHICS_ELEM_END, graphElem);
+            }
+        }
+        return xmlDesc;
+    }
+
+    // Pass in a list of the disks to update in the XML (xmlDesc). Each disk passed in needs to have a serial number. If any disk's serial number in the
+    // list does not match a disk in the XML, an exception should be thrown.
+    // In addition to the serial number, each disk in the list needs the following info:
+    //   * The value of the 'type' of the disk (ex. file, block)
+    //   * The value of the 'type' of the driver of the disk (ex. qcow2, raw)
+    //   * The source of the disk needs an attribute that is either 'file' or 'dev' as well as its corresponding value.
+    private static String replaceStorage(String xmlDesc, Map<String, MigrateCommand.MigrateDiskInfo> migrateStorage)
+            throws IOException, ParserConfigurationException, SAXException, TransformerException {
+        InputStream in = IOUtils.toInputStream(xmlDesc);
+
+        DocumentBuilderFactory docFactory = DocumentBuilderFactory.newInstance();
+        DocumentBuilder docBuilder = docFactory.newDocumentBuilder();
+        Document doc = docBuilder.parse(in);
+
+        // Get the root element
+        Node domainNode = doc.getFirstChild();
+
+        NodeList domainChildNodes = domainNode.getChildNodes();
+
+        for (int i = 0; i < domainChildNodes.getLength(); i++) {
+            Node domainChildNode = domainChildNodes.item(i);
+
+            if ("devices".equals(domainChildNode.getNodeName())) {
+                NodeList devicesChildNodes = domainChildNode.getChildNodes();
+
+                for (int x = 0; x < devicesChildNodes.getLength(); x++) {
+                    Node deviceChildNode = devicesChildNodes.item(x);
+
+                    if ("disk".equals(deviceChildNode.getNodeName())) {
+                        Node diskNode = deviceChildNode;
+
+                        String sourceFileDevText = getSourceFileDevText(diskNode);
+
+                        String path = getPathFromSourceFileDevText(migrateStorage.keySet(), sourceFileDevText);
+
+                        if (path != null) {
+                            MigrateCommand.MigrateDiskInfo migrateDiskInfo = migrateStorage.remove(path);
+
+                            NamedNodeMap diskNodeAttributes = diskNode.getAttributes();
+                            Node diskNodeAttribute = diskNodeAttributes.getNamedItem("type");
+
+                            diskNodeAttribute.setTextContent(migrateDiskInfo.getDiskType().toString());
+
+                            NodeList diskChildNodes = diskNode.getChildNodes();
+
+                            for (int z = 0; z < diskChildNodes.getLength(); z++) {
+                                Node diskChildNode = diskChildNodes.item(z);
+
+                                if ("driver".equals(diskChildNode.getNodeName())) {
+                                    Node driverNode = diskChildNode;
+
+                                    NamedNodeMap driverNodeAttributes = driverNode.getAttributes();
+                                    Node driverNodeAttribute = driverNodeAttributes.getNamedItem("type");
+
+                                    driverNodeAttribute.setTextContent(migrateDiskInfo.getDriverType().toString());
+                                } else if ("source".equals(diskChildNode.getNodeName())) {
+                                    diskNode.removeChild(diskChildNode);
+
+                                    Element newChildSourceNode = doc.createElement("source");
+
+                                    newChildSourceNode.setAttribute(migrateDiskInfo.getSource().toString(), migrateDiskInfo.getSourceText());
+
+                                    diskNode.appendChild(newChildSourceNode);
+                                } else if ("auth".equals(diskChildNode.getNodeName())) {
+                                    diskNode.removeChild(diskChildNode);
+                                } else if ("iotune".equals(diskChildNode.getNodeName())) {
+                                    diskNode.removeChild(diskChildNode);
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        if (!migrateStorage.isEmpty()) {
+            throw new CloudRuntimeException("Disk info was passed into LibvirtMigrationHelper.replaceStorage that was not used.");
+        }
+
+        return getXml(doc);
+    }
+
+    private static String getPathFromSourceFileDevText(Set<String> paths, String sourceFileDevText) {
+        if (paths != null && sourceFileDevText != null) {
+            for (String path : paths) {
+                if (sourceFileDevText.contains(path)) {
+                    return path;
+                }
+            }
+        }
+
+        return null;
+    }
+
+    private static String getSourceFileDevText(Node diskNode) {
+        NodeList diskChildNodes = diskNode.getChildNodes();
+
+        for (int i = 0; i < diskChildNodes.getLength(); i++) {
+            Node diskChildNode = diskChildNodes.item(i);
+
+            if ("source".equals(diskChildNode.getNodeName())) {
+                NamedNodeMap diskNodeAttributes = diskChildNode.getAttributes();
+
+                Node diskNodeAttribute = diskNodeAttributes.getNamedItem("file");
+
+                if (diskNodeAttribute != null) {
+                    return diskNodeAttribute.getTextContent();
+                }
+
+                diskNodeAttribute = diskNodeAttributes.getNamedItem("dev");
+
+                if (diskNodeAttribute != null) {
+                    return diskNodeAttribute.getTextContent();
+                }
+            }
+        }
+
+        return null;
+    }
+
+    private static String getXml(Document doc) throws TransformerException {
+        TransformerFactory transformerFactory = TransformerFactory.newInstance();
+        Transformer transformer = transformerFactory.newTransformer();
+
+        DOMSource source = new DOMSource(doc);
+
+        ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
+        StreamResult result = new StreamResult(byteArrayOutputStream);
+
+        transformer.transform(source, result);
+
+        return byteArrayOutputStream.toString();
+    }
+
+    protected static String createMigrationURI(final String destinationIp, final LibvirtComputingResource libvirtComputingResource) {
+        if (Strings.isNullOrEmpty(destinationIp)) {
+            throw new CloudRuntimeException("Provided libvirt destination ip is invalid");
+        }
+        return String.format("%s://%s/system", libvirtComputingResource.isHostSecured() ? "qemu+tls" : "qemu+tcp", destinationIp);
+    }
+}
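
As a side note for reviewers: the <graphics> rewrite documented above in LibvirtMigrationHelper.replaceIpForVNCInDescFile can be exercised in isolation. The following minimal sketch (not part of this PR) reproduces the same string manipulation on a made-up domain XML and target address; the class name and sample values are purely illustrative.

    // Standalone sketch of the listen/address rewrite applied before migration (illustrative only).
    public class VncListenRewriteSketch {
        public static void main(String[] args) {
            String xmlDesc =
                "<domain type='kvm'><devices>" +
                "<graphics type='vnc' port='5900' autoport='yes' listen='10.10.10.1'>" +
                "<listen type='address' address='10.10.10.1'/>" +
                "</graphics></devices></domain>";
            final String target = "10.10.10.10";   // private IP of the destination hypervisor

            final int begin = xmlDesc.indexOf("<graphics");
            final int end = xmlDesc.lastIndexOf("/graphics>") + "/graphics>".length();
            if (begin >= 0 && end > begin) {
                String graphElem = xmlDesc.substring(begin, end);
                // Point both the listen and address attributes at the destination host.
                graphElem = graphElem.replaceAll("listen='[a-zA-Z0-9\\.]*'", "listen='" + target + "'");
                graphElem = graphElem.replaceAll("address='[a-zA-Z0-9\\.]*'", "address='" + target + "'");
                xmlDesc = xmlDesc.replaceAll("<graphics" + "(?s).*" + "/graphics>", graphElem);
            }
            System.out.println(xmlDesc);   // both VNC attributes now point at 10.10.10.10
        }
    }

The regex only rewrites the listen/address attributes inside the single <graphics> element, which is why the helper's javadoc assumes exactly one such element in the domain description.
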
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java
index c6135080671..360984b21a2 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java
@@ -348,11 +348,6 @@ public boolean deleteStoragePool(StoragePoolType type, String uuid) {
         return true;
     }
 
-    public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name, Storage.ProvisioningType provisioningType,
-                                                    KVMStoragePool destPool, int timeout) {
-        return createDiskFromTemplate(template, name, provisioningType, destPool, template.getSize(), timeout);
-    }
-
     public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name, Storage.ProvisioningType provisioningType,
                                                     KVMStoragePool destPool, long size, int timeout) {
         StorageAdaptor adaptor = getStorageAdaptor(destPool.getType());
diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/storage/motion/KVMStorageMotionStrategy.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/storage/motion/KVMStorageMotionStrategy.java
new file mode 100644
index 00000000000..54b66163989
--- /dev/null
+++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/storage/motion/KVMStorageMotionStrategy.java
@@ -0,0 +1,282 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.motion;
+
+
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.CancelMigrationAnswer;
+import com.cloud.agent.api.CancelMigrationCommand;
+import com.cloud.agent.api.MigrateWithStorageAnswer;
+import com.cloud.agent.api.MigrateWithStorageCommand;
+import com.cloud.agent.api.storage.CreateAnswer;
+import com.cloud.agent.api.storage.CreateCommand;
+import com.cloud.agent.api.to.DataObjectType;
+import com.cloud.agent.api.to.StorageFilerTO;
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.agent.api.to.VolumeTO;
+import com.cloud.exception.AgentUnavailableException;
+import com.cloud.exception.OperationTimedoutException;
+import com.cloud.host.Host;
+import com.cloud.hypervisor.Hypervisor;
+import com.cloud.storage.DataStoreRole;
+import com.cloud.storage.DiskOfferingVO;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.Volume;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.DiskOfferingDao;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.utils.Pair;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.vm.DiskProfile;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.dao.VMInstanceDao;
+import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
+import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
+import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
+import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory;
+import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
+import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
+import org.apache.cloudstack.framework.config.ConfigKey;
+import org.apache.cloudstack.storage.command.DeleteCommand;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.to.VolumeObjectTO;
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+
+import javax.inject.Inject;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+@Component
+public class KVMStorageMotionStrategy implements DataMotionStrategy {
+    private static final Logger s_logger = Logger.getLogger(KVMStorageMotionStrategy.class);
+
+    protected final ConfigKey<Integer> MigrateWait = new ConfigKey<Integer>(Integer.class, "migratewait", "Advanced", "3600", "Time (in seconds) to wait for the VM migration to finish.", true, ConfigKey.Scope.Global, null);
+
+    @Inject
+    AgentManager agentMgr;
+    @Inject
+    VolumeDao volDao;
+    @Inject
+    VolumeDataFactory volFactory;
+    @Inject
+    PrimaryDataStoreDao storagePoolDao;
+    @Inject
+    VMInstanceDao instanceDao;
+    @Inject
+    DiskOfferingDao diskOfferingDao;
+    @Inject
+    TemplateDataFactory tmplFactory;
+    @Inject
+    StorageManager storageManager;
+    @Inject
+    EndPointSelector endPointSelector;
+    @Inject
+    DataStoreManager dataStoreManager;
+
+    @Override
+    public StrategyPriority canHandle(DataObject srcData, DataObject destData) {
+        return StrategyPriority.CANT_HANDLE;
+    }
+
+    @Override
+    public StrategyPriority canHandle(Map<VolumeInfo, DataStore> volumeMap, Host srcHost, Host destHost) {
+        if (srcHost.getHypervisorType() == Hypervisor.HypervisorType.KVM && destHost.getHypervisorType() == Hypervisor.HypervisorType.KVM) {
+            return StrategyPriority.HYPERVISOR;
+        }
+        return StrategyPriority.CANT_HANDLE;
+    }
+
+    @Override
+    public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback<CopyCommandResult> callback) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void copyAsync(Map<VolumeInfo, DataStore> volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost, AsyncCompletionCallback<CopyCommandResult> callback) {
+        Answer answer = null;
+        String errMsg = null;
+        try {
+            VMInstanceVO instance = instanceDao.findById(vmTo.getId());
+            if (instance != null) {
+                answer = migrateVmWithVolumes(instance, vmTo, srcHost, destHost, volumeMap);
+                errMsg = answer.getDetails();
+
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug("Got answer from migration, result: " + (answer.getResult() ? "ok" : "failed, reason: " + (answer.getDetails() == null ? "<null>" : answer.getDetails())));
+                }
+
+            } else {
+                throw new CloudRuntimeException("Could not find the VM instance with id " + vmTo.getId() + " to migrate with its volumes.");
+            }
+        } catch (Exception e) {
+            s_logger.error("copyAsync failed", e);
+            errMsg = e.getMessage();
+        }
+
+        CopyCommandResult result = new CopyCommandResult("", answer);
+
+        if (answer != null) {
+            result.setSuccess(answer.getResult());
+            if (!answer.getResult()) {
+                result.setResult(errMsg);
+            }
+        } else {
+            result.setSuccess(false);
+            result.setResult(errMsg);
+        }
+
+        callback.complete(result);
+    }
+
+    private Answer migrateVmWithVolumes(VMInstanceVO vm, VirtualMachineTO to, Host srcHost, Host destHost, Map<VolumeInfo, DataStore> volumeToPool) throws AgentUnavailableException {
+
+        // Initiate migration of a virtual machine with its volumes.
+        List<Pair<VolumeTO, StorageFilerTO>> volumeToFilerTo = new ArrayList<Pair<VolumeTO, StorageFilerTO>>();
+        VolumeVO rootVolume = null;
+        VolumeInfo rootVolumeInfo = null;
+        for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
+            VolumeInfo volume = entry.getKey();
+            VolumeTO volumeTo = new VolumeTO(volume, storagePoolDao.findById(volume.getPoolId()));
+            StorageFilerTO filerTo = new StorageFilerTO((StoragePool)entry.getValue());
+            volumeToFilerTo.add(new Pair<VolumeTO, StorageFilerTO>(volumeTo, filerTo));
+            if (volume.getType().equals(DataObjectType.VOLUME) && volume.getVolumeType().equals(Volume.Type.ROOT)) {
+                rootVolume = volDao.findById(volume.getId());
+                rootVolumeInfo = volume;
+            }
+        }
+
+        DiskOfferingVO diskOffering = diskOfferingDao.findById(rootVolume.getDiskOfferingId());
+        DiskProfile diskProfile = new DiskProfile(rootVolume, diskOffering, vm.getHypervisorType());
+        StoragePool destStoragePool = storageManager.findLocalStorageOnHost(destHost.getId());
+        TemplateInfo templateImage = tmplFactory.getTemplate(rootVolume.getTemplateId(), DataStoreRole.Image);
+
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Provisioning disk " + diskProfile.toString() + " on destination host");
+        }
+
+        try {
+            CreateCommand provisioningCommand = new CreateCommand(diskProfile, templateImage.getUuid(), destStoragePool, true);
+            CreateAnswer provisioningAnswer = (CreateAnswer) agentMgr.send(destHost.getId(), provisioningCommand);
+            if (provisioningAnswer == null) {
+                s_logger.error("Migration with storage of vm " + vm + " failed while provisioning root image");
+                throw new CloudRuntimeException("Error while provisioning root image for the vm " + vm + " during migration to host " + destHost);
+            } else if (!provisioningAnswer.getResult()) {
+                s_logger.error("Migration with storage of vm " + vm + " failed. Details: " + provisioningAnswer.getDetails());
+                throw new CloudRuntimeException("Error while provisioning root image for the vm " + vm + " during migration to host " + destHost +
+                        ". " + provisioningAnswer.getDetails());
+            }
+        } catch (OperationTimedoutException e) {
+            throw new AgentUnavailableException("Operation timed out while provisioning the root disk for migration of " + vm, destHost.getId());
+        }
+
+        MigrateWithStorageCommand command = null;
+        try {
+            command = new MigrateWithStorageCommand(to, volumeToFilerTo, destHost.getPrivateIpAddress());
+            command.setWait(MigrateWait.value());
+            MigrateWithStorageAnswer answer = (MigrateWithStorageAnswer) agentMgr.send(srcHost.getId(), command);
+            if (answer == null) {
+                s_logger.error("Migration with storage of vm " + vm + " failed.");
+                throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost);
+            } else if (!answer.getResult()) {
+                s_logger.error("Migration with storage of vm " + vm + " failed. Details: " + answer.getDetails());
+                throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost +
+                        ". " + answer.getDetails());
+            } else {
+                // Update the volume details after migration.
+                updateVolumePathsAfterMigration(volumeToPool, answer.getVolumeTos());
+            }
+
+            return answer;
+
+        } catch (OperationTimedoutException e) {
+            s_logger.error("Operation timeout error while migrating vm " + vm + " to host " + destHost + ", aborting the migration.", e);
+            // Try to abort the migration when we reach the timeout. We request that the job be aborted, but we're not
+            // sure it will work. Only when the command has been aborted successfully should we delete the remote
+            // disk. Otherwise we leave everything as it is and defer to manual intervention.
+            try {
+                CancelMigrationCommand cancelMigrationCommand = new CancelMigrationCommand(vm.getInstanceName());
+                CancelMigrationAnswer cancelMigrationAnswer = (CancelMigrationAnswer) agentMgr.send(srcHost.getId(), cancelMigrationCommand);
+
+                if (cancelMigrationAnswer.getResult()) {
+                    s_logger.info("Migration aborted successfully.");
+                    // We can safely delete the previously created disk at the destination
+                    VolumeObjectTO volumeTO = new VolumeObjectTO(rootVolumeInfo);
+                    DataStore destDataStore = dataStoreManager.getDataStore(destStoragePool.getId(), DataStoreRole.Primary);
+                    s_logger.info("Requesting volume deletion on destination data store " + destDataStore.getId());
+                    volumeTO.setDataStore(destDataStore.getTO());
+                    DeleteCommand dtCommand = new DeleteCommand(volumeTO);
+                    EndPoint ep = endPointSelector.select(destDataStore);
+                    if (ep != null) {
+                        Answer answer = ep.sendMessage(dtCommand);
+                        if (answer.getResult()) {
+                            s_logger.info("Volume on the migration destination has been removed.");
+                        } else {
+                            s_logger.warn("Could not remove the volume on the migration destination.");
+                        }
+                    } else {
+                        s_logger.error("Could not find an endpoint to send the delete command");
+                    }
+
+                } else {
+                    s_logger.fatal("Could not abort the migration, manual intervention is required!");
+                }
+                return new MigrateWithStorageAnswer(command, false, cancelMigrationAnswer.getResult(), e);
+            } catch (OperationTimedoutException e1) {
+                s_logger.error("Timeout error while trying to abort the migration job", e);
+                throw new AgentUnavailableException("Operation timed out on storage motion for " + vm + " but migration job could not be aborted. Manual intervention required!", destHost.getId());
+            }
+        }
+    }
+
+    private void updateVolumePathsAfterMigration(Map<VolumeInfo, DataStore> volumeToPool, List<VolumeObjectTO> volumeTos) {
+        for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
+            boolean updated = false;
+            VolumeInfo volume = entry.getKey();
+            StoragePool pool = (StoragePool)entry.getValue();
+            for (VolumeObjectTO volumeTo : volumeTos) {
+                if (volume.getId() == volumeTo.getId()) {
+                    VolumeVO volumeVO = volDao.findById(volume.getId());
+                    Long oldPoolId = volumeVO.getPoolId();
+                    volumeVO.setPath(volumeTo.getPath());
+                    volumeVO.setPodId(pool.getPodId());
+                    volumeVO.setPoolId(pool.getId());
+                    volumeVO.setLastPoolId(oldPoolId);
+                    volumeVO.setFolder(pool.getPath());
+
+                    volDao.update(volume.getId(), volumeVO);
+                    updated = true;
+                    break;
+                }
+            }
+
+            if (!updated) {
+                s_logger.error("Volume path wasn't updated for volume " + volume + " after it was migrated.");
+            }
+        }
+    }
+}
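
For clarity on the bookkeeping performed once the MigrateWithStorageAnswer comes back, here is a simplified, self-contained analogue (not part of this PR) of updateVolumePathsAfterMigration. VolumeRecord and MigratedVolume are hypothetical stand-ins for VolumeVO and VolumeObjectTO, and an in-memory mutation replaces the volDao.update call.

    import java.util.Arrays;
    import java.util.List;

    // Illustrative sketch: copy the destination path/pool onto the matching volume record.
    public class VolumePathUpdateSketch {

        static class VolumeRecord {            // hypothetical stand-in for VolumeVO
            long id; String path; Long poolId; Long lastPoolId;
            VolumeRecord(long id, String path, Long poolId) { this.id = id; this.path = path; this.poolId = poolId; }
        }

        static class MigratedVolume {          // hypothetical stand-in for VolumeObjectTO
            long id; String newPath; long newPoolId;
            MigratedVolume(long id, String newPath, long newPoolId) { this.id = id; this.newPath = newPath; this.newPoolId = newPoolId; }
        }

        static void updatePaths(List<VolumeRecord> records, List<MigratedVolume> migrated) {
            for (VolumeRecord record : records) {
                boolean updated = false;
                for (MigratedVolume m : migrated) {
                    if (record.id == m.id) {
                        record.lastPoolId = record.poolId;  // remember the pool the volume came from
                        record.path = m.newPath;            // path reported by the destination host
                        record.poolId = m.newPoolId;        // the volume now lives on the destination pool
                        updated = true;
                        break;
                    }
                }
                if (!updated) {
                    System.err.println("Volume " + record.id + " was not updated after migration");
                }
            }
        }

        public static void main(String[] args) {
            List<VolumeRecord> records = Arrays.asList(new VolumeRecord(42L, "old-uuid", 1L));
            List<MigratedVolume> migrated = Arrays.asList(new MigratedVolume(42L, "new-uuid", 7L));
            updatePaths(records, migrated);
            System.out.println(records.get(0).path + " on pool " + records.get(0).poolId);  // new-uuid on pool 7
        }
    }

As in the strategy itself, volumes are matched purely by id, the previous pool is kept in lastPoolId, and a missing match is only logged rather than treated as a hard failure.
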
diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java
index 795b96175ab..8b62615ad58 100644
--- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java
+++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java
@@ -1259,6 +1259,7 @@ public void testMigrateCommand() {
         final MigrateCommand command = new MigrateCommand(vmName, destIp, isWindows, vmTO, executeInSequence );
 
         when(libvirtComputingResource.getLibvirtUtilitiesHelper()).thenReturn(libvirtUtilitiesHelper);
+        when(libvirtComputingResource.getLibvirtConnectionProtocol()).thenReturn("qemu+tcp://");
         try {
             when(libvirtUtilitiesHelper.getConnectionByVmName(vmName)).thenReturn(conn);
             when(libvirtUtilitiesHelper.retrieveQemuConnection("qemu+tcp://" + command.getDestinationIp() + "/system")).thenReturn(dconn);
@@ -1750,7 +1751,7 @@ public void testCreateCommandCLVM() {
 
         when(primary.getPhysicalDisk(command.getTemplateUrl())).thenReturn(baseVol);
         when(poolManager.createDiskFromTemplate(baseVol,
-                diskCharacteristics.getPath(), diskCharacteristics.getProvisioningType(), primary, 0)).thenReturn(vol);
+                diskCharacteristics.getPath(), diskCharacteristics.getProvisioningType(), primary, baseVol.getSize(), 0)).thenReturn(vol);
 
         final LibvirtRequestWrapper wrapper = LibvirtRequestWrapper.getInstance();
         assertNotNull(wrapper);
diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapperTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapperTest.java
index da71e40c30f..ca7652ce85d 100644
--- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapperTest.java
+++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapperTest.java
@@ -255,8 +255,7 @@
     @Test
     public void testReplaceIpForVNCInDescFile() {
         final String targetIp = "192.168.22.21";
-        final LibvirtMigrateCommandWrapper lw = new LibvirtMigrateCommandWrapper();
-        final String result = lw.replaceIpForVNCInDescFile(fullfile, targetIp);
+        final String result = LibvirtMigrationHelper.replaceIpForVNCInDescFile(fullfile, targetIp);
         assertTrue("transformation does not live up to expectation:\n" + result, targetfile.equals(result));
     }
 
@@ -279,8 +278,7 @@ public void testReplaceIpForVNCInDesc() {
                 "  </devices>" +
                 "</domain>";
         final String targetIp = "10.10.10.10";
-        final LibvirtMigrateCommandWrapper lw = new LibvirtMigrateCommandWrapper();
-        final String result = lw.replaceIpForVNCInDescFile(xmlDesc, targetIp);
+        final String result = LibvirtMigrationHelper.replaceIpForVNCInDescFile(xmlDesc, targetIp);
         assertTrue("transformation does not live up to expectation:\n" + result, expectedXmlDesc.equals(result));
     }
 
@@ -303,26 +301,23 @@ public void testReplaceFqdnForVNCInDesc() {
                 "  </devices>" +
                 "</domain>";
         final String targetIp = "localhost.localdomain";
-        final LibvirtMigrateCommandWrapper lw = new LibvirtMigrateCommandWrapper();
-        final String result = lw.replaceIpForVNCInDescFile(xmlDesc, targetIp);
+        final String result = LibvirtMigrationHelper.replaceIpForVNCInDescFile(xmlDesc, targetIp);
         assertTrue("transformation does not live up to expectation:\n" + result, expectedXmlDesc.equals(result));
     }
 
     @Test
     public void testMigrationUri() {
         final String ip = "10.1.1.1";
-        LibvirtMigrateCommandWrapper lw = new LibvirtMigrateCommandWrapper();
         LibvirtComputingResource lcr = new LibvirtComputingResource();
         if (lcr.isHostSecured()) {
-            assertEquals(lw.createMigrationURI(ip, lcr), String.format("qemu+tls://%s/system", ip));
+            assertEquals(LibvirtMigrationHelper.createMigrationURI(ip, lcr), String.format("qemu+tls://%s/system", ip));
         } else {
-            assertEquals(lw.createMigrationURI(ip, lcr), String.format("qemu+tcp://%s/system", ip));
+            assertEquals(LibvirtMigrationHelper.createMigrationURI(ip, lcr), String.format("qemu+tcp://%s/system", ip));
         }
     }
 
     @Test(expected = CloudRuntimeException.class)
     public void testMigrationUriException() {
-        LibvirtMigrateCommandWrapper lw = new LibvirtMigrateCommandWrapper();
-        lw.createMigrationURI(null, new LibvirtComputingResource());
+        LibvirtMigrationHelper.createMigrationURI(null, new LibvirtComputingResource());
     }
 }
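
The tests above now call the migration URI and VNC helpers as static methods on LibvirtMigrationHelper instead of instantiating the command wrapper. A minimal sketch of a static URI builder that satisfies the assertions in testMigrationUri and testMigrationUriException (the real helper lives in the KVM plugin and may do more; the exception here is a plain IllegalArgumentException rather than CloudRuntimeException):

    // Illustrative only: builds qemu+tls://<ip>/system for secured hosts and
    // qemu+tcp://<ip>/system otherwise, and rejects a missing destination.
    final class MigrationUriSketch {
        static String createMigrationUri(String destinationIp, boolean hostSecured) {
            if (destinationIp == null || destinationIp.isEmpty()) {
                throw new IllegalArgumentException("A destination IP is required to build the migration URI");
            }
            final String scheme = hostSecured ? "qemu+tls" : "qemu+tcp";
            return String.format("%s://%s/system", scheme, destinationIp);
        }

        public static void main(String[] args) {
            System.out.println(createMigrationUri("10.1.1.1", false)); // qemu+tcp://10.1.1.1/system
            System.out.println(createMigrationUri("10.1.1.1", true));  // qemu+tls://10.1.1.1/system
        }
    }
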
diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java
index d2e8b91d7f0..ebe98bcc7b1 100644
--- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java
+++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java
@@ -4029,7 +4029,7 @@ protected Answer execute(MigrateWithStorageCommand cmd) {
                 }
             }
 
-            return new MigrateWithStorageAnswer(cmd, volumeToList);
+            return new MigrateWithStorageAnswer(cmd, volumeToList, false, null);
         } catch (Throwable e) {
             if (e instanceof RemoteException) {
                 s_logger.warn("Encountered remote exception at vCenter, invalidating VMware session context");
@@ -4038,7 +4038,7 @@ protected Answer execute(MigrateWithStorageCommand cmd) {
 
             String msg = "MigrationCommand failed due to " + VmwareHelper.getExceptionMessage(e);
             s_logger.warn(msg, e);
-            return new MigrateWithStorageAnswer(cmd, (Exception)e);
+            return new MigrateWithStorageAnswer(cmd, false, false, (Exception)e);
         } finally {
             // Cleanup datastores mounted on source host
             for (String mountedDatastore : mountedDatastoresAtSource) {
diff --git a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageCommandWrapper.java b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageCommandWrapper.java
index f3f9f643466..056753420a4 100644
--- a/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageCommandWrapper.java
+++ b/plugins/hypervisors/xenserver/src/main/java/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageCommandWrapper.java
@@ -124,10 +124,10 @@ public Answer execute(final MigrateWithStorageCommand command, final XenServer61
             // Volume paths would have changed. Return that information.
             final List<VolumeObjectTO> volumeToList = xenServer610Resource.getUpdatedVolumePathsOfMigratedVm(connection, vmToMigrate, vmSpec.getDisks());
             vmToMigrate.setAffinity(connection, host);
-            return new MigrateWithStorageAnswer(command, volumeToList);
+            return new MigrateWithStorageAnswer(command, volumeToList, false, null);
         } catch (final Exception e) {
             s_logger.warn("Catch Exception " + e.getClass().getName() + ". Storage motion failed due to " + e.toString(), e);
-            return new MigrateWithStorageAnswer(command, e);
+            return new MigrateWithStorageAnswer(command, false, false, e);
         } finally {
             if (task != null) {
                 try {
diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java
index 3082f1dfa72..fc5f1375516 100644
--- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java
+++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java
@@ -1253,8 +1253,7 @@ public boolean deleteEvents(final DeleteEventsCmd cmd) {
                     }
                 }
             }
-
-            plan = new DataCenterDeployment(srcHost.getDataCenterId(), null, null, null, null, null);
+            plan = new DataCenterDeployment(srcHost.getDataCenterId(), srcHost.getPodId(), srcHost.getClusterId(), null, null, null);
         } else {
             final Long cluster = srcHost.getClusterId();
             if (s_logger.isDebugEnabled()) {
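
The deployment plan for the migration target is now scoped to the source host's pod and cluster rather than being zone-wide. A toy model of why that narrows the candidate host list (class and field names below are invented for illustration; the real planner works on DataCenterDeployment and host VOs):

    import java.util.List;

    // Toy model: a zone-wide plan (null pod/cluster) keeps every host in the
    // zone as a candidate, while a scoped plan keeps only hosts in the same
    // pod and cluster as the source host.
    final class PlanScopeSketch {
        record HostInfo(long id, long zoneId, Long podId, Long clusterId) {}
        record Plan(long zoneId, Long podId, Long clusterId) {}

        static List<HostInfo> candidates(List<HostInfo> hosts, Plan plan) {
            return hosts.stream()
                    .filter(h -> h.zoneId() == plan.zoneId())
                    .filter(h -> plan.podId() == null || plan.podId().equals(h.podId()))
                    .filter(h -> plan.clusterId() == null || plan.clusterId().equals(h.clusterId()))
                    .toList();
        }

        public static void main(String[] args) {
            List<HostInfo> hosts = List.of(new HostInfo(1, 1, 1L, 1L), new HostInfo(2, 1, 2L, 5L));
            System.out.println(candidates(hosts, new Plan(1, null, null)).size()); // 2 (zone-wide)
            System.out.println(candidates(hosts, new Plan(1, 1L, 1L)).size());     // 1 (scoped)
        }
    }
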
diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java
index 7558419661e..80bf5c4e83f 100644
--- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java
+++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java
@@ -39,6 +39,7 @@
 import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 
+import com.cloud.storage.StorageManager;
 import org.apache.cloudstack.acl.ControlledEntity.ACLType;
 import org.apache.cloudstack.acl.SecurityChecker.AccessType;
 import org.apache.cloudstack.affinity.AffinityGroupService;
@@ -473,6 +474,8 @@
     private TemplateApiService _tmplService;
     @Inject
     private ConfigurationDao _configDao;
+    @Inject
+    private StorageManager _storageMgr;
 
     private ScheduledExecutorService _executor = null;
     private ScheduledExecutorService _vmIpFetchExecutor = null;
@@ -5468,7 +5471,13 @@ public VirtualMachine migrateVirtualMachineWithVolume(Long vmId, Host destinatio
         HostVO destinationHostVO = _hostDao.findById(destinationHost.getId());
         if (_capacityMgr.checkIfHostReachMaxGuestLimit(destinationHostVO)) {
             throw new VirtualMachineMigrationException("Host name: " + destinationHost.getName() + ", hostId: " + destinationHost.getId()
-            + " already has max running vms (count includes system VMs). Cannot" + " migrate to this host");
+                    + " already has max running vms (count includes system VMs). Cannot migrate to this host");
+        }
+
+        // Check if the destination host has enough space
+        if (!_storageMgr.storagePoolHasEnoughSpace(new ArrayList<Volume>(vmVolumes), (StoragePool)_dataStoreMgr.getDataStore(_storageMgr.findLocalStorageOnHost(destinationHost.getId()).getId(), DataStoreRole.Primary))) {
+            throw new VirtualMachineMigrationException("Host name: " + destinationHost.getName() + ", hostId: " + destinationHost.getId()
+                    + " does not have enough space on its storage pool. Cannot migrate to this host");
         }
 
         checkHostsDedication(vm, srcHostId, destinationHost.getId());
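
The new pre-flight check asks the storage manager whether the destination host's local primary storage can hold all of the VM's volumes before the migration starts. A self-contained sketch of such a guard (the types below are stand-ins for StorageManager, StoragePool and Volume; unlike the hunk above, it also fails early if the lookup of a local pool on the destination host were to come back null):

    import java.util.List;

    // Stand-in types for illustration only.
    final class MigrationSpaceCheckSketch {
        record Volume(long sizeBytes) {}
        record Pool(long capacityBytes, long usedBytes) {
            long available() { return capacityBytes - usedBytes; }
        }

        static void checkDestinationHasSpace(Pool localPool, List<Volume> volumes) {
            if (localPool == null) {
                // Fail with a clear message instead of a NullPointerException when
                // the destination host exposes no local primary storage.
                throw new IllegalStateException("Destination host has no local storage pool");
            }
            long required = volumes.stream().mapToLong(Volume::sizeBytes).sum();
            if (required > localPool.available()) {
                throw new IllegalStateException("Destination pool is short by "
                        + (required - localPool.available()) + " bytes");
            }
        }

        public static void main(String[] args) {
            Pool pool = new Pool(100L << 30, 60L << 30);                    // 40 GiB free
            checkDestinationHasSpace(pool, List.of(new Volume(20L << 30))); // passes
        }
    }
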
diff --git a/test/integration/smoke/test_vm_life_cycle.py b/test/integration/smoke/test_vm_life_cycle.py
index d882c1fc9ae..4fd6bfa2089 100644
--- a/test/integration/smoke/test_vm_life_cycle.py
+++ b/test/integration/smoke/test_vm_life_cycle.py
@@ -620,7 +620,7 @@ def test_08_migrate_vm(self):
                                         migrate_host.id
                                         ))
 
-        self.vm_to_migrate.migrate(self.apiclient, migrate_host.id)
+        self.vm_to_migrate.migrate_vm_with_volume(self.apiclient, migrate_host.id)
 
         retries_cnt = 3
         while retries_cnt >=0:


 
