Posted to commits@cloudstack.apache.org by ap...@apache.org on 2013/04/19 08:14:27 UTC

[1/4] Storage motion for XenServer changes: 1. Implemented API findStoragePoolsForMigration. Added new response objects to list storage pools available for migration. 2. Updated the migrateVolume API to allow migrating volumes of running VMs. These cha

Updated Branches:
  refs/heads/master eae22d2ff -> 21ce3befc
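
As an overview of what this patch enables, the new calls can be exercised end to end roughly as follows. This is a minimal sketch using the marvin command classes touched by the patch; apiclient, vm_id and volume_id are assumed to already exist, and error handling is omitted.

from marvin.cloudstackAPI import (findHostsForMigration, findStoragePoolsForMigration,
                                  migrateVirtualMachineWithVolume, migrateVolume)

# Hosts a running VM can be migrated to; each entry indicates whether storage motion is required.
cmd = findHostsForMigration.findHostsForMigrationCmd()
cmd.virtualmachineid = vm_id
hosts = apiclient.findHostsForMigration(cmd)

# Migrate the VM together with its volumes to one of those hosts.
cmd = migrateVirtualMachineWithVolume.migrateVirtualMachineWithVolumeCmd()
cmd.virtualmachineid = vm_id
cmd.hostid = hosts[0].id
apiclient.migrateVirtualMachineWithVolume(cmd)

# Storage pools a volume of a running VM can be migrated to, followed by a live volume migration.
cmd = findStoragePoolsForMigration.findStoragePoolsForMigrationCmd()
cmd.id = volume_id
pools = apiclient.findStoragePoolsForMigration(cmd)

cmd = migrateVolume.migrateVolumeCmd()
cmd.volumeid = volume_id
cmd.storageid = pools[0].id
cmd.livemigrate = 'true'
apiclient.migrateVolume(cmd)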


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/setup/db/db/schema-410to420.sql
----------------------------------------------------------------------
diff --git a/setup/db/db/schema-410to420.sql b/setup/db/db/schema-410to420.sql
index ce651a0..14aa2eb 100644
--- a/setup/db/db/schema-410to420.sql
+++ b/setup/db/db/schema-410to420.sql
@@ -23,7 +23,9 @@
 SET foreign_key_checks = 0;
 
 ALTER TABLE `cloud`.`hypervisor_capabilities` ADD COLUMN `max_hosts_per_cluster` int unsigned DEFAULT NULL COMMENT 'Max. hosts in cluster supported by hypervisor';
+ALTER TABLE `cloud`.`hypervisor_capabilities` ADD COLUMN `storage_motion_supported` int(1) unsigned DEFAULT 0 COMMENT 'Is storage motion supported';
 UPDATE `cloud`.`hypervisor_capabilities` SET `max_hosts_per_cluster`=32 WHERE `hypervisor_type`='VMware';
+INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_data_volumes_limit, storage_motion_supported) VALUES ('XenServer', '6.1.0', 50, 1, 13, 1);
 INSERT IGNORE INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled, max_hosts_per_cluster) VALUES ('VMware', '5.1', 128, 0, 32);
 DELETE FROM `cloud`.`configuration` where name='vmware.percluster.host.max';
 INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'AgentManager', 'xen.nics.max', '7', 'Maximum allowed nics for Vms created on Xen');
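
Once the upgrade SQL has run, the new flag should be visible through the hypervisor capabilities listing. A hedged sketch follows; listHypervisorCapabilities is an existing admin API, but the exact name of the response field backed by storage_motion_supported is an assumption here.

from marvin.cloudstackAPI import listHypervisorCapabilities

cmd = listHypervisorCapabilities.listHypervisorCapabilitiesCmd()
cmd.hypervisor = 'XenServer'
capabilities = apiclient.listHypervisorCapabilities(cmd)
for cap in capabilities:
    # 'storagemotionenabled' is assumed to be the response field for storage_motion_supported.
    print("%s %s: %s" % (cap.hypervisor, cap.hypervisorversion,
                         getattr(cap, 'storagemotionenabled', None)))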

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/test/integration/component/test_storage_motion.py
----------------------------------------------------------------------
diff --git a/test/integration/component/test_storage_motion.py b/test/integration/component/test_storage_motion.py
new file mode 100644
index 0000000..cc55a08
--- /dev/null
+++ b/test/integration/component/test_storage_motion.py
@@ -0,0 +1,298 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+""" P1 tests for Storage motion
+"""
+#Import Local Modules
+import marvin
+from marvin.cloudstackTestCase import *
+from marvin.cloudstackAPI import *
+from marvin.remoteSSHClient import remoteSSHClient
+from marvin.integration.lib.utils import *
+from marvin.integration.lib.base import *
+from marvin.integration.lib.common import *
+from nose.plugins.attrib import attr
+#Import System modules
+import time
+
+_multiprocess_shared_ = True
+class Services:
+    """Test VM Life Cycle Services
+    """
+
+    def __init__(self):
+        self.services = {
+                "disk_offering":{
+                    "displaytext": "Small",
+                    "name": "Small",
+                    "disksize": 1
+                },
+                "account": {
+                    "email": "test@test.com",
+                    "firstname": "Test",
+                    "lastname": "User",
+                    "username": "test",
+                    # Random characters are appended in create account to
+                    # ensure unique username generated each time
+                    "password": "password",
+                },
+                "small":
+                # Create a small virtual machine instance with disk offering
+                {
+                    "displayname": "testserver",
+                    "username": "root", # VM creds for SSH
+                    "password": "password",
+                    "ssh_port": 22,
+                    "hypervisor": 'XenServer',
+                    "privateport": 22,
+                    "publicport": 22,
+                    "protocol": 'TCP',
+                },
+                "service_offerings":
+                {
+                 "small":
+                    {
+                     # Small service offering ID for changing the VM
+                     # service offering from medium to small
+                        "name": "Small Instance",
+                        "displaytext": "Small Instance",
+                        "cpunumber": 1,
+                        "cpuspeed": 100,
+                        "memory": 256,
+                    }
+                },
+                "template": {
+                    "displaytext": "Cent OS Template",
+                    "name": "Cent OS Template",
+                    "passwordenabled": True,
+                },
+            "diskdevice": '/dev/xvdd',
+            # Disk device where ISO is attached to instance
+            "mount_dir": "/mnt/tmp",
+            "sleep": 60,
+            "timeout": 10,
+            #Migrate VM to hostid
+            "ostype": 'CentOS 5.3 (64-bit)',
+            # CentOS 5.3 (64-bit)
+        }
+
+class TestStorageMotion(cloudstackTestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        cls.api_client = super(TestStorageMotion, cls).getClsTestClient().getApiClient()
+        cls.services = Services().services
+
+        # Get Zone, Domain and templates
+        domain = get_domain(cls.api_client, cls.services)
+        zone = get_zone(cls.api_client, cls.services)
+        cls.services['mode'] = zone.networktype
+
+        template = get_template(
+                            cls.api_client,
+                            zone.id,
+                            cls.services["ostype"]
+                            )
+        # Set Zones and disk offerings
+        cls.services["small"]["zoneid"] = zone.id
+        cls.services["small"]["template"] = template.id
+
+        # Create VMs, NAT Rules etc
+        cls.account = Account.create(
+                            cls.api_client,
+                            cls.services["account"],
+                            domainid=domain.id
+                            )
+
+        cls.small_offering = ServiceOffering.create(
+                                    cls.api_client,
+                                    cls.services["service_offerings"]["small"]
+                                    )
+
+        #create a virtual machine
+        cls.virtual_machine = VirtualMachine.create(
+                                        cls.api_client,
+                                        cls.services["small"],
+                                        accountid=cls.account.account.name,
+                                        domainid=cls.account.account.domainid,
+                                        serviceofferingid=cls.small_offering.id,
+                                        mode=cls.services["mode"]
+                                        )
+        cls._cleanup = [
+                        cls.small_offering,
+                        cls.account
+                        ]
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.api_client = super(TestStorageMotion, cls).getClsTestClient().getApiClient()
+        cleanup_resources(cls.api_client, cls._cleanup)
+        return
+
+    def setUp(self):
+        self.apiclient = self.testClient.getApiClient()
+        self.dbclient = self.testClient.getDbConnection()
+        self.cleanup = []
+
+    def tearDown(self):
+        #Clean up, terminate the created ISOs
+        cleanup_resources(self.apiclient, self.cleanup)
+        return
+
+    @attr(tags = ["advanced", "basic", "multicluster", "storagemotion", "xenserver"])
+    def test_01_migrate_vm_with_volume(self):
+        """Test migrate virtual machine with its volumes
+        """
+        # Validate the following
+        # 1. List hosts for migration of a vm. Pick a host that
+        # requires storage motion too.
+        # 2. Migrate vm to a host.
+        # 3. listVM command should return this VM. The state of this VM
+        #    should be "Running" and the host should be the host
+        #    to which the VM was migrated, in a different cluster
+
+        hosts = Host.listForMigration(
+                          self.apiclient,
+                          virtualmachineid=self.virtual_machine.id
+                          )
+
+        self.assertEqual(
+                         isinstance(hosts, list),
+                         True,
+                         "Check the number of hosts in the zone"
+                         )
+
+        # Migrate to a host that requires storage motion
+        hosts[:] = [host for host in hosts if host.requiresStorageMotion]
+
+        host = hosts[0]
+        self.debug("Migrating VM-ID: %s to Host: %s" % (
+                                        self.virtual_machine.id,
+                                        host.id
+                                        ))
+
+        cmd = migrateVirtualMachineWithVolume.migrateVirtualMachineWithVolumeCmd()
+        cmd.hostid = host.id
+        cmd.virtualmachineid = self.virtual_machine.id
+        self.apiclient.migrateVirtualMachineWithVolume(cmd)
+
+        list_vm_response = list_virtual_machines(
+                                            self.apiclient,
+                                            id=self.virtual_machine.id
+                                            )
+        self.assertEqual(
+                        isinstance(list_vm_response, list),
+                        True,
+                        "Check list response returns a valid list"
+                        )
+
+        self.assertNotEqual(
+                            list_vm_response,
+                            None,
+                            "Check virtual machine is in listVirtualMachines response"
+                            )
+
+        vm_response = list_vm_response[0]
+
+        self.assertEqual(
+                        vm_response.id,
+                        self.virtual_machine.id,
+                        "Check virtual machine ID of migrated VM"
+                        )
+
+        self.assertEqual(
+                        vm_response.hostid,
+                        host.id,
+                        "Check destination hostID of migrated VM"
+                        )
+
+        self.assertEqual(
+                        vm_response.state,
+                        'Running',
+                        "Check the state of VM"
+                        )
+        return
+
+    @attr(tags = ["advanced", "basic", "multipool", "storagemotion", "xenserver"])
+    def test_02_migrate_volume(self):
+        """Test migrate volume of a running vm
+        """
+        # Validate the following
+        # 1. List all the volumes of a vm. For each volume do step 2 to 4.
+        # 2. List storage pools for migrating volume of a vm. Multiple
+        #    storage pools should be present in the cluster.
+        # 3. Migrate volume of the vm to another pool.
+        # 4. Check volume is present in the new pool and is in Ready state.
+
+        list_volumes_response = list_volumes(
+                                    self.apiclient,
+                                    virtualmachineid=self.virtual_machine.id,
+                                    listall=True
+                                    )
+        self.assertEqual(
+                         isinstance(list_volumes_response, list),
+                         True,
+                         "Check list volumes response for valid list"
+                        )
+        self.assertNotEqual(
+                        list_volumes_response,
+                        None,
+                        "Check if volume exists in ListVolumes"
+                        )
+
+        for volume in list_volumes_response:
+            pools = StoragePool.listForMigration(
+                              self.apiclient,
+                              id=volume.id
+                              )
+            pool = pools[0]
+            self.debug("Migrating Volume-ID: %s to Pool: %s" % (
+                                volume.id,
+                                pool.id
+                                ))
+            Volume.migrate(
+                           self.apiclient,
+                           volumeid=volume.id,
+                           storageid=pool.id,
+                           livemigrate='true'
+                           )
+            migrated_volume_response = list_volumes(
+                                             self.apiclient,
+                                             id=volume.id
+                                             )
+            self.assertEqual(
+                             isinstance(migrated_volume_response, list),
+                             True,
+                             "Check list volumes response for valid list"
+                             )
+            self.assertNotEqual(
+                                migrated_volume_response,
+                                None,
+                                "Check if volume exists in ListVolumes"
+                                )
+            migrated_volume = migrated_volume_response[0]
+            self.assertEqual(
+                             migrated_volume.state,
+                             'Ready',
+                             "Check migrated volume is in Ready state"
+                             )
+            self.assertEqual(
+                             migrated_volume.storage,
+                             pool.name,
+                             "Check volume is on migrated pool"
+                             )
+
+        return
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/tools/marvin/marvin/integration/lib/base.py
----------------------------------------------------------------------
diff --git a/tools/marvin/marvin/integration/lib/base.py b/tools/marvin/marvin/integration/lib/base.py
index 0185c87..915d478 100755
--- a/tools/marvin/marvin/integration/lib/base.py
+++ b/tools/marvin/marvin/integration/lib/base.py
@@ -539,6 +539,13 @@ class Volume:
         [setattr(cmd, k, v) for k, v in kwargs.items()]
         return(apiclient.resizeVolume(cmd))
 
+    @classmethod
+    def migrate(cls, apiclient, **kwargs):
+        """Migrate a volume"""
+        cmd = migrateVolume.migrateVolumeCmd()
+        [setattr(cmd, k, v) for k, v in kwargs.items()]
+        return(apiclient.migrateVolume(cmd))
+
 class Snapshot:
     """Manage Snapshot Lifecycle
     """
@@ -1493,6 +1500,14 @@ class Host:
         [setattr(cmd, k, v) for k, v in kwargs.items()]
         return(apiclient.listHosts(cmd))
 
+    @classmethod
+    def listForMigration(cls, apiclient, **kwargs):
+        """List all Hosts for migration matching criteria"""
+
+        cmd = findHostsForMigration.findHostsForMigrationCmd()
+        [setattr(cmd, k, v) for k, v in kwargs.items()]
+        return(apiclient.findHostsForMigration(cmd))
+
 
 class StoragePool:
     """Manage Storage pools (Primary Storage)"""
@@ -1554,6 +1569,13 @@ class StoragePool:
         [setattr(cmd, k, v) for k, v in kwargs.items()]
         return(apiclient.listStoragePools(cmd))
 
+    @classmethod
+    def listForMigration(cls, apiclient, **kwargs):
+        """List all storage pools for migration matching criteria"""
+
+        cmd = findStoragePoolsForMigration.findStoragePoolsForMigrationCmd()
+        [setattr(cmd, k, v) for k, v in kwargs.items()]
+        return(apiclient.findStoragePoolsForMigration(cmd))
 
 class Network:
     """Manage Network pools"""


[2/4] Storage motion for XenServer changes: 1. Implemented API findStoragePoolsForMigration. Added new response objects to list storage pools available for migration. 2. Updated the migrateVolume API to allow migrating volumes of running VMs. These cha

Posted by ap...@apache.org.
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/server/src/com/cloud/api/query/dao/HostJoinDaoImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/api/query/dao/HostJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/HostJoinDaoImpl.java
index 1adff40..7796529 100644
--- a/server/src/com/cloud/api/query/dao/HostJoinDaoImpl.java
+++ b/server/src/com/cloud/api/query/dao/HostJoinDaoImpl.java
@@ -29,6 +29,7 @@ import javax.inject.Inject;
 
 import org.apache.cloudstack.api.ApiConstants.HostDetails;
 import org.apache.cloudstack.api.response.HostResponse;
+import org.apache.cloudstack.api.response.HostForMigrationResponse;
 import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
@@ -190,10 +191,6 @@ public class HostJoinDaoImpl extends GenericDaoBase<HostJoinVO, Long> implements
     }
 
 
-
-
-
-
     @Override
     public HostResponse setHostResponse(HostResponse response, HostJoinVO host) {
         String tag = host.getTag();
@@ -208,7 +205,137 @@ public class HostJoinDaoImpl extends GenericDaoBase<HostJoinVO, Long> implements
         return response;
     }
 
+    @Override
+    public HostForMigrationResponse newHostForMigrationResponse(HostJoinVO host, EnumSet<HostDetails> details) {
+        HostForMigrationResponse hostResponse = new HostForMigrationResponse();
+        hostResponse.setId(host.getUuid());
+        hostResponse.setCapabilities(host.getCapabilities());
+        hostResponse.setClusterId(host.getClusterUuid());
+        hostResponse.setCpuNumber(host.getCpus());
+        hostResponse.setZoneId(host.getZoneUuid());
+        hostResponse.setDisconnectedOn(host.getDisconnectedOn());
+        hostResponse.setHypervisor(host.getHypervisorType());
+        hostResponse.setHostType(host.getType());
+        hostResponse.setLastPinged(new Date(host.getLastPinged()));
+        hostResponse.setManagementServerId(host.getManagementServerId());
+        hostResponse.setName(host.getName());
+        hostResponse.setPodId(host.getPodUuid());
+        hostResponse.setRemoved(host.getRemoved());
+        hostResponse.setCpuSpeed(host.getSpeed());
+        hostResponse.setState(host.getStatus());
+        hostResponse.setIpAddress(host.getPrivateIpAddress());
+        hostResponse.setVersion(host.getVersion());
+        hostResponse.setCreated(host.getCreated());
+
+        if (details.contains(HostDetails.all) || details.contains(HostDetails.capacity)
+                || details.contains(HostDetails.stats) || details.contains(HostDetails.events)) {
 
+            hostResponse.setOsCategoryId(host.getOsCategoryUuid());
+            hostResponse.setOsCategoryName(host.getOsCategoryName());
+            hostResponse.setZoneName(host.getZoneName());
+            hostResponse.setPodName(host.getPodName());
+            if ( host.getClusterId() > 0) {
+                hostResponse.setClusterName(host.getClusterName());
+                hostResponse.setClusterType(host.getClusterType().toString());
+            }
+        }
+
+        DecimalFormat decimalFormat = new DecimalFormat("#.##");
+        if (host.getType() == Host.Type.Routing) {
+            if (details.contains(HostDetails.all) || details.contains(HostDetails.capacity)) {
+                // set allocated capacities
+                Long mem = host.getMemReservedCapacity() + host.getMemUsedCapacity();
+                Long cpu = host.getCpuReservedCapacity() + host.getCpuUsedCapacity();
+
+                hostResponse.setMemoryAllocated(mem);
+                hostResponse.setMemoryTotal(host.getTotalMemory());
+
+                String hostTags = host.getTag();
+                hostResponse.setHostTags(host.getTag());
+
+                String haTag = ApiDBUtils.getHaTag();
+                if (haTag != null && !haTag.isEmpty() && hostTags != null && !hostTags.isEmpty()) {
+                    if (haTag.equalsIgnoreCase(hostTags)) {
+                        hostResponse.setHaHost(true);
+                    } else {
+                        hostResponse.setHaHost(false);
+                    }
+                } else {
+                    hostResponse.setHaHost(false);
+                }
+
+                hostResponse.setHypervisorVersion(host.getHypervisorVersion());
+
+                String cpuAlloc = decimalFormat.format(((float) cpu / (float) (host.getCpus() * host.getSpeed())) * 100f) + "%";
+                hostResponse.setCpuAllocated(cpuAlloc);
+                String cpuWithOverprovisioning = new Float(host.getCpus() * host.getSpeed() * ApiDBUtils.getCpuOverprovisioningFactor()).toString();
+                hostResponse.setCpuWithOverprovisioning(cpuWithOverprovisioning);
+            }
+
+            if (details.contains(HostDetails.all) || details.contains(HostDetails.stats)) {
+                // set CPU/RAM/Network stats
+                String cpuUsed = null;
+                HostStats hostStats = ApiDBUtils.getHostStatistics(host.getId());
+                if (hostStats != null) {
+                    float cpuUtil = (float) hostStats.getCpuUtilization();
+                    cpuUsed = decimalFormat.format(cpuUtil) + "%";
+                    hostResponse.setCpuUsed(cpuUsed);
+                    hostResponse.setMemoryUsed((new Double(hostStats.getUsedMemory())).longValue());
+                    hostResponse.setNetworkKbsRead((new Double(hostStats.getNetworkReadKBs())).longValue());
+                    hostResponse.setNetworkKbsWrite((new Double(hostStats.getNetworkWriteKBs())).longValue());
+
+                }
+            }
+
+        } else if (host.getType() == Host.Type.SecondaryStorage) {
+            StorageStats secStorageStats = ApiDBUtils.getSecondaryStorageStatistics(host.getId());
+            if (secStorageStats != null) {
+                hostResponse.setDiskSizeTotal(secStorageStats.getCapacityBytes());
+                hostResponse.setDiskSizeAllocated(secStorageStats.getByteUsed());
+            }
+        }
+
+        hostResponse.setLocalStorageActive(ApiDBUtils.isLocalStorageActiveOnHost(host.getId()));
+
+        if (details.contains(HostDetails.all) || details.contains(HostDetails.events)) {
+            Set<com.cloud.host.Status.Event> possibleEvents = host.getStatus().getPossibleEvents();
+            if ((possibleEvents != null) && !possibleEvents.isEmpty()) {
+                String events = "";
+                Iterator<com.cloud.host.Status.Event> iter = possibleEvents.iterator();
+                while (iter.hasNext()) {
+                    com.cloud.host.Status.Event event = iter.next();
+                    events += event.toString();
+                    if (iter.hasNext()) {
+                        events += "; ";
+                    }
+                }
+                hostResponse.setEvents(events);
+            }
+        }
+
+        hostResponse.setResourceState(host.getResourceState().toString());
+
+        // set async job
+        hostResponse.setJobId(host.getJobUuid());
+        hostResponse.setJobStatus(host.getJobStatus());
+
+        hostResponse.setObjectName("host");
+
+        return hostResponse;
+    }
+
+    @Override
+    public HostForMigrationResponse setHostForMigrationResponse(HostForMigrationResponse response, HostJoinVO host) {
+        String tag = host.getTag();
+        if (tag != null) {
+            if (response.getHostTags() != null && response.getHostTags().length() > 0) {
+                response.setHostTags(response.getHostTags() + "," + tag);
+            } else {
+                response.setHostTags(tag);
+            }
+        }
+        return response;
+    }
 
     @Override
     public List<HostJoinVO> newHostView(Host host) {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/server/src/com/cloud/api/query/dao/StoragePoolJoinDao.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/api/query/dao/StoragePoolJoinDao.java b/server/src/com/cloud/api/query/dao/StoragePoolJoinDao.java
index bbb0242..b7e467f 100644
--- a/server/src/com/cloud/api/query/dao/StoragePoolJoinDao.java
+++ b/server/src/com/cloud/api/query/dao/StoragePoolJoinDao.java
@@ -18,6 +18,7 @@ package com.cloud.api.query.dao;
 
 import java.util.List;
 
+import org.apache.cloudstack.api.response.StoragePoolForMigrationResponse;
 import org.apache.cloudstack.api.response.StoragePoolResponse;
 
 import com.cloud.api.query.vo.StoragePoolJoinVO;
@@ -30,6 +31,11 @@ public interface StoragePoolJoinDao extends GenericDao<StoragePoolJoinVO, Long>
 
     StoragePoolResponse setStoragePoolResponse(StoragePoolResponse response, StoragePoolJoinVO host);
 
+    StoragePoolForMigrationResponse newStoragePoolForMigrationResponse(StoragePoolJoinVO host);
+
+    StoragePoolForMigrationResponse setStoragePoolForMigrationResponse(StoragePoolForMigrationResponse response,
+            StoragePoolJoinVO host);
+
     List<StoragePoolJoinVO> newStoragePoolView(StoragePool group);
 
     List<StoragePoolJoinVO> searchByIds(Long... spIds);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/server/src/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java
index 58968df..34b88ba 100644
--- a/server/src/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java
+++ b/server/src/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java
@@ -22,6 +22,7 @@ import java.util.List;
 import javax.ejb.Local;
 import javax.inject.Inject;
 
+import org.apache.cloudstack.api.response.StoragePoolForMigrationResponse;
 import org.apache.cloudstack.api.response.StoragePoolResponse;
 import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
@@ -108,10 +109,6 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Lo
         return poolResponse;
     }
 
-
-
-
-
     @Override
     public StoragePoolResponse setStoragePoolResponse(StoragePoolResponse response, StoragePoolJoinVO sp) {
         String tag = sp.getTag();
@@ -126,7 +123,61 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Lo
         return response;
     }
 
+    @Override
+    public StoragePoolForMigrationResponse newStoragePoolForMigrationResponse(StoragePoolJoinVO pool) {
+        StoragePoolForMigrationResponse poolResponse = new StoragePoolForMigrationResponse();
+        poolResponse.setId(pool.getUuid());
+        poolResponse.setName(pool.getName());
+        poolResponse.setState(pool.getStatus());
+        poolResponse.setPath(pool.getPath());
+        poolResponse.setIpAddress(pool.getHostAddress());
+        poolResponse.setZoneId(pool.getZoneUuid());
+        poolResponse.setZoneName(pool.getZoneName());
+        if (pool.getPoolType() != null) {
+            poolResponse.setType(pool.getPoolType().toString());
+        }
+        poolResponse.setPodId(pool.getPodUuid());
+        poolResponse.setPodName(pool.getPodName());
+        poolResponse.setCreated(pool.getCreated());
+        poolResponse.setScope(pool.getScope().toString());
+
 
+        long allocatedSize = pool.getUsedCapacity() +  pool.getReservedCapacity();
+        poolResponse.setDiskSizeTotal(pool.getCapacityBytes());
+        poolResponse.setDiskSizeAllocated(allocatedSize);
+
+        //TODO: StatsCollector does not persist data
+        StorageStats stats = ApiDBUtils.getStoragePoolStatistics(pool.getId());
+        if (stats != null) {
+            Long used = stats.getByteUsed();
+            poolResponse.setDiskSizeUsed(used);
+        }
+
+        poolResponse.setClusterId(pool.getClusterUuid());
+        poolResponse.setClusterName(pool.getClusterName());
+        poolResponse.setTags(pool.getTag());
+
+        // set async job
+        poolResponse.setJobId(pool.getJobUuid());
+        poolResponse.setJobStatus(pool.getJobStatus());
+
+        poolResponse.setObjectName("storagepool");
+        return poolResponse;
+    }
+
+    @Override
+    public StoragePoolForMigrationResponse setStoragePoolForMigrationResponse(StoragePoolForMigrationResponse response,
+            StoragePoolJoinVO sp) {
+        String tag = sp.getTag();
+        if (tag != null) {
+            if ( response.getTags() != null && response.getTags().length() > 0){
+                response.setTags(response.getTags() + "," + tag);
+            } else {
+                response.setTags(tag);
+            }
+        }
+        return response;
+    }
 
     @Override
     public List<StoragePoolJoinVO> newStoragePoolView(StoragePool host) {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/server/src/com/cloud/server/ManagementServerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java
index db8db8a..16127a2 100755
--- a/server/src/com/cloud/server/ManagementServerImpl.java
+++ b/server/src/com/cloud/server/ManagementServerImpl.java
@@ -214,6 +214,7 @@ import org.apache.cloudstack.api.command.admin.vlan.DeleteVlanIpRangeCmd;
 import org.apache.cloudstack.api.command.admin.vlan.ListVlanIpRangesCmd;
 import org.apache.cloudstack.api.command.admin.vm.AssignVMCmd;
 import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd;
+import org.apache.cloudstack.api.command.admin.vm.MigrateVirtualMachineWithVolumeCmd;
 import org.apache.cloudstack.api.command.admin.vm.RecoverVMCmd;
 import org.apache.cloudstack.api.command.admin.zone.CreateZoneCmd;
 import org.apache.cloudstack.api.command.admin.zone.DeleteZoneCmd;
@@ -256,6 +257,7 @@ import org.apache.cloudstack.api.command.user.vmsnapshot.ListVMSnapshotCmd;
 import org.apache.cloudstack.api.command.user.vmsnapshot.RevertToSnapshotCmd;
 import org.apache.cloudstack.api.command.user.zone.ListZonesByCmd;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 
@@ -329,6 +331,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
     @Inject
     private StorageManager _storageMgr;
     @Inject
+    private VolumeManager _volumeMgr;
+    @Inject
     private VirtualMachineManager _itMgr;
     @Inject
     private HostPodDao _hostPodDao;
@@ -352,10 +356,10 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
     private LoadBalancerDao _loadbalancerDao;
     @Inject
     private HypervisorCapabilitiesDao _hypervisorCapabilitiesDao;
-
     private List<HostAllocator> _hostAllocators;
-
-	@Inject
+    @Inject
+    private List<StoragePoolAllocator> _storagePoolAllocators;
+    @Inject
     private ConfigurationManager _configMgr;
     @Inject
     private ResourceTagDao _resourceTagDao;
@@ -679,12 +683,14 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         Object resourceState = cmd.getResourceState();
         Object haHosts = cmd.getHaHost();
 
-        Pair<List<HostVO>, Integer> result = searchForServers(cmd.getStartIndex(), cmd.getPageSizeVal(), name, type, state, zoneId, pod, cluster, id, keyword, resourceState, haHosts);
+        Pair<List<HostVO>, Integer> result = searchForServers(cmd.getStartIndex(), cmd.getPageSizeVal(), name, type,
+                state, zoneId, pod, cluster, id, keyword, resourceState, haHosts, null, null);
         return new Pair<List<? extends Host>, Integer>(result.first(), result.second());
     }
 
     @Override
-    public Pair<Pair<List<? extends Host>, Integer>, List<? extends Host>> listHostsForMigrationOfVM(Long vmId, Long startIndex, Long pageSize) {
+    public Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>>
+            listHostsForMigrationOfVM(Long vmId, Long startIndex, Long pageSize) {
         // access check - only root admin can migrate VM
         Account caller = UserContext.current().getCaller();
         if (caller.getType() != Account.ACCOUNT_TYPE_ADMIN) {
@@ -700,12 +706,13 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
             ex.addProxyObject(vm, vmId, "vmId");
             throw ex;
         }
-        // business logic
+
         if (vm.getState() != State.Running) {
             if (s_logger.isDebugEnabled()) {
                 s_logger.debug("VM is not Running, unable to migrate the vm" + vm);
             }
-            InvalidParameterValueException ex = new InvalidParameterValueException("VM is not Running, unable to migrate the vm with specified id");
+            InvalidParameterValueException ex = new InvalidParameterValueException("VM is not Running, unable to" +
+                    " migrate the vm with specified id");
             ex.addProxyObject(vm, vmId, "vmId");
             throw ex;
         }
@@ -715,17 +722,11 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
             if (s_logger.isDebugEnabled()) {
                 s_logger.debug(vm + " is not XenServer/VMware/KVM/OVM, cannot migrate this VM.");
             }
-            throw new InvalidParameterValueException("Unsupported Hypervisor Type for VM migration, we support XenServer/VMware/KVM only");
-        }
-        ServiceOfferingVO svcOffering = _offeringsDao.findById(vm.getServiceOfferingId());
-        if (svcOffering.getUseLocalStorage()) {
-            if (s_logger.isDebugEnabled()) {
-                s_logger.debug(vm + " is using Local Storage, cannot migrate this VM.");
-            }
-            throw new InvalidParameterValueException("Unsupported operation, VM uses Local storage, cannot migrate");
+            throw new InvalidParameterValueException("Unsupported Hypervisor Type for VM migration, we support " +
+                    "XenServer/VMware/KVM/Ovm only");
         }
+
         long srcHostId = vm.getHostId();
-        // why is this not HostVO?
         Host srcHost = _hostDao.findById(srcHostId);
         if (srcHost == null) {
             if (s_logger.isDebugEnabled()) {
@@ -737,32 +738,73 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
             ex.addProxyObject(vm, vmId, "vmId");
             throw ex;
         }
-        Long cluster = srcHost.getClusterId();
-        Type hostType = srcHost.getType();
-        if (s_logger.isDebugEnabled()) {
-            s_logger.debug("Searching for all hosts in cluster: " + cluster + " for migrating VM " + vm);
-        }
-
-        Pair<List<HostVO>, Integer> allHostsInClusterPair = searchForServers(startIndex, pageSize, null, hostType, null, null, null, cluster, null, null, null, null);
 
-        // filter out the current host
-        List<HostVO> allHostsInCluster = allHostsInClusterPair.first();
-        allHostsInCluster.remove(srcHost);
-        Pair<List<? extends Host>, Integer> otherHostsInCluster = new Pair<List <? extends Host>, Integer>(allHostsInCluster, new Integer(allHostsInClusterPair.second().intValue()-1));
+        // Check if the vm can be migrated with storage.
+        boolean canMigrateWithStorage = false;
+        HypervisorCapabilitiesVO capabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(
+                srcHost.getHypervisorType(), srcHost.getHypervisorVersion());
+        if (capabilities != null) {
+            canMigrateWithStorage = capabilities.isStorageMotionSupported();
+        }
 
-        if (s_logger.isDebugEnabled()) {
-            s_logger.debug("Other Hosts in this cluster: " + allHostsInCluster);
+        // Check if the vm is using any disks on local storage.
+        VirtualMachineProfile<VMInstanceVO> vmProfile = new VirtualMachineProfileImpl<VMInstanceVO>(vm);
+        List<VolumeVO> volumes = _volumeDao.findCreatedByInstance(vmProfile.getId());
+        boolean usesLocal = false;
+        for (VolumeVO volume : volumes) {
+            DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
+            DiskProfile diskProfile = new DiskProfile(volume, diskOffering, vmProfile.getHypervisorType());
+            if (diskProfile.useLocalStorage()) {
+                usesLocal = true;
+                break;
+            }
         }
 
-        if (s_logger.isDebugEnabled()) {
-            s_logger.debug("Calling HostAllocators to search for hosts in cluster: " + cluster + " having enough capacity and suitable for migration");
+        if (!canMigrateWithStorage && usesLocal) {
+            throw new InvalidParameterValueException("Unsupported operation, VM uses Local storage, cannot migrate");
         }
 
-        List<Host> suitableHosts = new ArrayList<Host>();
+        Type hostType = srcHost.getType();
+        Pair<List<HostVO>, Integer> allHostsPair = null;
+        List<HostVO> allHosts = null;
+        Map<Host, Boolean> requiresStorageMotion = new HashMap<Host, Boolean>();
+        DataCenterDeployment plan = null;
+        if (canMigrateWithStorage) {
+            allHostsPair = searchForServers(startIndex, pageSize, null, hostType, null, srcHost.getDataCenterId(), null,
+                    null, null, null, null, null, srcHost.getHypervisorType(), srcHost.getHypervisorVersion());
+            allHosts = allHostsPair.first();
+            allHosts.remove(srcHost);
+
+            // Check if the host has storage pools for all the volumes of the vm to be migrated. Iterate over a copy so hosts can be removed safely.
+            for (Host host : new ArrayList<HostVO>(allHosts)) {
+                Map<Volume, List<StoragePool>> volumePools = findSuitablePoolsForVolumes(vmProfile, host);
+                if (volumePools.isEmpty()) {
+                    allHosts.remove(host);
+                } else {
+                    if (usesLocal || !host.getClusterId().equals(srcHost.getClusterId())) {
+                        requiresStorageMotion.put(host, true);
+                    }
+                }
+            }
 
-        VirtualMachineProfile<VMInstanceVO> vmProfile = new VirtualMachineProfileImpl<VMInstanceVO>(vm);
+            plan = new DataCenterDeployment(srcHost.getDataCenterId(), null, null, null, null, null);
+        } else {
+            Long cluster = srcHost.getClusterId();
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Searching for all hosts in cluster " + cluster + " for migrating VM " + vm);
+            }
+            allHostsPair = searchForServers(startIndex, pageSize, null, hostType, null, null, null, cluster, null, null,
+                    null, null, null, null);
+            // Filter out the current host.
+            allHosts = allHostsPair.first();
+            allHosts.remove(srcHost);
+            plan = new DataCenterDeployment(srcHost.getDataCenterId(), srcHost.getPodId(), srcHost.getClusterId(),
+                    null, null, null);
+        }
 
-        DataCenterDeployment plan = new DataCenterDeployment(srcHost.getDataCenterId(), srcHost.getPodId(), srcHost.getClusterId(), null, null, null);
+        Pair<List<? extends Host>, Integer> otherHosts = new Pair<List <? extends Host>, Integer>(allHosts,
+                new Integer(allHosts.size()));
+        List<Host> suitableHosts = new ArrayList<Host>();
         ExcludeList excludes = new ExcludeList();
         excludes.addHost(srcHostId);
 
@@ -776,25 +818,174 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         }
 
         for (HostAllocator allocator : _hostAllocators) {
-            suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, HostAllocator.RETURN_UPTO_ALL, false);
+            if  (canMigrateWithStorage) {
+                suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, allHosts,
+                        HostAllocator.RETURN_UPTO_ALL, false);
+            } else {
+                suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes,
+                        HostAllocator.RETURN_UPTO_ALL, false);
+            }
+
             if (suitableHosts != null && !suitableHosts.isEmpty()) {
                 break;
             }
         }
 
-        if (suitableHosts.isEmpty()) {
-            s_logger.debug("No suitable hosts found");
-        } else {
-            if (s_logger.isDebugEnabled()) {
+        if (s_logger.isDebugEnabled()) {
+            if (suitableHosts.isEmpty()) {
+                s_logger.debug("No suitable hosts found");
+            } else {
                 s_logger.debug("Hosts having capacity and suitable for migration: " + suitableHosts);
             }
         }
 
-        return new Pair<Pair<List<? extends Host>, Integer>, List<? extends Host>>(otherHostsInCluster, suitableHosts);
+        return new Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>> (otherHosts,
+                suitableHosts, requiresStorageMotion);
+    }
+
+    private Map<Volume, List<StoragePool>> findSuitablePoolsForVolumes(VirtualMachineProfile<VMInstanceVO> vmProfile,
+            Host host) {
+        List<VolumeVO> volumes = _volumeDao.findCreatedByInstance(vmProfile.getId());
+        Map<Volume, List<StoragePool>> suitableVolumeStoragePools = new HashMap<Volume, List<StoragePool>>();
+
+        // For each volume find list of suitable storage pools by calling the allocators
+        for (VolumeVO volume : volumes) {
+            DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
+            DiskProfile diskProfile = new DiskProfile(volume, diskOffering, vmProfile.getHypervisorType());
+            DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(),
+                    host.getClusterId(), host.getId(), null, null);
+            ExcludeList avoid = new ExcludeList();
+
+            boolean foundPools = false;
+            for (StoragePoolAllocator allocator : _storagePoolAllocators) {
+                List<StoragePool> poolList = allocator.allocateToPool(diskProfile, vmProfile, plan, avoid,
+                        StoragePoolAllocator.RETURN_UPTO_ALL);
+                if (poolList != null && !poolList.isEmpty()) {
+                    suitableVolumeStoragePools.put(volume, poolList);
+                    foundPools = true;
+                    break;
+                }
+            }
+
+            if (!foundPools) {
+                suitableVolumeStoragePools.clear();
+                break;
+            }
+        }
+
+        return suitableVolumeStoragePools;
+    }
+
+    @Override
+    public Pair<List<? extends StoragePool>, List<? extends StoragePool>> listStoragePoolsForMigrationOfVolume(Long volumeId) {
+        // Access check - only root administrator can migrate volumes.
+        Account caller = UserContext.current().getCaller();
+        if (caller.getType() != Account.ACCOUNT_TYPE_ADMIN) {
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Caller is not a root admin, permission denied to migrate the volume");
+            }
+            throw new PermissionDeniedException("No permission to migrate volume, only root admin can migrate a volume");
+        }
+
+        VolumeVO volume = _volumeDao.findById(volumeId);
+        if (volume == null) {
+            InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find volume with" +
+                    " specified id.");
+            ex.addProxyObject(volume, volumeId, "volumeId");
+            throw ex;
+        }
+
+        // Volume must be attached to an instance for live migration.
+        List<StoragePool> allPools = new ArrayList<StoragePool>();
+        List<StoragePool> suitablePools = new ArrayList<StoragePool>();
+        Long instanceId = volume.getInstanceId();
+        VMInstanceVO vm = null;
+        if (instanceId != null) {
+            vm = _vmInstanceDao.findById(instanceId);
+        }
+
+        // Check that the VM is in correct state.
+        if (vm == null || vm.getState() != State.Running) {
+            s_logger.info("Volume " + volume + " isn't attached to any running vm. Only volumes attached to a running" +
+                    " VM can be migrated.");
+            return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
+        }
+
+        // Volume must be in Ready state to be migrated.
+        if (!Volume.State.Ready.equals(volume.getState())) {
+            s_logger.info("Volume " + volume + " must be in ready state for migration.");
+            return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
+        }
+
+        if (!_volumeMgr.volumeOnSharedStoragePool(volume)) {
+            s_logger.info("Volume " + volume + " is on local storage. It cannot be migrated to another pool.");
+            return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
+        }
+
+        // Check if the underlying hypervisor supports storage motion.
+        boolean storageMotionSupported = false;
+        Long hostId = vm.getHostId();
+        if (hostId != null) {
+            HostVO host = _hostDao.findById(hostId);
+            HypervisorCapabilitiesVO capabilities = null;
+            if (host != null) {
+                capabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(host.getHypervisorType(),
+                        host.getHypervisorVersion());
+            } else {
+                s_logger.error("Details of the host on which the vm " + vm + ", to which volume "+ volume + " is "
+                        + "attached, couldn't be retrieved.");
+            }
+
+            if (capabilities != null) {
+                storageMotionSupported = capabilities.isStorageMotionSupported();
+            } else {
+                s_logger.error("Capabilities for host " + host + " couldn't be retrieved.");
+            }
+        }
+
+        if (storageMotionSupported) {
+            // Source pool of the volume.
+            StoragePoolVO srcVolumePool = _poolDao.findById(volume.getPoolId());
+
+            // Get all the pools available. Only shared pools are considered because only a volume on a shared pools
+            // can be live migrated while the virtual machine stays on the same host.
+            List<StoragePoolVO> storagePools = _poolDao.findPoolsByTags(volume.getDataCenterId(),
+                    volume.getPodId(), srcVolumePool.getClusterId(), null);
+            storagePools.remove(srcVolumePool);
+            for (StoragePoolVO pool : storagePools) {
+                if (pool.isShared()) {
+                    allPools.add((StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId()));
+                }
+            }
+
+            // Get all the suitable pools.
+            // Exclude the current pool from the list of pools to which the volume can be migrated.
+            ExcludeList avoid = new ExcludeList();
+            avoid.addPool(srcVolumePool.getId());
+
+            // Volume stays in the same cluster after migration.
+            DataCenterDeployment plan = new DataCenterDeployment(volume.getDataCenterId(), volume.getPodId(),
+                    srcVolumePool.getClusterId(), null, null, null);
+            VirtualMachineProfile<VMInstanceVO> profile = new VirtualMachineProfileImpl<VMInstanceVO>(vm);
+
+            DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
+            DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType());
+
+            // Call the storage pool allocator to find the list of storage pools.
+            for (StoragePoolAllocator allocator : _storagePoolAllocators) {
+                List<StoragePool> pools = allocator.allocateToPool(diskProfile, profile, plan, avoid,
+                        StoragePoolAllocator.RETURN_UPTO_ALL);
+                if (pools != null && !pools.isEmpty()) {
+                    suitablePools.addAll(pools);
+                    break;
+                }
+            }
+        }
+        return new Pair<List<? extends StoragePool>, List<? extends StoragePool>>(allPools, suitablePools);
     }
 
     private Pair<List<HostVO>, Integer> searchForServers(Long startIndex, Long pageSize, Object name, Object type, Object state, Object zone, Object pod, Object cluster, Object id, Object keyword,
-            Object resourceState, Object haHosts) {
+            Object resourceState, Object haHosts, Object hypervisorType, Object hypervisorVersion) {
         Filter searchFilter = new Filter(HostVO.class, "id", Boolean.TRUE, startIndex, pageSize);
 
         SearchBuilder<HostVO> sb = _hostDao.createSearchBuilder();
@@ -806,6 +997,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         sb.and("podId", sb.entity().getPodId(), SearchCriteria.Op.EQ);
         sb.and("clusterId", sb.entity().getClusterId(), SearchCriteria.Op.EQ);
         sb.and("resourceState", sb.entity().getResourceState(), SearchCriteria.Op.EQ);
+        sb.and("hypervisorType", sb.entity().getHypervisorType(), SearchCriteria.Op.EQ);
+        sb.and("hypervisorVersion", sb.entity().getHypervisorVersion(), SearchCriteria.Op.EQ);
 
         String haTag = _haMgr.getHaTag();
         SearchBuilder<HostTagVO> hostTagSearch = null;
@@ -855,6 +1048,12 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         if (cluster != null) {
             sc.setParameters("clusterId", cluster);
         }
+        if (hypervisorType != null) {
+            sc.setParameters("hypervisorType", hypervisorType);
+        }
+        if (hypervisorVersion != null) {
+            sc.setParameters("hypervisorVersion", hypervisorVersion);
+        }
 
         if (resourceState != null) {
             sc.setParameters("resourceState", resourceState);
@@ -1969,6 +2168,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         cmdList.add(CancelMaintenanceCmd.class);
         cmdList.add(DeleteHostCmd.class);
         cmdList.add(ListHostsCmd.class);
+        cmdList.add(FindHostsForMigrationCmd.class);
         cmdList.add(PrepareForMaintenanceCmd.class);
         cmdList.add(ReconnectHostCmd.class);
         cmdList.add(UpdateHostCmd.class);
@@ -2025,6 +2225,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         cmdList.add(DeletePoolCmd.class);
         cmdList.add(ListS3sCmd.class);
         cmdList.add(ListStoragePoolsCmd.class);
+        cmdList.add(FindStoragePoolsForMigrationCmd.class);
         cmdList.add(PreparePrimaryStorageForMaintenanceCmd.class);
         cmdList.add(UpdateStoragePoolCmd.class);
         cmdList.add(AddSwiftCmd.class);
@@ -2064,6 +2265,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         cmdList.add(ReleasePublicIpRangeCmd.class);
         cmdList.add(AssignVMCmd.class);
         cmdList.add(MigrateVMCmd.class);
+        cmdList.add(MigrateVirtualMachineWithVolumeCmd.class);
         cmdList.add(RecoverVMCmd.class);
         cmdList.add(CreatePrivateGatewayCmd.class);
         cmdList.add(CreateVPCOfferingCmd.class);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/server/src/com/cloud/storage/VolumeManager.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/VolumeManager.java b/server/src/com/cloud/storage/VolumeManager.java
index 2101038..d198e5d 100644
--- a/server/src/com/cloud/storage/VolumeManager.java
+++ b/server/src/com/cloud/storage/VolumeManager.java
@@ -18,6 +18,8 @@
  */
 package com.cloud.storage;
 
+import java.util.Map;
+
 import org.apache.cloudstack.api.command.user.volume.AttachVolumeCmd;
 import org.apache.cloudstack.api.command.user.volume.CreateVolumeCmd;
 import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd;
@@ -25,12 +27,15 @@ import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd;
 import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd;
 import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 
+import com.cloud.agent.api.to.VirtualMachineTO;
 import com.cloud.deploy.DeployDestination;
 import com.cloud.exception.ConcurrentOperationException;
 import com.cloud.exception.InsufficientStorageCapacityException;
 import com.cloud.exception.ResourceAllocationException;
 import com.cloud.exception.StorageUnavailableException;
+import com.cloud.host.Host;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.storage.Volume.Type;
 import com.cloud.user.Account;
@@ -80,6 +85,9 @@ public interface VolumeManager extends VolumeApiService {
 
     Volume migrateVolume(MigrateVolumeCmd cmd);
 
+    <T extends VMInstanceVO> void migrateVolumes(T vm, VirtualMachineTO vmTo, Host srcHost, Host destHost,
+            Map<VolumeVO, StoragePoolVO> volumeToPool);
+
     boolean storageMigration(
             VirtualMachineProfile<? extends VirtualMachine> vm,
             StoragePool destPool);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/server/src/com/cloud/storage/VolumeManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/VolumeManagerImpl.java b/server/src/com/cloud/storage/VolumeManagerImpl.java
index 1e8edaf..e57d393 100644
--- a/server/src/com/cloud/storage/VolumeManagerImpl.java
+++ b/server/src/com/cloud/storage/VolumeManagerImpl.java
@@ -28,6 +28,7 @@ import java.util.Date;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.HashMap;
 import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.ExecutionException;
@@ -42,6 +43,7 @@ import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd;
 import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd;
 import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd;
 import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd;
+import org.apache.cloudstack.engine.subsystem.api.storage.CommandResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
@@ -68,6 +70,7 @@ import com.cloud.agent.AgentManager;
 import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.AttachVolumeAnswer;
 import com.cloud.agent.api.AttachVolumeCommand;
+import com.cloud.agent.api.to.VirtualMachineTO;
 import com.cloud.agent.api.to.VolumeTO;
 import com.cloud.alert.AlertManager;
 import com.cloud.api.ApiDBUtils;
@@ -102,11 +105,13 @@ import com.cloud.exception.InvalidParameterValueException;
 import com.cloud.exception.PermissionDeniedException;
 import com.cloud.exception.ResourceAllocationException;
 import com.cloud.exception.StorageUnavailableException;
+import com.cloud.host.Host;
 import com.cloud.host.HostVO;
 import com.cloud.host.dao.HostDao;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.hypervisor.HypervisorGuruManager;
 import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao;
+import com.cloud.hypervisor.HypervisorCapabilitiesVO;
 import com.cloud.network.NetworkModel;
 import com.cloud.org.Grouping;
 import com.cloud.resource.ResourceManager;
@@ -2003,7 +2008,7 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
     public Volume migrateVolume(MigrateVolumeCmd cmd) {
         Long volumeId = cmd.getVolumeId();
         Long storagePoolId = cmd.getStoragePoolId();
-        
+
         VolumeVO vol = _volsDao.findById(volumeId);
         if (vol == null) {
             throw new InvalidParameterValueException(
@@ -2015,9 +2020,39 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
                     "Volume must be in ready state");
         }
 
-        if (vol.getInstanceId() != null) {
-            throw new InvalidParameterValueException(
-                    "Volume needs to be dettached from VM");
+        boolean liveMigrateVolume = false;
+        Long instanceId = vol.getInstanceId();
+        VMInstanceVO vm = null;
+        if (instanceId != null) {
+            vm = _vmInstanceDao.findById(instanceId);
+        }
+
+        if (vm != null && vm.getState() == State.Running) {
+            // Check if the underlying hypervisor supports storage motion.
+            Long hostId = vm.getHostId();
+            if (hostId != null) {
+                HostVO host = _hostDao.findById(hostId);
+                HypervisorCapabilitiesVO capabilities = null;
+                if (host != null) {
+                    capabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(host.getHypervisorType(),
+                            host.getHypervisorVersion());
+                }
+
+                if (capabilities != null) {
+                    liveMigrateVolume = capabilities.isStorageMotionSupported();
+                }
+            }
+        }
+
+        // If the disk is not attached to any VM then it can be moved. Otherwise, it needs to be attached to a vm
+        // running on a hypervisor that supports storage motion so that it can be migrated.
+        if (instanceId != null && !liveMigrateVolume) {
+            throw new InvalidParameterValueException("Volume needs to be detached from VM");
+        }
+
+        if (liveMigrateVolume && !cmd.isLiveMigrate()) {
+            throw new InvalidParameterValueException("The volume " + vol + "is attached to a vm and for migrating it " +
+                    "the parameter livemigrate should be specified");
         }
 
         StoragePool destPool = (StoragePool)this.dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
@@ -2032,12 +2067,15 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
                     "Migration of volume from local storage pool is not supported");
         }
 
-        Volume newVol = migrateVolume(vol, destPool);
+        Volume newVol = null;
+        if (liveMigrateVolume) {
+            newVol = liveMigrateVolume(vol, destPool);
+        } else {
+            newVol = migrateVolume(vol, destPool);
+        }
         return newVol;
     }
 
-    
-    
     @DB
     protected Volume migrateVolume(Volume volume, StoragePool destPool) {
         VolumeInfo vol = this.volFactory.getVolume(volume.getId());
@@ -2058,6 +2096,66 @@ public class VolumeManagerImpl extends ManagerBase implements VolumeManager {
         }
     }
 
+    @DB
+    protected Volume liveMigrateVolume(Volume volume, StoragePool destPool) {
+        VolumeInfo vol = this.volFactory.getVolume(volume.getId());
+        AsyncCallFuture<VolumeApiResult> future = this.volService.migrateVolume(vol, (DataStore)destPool);
+        try {
+            VolumeApiResult result = future.get();
+            if (result.isFailed()) {
+                s_logger.debug("migrate volume failed:" + result.getResult());
+                return null;
+            }
+            return result.getVolume();
+        } catch (InterruptedException e) {
+            s_logger.debug("migrate volume failed", e);
+            return null;
+        } catch (ExecutionException e) {
+            s_logger.debug("migrate volume failed", e);
+            return null;
+        }
+    }
+
+    @Override
+    public <T extends VMInstanceVO> void migrateVolumes(T vm, VirtualMachineTO vmTo, Host srcHost, Host destHost,
+            Map<VolumeVO, StoragePoolVO> volumeToPool) {
+        // Check if all the volumes being migrated belong to the vm.
+        // Check if the storage pool is of the right type.
+        // Create a VolumeInfo to DataStore map too.
+        Map<VolumeInfo, DataStore> volumeMap = new HashMap<VolumeInfo, DataStore>();
+        for (Map.Entry<VolumeVO, StoragePoolVO> entry : volumeToPool.entrySet()) {
+            VolumeVO volume = entry.getKey();
+            StoragePoolVO storagePool = entry.getValue();
+            StoragePool destPool = (StoragePool)this.dataStoreMgr.getDataStore(storagePool.getId(),
+                    DataStoreRole.Primary);
+
+            if (volume.getInstanceId() != vm.getId()) {
+                throw new CloudRuntimeException("Volume " + volume + " that has to be migrated doesn't belong to the" +
+                        " instance " + vm);
+            }
+
+            if (destPool == null) {
+                throw new CloudRuntimeException("Failed to find the destination storage pool " + storagePool.getId());
+            }
+
+            volumeMap.put(this.volFactory.getVolume(volume.getId()), (DataStore)destPool);
+        }
+
+        AsyncCallFuture<CommandResult> future = this.volService.migrateVolumes(volumeMap, vmTo, srcHost, destHost);
+        try {
+            CommandResult result = future.get();
+            if (result.isFailed()) {
+                s_logger.debug("Failed to migrated vm " + vm + " along with its volumes. " + result.getResult());
+                throw new CloudRuntimeException("Failed to migrated vm " + vm + " along with its volumes. " +
+                        result.getResult());
+            }
+        } catch (InterruptedException e) {
+            s_logger.debug("Failed to migrated vm " + vm + " along with its volumes.", e);
+        } catch (ExecutionException e) {
+            s_logger.debug("Failed to migrated vm " + vm + " along with its volumes.", e);
+        }
+    }
+
     @Override
     public boolean storageMigration(
             VirtualMachineProfile<? extends VirtualMachine> vm,

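A minimal stand-alone sketch of the decision the reworked migrateVolume() makes, cold migration versus live (storage motion) migration, may help when reading the hunk above. The Vm class and VmState enum below are illustrative stand-ins for the VMInstanceVO and HypervisorCapabilitiesVO lookups in the real code, not CloudStack classes.

    public class MigrateVolumeDecisionSketch {

        enum VmState { RUNNING, STOPPED }

        static class Vm {
            final VmState state;
            final boolean storageMotionSupported;
            Vm(VmState state, boolean storageMotionSupported) {
                this.state = state;
                this.storageMotionSupported = storageMotionSupported;
            }
        }

        // Returns true when the volume must be live migrated (it is attached to a running
        // vm on a hypervisor that supports storage motion) and false for a cold migration
        // of a detached volume; invalid combinations are rejected, mirroring the patch.
        static boolean mustLiveMigrate(Vm attachedVm, boolean liveMigrateRequested) {
            boolean liveMigrateVolume = attachedVm != null
                    && attachedVm.state == VmState.RUNNING
                    && attachedVm.storageMotionSupported;

            if (attachedVm != null && !liveMigrateVolume) {
                throw new IllegalArgumentException("Volume needs to be detached from VM");
            }
            if (liveMigrateVolume && !liveMigrateRequested) {
                throw new IllegalArgumentException(
                        "Volume is attached to a running vm; the livemigrate parameter must be specified");
            }
            return liveMigrateVolume;
        }

        public static void main(String[] args) {
            System.out.println(mustLiveMigrate(null, false));                          // false: cold migration
            System.out.println(mustLiveMigrate(new Vm(VmState.RUNNING, true), true));  // true: storage motion
        }
    }
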
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/server/src/com/cloud/vm/UserVmManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java
index 3ecdf42..bc6237f 100755
--- a/server/src/com/cloud/vm/UserVmManagerImpl.java
+++ b/server/src/com/cloud/vm/UserVmManagerImpl.java
@@ -46,6 +46,7 @@ import org.apache.cloudstack.api.command.user.vmgroup.DeleteVMGroupCmd;
 import org.apache.cloudstack.engine.cloud.entity.api.VirtualMachineEntity;
 import org.apache.cloudstack.engine.service.api.OrchestrationService;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.log4j.Logger;
 
@@ -108,6 +109,7 @@ import com.cloud.host.Host;
 import com.cloud.host.HostVO;
 import com.cloud.host.dao.HostDao;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.hypervisor.HypervisorCapabilitiesVO;
 import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao;
 import com.cloud.network.Network;
 import com.cloud.network.Network.IpAddresses;
@@ -3502,6 +3504,127 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Use
         return migratedVm;
     }
 
+    @Override
+    @ActionEvent(eventType = EventTypes.EVENT_VM_MIGRATE, eventDescription = "migrating VM", async = true)
+    public VirtualMachine migrateVirtualMachineWithVolume(Long vmId, Host destinationHost,
+            Map<String, String> volumeToPool) throws ResourceUnavailableException, ConcurrentOperationException,
+            ManagementServerException, VirtualMachineMigrationException {
+        // Access check - only root administrator can migrate VM.
+        Account caller = UserContext.current().getCaller();
+        if (caller.getType() != Account.ACCOUNT_TYPE_ADMIN) {
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Caller is not a root admin, permission denied to migrate the VM");
+            }
+            throw new PermissionDeniedException("No permission to migrate VM, Only Root Admin can migrate a VM!");
+        }
+
+        VMInstanceVO vm = _vmInstanceDao.findById(vmId);
+        if (vm == null) {
+            throw new InvalidParameterValueException("Unable to find the vm by id " + vmId);
+        }
+
+        if (vm.getState() != State.Running) {
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("VM is not Running, unable to migrate the vm " + vm);
+            }
+            CloudRuntimeException ex = new CloudRuntimeException("VM is not Running, unable to migrate the vm with" +
+                    " specified id");
+            ex.addProxyObject(vm, vmId, "vmId");
+            throw ex;
+        }
+
+        if (!vm.getHypervisorType().equals(HypervisorType.XenServer) &&
+                !vm.getHypervisorType().equals(HypervisorType.VMware) &&
+                !vm.getHypervisorType().equals(HypervisorType.KVM) &&
+                !vm.getHypervisorType().equals(HypervisorType.Ovm)) {
+            throw new InvalidParameterValueException("Unsupported hypervisor type for vm migration, we support" +
+                    " XenServer/VMware/KVM only");
+        }
+
+        long srcHostId = vm.getHostId();
+        Host srcHost = _resourceMgr.getHost(srcHostId);
+        // Check that the source and destination hosts are valid and that the vm is not already on the destination host.
+        if (destinationHost.getId() == srcHostId) {
+            throw new InvalidParameterValueException("Cannot migrate VM, VM is already present on this host, please" +
+                    " specify valid destination host to migrate the VM");
+        }
+
+        // Check if the source and destination hosts are of the same type and support storage motion.
+        if (!(srcHost.getHypervisorType().equals(destinationHost.getHypervisorType()) &&
+            srcHost.getHypervisorVersion().equals(destinationHost.getHypervisorVersion()))) {
+            throw new CloudRuntimeException("The source and destination hosts are not of the same type and version. " +
+                "Source hypervisor type and version: " + srcHost.getHypervisorType().toString() + " " +
+                srcHost.getHypervisorVersion() + ", Destination hypervisor type and version: " +
+                destinationHost.getHypervisorType().toString() + " " + destinationHost.getHypervisorVersion());
+        }
+
+        HypervisorCapabilitiesVO capabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(
+                srcHost.getHypervisorType(), srcHost.getHypervisorVersion());
+        if (!capabilities.isStorageMotionSupported()) {
+            throw new CloudRuntimeException("Migration with storage isn't supported on hypervisor " +
+                    srcHost.getHypervisorType() + " of version " + srcHost.getHypervisorVersion());
+        }
+
+        // Check if destination host is up.
+        if (destinationHost.getStatus() != com.cloud.host.Status.Up ||
+                destinationHost.getResourceState() != ResourceState.Enabled){
+            throw new CloudRuntimeException("Cannot migrate VM, destination host is not in correct state, has " +
+                    "status: " + destinationHost.getStatus() + ", state: " + destinationHost.getResourceState());
+        }
+
+        List<VolumeVO> vmVolumes = _volsDao.findUsableVolumesForInstance(vm.getId());
+        Map<VolumeVO, StoragePoolVO> volToPoolObjectMap = new HashMap<VolumeVO, StoragePoolVO>();
+        if (!isVMUsingLocalStorage(vm) && destinationHost.getClusterId() == srcHost.getClusterId()) {
+            if (volumeToPool.isEmpty()) {
+                // If the destination host is in the same cluster and volumes do not have to be migrated across pools
+                // then fail the call. migrateVirtualMachine api should have been used.
+                throw new InvalidParameterValueException("Migration of the vm " + vm + "from host " + srcHost +
+                        " to destination host " + destinationHost + " doesn't involve migrating the volumes.");
+            }
+        }
+
+        if (!volumeToPool.isEmpty()) {
+            // Check if all the volumes and pools passed as parameters are valid.
+            for (Map.Entry<String, String> entry : volumeToPool.entrySet()) {
+                VolumeVO volume = _volsDao.findByUuid(entry.getKey());
+                StoragePoolVO pool = _storagePoolDao.findByUuid(entry.getValue());
+                if (volume == null) {
+                    throw new InvalidParameterValueException("There is no volume present with the given id " +
+                            entry.getKey());
+                } else if (pool == null) {
+                    throw new InvalidParameterValueException("There is no storage pool present with the given id " +
+                            entry.getValue());
+                } else {
+                    // Verify the volume given belongs to the vm.
+                    if (!vmVolumes.contains(volume)) {
+                        throw new InvalidParameterValueException("There volume " + volume + " doesn't belong to " +
+                                "the virtual machine "+ vm + " that has to be migrated");
+                    }
+                    volToPoolObjectMap.put(volume, pool);
+                }
+            }
+        }
+
+        // Check if all the volumes are in the correct state.
+        for (VolumeVO volume : vmVolumes) {
+            if (volume.getState() != Volume.State.Ready) {
+                throw new CloudRuntimeException("Volume " + volume + " of the VM is not in Ready state. Cannot " +
+                        "migrate the vm with its volumes.");
+            }
+        }
+
+        // Check max guest vm limit for the destinationHost.
+        HostVO destinationHostVO = _hostDao.findById(destinationHost.getId());
+        if(_capacityMgr.checkIfHostReachMaxGuestLimit(destinationHostVO)){
+            throw new VirtualMachineMigrationException("Host name: " + destinationHost.getName() + ", hostId: " +
+                    destinationHost.getId() + " already has max running vms (count includes system VMs). Cannot" +
+                    " migrate to this host");
+        }
+
+        VMInstanceVO migratedVm = _itMgr.migrateWithStorage(vm, srcHostId, destinationHost.getId(), volToPoolObjectMap);
+        return migratedVm;
+    }
+
     @DB
     @Override
     @ActionEvent(eventType = EventTypes.EVENT_VM_MOVE, eventDescription = "move VM to another user", async = false)

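The volumeToPool argument of migrateVirtualMachineWithVolume() maps volume uuids to storage pool uuids. The sketch below condenses the validation performed before the object map is built; Volume, StoragePool and the two lookup maps are illustrative stand-ins for the VO classes and the DAO findByUuid calls, not CloudStack code.

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class VolumeToPoolValidationSketch {

        static class Volume {
            final String uuid;
            Volume(String uuid) { this.uuid = uuid; }
        }

        static class StoragePool {
            final String uuid;
            StoragePool(String uuid) { this.uuid = uuid; }
        }

        // vmVolumes plays the role of _volsDao.findUsableVolumesForInstance(vm.getId());
        // volumesByUuid and poolsByUuid stand in for the DAO findByUuid lookups.
        static Map<Volume, StoragePool> validate(List<Volume> vmVolumes,
                Map<String, Volume> volumesByUuid, Map<String, StoragePool> poolsByUuid,
                Map<String, String> volumeUuidToPoolUuid) {
            Map<Volume, StoragePool> volToPoolObjectMap = new HashMap<Volume, StoragePool>();
            for (Map.Entry<String, String> entry : volumeUuidToPoolUuid.entrySet()) {
                Volume volume = volumesByUuid.get(entry.getKey());
                StoragePool pool = poolsByUuid.get(entry.getValue());
                if (volume == null) {
                    throw new IllegalArgumentException("There is no volume with the given id " + entry.getKey());
                } else if (pool == null) {
                    throw new IllegalArgumentException("There is no storage pool with the given id " + entry.getValue());
                } else if (!vmVolumes.contains(volume)) {
                    throw new IllegalArgumentException("Volume " + volume.uuid + " doesn't belong to the vm being migrated");
                }
                volToPoolObjectMap.put(volume, pool);
            }
            return volToPoolObjectMap;
        }
    }
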
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/server/src/com/cloud/vm/VirtualMachineManager.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/vm/VirtualMachineManager.java b/server/src/com/cloud/vm/VirtualMachineManager.java
index 4a30d97..ea9f7bb 100644
--- a/server/src/com/cloud/vm/VirtualMachineManager.java
+++ b/server/src/com/cloud/vm/VirtualMachineManager.java
@@ -20,6 +20,7 @@ import java.net.URI;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 
 import com.cloud.agent.api.to.NicTO;
 import com.cloud.agent.api.to.VirtualMachineTO;
@@ -41,6 +42,7 @@ import com.cloud.service.ServiceOfferingVO;
 import com.cloud.storage.DiskOfferingVO;
 import com.cloud.storage.StoragePool;
 import com.cloud.storage.VMTemplateVO;
+import com.cloud.storage.VolumeVO;
 import com.cloud.user.Account;
 import com.cloud.user.User;
 import com.cloud.utils.Pair;
@@ -109,6 +111,8 @@ public interface VirtualMachineManager extends Manager {
 
     <T extends VMInstanceVO> T migrate(T vm, long srcHostId, DeployDestination dest) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException;
 
+    <T extends VMInstanceVO> T migrateWithStorage(T vm, long srcId, long destId, Map<VolumeVO, StoragePoolVO> volumeToPool) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException;
+
     <T extends VMInstanceVO> T reboot(T vm, Map<VirtualMachineProfile.Param, Object> params, User caller, Account account) throws InsufficientCapacityException, ResourceUnavailableException;
 
     <T extends VMInstanceVO> T advanceReboot(T vm, Map<VirtualMachineProfile.Param, Object> params, User caller, Account account) throws InsufficientCapacityException, ResourceUnavailableException, ConcurrentOperationException, OperationTimedoutException;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/server/src/com/cloud/vm/VirtualMachineManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/vm/VirtualMachineManagerImpl.java b/server/src/com/cloud/vm/VirtualMachineManagerImpl.java
index 19f4005..2ecece2 100755
--- a/server/src/com/cloud/vm/VirtualMachineManagerImpl.java
+++ b/server/src/com/cloud/vm/VirtualMachineManagerImpl.java
@@ -38,7 +38,12 @@ import javax.naming.ConfigurationException;
 
 import com.cloud.capacity.CapacityManager;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 
 import com.cloud.dc.*;
 import com.cloud.agent.api.*;
@@ -50,6 +55,8 @@ import com.cloud.agent.Listener;
 import com.cloud.agent.api.StartupRoutingCommand.VmState;
 import com.cloud.agent.api.to.NicTO;
 import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.agent.api.to.StorageFilerTO;
+import com.cloud.agent.api.to.VolumeTO;
 import com.cloud.agent.manager.Commands;
 import com.cloud.agent.manager.allocator.HostAllocator;
 import com.cloud.alert.AlertManager;
@@ -57,6 +64,7 @@ import com.cloud.cluster.ClusterManager;
 import com.cloud.configuration.Config;
 import com.cloud.configuration.ConfigurationManager;
 import com.cloud.configuration.dao.ConfigurationDao;
+import com.cloud.dc.dao.ClusterDao;
 import com.cloud.dc.DataCenter;
 import com.cloud.dc.DataCenterVO;
 import com.cloud.dc.HostPodVO;
@@ -111,10 +119,13 @@ import com.cloud.storage.Volume;
 import com.cloud.storage.Volume.Type;
 import com.cloud.storage.VolumeManager;
 import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.DiskOfferingDao;
 import com.cloud.storage.dao.GuestOSCategoryDao;
 import com.cloud.storage.dao.GuestOSDao;
 import com.cloud.storage.dao.VMTemplateDao;
 import com.cloud.storage.dao.VolumeDao;
+import com.cloud.storage.dao.StoragePoolHostDao;
+import com.cloud.storage.snapshot.SnapshotManager;
 import com.cloud.user.Account;
 import com.cloud.user.AccountManager;
 import com.cloud.user.User;
@@ -164,6 +175,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
     @Inject
     protected ServiceOfferingDao _offeringDao;
     @Inject
+    protected DiskOfferingDao _diskOfferingDao;
+    @Inject
     protected VMTemplateDao _templateDao;
     @Inject
     protected UserDao _userDao;
@@ -202,13 +215,19 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
     @Inject
     protected DataCenterDao _dcDao;
     @Inject
+    protected ClusterDao _clusterDao;
+    @Inject
     protected PrimaryDataStoreDao _storagePoolDao;
     @Inject
     protected HypervisorGuruManager _hvGuruMgr;
     @Inject
     protected NetworkDao _networkDao;
     @Inject
+    protected StoragePoolHostDao _poolHostDao;
+    @Inject
     protected VMSnapshotDao _vmSnapshotDao;
+    @Inject
+    protected VolumeDataFactory volFactory;
 
     protected List<DeploymentPlanner> _planners;
     public List<DeploymentPlanner> getPlanners() {
@@ -226,10 +245,16 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 		this._hostAllocators = _hostAllocators;
 	}
 
-	@Inject
+    @Inject
+    protected List<StoragePoolAllocator> _storagePoolAllocators;
+
+    @Inject
     protected ResourceManager _resourceMgr;
 
     @Inject
+    protected SnapshotManager _snapshotMgr;
+
+    @Inject
     protected VMSnapshotManager _vmSnapshotMgr = null;
     @Inject
     protected ClusterDetailsDao  _clusterDetailsDao;
@@ -1427,6 +1452,189 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
         }
     }
 
+    private Map<VolumeVO, StoragePoolVO> getPoolListForVolumesForMigration(VirtualMachineProfile<VMInstanceVO> profile,
+            Host host, Map<VolumeVO, StoragePoolVO> volumeToPool) {
+        List<VolumeVO> allVolumes = _volsDao.findUsableVolumesForInstance(profile.getId());
+        for (VolumeVO volume : allVolumes) {
+            StoragePoolVO pool = volumeToPool.get(volume);
+            DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
+            StoragePoolVO currentPool = _storagePoolDao.findById(volume.getPoolId());
+            if (pool != null) {
+                // Check that the pool is accessible from the destination host and that the disk offering with which
+                // the volume was created is compatible with the pool type (local vs. shared).
+                if (_poolHostDao.findByPoolHost(pool.getId(), host.getId()) == null ||
+                        pool.isLocal() != diskOffering.getUseLocalStorage()) {
+                    // Cannot find a pool for the volume. Throw an exception.
+                    throw new CloudRuntimeException("Cannot migrate volume " + volume + " to storage pool " + pool +
+                            " while migrating vm to host " + host + ". Either the pool is not accessible from the " +
+                            "host or because of the offering with which the volume is created it cannot be placed on " +
+                            "the given pool.");
+                } else if (pool.getId() == currentPool.getId()) {
+                    // If the pool to migrate to is the same as the current pool, remove the volume from the list of
+                    // volumes to be migrated.
+                    volumeToPool.remove(volume);
+                }
+            } else {
+                // Find a suitable pool for the volume. Call the storage pool allocator to find the list of pools.
+                DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType());
+                DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(),
+                        host.getClusterId(), host.getId(), null, null);
+                ExcludeList avoid = new ExcludeList();
+                boolean currentPoolAvailable = false;
+
+                for (StoragePoolAllocator allocator : _storagePoolAllocators) {
+                    List<StoragePool> poolList = allocator.allocateToPool(diskProfile, profile, plan, avoid,
+                            StoragePoolAllocator.RETURN_UPTO_ALL);
+                    if (poolList != null && !poolList.isEmpty()) {
+                        // Volume needs to be migrated. Pick the first pool from the list. Add a mapping to migrate the
+                        // volume to a pool only if it is required; that is, the current pool on which the volume
+                        // resides is not available on the destination host.
+                        if (poolList.contains(currentPool)) {
+                            currentPoolAvailable = true;
+                        } else {
+                            volumeToPool.put(volume, _storagePoolDao.findByUuid(poolList.get(0).getUuid()));
+                        }
+
+                        break;
+                    }
+                }
+
+                if (!currentPoolAvailable && !volumeToPool.containsKey(volume)) {
+                    // Cannot find a pool for the volume. Throw an exception.
+                    throw new CloudRuntimeException("Cannot find a storage pool which is available for volume " +
+                            volume + " while migrating virtual machine " + profile.getVirtualMachine() + " to host " +
+                            host);
+                }
+            }
+        }
+
+        return volumeToPool;
+    }
+
+    private <T extends VMInstanceVO> void moveVmToMigratingState(T vm, Long hostId, ItWorkVO work)
+            throws ConcurrentOperationException {
+        // Put the vm in migrating state.
+        try {
+            if (!changeState(vm, Event.MigrationRequested, hostId, work, Step.Migrating)) {
+                s_logger.info("Migration cancelled because state has changed: " + vm);
+                throw new ConcurrentOperationException("Migration cancelled because state has changed: " + vm);
+            }
+        } catch (NoTransitionException e) {
+            s_logger.info("Migration cancelled because " + e.getMessage());
+            throw new ConcurrentOperationException("Migration cancelled because " + e.getMessage());
+        }
+    }
+
+    private <T extends VMInstanceVO> void moveVmOutofMigratingStateOnSuccess(T vm, Long hostId, ItWorkVO work)
+            throws ConcurrentOperationException {
+        // Put the vm in running state.
+        try {
+            if (!changeState(vm, Event.OperationSucceeded, hostId, work, Step.Started)) {
+                s_logger.error("Unable to change the state for " + vm);
+                throw new ConcurrentOperationException("Unable to change the state for " + vm);
+            }
+        } catch (NoTransitionException e) {
+            s_logger.error("Unable to change state due to " + e.getMessage());
+            throw new ConcurrentOperationException("Unable to change state due to " + e.getMessage());
+        }
+    }
+
+    @Override
+    public <T extends VMInstanceVO> T migrateWithStorage(T vm, long srcHostId, long destHostId,
+            Map<VolumeVO, StoragePoolVO> volumeToPool) throws ResourceUnavailableException, ConcurrentOperationException,
+            ManagementServerException, VirtualMachineMigrationException {
+
+        HostVO srcHost = _hostDao.findById(srcHostId);
+        HostVO destHost = _hostDao.findById(destHostId);
+        VirtualMachineGuru<T> vmGuru = getVmGuru(vm);
+
+        DataCenterVO dc = _dcDao.findById(destHost.getDataCenterId());
+        HostPodVO pod = _podDao.findById(destHost.getPodId());
+        Cluster cluster = _clusterDao.findById(destHost.getClusterId());
+        DeployDestination destination = new DeployDestination(dc, pod, cluster, destHost);
+
+        // Create a map of which volume should go in which storage pool.
+        long vmId = vm.getId();
+        vm = vmGuru.findById(vmId);
+        VirtualMachineProfile<VMInstanceVO> profile = new VirtualMachineProfileImpl<VMInstanceVO>(vm);
+        volumeToPool = getPoolListForVolumesForMigration(profile, destHost, volumeToPool);
+
+        // If none of the volumes have to be migrated, fail the call. The administrator should use the
+        // migrateVirtualMachine API instead, since the vm can be moved without moving its storage.
+        if (volumeToPool.isEmpty()) {
+            throw new InvalidParameterValueException("Migration of the vm " + vm + "from host " + srcHost +
+                    " to destination host " + destHost + " doesn't involve migrating the volumes.");
+        }
+
+        short alertType = AlertManager.ALERT_TYPE_USERVM_MIGRATE;
+        if (VirtualMachine.Type.DomainRouter.equals(vm.getType())) {
+            alertType = AlertManager.ALERT_TYPE_DOMAIN_ROUTER_MIGRATE;
+        } else if (VirtualMachine.Type.ConsoleProxy.equals(vm.getType())) {
+            alertType = AlertManager.ALERT_TYPE_CONSOLE_PROXY_MIGRATE;
+        }
+
+        _networkMgr.prepareNicForMigration(profile, destination);
+        this.volumeMgr.prepareForMigration(profile, destination);
+        HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
+        VirtualMachineTO to = hvGuru.implement(profile);
+
+        ItWorkVO work = new ItWorkVO(UUID.randomUUID().toString(), _nodeId, State.Migrating, vm.getType(), vm.getId());
+        work.setStep(Step.Prepare);
+        work.setResourceType(ItWorkVO.ResourceType.Host);
+        work.setResourceId(destHostId);
+        work = _workDao.persist(work);
+
+        // Put the vm in migrating state.
+        vm.setLastHostId(srcHostId);
+        moveVmToMigratingState(vm, destHostId, work);
+
+        boolean migrated = false;
+        try {
+            // Migrate the vm and its volume.
+            this.volumeMgr.migrateVolumes(vm, to, srcHost, destHost, volumeToPool);
+
+            // Put the vm back to running state.
+            moveVmOutofMigratingStateOnSuccess(vm, destHost.getId(), work);
+
+            try {
+                if (!checkVmOnHost(vm, destHostId)) {
+                    s_logger.error("Vm not found on destination host. Unable to complete migration for " + vm);
+                    try {
+                        _agentMgr.send(srcHostId, new Commands(cleanup(vm.getInstanceName())), null);
+                    } catch (AgentUnavailableException e) {
+                        s_logger.error("AgentUnavailableException while cleanup on source host: " + srcHostId);
+                    }
+                    cleanup(vmGuru, new VirtualMachineProfileImpl<T>(vm), work, Event.AgentReportStopped, true,
+                            _accountMgr.getSystemUser(), _accountMgr.getSystemAccount());
+                    return null;
+                }
+            } catch (OperationTimedoutException e) {
+                s_logger.warn("Error while checking the vm " + vm + " is on host " + destHost, e);
+            }
+
+            migrated = true;
+            return vm;
+        } finally {
+            if (!migrated) {
+                s_logger.info("Migration was unsuccessful.  Cleaning up: " + vm);
+                _alertMgr.sendAlert(alertType, srcHost.getDataCenterId(), srcHost.getPodId(), "Unable to migrate vm " +
+                        vm.getInstanceName() + " from host " + srcHost.getName() + " in zone " + dc.getName() +
+                        " and pod " + dc.getName(), "Migrate Command failed.  Please check logs.");
+                try {
+                    _agentMgr.send(destHostId, new Commands(cleanup(vm.getInstanceName())), null);
+                    stateTransitTo(vm, Event.OperationFailed, srcHostId);
+                } catch (AgentUnavailableException e) {
+                    s_logger.warn("Looks like the destination Host is unavailable for cleanup.", e);
+                } catch (NoTransitionException e) {
+                    s_logger.error("Error while transitioning vm from migrating to running state.", e);
+                }
+            }
+
+            work.setStep(Step.Done);
+            _workDao.update(work.getId(), work);
+        }
+    }
+
     @Override
     public VirtualMachineTO toVmTO(VirtualMachineProfile<? extends VMInstanceVO> profile) {
         HypervisorGuru hvGuru = _hvGuruMgr.getGuru(profile.getVirtualMachine().getHypervisorType());

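getPoolListForVolumesForMigration() above decides, for every volume of the vm, whether it needs to move at all and to which pool. The stand-alone sketch below captures those rules with hypothetical types; the Allocator interface stands in for the StoragePoolAllocator loop in the real method and is not a CloudStack API.

    import java.util.List;
    import java.util.Map;

    public class PoolSelectionSketch {

        static class Pool {
            final long id;
            final boolean local;
            Pool(long id, boolean local) { this.id = id; this.local = local; }
        }

        interface Allocator {
            // Stand-in for StoragePoolAllocator.allocateToPool(...) planned against the destination host.
            List<Pool> allocate();
        }

        static void selectPoolForVolume(long volumeId, boolean offeringUsesLocalStorage,
                Pool currentPool, Pool requestedPool, boolean requestedPoolVisibleFromDestHost,
                Allocator allocator, Map<Long, Pool> volumeToPool) {
            if (requestedPool != null) {
                // A pool was requested: it must be reachable from the destination host and its
                // type (local vs. shared) must match the disk offering, otherwise reject.
                if (!requestedPoolVisibleFromDestHost || requestedPool.local != offeringUsesLocalStorage) {
                    throw new IllegalStateException("Requested pool cannot host volume " + volumeId);
                }
                if (requestedPool.id == currentPool.id) {
                    volumeToPool.remove(volumeId);      // same pool, nothing to migrate for this volume
                }
                return;
            }
            // No pool was requested: ask the allocator; keep the volume on its current pool if that
            // pool is visible from the destination host, otherwise map it to the first candidate.
            List<Pool> candidates = allocator.allocate();
            if (candidates == null || candidates.isEmpty()) {
                throw new IllegalStateException("No storage pool is available for volume " + volumeId);
            }
            for (Pool candidate : candidates) {
                if (candidate.id == currentPool.id) {
                    return;
                }
            }
            volumeToPool.put(volumeId, candidates.get(0));
        }
    }
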
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/server/test/com/cloud/vm/MockUserVmManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/test/com/cloud/vm/MockUserVmManagerImpl.java b/server/test/com/cloud/vm/MockUserVmManagerImpl.java
index fd826d9..0d0a8f4 100644
--- a/server/test/com/cloud/vm/MockUserVmManagerImpl.java
+++ b/server/test/com/cloud/vm/MockUserVmManagerImpl.java
@@ -367,6 +367,14 @@ public class MockUserVmManagerImpl extends ManagerBase implements UserVmManager,
     }
 
     @Override
+    public VirtualMachine migrateVirtualMachineWithVolume(Long vmId, Host destinationHost, Map<String, String> volumeToPool)
+            throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException,
+            VirtualMachineMigrationException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
     public UserVm moveVMToUser(AssignVMCmd moveUserVMCmd)
             throws ResourceAllocationException, ConcurrentOperationException,
             ResourceUnavailableException, InsufficientCapacityException {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/server/test/com/cloud/vm/MockVirtualMachineManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/test/com/cloud/vm/MockVirtualMachineManagerImpl.java b/server/test/com/cloud/vm/MockVirtualMachineManagerImpl.java
index 4917e77..94ddea6 100755
--- a/server/test/com/cloud/vm/MockVirtualMachineManagerImpl.java
+++ b/server/test/com/cloud/vm/MockVirtualMachineManagerImpl.java
@@ -23,6 +23,7 @@ import java.util.Map;
 import javax.ejb.Local;
 import javax.naming.ConfigurationException;
 
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 import org.springframework.stereotype.Component;
 
 import com.cloud.agent.api.to.NicTO;
@@ -45,6 +46,7 @@ import com.cloud.service.ServiceOfferingVO;
 import com.cloud.storage.DiskOfferingVO;
 import com.cloud.storage.StoragePool;
 import com.cloud.storage.VMTemplateVO;
+import com.cloud.storage.VolumeVO;
 import com.cloud.user.Account;
 import com.cloud.user.User;
 import com.cloud.utils.Pair;
@@ -143,6 +145,14 @@ public class MockVirtualMachineManagerImpl extends ManagerBase implements Virtua
     }
 
     @Override
+    public <T extends VMInstanceVO> T migrateWithStorage(T vm, long srcHostId, long destHostId,
+            Map<VolumeVO, StoragePoolVO> volumeToPool) throws ResourceUnavailableException,
+            ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException {
+        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
     public VMInstanceVO findByIdAndType(Type type, long vmId) {
         // TODO Auto-generated method stub
         return null;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/server/test/com/cloud/vm/VirtualMachineManagerImplTest.java
----------------------------------------------------------------------
diff --git a/server/test/com/cloud/vm/VirtualMachineManagerImplTest.java b/server/test/com/cloud/vm/VirtualMachineManagerImplTest.java
index 322f051..dd51e74 100644
--- a/server/test/com/cloud/vm/VirtualMachineManagerImplTest.java
+++ b/server/test/com/cloud/vm/VirtualMachineManagerImplTest.java
@@ -31,6 +31,41 @@ import com.cloud.service.ServiceOfferingVO;
 import com.cloud.storage.VMTemplateVO;
 import com.cloud.storage.VolumeManager;
 import com.cloud.storage.VolumeVO;
+import com.cloud.agent.api.PrepareForMigrationAnswer;
+import com.cloud.agent.api.PrepareForMigrationCommand;
+import com.cloud.agent.api.MigrateWithStorageAnswer;
+import com.cloud.agent.api.MigrateWithStorageCommand;
+import com.cloud.agent.api.MigrateWithStorageReceiveAnswer;
+import com.cloud.agent.api.MigrateWithStorageReceiveCommand;
+import com.cloud.agent.api.MigrateWithStorageSendAnswer;
+import com.cloud.agent.api.MigrateWithStorageSendCommand;
+import com.cloud.agent.api.MigrateWithStorageCompleteAnswer;
+import com.cloud.agent.api.MigrateWithStorageCompleteCommand;
+import com.cloud.agent.api.CheckVirtualMachineAnswer;
+import com.cloud.agent.api.CheckVirtualMachineCommand;
+import com.cloud.capacity.CapacityManager;
+import com.cloud.configuration.ConfigurationManager;
+import com.cloud.configuration.dao.ConfigurationDao;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.dc.dao.DataCenterDao;
+import com.cloud.dc.dao.HostPodDao;
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.exception.ManagementServerException;
+import com.cloud.exception.ResourceUnavailableException;
+import com.cloud.exception.VirtualMachineMigrationException;
+import com.cloud.exception.OperationTimedoutException;
+import com.cloud.host.HostVO;
+import com.cloud.host.dao.HostDao;
+import com.cloud.hypervisor.HypervisorGuru;
+import com.cloud.hypervisor.HypervisorGuruManager;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.network.NetworkManager;
+import com.cloud.storage.DiskOfferingVO;
+import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.VolumeManager;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.DiskOfferingDao;
+import com.cloud.storage.dao.StoragePoolHostDao;
 import com.cloud.storage.dao.VMTemplateDao;
 import com.cloud.storage.dao.VolumeDao;
 import com.cloud.user.*;
@@ -41,20 +76,33 @@ import com.cloud.vm.dao.UserVmDao;
 import com.cloud.vm.dao.VMInstanceDao;
 import org.apache.cloudstack.api.command.user.vm.RestoreVMCmd;
 import org.apache.cloudstack.api.command.user.vm.ScaleVMCmd;
+import com.cloud.utils.Pair;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.vm.dao.UserVmDao;
+import com.cloud.vm.dao.VMInstanceDao;
+import com.cloud.vm.snapshot.VMSnapshotManager;
+import com.cloud.vm.VirtualMachine.Event;
+import com.cloud.vm.VirtualMachine.State;
+
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+
 import org.junit.Test;
 import org.junit.Before;
 import org.mockito.Mock;
 import org.mockito.MockitoAnnotations;
 import org.mockito.Spy;
+import static org.mockito.Matchers.anyLong;
 import static org.mockito.Mockito.*;
 
-
 import java.lang.reflect.Field;
 import java.util.List;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.Iterator;
 
 public class VirtualMachineManagerImplTest {
 
-
         @Spy VirtualMachineManagerImpl _vmMgr = new VirtualMachineManagerImpl();
         @Mock
         VolumeManager _storageMgr;
@@ -106,6 +154,25 @@ public class VirtualMachineManagerImplTest {
         List<VolumeVO> _rootVols;
         @Mock
         ItWorkVO _work;
+
+        @Mock ClusterDao _clusterDao;
+        @Mock HostPodDao _podDao;
+        @Mock DataCenterDao _dcDao;
+        @Mock DiskOfferingDao _diskOfferingDao;
+        @Mock PrimaryDataStoreDao _storagePoolDao;
+        @Mock StoragePoolHostDao _poolHostDao;
+        @Mock NetworkManager _networkMgr;
+        @Mock HypervisorGuruManager _hvGuruMgr;
+        @Mock VMSnapshotManager _vmSnapshotMgr;
+
+        // Mock objects for vm migration with storage test.
+        @Mock DiskOfferingVO _diskOfferingMock;
+        @Mock StoragePoolVO _srcStoragePoolMock;
+        @Mock StoragePoolVO _destStoragePoolMock;
+        @Mock HostVO _srcHostMock;
+        @Mock HostVO _destHostMock;
+        @Mock Map<VolumeVO, StoragePoolVO> _volumeToPoolMock;
+
         @Before
         public void setup(){
             MockitoAnnotations.initMocks(this);
@@ -122,6 +189,16 @@ public class VirtualMachineManagerImplTest {
             _vmMgr._nodeId = 1L;
             _vmMgr._workDao = _workDao;
             _vmMgr._agentMgr = _agentMgr;
+            _vmMgr._podDao = _podDao;
+            _vmMgr._clusterDao = _clusterDao;
+            _vmMgr._dcDao = _dcDao;
+            _vmMgr._diskOfferingDao = _diskOfferingDao;
+            _vmMgr._storagePoolDao = _storagePoolDao;
+            _vmMgr._poolHostDao= _poolHostDao;
+            _vmMgr._networkMgr = _networkMgr;
+            _vmMgr._hvGuruMgr = _hvGuruMgr;
+            _vmMgr._vmSnapshotMgr = _vmSnapshotMgr;
+            _vmMgr._vmDao = _vmInstanceDao;
 
             when(_vmMock.getId()).thenReturn(314l);
             when(_vmInstance.getId()).thenReturn(1L);
@@ -204,5 +281,155 @@ public class VirtualMachineManagerImplTest {
         return serviceOffering;
     }
 
+    private void initializeMockConfigForMigratingVmWithVolumes() throws OperationTimedoutException,
+        ResourceUnavailableException {
+
+        // Mock the source and destination hosts.
+        when(_srcHostMock.getId()).thenReturn(5L);
+        when(_destHostMock.getId()).thenReturn(6L);
+        when(_hostDao.findById(5L)).thenReturn(_srcHostMock);
+        when(_hostDao.findById(6L)).thenReturn(_destHostMock);
+
+        // Mock the vm being migrated.
+        when(_vmMock.getId()).thenReturn(1L);
+        when(_vmMock.getHypervisorType()).thenReturn(HypervisorType.XenServer);
+        when(_vmMock.getState()).thenReturn(State.Running).thenReturn(State.Running).thenReturn(State.Migrating)
+            .thenReturn(State.Migrating);
+        when(_vmMock.getHostId()).thenReturn(5L);
+        when(_vmInstance.getId()).thenReturn(1L);
+        when(_vmInstance.getServiceOfferingId()).thenReturn(2L);
+        when(_vmInstance.getInstanceName()).thenReturn("myVm");
+        when(_vmInstance.getHostId()).thenReturn(5L);
+        when(_vmInstance.getType()).thenReturn(VirtualMachine.Type.User);
+        when(_vmInstance.getState()).thenReturn(State.Running).thenReturn(State.Running).thenReturn(State.Migrating)
+            .thenReturn(State.Migrating);
+
+        // Mock the work item.
+        when(_workDao.persist(any(ItWorkVO.class))).thenReturn(_work);
+        when(_workDao.update("1", _work)).thenReturn(true);
+        when(_work.getId()).thenReturn("1");
+        doNothing().when(_work).setStep(ItWorkVO.Step.Done);
+
+        // Mock the vm guru and the user vm object that gets returned.
+        _vmMgr._vmGurus = new HashMap<VirtualMachine.Type, VirtualMachineGuru<? extends VMInstanceVO>>();
+        UserVmManagerImpl userVmManager = mock(UserVmManagerImpl.class);
+        _vmMgr.registerGuru(VirtualMachine.Type.User, userVmManager);
+        when(userVmManager.findById(anyLong())).thenReturn(_vmMock);
+
+        // Mock the iteration over all the volumes of an instance.
+        Iterator<VolumeVO> volumeIterator = mock(Iterator.class);
+        when(_volsDao.findUsableVolumesForInstance(anyLong())).thenReturn(_rootVols);
+        when(_rootVols.iterator()).thenReturn(volumeIterator);
+        when(volumeIterator.hasNext()).thenReturn(true, false);
+        when(volumeIterator.next()).thenReturn(_volumeMock);
+
+        // Mock the disk offering and pool objects for a volume.
+        when(_volumeMock.getDiskOfferingId()).thenReturn(5L);
+        when(_volumeMock.getPoolId()).thenReturn(200L);
+        when(_diskOfferingDao.findById(anyLong())).thenReturn(_diskOfferingMock);
+        when(_storagePoolDao.findById(anyLong())).thenReturn(_srcStoragePoolMock);
+
+        // Mock the volume to pool mapping.
+        when(_volumeToPoolMock.get(_volumeMock)).thenReturn(_destStoragePoolMock);
+        when(_destStoragePoolMock.getId()).thenReturn(201L);
+        when(_srcStoragePoolMock.getId()).thenReturn(200L);
+        when(_destStoragePoolMock.isLocal()).thenReturn(false);
+        when(_diskOfferingMock.getUseLocalStorage()).thenReturn(false);
+        when(_poolHostDao.findByPoolHost(anyLong(), anyLong())).thenReturn(mock(StoragePoolHostVO.class));
+
+        // Mock hypervisor guru.
+        HypervisorGuru guruMock = mock(HypervisorGuru.class);
+        when(_hvGuruMgr.getGuru(HypervisorType.XenServer)).thenReturn(guruMock);
+
+        when(_srcHostMock.getClusterId()).thenReturn(3L);
+        when(_destHostMock.getClusterId()).thenReturn(3L);
+
+        // Mock the commands and answers to the agent.
+        PrepareForMigrationAnswer prepAnswerMock = mock(PrepareForMigrationAnswer.class);
+        when(prepAnswerMock.getResult()).thenReturn(true);
+        when(_agentMgr.send(anyLong(), isA(PrepareForMigrationCommand.class))).thenReturn(prepAnswerMock);
+
+        MigrateWithStorageAnswer migAnswerMock = mock(MigrateWithStorageAnswer.class);
+        when(migAnswerMock.getResult()).thenReturn(true);
+        when(_agentMgr.send(anyLong(), isA(MigrateWithStorageCommand.class))).thenReturn(migAnswerMock);
+
+        MigrateWithStorageReceiveAnswer migRecAnswerMock = mock(MigrateWithStorageReceiveAnswer.class);
+        when(migRecAnswerMock.getResult()).thenReturn(true);
+        when(_agentMgr.send(anyLong(), isA(MigrateWithStorageReceiveCommand.class))).thenReturn(migRecAnswerMock);
+
+        MigrateWithStorageSendAnswer migSendAnswerMock = mock(MigrateWithStorageSendAnswer.class);
+        when(migSendAnswerMock.getResult()).thenReturn(true);
+        when(_agentMgr.send(anyLong(), isA(MigrateWithStorageSendCommand.class))).thenReturn(migSendAnswerMock);
+
+        MigrateWithStorageCompleteAnswer migCompleteAnswerMock = mock(MigrateWithStorageCompleteAnswer.class);
+        when(migCompleteAnswerMock.getResult()).thenReturn(true);
+        when(_agentMgr.send(anyLong(), isA(MigrateWithStorageCompleteCommand.class))).thenReturn(migCompleteAnswerMock);
+
+        CheckVirtualMachineAnswer checkVmAnswerMock = mock(CheckVirtualMachineAnswer.class);
+        when(checkVmAnswerMock.getResult()).thenReturn(true);
+        when(checkVmAnswerMock.getState()).thenReturn(State.Running);
+        when(_agentMgr.send(anyLong(), isA(CheckVirtualMachineCommand.class))).thenReturn(checkVmAnswerMock);
+
+        // Mock the state transitions of vm.
+        Pair<Long, Long> opaqueMock = new Pair<Long, Long> (_vmMock.getHostId(), _destHostMock.getId());
+        when(_vmSnapshotMgr.hasActiveVMSnapshotTasks(anyLong())).thenReturn(false);
+        when(_vmInstanceDao.updateState(State.Running, Event.MigrationRequested, State.Migrating, _vmMock, opaqueMock))
+            .thenReturn(true);
+        when(_vmInstanceDao.updateState(State.Migrating, Event.OperationSucceeded, State.Running, _vmMock, opaqueMock))
+            .thenReturn(true);
+    }
+
+    // Check migration of a vm with its volumes within a cluster.
+    @Test
+    public void testMigrateWithVolumeWithinCluster() throws ResourceUnavailableException, ConcurrentOperationException,
+        ManagementServerException, VirtualMachineMigrationException, OperationTimedoutException {
 
+        initializeMockConfigForMigratingVmWithVolumes();
+        when(_srcHostMock.getClusterId()).thenReturn(3L);
+        when(_destHostMock.getClusterId()).thenReturn(3L);
+
+        _vmMgr.migrateWithStorage(_vmInstance, _srcHostMock.getId(), _destHostMock.getId(), _volumeToPoolMock);
+    }
+
+    // Check migration of a vm with its volumes across clusters.
+    @Test
+    public void testMigrateWithVolumeAcrossCluster() throws ResourceUnavailableException, ConcurrentOperationException,
+        ManagementServerException, VirtualMachineMigrationException, OperationTimedoutException {
+
+        initializeMockConfigForMigratingVmWithVolumes();
+        when(_srcHostMock.getClusterId()).thenReturn(3L);
+        when(_destHostMock.getClusterId()).thenReturn(4L);
+
+        _vmMgr.migrateWithStorage(_vmInstance, _srcHostMock.getId(), _destHostMock.getId(), _volumeToPoolMock);
+    }
+
+    // Check that migration of a vm fails when the source and destination pools are not of the same type; that is,
+    // one is shared and the other is local.
+    @Test(expected=CloudRuntimeException.class)
+    public void testMigrateWithVolumeFail1() throws ResourceUnavailableException, ConcurrentOperationException,
+        ManagementServerException, VirtualMachineMigrationException, OperationTimedoutException {
+
+        initializeMockConfigForMigratingVmWithVolumes();
+        when(_srcHostMock.getClusterId()).thenReturn(3L);
+        when(_destHostMock.getClusterId()).thenReturn(3L);
+
+        when(_destStoragePoolMock.isLocal()).thenReturn(true);
+        when(_diskOfferingMock.getUseLocalStorage()).thenReturn(false);
+
+        _vmMgr.migrateWithStorage(_vmInstance, _srcHostMock.getId(), _destHostMock.getId(), _volumeToPoolMock);
+    }
+
+    // Check that migration of a vm fails when the vm is not in the Running state.
+    @Test(expected=ConcurrentOperationException.class)
+    public void testMigrateWithVolumeFail2() throws ResourceUnavailableException, ConcurrentOperationException,
+        ManagementServerException, VirtualMachineMigrationException, OperationTimedoutException {
+
+        initializeMockConfigForMigratingVmWithVolumes();
+        when(_srcHostMock.getClusterId()).thenReturn(3L);
+        when(_destHostMock.getClusterId()).thenReturn(3L);
+
+        when(_vmMock.getState()).thenReturn(State.Stopped);
+
+        _vmMgr.migrateWithStorage(_vmInstance, _srcHostMock.getId(), _destHostMock.getId(), _volumeToPoolMock);
+    }
 }


[3/4] Storage motion for Xenserver changes: 1. Implemented Api findStoragePoolsForMigration. Added a new response objects to list storage pools available for migration. 2. Updated migrateVolume api for allowing migrating volumes of running vms. These cha

Posted by ap...@apache.org.
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockStorageMotionStrategy.java
----------------------------------------------------------------------
diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockStorageMotionStrategy.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockStorageMotionStrategy.java
index b619ee9..a84f308 100644
--- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockStorageMotionStrategy.java
+++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockStorageMotionStrategy.java
@@ -18,11 +18,18 @@
  */
 package org.apache.cloudstack.storage.test;
 
+import java.util.Map;
+
 import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
 import org.apache.cloudstack.storage.motion.DataMotionStrategy;
 
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.host.Host;
+
 public class MockStorageMotionStrategy implements DataMotionStrategy {
 
     @Override
@@ -32,6 +39,11 @@ public class MockStorageMotionStrategy implements DataMotionStrategy {
     }
 
     @Override
+    public boolean canHandle(Map<VolumeInfo, DataStore> volumeMap, Host srcHost, Host destHost) {
+        return true;
+    }
+
+    @Override
     public Void copyAsync(DataObject srcData, DataObject destData,
             AsyncCompletionCallback<CopyCommandResult> callback) {
         CopyCommandResult result = new CopyCommandResult("something", null);
@@ -39,4 +51,11 @@ public class MockStorageMotionStrategy implements DataMotionStrategy {
         return null;
     }
 
+    @Override
+    public Void copyAsync(Map<VolumeInfo, DataStore> volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost,
+            AsyncCompletionCallback<CopyCommandResult> callback) {
+        CopyCommandResult result = new CopyCommandResult("something", null);
+        callback.complete(result);
+        return null;
+    }
 }

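DataMotionStrategy now exposes a second canHandle/copyAsync pair that takes the whole volume-to-datastore map plus the source and destination hosts; AncientDataMotionStrategy declines it and the mock accepts it, which suggests that a service selects a strategy by canHandle. The sketch below shows that dispatch pattern with illustrative interfaces only; it is not the actual CloudStack data motion service.

    import java.util.List;
    import java.util.Map;

    public class MotionDispatchSketch {

        // Illustrative stand-ins for VolumeInfo, DataStore and Host.
        interface VolumeRef {}
        interface StoreRef {}
        interface HostRef {}

        interface MotionStrategy {
            boolean canHandle(Map<VolumeRef, StoreRef> volumeMap, HostRef srcHost, HostRef destHost);
            void copyAsync(Map<VolumeRef, StoreRef> volumeMap, HostRef srcHost, HostRef destHost, Runnable onDone);
        }

        private final List<MotionStrategy> strategies;

        public MotionDispatchSketch(List<MotionStrategy> strategies) {
            this.strategies = strategies;
        }

        // Hand the migration to the first strategy that claims it; a hypervisor-specific
        // strategy would return true from canHandle while a generic one returns false.
        public void migrate(Map<VolumeRef, StoreRef> volumeMap, HostRef srcHost, HostRef destHost, Runnable onDone) {
            for (MotionStrategy strategy : strategies) {
                if (strategy.canHandle(volumeMap, srcHost, destHost)) {
                    strategy.copyAsync(volumeMap, srcHost, destHost, onDone);
                    return;
                }
            }
            throw new UnsupportedOperationException("No strategy can migrate these volumes between the given hosts");
        }
    }
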
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java
----------------------------------------------------------------------
diff --git a/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java b/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java
index 3602bb1..ad9238a 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java
@@ -20,12 +20,14 @@ package org.apache.cloudstack.storage.motion;
 
 import java.util.Date;
 import java.util.List;
+import java.util.Map;
 
 import javax.inject.Inject;
 
 import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectType;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole;
 import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
@@ -36,6 +38,7 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
+import com.cloud.agent.AgentManager;
 import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.BackupSnapshotAnswer;
 import com.cloud.agent.api.BackupSnapshotCommand;
@@ -47,15 +50,21 @@ import com.cloud.agent.api.CreateVolumeFromSnapshotCommand;
 import com.cloud.agent.api.UpgradeSnapshotCommand;
 import com.cloud.agent.api.storage.CopyVolumeAnswer;
 import com.cloud.agent.api.storage.CopyVolumeCommand;
+import com.cloud.agent.api.storage.MigrateVolumeAnswer;
+import com.cloud.agent.api.storage.MigrateVolumeCommand;
 import com.cloud.agent.api.storage.CreateAnswer;
 import com.cloud.agent.api.storage.CreateCommand;
 import com.cloud.agent.api.storage.CreatePrivateTemplateAnswer;
 import com.cloud.agent.api.to.S3TO;
 import com.cloud.agent.api.to.StorageFilerTO;
 import com.cloud.agent.api.to.SwiftTO;
+import com.cloud.agent.api.to.VirtualMachineTO;
 import com.cloud.configuration.Config;
 import com.cloud.configuration.dao.ConfigurationDao;
+import com.cloud.exception.AgentUnavailableException;
+import com.cloud.exception.OperationTimedoutException;
 import com.cloud.exception.StorageUnavailableException;
+import com.cloud.host.Host;
 import com.cloud.host.HostVO;
 import com.cloud.host.dao.HostDao;
 import com.cloud.storage.DiskOfferingVO;
@@ -86,6 +95,8 @@ import com.cloud.utils.db.DB;
 import com.cloud.utils.db.Transaction;
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.vm.DiskProfile;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.dao.VMInstanceDao;
 
 @Component
 public class AncientDataMotionStrategy implements DataMotionStrategy {
@@ -102,8 +113,12 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
     @Inject
     StorageManager storageMgr;
     @Inject
+    AgentManager agentMgr;
+    @Inject
     VolumeDao volDao;
     @Inject
+    VMInstanceDao instanceDao;
+    @Inject
     VMTemplateDao templateDao;
     @Inject
     SnapshotManager snapshotMgr;
@@ -130,6 +145,11 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
         return true;
     }
 
+    @Override
+    public boolean canHandle(Map<VolumeInfo, DataStore> volumeMap, Host srcHost, Host destHost) {
+        return false;
+    }
+
     @DB
     protected Answer copyVolumeFromImage(DataObject srcData, DataObject destData) {
         String value = configDao.getValue(Config.RecreateSystemVmEnabled.key());
@@ -393,6 +413,53 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
         return cvAnswer;
     }
 
+    protected Answer migrateVolumeToPool(DataObject srcData, DataStore destStore) {
+        VolumeInfo volume = (VolumeInfo)srcData;
+        Long instanceId = volume.getInstanceId();
+        StoragePool destPool = (StoragePool)this.dataStoreMgr.getDataStore(destStore.getId(), DataStoreRole.Primary);
+        MigrateVolumeAnswer answer = null;
+        VMInstanceVO vmInstance = null;
+        if (instanceId != null) {
+            vmInstance = instanceDao.findById(instanceId);
+        }
+
+        Long hostId = null;
+        if (vmInstance != null) {
+            hostId = vmInstance.getHostId();
+        }
+
+        try {
+            if (hostId != null) {
+                MigrateVolumeCommand command = new MigrateVolumeCommand(volume.getId(), volume.getPath(), destPool);
+                answer = (MigrateVolumeAnswer) this.agentMgr.send(hostId, command);
+            }
+        } catch (OperationTimedoutException e) {
+            s_logger.error("Operation timed out on storage motion for volume " + volume, e);
+            throw new CloudRuntimeException("Failed to live migrate volume " + volume + " to storage pool " +
+                    destPool, e);
+        } catch (AgentUnavailableException e) {
+            s_logger.error("Agent unavailable exception while doing storage motion for volume " + volume, e);
+            throw new CloudRuntimeException("Failed to live migrate volume " + volume + " to storage pool " +
+                    destPool, e);
+        }
+
+        if (answer == null || !answer.getResult()) {
+            throw new CloudRuntimeException("Failed to migrate volume " + volume + " to storage pool " + destPool);
+        } else {
+            // Update the volume details after migration.
+            VolumeVO volumeVo = this.volDao.findById(volume.getId());
+            Long oldPoolId = volume.getPoolId();
+            volumeVo.setPath(answer.getVolumePath());
+            volumeVo.setFolder(destPool.getPath());
+            volumeVo.setPodId(destPool.getPodId());
+            volumeVo.setPoolId(destPool.getId());
+            volumeVo.setLastPoolId(oldPoolId);
+            this.volDao.update(volume.getId(), volumeVo);
+        }
+
+        return answer;
+    }
+
     @Override
     public Void copyAsync(DataObject srcData, DataObject destData,
             AsyncCompletionCallback<CopyCommandResult> callback) {
@@ -419,7 +486,12 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
             	answer = cloneVolume(srcData, destData);
             } else if (destData.getType() == DataObjectType.VOLUME
                     && srcData.getType() == DataObjectType.VOLUME && srcData.getDataStore().getRole() == DataStoreRole.Primary) {
-            	answer = copyVolumeBetweenPools(srcData, destData);
+                if (srcData.getId() == destData.getId()) {
+                    // The volume has to be migrated across storage pools.
+                    answer = migrateVolumeToPool(srcData, destData.getDataStore());
+                } else {
+                    answer = copyVolumeBetweenPools(srcData, destData);
+                }
             } else if (srcData.getType() == DataObjectType.SNAPSHOT &&
             		destData.getType() == DataObjectType.SNAPSHOT) {
             	answer = copySnapshot(srcData, destData);
@@ -435,6 +507,16 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
         return null;
     }
 
+    @Override
+    public Void copyAsync(Map<VolumeInfo, DataStore> volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost,
+            AsyncCompletionCallback<CopyCommandResult> callback) {
+        CopyCommandResult result = new CopyCommandResult(null, null);
+        result.setResult("Unsupported operation requested for copying data.");
+        callback.complete(result);
+
+        return null;
+    }
+
     @DB
     protected Answer createTemplateFromSnashot(DataObject srcData,
             DataObject destData) {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionService.java
----------------------------------------------------------------------
diff --git a/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionService.java b/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionService.java
index db36f64..5ecbcb3 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionService.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionService.java
@@ -18,11 +18,20 @@
  */
 package org.apache.cloudstack.storage.motion;
 
+import java.util.Map;
+
 import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
 
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.host.Host;
+
 public interface DataMotionService {
     public void copyAsync(DataObject srcData, DataObject destData,
             AsyncCompletionCallback<CopyCommandResult> callback);
+    public void copyAsync(Map<VolumeInfo, DataStore> volumeMap, VirtualMachineTO vmTo,
+            Host srcHost, Host destHost, AsyncCompletionCallback<CopyCommandResult> callback);
 }
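For context, a minimal caller sketch (not part of this patch) of the new map-based
copyAsync(). It assumes dataMotionService is an injected DataMotionService, that
rootVolume, destPrimaryStore, vmTo, srcHost and destHost are already in scope, and that
AsyncCompletionCallback can be implemented inline; the real callers in VolumeServiceImpl
go through AsyncCallbackDispatcher instead.

    Map<VolumeInfo, DataStore> volumeMap = new HashMap<VolumeInfo, DataStore>();
    volumeMap.put(rootVolume, destPrimaryStore);

    dataMotionService.copyAsync(volumeMap, vmTo, srcHost, destHost,
            new AsyncCompletionCallback<CopyCommandResult>() {
                @Override
                public void complete(CopyCommandResult result) {
                    // The strategy completes the callback whether the motion succeeded or failed.
                    if (result.isFailed()) {
                        throw new CloudRuntimeException("Storage motion failed: " + result.getResult());
                    }
                }
            });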

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java b/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java
index 343140f..b74e10c 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java
@@ -19,14 +19,19 @@
 package org.apache.cloudstack.storage.motion;
 
 import java.util.List;
+import java.util.Map;
 
 import javax.inject.Inject;
 
 import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
 import org.springframework.stereotype.Component;
 
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.host.Host;
 import com.cloud.utils.exception.CloudRuntimeException;
 
 @Component
@@ -58,4 +63,15 @@ public class DataMotionServiceImpl implements DataMotionService {
         throw new CloudRuntimeException("can't find strategy to move data");
     }
 
+    @Override
+    public void copyAsync(Map<VolumeInfo, DataStore> volumeMap, VirtualMachineTO vmTo,
+            Host srcHost, Host destHost, AsyncCompletionCallback<CopyCommandResult> callback) {
+        for (DataMotionStrategy strategy : strategies) {
+            if (strategy.canHandle(volumeMap, srcHost, destHost)) {
+                strategy.copyAsync(volumeMap, vmTo, srcHost, destHost, callback);
+                return;
+            }
+        }
+        throw new CloudRuntimeException("can't find strategy to move data");
+    }
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionStrategy.java
----------------------------------------------------------------------
diff --git a/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionStrategy.java b/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionStrategy.java
index ba40c6d..e3859b4 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionStrategy.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/motion/DataMotionStrategy.java
@@ -18,13 +18,23 @@
  */
 package org.apache.cloudstack.storage.motion;
 
+import java.util.Map;
+
 import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
 
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.host.Host;
+
 public interface DataMotionStrategy {
     public boolean canHandle(DataObject srcData, DataObject destData);
+    public boolean canHandle(Map<VolumeInfo, DataStore> volumeMap, Host srcHost, Host destHost);
 
     public Void copyAsync(DataObject srcData, DataObject destData,
             AsyncCompletionCallback<CopyCommandResult> callback);
+    public Void copyAsync(Map<VolumeInfo, DataStore> volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost,
+            AsyncCompletionCallback<CopyCommandResult> callback);
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java
index ceadb25..ea31be3 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeObject.java
@@ -176,6 +176,8 @@ public class VolumeObject implements VolumeInfo {
                     volEvent = Volume.Event.CreateRequested;
                 } else if (event == ObjectInDataStoreStateMachine.Event.CopyingRequested) {
                     volEvent = Volume.Event.CopyRequested;
+                } else if (event == ObjectInDataStoreStateMachine.Event.MigrationRequested) {
+                    volEvent = Volume.Event.MigrationRequested;
                 }
             }
             

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
----------------------------------------------------------------------
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
index 32e7d27..e3526de 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
@@ -18,6 +18,10 @@
  */
 package org.apache.cloudstack.storage.volume;
 
+import java.util.Map;
+import java.util.List;
+import java.util.ArrayList;
+
 import javax.inject.Inject;
 
 import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity;
@@ -27,6 +31,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
@@ -40,11 +45,14 @@ import org.apache.cloudstack.storage.datastore.DataObjectManager;
 import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager;
 import org.apache.cloudstack.storage.datastore.PrimaryDataStore;
 import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
 import org.apache.cloudstack.storage.motion.DataMotionService;
 import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
+import com.cloud.agent.api.to.VirtualMachineTO;
 import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.host.Host;
 import com.cloud.storage.StoragePool;
 import com.cloud.storage.Volume;
 import com.cloud.storage.Volume.Type;
@@ -561,7 +569,163 @@ public class VolumeServiceImpl implements VolumeService {
         
         return null;
     }
-    
+
+    private class MigrateVolumeContext<T> extends AsyncRpcConext<T> {
+        final VolumeInfo srcVolume;
+        final VolumeInfo destVolume;
+        final DataStore destStore;
+        final AsyncCallFuture<VolumeApiResult> future;
+        /**
+         * @param callback
+         */
+        public MigrateVolumeContext(AsyncCompletionCallback<T> callback, AsyncCallFuture<VolumeApiResult> future,
+                VolumeInfo srcVolume, VolumeInfo destVolume, DataStore destStore) {
+            super(callback);
+            this.srcVolume = srcVolume;
+            this.destVolume = destVolume;
+            this.destStore = destStore;
+            this.future = future;
+        }
+    }
+
+    @Override
+    public AsyncCallFuture<VolumeApiResult> migrateVolume(VolumeInfo srcVolume, DataStore destStore) {
+        AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<VolumeApiResult>();
+        VolumeApiResult res = new VolumeApiResult(srcVolume);
+        try {
+            if (!this.snapshotMgr.canOperateOnVolume(srcVolume)) {
+                s_logger.debug("Snapshots are being created on this volume. This volume cannot be migrated now.");
+                res.setResult("Snapshots are being created on this volume. This volume cannot be migrated now.");
+                future.complete(res);
+                return future;
+            }
+
+            VolumeInfo destVolume = this.volFactory.getVolume(srcVolume.getId(), destStore);
+            srcVolume.processEvent(Event.MigrationRequested);
+            MigrateVolumeContext<VolumeApiResult> context = new MigrateVolumeContext<VolumeApiResult>(null, future,
+                    srcVolume, destVolume, destStore);
+            AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
+            caller.setCallback(caller.getTarget().migrateVolumeCallBack(null, null)).setContext(context);
+            this.motionSrv.copyAsync(srcVolume, destVolume, caller);
+        } catch (Exception e) {
+            s_logger.debug("Failed to copy volume", e);
+            res.setResult(e.toString());
+            future.complete(res);
+        }
+        return future;
+    }
+
+    protected Void migrateVolumeCallBack(AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> callback,
+            MigrateVolumeContext<VolumeApiResult> context) {
+        VolumeInfo srcVolume = context.srcVolume;
+        VolumeInfo destVolume = context.destVolume;
+        CopyCommandResult result = callback.getResult();
+        AsyncCallFuture<VolumeApiResult> future = context.future;
+        VolumeApiResult res = new VolumeApiResult(srcVolume);
+        try {
+            if (result.isFailed()) {
+                res.setResult(result.getResult());
+                srcVolume.processEvent(Event.OperationFailed);
+                future.complete(res);
+            } else {
+                srcVolume.processEvent(Event.OperationSuccessed);
+                future.complete(res);
+            }
+        } catch (Exception e) {
+            s_logger.error("Failed to process copy volume callback", e);
+            res.setResult(e.toString());
+            future.complete(res);
+        }
+
+        return null;
+    }
+
+    private class MigrateVmWithVolumesContext<T> extends AsyncRpcConext<T> {
+        final Map<VolumeInfo, DataStore> volumeToPool;
+        final AsyncCallFuture<CommandResult> future;
+        /**
+         * @param callback
+         */
+        public MigrateVmWithVolumesContext(AsyncCompletionCallback<T> callback, AsyncCallFuture<CommandResult> future,
+                Map<VolumeInfo, DataStore> volumeToPool) {
+            super(callback);
+            this.volumeToPool = volumeToPool;
+            this.future = future;
+        }
+    }
+
+    @Override
+    public AsyncCallFuture<CommandResult> migrateVolumes(Map<VolumeInfo, DataStore> volumeMap, VirtualMachineTO vmTo,
+            Host srcHost, Host destHost) {
+        AsyncCallFuture<CommandResult> future = new AsyncCallFuture<CommandResult>();
+        CommandResult res = new CommandResult();
+        try {
+            // Check to make sure there are no snapshot operations on a volume and
+            // put it in the migrating state.
+            List<VolumeInfo> volumesMigrating = new ArrayList<VolumeInfo>();
+            for (Map.Entry<VolumeInfo, DataStore> entry : volumeMap.entrySet()) {
+                VolumeInfo volume = entry.getKey();
+                if (!this.snapshotMgr.canOperateOnVolume(volume)) {
+                    s_logger.debug("Snapshots are being created on a volume. Volumes cannot be migrated now.");
+                    res.setResult("Snapshots are being created on a volume. Volumes cannot be migrated now.");
+                    future.complete(res);
+
+                    // All the volumes that are already in migrating state need to be put back in ready state.
+                    for (VolumeInfo volumeMigrating : volumesMigrating) {
+                        volumeMigrating.processEvent(Event.OperationFailed);
+                    }
+                    return future;
+                } else {
+                    volume.processEvent(Event.MigrationRequested);
+                    volumesMigrating.add(volume);
+                }
+            }
+
+            MigrateVmWithVolumesContext<CommandResult> context = new MigrateVmWithVolumesContext<CommandResult>(null,
+                    future, volumeMap);
+            AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> caller = AsyncCallbackDispatcher.create(this);
+            caller.setCallback(caller.getTarget().migrateVmWithVolumesCallBack(null, null)).setContext(context);
+            this.motionSrv.copyAsync(volumeMap, vmTo, srcHost, destHost, caller);
+
+        } catch (Exception e) {
+            s_logger.debug("Failed to copy volume", e);
+            res.setResult(e.toString());
+            future.complete(res);
+        }
+
+        return future;
+    }
+
+    protected Void migrateVmWithVolumesCallBack(AsyncCallbackDispatcher<VolumeServiceImpl, CopyCommandResult> callback,
+            MigrateVmWithVolumesContext<CommandResult> context) {
+        Map<VolumeInfo, DataStore> volumeToPool = context.volumeToPool;
+        CopyCommandResult result = callback.getResult();
+        AsyncCallFuture<CommandResult> future = context.future;
+        CommandResult res = new CommandResult();
+        try {
+            if (result.isFailed()) {
+                res.setResult(result.getResult());
+                for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
+                    VolumeInfo volume = entry.getKey();
+                    volume.processEvent(Event.OperationFailed);
+                }
+                future.complete(res);
+            } else {
+                for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
+                    VolumeInfo volume = entry.getKey();
+                    volume.processEvent(Event.OperationSuccessed);
+                }
+                future.complete(res);
+            }
+        } catch (Exception e) {
+            s_logger.error("Failed to process copy volume callback", e);
+            res.setResult(e.toString());
+            future.complete(res);
+        }
+
+        return null;
+    }
+
     @Override
     public AsyncCallFuture<VolumeApiResult> registerVolume(VolumeInfo volume, DataStore store) {
         

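For context, a minimal sketch (not part of this patch) of how an orchestrator might
consume the future returned by migrateVolume(); it assumes volumeService, srcVolume and
destStore are in scope and that AsyncCallFuture can be waited on like a
java.util.concurrent.Future.

    AsyncCallFuture<VolumeApiResult> future = volumeService.migrateVolume(srcVolume, destStore);
    try {
        // Blocks until migrateVolumeCallBack() completes the future.
        VolumeApiResult result = future.get();
        if (result.isFailed()) {
            throw new CloudRuntimeException("Volume migration failed: " + result.getResult());
        }
    } catch (InterruptedException e) {
        throw new CloudRuntimeException("Interrupted while waiting for volume migration", e);
    } catch (ExecutionException e) {
        throw new CloudRuntimeException("Error while waiting for volume migration", e);
    }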
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/plugins/host-allocators/random/src/com/cloud/agent/manager/allocator/impl/RandomAllocator.java
----------------------------------------------------------------------
diff --git a/plugins/host-allocators/random/src/com/cloud/agent/manager/allocator/impl/RandomAllocator.java b/plugins/host-allocators/random/src/com/cloud/agent/manager/allocator/impl/RandomAllocator.java
index a672efd..8243f3a 100755
--- a/plugins/host-allocators/random/src/com/cloud/agent/manager/allocator/impl/RandomAllocator.java
+++ b/plugins/host-allocators/random/src/com/cloud/agent/manager/allocator/impl/RandomAllocator.java
@@ -55,6 +55,62 @@ public class RandomAllocator extends AdapterBase implements HostAllocator {
 
     @Override
     public List<Host> allocateTo(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, Type type,
+            ExcludeList avoid, List<HostVO> hosts, int returnUpTo, boolean considerReservedCapacity) {
+        long dcId = plan.getDataCenterId();
+        Long podId = plan.getPodId();
+        Long clusterId = plan.getClusterId();
+        ServiceOffering offering = vmProfile.getServiceOffering();
+        List<Host> suitableHosts = new ArrayList<Host>();
+
+        if (type == Host.Type.Storage) {
+            return suitableHosts;
+        }
+
+        String hostTag = offering.getHostTag();
+        if(hostTag != null){
+            s_logger.debug("Looking for hosts in dc: " + dcId + "  pod:" + podId + "  cluster:" + clusterId +
+                    " having host tag:" + hostTag);
+        }else{
+            s_logger.debug("Looking for hosts in dc: " + dcId + "  pod:" + podId + "  cluster:" + clusterId);
+        }
+
+        // list all computing hosts, regardless of whether they support routing...it's random after all
+        if(hostTag != null){
+            hosts.retainAll(_hostDao.listByHostTag(type, clusterId, podId, dcId, hostTag));
+        }else{
+            hosts.retainAll(_resourceMgr.listAllUpAndEnabledHosts(type, clusterId, podId, dcId));
+        }
+
+        s_logger.debug("Random Allocator found " + hosts.size() + "  hosts");
+        if (hosts.size() == 0) {
+            return suitableHosts;
+        }
+
+        Collections.shuffle(hosts);
+        for (Host host : hosts) {
+            if(suitableHosts.size() == returnUpTo){
+                break;
+            }
+
+            if (!avoid.shouldAvoid(host)) {
+                suitableHosts.add(host);
+            } else {
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug("Host name: " + host.getName() + ", hostId: "+ host.getId() +" is in avoid set, " +
+                            "skipping this and trying other available hosts");
+                }
+            }
+        }
+
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Random Host Allocator returning "+suitableHosts.size() +" suitable hosts");
+        }
+
+        return suitableHosts;
+    }
+
+    @Override
+    public List<Host> allocateTo(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, Type type,
             ExcludeList avoid, int returnUpTo, boolean considerReservedCapacity) {
 
         long dcId = plan.getDataCenterId();

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
index 4ef583a..46ae35a 100644
--- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
+++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java
@@ -3358,7 +3358,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         vm.setMemoryLimits(conn, maxMemsize, maxMemsize, minMemsize, maxMemsize);
     }
 
-    private void waitForTask(Connection c, Task task, long pollInterval, long timeout) throws XenAPIException, XmlRpcException {
+    protected void waitForTask(Connection c, Task task, long pollInterval, long timeout) throws XenAPIException, XmlRpcException {
         long beginTime = System.currentTimeMillis();
         while (task.getStatus(c) == Types.TaskStatusType.PENDING) {
             try {
@@ -3374,7 +3374,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
         }
     }
 
-    private void checkForSuccess(Connection c, Task task) throws XenAPIException, XmlRpcException {
+    protected void checkForSuccess(Connection c, Task task) throws XenAPIException, XmlRpcException {
         if (task.getStatus(c) == Types.TaskStatusType.SUCCESS) {
             return;
         } else {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer56FP1Resource.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer56FP1Resource.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer56FP1Resource.java
index d64e173..96a90a6 100644
--- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer56FP1Resource.java
+++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer56FP1Resource.java
@@ -132,6 +132,7 @@ public class XenServer56FP1Resource extends XenServer56Resource {
         record.affinity = host;
         record.otherConfig.remove("disks");
         record.otherConfig.remove("default_template");
+        record.otherConfig.remove("mac_seed");
         record.isATemplate = false;
         record.nameLabel = vmSpec.getName();
         record.actionsAfterCrash = Types.OnCrashBehaviour.DESTROY;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer610Resource.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer610Resource.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer610Resource.java
index 8d267b1..bb31136 100644
--- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer610Resource.java
+++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer610Resource.java
@@ -20,6 +20,9 @@ package com.cloud.hypervisor.xen.resource;
 import java.io.File;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
 
 import javax.ejb.Local;
 
@@ -28,7 +31,34 @@ import org.apache.log4j.Logger;
 import com.cloud.resource.ServerResource;
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.utils.script.Script;
-
+import com.cloud.vm.VirtualMachine.State;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.Command;
+import com.cloud.agent.api.storage.MigrateVolumeAnswer;
+import com.cloud.agent.api.storage.MigrateVolumeCommand;
+import com.cloud.agent.api.MigrateWithStorageAnswer;
+import com.cloud.agent.api.MigrateWithStorageCommand;
+import com.cloud.agent.api.MigrateWithStorageReceiveAnswer;
+import com.cloud.agent.api.MigrateWithStorageReceiveCommand;
+import com.cloud.agent.api.MigrateWithStorageSendAnswer;
+import com.cloud.agent.api.MigrateWithStorageSendCommand;
+import com.cloud.agent.api.MigrateWithStorageCompleteAnswer;
+import com.cloud.agent.api.MigrateWithStorageCompleteCommand;
+import com.cloud.agent.api.to.StorageFilerTO;
+import com.cloud.network.Networks.TrafficType;
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.agent.api.to.VolumeTO;
+import com.cloud.agent.api.to.NicTO;
+import com.xensource.xenapi.Connection;
+import com.xensource.xenapi.Host;
+import com.xensource.xenapi.Network;
+import com.xensource.xenapi.SR;
+import com.xensource.xenapi.Task;
+import com.xensource.xenapi.Types;
+import com.xensource.xenapi.VBD;
+import com.xensource.xenapi.VDI;
+import com.xensource.xenapi.VIF;
+import com.xensource.xenapi.VM;
 
 @Local(value=ServerResource.class)
 public class XenServer610Resource extends XenServer56FP1Resource {
@@ -55,4 +85,331 @@ public class XenServer610Resource extends XenServer56FP1Resource {
         files.add(file);
         return files;
     }
+
+    @Override
+    public Answer executeRequest(Command cmd) {
+        if (cmd instanceof MigrateWithStorageCommand) {
+            return execute((MigrateWithStorageCommand) cmd);
+        } else if (cmd instanceof MigrateWithStorageReceiveCommand) {
+            return execute((MigrateWithStorageReceiveCommand) cmd);
+        } else if (cmd instanceof MigrateWithStorageSendCommand) {
+            return execute((MigrateWithStorageSendCommand) cmd);
+        } else if (cmd instanceof MigrateWithStorageCompleteCommand) {
+            return execute((MigrateWithStorageCompleteCommand) cmd);
+        } else if (cmd instanceof MigrateVolumeCommand) {
+            return execute((MigrateVolumeCommand) cmd);
+        } else {
+            return super.executeRequest(cmd);
+        }
+    }
+
+    private List<VolumeTO> getUpdatedVolumePathsOfMigratedVm(Connection connection, VM migratedVm,
+            VolumeTO[] volumes) throws CloudRuntimeException {
+        List<VolumeTO> volumeToList = new ArrayList<VolumeTO>();
+
+        try {
+            // Volume paths would have changed. Return that information.
+            Set<VBD> vbds = migratedVm.getVBDs(connection);
+            Map<String, VDI> deviceIdToVdiMap = new HashMap<String, VDI>();
+            // Map each disk VBD's userdevice id to its VDI.
+            for (VBD vbd : vbds) {
+                VBD.Record vbdr = vbd.getRecord(connection);
+                if (vbdr.type == Types.VbdType.DISK) {
+                    VDI vdi = vbdr.VDI;
+                    deviceIdToVdiMap.put(vbdr.userdevice, vdi);
+                }
+            }
+
+            for (VolumeTO volumeTo : volumes) {
+                Long deviceId = volumeTo.getDeviceId();
+                VDI vdi = deviceIdToVdiMap.get(deviceId.toString());
+                volumeTo.setPath(vdi.getUuid(connection));
+                volumeToList.add(volumeTo);
+            }
+        } catch (Exception e) {
+            s_logger.error("Unable to get the updated VDI paths of the migrated vm " + e.toString(), e);
+            throw new CloudRuntimeException("Unable to get the updated VDI paths of the migrated vm " + e.toString(), e);
+        }
+
+        return volumeToList;
+    }
+
+    protected MigrateWithStorageAnswer execute(MigrateWithStorageCommand cmd) {
+        Connection connection = getConnection();
+        VirtualMachineTO vmSpec = cmd.getVirtualMachine();
+        Map<VolumeTO, StorageFilerTO> volumeToFiler = cmd.getVolumeToFiler();
+        final String vmName = vmSpec.getName();
+        State state = s_vms.getState(_cluster, vmName);
+        Task task = null;
+
+        synchronized (_cluster.intern()) {
+            s_vms.put(_cluster, _name, vmName, State.Stopping);
+        }
+
+        try {
+            prepareISO(connection, vmSpec.getName());
+            Map<String, String> other = new HashMap<String, String>();
+            other.put("live", "true");
+            Network networkForSm = getNativeNetworkForTraffic(connection, TrafficType.Storage, null).getNetwork();
+            Host host = Host.getByUuid(connection, _host.uuid);
+            Map<String,String> token = host.migrateReceive(connection, networkForSm, other);
+
+            // Get the vm to migrate.
+            Set<VM> vms = VM.getByNameLabel(connection, vmSpec.getName());
+            VM vmToMigrate = vms.iterator().next();
+
+            // Create the vif map. The vm stays in the same cluster so we have to pass an empty vif map.
+            Map<VIF, Network> vifMap = new HashMap<VIF, Network>();
+            Map<VDI, SR> vdiMap = new HashMap<VDI, SR>();
+            for (Map.Entry<VolumeTO, StorageFilerTO> entry : volumeToFiler.entrySet()) {
+                vdiMap.put(getVDIbyUuid(connection, entry.getKey().getPath()),
+                        getStorageRepository(connection, entry.getValue().getUuid()));
+            }
+
+            // Check that migration with storage is possible.
+            task = vmToMigrate.assertCanMigrateAsync(connection, token, true, vdiMap, vifMap, other);
+            try {
+                // poll every second
+                long timeout = (_migratewait) * 1000L;
+                waitForTask(connection, task, 1000, timeout);
+                checkForSuccess(connection, task);
+            } catch (Types.HandleInvalid e) {
+                s_logger.error("Error while checking if vm " + vmName + " can be migrated to the destination host " +
+                        host, e);
+                throw new CloudRuntimeException("Error while checking if vm " + vmName + " can be migrated to the " +
+                        "destination host " + host, e);
+            }
+
+            // Migrate now.
+            task = vmToMigrate.migrateSendAsync(connection, token, true, vdiMap, vifMap, other);
+            try {
+                // poll every second.
+                long timeout = (_migratewait) * 1000L;
+                waitForTask(connection, task, 1000, timeout);
+                checkForSuccess(connection, task);
+            } catch (Types.HandleInvalid e) {
+                s_logger.error("Error while migrating vm " + vmName + " to the destination host " + host, e);
+                throw new CloudRuntimeException("Error while migrating vm " + vmName + " to the destination host " +
+                        host, e);
+            }
+
+            // Volume paths would have changed. Return that information.
+            List<VolumeTO> volumeToList = getUpdatedVolumePathsOfMigratedVm(connection, vmToMigrate, vmSpec.getDisks());
+            vmToMigrate.setAffinity(connection, host);
+            state = State.Stopping;
+
+            return new MigrateWithStorageAnswer(cmd, volumeToList);
+        } catch (Exception e) {
+            s_logger.warn("Catch Exception " + e.getClass().getName() + ". Storage motion failed due to " +
+                    e.toString(), e);
+            return new MigrateWithStorageAnswer(cmd, e);
+        } finally {
+            if (task != null) {
+                try {
+                    task.destroy(connection);
+                } catch (Exception e) {
+                    s_logger.debug("Unable to destroy task " + task.toString() + " on host " + _host.uuid +" due to " +
+                            e.toString());
+                }
+            }
+
+            synchronized (_cluster.intern()) {
+                s_vms.put(_cluster, _name, vmName, state);
+            }
+        }
+    }
+
+    protected MigrateWithStorageReceiveAnswer execute(MigrateWithStorageReceiveCommand cmd) {
+        Connection connection = getConnection();
+        VirtualMachineTO vmSpec = cmd.getVirtualMachine();
+        Map<VolumeTO, StorageFilerTO> volumeToFiler = cmd.getVolumeToFiler();
+
+        try {
+            // Get a map of all the SRs to which the vdis will be migrated.
+            Map<VolumeTO, Object> volumeToSr = new HashMap<VolumeTO, Object>();
+            for (Map.Entry<VolumeTO, StorageFilerTO> entry : volumeToFiler.entrySet()) {
+                SR sr = getStorageRepository(connection, entry.getValue().getUuid());
+                volumeToSr.put(entry.getKey(), sr);
+            }
+
+            // Get the list of networks to which the vifs will attach.
+            Map<NicTO, Object> nicToNetwork = new HashMap<NicTO, Object>();
+            for (NicTO nicTo : vmSpec.getNics()) {
+                Network network = getNetwork(connection, nicTo);
+                nicToNetwork.put(nicTo, network);
+            }
+
+            Map<String, String> other = new HashMap<String, String>();
+            other.put("live", "true");
+            Network network = getNativeNetworkForTraffic(connection, TrafficType.Storage, null).getNetwork();
+            Host host = Host.getByUuid(connection, _host.uuid);
+            Map<String,String> token = host.migrateReceive(connection, network, other);
+
+            return new MigrateWithStorageReceiveAnswer(cmd, volumeToSr, nicToNetwork, token);
+        } catch (CloudRuntimeException e) {
+            s_logger.error("Migration of vm " + vmSpec.getName() + " with storage failed due to " + e.toString(), e);
+            return new MigrateWithStorageReceiveAnswer(cmd, e);
+        } catch (Exception e) {
+            s_logger.error("Migration of vm " + vmSpec.getName() + " with storage failed due to " + e.toString(), e);
+            return new MigrateWithStorageReceiveAnswer(cmd, e);
+        }
+    }
+
+    protected MigrateWithStorageSendAnswer execute(MigrateWithStorageSendCommand cmd) {
+        Connection connection = getConnection();
+        VirtualMachineTO vmSpec = cmd.getVirtualMachine();
+        Map<VolumeTO, Object> volumeToSr = cmd.getVolumeToSr();
+        Map<NicTO, Object> nicToNetwork = cmd.getNicToNetwork();
+        Map<String, String> token = cmd.getToken();
+        final String vmName = vmSpec.getName();
+        State state = s_vms.getState(_cluster, vmName);
+        Set<VolumeTO> volumeToSet = null;
+        boolean migrated = false;
+        Task task = null;
+
+        synchronized (_cluster.intern()) {
+            s_vms.put(_cluster, _name, vmName, State.Stopping);
+        }
+
+        try {
+            Set<VM> vms = VM.getByNameLabel(connection, vmSpec.getName());
+            VM vmToMigrate = vms.iterator().next();
+            Map<String, String> other = new HashMap<String, String>();
+            other.put("live", "true");
+
+            // Create the vdi map, which says which SR on the destination each of the vm's volumes should go to.
+            Map<VDI, SR> vdiMap = new HashMap<VDI, SR>();
+            for (Map.Entry<VolumeTO, Object> entry : volumeToSr.entrySet()) {
+                if  (entry.getValue() instanceof SR) {
+                    SR sr = (SR)entry.getValue();
+                    VDI vdi = getVDIbyUuid(connection, entry.getKey().getPath());
+                    vdiMap.put(vdi, sr);
+                } else {
+                    throw new CloudRuntimeException("The object " + entry.getValue() + " passed is not of type SR.");
+                }
+            }
+
+            // Create the vif map.
+            Map<VIF, Network> vifMap = new HashMap<VIF, Network>();
+            for (Map.Entry<NicTO, Object> entry : nicToNetwork.entrySet()) {
+                if (entry.getValue() instanceof Network) {
+                    Network network = (Network)entry.getValue();
+                    VIF vif = getVifByMac(connection, vmToMigrate, entry.getKey().getMac());
+                    vifMap.put(vif, network);
+                } else {
+                    throw new CloudRuntimeException("The object " + entry.getValue() + " passed is not of type Network.");
+                }
+            }
+
+            // Check that migration with storage is possible.
+            task = vmToMigrate.assertCanMigrateAsync(connection, token, true, vdiMap, vifMap, other);
+            try {
+                // poll every second.
+                long timeout = (_migratewait) * 1000L;
+                waitForTask(connection, task, 1000, timeout);
+                checkForSuccess(connection, task);
+            } catch (Types.HandleInvalid e) {
+                s_logger.error("Error while checking if vm " + vmName + " can be migrated.", e);
+                throw new CloudRuntimeException("Error while checking if vm " + vmName + " can be migrated.", e);
+            }
+
+            // Migrate now.
+            task = vmToMigrate.migrateSendAsync(connection, token, true, vdiMap, vifMap, other);
+            try {
+                // poll every second.
+                long timeout = (_migratewait) * 1000L;
+                waitForTask(connection, task, 1000, timeout);
+                checkForSuccess(connection, task);
+            } catch (Types.HandleInvalid e) {
+                s_logger.error("Error while migrating vm " + vmName, e);
+                throw new CloudRuntimeException("Error while migrating vm " + vmName, e);
+            }
+
+            migrated = true;
+            return new MigrateWithStorageSendAnswer(cmd, volumeToSet);
+        } catch (CloudRuntimeException e) {
+            s_logger.error("Migration of vm " + vmName + " with storage failed due to " + e.toString(), e);
+            return new MigrateWithStorageSendAnswer(cmd, e);
+        } catch (Exception e) {
+            s_logger.error("Migration of vm " + vmName + " with storage failed due to " + e.toString(), e);
+            return new MigrateWithStorageSendAnswer(cmd, e);
+        } finally {
+            if (task != null) {
+                try {
+                    task.destroy(connection);
+                } catch (Exception e) {
+                    s_logger.debug("Unable to destroy task " + task.toString() + " on host " + _host.uuid +" due to " +
+                            e.toString());
+                }
+            }
+
+            // Keep cluster/vm sync happy.
+            synchronized (_cluster.intern()) {
+                if (migrated) {
+                    s_vms.remove(_cluster, _name, vmName);
+                } else {
+                    s_vms.put(_cluster, _name, vmName, state);
+                }
+            }
+        }
+    }
+
+    protected MigrateWithStorageCompleteAnswer execute(MigrateWithStorageCompleteCommand cmd) {
+        Connection connection = getConnection();
+        VirtualMachineTO vmSpec = cmd.getVirtualMachine();
+
+        try {
+            Host host = Host.getByUuid(connection, _host.uuid);
+            Set<VM> vms = VM.getByNameLabel(connection, vmSpec.getName());
+            VM migratedVm = vms.iterator().next();
+
+            // Check that the vm is present on the new host.
+            if (migratedVm == null) {
+                throw new CloudRuntimeException("Couldn't find the migrated vm " + vmSpec.getName() +
+                        " on the destination host.");
+            }
+
+            // Volume paths would have changed. Return that information.
+            List<VolumeTO > volumeToSet = getUpdatedVolumePathsOfMigratedVm(connection, migratedVm, vmSpec.getDisks());
+            migratedVm.setAffinity(connection, host);
+
+            synchronized (_cluster.intern()) {
+                s_vms.put(_cluster, _name, vmSpec.getName(), State.Running);
+            }
+
+            return new MigrateWithStorageCompleteAnswer(cmd, volumeToSet);
+        } catch (CloudRuntimeException e) {
+            s_logger.error("Migration of vm " + vmSpec.getName() + " with storage failed due to " + e.toString(), e);
+            return new MigrateWithStorageCompleteAnswer(cmd, e);
+        } catch (Exception e) {
+            s_logger.error("Migration of vm " + vmSpec.getName() + " with storage failed due to " + e.toString(), e);
+            return new MigrateWithStorageCompleteAnswer(cmd, e);
+        }
+    }
+
+    protected MigrateVolumeAnswer execute(MigrateVolumeCommand cmd) {
+        Connection connection = getConnection();
+        String volumeUUID = cmd.getVolumePath();
+        StorageFilerTO poolTO = cmd.getPool();
+
+        try {
+            SR destinationPool = getStorageRepository(connection, poolTO.getUuid());
+            VDI srcVolume = getVDIbyUuid(connection, volumeUUID);
+            Map<String, String> other = new HashMap<String, String>();
+            other.put("live", "true");
+
+            // Live migrate the vdi across pool.
+            Task task = srcVolume.poolMigrateAsync(connection, destinationPool, other);
+            long timeout = (_migratewait) * 1000L;
+            waitForTask(connection, task, 1000, timeout);
+            checkForSuccess(connection, task);
+            VDI dvdi = Types.toVDI(task, connection);
+
+            return new MigrateVolumeAnswer(cmd, true, null, dvdi.getUuid(connection));
+        } catch (Exception e) {
+            String msg = "Catch Exception " + e.getClass().getName() + " due to " + e.toString();
+            s_logger.error(msg, e);
+            return new MigrateVolumeAnswer(cmd, false, msg, null);
+        }
+    }
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/plugins/hypervisors/xen/src/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/xen/src/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java b/plugins/hypervisors/xen/src/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java
new file mode 100644
index 0000000..353f2b5
--- /dev/null
+++ b/plugins/hypervisors/xen/src/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java
@@ -0,0 +1,239 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.motion;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.List;
+
+import javax.inject.Inject;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
+import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.MigrateWithStorageAnswer;
+import com.cloud.agent.api.MigrateWithStorageCommand;
+import com.cloud.agent.api.MigrateWithStorageCompleteAnswer;
+import com.cloud.agent.api.MigrateWithStorageCompleteCommand;
+import com.cloud.agent.api.MigrateWithStorageReceiveAnswer;
+import com.cloud.agent.api.MigrateWithStorageReceiveCommand;
+import com.cloud.agent.api.MigrateWithStorageSendAnswer;
+import com.cloud.agent.api.MigrateWithStorageSendCommand;
+import com.cloud.agent.api.to.StorageFilerTO;
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.agent.api.to.VolumeTO;
+import com.cloud.exception.AgentUnavailableException;
+import com.cloud.exception.OperationTimedoutException;
+import com.cloud.host.Host;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.dao.VMInstanceDao;
+
+@Component
+public class XenServerStorageMotionStrategy implements DataMotionStrategy {
+    private static final Logger s_logger = Logger.getLogger(XenServerStorageMotionStrategy.class);
+    @Inject AgentManager agentMgr;
+    @Inject VolumeDao volDao;
+    @Inject VolumeDataFactory volFactory;
+    @Inject PrimaryDataStoreDao storagePoolDao;
+    @Inject VMInstanceDao instanceDao;
+
+    @Override
+    public boolean canHandle(DataObject srcData, DataObject destData) {
+        return false;
+    }
+
+    @Override
+    public boolean canHandle(Map<VolumeInfo, DataStore> volumeMap, Host srcHost, Host destHost) {
+        return true;
+    }
+
+    @Override
+    public Void copyAsync(DataObject srcData, DataObject destData,
+            AsyncCompletionCallback<CopyCommandResult> callback) {
+        CopyCommandResult result = new CopyCommandResult(null, null);
+        result.setResult("Unsupported operation requested for copying data.");
+        callback.complete(result);
+
+        return null;
+    }
+
+    @Override
+    public Void copyAsync(Map<VolumeInfo, DataStore> volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost,
+            AsyncCompletionCallback<CopyCommandResult> callback) {
+        Answer answer = null;
+        String errMsg = null;
+        try {
+            VMInstanceVO instance = instanceDao.findById(vmTo.getId());
+            if (instance != null) {
+                if (srcHost.getClusterId() == destHost.getClusterId()) {
+                    answer = migrateVmWithVolumesWithinCluster(instance, vmTo, srcHost, destHost, volumeMap);
+                } else {
+                    answer = migrateVmWithVolumesAcrossCluster(instance, vmTo, srcHost, destHost, volumeMap);
+                }
+            } else {
+                throw new CloudRuntimeException("Unsupported operation requested for moving data.");
+            }
+        } catch (Exception e) {
+            s_logger.error("copy failed", e);
+            errMsg = e.toString();
+        }
+
+        CopyCommandResult result = new CopyCommandResult(null, answer);
+        result.setResult(errMsg);
+        callback.complete(result);
+        return null;
+    }
+
+    private Answer migrateVmWithVolumesAcrossCluster(VMInstanceVO vm, VirtualMachineTO to, Host srcHost,
+            Host destHost, Map<VolumeInfo, DataStore> volumeToPool) throws AgentUnavailableException {
+
+        // Initiate migration of a virtual machine with its volumes.
+        try {
+            Map<VolumeTO, StorageFilerTO> volumeToFilerto = new HashMap<VolumeTO, StorageFilerTO>();
+            for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
+                VolumeInfo volume = entry.getKey();
+                VolumeTO volumeTo = new VolumeTO(volume, storagePoolDao.findById(volume.getPoolId()));
+                StorageFilerTO filerTo = new StorageFilerTO((StoragePool)entry.getValue());
+                volumeToFilerto.put(volumeTo, filerTo);
+            }
+
+            // Migration across cluster needs to be done in three phases.
+            // 1. Send a migrate receive command to the destination host so that it is ready to receive a vm.
+            // 2. Send a migrate send command to the source host. This actually migrates the vm to the destination.
+            // 3. Complete the process. Update the volume details.
+            MigrateWithStorageReceiveCommand receiveCmd = new MigrateWithStorageReceiveCommand(to, volumeToFilerto);
+            MigrateWithStorageReceiveAnswer receiveAnswer = (MigrateWithStorageReceiveAnswer) agentMgr.send(
+                    destHost.getId(), receiveCmd);
+            if (receiveAnswer == null) {
+                s_logger.error("Migration with storage of vm " + vm+ " to host " + destHost + " failed.");
+                throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost);
+            } else if (!receiveAnswer.getResult()) {
+                s_logger.error("Migration with storage of vm " + vm+ " failed. Details: " + receiveAnswer.getDetails());
+                throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost +
+                        ". " + receiveAnswer.getDetails());
+            }
+
+            MigrateWithStorageSendCommand sendCmd = new MigrateWithStorageSendCommand(to, receiveAnswer.getVolumeToSr(),
+                    receiveAnswer.getNicToNetwork(), receiveAnswer.getToken());
+            MigrateWithStorageSendAnswer sendAnswer = (MigrateWithStorageSendAnswer) agentMgr.send(
+                    srcHost.getId(), sendCmd);
+            if (sendAnswer == null) {
+                s_logger.error("Migration with storage of vm " + vm+ " to host " + destHost + " failed.");
+                throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost);
+            } else if (!sendAnswer.getResult()) {
+                s_logger.error("Migration with storage of vm " + vm+ " failed. Details: " + sendAnswer.getDetails());
+                throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost +
+                        ". " + sendAnswer.getDetails());
+            }
+
+            MigrateWithStorageCompleteCommand command = new MigrateWithStorageCompleteCommand(to);
+            MigrateWithStorageCompleteAnswer answer = (MigrateWithStorageCompleteAnswer) agentMgr.send(
+                    destHost.getId(), command);
+            if (answer == null) {
+                s_logger.error("Migration with storage of vm " + vm + " failed.");
+                throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost);
+            } else if (!answer.getResult()) {
+                s_logger.error("Migration with storage of vm " + vm+ " failed. Details: " + answer.getDetails());
+                throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost +
+                        ". " + answer.getDetails());
+            } else {
+                // Update the volume details after migration.
+                updateVolumePathsAfterMigration(volumeToPool, answer.getVolumeTos());
+            }
+
+            return answer;
+        } catch (OperationTimedoutException e) {
+            s_logger.error("Error while migrating vm " + vm + " to host " + destHost, e);
+            throw new AgentUnavailableException("Operation timed out on storage motion for " + vm, destHost.getId());
+        }
+    }
+
+    private Answer migrateVmWithVolumesWithinCluster(VMInstanceVO vm, VirtualMachineTO to, Host srcHost,
+            Host destHost, Map<VolumeInfo, DataStore> volumeToPool) throws AgentUnavailableException {
+
+        // Initiate migration of a virtual machine with its volumes.
+        try {
+            Map<VolumeTO, StorageFilerTO> volumeToFilerto = new HashMap<VolumeTO, StorageFilerTO>();
+            for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
+                VolumeInfo volume = entry.getKey();
+                VolumeTO volumeTo = new VolumeTO(volume, storagePoolDao.findById(volume.getPoolId()));
+                StorageFilerTO filerTo = new StorageFilerTO((StoragePool)entry.getValue());
+                volumeToFilerto.put(volumeTo, filerTo);
+            }
+
+            MigrateWithStorageCommand command = new MigrateWithStorageCommand(to, volumeToFilerto);
+            MigrateWithStorageAnswer answer = (MigrateWithStorageAnswer) agentMgr.send(destHost.getId(), command);
+            if (answer == null) {
+                s_logger.error("Migration with storage of vm " + vm + " failed.");
+                throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost);
+            } else if (!answer.getResult()) {
+                s_logger.error("Migration with storage of vm " + vm+ " failed. Details: " + answer.getDetails());
+                throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost +
+                        ". " + answer.getDetails());
+            } else {
+                // Update the volume details after migration.
+                updateVolumePathsAfterMigration(volumeToPool, answer.getVolumeTos());
+            }
+
+            return answer;
+        } catch (OperationTimedoutException e) {
+            s_logger.error("Error while migrating vm " + vm + " to host " + destHost, e);
+            throw new AgentUnavailableException("Operation timed out on storage motion for " + vm, destHost.getId());
+        }
+    }
+
+    private void updateVolumePathsAfterMigration(Map<VolumeInfo, DataStore> volumeToPool, List<VolumeTO> volumeTos) {
+        for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
+            boolean updated = false;
+            VolumeInfo volume = entry.getKey();
+            StoragePool pool = (StoragePool)entry.getValue();
+            for (VolumeTO volumeTo : volumeTos) {
+                if (volume.getId() == volumeTo.getId()) {
+                    VolumeVO volumeVO = volDao.findById(volume.getId());
+                    Long oldPoolId = volumeVO.getPoolId();
+                    volumeVO.setPath(volumeTo.getPath());
+                    volumeVO.setFolder(pool.getPath());
+                    volumeVO.setPodId(pool.getPodId());
+                    volumeVO.setPoolId(pool.getId());
+                    volumeVO.setLastPoolId(oldPoolId);
+                    volDao.update(volume.getId(), volumeVO);
+                    updated = true;
+                    break;
+                }
+            }
+
+            if (!updated) {
+                s_logger.error("Volume path wasn't updated for volume " + volume + " after it was migrated.");
+            }
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/server/src/com/cloud/agent/manager/allocator/HostAllocator.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/agent/manager/allocator/HostAllocator.java b/server/src/com/cloud/agent/manager/allocator/HostAllocator.java
index 60027e7..6700f22 100755
--- a/server/src/com/cloud/agent/manager/allocator/HostAllocator.java
+++ b/server/src/com/cloud/agent/manager/allocator/HostAllocator.java
@@ -21,6 +21,7 @@ import java.util.List;
 import com.cloud.deploy.DeploymentPlan;
 import com.cloud.deploy.DeploymentPlanner.ExcludeList;
 import com.cloud.host.Host;
+import com.cloud.host.HostVO;
 import com.cloud.host.Host.Type;
 import com.cloud.offering.ServiceOffering;
 import com.cloud.utils.component.Adapter;
@@ -63,8 +64,22 @@ public interface HostAllocator extends Adapter {
     **/ 
     
     public List<Host> allocateTo(VirtualMachineProfile<?extends VirtualMachine> vmProfile, DeploymentPlan plan, Type type, ExcludeList avoid, int returnUpTo, boolean considerReservedCapacity);
-	
-	
-	public static int RETURN_UPTO_ALL = -1;
-		
+
+    /**
+     * Determines which of the given physical hosts are suitable to
+     * allocate the guest virtual machine on.
+     *
+     * @param VirtualMachineProfile vmProfile
+     * @param DeploymentPlan plan
+     * @param Host.Type type
+     * @param ExcludeList avoid
+     * @param List<HostVO> hosts
+     * @param int returnUpTo (use -1 to return all possible hosts)
+     * @param boolean considerReservedCapacity (default should be true, set to false if host capacity calculation should not look at reserved capacity)
+     * @return List<Host> List of hosts that are suitable for VM allocation
+     **/
+     public List<Host> allocateTo(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, Type type, ExcludeList avoid, List<HostVO> hosts, int returnUpTo, boolean considerReservedCapacity);
+
+     public static int RETURN_UPTO_ALL = -1;
+
 }
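
For illustration only (not part of this diff): a minimal sketch of how a planner might call the new overload, assuming an injected HostAllocator ('allocator') plus 'vmProfile', 'plan' and a pre-computed candidate list 'candidateHosts' provided by the surrounding code.

    // Sketch: try to place the VM on one of the candidate hosts; fall back to the
    // original cluster-wide overload if none of the candidates is suitable.
    ExcludeList avoid = new ExcludeList();
    List<Host> suitable = allocator.allocateTo(vmProfile, plan, Host.Type.Routing,
            avoid, candidateHosts, HostAllocator.RETURN_UPTO_ALL, true);
    if (suitable.isEmpty()) {
        suitable = allocator.allocateTo(vmProfile, plan, Host.Type.Routing,
                avoid, HostAllocator.RETURN_UPTO_ALL, true);
    }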

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java b/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java
index 0091e43..b54b1c1 100755
--- a/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java
+++ b/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java
@@ -172,6 +172,53 @@ public class FirstFitAllocator extends AdapterBase implements HostAllocator {
         return allocateTo(plan, offering, template, avoid, clusterHosts, returnUpTo, considerReservedCapacity, account);
     }
 
+    @Override
+    public List<Host> allocateTo(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan,
+            Type type, ExcludeList avoid, List<HostVO> hosts, int returnUpTo, boolean considerReservedCapacity) {
+        long dcId = plan.getDataCenterId();
+        Long podId = plan.getPodId();
+        Long clusterId = plan.getClusterId();
+        ServiceOffering offering = vmProfile.getServiceOffering();
+        VMTemplateVO template = (VMTemplateVO)vmProfile.getTemplate();
+        Account account = vmProfile.getOwner();
+        List<Host> suitableHosts = new ArrayList<Host>();
+
+        if (type == Host.Type.Storage) {
+            // FirstFitAllocator should be used for user VMs only since it won't care whether the host is capable of
+            // routing or not.
+            return suitableHosts;
+        }
+
+        String hostTagOnOffering = offering.getHostTag();
+        String hostTagOnTemplate = template.getTemplateTag();
+        boolean hasSvcOfferingTag = hostTagOnOffering != null ? true : false;
+        boolean hasTemplateTag = hostTagOnTemplate != null ? true : false;
+
+        String haVmTag = (String)vmProfile.getParameter(VirtualMachineProfile.Param.HaTag);
+        if (haVmTag != null) {
+            hosts.retainAll(_hostDao.listByHostTag(type, clusterId, podId, dcId, haVmTag));
+        } else {
+            if (hostTagOnOffering == null && hostTagOnTemplate == null){
+                hosts.retainAll(_resourceMgr.listAllUpAndEnabledNonHAHosts(type, clusterId, podId, dcId));
+            } else {
+                if (hasSvcOfferingTag) {
+                    hosts.retainAll(_hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnOffering));
+                }
+
+                if (hasTemplateTag) {
+                    hosts.retainAll(_hostDao.listByHostTag(type, clusterId, podId, dcId, hostTagOnTemplate));
+                }
+            }
+        }
+
+        if (!hosts.isEmpty()) {
+            suitableHosts = allocateTo(plan, offering, template, avoid, hosts, returnUpTo, considerReservedCapacity,
+                    account);
+        }
+
+        return suitableHosts;
+    }
+
     protected List<Host> allocateTo(DeploymentPlan plan, ServiceOffering offering, VMTemplateVO template, ExcludeList avoid, List<HostVO> hosts, int returnUpTo, boolean considerReservedCapacity, Account account) {
         if (_allocationAlgorithm.equals("random") || _allocationAlgorithm.equals("userconcentratedpod_random")) {
         	// Shuffle this so that we don't check the hosts in the same order.

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/server/src/com/cloud/agent/manager/allocator/impl/TestingAllocator.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/agent/manager/allocator/impl/TestingAllocator.java b/server/src/com/cloud/agent/manager/allocator/impl/TestingAllocator.java
index 90bd956..890c047 100755
--- a/server/src/com/cloud/agent/manager/allocator/impl/TestingAllocator.java
+++ b/server/src/com/cloud/agent/manager/allocator/impl/TestingAllocator.java
@@ -29,6 +29,7 @@ import com.cloud.agent.manager.allocator.HostAllocator;
 import com.cloud.deploy.DeploymentPlan;
 import com.cloud.deploy.DeploymentPlanner.ExcludeList;
 import com.cloud.host.Host;
+import com.cloud.host.HostVO;
 import com.cloud.host.Host.Type;
 import com.cloud.host.dao.HostDao;
 import com.cloud.offering.ServiceOffering;
@@ -52,6 +53,12 @@ public class TestingAllocator extends AdapterBase implements HostAllocator {
 
     @Override
     public List<Host> allocateTo(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, Type type,
+            ExcludeList avoid, List<HostVO> hosts, int returnUpTo, boolean considerReservedCapacity) {
+        return allocateTo(vmProfile, plan, type, avoid, returnUpTo, considerReservedCapacity);
+    }
+
+    @Override
+    public List<Host> allocateTo(VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, Type type,
             ExcludeList avoid, int returnUpTo, boolean considerReservedCapacity) {
         List<Host> availableHosts = new ArrayList<Host>();
         Host host = null;    	

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/server/src/com/cloud/api/ApiDBUtils.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/api/ApiDBUtils.java b/server/src/com/cloud/api/ApiDBUtils.java
index 303f328..c60af27 100755
--- a/server/src/com/cloud/api/ApiDBUtils.java
+++ b/server/src/com/cloud/api/ApiDBUtils.java
@@ -36,6 +36,7 @@ import org.apache.cloudstack.api.response.DiskOfferingResponse;
 import org.apache.cloudstack.api.response.DomainRouterResponse;
 import org.apache.cloudstack.api.response.EventResponse;
 import org.apache.cloudstack.api.response.HostResponse;
+import org.apache.cloudstack.api.response.HostForMigrationResponse;
 import org.apache.cloudstack.api.response.InstanceGroupResponse;
 import org.apache.cloudstack.api.response.ProjectAccountResponse;
 import org.apache.cloudstack.api.response.ProjectInvitationResponse;
@@ -43,6 +44,7 @@ import org.apache.cloudstack.api.response.ProjectResponse;
 import org.apache.cloudstack.api.response.ResourceTagResponse;
 import org.apache.cloudstack.api.response.SecurityGroupResponse;
 import org.apache.cloudstack.api.response.ServiceOfferingResponse;
+import org.apache.cloudstack.api.response.StoragePoolForMigrationResponse;
 import org.apache.cloudstack.api.response.StoragePoolResponse;
 import org.apache.cloudstack.api.response.UserResponse;
 import org.apache.cloudstack.api.response.UserVmResponse;
@@ -1518,6 +1520,14 @@ public class ApiDBUtils {
         return _hostJoinDao.setHostResponse(vrData, vr);
     }
 
+    public static HostForMigrationResponse newHostForMigrationResponse(HostJoinVO vr, EnumSet<HostDetails> details) {
+        return _hostJoinDao.newHostForMigrationResponse(vr, details);
+    }
+
+    public static HostForMigrationResponse fillHostForMigrationDetails(HostForMigrationResponse vrData, HostJoinVO vr) {
+        return _hostJoinDao.setHostForMigrationResponse(vrData, vr);
+    }
+
     public static List<HostJoinVO> newHostView(Host vr){
         return _hostJoinDao.newHostView(vr);
     }
@@ -1543,6 +1553,15 @@ public class ApiDBUtils {
         return _poolJoinDao.setStoragePoolResponse(vrData, vr);
     }
 
+    public static StoragePoolForMigrationResponse newStoragePoolForMigrationResponse(StoragePoolJoinVO vr) {
+        return _poolJoinDao.newStoragePoolForMigrationResponse(vr);
+    }
+
+    public static StoragePoolForMigrationResponse fillStoragePoolForMigrationDetails(StoragePoolForMigrationResponse
+            vrData, StoragePoolJoinVO vr){
+        return _poolJoinDao.setStoragePoolForMigrationResponse(vrData, vr);
+    }
+
     public static List<StoragePoolJoinVO> newStoragePoolView(StoragePool vr){
         return _poolJoinDao.newStoragePoolView(vr);
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/server/src/com/cloud/api/ApiResponseHelper.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/api/ApiResponseHelper.java b/server/src/com/cloud/api/ApiResponseHelper.java
index 7629e5e..a7d6165 100755
--- a/server/src/com/cloud/api/ApiResponseHelper.java
+++ b/server/src/com/cloud/api/ApiResponseHelper.java
@@ -65,6 +65,7 @@ import org.apache.cloudstack.api.response.FirewallRuleResponse;
 import org.apache.cloudstack.api.response.GlobalLoadBalancerResponse;
 import org.apache.cloudstack.api.response.GuestOSResponse;
 import org.apache.cloudstack.api.response.HostResponse;
+import org.apache.cloudstack.api.response.HostForMigrationResponse;
 import org.apache.cloudstack.api.response.HypervisorCapabilitiesResponse;
 import org.apache.cloudstack.api.response.IPAddressResponse;
 import org.apache.cloudstack.api.response.InstanceGroupResponse;
@@ -105,6 +106,7 @@ import org.apache.cloudstack.api.response.SnapshotResponse;
 import org.apache.cloudstack.api.response.SnapshotScheduleResponse;
 import org.apache.cloudstack.api.response.StaticRouteResponse;
 import org.apache.cloudstack.api.response.StorageNetworkIpRangeResponse;
+import org.apache.cloudstack.api.response.StoragePoolForMigrationResponse;
 import org.apache.cloudstack.api.response.StoragePoolResponse;
 import org.apache.cloudstack.api.response.SwiftResponse;
 import org.apache.cloudstack.api.response.SystemVmInstanceResponse;
@@ -511,6 +513,20 @@ public class ApiResponseHelper implements ResponseGenerator {
     }
 
     @Override
+    public HostForMigrationResponse createHostForMigrationResponse(Host host) {
+        return createHostForMigrationResponse(host, EnumSet.of(HostDetails.all));
+    }
+
+    @Override
+    public HostForMigrationResponse createHostForMigrationResponse(Host host, EnumSet<HostDetails> details) {
+        List<HostJoinVO> viewHosts = ApiDBUtils.newHostView(host);
+        List<HostForMigrationResponse> listHosts = ViewResponseHelper.createHostForMigrationResponse(details,
+                viewHosts.toArray(new HostJoinVO[viewHosts.size()]));
+        assert listHosts != null && listHosts.size() == 1 : "There should be one host returned";
+        return listHosts.get(0);
+    }
+
+    @Override
     public SwiftResponse createSwiftResponse(Swift swift) {
         SwiftResponse swiftResponse = new SwiftResponse();
         swiftResponse.setId(swift.getUuid());
@@ -908,16 +924,21 @@ public class ApiResponseHelper implements ResponseGenerator {
 
     }
 
-
-
     @Override
     public StoragePoolResponse createStoragePoolResponse(StoragePool pool) {
         List<StoragePoolJoinVO> viewPools = ApiDBUtils.newStoragePoolView(pool);
         List<StoragePoolResponse> listPools = ViewResponseHelper.createStoragePoolResponse(viewPools.toArray(new StoragePoolJoinVO[viewPools.size()]));
         assert listPools != null && listPools.size() == 1 : "There should be one storage pool returned";
         return listPools.get(0);
+    }
 
-
+    @Override
+    public StoragePoolForMigrationResponse createStoragePoolForMigrationResponse(StoragePool pool) {
+        List<StoragePoolJoinVO> viewPools = ApiDBUtils.newStoragePoolView(pool);
+        List<StoragePoolForMigrationResponse> listPools = ViewResponseHelper.createStoragePoolForMigrationResponse(
+                viewPools.toArray(new StoragePoolJoinVO[viewPools.size()]));
+        assert listPools != null && listPools.size() == 1 : "There should be one storage pool returned";
+        return listPools.get(0);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/server/src/com/cloud/api/query/ViewResponseHelper.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/api/query/ViewResponseHelper.java b/server/src/com/cloud/api/query/ViewResponseHelper.java
index dc2727e..827ae7b 100644
--- a/server/src/com/cloud/api/query/ViewResponseHelper.java
+++ b/server/src/com/cloud/api/query/ViewResponseHelper.java
@@ -30,6 +30,7 @@ import org.apache.cloudstack.api.response.DiskOfferingResponse;
 import org.apache.cloudstack.api.response.DomainRouterResponse;
 import org.apache.cloudstack.api.response.EventResponse;
 import org.apache.cloudstack.api.response.HostResponse;
+import org.apache.cloudstack.api.response.HostForMigrationResponse;
 import org.apache.cloudstack.api.response.InstanceGroupResponse;
 import org.apache.cloudstack.api.response.ProjectAccountResponse;
 import org.apache.cloudstack.api.response.ProjectInvitationResponse;
@@ -38,6 +39,7 @@ import org.apache.cloudstack.api.response.ResourceTagResponse;
 import org.apache.cloudstack.api.response.SecurityGroupResponse;
 import org.apache.cloudstack.api.response.ServiceOfferingResponse;
 import org.apache.cloudstack.api.response.StoragePoolResponse;
+import org.apache.cloudstack.api.response.StoragePoolForMigrationResponse;
 import org.apache.cloudstack.api.response.UserResponse;
 import org.apache.cloudstack.api.response.UserVmResponse;
 import org.apache.cloudstack.api.response.VolumeResponse;
@@ -230,6 +232,24 @@ public class ViewResponseHelper {
         return new ArrayList<HostResponse>(vrDataList.values());
     }
 
+    public static List<HostForMigrationResponse> createHostForMigrationResponse(EnumSet<HostDetails> details,
+            HostJoinVO... hosts) {
+        Hashtable<Long, HostForMigrationResponse> vrDataList = new Hashtable<Long, HostForMigrationResponse>();
+        // Initialise the vrdatalist with the input data
+        for (HostJoinVO vr : hosts) {
+            HostForMigrationResponse vrData = vrDataList.get(vr.getId());
+            if ( vrData == null ) {
+                // first time encountering this host
+                vrData = ApiDBUtils.newHostForMigrationResponse(vr, details);
+            } else {
+                // update tags
+                vrData = ApiDBUtils.fillHostForMigrationDetails(vrData, vr);
+            }
+            vrDataList.put(vr.getId(), vrData);
+        }
+        return new ArrayList<HostForMigrationResponse>(vrDataList.values());
+    }
+
     public static List<VolumeResponse> createVolumeResponse(VolumeJoinVO... volumes) {
         Hashtable<Long, VolumeResponse> vrDataList = new Hashtable<Long, VolumeResponse>();
         for (VolumeJoinVO vr : volumes) {
@@ -265,6 +285,23 @@ public class ViewResponseHelper {
         return new ArrayList<StoragePoolResponse>(vrDataList.values());
     }
 
+    public static List<StoragePoolForMigrationResponse> createStoragePoolForMigrationResponse(StoragePoolJoinVO... pools) {
+        Hashtable<Long, StoragePoolForMigrationResponse> vrDataList = new Hashtable<Long, StoragePoolForMigrationResponse>();
+        // Initialise the vrdatalist with the input data
+        for (StoragePoolJoinVO vr : pools) {
+            StoragePoolForMigrationResponse vrData = vrDataList.get(vr.getId());
+            if ( vrData == null ) {
+                // first time encountering this pool
+                vrData = ApiDBUtils.newStoragePoolForMigrationResponse(vr);
+            } else {
+                // update tags
+                vrData = ApiDBUtils.fillStoragePoolForMigrationDetails(vrData, vr);
+            }
+            vrDataList.put(vr.getId(), vrData);
+        }
+        return new ArrayList<StoragePoolForMigrationResponse>(vrDataList.values());
+    }
+
 
     public static List<AccountResponse> createAccountResponse(AccountJoinVO... accounts) {
         List<AccountResponse> respList = new ArrayList<AccountResponse>();

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/server/src/com/cloud/api/query/dao/HostJoinDao.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/api/query/dao/HostJoinDao.java b/server/src/com/cloud/api/query/dao/HostJoinDao.java
index 1a21299..f526ca3 100644
--- a/server/src/com/cloud/api/query/dao/HostJoinDao.java
+++ b/server/src/com/cloud/api/query/dao/HostJoinDao.java
@@ -21,6 +21,7 @@ import java.util.List;
 
 import org.apache.cloudstack.api.ApiConstants.HostDetails;
 import org.apache.cloudstack.api.response.HostResponse;
+import org.apache.cloudstack.api.response.HostForMigrationResponse;
 import com.cloud.api.query.vo.HostJoinVO;
 import com.cloud.host.Host;
 import com.cloud.utils.db.GenericDao;
@@ -31,6 +32,10 @@ public interface HostJoinDao extends GenericDao<HostJoinVO, Long> {
 
     HostResponse setHostResponse(HostResponse response, HostJoinVO host);
 
+    HostForMigrationResponse newHostForMigrationResponse(HostJoinVO host, EnumSet<HostDetails> details);
+
+    HostForMigrationResponse setHostForMigrationResponse(HostForMigrationResponse response, HostJoinVO host);
+
     List<HostJoinVO> newHostView(Host group);
 
     List<HostJoinVO> searchByIds(Long... ids);


[4/4] git commit: updated refs/heads/master to 21ce3be

Posted by ap...@apache.org.
Storage motion for XenServer changes:
1. Implemented the findStoragePoolsForMigration API. Added new response objects to list the storage pools available for migration.
2. Updated the migrateVolume API to allow migrating volumes of running VMs. These changes are integrated with the latest storage refactoring changes.
3. Added the implementation of the findHostsForMigration API. It lists the hosts to which an instance can be migrated, including hosts within and across clusters to which an instance may be migrated with storage motion. The work of migrating a volume of a running VM is also done in copyAsync.
4. Updated the listHosts API for backward compatibility.
5. Added the implementation of the migrateVirtualMachineWithVolume API. It migrates an instance with its volumes within a cluster and also across clusters. Also introduced a new XenServerStorageMotionStrategy for migrating the volumes of a VM. When a VM is migrated with its volumes, the VM is put into the Migrating state and a request is sent to the volume manager to migrate the VM and its volumes. The volume manager calls into the volume service, which forwards the request to the data motion service after moving all the volumes into the Migrating state. The data motion service enumerates the strategies, the request reaches the XenServerStorageMotionStrategy, and the strategy calls into the resource to complete the operation.
6. Resolved an issue where storage XenMotion of a second VM created from the same template to a host was failing with a duplicate_vm exception. Made changes to remove the mac_seed key-value pair from other_config when VMs are created; this was causing storage motion to fail.
7. Updated the DB upgrade schema script.
8. Added the right permissions in commands.properties.
9. Added Marvin tests for storage motion. The following scenarios are tested:
   9.1. A virtual machine is migrated to another host, and its volumes are also migrated to another storage pool.
   9.2. Just the volumes of a VM are migrated to another storage pool while the VM continues to run on the same host.
10. Added unit tests for migrating a VM with its volumes.
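
As a rough sketch of the dispatch path described in item 5 (illustrative only; the exact signatures live in DataMotionService/DataMotionServiceImpl, and the field and method names here are assumptions):

    // Simplified sketch of how the data motion service hands a VM-with-volumes
    // migration to the first strategy that can handle it; 'strategies' is assumed
    // to be the injected list of DataMotionStrategy implementations.
    public void storageMigration(Map<VolumeInfo, DataStore> volumeMap, VirtualMachineTO vmTo,
            Host srcHost, Host destHost, AsyncCompletionCallback<CopyCommandResult> callback) {
        for (DataMotionStrategy strategy : strategies) {
            if (strategy.canHandle(volumeMap, srcHost, destHost)) {
                // XenServerStorageMotionStrategy is expected to answer true when both hosts are XenServer.
                strategy.copyAsync(volumeMap, vmTo, srcHost, destHost, callback);
                return;
            }
        }
        throw new CloudRuntimeException("Unable to find a strategy to migrate vm " + vmTo + " with its volumes.");
    }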

Signed-off-by: Abhinandan Prateek <ap...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/21ce3bef
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/21ce3bef
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/21ce3bef

Branch: refs/heads/master
Commit: 21ce3befc8ea9e1a6de449a21499a50ff141a183
Parents: eae22d2
Author: Devdeep Singh <de...@gmail.com>
Authored: Mon Apr 15 11:42:18 2013 +0530
Committer: Abhinandan Prateek <ap...@apache.org>
Committed: Fri Apr 19 11:36:42 2013 +0530

----------------------------------------------------------------------
 .../cloud/agent/api/MigrateWithStorageAnswer.java  |   39 ++
 .../cloud/agent/api/MigrateWithStorageCommand.java |   45 ++
 .../api/MigrateWithStorageCompleteAnswer.java      |   38 ++
 .../api/MigrateWithStorageCompleteCommand.java     |   36 ++
 .../agent/api/MigrateWithStorageReceiveAnswer.java |   55 +++
 .../api/MigrateWithStorageReceiveCommand.java      |   45 ++
 .../agent/api/MigrateWithStorageSendAnswer.java    |   39 ++
 .../agent/api/MigrateWithStorageSendCommand.java   |   58 +++
 .../agent/api/storage/MigrateVolumeAnswer.java     |   38 ++
 .../agent/api/storage/MigrateVolumeCommand.java    |   51 ++
 .../cloud/hypervisor/HypervisorCapabilities.java   |    2 +
 api/src/com/cloud/server/ManagementService.java    |   19 +-
 api/src/com/cloud/vm/UserVmService.java            |   27 ++
 .../org/apache/cloudstack/api/ApiConstants.java    |    2 +
 .../apache/cloudstack/api/ResponseGenerator.java   |    6 +
 .../admin/host/FindHostsForMigrationCmd.java       |  107 +++++
 .../api/command/admin/host/ListHostsCmd.java       |    7 +-
 .../storage/FindStoragePoolsForMigrationCmd.java   |   98 ++++
 .../vm/MigrateVirtualMachineWithVolumeCmd.java     |  160 +++++++
 .../api/command/user/volume/MigrateVolumeCmd.java  |    8 +
 .../api/response/HostForMigrationResponse.java     |  365 +++++++++++++++
 .../cloudstack/api/response/HostResponse.java      |    1 -
 .../response/StoragePoolForMigrationResponse.java  |  248 ++++++++++
 .../api/response/StoragePoolResponse.java          |    3 -
 client/tomcatconf/applicationContext.xml.in        |    1 +
 client/tomcatconf/commands.properties.in           |    3 +
 .../cloud/hypervisor/HypervisorCapabilitiesVO.java |   24 +-
 .../api/storage/ObjectInDataStoreStateMachine.java |    2 +
 .../subsystem/api/storage/VolumeService.java       |    7 +-
 .../image/motion/DefaultImageMotionStrategy.java   |   18 +
 .../storage/test/MockStorageMotionStrategy.java    |   19 +
 .../storage/motion/AncientDataMotionStrategy.java  |   84 ++++-
 .../storage/motion/DataMotionService.java          |    9 +
 .../storage/motion/DataMotionServiceImpl.java      |   16 +
 .../storage/motion/DataMotionStrategy.java         |   10 +
 .../cloudstack/storage/volume/VolumeObject.java    |    2 +
 .../storage/volume/VolumeServiceImpl.java          |  166 +++++++-
 .../manager/allocator/impl/RandomAllocator.java    |   56 +++
 .../xen/resource/CitrixResourceBase.java           |    4 +-
 .../xen/resource/XenServer56FP1Resource.java       |    1 +
 .../xen/resource/XenServer610Resource.java         |  359 ++++++++++++++-
 .../motion/XenServerStorageMotionStrategy.java     |  239 ++++++++++
 .../agent/manager/allocator/HostAllocator.java     |   23 +-
 .../manager/allocator/impl/FirstFitAllocator.java  |   47 ++
 .../manager/allocator/impl/TestingAllocator.java   |    7 +
 server/src/com/cloud/api/ApiDBUtils.java           |   19 +
 server/src/com/cloud/api/ApiResponseHelper.java    |   27 +-
 .../com/cloud/api/query/ViewResponseHelper.java    |   37 ++
 .../src/com/cloud/api/query/dao/HostJoinDao.java   |    5 +
 .../com/cloud/api/query/dao/HostJoinDaoImpl.java   |  135 ++++++-
 .../cloud/api/query/dao/StoragePoolJoinDao.java    |    6 +
 .../api/query/dao/StoragePoolJoinDaoImpl.java      |   59 +++-
 .../src/com/cloud/server/ManagementServerImpl.java |  284 ++++++++++--
 server/src/com/cloud/storage/VolumeManager.java    |    8 +
 .../src/com/cloud/storage/VolumeManagerImpl.java   |  112 +++++-
 server/src/com/cloud/vm/UserVmManagerImpl.java     |  123 +++++
 server/src/com/cloud/vm/VirtualMachineManager.java |    4 +
 .../com/cloud/vm/VirtualMachineManagerImpl.java    |  210 +++++++++-
 .../test/com/cloud/vm/MockUserVmManagerImpl.java   |    8 +
 .../cloud/vm/MockVirtualMachineManagerImpl.java    |   10 +
 .../cloud/vm/VirtualMachineManagerImplTest.java    |  231 +++++++++-
 setup/db/db/schema-410to420.sql                    |    2 +
 test/integration/component/test_storage_motion.py  |  298 ++++++++++++
 tools/marvin/marvin/integration/lib/base.py        |   22 +
 64 files changed, 4109 insertions(+), 85 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/com/cloud/agent/api/MigrateWithStorageAnswer.java
----------------------------------------------------------------------
diff --git a/api/src/com/cloud/agent/api/MigrateWithStorageAnswer.java b/api/src/com/cloud/agent/api/MigrateWithStorageAnswer.java
new file mode 100644
index 0000000..06aff32
--- /dev/null
+++ b/api/src/com/cloud/agent/api/MigrateWithStorageAnswer.java
@@ -0,0 +1,39 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.agent.api;
+
+import java.util.List;
+import com.cloud.agent.api.to.VolumeTO;
+
+public class MigrateWithStorageAnswer extends Answer {
+
+    List<VolumeTO> volumeTos;
+
+    public MigrateWithStorageAnswer(MigrateWithStorageCommand cmd, Exception ex) {
+        super(cmd, ex);
+        volumeTos = null;
+    }
+
+    public MigrateWithStorageAnswer(MigrateWithStorageCommand cmd, List<VolumeTO> volumeTos) {
+        super(cmd, true, null);
+        this.volumeTos = volumeTos;
+    }
+
+    public List<VolumeTO> getVolumeTos() {
+        return volumeTos;
+    }
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/com/cloud/agent/api/MigrateWithStorageCommand.java
----------------------------------------------------------------------
diff --git a/api/src/com/cloud/agent/api/MigrateWithStorageCommand.java b/api/src/com/cloud/agent/api/MigrateWithStorageCommand.java
new file mode 100644
index 0000000..058aa15
--- /dev/null
+++ b/api/src/com/cloud/agent/api/MigrateWithStorageCommand.java
@@ -0,0 +1,45 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.agent.api;
+
+import java.util.Map;
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.agent.api.to.VolumeTO;
+import com.cloud.agent.api.to.StorageFilerTO;
+
+public class MigrateWithStorageCommand extends Command {
+    VirtualMachineTO vm;
+    Map<VolumeTO, StorageFilerTO> volumeToFiler;
+
+    public MigrateWithStorageCommand(VirtualMachineTO vm, Map<VolumeTO, StorageFilerTO> volumeToFiler) {
+        this.vm = vm;
+        this.volumeToFiler = volumeToFiler;
+    }
+
+    public VirtualMachineTO getVirtualMachine() {
+        return vm;
+    }
+
+    public Map<VolumeTO, StorageFilerTO> getVolumeToFiler() {
+        return volumeToFiler;
+    }
+
+    @Override
+    public boolean executeInSequence() {
+        return true;
+    }
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/com/cloud/agent/api/MigrateWithStorageCompleteAnswer.java
----------------------------------------------------------------------
diff --git a/api/src/com/cloud/agent/api/MigrateWithStorageCompleteAnswer.java b/api/src/com/cloud/agent/api/MigrateWithStorageCompleteAnswer.java
new file mode 100644
index 0000000..920cf48
--- /dev/null
+++ b/api/src/com/cloud/agent/api/MigrateWithStorageCompleteAnswer.java
@@ -0,0 +1,38 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.agent.api;
+
+import java.util.List;
+import com.cloud.agent.api.to.VolumeTO;
+
+public class MigrateWithStorageCompleteAnswer extends Answer {
+    List<VolumeTO> volumeTos;
+
+    public MigrateWithStorageCompleteAnswer(MigrateWithStorageCompleteCommand cmd, Exception ex) {
+        super(cmd, ex);
+        volumeTos = null;
+    }
+
+    public MigrateWithStorageCompleteAnswer(MigrateWithStorageCompleteCommand cmd, List<VolumeTO> volumeTos) {
+        super(cmd, true, null);
+        this.volumeTos = volumeTos;
+    }
+
+    public List<VolumeTO> getVolumeTos() {
+        return volumeTos;
+    }
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/com/cloud/agent/api/MigrateWithStorageCompleteCommand.java
----------------------------------------------------------------------
diff --git a/api/src/com/cloud/agent/api/MigrateWithStorageCompleteCommand.java b/api/src/com/cloud/agent/api/MigrateWithStorageCompleteCommand.java
new file mode 100644
index 0000000..1303c07
--- /dev/null
+++ b/api/src/com/cloud/agent/api/MigrateWithStorageCompleteCommand.java
@@ -0,0 +1,36 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.agent.api;
+
+import com.cloud.agent.api.to.VirtualMachineTO;
+
+public class MigrateWithStorageCompleteCommand extends Command {
+    VirtualMachineTO vm;
+
+    public MigrateWithStorageCompleteCommand(VirtualMachineTO vm) {
+        this.vm = vm;
+    }
+
+    public VirtualMachineTO getVirtualMachine() {
+        return vm;
+    }
+
+    @Override
+    public boolean executeInSequence() {
+        return false;
+    }
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/com/cloud/agent/api/MigrateWithStorageReceiveAnswer.java
----------------------------------------------------------------------
diff --git a/api/src/com/cloud/agent/api/MigrateWithStorageReceiveAnswer.java b/api/src/com/cloud/agent/api/MigrateWithStorageReceiveAnswer.java
new file mode 100644
index 0000000..3bf521c
--- /dev/null
+++ b/api/src/com/cloud/agent/api/MigrateWithStorageReceiveAnswer.java
@@ -0,0 +1,55 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.agent.api;
+
+import java.util.Map;
+import com.cloud.agent.api.to.VolumeTO;
+import com.cloud.agent.api.to.NicTO;
+
+public class MigrateWithStorageReceiveAnswer extends Answer {
+
+    Map<VolumeTO, Object> volumeToSr;
+    Map<NicTO, Object> nicToNetwork;
+    Map<String, String> token;
+
+    public MigrateWithStorageReceiveAnswer(MigrateWithStorageReceiveCommand cmd, Exception ex) {
+        super(cmd, ex);
+        volumeToSr = null;
+        nicToNetwork = null;
+        token = null;
+    }
+
+    public MigrateWithStorageReceiveAnswer(MigrateWithStorageReceiveCommand cmd, Map<VolumeTO, Object> volumeToSr,
+            Map<NicTO, Object> nicToNetwork, Map<String, String> token) {
+        super(cmd, true, null);
+        this.volumeToSr = volumeToSr;
+        this.nicToNetwork = nicToNetwork;
+        this.token = token;
+    }
+
+    public Map<VolumeTO, Object> getVolumeToSr() {
+        return volumeToSr;
+    }
+
+    public Map<NicTO, Object> getNicToNetwork() {
+        return nicToNetwork;
+    }
+
+    public Map<String, String> getToken() {
+        return token;
+    }
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/com/cloud/agent/api/MigrateWithStorageReceiveCommand.java
----------------------------------------------------------------------
diff --git a/api/src/com/cloud/agent/api/MigrateWithStorageReceiveCommand.java b/api/src/com/cloud/agent/api/MigrateWithStorageReceiveCommand.java
new file mode 100644
index 0000000..df67405
--- /dev/null
+++ b/api/src/com/cloud/agent/api/MigrateWithStorageReceiveCommand.java
@@ -0,0 +1,45 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.agent.api;
+
+import java.util.Map;
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.agent.api.to.VolumeTO;
+import com.cloud.agent.api.to.StorageFilerTO;
+
+public class MigrateWithStorageReceiveCommand extends Command {
+    VirtualMachineTO vm;
+    Map<VolumeTO, StorageFilerTO> volumeToFiler;
+
+    public MigrateWithStorageReceiveCommand(VirtualMachineTO vm, Map<VolumeTO, StorageFilerTO> volumeToFiler) {
+        this.vm = vm;
+        this.volumeToFiler = volumeToFiler;
+    }
+
+    public VirtualMachineTO getVirtualMachine() {
+        return vm;
+    }
+
+    public Map<VolumeTO, StorageFilerTO> getVolumeToFiler() {
+        return volumeToFiler;
+    }
+
+    @Override
+    public boolean executeInSequence() {
+        return true;
+    }
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/com/cloud/agent/api/MigrateWithStorageSendAnswer.java
----------------------------------------------------------------------
diff --git a/api/src/com/cloud/agent/api/MigrateWithStorageSendAnswer.java b/api/src/com/cloud/agent/api/MigrateWithStorageSendAnswer.java
new file mode 100644
index 0000000..7cf641f
--- /dev/null
+++ b/api/src/com/cloud/agent/api/MigrateWithStorageSendAnswer.java
@@ -0,0 +1,39 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.agent.api;
+
+import java.util.Set;
+import com.cloud.agent.api.to.VolumeTO;
+
+public class MigrateWithStorageSendAnswer extends Answer {
+
+    Set<VolumeTO> volumeToSet;
+
+    public MigrateWithStorageSendAnswer(MigrateWithStorageSendCommand cmd, Exception ex) {
+        super(cmd, ex);
+        volumeToSet = null;
+    }
+
+    public MigrateWithStorageSendAnswer(MigrateWithStorageSendCommand cmd, Set<VolumeTO> volumeToSet) {
+        super(cmd, true, null);
+        this.volumeToSet = volumeToSet;
+    }
+
+    public Set<VolumeTO> getVolumeToSet() {
+        return volumeToSet;
+    }
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/com/cloud/agent/api/MigrateWithStorageSendCommand.java
----------------------------------------------------------------------
diff --git a/api/src/com/cloud/agent/api/MigrateWithStorageSendCommand.java b/api/src/com/cloud/agent/api/MigrateWithStorageSendCommand.java
new file mode 100644
index 0000000..d10db30
--- /dev/null
+++ b/api/src/com/cloud/agent/api/MigrateWithStorageSendCommand.java
@@ -0,0 +1,58 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.agent.api;
+
+import java.util.Map;
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.agent.api.to.VolumeTO;
+import com.cloud.agent.api.to.NicTO;
+
+public class MigrateWithStorageSendCommand extends Command {
+    VirtualMachineTO vm;
+    Map<VolumeTO, Object> volumeToSr;
+    Map<NicTO, Object> nicToNetwork;
+    Map<String, String> token;
+
+    public MigrateWithStorageSendCommand(VirtualMachineTO vm, Map<VolumeTO, Object> volumeToSr,
+            Map<NicTO, Object> nicToNetwork, Map<String, String> token) {
+        this.vm = vm;
+        this.volumeToSr = volumeToSr;
+        this.nicToNetwork = nicToNetwork;
+        this.token = token;
+    }
+
+    public VirtualMachineTO getVirtualMachine() {
+        return vm;
+    }
+
+    public Map<VolumeTO, Object> getVolumeToSr() {
+        return volumeToSr;
+    }
+
+    public Map<NicTO, Object> getNicToNetwork() {
+        return nicToNetwork;
+    }
+
+    public Map<String, String> getToken() {
+        return token;
+    }
+
+    @Override
+    public boolean executeInSequence() {
+        return true;
+    }
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/com/cloud/agent/api/storage/MigrateVolumeAnswer.java
----------------------------------------------------------------------
diff --git a/api/src/com/cloud/agent/api/storage/MigrateVolumeAnswer.java b/api/src/com/cloud/agent/api/storage/MigrateVolumeAnswer.java
new file mode 100644
index 0000000..d5efa95
--- /dev/null
+++ b/api/src/com/cloud/agent/api/storage/MigrateVolumeAnswer.java
@@ -0,0 +1,38 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.agent.api.storage;
+
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.Command;
+
+public class MigrateVolumeAnswer extends Answer {
+    private String volumePath;
+
+    public MigrateVolumeAnswer(Command command, boolean success, String details, String volumePath) {
+        super(command, success, details);
+        this.volumePath = volumePath;
+    }
+
+    public MigrateVolumeAnswer(Command command) {
+        super(command);
+        this.volumePath = null;
+    }
+
+    public String getVolumePath() {
+        return volumePath;
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/com/cloud/agent/api/storage/MigrateVolumeCommand.java
----------------------------------------------------------------------
diff --git a/api/src/com/cloud/agent/api/storage/MigrateVolumeCommand.java b/api/src/com/cloud/agent/api/storage/MigrateVolumeCommand.java
new file mode 100644
index 0000000..b82d848
--- /dev/null
+++ b/api/src/com/cloud/agent/api/storage/MigrateVolumeCommand.java
@@ -0,0 +1,51 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.agent.api.storage;
+
+import com.cloud.agent.api.Command;
+import com.cloud.agent.api.to.StorageFilerTO;
+import com.cloud.storage.StoragePool;
+
+public class MigrateVolumeCommand extends Command {
+
+    long volumeId;
+    String volumePath;
+    StorageFilerTO pool;
+
+    public MigrateVolumeCommand(long volumeId, String volumePath, StoragePool pool) {
+        this.volumeId = volumeId;
+        this.volumePath = volumePath;
+        this.pool = new StorageFilerTO(pool);
+    }
+
+    @Override
+    public boolean executeInSequence() {
+        return true;
+    }
+
+    public String getVolumePath() {
+        return volumePath;
+    }
+
+    public long getVolumeId() {
+        return volumeId;
+    }
+
+    public StorageFilerTO getPool() {
+        return pool;
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/com/cloud/hypervisor/HypervisorCapabilities.java
----------------------------------------------------------------------
diff --git a/api/src/com/cloud/hypervisor/HypervisorCapabilities.java b/api/src/com/cloud/hypervisor/HypervisorCapabilities.java
index aff81b0..c954750 100644
--- a/api/src/com/cloud/hypervisor/HypervisorCapabilities.java
+++ b/api/src/com/cloud/hypervisor/HypervisorCapabilities.java
@@ -52,4 +52,6 @@ public interface HypervisorCapabilities extends Identity, InternalIdentity{
      */
     Integer getMaxHostsPerCluster();
 
+    boolean isStorageMotionSupported();
+
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/com/cloud/server/ManagementService.java
----------------------------------------------------------------------
diff --git a/api/src/com/cloud/server/ManagementService.java b/api/src/com/cloud/server/ManagementService.java
index 460357b..2249407 100755
--- a/api/src/com/cloud/server/ManagementService.java
+++ b/api/src/com/cloud/server/ManagementService.java
@@ -75,9 +75,11 @@ import com.cloud.network.IpAddress;
 import com.cloud.org.Cluster;
 import com.cloud.storage.GuestOS;
 import com.cloud.storage.GuestOsCategory;
+import com.cloud.storage.StoragePool;
 import com.cloud.template.VirtualMachineTemplate;
 import com.cloud.user.SSHKeyPair;
 import com.cloud.utils.Pair;
+import com.cloud.utils.Ternary;
 import com.cloud.vm.InstanceGroup;
 import com.cloud.vm.VirtualMachine;
 import com.cloud.vm.VirtualMachine.Type;
@@ -388,10 +390,21 @@ public interface ManagementService {
      * @param Long
      *            vmId
      *            Id of The VM to migrate
-     * @return Pair<List<? extends Host>, List<? extends Host>> List of all Hosts in VM's cluster and list of Hosts with
-     *         enough capacity
+     * @return Ternary<List<? extends Host>, List<? extends Host>, Map<Host, Boolean>> List of all Hosts to which a VM
+     *         can be migrated, the list of Hosts with enough capacity, and a map of hosts that require storage motion for the migration.
      */
-    Pair<Pair<List<? extends Host>, Integer>, List<? extends Host>> listHostsForMigrationOfVM(Long vmId, Long startIndex, Long pageSize);
+    Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>> listHostsForMigrationOfVM(
+            Long vmId, Long startIndex, Long pageSize);
+
+    /**
+     * List storage pools available for live migration of a volume. The API returns a list of all pools in the cluster
+     * to which the volume can be migrated. The current pool is not included in the list.
+     *
+     * @param Long volumeId
+     * @return Pair<List<? extends StoragePool>, List<? extends StoragePool>> List of storage pools in cluster and list
+     *         of pools with enough capacity.
+     */
+    Pair<List<? extends StoragePool>, List<? extends StoragePool>> listStoragePoolsForMigrationOfVolume(Long volumeId);
 
     String[] listEventTypes();
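
A hedged usage sketch (not from this commit) of how an API command handler could consume the new method; the variable names are assumptions, while '_mgr' mirrors the ManagementService reference used by the command classes below:

    // pools.first()  -> all pools in the volume's cluster except the one it is currently on
    // pools.second() -> the subset of those pools with enough free capacity
    Pair<List<? extends StoragePool>, List<? extends StoragePool>> pools =
            _mgr.listStoragePoolsForMigrationOfVolume(volumeId);
    for (StoragePool pool : pools.first()) {
        boolean suitable = pools.second().contains(pool);
        // build a StoragePoolForMigrationResponse here and flag it as suitable or not
    }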
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/com/cloud/vm/UserVmService.java
----------------------------------------------------------------------
diff --git a/api/src/com/cloud/vm/UserVmService.java b/api/src/com/cloud/vm/UserVmService.java
index d963b74..aa21136 100755
--- a/api/src/com/cloud/vm/UserVmService.java
+++ b/api/src/com/cloud/vm/UserVmService.java
@@ -405,6 +405,33 @@ public interface UserVmService {
      */
     VirtualMachine migrateVirtualMachine(Long vmId, Host destinationHost) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException;
 
+    /**
+     * Migrate the given VM with its volumes to the destination host. The API returns the migrated VM if it succeeds.
+     * Only root admin can migrate a VM.
+     *
+     * @param Long
+     *            vmId of the VM to migrate
+     * @param Host
+     *            destinationHost to migrate the VM to
+     * @param Map
+     *            volumeToPool, a map of each volume to the pool to which it should be migrated
+     *
+     * @return VirtualMachine migrated VM
+     * @throws ManagementServerException
+     *             in case we get error finding the VM or host or access errors or other internal errors.
+     * @throws ConcurrentOperationException
+     *             if there are multiple users working on the same VM.
+     * @throws ResourceUnavailableException
+     *             if the destination host to migrate the VM is not currently available.
+     * @throws VirtualMachineMigrationException
+     *             if the VM to be migrated is not in Running state
+     */
+    VirtualMachine migrateVirtualMachineWithVolume(Long vmId, Host destinationHost, Map<String, String> volumeToPool)
+            throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException,
+            VirtualMachineMigrationException;
+
     UserVm moveVMToUser(AssignVMCmd moveUserVMCmd) throws ResourceAllocationException, ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException;
 
     VirtualMachine vmStorageMigration(Long vmId, StoragePool destPool);
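
An assumed caller sketch (illustrative, not part of the commit): migrate a running VM to a destination host while placing one of its volumes on a chosen pool. The map is assumed to be keyed by a volume identifier with the target pool identifier as the value, mirroring the 'migrateto' parameter added to ApiConstants; '_userVmService' and the identifier variables are assumptions.

    // Checked exceptions (ResourceUnavailableException, ConcurrentOperationException,
    // ManagementServerException, VirtualMachineMigrationException) omitted for brevity.
    Map<String, String> volumeToPool = new HashMap<String, String>();
    volumeToPool.put(rootVolumeUuid, targetPoolUuid);   // identifiers assumed to be known to the caller
    VirtualMachine migratedVm =
            _userVmService.migrateVirtualMachineWithVolume(vmId, destinationHost, volumeToPool);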

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/org/apache/cloudstack/api/ApiConstants.java
----------------------------------------------------------------------
diff --git a/api/src/org/apache/cloudstack/api/ApiConstants.java b/api/src/org/apache/cloudstack/api/ApiConstants.java
index 8c32bb3..edaaeb3 100755
--- a/api/src/org/apache/cloudstack/api/ApiConstants.java
+++ b/api/src/org/apache/cloudstack/api/ApiConstants.java
@@ -365,6 +365,8 @@ public class ApiConstants {
     public static final String HA_HOST = "hahost";
     public static final String CUSTOM_DISK_OFF_MAX_SIZE = "customdiskofferingmaxsize";
     public static final String DEFAULT_ZONE_ID = "defaultzoneid";
+    public static final String LIVE_MIGRATE = "livemigrate";
+    public static final String MIGRATE_TO = "migrateto";
     public static final String GUID = "guid";
     public static final String VSWITCH_TYPE_GUEST_TRAFFIC = "guestvswitchtype";
     public static final String VSWITCH_TYPE_PUBLIC_TRAFFIC = "publicvswitchtype";

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/org/apache/cloudstack/api/ResponseGenerator.java
----------------------------------------------------------------------
diff --git a/api/src/org/apache/cloudstack/api/ResponseGenerator.java b/api/src/org/apache/cloudstack/api/ResponseGenerator.java
index c0dd57e..a3aa9de 100644
--- a/api/src/org/apache/cloudstack/api/ResponseGenerator.java
+++ b/api/src/org/apache/cloudstack/api/ResponseGenerator.java
@@ -189,6 +189,10 @@ public interface ResponseGenerator {
 
     HostResponse createHostResponse(Host host);
 
+    HostForMigrationResponse createHostForMigrationResponse(Host host);
+
+    HostForMigrationResponse createHostForMigrationResponse(Host host, EnumSet<HostDetails> details);
+
     VlanIpRangeResponse createVlanIpRangeResponse(Vlan vlan);
 
     IPAddressResponse createIPAddressResponse(IpAddress ipAddress);
@@ -216,6 +220,8 @@ public interface ResponseGenerator {
 
     StoragePoolResponse createStoragePoolResponse(StoragePool pool);
 
+    StoragePoolForMigrationResponse createStoragePoolForMigrationResponse(StoragePool pool);
+
     ClusterResponse createClusterResponse(Cluster cluster, Boolean showCapacities);
 
     FirewallRuleResponse createPortForwardingRuleResponse(PortForwardingRule fwRule);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/org/apache/cloudstack/api/command/admin/host/FindHostsForMigrationCmd.java
----------------------------------------------------------------------
diff --git a/api/src/org/apache/cloudstack/api/command/admin/host/FindHostsForMigrationCmd.java b/api/src/org/apache/cloudstack/api/command/admin/host/FindHostsForMigrationCmd.java
new file mode 100644
index 0000000..e6e45cc
--- /dev/null
+++ b/api/src/org/apache/cloudstack/api/command/admin/host/FindHostsForMigrationCmd.java
@@ -0,0 +1,107 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.api.command.admin.host;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.cloudstack.api.APICommand;
+import org.apache.log4j.Logger;
+
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.BaseListCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.response.HostForMigrationResponse;
+import org.apache.cloudstack.api.response.ListResponse;
+import org.apache.cloudstack.api.response.UserVmResponse;
+import com.cloud.host.Host;
+import com.cloud.utils.Pair;
+import com.cloud.utils.Ternary;
+
+@APICommand(name = "findHostsForMigration", description="Find hosts suitable for migrating a virtual machine.",
+    responseObject=HostForMigrationResponse.class)
+public class FindHostsForMigrationCmd extends BaseListCmd {
+    public static final Logger s_logger = Logger.getLogger(FindHostsForMigrationCmd.class.getName());
+
+    private static final String s_name = "findhostsformigrationresponse";
+
+    /////////////////////////////////////////////////////
+    //////////////// API parameters /////////////////////
+    /////////////////////////////////////////////////////
+
+    @Parameter(name=ApiConstants.VIRTUAL_MACHINE_ID, type=CommandType.UUID, entityType = UserVmResponse.class,
+            required=false, description="find hosts to which this VM can be migrated and flag the hosts with enough " +
+                "CPU/RAM to host the VM")
+    private Long virtualMachineId;
+
+    /////////////////////////////////////////////////////
+    /////////////////// Accessors ///////////////////////
+    /////////////////////////////////////////////////////
+
+    public Long getVirtualMachineId() {
+        return virtualMachineId;
+    }
+
+    /////////////////////////////////////////////////////
+    /////////////// API Implementation///////////////////
+    /////////////////////////////////////////////////////
+
+    @Override
+    public String getCommandName() {
+        return s_name;
+    }
+
+    @Override
+    public void execute() {
+        ListResponse<HostForMigrationResponse> response = null;
+        Pair<List<? extends Host>,Integer> result;
+        List<? extends Host> hostsWithCapacity = new ArrayList<Host>();
+        Map<Host, Boolean> hostsRequiringStorageMotion;
+
+        Ternary<Pair<List<? extends Host>,Integer>, List<? extends Host>, Map<Host, Boolean>> hostsForMigration =
+                _mgr.listHostsForMigrationOfVM(getVirtualMachineId(), this.getStartIndex(), this.getPageSizeVal());
+        result = hostsForMigration.first();
+        hostsWithCapacity = hostsForMigration.second();
+        hostsRequiringStorageMotion = hostsForMigration.third();
+
+        response = new ListResponse<HostForMigrationResponse>();
+        List<HostForMigrationResponse> hostResponses = new ArrayList<HostForMigrationResponse>();
+        for (Host host : result.first()) {
+            HostForMigrationResponse hostResponse = _responseGenerator.createHostForMigrationResponse(host);
+            Boolean suitableForMigration = false;
+            if (hostsWithCapacity.contains(host)) {
+                suitableForMigration = true;
+            }
+            hostResponse.setSuitableForMigration(suitableForMigration);
+
+            Boolean requiresStorageMotion = hostsRequiringStorageMotion.get(host);
+            if (requiresStorageMotion != null && requiresStorageMotion) {
+                hostResponse.setRequiresStorageMotion(true);
+            } else {
+                hostResponse.setRequiresStorageMotion(false);
+            }
+
+            hostResponse.setObjectName("host");
+            hostResponses.add(hostResponse);
+        }
+
+        response.setResponses(hostResponses, result.second());
+        response.setResponseName(getCommandName());
+        this.setResponseObject(response);
+    }
+}
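
For reference, the new response flags each host twice: suitableformigration comes from the hosts-with-capacity list and requiresstoragemotion from the per-host map carried in the Ternary. Below is a minimal, self-contained sketch of that flagging logic, with plain strings standing in for Host objects (the host names are illustrative, not part of the patch):

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class HostFlaggingSketch {
        public static void main(String[] args) {
            // All hosts returned by listHostsForMigrationOfVM (result.first() in the command).
            List<String> allHosts = Arrays.asList("host-1", "host-2", "host-3");
            // Hosts with enough CPU/RAM for the VM (hostsForMigration.second()).
            List<String> hostsWithCapacity = Arrays.asList("host-1", "host-3");
            // Hosts that can only take the VM together with its volumes (hostsForMigration.third()).
            Map<String, Boolean> hostsRequiringStorageMotion = new HashMap<String, Boolean>();
            hostsRequiringStorageMotion.put("host-3", Boolean.TRUE);

            for (String host : allHosts) {
                boolean suitable = hostsWithCapacity.contains(host);
                Boolean motion = hostsRequiringStorageMotion.get(host);
                boolean requiresStorageMotion = motion != null && motion;
                System.out.println(host + ": suitableformigration=" + suitable
                        + ", requiresstoragemotion=" + requiresStorageMotion);
            }
        }
    }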

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java
----------------------------------------------------------------------
diff --git a/api/src/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java b/api/src/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java
index 29844c3..5ec7cf3 100644
--- a/api/src/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java
+++ b/api/src/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java
@@ -19,6 +19,7 @@ package org.apache.cloudstack.api.command.admin.host;
 import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.cloudstack.api.APICommand;
 import org.apache.cloudstack.api.ApiConstants;
@@ -37,6 +38,7 @@ import com.cloud.async.AsyncJob;
 import com.cloud.exception.InvalidParameterValueException;
 import com.cloud.host.Host;
 import com.cloud.utils.Pair;
+import com.cloud.utils.Ternary;
 
 @APICommand(name = "listHosts", description="Lists hosts.", responseObject=HostResponse.class)
 public class ListHostsCmd extends BaseListCmd {
@@ -170,8 +172,8 @@ public class ListHostsCmd extends BaseListCmd {
         } else {
             Pair<List<? extends Host>,Integer> result;
             List<? extends Host> hostsWithCapacity = new ArrayList<Host>();
-
-            Pair<Pair<List<? extends Host>,Integer>, List<? extends Host>> hostsForMigration = _mgr.listHostsForMigrationOfVM(getVirtualMachineId(), this.getStartIndex(), this.getPageSizeVal());
+            Ternary<Pair<List<? extends Host>,Integer>, List<? extends Host>, Map<Host, Boolean>> hostsForMigration =
+                    _mgr.listHostsForMigrationOfVM(getVirtualMachineId(), this.getStartIndex(), this.getPageSizeVal());
             result = hostsForMigration.first();
             hostsWithCapacity = hostsForMigration.second();
 
@@ -192,6 +194,5 @@ public class ListHostsCmd extends BaseListCmd {
         }
         response.setResponseName(getCommandName());
         this.setResponseObject(response);
-
     }
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/org/apache/cloudstack/api/command/admin/storage/FindStoragePoolsForMigrationCmd.java
----------------------------------------------------------------------
diff --git a/api/src/org/apache/cloudstack/api/command/admin/storage/FindStoragePoolsForMigrationCmd.java b/api/src/org/apache/cloudstack/api/command/admin/storage/FindStoragePoolsForMigrationCmd.java
new file mode 100644
index 0000000..37d007c
--- /dev/null
+++ b/api/src/org/apache/cloudstack/api/command/admin/storage/FindStoragePoolsForMigrationCmd.java
@@ -0,0 +1,98 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.api.command.admin.storage;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.cloudstack.api.APICommand;
+import org.apache.log4j.Logger;
+
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.BaseListCmd;
+import org.apache.cloudstack.api.Parameter;
+import org.apache.cloudstack.api.response.ListResponse;
+import org.apache.cloudstack.api.response.StoragePoolForMigrationResponse;
+import org.apache.cloudstack.api.response.VolumeResponse;
+import com.cloud.async.AsyncJob;
+import com.cloud.storage.StoragePool;
+import com.cloud.utils.Pair;
+
+@APICommand(name = "findStoragePoolsForMigration", description="Lists storage pools available for migration of a volume.",
+    responseObject=StoragePoolForMigrationResponse.class)
+public class FindStoragePoolsForMigrationCmd extends BaseListCmd {
+    public static final Logger s_logger = Logger.getLogger(FindStoragePoolsForMigrationCmd.class.getName());
+
+    private static final String s_name = "findstoragepoolsformigrationresponse";
+
+    /////////////////////////////////////////////////////
+    //////////////// API parameters /////////////////////
+    /////////////////////////////////////////////////////
+
+    @Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType = VolumeResponse.class, required=true,
+            description="the ID of the volume")
+    private Long id;
+
+    /////////////////////////////////////////////////////
+    /////////////////// Accessors ///////////////////////
+    /////////////////////////////////////////////////////
+
+    public Long getId() {
+        return id;
+    }
+
+    /////////////////////////////////////////////////////
+    /////////////// API Implementation///////////////////
+    /////////////////////////////////////////////////////
+
+    @Override
+    public String getCommandName() {
+        return s_name;
+    }
+
+    public AsyncJob.Type getInstanceType() {
+        return AsyncJob.Type.StoragePool;
+    }
+
+    @Override
+    public void execute() {
+        Pair<List<? extends StoragePool>, List<? extends StoragePool>> pools =
+                _mgr.listStoragePoolsForMigrationOfVolume(getId());
+        ListResponse<StoragePoolForMigrationResponse> response = new ListResponse<StoragePoolForMigrationResponse>();
+        List<StoragePoolForMigrationResponse> poolResponses = new ArrayList<StoragePoolForMigrationResponse>();
+
+        List<? extends StoragePool> allPools = pools.first();
+        List<? extends StoragePool> suitablePoolList = pools.second();
+        for (StoragePool pool : allPools) {
+            StoragePoolForMigrationResponse poolResponse = _responseGenerator.createStoragePoolForMigrationResponse(pool);
+            Boolean suitableForMigration = false;
+            for (StoragePool suitablePool : suitablePoolList) {
+                if (suitablePool.getId() == pool.getId()) {
+                    suitableForMigration = true;
+                    break;
+                }
+            }
+            poolResponse.setSuitableForMigration(suitableForMigration);
+            poolResponse.setObjectName("storagepool");
+            poolResponses.add(poolResponse);
+        }
+
+        response.setResponses(poolResponses);
+        response.setResponseName(getCommandName());
+        this.setResponseObject(response);
+    }
+}
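
For illustration only, the new command can be driven through the regular query API like any other admin command. The sketch below assumes the standard CloudStack request-signing scheme (sorted, URL-encoded parameters, lowercased, HMAC-SHA1 over the query string, Base64, then URL-encoded); the endpoint, API/secret keys and volume UUID are placeholders, not values from this patch:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.URL;
    import java.net.URLEncoder;
    import java.util.Map;
    import java.util.TreeMap;
    import javax.crypto.Mac;
    import javax.crypto.spec.SecretKeySpec;

    public class FindPoolsForMigrationCall {
        public static void main(String[] args) throws Exception {
            String endpoint = "http://localhost:8080/client/api";   // placeholder management server
            String apiKey = "YOUR_API_KEY";                          // placeholder credentials
            String secretKey = "YOUR_SECRET_KEY";

            // Parameters for the new admin API; TreeMap keeps them sorted for signing.
            Map<String, String> params = new TreeMap<String, String>();
            params.put("command", "findStoragePoolsForMigration");
            params.put("id", "VOLUME_UUID");                         // placeholder volume uuid
            params.put("response", "json");
            params.put("apiKey", apiKey);

            StringBuilder query = new StringBuilder();
            for (Map.Entry<String, String> e : params.entrySet()) {
                if (query.length() > 0) {
                    query.append('&');
                }
                query.append(e.getKey()).append('=').append(URLEncoder.encode(e.getValue(), "UTF-8"));
            }

            // Lowercase the query, HMAC-SHA1 with the secret key, Base64-encode, then URL-encode.
            Mac mac = Mac.getInstance("HmacSHA1");
            mac.init(new SecretKeySpec(secretKey.getBytes("UTF-8"), "HmacSHA1"));
            byte[] digest = mac.doFinal(query.toString().toLowerCase().getBytes("UTF-8"));
            String signature = URLEncoder.encode(java.util.Base64.getEncoder().encodeToString(digest), "UTF-8");

            URL url = new URL(endpoint + "?" + query + "&signature=" + signature);
            BufferedReader in = new BufferedReader(new InputStreamReader(url.openStream(), "UTF-8"));
            for (String line; (line = in.readLine()) != null;) {
                System.out.println(line);   // JSON listing each pool with its suitableformigration flag
            }
            in.close();
        }
    }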

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java
----------------------------------------------------------------------
diff --git a/api/src/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java
new file mode 100644
index 0000000..b1eaf11
--- /dev/null
+++ b/api/src/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java
@@ -0,0 +1,160 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.api.command.admin.vm;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+import org.apache.cloudstack.api.*;
+import org.apache.log4j.Logger;
+
+import org.apache.cloudstack.api.APICommand;
+
+import org.apache.cloudstack.api.BaseCmd.CommandType;
+import org.apache.cloudstack.api.response.HostResponse;
+import org.apache.cloudstack.api.response.StoragePoolResponse;
+import org.apache.cloudstack.api.response.UserVmResponse;
+import com.cloud.event.EventTypes;
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.exception.ManagementServerException;
+import com.cloud.exception.ResourceUnavailableException;
+import com.cloud.exception.VirtualMachineMigrationException;
+import com.cloud.host.Host;
+import com.cloud.storage.StoragePool;
+import com.cloud.user.Account;
+import com.cloud.user.UserContext;
+import com.cloud.uservm.UserVm;
+import com.cloud.vm.VirtualMachine;
+
+@APICommand(name = "migrateVirtualMachineWithVolume", description="Attempts Migration of a VM with its volumes to a different host", responseObject=UserVmResponse.class)
+public class MigrateVirtualMachineWithVolumeCmd extends BaseAsyncCmd {
+    public static final Logger s_logger = Logger.getLogger(MigrateVirtualMachineWithVolumeCmd.class.getName());
+
+    private static final String s_name = "migratevirtualmachinewithvolumeresponse";
+
+    /////////////////////////////////////////////////////
+    //////////////// API parameters /////////////////////
+    /////////////////////////////////////////////////////
+
+    @Parameter(name=ApiConstants.HOST_ID, type=CommandType.UUID, entityType=HostResponse.class,
+            required=true, description="Destination Host ID to migrate VM to.")
+    private Long hostId;
+
+    @Parameter(name=ApiConstants.VIRTUAL_MACHINE_ID, type=CommandType.UUID, entityType=UserVmResponse.class,
+            required=true, description="the ID of the virtual machine")
+    private Long virtualMachineId;
+
+    @Parameter(name = ApiConstants.MIGRATE_TO, type = CommandType.MAP, required=false,
+            description = "Map of pool to which each volume should be migrated (volume/pool pair)")
+    private Map migrateVolumeTo;
+
+    /////////////////////////////////////////////////////
+    /////////////////// Accessors ///////////////////////
+    /////////////////////////////////////////////////////
+
+    public Long getHostId() {
+        return hostId;
+    }
+
+    public Long getVirtualMachineId() {
+        return virtualMachineId;
+    }
+
+    public Map<String, String> getVolumeToPool() {
+        Map<String, String> volumeToPoolMap = new HashMap<String, String>();
+        if (migrateVolumeTo != null && !migrateVolumeTo.isEmpty()) {
+            Collection<?> allValues = migrateVolumeTo.values();
+            Iterator<?> iter = allValues.iterator();
+            while (iter.hasNext()) {
+                HashMap<String, String> volumeToPool = (HashMap<String, String>) iter.next();
+                String volume = volumeToPool.get("volume");
+                String pool = volumeToPool.get("pool");
+                volumeToPoolMap.put(volume, pool);
+            }
+        }
+        return volumeToPoolMap;
+    }
+
+    /////////////////////////////////////////////////////
+    /////////////// API Implementation///////////////////
+    /////////////////////////////////////////////////////
+
+    @Override
+    public String getCommandName() {
+        return s_name;
+    }
+
+    @Override
+    public long getEntityOwnerId() {
+        UserVm userVm = _entityMgr.findById(UserVm.class, getVirtualMachineId());
+        if (userVm != null) {
+            return userVm.getAccountId();
+        }
+
+        return Account.ACCOUNT_ID_SYSTEM;
+    }
+
+    @Override
+    public String getEventType() {
+        return EventTypes.EVENT_VM_MIGRATE;
+    }
+
+    @Override
+    public String getEventDescription() {
+        return  "Attempting to migrate VM Id: " + getVirtualMachineId() + " to host Id: "+ getHostId();
+    }
+
+    @Override
+    public void execute(){
+        UserVm userVm = _userVmService.getUserVm(getVirtualMachineId());
+        if (userVm == null) {
+            throw new InvalidParameterValueException("Unable to find the VM by id=" + getVirtualMachineId());
+        }
+
+        Host destinationHost = _resourceService.getHost(getHostId());
+        if (destinationHost == null) {
+            throw new InvalidParameterValueException("Unable to find the host to migrate the VM, host id =" + getHostId());
+        }
+
+        try{
+            VirtualMachine migratedVm = _userVmService.migrateVirtualMachineWithVolume(getVirtualMachineId(),
+                    destinationHost, getVolumeToPool());
+            if (migratedVm != null) {
+                UserVmResponse response = _responseGenerator.createUserVmResponse("virtualmachine", (UserVm)migratedVm).get(0);
+                response.setResponseName(getCommandName());
+                this.setResponseObject(response);
+            } else {
+                throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to migrate vm");
+            }
+        } catch (ResourceUnavailableException ex) {
+            s_logger.warn("Exception: ", ex);
+            throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage());
+        } catch (ConcurrentOperationException e) {
+            s_logger.warn("Exception: ", e);
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
+        } catch (ManagementServerException e) {
+            s_logger.warn("Exception: ", e);
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
+        } catch (VirtualMachineMigrationException e) {
+            s_logger.warn("Exception: ", e);
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage());
+        }
+    }
+}
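
The migrateto argument is a MAP-type parameter, so on the wire it is expected to arrive as indexed volume/pool pairs (for example migrateto[0].volume=<volume-uuid>&migrateto[0].pool=<pool-uuid>, following the usual CloudStack map-parameter convention), and getVolumeToPool() flattens it into a volume-to-pool lookup. A standalone sketch of that flattening, using hypothetical UUIDs:

    import java.util.Collection;
    import java.util.HashMap;
    import java.util.Iterator;
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class MigrateToParamSketch {
        public static void main(String[] args) {
            // What the API layer hands the command after parsing
            // migrateto[0].volume=...&migrateto[0].pool=... style parameters.
            Map<String, HashMap<String, String>> migrateVolumeTo =
                    new LinkedHashMap<String, HashMap<String, String>>();
            HashMap<String, String> pair0 = new HashMap<String, String>();
            pair0.put("volume", "c4a3303c-0000-0000-0000-volume0");   // hypothetical volume uuid
            pair0.put("pool", "7f1d2e55-0000-0000-0000-pool0");       // hypothetical pool uuid
            migrateVolumeTo.put("0", pair0);

            // Same flattening that getVolumeToPool() performs.
            Map<String, String> volumeToPoolMap = new HashMap<String, String>();
            Collection<HashMap<String, String>> allValues = migrateVolumeTo.values();
            Iterator<HashMap<String, String>> iter = allValues.iterator();
            while (iter.hasNext()) {
                HashMap<String, String> volumeToPool = iter.next();
                volumeToPoolMap.put(volumeToPool.get("volume"), volumeToPool.get("pool"));
            }
            System.out.println(volumeToPoolMap);   // {c4a3303c-... = 7f1d2e55-...}
        }
    }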

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/org/apache/cloudstack/api/command/user/volume/MigrateVolumeCmd.java
----------------------------------------------------------------------
diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/MigrateVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/MigrateVolumeCmd.java
index 287241a..ce40f0d 100644
--- a/api/src/org/apache/cloudstack/api/command/user/volume/MigrateVolumeCmd.java
+++ b/api/src/org/apache/cloudstack/api/command/user/volume/MigrateVolumeCmd.java
@@ -47,6 +47,10 @@ public class MigrateVolumeCmd extends BaseAsyncCmd {
             required=true, description="destination storage pool ID to migrate the volume to")
     private Long storageId;
 
+    @Parameter(name=ApiConstants.LIVE_MIGRATE, type=CommandType.BOOLEAN, required=false,
+            description="if the volume should be live migrated when it is attached to a running vm")
+    private Boolean liveMigrate;
+
     /////////////////////////////////////////////////////
     /////////////////// Accessors ///////////////////////
     /////////////////////////////////////////////////////
@@ -58,6 +62,10 @@ public class MigrateVolumeCmd extends BaseAsyncCmd {
     public Long getStoragePoolId() {
         return storageId;
     }
+
+    public boolean isLiveMigrate() {
+        return (liveMigrate != null) ? liveMigrate : false;
+    }
     /////////////////////////////////////////////////////
     /////////////// API Implementation///////////////////
     /////////////////////////////////////////////////////
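
Since livemigrate is optional, isLiveMigrate() guards against null before unboxing; auto-unboxing a null Boolean would throw a NullPointerException. A small illustration of the same guard:

    public class OptionalBooleanSketch {
        static boolean isLiveMigrate(Boolean liveMigrate) {
            return (liveMigrate != null) ? liveMigrate : false;   // same guard as MigrateVolumeCmd
        }

        public static void main(String[] args) {
            System.out.println(isLiveMigrate(null));          // false: caller omitted livemigrate
            System.out.println(isLiveMigrate(Boolean.TRUE));  // true: livemigrate=true was passed
            // boolean broken = (Boolean) null;               // would throw NullPointerException when unboxed
        }
    }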

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/org/apache/cloudstack/api/response/HostForMigrationResponse.java
----------------------------------------------------------------------
diff --git a/api/src/org/apache/cloudstack/api/response/HostForMigrationResponse.java b/api/src/org/apache/cloudstack/api/response/HostForMigrationResponse.java
new file mode 100644
index 0000000..fde2440
--- /dev/null
+++ b/api/src/org/apache/cloudstack/api/response/HostForMigrationResponse.java
@@ -0,0 +1,365 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.api.response;
+
+import java.util.Date;
+
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.BaseResponse;
+import org.apache.cloudstack.api.EntityReference;
+
+import com.cloud.host.Host;
+import com.cloud.host.Status;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.serializer.Param;
+import com.google.gson.annotations.SerializedName;
+
+@EntityReference(value=Host.class)
+public class HostForMigrationResponse extends BaseResponse {
+    @SerializedName(ApiConstants.ID) @Param(description="the ID of the host")
+    private String id;
+
+    @SerializedName(ApiConstants.NAME) @Param(description="the name of the host")
+    private String name;
+
+    @SerializedName(ApiConstants.STATE) @Param(description="the state of the host")
+    private Status state;
+
+    @SerializedName("disconnected") @Param(description="true if the host is disconnected. False otherwise.")
+    private Date disconnectedOn;
+
+    @SerializedName(ApiConstants.TYPE) @Param(description="the host type")
+    private Host.Type hostType;
+
+    @SerializedName("oscategoryid") @Param(description="the OS category ID of the host")
+    private String osCategoryId;
+
+    @SerializedName("oscategoryname") @Param(description="the OS category name of the host")
+    private String osCategoryName;
+
+    @SerializedName(ApiConstants.IP_ADDRESS) @Param(description="the IP address of the host")
+    private String ipAddress;
+
+    @SerializedName(ApiConstants.ZONE_ID) @Param(description="the Zone ID of the host")
+    private String zoneId;
+
+    @SerializedName(ApiConstants.ZONE_NAME) @Param(description="the Zone name of the host")
+    private String zoneName;
+
+    @SerializedName(ApiConstants.POD_ID) @Param(description="the Pod ID of the host")
+    private String podId;
+
+    @SerializedName("podname") @Param(description="the Pod name of the host")
+    private String podName;
+
+    @SerializedName("version") @Param(description="the host version")
+    private String version;
+
+    @SerializedName(ApiConstants.HYPERVISOR) @Param(description="the host hypervisor")
+    private HypervisorType hypervisor;
+
+    @SerializedName("cpunumber") @Param(description="the CPU number of the host")
+    private Integer cpuNumber;
+
+    @SerializedName("cpuspeed") @Param(description="the CPU speed of the host")
+    private Long cpuSpeed;
+
+    @SerializedName("cpuallocated") @Param(description="the amount of the host's CPU currently allocated")
+    private String cpuAllocated;
+
+    @SerializedName("cpuused") @Param(description="the amount of the host's CPU currently used")
+    private String cpuUsed;
+
+    @SerializedName("cpuwithoverprovisioning") @Param(description="the amount of the host's CPU after applying the cpu.overprovisioning.factor ")
+    private String cpuWithOverprovisioning;
+
+    @SerializedName("averageload") @Param(description="the cpu average load on the host")
+    private Long averageLoad;
+
+    @SerializedName("networkkbsread") @Param(description="the incoming network traffic on the host")
+    private Long networkKbsRead;
+
+    @SerializedName("networkkbswrite") @Param(description="the outgoing network traffic on the host")
+    private Long networkKbsWrite;
+
+    @SerializedName("memorytotal") @Param(description="the memory total of the host")
+    private Long memoryTotal;
+
+    @SerializedName("memoryallocated") @Param(description="the amount of the host's memory currently allocated")
+    private Long memoryAllocated;
+
+    @SerializedName("memoryused") @Param(description="the amount of the host's memory currently used")
+    private Long memoryUsed;
+
+    @SerializedName("disksizetotal") @Param(description="the total disk size of the host")
+    private Long diskSizeTotal;
+
+    @SerializedName("disksizeallocated") @Param(description="the host's currently allocated disk size")
+    private Long diskSizeAllocated;
+
+    @SerializedName("capabilities") @Param(description="capabilities of the host")
+    private String capabilities;
+
+    @SerializedName("lastpinged") @Param(description="the date and time the host was last pinged")
+    private Date lastPinged;
+
+    @SerializedName("managementserverid") @Param(description="the management server ID of the host")
+    private Long managementServerId;
+
+    @SerializedName("clusterid") @Param(description="the cluster ID of the host")
+    private String clusterId;
+
+    @SerializedName("clustername") @Param(description="the cluster name of the host")
+    private String clusterName;
+
+    @SerializedName("clustertype") @Param(description="the cluster type of the cluster that host belongs to")
+    private String clusterType;
+
+    @SerializedName("islocalstorageactive") @Param(description="true if local storage is active, false otherwise")
+    private Boolean localStorageActive;
+
+    @SerializedName(ApiConstants.CREATED) @Param(description="the date and time the host was created")
+    private Date created;
+
+    @SerializedName("removed") @Param(description="the date and time the host was removed")
+    private Date removed;
+
+    @SerializedName("events") @Param(description="events available for the host")
+    private String events;
+
+    @SerializedName("hosttags") @Param(description="comma-separated list of tags for the host")
+    private String hostTags;
+
+    @SerializedName("hasenoughcapacity") @Param(description="true if this host has enough CPU and RAM capacity to migrate a VM to it, false otherwise")
+    private Boolean hasEnoughCapacity;
+
+    @SerializedName("suitableformigration") @Param(description="true if this host is suitable(has enough capacity and satisfies all conditions like hosttags, max guests vm limit etc) to migrate a VM to it , false otherwise")
+    private Boolean suitableForMigration;
+
+    @SerializedName("requiresStorageMotion") @Param(description="true if migrating a vm to this host requires storage motion, false otherwise")
+    private Boolean requiresStorageMotion;
+
+    @SerializedName("resourcestate") @Param(description="the resource state of the host")
+    private String resourceState;
+
+    @SerializedName(ApiConstants.HYPERVISOR_VERSION) @Param(description="the hypervisor version")
+    private String hypervisorVersion;
+
+    @SerializedName(ApiConstants.HA_HOST) @Param(description="true if the host is an HA host (dedicated to VMs started by the HA process); false otherwise")
+    private Boolean haHost;
+
+    @Override
+    public String getObjectId() {
+        return this.getId();
+    }
+
+    public String getId() {
+        return id;
+    }
+
+    public void setId(String id) {
+        this.id = id;
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    public void setState(Status state) {
+        this.state = state;
+    }
+
+    public void setDisconnectedOn(Date disconnectedOn) {
+        this.disconnectedOn = disconnectedOn;
+    }
+
+    public void setHostType(Host.Type hostType) {
+        this.hostType = hostType;
+    }
+
+    public void setOsCategoryId(String osCategoryId) {
+        this.osCategoryId = osCategoryId;
+    }
+
+    public void setOsCategoryName(String osCategoryName) {
+        this.osCategoryName = osCategoryName;
+    }
+
+    public void setIpAddress(String ipAddress) {
+        this.ipAddress = ipAddress;
+    }
+
+    public void setZoneId(String zoneId) {
+        this.zoneId = zoneId;
+    }
+
+    public void setZoneName(String zoneName) {
+        this.zoneName = zoneName;
+    }
+
+    public void setPodId(String podId) {
+        this.podId = podId;
+    }
+
+    public void setPodName(String podName) {
+        this.podName = podName;
+    }
+
+    public void setVersion(String version) {
+        this.version = version;
+    }
+
+    public void setHypervisor(HypervisorType hypervisor) {
+        this.hypervisor = hypervisor;
+    }
+
+    public void setCpuNumber(Integer cpuNumber) {
+        this.cpuNumber = cpuNumber;
+    }
+
+    public void setCpuSpeed(Long cpuSpeed) {
+        this.cpuSpeed = cpuSpeed;
+    }
+
+    public String getCpuAllocated() {
+        return cpuAllocated;
+    }
+
+    public void setCpuAllocated(String cpuAllocated) {
+        this.cpuAllocated = cpuAllocated;
+    }
+
+    public void setCpuUsed(String cpuUsed) {
+        this.cpuUsed = cpuUsed;
+    }
+
+    public void setAverageLoad(Long averageLoad) {
+        this.averageLoad = averageLoad;
+    }
+
+    public void setNetworkKbsRead(Long networkKbsRead) {
+        this.networkKbsRead = networkKbsRead;
+    }
+
+    public void setNetworkKbsWrite(Long networkKbsWrite) {
+        this.networkKbsWrite = networkKbsWrite;
+    }
+
+    public void setMemoryTotal(Long memoryTotal) {
+        this.memoryTotal = memoryTotal;
+    }
+
+    public void setMemoryAllocated(Long memoryAllocated) {
+        this.memoryAllocated = memoryAllocated;
+    }
+
+    public void setMemoryUsed(Long memoryUsed) {
+        this.memoryUsed = memoryUsed;
+    }
+
+    public void setDiskSizeTotal(Long diskSizeTotal) {
+        this.diskSizeTotal = diskSizeTotal;
+    }
+
+    public void setDiskSizeAllocated(Long diskSizeAllocated) {
+        this.diskSizeAllocated = diskSizeAllocated;
+    }
+
+    public void setCapabilities(String capabilities) {
+        this.capabilities = capabilities;
+    }
+
+    public void setLastPinged(Date lastPinged) {
+        this.lastPinged = lastPinged;
+    }
+
+    public void setManagementServerId(Long managementServerId) {
+        this.managementServerId = managementServerId;
+    }
+
+    public void setClusterId(String clusterId) {
+        this.clusterId = clusterId;
+    }
+
+    public void setClusterName(String clusterName) {
+        this.clusterName = clusterName;
+    }
+
+    public void setClusterType(String clusterType) {
+        this.clusterType = clusterType;
+    }
+
+    public void setLocalStorageActive(Boolean localStorageActive) {
+        this.localStorageActive = localStorageActive;
+    }
+
+    public void setCreated(Date created) {
+        this.created = created;
+    }
+
+    public void setRemoved(Date removed) {
+        this.removed = removed;
+    }
+
+    public void setEvents(String events) {
+        this.events = events;
+    }
+
+    public String getHostTags() {
+        return hostTags;
+    }
+
+    public void setHostTags(String hostTags) {
+        this.hostTags = hostTags;
+    }
+
+    public void setHasEnoughCapacity(Boolean hasEnoughCapacity) {
+        this.hasEnoughCapacity = hasEnoughCapacity;
+    }
+
+    public void setSuitableForMigration(Boolean suitableForMigration) {
+        this.suitableForMigration = suitableForMigration;
+    }
+
+    public void setRequiresStorageMotion(Boolean requiresStorageMotion) {
+        this.requiresStorageMotion = requiresStorageMotion;
+    }
+
+    public String getResourceState() {
+        return resourceState;
+    }
+
+    public void setResourceState(String resourceState) {
+        this.resourceState = resourceState;
+    }
+
+    public String getCpuWithOverprovisioning() {
+        return cpuWithOverprovisioning;
+    }
+
+    public void setCpuWithOverprovisioning(String cpuWithOverprovisioning) {
+        this.cpuWithOverprovisioning = cpuWithOverprovisioning;
+    }
+
+    public void setHypervisorVersion(String hypervisorVersion) {
+        this.hypervisorVersion = hypervisorVersion;
+    }
+
+    public void setHaHost(Boolean haHost) {
+        this.haHost = haHost;
+    }
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/org/apache/cloudstack/api/response/HostResponse.java
----------------------------------------------------------------------
diff --git a/api/src/org/apache/cloudstack/api/response/HostResponse.java b/api/src/org/apache/cloudstack/api/response/HostResponse.java
index f5aa8f9..687687d 100644
--- a/api/src/org/apache/cloudstack/api/response/HostResponse.java
+++ b/api/src/org/apache/cloudstack/api/response/HostResponse.java
@@ -330,7 +330,6 @@ public class HostResponse extends BaseResponse {
         this.hasEnoughCapacity = hasEnoughCapacity;
     }
 
-
     public void setSuitableForMigration(Boolean suitableForMigration) {
         this.suitableForMigration = suitableForMigration;
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/org/apache/cloudstack/api/response/StoragePoolForMigrationResponse.java
----------------------------------------------------------------------
diff --git a/api/src/org/apache/cloudstack/api/response/StoragePoolForMigrationResponse.java b/api/src/org/apache/cloudstack/api/response/StoragePoolForMigrationResponse.java
new file mode 100644
index 0000000..f0bbcb1
--- /dev/null
+++ b/api/src/org/apache/cloudstack/api/response/StoragePoolForMigrationResponse.java
@@ -0,0 +1,248 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.api.response;
+
+import java.util.Date;
+
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.BaseResponse;
+import org.apache.cloudstack.api.EntityReference;
+
+import com.cloud.serializer.Param;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolStatus;
+import com.google.gson.annotations.SerializedName;
+
+@EntityReference(value=StoragePool.class)
+public class StoragePoolForMigrationResponse extends BaseResponse {
+    @SerializedName("id") @Param(description="the ID of the storage pool")
+    private String id;
+
+    @SerializedName("zoneid") @Param(description="the Zone ID of the storage pool")
+    private String zoneId;
+
+    @SerializedName(ApiConstants.ZONE_NAME) @Param(description="the Zone name of the storage pool")
+    private String zoneName;
+
+    @SerializedName("podid") @Param(description="the Pod ID of the storage pool")
+    private String podId;
+
+    @SerializedName("podname") @Param(description="the Pod name of the storage pool")
+    private String podName;
+
+    @SerializedName("name") @Param(description="the name of the storage pool")
+    private String name;
+
+    @SerializedName("ipaddress") @Param(description="the IP address of the storage pool")
+    private String ipAddress;
+
+    @SerializedName("path") @Param(description="the storage pool path")
+    private String path;
+
+    @SerializedName("created") @Param(description="the date and time the storage pool was created")
+    private Date created;
+
+    @SerializedName("type") @Param(description="the storage pool type")
+    private String type;
+
+    @SerializedName("clusterid") @Param(description="the ID of the cluster for the storage pool")
+    private String clusterId;
+
+    @SerializedName("clustername") @Param(description="the name of the cluster for the storage pool")
+    private String clusterName;
+
+    @SerializedName("disksizetotal") @Param(description="the total disk size of the storage pool")
+    private Long diskSizeTotal;
+
+    @SerializedName("disksizeallocated") @Param(description="the host's currently allocated disk size")
+    private Long diskSizeAllocated;
+
+    @SerializedName("disksizeused") @Param(description="the host's currently used disk size")
+    private Long diskSizeUsed;
+
+    @SerializedName("tags") @Param(description="the tags for the storage pool")
+    private String tags;
+
+    @SerializedName(ApiConstants.STATE) @Param(description="the state of the storage pool")
+    private StoragePoolStatus state;
+
+    @SerializedName(ApiConstants.SCOPE) @Param(description="the scope of the storage pool")
+    private String scope;
+
+    @SerializedName("suitableformigration") @Param(description="true if this pool is suitable to migrate a volume," +
+            " false otherwise")
+    private Boolean suitableForMigration;
+
+    /**
+     * @return the scope
+     */
+    public String getScope() {
+        return scope;
+    }
+
+    /**
+     * @param scope the scope to set
+     */
+    public void setScope(String scope) {
+        this.scope = scope;
+    }
+
+    @Override
+    public String getObjectId() {
+        return this.getId();
+    }
+
+    public String getId() {
+        return id;
+    }
+
+    public void setId(String id) {
+        this.id = id;
+    }
+
+    public String getZoneId() {
+        return zoneId;
+    }
+
+    public void setZoneId(String zoneId) {
+        this.zoneId = zoneId;
+    }
+
+    public String getZoneName() {
+        return zoneName;
+    }
+
+    public void setZoneName(String zoneName) {
+        this.zoneName = zoneName;
+    }
+
+    public String getPodId() {
+        return podId;
+    }
+
+    public void setPodId(String podId) {
+        this.podId = podId;
+    }
+
+    public String getPodName() {
+        return podName;
+    }
+
+    public void setPodName(String podName) {
+        this.podName = podName;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    public String getIpAddress() {
+        return ipAddress;
+    }
+
+    public void setIpAddress(String ipAddress) {
+        this.ipAddress = ipAddress;
+    }
+
+    public String getPath() {
+        return path;
+    }
+
+    public void setPath(String path) {
+        this.path = path;
+    }
+
+    public Date getCreated() {
+        return created;
+    }
+
+    public void setCreated(Date created) {
+        this.created = created;
+    }
+
+    public String getType() {
+        return type;
+    }
+
+    public void setType(String type) {
+        this.type = type;
+    }
+
+    public String getClusterId() {
+        return clusterId;
+    }
+
+    public void setClusterId(String clusterId) {
+        this.clusterId = clusterId;
+    }
+
+    public String getClusterName() {
+        return clusterName;
+    }
+
+    public void setClusterName(String clusterName) {
+        this.clusterName = clusterName;
+    }
+
+    public Long getDiskSizeTotal() {
+        return diskSizeTotal;
+    }
+
+    public void setDiskSizeTotal(Long diskSizeTotal) {
+        this.diskSizeTotal = diskSizeTotal;
+    }
+
+    public Long getDiskSizeAllocated() {
+        return diskSizeAllocated;
+    }
+
+    public void setDiskSizeAllocated(Long diskSizeAllocated) {
+        this.diskSizeAllocated = diskSizeAllocated;
+    }
+
+    public Long getDiskSizeUsed() {
+        return diskSizeUsed;
+    }
+
+    public void setDiskSizeUsed(Long diskSizeUsed) {
+        this.diskSizeUsed = diskSizeUsed;
+    }
+
+    public String getTags() {
+        return tags;
+    }
+
+    public void setTags(String tags) {
+        this.tags = tags;
+    }
+
+    public StoragePoolStatus getState() {
+        return state;
+    }
+
+    public void setState(StoragePoolStatus state) {
+        this.state = state;
+    }
+
+    public void setSuitableForMigration(Boolean suitableForMigration) {
+        this.suitableForMigration = suitableForMigration;
+    }
+}

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/api/src/org/apache/cloudstack/api/response/StoragePoolResponse.java
----------------------------------------------------------------------
diff --git a/api/src/org/apache/cloudstack/api/response/StoragePoolResponse.java b/api/src/org/apache/cloudstack/api/response/StoragePoolResponse.java
index 0b16226..e034b17 100644
--- a/api/src/org/apache/cloudstack/api/response/StoragePoolResponse.java
+++ b/api/src/org/apache/cloudstack/api/response/StoragePoolResponse.java
@@ -83,8 +83,6 @@ public class StoragePoolResponse extends BaseResponse {
     @SerializedName(ApiConstants.SCOPE) @Param(description="the scope of the storage pool")
     private String scope;
 
-
-
     /**
      * @return the scope
      */
@@ -239,5 +237,4 @@ public class StoragePoolResponse extends BaseResponse {
     public void setState(StoragePoolStatus state) {
         this.state = state;
     }
-
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/client/tomcatconf/applicationContext.xml.in
----------------------------------------------------------------------
diff --git a/client/tomcatconf/applicationContext.xml.in b/client/tomcatconf/applicationContext.xml.in
index 7487a5e..d2ea380 100644
--- a/client/tomcatconf/applicationContext.xml.in
+++ b/client/tomcatconf/applicationContext.xml.in
@@ -714,6 +714,7 @@
   <bean id="agentMonitor" class="com.cloud.agent.manager.AgentMonitor" />
   <bean id="alertGenerator" class="com.cloud.event.AlertGenerator" />
   <bean id="ancientDataMotionStrategy" class="org.apache.cloudstack.storage.motion.AncientDataMotionStrategy" />
+  <bean id="xenserverStorageMotionStrategy" class="org.apache.cloudstack.storage.motion.XenServerStorageMotionStrategy" />
   <bean id="ancientImageDataStoreProvider" class="org.apache.cloudstack.storage.image.store.AncientImageDataStoreProvider" />
   <bean id="ancientSnapshotStrategy" class="org.apache.cloudstack.storage.snapshot.strategy.AncientSnapshotStrategy" />
   <bean id="apiDBUtils" class="com.cloud.api.ApiDBUtils" />

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/client/tomcatconf/commands.properties.in
----------------------------------------------------------------------
diff --git a/client/tomcatconf/commands.properties.in b/client/tomcatconf/commands.properties.in
index 10fcfe3..b49e1fb 100644
--- a/client/tomcatconf/commands.properties.in
+++ b/client/tomcatconf/commands.properties.in
@@ -69,6 +69,7 @@ changeServiceForVirtualMachine=15
 scaleVirtualMachine=15
 assignVirtualMachine=1
 migrateVirtualMachine=1
+migrateVirtualMachineWithVolume=1
 recoverVirtualMachine=7
 
 #### snapshot commands
@@ -254,6 +255,7 @@ deleteHost=3
 prepareHostForMaintenance=1
 cancelHostMaintenance=1
 listHosts=3
+findHostsForMigration=1
 addSecondaryStorage=1
 updateHostPassword=1
 
@@ -288,6 +290,7 @@ deleteStoragePool=1
 listClusters=3
 enableStorageMaintenance=1
 cancelStorageMaintenance=1
+findStoragePoolsForMigration=1
 
 #### security group commands
 createSecurityGroup=15

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/core/src/com/cloud/hypervisor/HypervisorCapabilitiesVO.java
----------------------------------------------------------------------
diff --git a/core/src/com/cloud/hypervisor/HypervisorCapabilitiesVO.java b/core/src/com/cloud/hypervisor/HypervisorCapabilitiesVO.java
index fafc0a3..6689066 100644
--- a/core/src/com/cloud/hypervisor/HypervisorCapabilitiesVO.java
+++ b/core/src/com/cloud/hypervisor/HypervisorCapabilitiesVO.java
@@ -64,16 +64,21 @@ public class HypervisorCapabilitiesVO implements HypervisorCapabilities {
 
     @Column(name="vm_snapshot_enabled")
     private Boolean vmSnapshotEnabled;
-    
+
+    @Column(name="storage_motion_supported")
+    private boolean storageMotionSupported;
+
     protected HypervisorCapabilitiesVO() {
     	this.uuid = UUID.randomUUID().toString();
     }
 
-    public HypervisorCapabilitiesVO(HypervisorType hypervisorType, String hypervisorVersion, Long maxGuestsLimit, boolean securityGroupEnabled) {
+    public HypervisorCapabilitiesVO(HypervisorType hypervisorType, String hypervisorVersion, Long maxGuestsLimit,
+            boolean securityGroupEnabled, boolean storageMotionSupported) {
         this.hypervisorType = hypervisorType;
         this.hypervisorVersion = hypervisorVersion;
         this.maxGuestsLimit = maxGuestsLimit;
         this.securityGroupEnabled = securityGroupEnabled;
+        this.storageMotionSupported = storageMotionSupported;
     	this.uuid = UUID.randomUUID().toString();
     }
 
@@ -135,6 +140,21 @@ public class HypervisorCapabilitiesVO implements HypervisorCapabilities {
         return maxGuestsLimit;
     }
 
+    /**
+     * @param storageMotionSupported
+     */
+    public void setStorageMotionSupported(boolean storageMotionSupported) {
+        this.storageMotionSupported = storageMotionSupported;
+    }
+
+    /**
+     * @return if storage motion is supported
+     */
+    @Override
+    public boolean isStorageMotionSupported() {
+        return storageMotionSupported;
+    }
+
 
     public long getId() {
         return id;
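
A short sketch of how the new capability flag reads once a row is in place; the constructor signature is the one added above, and the XenServer 6.1.0 values mirror the seed row inserted by schema-410to420.sql:

    import com.cloud.hypervisor.Hypervisor.HypervisorType;
    import com.cloud.hypervisor.HypervisorCapabilitiesVO;

    public class StorageMotionCapabilityCheck {
        public static void main(String[] args) {
            // Values mirror the seeded row: max_guests_limit=50,
            // security_group_enabled=1, storage_motion_supported=1.
            HypervisorCapabilitiesVO capabilities =
                    new HypervisorCapabilitiesVO(HypervisorType.XenServer, "6.1.0", 50L, true, true);
            if (capabilities.isStorageMotionSupported()) {
                System.out.println("VM can be migrated together with its volumes on this hypervisor.");
            } else {
                System.out.println("Only plain migration over shared storage is available.");
            }
        }
    }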

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java
----------------------------------------------------------------------
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java
index f619ef4..94ae800 100644
--- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java
@@ -28,6 +28,7 @@ public interface ObjectInDataStoreStateMachine extends StateObject<ObjectInDataS
         Created("The object is created"),
         Ready("Template downloading is accomplished"),
         Copying("The object is being coping"),
+        Migrating("The object is being migrated"),
         Destroying("Template is destroying"),
         Destroyed("Template is destroyed"),
         Failed("Failed to download template");
@@ -49,6 +50,7 @@ public interface ObjectInDataStoreStateMachine extends StateObject<ObjectInDataS
         OperationSuccessed,
         OperationFailed,
         CopyingRequested,
+        MigrationRequested,
         ResizeRequested,
         ExpungeRequested
         

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java
----------------------------------------------------------------------
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java
index 102c471..3a1fe6a 100644
--- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java
@@ -18,11 +18,12 @@
  */
 package org.apache.cloudstack.engine.subsystem.api.storage;
 
+import java.util.Map;
 import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity;
 import org.apache.cloudstack.framework.async.AsyncCallFuture;
-
+import com.cloud.agent.api.to.VirtualMachineTO;
 import com.cloud.exception.ConcurrentOperationException;
-
+import com.cloud.host.Host;
 
 public interface VolumeService {
     
@@ -70,6 +71,8 @@ public interface VolumeService {
 
     AsyncCallFuture<VolumeApiResult> createVolumeFromTemplateAsync(VolumeInfo volume, long dataStoreId, TemplateInfo template);
     AsyncCallFuture<VolumeApiResult> copyVolume(VolumeInfo srcVolume, DataStore destStore);
+    AsyncCallFuture<VolumeApiResult> migrateVolume(VolumeInfo srcVolume, DataStore destStore);
+    AsyncCallFuture<CommandResult> migrateVolumes(Map<VolumeInfo, DataStore> volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost);
 
     boolean destroyVolume(long volumeId) throws ConcurrentOperationException;
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/21ce3bef/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/DefaultImageMotionStrategy.java
----------------------------------------------------------------------
diff --git a/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/DefaultImageMotionStrategy.java b/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/DefaultImageMotionStrategy.java
index a70fd8a..1c21496 100644
--- a/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/DefaultImageMotionStrategy.java
+++ b/engine/storage/imagemotion/src/org/apache/cloudstack/storage/image/motion/DefaultImageMotionStrategy.java
@@ -18,12 +18,15 @@
  */
 package org.apache.cloudstack.storage.image.motion;
 
+import java.util.Map;
+
 import javax.inject.Inject;
 
 import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
 import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
 import org.apache.cloudstack.framework.async.AsyncRpcConext;
@@ -33,6 +36,8 @@ import org.apache.cloudstack.storage.endpoint.EndPointSelector;
 import org.apache.cloudstack.storage.volume.TemplateOnPrimaryDataStoreInfo;
 
 import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.host.Host;
 
 //At least one of datastore is coming from image store or image cache store
 
@@ -96,6 +101,11 @@ public class DefaultImageMotionStrategy implements ImageMotionStrategy {
     }
 
     @Override
+    public boolean canHandle(Map<VolumeInfo, DataStore> volumeMap, Host srcHost, Host destHost) {
+        return false;
+    }
+
+    @Override
     public Void copyAsync(DataObject srcData, DataObject destData,
             AsyncCompletionCallback<CopyCommandResult> callback) {
         DataStore destStore = destData.getDataStore();
@@ -137,4 +147,12 @@ public class DefaultImageMotionStrategy implements ImageMotionStrategy {
         
     }
 
+    @Override
+    public Void copyAsync(Map<VolumeInfo, DataStore> volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost,
+            AsyncCompletionCallback<CopyCommandResult> callback) {
+        CopyCommandResult result = new CopyCommandResult("", null);
+        result.setResult("not implemented");
+        callback.complete(result);
+        return null;
+    }
 }