Posted to commits@cloudstack.apache.org by ra...@apache.org on 2016/09/13 12:11:02 UTC

[1/4] git commit: updated refs/heads/master to f21477a

Repository: cloudstack
Updated Branches:
  refs/heads/master f31d2ddce -> f21477a17


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/test/integration/plugins/solidfire/TestVMSnapshots.py
----------------------------------------------------------------------
diff --git a/test/integration/plugins/solidfire/TestVMSnapshots.py b/test/integration/plugins/solidfire/TestVMSnapshots.py
index 8fba8f8..14e8e71 100644
--- a/test/integration/plugins/solidfire/TestVMSnapshots.py
+++ b/test/integration/plugins/solidfire/TestVMSnapshots.py
@@ -20,6 +20,8 @@ import random
 import SignedAPICall
 import XenAPI
 
+from util import sf_util
+
 # All tests inherit from cloudstackTestCase
 from marvin.cloudstackTestCase import cloudstackTestCase
 
@@ -36,8 +38,10 @@ from marvin.lib.utils import cleanup_resources
 
 from solidfire import solidfire_element_api as sf_api
 
-# on April 15, 2016: Ran 2 tests in 800.299s with three hosts
-# on May 2, 2016: Ran 2 tests in 789.729s with two hosts
+# Prerequisites:
+#  Only one zone
+#  Only one pod
+#  Only one cluster
 
 
 class TestData:
@@ -328,7 +332,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
             cls.primary_storage.delete(cls.apiClient)
 
-            cls._purge_solidfire_volumes()
+            sf_util.purge_solidfire_volumes(cls.sf_client)
         except Exception as e:
             logging.debug("Exception in tearDownClass(cls): %s" % e)
 
@@ -346,7 +350,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         root_volumes = list_volumes(self.apiClient, type="ROOT", listAll="true")
 
-        self._check_list(root_volumes, 1, TestVMSnapshots._should_only_be_one_root_volume_err_msg)
+        sf_util.check_list(root_volumes, 1, self, TestVMSnapshots._should_only_be_one_root_volume_err_msg)
 
         root_volume = root_volumes[0]
 
@@ -355,7 +359,7 @@ class TestVMSnapshots(cloudstackTestCase):
         sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(volume_id)
         sf_iscsi_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']
 
-        self._check_iscsi_name(sf_iscsi_name)
+        sf_util.check_iscsi_name(sf_iscsi_name, self)
 
         root_volume_path_1 = self._get_path(volume_id)
 
@@ -388,7 +392,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         xen_vdis = self.xen_session.xenapi.SR.get_VDIs(xen_sr)
 
-        self._check_list(xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)
+        sf_util.check_list(xen_vdis, 3, self, TestVMSnapshots._should_be_three_vdis_err_msg)
 
         vdis_after_create = self._get_vdis(xen_vdis)
 
@@ -411,7 +415,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true")
 
-        self._check_list(list_vm_snapshots, 1, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg)
+        sf_util.check_list(list_vm_snapshots, 1, self, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg)
 
         root_volume_path_3 = self._get_path(volume_id)
 
@@ -423,7 +427,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         xen_vdis = self.xen_session.xenapi.SR.get_VDIs(xen_sr)
 
-        self._check_list(xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)
+        sf_util.check_list(xen_vdis, 3, self, TestVMSnapshots._should_be_three_vdis_err_msg)
 
         vdis_after_revert = self._get_vdis(xen_vdis)
 
@@ -470,7 +474,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         xen_vdis = self.xen_session.xenapi.SR.get_VDIs(xen_sr)
 
-        self._check_list(xen_vdis, 1, TestVMSnapshots._should_only_be_one_vdi_err_msg)
+        sf_util.check_list(xen_vdis, 1, self, TestVMSnapshots._should_only_be_one_vdi_err_msg)
 
         vdis_after_delete = self._get_vdis(xen_vdis, True)
 
@@ -505,7 +509,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         root_volumes = list_volumes(self.apiClient, type="ROOT", listAll="true")
 
-        self._check_list(root_volumes, 1, TestVMSnapshots._should_only_be_one_root_volume_err_msg)
+        sf_util.check_list(root_volumes, 1, self, TestVMSnapshots._should_only_be_one_root_volume_err_msg)
 
         root_volume = root_volumes[0]
 
@@ -514,13 +518,13 @@ class TestVMSnapshots(cloudstackTestCase):
         sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(root_volume_id)
         sf_iscsi_root_volume_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']
 
-        self._check_iscsi_name(sf_iscsi_root_volume_name)
+        sf_util.check_iscsi_name(sf_iscsi_root_volume_name, self)
 
         root_volume_path_1 = self._get_path(root_volume_id)
 
         data_volumes = list_volumes(self.apiClient, type="DATADISK", listAll="true")
 
-        self._check_list(data_volumes, 1, "There should only be one data volume.")
+        sf_util.check_list(data_volumes, 1, self, "There should only be one data volume.")
 
         data_volume = data_volumes[0]
 
@@ -529,7 +533,7 @@ class TestVMSnapshots(cloudstackTestCase):
         sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(data_volume_id)
         sf_iscsi_data_volume_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']
 
-        self._check_iscsi_name(sf_iscsi_data_volume_name)
+        sf_util.check_iscsi_name(sf_iscsi_data_volume_name, self)
 
         data_volume_path_1 = self._get_path(data_volume_id)
 
@@ -570,7 +574,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         root_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(root_volume_xen_sr)
 
-        self._check_list(root_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)
+        sf_util.check_list(root_volume_xen_vdis, 3, self, TestVMSnapshots._should_be_three_vdis_err_msg)
 
         root_volume_vdis_after_create = self._get_vdis(root_volume_xen_vdis)
 
@@ -586,7 +590,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         data_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(data_volume_xen_sr)
 
-        self._check_list(data_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)
+        sf_util.check_list(data_volume_xen_vdis, 3, self, TestVMSnapshots._should_be_three_vdis_err_msg)
 
         data_volume_vdis_after_create = self._get_vdis(data_volume_xen_vdis)
 
@@ -609,7 +613,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         list_vm_snapshots = VmSnapshot.list(self.apiClient, listAll="true")
 
-        self._check_list(list_vm_snapshots, 1, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg)
+        sf_util.check_list(list_vm_snapshots, 1, self, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg)
 
         root_volume_path_3 = self._get_path(root_volume_id)
 
@@ -621,7 +625,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         root_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(root_volume_xen_sr)
 
-        self._check_list(root_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)
+        sf_util.check_list(root_volume_xen_vdis, 3, self, TestVMSnapshots._should_be_three_vdis_err_msg)
 
         root_volume_vdis_after_revert = self._get_vdis(root_volume_xen_vdis)
 
@@ -653,7 +657,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         data_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(data_volume_xen_sr)
 
-        self._check_list(data_volume_xen_vdis, 3, TestVMSnapshots._should_be_three_vdis_err_msg)
+        sf_util.check_list(data_volume_xen_vdis, 3, self, TestVMSnapshots._should_be_three_vdis_err_msg)
 
         data_volume_vdis_after_revert = self._get_vdis(data_volume_xen_vdis)
 
@@ -700,7 +704,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         root_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(root_volume_xen_sr)
 
-        self._check_list(root_volume_xen_vdis, 1, TestVMSnapshots._should_only_be_one_vdi_err_msg)
+        sf_util.check_list(root_volume_xen_vdis, 1, self, TestVMSnapshots._should_only_be_one_vdi_err_msg)
 
         root_volume_vdis_after_delete = self._get_vdis(root_volume_xen_vdis, True)
 
@@ -720,7 +724,7 @@ class TestVMSnapshots(cloudstackTestCase):
 
         data_volume_xen_vdis = self.xen_session.xenapi.SR.get_VDIs(data_volume_xen_sr)
 
-        self._check_list(data_volume_xen_vdis, 1, TestVMSnapshots._should_only_be_one_vdi_err_msg)
+        sf_util.check_list(data_volume_xen_vdis, 1, self, TestVMSnapshots._should_only_be_one_vdi_err_msg)
 
         data_volume_vdis_after_delete = self._get_vdis(data_volume_xen_vdis, True)
 
@@ -745,7 +749,7 @@ class TestVMSnapshots(cloudstackTestCase):
         return path_result['apipathforvolume']['path']
 
     def _verify_vm_snapshot(self, list_vm_snapshots, vm_snapshot):
-        self._check_list(list_vm_snapshots, 1, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg)
+        sf_util.check_list(list_vm_snapshots, 1, self, TestVMSnapshots._should_only_be_one_vm_snapshot_err_msg)
 
         vm_snapshot_from_list = list_vm_snapshots[0]
 
@@ -767,26 +771,6 @@ class TestVMSnapshots(cloudstackTestCase):
             "The snapshot is not in the 'Ready' state."
         )
 
-    def _check_iscsi_name(self, sf_iscsi_name):
-        self.assertEqual(
-            sf_iscsi_name[0],
-            "/",
-            "The iSCSI name needs to start with a forward slash."
-        )
-
-    def _check_list(self, in_list, expected_size_of_list, err_msg):
-        self.assertEqual(
-            isinstance(in_list, list),
-            True,
-            "'in_list' is not a list."
-        )
-
-        self.assertEqual(
-            len(in_list),
-            expected_size_of_list,
-            err_msg
-        )
-
     def _get_vdis(self, xen_vdis, only_active_expected=False):
         expected_number_of_vdis = 1 if only_active_expected else 3
 
@@ -852,11 +836,3 @@ class TestVMSnapshots(cloudstackTestCase):
         vdis.base_vdi = base_vdi
 
         return vdis
-
-    @classmethod
-    def _purge_solidfire_volumes(cls):
-        deleted_volumes = cls.sf_client.list_deleted_volumes()
-
-        for deleted_volume in deleted_volumes:
-            cls.sf_client.purge_deleted_volume(deleted_volume['volumeID'])
-
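
The refactoring above replaces each test class's private helpers (_check_list,
_check_iscsi_name, _purge_solidfire_volumes) with shared functions in
util/sf_util.py that take the TestCase instance as an extra argument, so a
helper can report failures through the caller's assert* methods. A minimal
sketch of the resulting call pattern (the class and test names below are
illustrative, not from this commit):

from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.common import list_volumes

from util import sf_util

class ExampleSolidFireTest(cloudstackTestCase):
    def test_root_volume_count(self):
        root_volumes = list_volumes(self.apiClient, type="ROOT", listAll="true")

        # 'self' is passed as the obj_assert parameter so the shared helper
        # can call self.assertEqual(...) and fail this test on a mismatch.
        sf_util.check_list(root_volumes, 1, self, "There should only be one root volume.")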

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/test/integration/plugins/solidfire/TestVolumes.py
----------------------------------------------------------------------
diff --git a/test/integration/plugins/solidfire/TestVolumes.py b/test/integration/plugins/solidfire/TestVolumes.py
index ed7d42a..63b9be1 100644
--- a/test/integration/plugins/solidfire/TestVolumes.py
+++ b/test/integration/plugins/solidfire/TestVolumes.py
@@ -20,6 +20,8 @@ import random
 import SignedAPICall
 import XenAPI
 
+from util import sf_util
+
 # All tests inherit from cloudstackTestCase
 from marvin.cloudstackTestCase import cloudstackTestCase
 
@@ -39,11 +41,13 @@ from marvin.lib.utils import cleanup_resources
 
 from solidfire import solidfire_element_api as sf_api
 
-# on April 14, 2016: Ran 11 tests in 2494.043s with three hosts (resign = True)
-# on April 14, 2016: Ran 11 tests in 2033.516s with three hosts (resign = False)
-
-# on May 2, 2016: Ran 11 tests in 2352.461s with two hosts (resign = True)
-# on May 2, 2016: Ran 11 tests in 1982.066s with two hosts (resign = False)
+# Prerequisites:
+#  Only one zone
+#  Only one pod
+#  Only one cluster
+#
+# Running the tests:
+#  Change the "supports_resign" variable to True or False as desired.
 
 
 class TestData():
@@ -145,7 +149,7 @@ class TestData():
                 "miniops": "10000",
                 "maxiops": "15000",
                 "hypervisorsnapshotreserve": 200,
-                "tags": "SolidFire_SAN_1"
+                TestData.tags: TestData.storageTag
             },
             TestData.diskOffering: {
                 "name": "SF_DO_1",
@@ -158,71 +162,6 @@ class TestData():
                 TestData.tags: TestData.storageTag,
                 "storagetype": "shared"
             },
-            "testdiskofferings": {
-                "customiopsdo": {
-                    "name": "SF_Custom_Iops_DO",
-                    "displaytext": "Customized Iops DO",
-                    "disksize": 128,
-                    "customizediops": True,
-                    "miniops": 500,
-                    "maxiops": 1000,
-                    "hypervisorsnapshotreserve": 200,
-                    TestData.tags: TestData.storageTag,
-                    "storagetype": "shared"
-                },
-                "customsizedo": {
-                    "name": "SF_Custom_Size_DO",
-                    "displaytext": "Customized Size DO",
-                    "disksize": 175,
-                    "customizediops": False,
-                    "miniops": 500,
-                    "maxiops": 1000,
-                    "hypervisorsnapshotreserve": 200,
-                    TestData.tags: TestData.storageTag,
-                    "storagetype": "shared"
-                },
-                "customsizeandiopsdo": {
-                    "name": "SF_Custom_Iops_Size_DO",
-                    "displaytext": "Customized Size and Iops DO",
-                    "disksize": 200,
-                    "customizediops": True,
-                    "miniops": 400,
-                    "maxiops": 800,
-                    "hypervisorsnapshotreserve": 200,
-                    TestData.tags: TestData.storageTag,
-                    "storagetype": "shared"
-                },
-                "newiopsdo": {
-                    "name": "SF_New_Iops_DO",
-                    "displaytext": "New Iops (min=350, max = 700)",
-                    "disksize": 128,
-                    "miniops": 350,
-                    "maxiops": 700,
-                    "hypervisorsnapshotreserve": 200,
-                    TestData.tags: TestData.storageTag,
-                    "storagetype": "shared"
-                },
-                "newsizedo": {
-                    "name": "SF_New_Size_DO",
-                    "displaytext": "New Size: 175",
-                    "disksize": 175,
-                    "miniops": 400,
-                    "maxiops": 800,
-                    "hypervisorsnapshotreserve": 200,
-                    TestData.tags: TestData.storageTag,
-                    "storagetype": "shared"
-                },
-                "newsizeandiopsdo": {
-                    "name": "SF_New_Size_Iops_DO",
-                    "displaytext": "New Size and Iops",
-                    "disksize": 200,
-                    "miniops": 200,
-                    "maxiops": 400,
-                    "hypervisorsnapshotreserve": 200,
-                    TestData.tags: TestData.storageTag,
-                    "storagetype": "shared"
-                }
-            },
             TestData.volume_1: {
                 TestData.diskName: "test-volume",
             },
@@ -241,14 +180,11 @@ class TestVolumes(cloudstackTestCase):
     _should_only_be_one_vm_in_list_err_msg = "There should only be one VM in this list."
     _should_only_be_one_volume_in_list_err_msg = "There should only be one volume in this list."
     _sf_account_id_should_be_non_zero_int_err_msg = "The SolidFire account ID should be a non-zero integer."
-    _vag_id_should_be_non_zero_int_err_msg = "The SolidFire VAG ID should be a non-zero integer."
     _volume_size_should_be_non_zero_int_err_msg = "The SolidFire volume size should be a non-zero integer."
     _volume_vm_id_and_vm_id_do_not_match_err_msg = "The volume's VM ID and the VM's ID do not match."
     _vm_not_in_running_state_err_msg = "The VM is not in the 'Running' state."
     _vm_not_in_stopped_state_err_msg = "The VM is not in the 'Stopped' state."
-    _sr_not_shared_err_msg = "The SR is not shared."
     _volume_response_should_not_be_zero_err_msg = "The length of the response for the SolidFire-volume query should not be zero."
-    _list_should_be_empty = "The list should be empty."
     _volume_should_not_be_in_a_vag = "The volume should not be in a volume access group."
 
     @classmethod
@@ -262,7 +198,7 @@ class TestVolumes(cloudstackTestCase):
 
         cls.supports_resign = True
 
-        cls._set_supports_resign()
+        sf_util.set_supports_resign(cls.supports_resign, cls.dbConnection)
 
         # Set up xenAPI connection
         host_ip = "https://" + \
@@ -368,7 +304,7 @@ class TestVolumes(cloudstackTestCase):
 
             cls.primary_storage.delete(cls.apiClient)
 
-            cls._purge_solidfire_volumes()
+            sf_util.purge_solidfire_volumes(cls.sf_client)
         except Exception as e:
             logging.debug("Exception in tearDownClass(cls): %s" % e)
 
@@ -387,9 +323,9 @@ class TestVolumes(cloudstackTestCase):
         if self.supports_resign == False:
             return
 
-        sf_volumes = self._get_sf_volumes()
+        sf_volumes = self._get_active_sf_volumes()
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, TestData.templateCacheName)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, TestData.templateCacheName, self)
 
         self.assertEqual(
             len(sf_volume['volumeAccessGroups']),
@@ -451,21 +387,23 @@ class TestVolumes(cloudstackTestCase):
             TestVolumes._vm_not_in_running_state_err_msg
         )
 
-        sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
+
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, new_volume, self)
 
-        sf_volume_size = self._get_volume_size_with_hsr(new_volume)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_vag_id = self._get_vag_id()
+        sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
 
-        sf_iscsi_name = self._get_iqn(new_volume)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, new_volume, self)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, newvolume.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, newvolume.name, self)
 
-        self._check_size_and_iops(sf_volume, newvolume, sf_volume_size)
+        sf_util.check_size_and_iops(sf_volume, newvolume, sf_volume_size, self)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
@@ -481,9 +419,9 @@ class TestVolumes(cloudstackTestCase):
 
         self.virtual_machine.start(self.apiClient)
 
-        sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
 
-        sf_vag_id = self._get_vag_id()
+        sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
 
         self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
 
@@ -516,17 +454,19 @@ class TestVolumes(cloudstackTestCase):
             TestVolumes._vm_not_in_running_state_err_msg
         )
 
-        sf_iscsi_name = self._get_iqn(self.volume)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
 
-        sf_volume_size = self._get_volume_size_with_hsr(self.volume)
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
+
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
@@ -559,9 +499,9 @@ class TestVolumes(cloudstackTestCase):
             str(vm.state)
         )
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
         self.assertEqual(
             len(sf_volume['volumeAccessGroups']),
@@ -600,11 +540,11 @@ class TestVolumes(cloudstackTestCase):
             TestVolumes._vm_not_in_running_state_err_msg
         )
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
@@ -614,9 +554,9 @@ class TestVolumes(cloudstackTestCase):
 
         self.virtual_machine.start(self.apiClient)
 
-        sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
 
-        sf_vag_id = self._get_vag_id()
+        sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
 
         self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
 
@@ -649,17 +589,19 @@ class TestVolumes(cloudstackTestCase):
             TestVolumes._vm_not_in_running_state_err_msg
         )
 
-        sf_iscsi_name = self._get_iqn(self.volume)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
+
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
 
-        sf_volume_size = self._get_volume_size_with_hsr(self.volume)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
@@ -674,17 +616,19 @@ class TestVolumes(cloudstackTestCase):
 
         vm = self._get_vm(self.virtual_machine.id)
 
-        sf_iscsi_name = self._get_iqn(self.volume)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
 
-        sf_volume_size = self._get_volume_size_with_hsr(self.volume)
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
+
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
@@ -694,9 +638,9 @@ class TestVolumes(cloudstackTestCase):
 
         self.virtual_machine.start(self.apiClient)
 
-        sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
 
-        sf_vag_id = self._get_vag_id()
+        sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
 
         self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
 
@@ -729,17 +673,19 @@ class TestVolumes(cloudstackTestCase):
             TestVolumes._vm_not_in_running_state_err_msg
         )
 
-        sf_iscsi_name = self._get_iqn(self.volume)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
+
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
 
-        sf_volume_size = self._get_volume_size_with_hsr(self.volume)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
@@ -772,9 +718,9 @@ class TestVolumes(cloudstackTestCase):
             TestVolumes._vm_not_in_running_state_err_msg
         )
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
         self.assertEqual(
             len(sf_volume['volumeAccessGroups']),
@@ -796,9 +742,9 @@ class TestVolumes(cloudstackTestCase):
 
         vm = self._get_vm(self.virtual_machine.id)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
         self.assertEqual(
             len(sf_volume['volumeAccessGroups']),
@@ -814,9 +760,9 @@ class TestVolumes(cloudstackTestCase):
 
         self.virtual_machine.start(self.apiClient)
 
-        sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
 
-        sf_vag_id = self._get_vag_id()
+        sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
 
         self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
 
@@ -849,17 +795,19 @@ class TestVolumes(cloudstackTestCase):
             TestVolumes._vm_not_in_running_state_err_msg
         )
 
-        sf_iscsi_name = self._get_iqn(self.volume)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
 
-        sf_volume_size = self._get_volume_size_with_hsr(self.volume)
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
+
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
@@ -894,9 +842,9 @@ class TestVolumes(cloudstackTestCase):
             TestVolumes._vm_not_in_stopped_state_err_msg
         )
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
         self.assertEqual(
             len(sf_volume['volumeAccessGroups']),
@@ -918,9 +866,9 @@ class TestVolumes(cloudstackTestCase):
 
         vm = self._get_vm(self.virtual_machine.id)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
         self.assertEqual(
             len(sf_volume['volumeAccessGroups']),
@@ -936,9 +884,9 @@ class TestVolumes(cloudstackTestCase):
 
         self.virtual_machine.stop(self.apiClient)
 
-        sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
 
-        sf_vag_id = self._get_vag_id()
+        sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
 
         self._check_and_get_cs_volume(self.volume.id, self.testdata[TestData.volume_1][TestData.diskName])
 
@@ -971,17 +919,19 @@ class TestVolumes(cloudstackTestCase):
             TestVolumes._vm_not_in_stopped_state_err_msg
         )
 
-        sf_iscsi_name = self._get_iqn(self.volume)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
+
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
 
-        sf_volume_size = self._get_volume_size_with_hsr(self.volume)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
@@ -1003,17 +953,19 @@ class TestVolumes(cloudstackTestCase):
             TestVolumes._vm_not_in_running_state_err_msg
         )
 
-        sf_iscsi_name = self._get_iqn(self.volume)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
+
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
 
-        sf_volume_size = self._get_volume_size_with_hsr(self.volume)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
@@ -1061,21 +1013,23 @@ class TestVolumes(cloudstackTestCase):
             TestVolumes._vm_not_in_running_state_err_msg
         )
 
-        sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
 
-        sf_volume_size = self._get_volume_size_with_hsr(self.volume)
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
 
-        sf_vag_id = self._get_vag_id()
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_iscsi_name = self._get_iqn(self.volume)
+        sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
+
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
@@ -1114,11 +1068,11 @@ class TestVolumes(cloudstackTestCase):
             "Check if VM was actually expunged"
         )
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
 
         self.assertEqual(
             len(sf_volume['volumeAccessGroups']),
@@ -1174,21 +1128,23 @@ class TestVolumes(cloudstackTestCase):
             str(vm.state)
         )
 
-        sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
+
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, new_volume, self)
 
-        sf_volume_size = self._get_volume_size_with_hsr(new_volume)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_vag_id = self._get_vag_id()
+        sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
 
-        sf_iscsi_name = self._get_iqn(new_volume)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, new_volume, self)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
@@ -1219,11 +1175,11 @@ class TestVolumes(cloudstackTestCase):
             str(vm.state)
         )
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
 
         self.assertEqual(
             len(sf_volume['volumeAccessGroups']),
@@ -1246,9 +1202,9 @@ class TestVolumes(cloudstackTestCase):
             "Check volume was deleted"
         )
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        self._check_and_get_sf_volume(sf_volumes, vol.name, False)
+        sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self, False)
 
     @attr(hypervisor='XenServer')
     def test_09_attach_volumes_multiple_accounts(self):
@@ -1342,39 +1298,43 @@ class TestVolumes(cloudstackTestCase):
             str(test_vm.state)
         )
 
-        sf_vag_id = self._get_vag_id()
+        sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
 
-        sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        sf_volume_size = self._get_volume_size_with_hsr(vol)
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, vol, self)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_iscsi_name = self._get_iqn(self.volume)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
+
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
+
+        sf_test_account_id = sf_util.get_sf_account_id(self.cs_api, test_account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
 
-        sf_test_account_id = self._get_sf_account_id(self.primary_storage.id, test_account.id)
+        sf_test_volumes = self._get_active_sf_volumes(sf_test_account_id)
 
-        sf_test_volumes = self._get_sf_volumes(sf_test_account_id)
+        sf_test_volume = sf_util.check_and_get_sf_volume(sf_test_volumes, test_vol.name, self)
 
-        sf_test_volume = self._check_and_get_sf_volume(sf_test_volumes, test_vol.name)
+        sf_test_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, test_vol, self)
 
-        sf_test_volume_size = self._get_volume_size_with_hsr(test_vol)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_test_volume_size)
 
-        self._check_size_and_iops(sf_test_volume, test_vol, sf_test_volume_size)
+        sf_util.check_size_and_iops(sf_test_volume, test_vol, sf_test_volume_size, self)
 
-        sf_test_iscsi_name = self._get_iqn(test_volume)
+        sf_test_iscsi_name = sf_util.get_iqn(self.cs_api, test_volume, self)
 
         self._check_xen_sr(sf_test_iscsi_name)
 
-        self._check_vag(sf_test_volume, sf_vag_id)
+        sf_util.check_vag(sf_test_volume, sf_vag_id, self)
 
     @attr(hypervisor='XenServer')
     def test_10_attach_more_than_one_disk_to_VM(self):
@@ -1417,66 +1377,50 @@ class TestVolumes(cloudstackTestCase):
 
         vol_2 = self._check_and_get_cs_volume(volume_2.id, self.testdata[TestData.volume_2][TestData.diskName])
 
-        sf_account_id = self._get_sf_account_id(self.primary_storage.id, self.account.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestVolumes._sf_account_id_should_be_non_zero_int_err_msg)
+
+        sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api, self.volume, self)
+
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_size)
 
-        sf_volume_size = self._get_volume_size_with_hsr(self.volume)
+        sf_volume_2_size = sf_util.get_volume_size_with_hsr(self.cs_api, volume_2, self)
 
-        sf_volume_2_size = self._get_volume_size_with_hsr(volume_2)
+        self._verify_hsr(self.disk_offering.disksize, self.disk_offering.hypervisorsnapshotreserve, sf_volume_2_size)
 
-        sf_vag_id = self._get_vag_id()
+        sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)
 
-        sf_volumes = self._get_sf_volumes(sf_account_id)
+        sf_volumes = self._get_active_sf_volumes(sf_account_id)
 
-        sf_volume = self._check_and_get_sf_volume(sf_volumes, vol.name)
+        sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, vol.name, self)
 
-        self._check_size_and_iops(sf_volume, vol, sf_volume_size)
+        sf_util.check_size_and_iops(sf_volume, vol, sf_volume_size, self)
 
-        sf_iscsi_name = self._get_iqn(self.volume)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, self.volume, self)
 
         self._check_xen_sr(sf_iscsi_name)
 
-        self._check_vag(sf_volume, sf_vag_id)
+        sf_util.check_vag(sf_volume, sf_vag_id, self)
 
-        sf_volume_2 = self._check_and_get_sf_volume(sf_volumes, vol_2.name)
+        sf_volume_2 = sf_util.check_and_get_sf_volume(sf_volumes, vol_2.name, self)
 
-        self._check_size_and_iops(sf_volume_2, vol_2, sf_volume_2_size)
+        sf_util.check_size_and_iops(sf_volume_2, vol_2, sf_volume_2_size, self)
 
-        sf_iscsi_name_2 = self._get_iqn(volume_2)
+        sf_iscsi_name_2 = sf_util.get_iqn(self.cs_api, volume_2, self)
 
         self._check_xen_sr(sf_iscsi_name_2)
 
-        self._check_vag(sf_volume_2, sf_vag_id)
+        sf_util.check_vag(sf_volume_2, sf_vag_id, self)
 
         self.virtual_machine.detach_volume(self.apiClient, volume_2)
 
     '''
     @attr(hypervisor = 'XenServer')
-    def _test_11_attach_disk_to_running_vm_change_iops(self):
+    def test_11_attach_disk_to_running_vm_change_iops(self):
         Attach a disk to a running VM, then change iops
         self.custom_iops_disk_offering = DiskOffering.create(
             
         )'''
 
-    def _check_list(self, in_list, expected_size_of_list, err_msg):
-        self.assertEqual(
-            isinstance(in_list, list),
-            True,
-            "'in_list' is not a list."
-        )
-
-        self.assertEqual(
-            len(in_list),
-            expected_size_of_list,
-            err_msg
-        )
-
-    def _check_iscsi_name(self, sf_iscsi_name):
-        self.assertEqual(
-            sf_iscsi_name[0],
-            "/",
-            "The iSCSI name needs to start with a forward slash."
-        )
-
     def _check_volume(self, volume, volume_name):
         self.assertTrue(
             volume.name.startswith(volume_name),
@@ -1501,45 +1445,13 @@ class TestVolumes(cloudstackTestCase):
             "The storage type is incorrect."
         )
 
-    def _check_size_and_iops(self, sf_volume, volume, size):
-        self.assertEqual(
-            sf_volume['qos']['minIOPS'],
-            volume.miniops,
-            "Check QOS - Min IOPS: " + str(sf_volume['qos']['minIOPS'])
-        )
-
-        self.assertEqual(
-            sf_volume['qos']['maxIOPS'],
-            volume.maxiops,
-            "Check QOS - Max IOPS: " + str(sf_volume['qos']['maxIOPS'])
-        )
-
-        self.assertEqual(
-            sf_volume['totalSize'],
-            size,
-            "Check SF volume size: " + str(sf_volume['totalSize'])
-        )
-
-    def _check_vag(self, sf_volume, sf_vag_id):
-        self.assertEqual(
-            len(sf_volume['volumeAccessGroups']),
-            1,
-            "The volume should only be in one VAG."
-        )
-
-        self.assertEqual(
-            sf_volume['volumeAccessGroups'][0],
-            sf_vag_id,
-            "The volume is not in the VAG with the following ID: " + str(sf_vag_id) + "."
-        )
-
     def _check_and_get_cs_volume(self, volume_id, volume_name):
         list_volumes_response = list_volumes(
             self.apiClient,
             id=volume_id
         )
 
-        self._check_list(list_volumes_response, 1, TestVolumes._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(list_volumes_response, 1, self, TestVolumes._should_only_be_one_volume_in_list_err_msg)
 
         cs_volume = list_volumes_response[0]
 
@@ -1547,108 +1459,37 @@ class TestVolumes(cloudstackTestCase):
 
         return cs_volume
 
-    def _get_sf_account_id(self, primary_storage_id, account_id):
-        sf_account_id_request = {'storageid': primary_storage_id, 'accountid': account_id}
-        sf_account_id_result = self.cs_api.getSolidFireAccountId(sf_account_id_request)
-        sf_account_id = sf_account_id_result['apisolidfireaccountid']['solidFireAccountId']
-
-        self.assertEqual(
-            isinstance(sf_account_id, int),
-            True,
-            TestVolumes._sf_account_id_should_be_non_zero_int_err_msg
-        )
-
-        return sf_account_id
-
-    def _get_volume_size_with_hsr(self, cs_volume):
-        # Get underlying SF volume size with hypervisor snapshot reserve
-        sf_volume_size_request = {'volumeid': cs_volume.id}
-        sf_volume_size_result = self.cs_api.getSolidFireVolumeSize(sf_volume_size_request)
-        sf_volume_size = sf_volume_size_result['apisolidfirevolumesize']['solidFireVolumeSize']
-
-        self.assertEqual(
-            isinstance(sf_volume_size, int),
-            True,
-            "The SolidFire volume size should be a non-zero integer."
-        )
-
-        return sf_volume_size
-
-    def _get_vag_id(self):
-        # Get SF Volume Access Group ID
-        sf_vag_id_request = {'clusterid': self.cluster.id, 'storageid': self.primary_storage.id}
-        sf_vag_id_result = self.cs_api.getSolidFireVolumeAccessGroupId(sf_vag_id_request)
-        sf_vag_id = sf_vag_id_result['apisolidfirevolumeaccessgroupid']['solidFireVolumeAccessGroupId']
+    def _verify_hsr(self, cs_volume_size_in_gb, hsr, sf_volume_size_in_bytes):
+        cs_volume_size_including_hsr_in_bytes = self._get_cs_volume_size_including_hsr_in_bytes(cs_volume_size_in_gb, hsr)
 
-        self.assertEqual(
-            isinstance(sf_vag_id, int),
-            True,
-            TestVolumes._vag_id_should_be_non_zero_int_err_msg
-        )
+        self.assertTrue(
+            cs_volume_size_including_hsr_in_bytes == sf_volume_size_in_bytes,
+            "HSR does not add up correctly."
+        )
 
-        return sf_vag_id
+    def _get_cs_volume_size_including_hsr_in_bytes(self, cs_volume_size_in_gb, hsr):
+        lowest_hsr = 10
 
-    def _get_iqn(self, volume):
-        # Get volume IQN
-        sf_iscsi_name_request = {'volumeid': volume.id}
-        sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(sf_iscsi_name_request)
-        sf_iscsi_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']
+        if hsr < lowest_hsr:
+            hsr = lowest_hsr
 
-        self._check_iscsi_name(sf_iscsi_name)
+        return self._get_bytes_from_gb(cs_volume_size_in_gb + (cs_volume_size_in_gb * (hsr / 100.0)))  # float division: avoid Python 2 integer truncation
 
-        return sf_iscsi_name
+    def _get_bytes_from_gb(self, number_in_gb):
+        return number_in_gb * 1024 * 1024 * 1024
 
     def _get_vm(self, vm_id):
         list_vms_response = list_virtual_machines(self.apiClient, id=vm_id)
 
-        self._check_list(list_vms_response, 1, TestVolumes._should_only_be_one_vm_in_list_err_msg)
+        sf_util.check_list(list_vms_response, 1, self, TestVolumes._should_only_be_one_vm_in_list_err_msg)
 
         return list_vms_response[0]
 
-    def _check_and_get_sf_volume(self, sf_volumes, sf_volume_name, should_exist=True):
-        sf_volume = None
-
-        for volume in sf_volumes:
-            if volume['name'] == sf_volume_name:
-                sf_volume = volume
-                break
-
-        if should_exist:
-            self.assertNotEqual(
-                sf_volume,
-                None,
-                "Check if SF volume was created in correct account: " + str(sf_volumes)
-            )
-        else:
-            self.assertEqual(
-                sf_volume,
-                None,
-                "Check if SF volume was deleted: " + str(sf_volumes)
-            )
-
-        return sf_volume
-
     def _check_xen_sr(self, xen_sr_name, should_exist=True):
-        if should_exist:
-            xen_sr = self.xen_session.xenapi.SR.get_by_name_label(xen_sr_name)[0]
-
-            self.sr_shared = self.xen_session.xenapi.SR.get_shared(xen_sr)
-
-            self.assertEqual(
-                self.sr_shared,
-                True,
-                TestVolumes._sr_not_shared_err_msg
-            )
-        else:
-            xen_sr = self.xen_session.xenapi.SR.get_by_name_label(xen_sr_name)
+        sf_util.check_xen_sr(xen_sr_name, self.xen_session, self, should_exist)
 
-            self._check_list(xen_sr, 0, TestVolumes._list_should_be_empty)
-
-    def _get_sf_volumes(self, sf_account_id=None):
-        if sf_account_id is not None:
-            sf_volumes = self.sf_client.list_volumes_for_account(sf_account_id)
-        else:
-            sf_volumes = self.sf_client.list_active_volumes()
+    def _get_active_sf_volumes(self, sf_account_id=None):
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
         self.assertNotEqual(
             len(sf_volumes),
@@ -1657,20 +1498,3 @@ class TestVolumes(cloudstackTestCase):
         )
 
         return sf_volumes
-
-    @classmethod
-    def _set_supports_resign(cls):
-        supports_resign = str(cls.supports_resign)
-
-        sql_query = "Update host_details Set value = '" + supports_resign + "' Where name = 'supportsResign'"
-
-        # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
-        cls.dbConnection.execute(sql_query)
-
-    @classmethod
-    def _purge_solidfire_volumes(cls):
-        deleted_volumes = cls.sf_client.list_deleted_volumes()
-
-        for deleted_volume in deleted_volumes:
-            cls.sf_client.purge_deleted_volume(deleted_volume['volumeID'])
-
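
The new _verify_hsr check asserts that the backing SolidFire volume is sized to
the CloudStack volume size plus its hypervisor snapshot reserve (HSR), with a
floor of 10 percent. A standalone sketch of the same arithmetic (the function
name and the 128 GB example value are illustrative, not from this commit):

def expected_size_with_hsr_in_bytes(cs_volume_size_in_gb, hsr):
    lowest_hsr = 10  # the check never assumes a reserve below 10 percent

    if hsr < lowest_hsr:
        hsr = lowest_hsr

    # Float division keeps percentages below 100 from truncating to zero
    # under Python 2 integer division.
    size_in_gb = cs_volume_size_in_gb + (cs_volume_size_in_gb * (hsr / 100.0))

    return int(size_in_gb * 1024 * 1024 * 1024)

# A 128 GB volume with hypervisorsnapshotreserve=200 should be backed by a
# 128 + 256 = 384 GB SolidFire volume:
assert expected_size_with_hsr_in_bytes(128, 200) == 412316860416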

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/test/integration/plugins/solidfire/util/sf_util.py
----------------------------------------------------------------------
diff --git a/test/integration/plugins/solidfire/util/sf_util.py b/test/integration/plugins/solidfire/util/sf_util.py
new file mode 100644
index 0000000..6629571
--- /dev/null
+++ b/test/integration/plugins/solidfire/util/sf_util.py
@@ -0,0 +1,217 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+def check_list(in_list, expected_size_of_list, obj_assert, err_msg):
+    obj_assert.assertEqual(
+        isinstance(in_list, list),
+        True,
+        "'in_list' is not a list."
+    )
+
+    obj_assert.assertEqual(
+        len(in_list),
+        expected_size_of_list,
+        err_msg
+    )
+
+def get_sf_account_id(cs_api, cs_account_id, primary_storage_id, obj_assert, err_msg):
+    sf_account_id_request = {'accountid': cs_account_id, 'storageid': primary_storage_id}
+    sf_account_id_result = cs_api.getSolidFireAccountId(sf_account_id_request)
+    sf_account_id = sf_account_id_result['apisolidfireaccountid']['solidFireAccountId']
+
+    obj_assert.assertEqual(
+        isinstance(sf_account_id, int),
+        True,
+        err_msg
+    )
+
+    return sf_account_id
+
+def get_iqn(cs_api, volume, obj_assert):
+    # Get volume IQN
+    sf_iscsi_name_request = {'volumeid': volume.id}
+    sf_iscsi_name_result = cs_api.getVolumeiScsiName(sf_iscsi_name_request)
+    sf_iscsi_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']
+
+    check_iscsi_name(sf_iscsi_name, obj_assert)
+
+    return sf_iscsi_name
+
+def check_iscsi_name(sf_iscsi_name, obj_assert):
+    obj_assert.assertEqual(
+        sf_iscsi_name[0],
+        "/",
+        "The iSCSI name needs to start with a forward slash."
+    )
+
+def set_supports_resign(supports_resign, db_connection):
+    _set_supports_resign_for_table(supports_resign, db_connection, "host_details")
+    _set_supports_resign_for_table(supports_resign, db_connection, "cluster_details")
+
+def _set_supports_resign_for_table(supports_resign, db_connection, table):
+    sql_query = "Update " + str(table) + " Set value = '" + str(supports_resign) + "' Where name = 'supportsResign'"
+
+    # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
+    db_connection.execute(sql_query)
+
+def purge_solidfire_volumes(sf_client):
+    deleted_volumes = sf_client.list_deleted_volumes()
+
+    for deleted_volume in deleted_volumes:
+        sf_client.purge_deleted_volume(deleted_volume['volumeID'])
+
+def get_not_active_sf_volumes(sf_client, sf_account_id=None):
+    if sf_account_id is not None:
+        sf_volumes = sf_client.list_volumes_for_account(sf_account_id)
+
+        if sf_volumes is not None and len(sf_volumes) > 0:
+            sf_volumes = _get_not_active_sf_volumes_only(sf_volumes)
+    else:
+        sf_volumes = sf_client.list_deleted_volumes()
+
+    return sf_volumes
+
+def _get_not_active_sf_volumes_only(sf_volumes):
+    not_active_sf_volumes_only = []
+
+    for sf_volume in sf_volumes:
+        if sf_volume["status"] != "active":
+            not_active_sf_volumes_only.append(sf_volume)
+
+    return not_active_sf_volumes_only
+
+def get_active_sf_volumes(sf_client, sf_account_id=None):
+    if sf_account_id is not None:
+        sf_volumes = sf_client.list_volumes_for_account(sf_account_id)
+
+        if sf_volumes is not None and len(sf_volumes) > 0:
+            sf_volumes = _get_active_sf_volumes_only(sf_volumes)
+    else:
+        sf_volumes = sf_client.list_active_volumes()
+
+    return sf_volumes
+
+def _get_active_sf_volumes_only(sf_volumes):
+    active_sf_volumes_only = []
+
+    for sf_volume in sf_volumes:
+        if sf_volume["status"] == "active":
+            active_sf_volumes_only.append(sf_volume)
+
+    return active_sf_volumes_only
+
+def check_and_get_sf_volume(sf_volumes, sf_volume_name, obj_assert, should_exist=True):
+    sf_volume = None
+
+    for volume in sf_volumes:
+        if volume['name'] == sf_volume_name:
+            sf_volume = volume
+            break
+
+    if should_exist:
+        obj_assert.assertNotEqual(
+            sf_volume,
+            None,
+            "Check if SF volume was created in correct account: " + str(sf_volumes)
+        )
+    else:
+        obj_assert.assertEqual(
+            sf_volume,
+            None,
+            "Check if SF volume was deleted: " + str(sf_volumes)
+        )
+
+    return sf_volume
+
+def check_xen_sr(xen_sr_name, xen_session, obj_assert, should_exist=True):
+    xen_sr = xen_session.xenapi.SR.get_by_name_label(xen_sr_name)
+
+    if should_exist:
+        check_list(xen_sr, 1, obj_assert, "SR " + xen_sr_name + " doesn't exist, but should.")
+
+        sr_shared = xen_session.xenapi.SR.get_shared(xen_sr[0])
+
+        obj_assert.assertEqual(
+            sr_shared,
+            True,
+            "SR " + xen_sr_name + " is not shared, but should be."
+        )
+    else:
+        check_list(xen_sr, 0, obj_assert, "SR " + xen_sr_name + " exists, but shouldn't.")
+
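+# Assert that the volume belongs to exactly one volume access group and
+# that it is the expected one.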
+def check_vag(sf_volume, sf_vag_id, obj_assert):
+    obj_assert.assertEqual(
+        len(sf_volume['volumeAccessGroups']),
+        1,
+        "The volume should only be in one VAG."
+    )
+
+    obj_assert.assertEqual(
+        sf_volume['volumeAccessGroups'][0],
+        sf_vag_id,
+        "The volume is not in the VAG with the following ID: " + str(sf_vag_id) + "."
+    )
+
+def get_vag_id(cs_api, cluster_id, primary_storage_id, obj_assert):
+    # Get SF Volume Access Group ID
+    sf_vag_id_request = {'clusterid': cluster_id, 'storageid': primary_storage_id}
+    sf_vag_id_result = cs_api.getSolidFireVolumeAccessGroupId(sf_vag_id_request)
+    sf_vag_id = sf_vag_id_result['apisolidfirevolumeaccessgroupid']['solidFireVolumeAccessGroupId']
+
+    obj_assert.assertEqual(
+        isinstance(sf_vag_id, int) and sf_vag_id != 0,
+        True,
+        "The SolidFire VAG ID should be a non-zero integer."
+    )
+
+    return sf_vag_id
+
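+# Build the iSCSI path CloudStack stores for a SolidFire volume; the
+# leading slash is what check_iscsi_name above verifies.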
+def format_iqn(iqn):
+    return "/" + iqn + "/0"
+
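+# Compare a SolidFire volume's QoS settings and total size (in bytes)
+# against the CloudStack volume and an expected size.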
+def check_size_and_iops(sf_volume, cs_volume, size, obj_assert):
+    obj_assert.assertEqual(
+        sf_volume['qos']['minIOPS'],
+        cs_volume.miniops,
+        "Check QoS - Min IOPS: " + str(sf_volume['qos']['minIOPS'])
+    )
+
+    obj_assert.assertEqual(
+        sf_volume['qos']['maxIOPS'],
+        cs_volume.maxiops,
+        "Check QoS - Max IOPS: " + str(sf_volume['qos']['maxIOPS'])
+    )
+
+    obj_assert.assertEqual(
+        sf_volume['totalSize'],
+        size,
+        "Check SolidFire volume size: " + str(sf_volume['totalSize'])
+    )
+
+def get_volume_size_with_hsr(cs_api, cs_volume, obj_assert):
+    # Get underlying SF volume size with hypervisor snapshot reserve
+    sf_volume_size_request = {'volumeid': cs_volume.id}
+    sf_volume_size_result = cs_api.getSolidFireVolumeSize(sf_volume_size_request)
+    sf_volume_size = sf_volume_size_result['apisolidfirevolumesize']['solidFireVolumeSize']
+
+    obj_assert.assertEqual(
+        isinstance(sf_volume_size, int) and sf_volume_size != 0,
+        True,
+        "The SolidFire volume size should be a non-zero integer."
+    )
+
+    return sf_volume_size
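
For reference, a minimal sketch of how a test case consumes these helpers.
The class name, the @attr decorator, and the fixtures (self.cs_api,
self.sf_client, self.account, self.primary_storage, self.cluster) are
assumed to be created in setUpClass exactly as in the test classes in this
series; the snippet is illustrative, not part of the commit:

    from util import sf_util

    class TestExample(cloudstackTestCase):
        @attr(hypervisor='XenServer')
        def test_root_volume_in_vag(self):
            # Resolve the SolidFire account backing this CloudStack account/pool.
            sf_account_id = sf_util.get_sf_account_id(
                self.cs_api, self.account.id, self.primary_storage.id,
                self, "The SolidFire account ID should be a non-zero integer.")

            # Only volumes in the 'active' state count here.
            sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)

            sf_util.check_list(sf_volumes, 1, self, "There should only be one volume in the list.")

            # The root volume must sit in exactly one volume access group.
            sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id, self.primary_storage.id, self)

            sf_util.check_vag(sf_volumes[0], sf_vag_id, self)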


[2/4] git commit: updated refs/heads/master to f21477a

Posted by ra...@apache.org.
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/test/integration/plugins/solidfire/TestSnapshots.py
----------------------------------------------------------------------
diff --git a/test/integration/plugins/solidfire/TestSnapshots.py b/test/integration/plugins/solidfire/TestSnapshots.py
index 9c3d255..9ae10f3 100644
--- a/test/integration/plugins/solidfire/TestSnapshots.py
+++ b/test/integration/plugins/solidfire/TestSnapshots.py
@@ -21,6 +21,8 @@ import SignedAPICall
 import time
 import XenAPI
 
+from util import sf_util
+
 # All tests inherit from cloudstackTestCase
 from marvin.cloudstackTestCase import cloudstackTestCase
 
@@ -32,15 +34,17 @@ from nose.plugins.attrib import attr
 from marvin.lib.base import Account, DiskOffering, ServiceOffering, Snapshot, StoragePool, Template, User, VirtualMachine, Volume
 
 # common - commonly used methods for all tests are listed here
-from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_hosts, list_volumes
+from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_hosts, list_volumes, list_snapshots
 
 # utils - utility classes for common cleanup, external library wrappers, etc.
-from marvin.lib.utils import cleanup_resources
+from marvin.lib.utils import cleanup_resources, wait_until
 
 from solidfire import solidfire_element_api as sf_api
 
-# on April 10, 2016: Ran 3 tests in 7742.481s with three hosts
-# on May 2, 2016: Ran 3 tests in 7409.770s with two hosts
+# Prerequisites:
+#  Only one zone
+#  Only one pod
+#  Only one cluster
 
 
 class TestData():
@@ -334,7 +338,7 @@ class TestSnapshots(cloudstackTestCase):
 
             cls.primary_storage.delete(cls.apiClient)
 
-            cls._purge_solidfire_volumes()
+            sf_util.purge_solidfire_volumes(cls.sf_client)
         except Exception as e:
             logging.debug("Exception in tearDownClass(cls): %s" % e)
 
@@ -346,7 +350,7 @@ class TestSnapshots(cloudstackTestCase):
 
     @attr(hypervisor='XenServer')
     def test_01_create_volume_snapshot_using_sf_snapshot(self):
-        self._set_supports_resign(True)
+        sf_util.set_supports_resign(True, self.dbConnection)
 
         virtual_machine = VirtualMachine.create(
             self.apiClient,
@@ -365,24 +369,24 @@ class TestSnapshots(cloudstackTestCase):
             listall=True
         )
 
-        self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         vm_1_root_volume = list_volumes_response[0]
         vm_1_root_volume_name = vm_1_root_volume.name
 
-        sf_account_id = self._get_sf_account_id(self.account.id, self.primary_storage.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestSnapshots._sf_account_id_should_be_non_zero_int_err_msg)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
 
         # Get snapshot information for volume from SolidFire cluster
         sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID'])
 
-        self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+        sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
 
         primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage)
 
@@ -405,27 +409,27 @@ class TestSnapshots(cloudstackTestCase):
         virtual_machine.delete(self.apiClient, True)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
 
         self._delete_and_test_snapshot(vol_snap_1)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
 
         self._delete_and_test_snapshot(vol_snap_2)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 0, TestSnapshots._should_be_zero_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg)
 
         virtual_machine = VirtualMachine.create(
             self.apiClient,
@@ -444,22 +448,22 @@ class TestSnapshots(cloudstackTestCase):
             listall=True
         )
 
-        self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         vm_1_root_volume = list_volumes_response[0]
         vm_1_root_volume_name = vm_1_root_volume.name
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
 
         # Get snapshot information for volume from SolidFire cluster
         sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID'])
 
-        self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+        sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
 
         vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
 
@@ -492,22 +496,22 @@ class TestSnapshots(cloudstackTestCase):
             listall=True
         )
 
-        self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         vm_2_root_volume = list_volumes_response[0]
         vm_2_root_volume_name = vm_2_root_volume.name
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg)
 
         sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
 
         # Get snapshot information for volume from SolidFire cluster
         sf_snapshots_2 = self.sf_client.list_snapshots(volume_id=sf_volume_2['volumeID'])
 
-        self._check_list(sf_snapshots_2, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+        sf_util.check_list(sf_snapshots_2, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
 
         vol_snap_a = self._create_and_test_snapshot(vm_2_root_volume.id, sf_volume_2, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
 
@@ -518,15 +522,15 @@ class TestSnapshots(cloudstackTestCase):
         volume_created_from_snapshot_name = volume_created_from_snapshot.name
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 3, TestSnapshots._should_be_three_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg)
 
         sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
         sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
         sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
 
-        self._check_list(sf_volume_3['volumeAccessGroups'], 0, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
+        sf_util.check_list(sf_volume_3['volumeAccessGroups'], 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
 
         volume_created_from_snapshot = virtual_machine.attach_volume(
             self.apiClient,
@@ -538,9 +542,9 @@ class TestSnapshots(cloudstackTestCase):
         virtual_machine.delete(self.apiClient, True)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 3, TestSnapshots._should_be_three_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg)
 
         sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
         sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
@@ -558,9 +562,9 @@ class TestSnapshots(cloudstackTestCase):
         self._delete_and_test_snapshot(vol_snap_1)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg)
 
         sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
         sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
@@ -568,9 +572,9 @@ class TestSnapshots(cloudstackTestCase):
         virtual_machine_2.delete(self.apiClient, True)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
 
@@ -579,7 +583,7 @@ class TestSnapshots(cloudstackTestCase):
             listall=True
         )
 
-        self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         data_volume = list_volumes_response[0]
 
@@ -588,13 +592,13 @@ class TestSnapshots(cloudstackTestCase):
         data_volume.delete(self.apiClient)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 0, TestSnapshots._should_be_zero_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg)
 
     @attr(hypervisor='XenServer')
     def test_02_create_volume_snapshot_using_sf_volume(self):
-        self._set_supports_resign(False)
+        sf_util.set_supports_resign(False, self.dbConnection)
 
         virtual_machine = VirtualMachine.create(
             self.apiClient,
@@ -613,24 +617,24 @@ class TestSnapshots(cloudstackTestCase):
             listall=True
         )
 
-        self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         vm_1_root_volume = list_volumes_response[0]
         vm_1_root_volume_name = vm_1_root_volume.name
 
-        sf_account_id = self._get_sf_account_id(self.account.id, self.primary_storage.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestSnapshots._sf_account_id_should_be_non_zero_int_err_msg)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
 
         # Get snapshot information for volume from SolidFire cluster
         sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID'])
 
-        self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+        sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
 
         primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage)
 
@@ -661,9 +665,9 @@ class TestSnapshots(cloudstackTestCase):
         virtual_machine.delete(self.apiClient, True)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg)
 
         self._delete_and_test_snapshot_2(vol_snap_1, sf_account_id, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
@@ -686,22 +690,22 @@ class TestSnapshots(cloudstackTestCase):
             listall=True
         )
 
-        self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         vm_1_root_volume = list_volumes_response[0]
         vm_1_root_volume_name = vm_1_root_volume.name
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
 
         # Get snapshot information for volume from SolidFire cluster
         sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID'])
 
-        self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+        sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
 
         sf_volume_id = sf_volume['volumeID']
         sf_volume_size = sf_volume['totalSize']
@@ -740,22 +744,22 @@ class TestSnapshots(cloudstackTestCase):
             listall=True
         )
 
-        self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         vm_2_root_volume = list_volumes_response[0]
         vm_2_root_volume_name = vm_2_root_volume.name
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 5, TestSnapshots._should_be_five_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 5, self, TestSnapshots._should_be_five_volumes_in_list_err_msg)
 
         sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
 
         # Get snapshot information for volume from SolidFire cluster
         sf_snapshots_2 = self.sf_client.list_snapshots(volume_id=sf_volume_2['volumeID'])
 
-        self._check_list(sf_snapshots_2, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+        sf_util.check_list(sf_snapshots_2, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
 
         sf_volume_id_2 = sf_volume_2['volumeID']
         sf_volume_size_2 = sf_volume_2['totalSize']
@@ -770,15 +774,15 @@ class TestSnapshots(cloudstackTestCase):
         volume_created_from_snapshot_name = volume_created_from_snapshot.name
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 7, TestSnapshots._should_be_seven_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 7, self, TestSnapshots._should_be_seven_volumes_in_list_err_msg)
 
         sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
         sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
         sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
 
-        self._check_list(sf_volume_3['volumeAccessGroups'], 0, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
+        sf_util.check_list(sf_volume_3['volumeAccessGroups'], 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
 
         volume_created_from_snapshot = virtual_machine.attach_volume(
             self.apiClient,
@@ -790,9 +794,9 @@ class TestSnapshots(cloudstackTestCase):
         virtual_machine.delete(self.apiClient, True)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 5, TestSnapshots._should_be_five_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 5, self, TestSnapshots._should_be_five_volumes_in_list_err_msg)
 
         sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
         sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
@@ -811,16 +815,16 @@ class TestSnapshots(cloudstackTestCase):
         virtual_machine_2.delete(self.apiClient, True)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         list_volumes_response = list_volumes(
             self.apiClient,
             listall=True
         )
 
-        self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         data_volume = list_volumes_response[0]
 
@@ -829,9 +833,9 @@ class TestSnapshots(cloudstackTestCase):
         data_volume.delete(self.apiClient)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 0, TestSnapshots._should_be_zero_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg)
 
         virtual_machine = VirtualMachine.create(
             self.apiClient,
@@ -850,15 +854,15 @@ class TestSnapshots(cloudstackTestCase):
             listall=True
         )
 
-        self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         vm_1_root_volume = list_volumes_response[0]
         vm_1_root_volume_name = vm_1_root_volume.name
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         time.sleep(60)
 
@@ -873,9 +877,9 @@ class TestSnapshots(cloudstackTestCase):
                                                       sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg)
 
         services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True}
 
@@ -884,13 +888,13 @@ class TestSnapshots(cloudstackTestCase):
         volume_created_from_snapshot_name = volume_created_from_snapshot.name
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 3, TestSnapshots._should_be_three_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg)
 
         sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
 
-        self._check_list(sf_volume_2['volumeAccessGroups'], 0, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
+        sf_util.check_list(sf_volume_2['volumeAccessGroups'], 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
 
         volume_created_from_snapshot = virtual_machine.attach_volume(
             self.apiClient,
@@ -910,16 +914,16 @@ class TestSnapshots(cloudstackTestCase):
         virtual_machine.delete(self.apiClient, True)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         list_volumes_response = list_volumes(
             self.apiClient,
             listall=True
         )
 
-        self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         vol_snap_a = self._create_and_test_snapshot_2(volume_created_from_snapshot.id, sf_volume_id_2, sf_volume_id + 4, primary_storage_db_id, sf_volume_size_2,
                                                       sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg)
@@ -931,9 +935,9 @@ class TestSnapshots(cloudstackTestCase):
         data_volume.delete(self.apiClient)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         list_volumes_response = list_volumes(
             self.apiClient,
@@ -950,7 +954,7 @@ class TestSnapshots(cloudstackTestCase):
 
     @attr(hypervisor='XenServer')
     def test_03_create_volume_snapshot_using_sf_volume_and_sf_snapshot(self):
-        self._set_supports_resign(False)
+        sf_util.set_supports_resign(False, self.dbConnection)
 
         virtual_machine = VirtualMachine.create(
             self.apiClient,
@@ -969,24 +973,24 @@ class TestSnapshots(cloudstackTestCase):
             listall=True
         )
 
-        self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         vm_1_root_volume = list_volumes_response[0]
         vm_1_root_volume_name = vm_1_root_volume.name
 
-        sf_account_id = self._get_sf_account_id(self.account.id, self.primary_storage.id)
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestSnapshots._sf_account_id_should_be_non_zero_int_err_msg)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
 
         # Get snapshot information for volume from SolidFire cluster
         sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID'])
 
-        self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+        sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
 
         primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage)
 
@@ -999,7 +1003,7 @@ class TestSnapshots(cloudstackTestCase):
         vol_snap_2 = self._create_and_test_snapshot_2(vm_1_root_volume.id, sf_volume_id, sf_volume_id + 2, primary_storage_db_id, sf_volume_size,
                                                       sf_account_id, 3, TestSnapshots._should_be_three_volumes_in_list_err_msg)
 
-        self._set_supports_resign(True)
+        sf_util.set_supports_resign(True, self.dbConnection)
 
         vol_snap_a = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
 
@@ -1030,31 +1034,31 @@ class TestSnapshots(cloudstackTestCase):
             listall=True
         )
 
-        self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         vm_2_root_volume = list_volumes_response[0]
         vm_2_root_volume_name = vm_2_root_volume.name
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 4, TestSnapshots._should_be_four_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 4, self, TestSnapshots._should_be_four_volumes_in_list_err_msg)
 
         sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
 
         # Get snapshot information for volume from SolidFire cluster
         sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_2['volumeID'])
 
-        self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+        sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
 
         services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True}
 
         volume_created_from_snapshot_1 = Volume.create_from_snapshot(self.apiClient, vol_snap_2.id, services, account=self.account.name, domainid=self.domain.id)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 5, TestSnapshots._should_be_five_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 5, self, TestSnapshots._should_be_five_volumes_in_list_err_msg)
 
         volume_created_from_snapshot_1 = virtual_machine_2.attach_volume(
             self.apiClient,
@@ -1086,31 +1090,31 @@ class TestSnapshots(cloudstackTestCase):
             listall=True
         )
 
-        self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         vm_3_root_volume = list_volumes_response[0]
         vm_3_root_volume_name = vm_3_root_volume.name
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 6, TestSnapshots._should_be_six_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 6, self, TestSnapshots._should_be_six_volumes_in_list_err_msg)
 
         sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, vm_3_root_volume_name)
 
         # Get snapshot information for volume from SolidFire cluster
         sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_3['volumeID'])
 
-        self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+        sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
 
         services = {"diskname": "Vol-A", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True}
 
         volume_created_from_snapshot_a = Volume.create_from_snapshot(self.apiClient, vol_snap_b.id, services, account=self.account.name, domainid=self.domain.id)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 7, TestSnapshots._should_be_seven_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 7, self, TestSnapshots._should_be_seven_volumes_in_list_err_msg)
 
         volume_created_from_snapshot_a = virtual_machine_3.attach_volume(
             self.apiClient,
@@ -1120,85 +1124,320 @@ class TestSnapshots(cloudstackTestCase):
         virtual_machine.delete(self.apiClient, True)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
         # should still be 7 volumes because the SolidFire volume for the root disk of the VM just destroyed
         # is still needed for the SolidFire snapshots
-        self._check_list(sf_volumes, 7, TestSnapshots._should_be_seven_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 7, self, TestSnapshots._should_be_seven_volumes_in_list_err_msg)
 
         virtual_machine_2.delete(self.apiClient, True)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 6, TestSnapshots._should_be_six_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 6, self, TestSnapshots._should_be_six_volumes_in_list_err_msg)
 
         virtual_machine_3.delete(self.apiClient, True)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 5, TestSnapshots._should_be_five_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 5, self, TestSnapshots._should_be_five_volumes_in_list_err_msg)
 
         data_volume = Volume(volume_created_from_snapshot_a.__dict__)
 
         data_volume.delete(self.apiClient)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 4, TestSnapshots._should_be_four_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 4, self, TestSnapshots._should_be_four_volumes_in_list_err_msg)
 
         data_volume = Volume(volume_created_from_snapshot_1.__dict__)
 
         data_volume.delete(self.apiClient)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 3, TestSnapshots._should_be_three_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg)
 
         self._delete_and_test_snapshot_2(vol_snap_1, sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg)
 
         self._delete_and_test_snapshot(vol_snap_b)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
         # should still be 2 volumes because the SolidFire volume for the root disk of the VM destroyed earlier
         # is still needed for the remaining SolidFire snapshots
-        self._check_list(sf_volumes, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg)
 
         self._delete_and_test_snapshot(vol_snap_a)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+        sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
 
         self._delete_and_test_snapshot_2(vol_snap_2, sf_account_id, 0, TestSnapshots._should_be_zero_volumes_in_list_err_msg)
 
-    def _set_supports_resign(self, supports_resign):
-        supports_resign = str(supports_resign)
+    @attr(hypervisor='XenServer')
+    def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self):
+        sf_util.set_supports_resign(True, self.dbConnection)
 
-        sql_query = "Update host_details Set value = '" + supports_resign + "' Where name = 'supportsResign'"
+        virtual_machine = VirtualMachine.create(
+            self.apiClient,
+            self.testdata[TestData.virtualMachine],
+            accountid=self.account.name,
+            zoneid=self.zone.id,
+            serviceofferingid=self.compute_offering.id,
+            templateid=self.template.id,
+            domainid=self.domain.id,
+            startvm=True
+        )
 
-        # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
-        self.dbConnection.execute(sql_query)
+        list_volumes_response = list_volumes(
+            self.apiClient,
+            virtualmachineid=virtual_machine.id,
+            listall=True
+        )
 
-    def _check_list(self, in_list, expected_size_of_list, err_msg):
-        self.assertEqual(
-            isinstance(in_list, list),
-            True,
-            "'in_list' is not a list."
+        sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+
+        vm_1_root_volume = list_volumes_response[0]
+        vm_1_root_volume_name = vm_1_root_volume.name
+
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestSnapshots._sf_account_id_should_be_non_zero_int_err_msg)
+
+        # Get volume information from SolidFire cluster
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+        sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+
+        sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
+
+        # Get snapshot information for volume from SolidFire cluster
+        sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID'])
+
+        sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+
+        primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage)
+
+        vol_snap_1_archive = self._create_and_test_archive_snapshot(vm_1_root_volume.id, sf_volume)
+
+        vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
+
+        vol_snap_3_archive = self._create_and_test_archive_snapshot(vm_1_root_volume.id, sf_volume)
+
+        vol_snap_4 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg)
+
+        self._delete_and_test_archive_snapshot(vol_snap_3_archive)
+
+        self._delete_and_test_snapshot(vol_snap_2)
+
+        self._delete_and_test_snapshot(vol_snap_4)
+
+        self._delete_and_test_archive_snapshot(vol_snap_1_archive)
+
+        vol_snap_1_archive = self._create_and_test_archive_snapshot(vm_1_root_volume.id, sf_volume)
+
+        vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
+
+        virtual_machine.delete(self.apiClient, True)
+
+        # Get volume information from SolidFire cluster
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+        sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+
+        sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
+
+        self._delete_and_test_archive_snapshot(vol_snap_1_archive)
+
+        # Get volume information from SolidFire cluster
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+        sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+
+        sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
+
+        self._delete_and_test_snapshot(vol_snap_2)
+
+        # Get volume information from SolidFire cluster
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+        sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg)
+
+        virtual_machine = VirtualMachine.create(
+            self.apiClient,
+            self.testdata[TestData.virtualMachine],
+            accountid=self.account.name,
+            zoneid=self.zone.id,
+            serviceofferingid=self.compute_offering.id,
+            templateid=self.template.id,
+            domainid=self.domain.id,
+            startvm=True
         )
 
-        self.assertEqual(
-            len(in_list),
-            expected_size_of_list,
-            err_msg
+        list_volumes_response = list_volumes(
+            self.apiClient,
+            virtualmachineid=virtual_machine.id,
+            listall=True
         )
 
+        sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+
+        vm_1_root_volume = list_volumes_response[0]
+        vm_1_root_volume_name = vm_1_root_volume.name
+
+        # Get volume information from SolidFire cluster
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+        sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+
+        sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
+
+        # Get snapshot information for volume from SolidFire cluster
+        sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID'])
+
+        sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+
+        vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
+
+        vol_snap_2_archive = self._create_and_test_archive_snapshot(vm_1_root_volume.id, sf_volume)
+
+        vol_snap_3 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg)
+
+        vol_snap_4_archive = self._create_and_test_archive_snapshot(vm_1_root_volume.id, sf_volume)
+
+        services = {"displaytext": "Template-1", "name": "Template-1-name", "ostype": "CentOS 5.6 (64-bit)", "ispublic": "true"}
+
+        template = Template.create_from_snapshot(self.apiClient, vol_snap_2_archive, services)
+
+        self.cleanup.append(template)
+
+        virtual_machine_dict = {"name": "TestVM2", "displayname": "Test VM 2"}
+
+        virtual_machine_2 = VirtualMachine.create(
+            self.apiClient,
+            virtual_machine_dict,
+            accountid=self.account.name,
+            zoneid=self.zone.id,
+            serviceofferingid=self.compute_offering.id,
+            templateid=template.id,
+            domainid=self.domain.id,
+            startvm=True
+        )
+
+        list_volumes_response = list_volumes(
+            self.apiClient,
+            virtualmachineid=virtual_machine_2.id,
+            listall=True
+        )
+
+        sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+
+        vm_2_root_volume = list_volumes_response[0]
+        vm_2_root_volume_name = vm_2_root_volume.name
+
+        # Get volume information from SolidFire cluster
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+        sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg)
+
+        sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
+
+        # Get snapshot information for volume from SolidFire cluster
+        sf_snapshots_2 = self.sf_client.list_snapshots(volume_id=sf_volume_2['volumeID'])
+
+        sf_util.check_list(sf_snapshots_2, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+
+        vol_snap_a_archive = self._create_and_test_archive_snapshot(vm_2_root_volume.id, sf_volume_2)
+
+        services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "ispublic": True}
+
+        volume_created_from_snapshot = Volume.create_from_snapshot(self.apiClient, vol_snap_a_archive.id, services, account=self.account.name, domainid=self.domain.id)
+
+        volume_created_from_snapshot_name = volume_created_from_snapshot.name
+
+        # Get volume information from SolidFire cluster
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+        sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg)
+
+        sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
+        sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
+        sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
+
+        sf_util.check_list(sf_volume_3['volumeAccessGroups'], 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
+
+        volume_created_from_snapshot = virtual_machine.attach_volume(
+            self.apiClient,
+            volume_created_from_snapshot
+        )
+
+        self._delete_and_test_archive_snapshot(vol_snap_a_archive)
+
+        virtual_machine.delete(self.apiClient, True)
+
+        # Get volume information from SolidFire cluster
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+        sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg)
+
+        sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
+        sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
+        sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
+
+        volume_created_from_snapshot = virtual_machine_2.attach_volume(
+            self.apiClient,
+            volume_created_from_snapshot
+        )
+
+        self._delete_and_test_archive_snapshot(vol_snap_4_archive)
+
+        self._delete_and_test_snapshot(vol_snap_1)
+
+        self._delete_and_test_archive_snapshot(vol_snap_2_archive)
+
+        self._delete_and_test_snapshot(vol_snap_3)
+
+        # Get volume information from SolidFire cluster
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+        sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg)
+
+        sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
+        sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
+
+        virtual_machine_2.delete(self.apiClient, True)
+
+        # Get volume information from SolidFire cluster
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+        sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+
+        sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
+
+        list_volumes_response = list_volumes(
+            self.apiClient,
+            listall=True
+        )
+
+        sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+
+        data_volume = list_volumes_response[0]
+
+        data_volume = Volume(data_volume.__dict__)
+
+        data_volume.delete(self.apiClient)
+
+        # Get volume information from SolidFire cluster
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+        sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg)
+
     def _check_list_not_empty(self, in_list):
         self.assertEqual(
             isinstance(in_list, list),
@@ -1214,7 +1453,7 @@ class TestSnapshots(cloudstackTestCase):
 
     # used when SolidFire snapshots are being used for CloudStack volume snapshots
     def _check_snapshot_details(self, sf_snapshot_details, cs_snapshot_id, sf_volume_id, sf_snapshot_id, storage_pool_id, sf_volume_size):
-        self._check_list(sf_snapshot_details, 5, TestSnapshots._should_be_five_items_in_list_err_msg)
+        sf_util.check_list(sf_snapshot_details, 5, self, TestSnapshots._should_be_five_items_in_list_err_msg)
 
         self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "takeSnapshot", "true")
         self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "volumeId", sf_volume_id)
@@ -1224,7 +1463,7 @@ class TestSnapshots(cloudstackTestCase):
 
     # used when SolidFire volumes are being used for CloudStack volume snapshots
     def _check_snapshot_details_2(self, sf_snapshot_details, cs_snapshot_id, sf_volume_id, storage_pool_id, sf_volume_size):
-        self._check_list(sf_snapshot_details, 5, TestSnapshots._should_be_five_items_in_list_err_msg)
+        sf_util.check_list(sf_snapshot_details, 5, self, TestSnapshots._should_be_five_items_in_list_err_msg)
 
         self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "volumeId", sf_volume_id)
         self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "sfStoragePoolId", storage_pool_id)
@@ -1334,19 +1573,6 @@ class TestSnapshots(cloudstackTestCase):
 
         return sf_volume
 
-    def _get_sf_account_id(self, cs_account_id, primary_storage_id):
-        sf_account_id_request = {'accountid': cs_account_id, 'storageid': primary_storage_id}
-        sf_account_id_result = self.cs_api.getSolidFireAccountId(sf_account_id_request)
-        sf_account_id = sf_account_id_result['apisolidfireaccountid']['solidFireAccountId']
-
-        self.assertEqual(
-            isinstance(sf_account_id, int),
-            True,
-            TestSnapshots._sf_account_id_should_be_non_zero_int_err_msg
-        )
-
-        return sf_account_id
-
     def _get_snapshot_detail(self, sf_snapshot_details_list, key):
         for sf_snapshot_detail_dict in sf_snapshot_details_list:
             if sf_snapshot_detail_dict["snapshotDetailsName"] == key:
@@ -1378,12 +1604,14 @@ class TestSnapshots(cloudstackTestCase):
             volume_id=volume_id_for_snapshot
         )
 
+        self._wait_for_snapshot_state(vol_snap.id, Snapshot.BACKED_UP)
+
         sf_volume_id = sf_volume['volumeID']
 
         # Get snapshot information for volume from SolidFire cluster
         sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_id)
 
-        self._check_list(sf_snapshots, expected_num_snapshots, snapshot_err_msg)
+        sf_util.check_list(sf_snapshots, expected_num_snapshots, self, snapshot_err_msg)
 
         sf_snapshot = self._most_recent_sf_snapshot(sf_snapshots)
 
@@ -1397,6 +1625,32 @@ class TestSnapshots(cloudstackTestCase):
 
         return vol_snap
 
+    # used when SolidFire snapshots are being used for CloudStack volume snapshots to create a backup on secondary storage
+    def _create_and_test_archive_snapshot(self, volume_id_for_snapshot, sf_volume):
+        sf_volume_id = sf_volume['volumeID']
+
+        # Get snapshot information for volume from SolidFire cluster
+        sf_snapshots_orig = self.sf_client.list_snapshots(volume_id=sf_volume_id)
+
+        vol_snap = Snapshot.create(
+            self.apiClient,
+            volume_id=volume_id_for_snapshot,
+            locationtype=2
+        )
+
+        self._wait_for_snapshot_state(vol_snap.id, Snapshot.BACKED_UP)
+
+        # Get snapshot information for volume from SolidFire cluster
+        sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_id)
+
+        sf_util.check_list(sf_snapshots, len(sf_snapshots_orig), self, "A new SolidFire snapshot was detected.")
+
+        vol_snap_db_id = self._get_cs_volume_snapshot_db_id(vol_snap)
+
+        self._check_snapshot_details_do_not_exist(vol_snap_db_id)
+
+        return vol_snap
+
     # used when SolidFire volumes are being used for CloudStack volume snapshots
     def _create_and_test_snapshot_2(self, volume_id_for_snapshot, sf_volume_id, sf_volume_id_for_volume_snapshot, primary_storage_db_id, sf_volume_size,
                                     sf_account_id, expected_num_volumes, volume_err_msg):
@@ -1405,10 +1659,12 @@ class TestSnapshots(cloudstackTestCase):
             volume_id=volume_id_for_snapshot
         )
 
+        self._wait_for_snapshot_state(vol_snap.id, Snapshot.BACKED_UP)
+
         # Get snapshot information for volume from SolidFire cluster
         sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_id)
 
-        self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+        sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
 
         sf_snapshot_details_request = {'snapshotid': vol_snap.id}
         sf_snapshot_details_response = self.cs_api.getVolumeSnapshotDetails(sf_snapshot_details_request)
@@ -1419,16 +1675,38 @@ class TestSnapshots(cloudstackTestCase):
         self._check_snapshot_details_2(sf_snapshot_details, vol_snap_db_id, sf_volume_id_for_volume_snapshot, primary_storage_db_id, sf_volume_size)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
-        self._check_list(sf_volumes, expected_num_volumes, volume_err_msg)
+        sf_util.check_list(sf_volumes, expected_num_volumes, self, volume_err_msg)
 
         sf_volume_for_snapshot = self._get_sf_volume_by_id(sf_volumes, sf_volume_id_for_volume_snapshot)
 
-        self._check_list(sf_volume_for_snapshot['volumeAccessGroups'], 0, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
+        sf_util.check_list(sf_volume_for_snapshot['volumeAccessGroups'], 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
 
         return vol_snap
 
+    def _wait_for_snapshot_state(self, vol_snap_id, snapshot_state):
+        retry_interval = 10
+        num_tries = 10
+
+        wait_result, return_val = wait_until(retry_interval, num_tries, TestSnapshots._check_snapshot_state, self.apiClient, vol_snap_id, snapshot_state)
+
+        if not wait_result:
+            raise Exception(return_val)
+
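+    # Note: wait_until is assumed to call _check_snapshot_state every
+    # retry_interval seconds, up to num_tries times, and to return a
+    # (success, value) pair. Roughly:
+    #
+    #     result, return_val = False, None
+    #     for _ in range(num_tries):
+    #         result, return_val = check_func(*args)
+    #         if result:
+    #             break
+    #         time.sleep(retry_interval)
+    #     return result, return_val
+    #
+    # The actual helper ships with the test utilities and is not part of this diff.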
+    @staticmethod
+    def _check_snapshot_state(api_client, vol_snap_id, snapshot_state):
+        volume_snapshot = list_snapshots(
+            api_client,
+            id=vol_snap_id,
+            listall=True
+        )[0]
+
+        if str(volume_snapshot.state).lower() == snapshot_state.lower():
+            return True, ""
+
+        return False, "The snapshot is not in the '" + snapshot_state + "' state. State = " + str(volume_snapshot.state)
+
     # used when SolidFire snapshots are being used for CloudStack volume snapshots
     def _delete_and_test_snapshot(self, vol_snap):
         vol_snap_id = vol_snap.id
@@ -1450,6 +1728,10 @@ class TestSnapshots(cloudstackTestCase):
 
         self._check_snapshot_details_do_not_exist(vol_snap_db_id)
 
+    # used when SolidFire snapshots are being used for CloudStack volume snapshots to create a backup on secondary storage
+    def _delete_and_test_archive_snapshot(self, vol_snap):
+        vol_snap.delete(self.apiClient)
+
     # used when SolidFire volumes are being used for CloudStack volume snapshots
     def _delete_and_test_snapshot_2(self, vol_snap, sf_account_id, expected_num_volumes, volume_err_msg):
         vol_snap_db_id = self._get_cs_volume_snapshot_db_id(vol_snap)
@@ -1459,14 +1741,6 @@ class TestSnapshots(cloudstackTestCase):
         self._check_snapshot_details_do_not_exist(vol_snap_db_id)
 
         # Get volume information from SolidFire cluster
-        sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
-
-        self._check_list(sf_volumes, expected_num_volumes, volume_err_msg)
-
-    @classmethod
-    def _purge_solidfire_volumes(cls):
-        deleted_volumes = cls.sf_client.list_deleted_volumes()
-
-        for deleted_volume in deleted_volumes:
-            cls.sf_client.purge_deleted_volume(deleted_volume['volumeID'])
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
 
+        sf_util.check_list(sf_volumes, expected_num_volumes, self, volume_err_msg)
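The sf_util.check_list(...) calls above rely on a shared helper added by this
commit in test/integration/plugins/solidfire/util/sf_util.py (not shown in this
excerpt). A minimal sketch of the contract the call sites assume, namely that
the value is a list of the expected length:

    def check_list(in_list, expected_size_of_list, obj_assert, err_msg):
        # Fail fast if the API returned something other than a list.
        obj_assert.assertEqual(isinstance(in_list, list), True, "'in_list' is not a list.")

        # Then check the caller-supplied expected length.
        obj_assert.assertEqual(len(in_list), expected_size_of_list, err_msg)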

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py
----------------------------------------------------------------------
diff --git a/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py b/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py
new file mode 100644
index 0000000..255df07
--- /dev/null
+++ b/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py
@@ -0,0 +1,697 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import logging
+import random
+import SignedAPICall
+import XenAPI
+
+from util import sf_util
+
+# All tests inherit from cloudstackTestCase
+from marvin.cloudstackTestCase import cloudstackTestCase
+
+# base - contains all resources as entities and defines create, delete, list operations on them
+from marvin.lib.base import Account, DiskOffering, ServiceOffering, StoragePool, User, VirtualMachine, Volume
+
+# common - commonly used methods for all tests are listed here
+from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_hosts, list_volumes
+
+# utils - utility classes for common cleanup, external library wrappers, etc.
+from marvin.lib.utils import cleanup_resources
+
+from solidfire import solidfire_element_api as sf_api
+
+# Prerequisites:
+#  Only one zone
+#  Only one pod
+#  Two clusters
+
+
+class TestData():
+    account = "account"
+    capacityBytes = "capacitybytes"
+    capacityIops = "capacityiops"
+    clusterId1 = "clusterId1"
+    clusterId2 = "clusterId2"
+    computeOffering1 = "computeoffering1"
+    computeOffering2 = "computeoffering2"
+    computeOffering3 = "computeoffering3"
+    diskName = "diskname"
+    diskOffering1 = "diskoffering1"
+    diskOffering2 = "diskoffering2"
+    domainId = "domainid"
+    hypervisor = "hypervisor"
+    login = "login"
+    mvip = "mvip"
+    name = "name"
+    password = "password"
+    podId = "podid"
+    port = "port"
+    primaryStorage = "primarystorage"
+    primaryStorage2 = "primarystorage2"
+    provider = "provider"
+    scope = "scope"
+    solidFire = "solidfire"
+    storageTag = "SolidFire_SAN_1"
+    storageTag2 = "SolidFire_Volume_1"
+    tags = "tags"
+    templateCacheName = "centos56-x86-64-xen"
+    templateName = "templatename"
+    testAccount = "testaccount"
+    url = "url"
+    user = "user"
+    username = "username"
+    virtualMachine = "virtualmachine"
+    virtualMachine2 = "virtualmachine2"
+    volume_1 = "volume_1"
+    xenServer = "xenserver"
+    zoneId = "zoneid"
+
+    def __init__(self):
+        self.testdata = {
+            TestData.solidFire: {
+                TestData.mvip: "192.168.139.112",
+                TestData.login: "admin",
+                TestData.password: "admin",
+                TestData.port: 443,
+                TestData.url: "https://192.168.139.112:443"
+            },
+            TestData.xenServer: {
+                TestData.username: "root",
+                TestData.password: "solidfire"
+            },
+            TestData.account: {
+                "email": "test@test.com",
+                "firstname": "John",
+                "lastname": "Doe",
+                "username": "test",
+                "password": "test"
+            },
+            TestData.testAccount: {
+                "email": "test2@test2.com",
+                "firstname": "Jane",
+                "lastname": "Doe",
+                "username": "test2",
+                "password": "test"
+            },
+            TestData.user: {
+                "email": "user@test.com",
+                "firstname": "Jane",
+                "lastname": "Doe",
+                "username": "testuser",
+                "password": "password"
+            },
+            TestData.primaryStorage: {
+                TestData.name: "SolidFire-%d" % random.randint(0, 100),
+                TestData.scope: "ZONE",
+                TestData.url: "MVIP=192.168.139.112;SVIP=10.10.8.112;" +
+                       "clusterAdminUsername=admin;clusterAdminPassword=admin;" +
+                       "clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" +
+                       "clusterDefaultBurstIopsPercentOfMaxIops=1.5;",
+                TestData.provider: "SolidFire",
+                TestData.tags: TestData.storageTag,
+                TestData.capacityIops: 4500000,
+                TestData.capacityBytes: 2251799813685248,
+                TestData.hypervisor: "Any",
+                TestData.zoneId: 1
+            },
+            TestData.primaryStorage2: {
+                TestData.name: "SolidFireShared-%d" % random.randint(0, 100),
+                TestData.scope: "CLUSTER",
+                TestData.url: "MVIP=192.168.139.112;SVIP=10.10.8.112;" +
+                        "clusterAdminUsername=admin;clusterAdminPassword=admin;" +
+                        "minIops=5000;maxIops=50000;burstIops=75000",
+                TestData.provider: "SolidFireShared",
+                TestData.tags: TestData.storageTag2,
+                TestData.capacityIops: 5000,
+                TestData.capacityBytes: 1099511627776,
+                TestData.hypervisor: "XenServer",
+                TestData.podId: 1,
+                TestData.zoneId: 1
+            },
+            TestData.virtualMachine: {
+                "name": "TestVM",
+                "displayname": "Test VM"
+            },
+            TestData.computeOffering1: {
+                "name": "SF_CO_1",
+                "displaytext": "SF_CO_1 (Min IOPS = 1,000; Max IOPS = 2,000)",
+                "cpunumber": 1,
+                "cpuspeed": 100,
+                "memory": 128,
+                "storagetype": "shared",
+                "customizediops": False,
+                "miniops": 1000,
+                "maxiops": 2000,
+                "hypervisorsnapshotreserve": 125,
+                TestData.tags: TestData.storageTag,
+            },
+            TestData.computeOffering2: {
+                "name": "SF_CO_2",
+                "displaytext": "SF_CO_2 (Min IOPS = 1,000; Max IOPS = 2,000)",
+                "cpunumber": 1,
+                "cpuspeed": 100,
+                "memory": 128,
+                "storagetype": "shared",
+                "customizediops": False,
+                "miniops": 1000,
+                "maxiops": 2000,
+                "hypervisorsnapshotreserve": 100,
+                TestData.tags: TestData.storageTag,
+            },
+            TestData.computeOffering3: {
+                "name": "SF_CO_3",
+                "displaytext": "SF_CO_3 Desc",
+                "cpunumber": 1,
+                "cpuspeed": 100,
+                "memory": 128,
+                "storagetype": "shared",
+                TestData.tags: TestData.storageTag2,
+            },
+            TestData.diskOffering1: {
+                "name": "SF_DO_1",
+                "displaytext": "SF_DO_1 (Min IOPS = 3,000; Max IOPS = 6,000)",
+                "disksize": 100,
+                "customizediops": False,
+                "miniops": 3000,
+                "maxiops": 6000,
+                "hypervisorsnapshotreserve": 125,
+                TestData.tags: TestData.storageTag,
+                "storagetype": "shared"
+            },
+            TestData.diskOffering2: {
+                "name": "SF_DO_2",
+                "displaytext": "SF_DO_2 (Min IOPS = 3,000; Max IOPS = 6,000)",
+                "disksize": 100,
+                "customizediops": False,
+                "miniops": 3000,
+                "maxiops": 6000,
+                "hypervisorsnapshotreserve": 100,
+                TestData.tags: TestData.storageTag,
+                "storagetype": "shared"
+            },
+            TestData.volume_1: {
+                TestData.diskName: "test-volume",
+            },
+            TestData.templateName: "CentOS 5.6(64-bit) no GUI (XenServer)",
+            TestData.zoneId: 1,
+            TestData.clusterId1: 1,
+            TestData.clusterId2: 2,
+            TestData.domainId: 1,
+            TestData.url: "192.168.129.50"
+        }
+
+
+class TestVMMigrationWithStorage(cloudstackTestCase):
+    _sf_account_id_should_be_non_zero_int_err_msg = "The SolidFire account ID should be a non-zero integer."
+
+    @classmethod
+    def setUpClass(cls):
+        # Set up API client
+        testclient = super(TestVMMigrationWithStorage, cls).getClsTestClient()
+        cls.apiClient = testclient.getApiClient()
+        cls.dbConnection = testclient.getDbConnection()
+
+        cls.testdata = TestData().testdata
+
+        xenserver = cls.testdata[TestData.xenServer]
+
+        # Set up a XenAPI connection to the host in cluster 1
+        host_ip = "https://" + \
+                  list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId1], name="XenServer-6.5-1")[0].ipaddress
+
+        cls.xen_session_1 = XenAPI.Session(host_ip)
+
+        cls.xen_session_1.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])
+
+        # Set up a XenAPI connection to the host in cluster 2
+        host_ip = "https://" + \
+                  list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId2], name="XenServer-6.5-3")[0].ipaddress
+
+        cls.xen_session_2 = XenAPI.Session(host_ip)
+
+        cls.xen_session_2.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])
+
+        # Set up SolidFire connection
+        cls.sf_client = sf_api.SolidFireAPI(endpoint_dict=cls.testdata[TestData.solidFire])
+
+        # Get Resources from Cloud Infrastructure
+        cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
+        cls.cluster_1 = list_clusters(cls.apiClient, id=cls.testdata[TestData.clusterId1])[0]
+        cls.cluster_2 = list_clusters(cls.apiClient, id=cls.testdata[TestData.clusterId2])[0]
+        cls.template = get_template(cls.apiClient, cls.zone.id, template_name=cls.testdata[TestData.templateName])
+        cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
+
+        # Create test account
+        cls.account = Account.create(
+            cls.apiClient,
+            cls.testdata["account"],
+            admin=1
+        )
+
+        # Set up connection to make customized API calls
+        cls.user = User.create(
+            cls.apiClient,
+            cls.testdata["user"],
+            account=cls.account.name,
+            domainid=cls.domain.id
+        )
+
+        url = cls.testdata[TestData.url]
+
+        api_url = "http://" + url + ":8080/client/api"
+        userkeys = User.registerUserKeys(cls.apiClient, cls.user.id)
+
+        cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey)
+
+        primarystorage = cls.testdata[TestData.primaryStorage]
+
+        cls.primary_storage = StoragePool.create(
+            cls.apiClient,
+            primarystorage
+        )
+
+        cls.compute_offering_1 = ServiceOffering.create(
+            cls.apiClient,
+            cls.testdata[TestData.computeOffering1]
+        )
+
+        cls.compute_offering_2 = ServiceOffering.create(
+            cls.apiClient,
+            cls.testdata[TestData.computeOffering2]
+        )
+
+        cls.compute_offering_3 = ServiceOffering.create(
+            cls.apiClient,
+            cls.testdata[TestData.computeOffering3]
+        )
+
+        cls.disk_offering_1 = DiskOffering.create(
+            cls.apiClient,
+            cls.testdata[TestData.diskOffering1]
+        )
+
+        cls.disk_offering_2 = DiskOffering.create(
+            cls.apiClient,
+            cls.testdata[TestData.diskOffering2]
+        )
+
+        # Resources that are to be destroyed
+        cls._cleanup = [
+            cls.compute_offering_1,
+            cls.compute_offering_2,
+            cls.compute_offering_3,
+            cls.disk_offering_1,
+            cls.disk_offering_2,
+            cls.user,
+            cls.account
+        ]
+
+    @classmethod
+    def tearDownClass(cls):
+        try:
+            cleanup_resources(cls.apiClient, cls._cleanup)
+
+            cls.primary_storage.delete(cls.apiClient)
+        except Exception as e:
+            logging.debug("Exception in tearDownClass(cls): %s" % e)
+
+    def setUp(self):
+        self.cleanup = []
+
+    def tearDown(self):
+        try:
+            cleanup_resources(self.apiClient, self.cleanup)
+
+            sf_util.purge_solidfire_volumes(self.sf_client)
+        except Exception as e:
+            logging.debug("Exception in tearDownClass(self): %s" % e)
+
+    def test_01_storage_migrate_root_and_data_disks(self):
+        src_host, dest_host = self._get_source_and_dest_hosts()
+
+        virtual_machine = VirtualMachine.create(
+            self.apiClient,
+            self.testdata[TestData.virtualMachine],
+            accountid=self.account.name,
+            zoneid=self.zone.id,
+            serviceofferingid=self.compute_offering_1.id,
+            templateid=self.template.id,
+            domainid=self.domain.id,
+            hostid=src_host.id,
+            startvm=True
+        )
+
+        self.cleanup.append(virtual_machine)
+
+        cs_root_volume = list_volumes(self.apiClient, listall=True, virtualmachineid=virtual_machine.id)[0]
+
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self,
+                                                  TestVMMigrationWithStorage._sf_account_id_should_be_non_zero_int_err_msg)
+
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+        sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self)
+
+        cs_data_volume = Volume.create(
+            self.apiClient,
+            self.testdata[TestData.volume_1],
+            account=self.account.name,
+            domainid=self.domain.id,
+            zoneid=self.zone.id,
+            diskofferingid=self.disk_offering_1.id
+        )
+
+        self.cleanup.append(cs_data_volume)
+
+        cs_data_volume = virtual_machine.attach_volume(
+            self.apiClient,
+            cs_data_volume
+        )
+
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+        sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)
+
+        sf_root_volume, sf_data_volume = self._migrate_and_verify(virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id,
+                                                                  sf_root_volume, sf_data_volume, self.xen_session_1, self.xen_session_2)
+
+        src_host, dest_host = dest_host, src_host
+
+        self._migrate_and_verify(virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id, sf_root_volume, sf_data_volume,
+                                 self.xen_session_2, self.xen_session_1)
+
+    def test_02_storage_migrate_root_and_data_disks(self):
+        primarystorage2 = self.testdata[TestData.primaryStorage2]
+
+        primary_storage_2 = StoragePool.create(
+            self.apiClient,
+            primarystorage2,
+            clusterid=self.cluster_1.id
+        )
+
+        primary_storage_3 = StoragePool.create(
+            self.apiClient,
+            primarystorage2,
+            clusterid=self.cluster_2.id
+        )
+
+        src_host, dest_host = self._get_source_and_dest_hosts()
+
+        virtual_machine = VirtualMachine.create(
+            self.apiClient,
+            self.testdata[TestData.virtualMachine],
+            accountid=self.account.name,
+            zoneid=self.zone.id,
+            serviceofferingid=self.compute_offering_3.id,
+            templateid=self.template.id,
+            domainid=self.domain.id,
+            hostid=src_host.id,
+            startvm=True
+        )
+
+        cs_data_volume = Volume.create(
+            self.apiClient,
+            self.testdata[TestData.volume_1],
+            account=self.account.name,
+            domainid=self.domain.id,
+            zoneid=self.zone.id,
+            diskofferingid=self.disk_offering_1.id
+        )
+
+        self.cleanup = [
+            virtual_machine,
+            cs_data_volume,
+            primary_storage_2,
+            primary_storage_3
+        ]
+
+        cs_data_volume = virtual_machine.attach_volume(
+            self.apiClient,
+            cs_data_volume
+        )
+
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self,
+                                                  TestVMMigrationWithStorage._sf_account_id_should_be_non_zero_int_err_msg)
+
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+        sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)
+
+        sf_data_volume = self._migrate_and_verify_one_disk_only(virtual_machine, dest_host, cs_data_volume, sf_account_id,
+                                                                sf_data_volume, self.xen_session_1, self.xen_session_2)
+
+        src_host, dest_host = dest_host, src_host
+
+        self._migrate_and_verify_one_disk_only(virtual_machine, dest_host, cs_data_volume, sf_account_id, sf_data_volume,
+                                               self.xen_session_2, self.xen_session_1)
+
+    # The hypervisor snapshot reserve isn't large enough for either the compute or disk offering.
+    def test_03_storage_migrate_root_and_data_disks_fail(self):
+        self._execute_migration_failure(self.compute_offering_2.id, self.disk_offering_2.id)
+
+    # The hypervisor snapshot reserve isn't large enough for the compute offering.
+    def test_04_storage_migrate_root_disk_fails(self):
+        self._execute_migration_failure(self.compute_offering_2.id, self.disk_offering_1.id)
+
+    # The hypervisor snapshot reserve isn't large enough for the disk offering.
+    def test_05_storage_migrate_data_disk_fails(self):
+        self._execute_migration_failure(self.compute_offering_1.id, self.disk_offering_2.id)
+
+    def _execute_migration_failure(self, compute_offering_id, disk_offering_id):
+        src_host, dest_host = self._get_source_and_dest_hosts()
+
+        virtual_machine = VirtualMachine.create(
+            self.apiClient,
+            self.testdata[TestData.virtualMachine],
+            accountid=self.account.name,
+            zoneid=self.zone.id,
+            serviceofferingid=compute_offering_id,
+            templateid=self.template.id,
+            domainid=self.domain.id,
+            hostid=src_host.id,
+            startvm=True
+        )
+
+        self.cleanup.append(virtual_machine)
+
+        cs_root_volume = list_volumes(self.apiClient, listall=True, virtualmachineid=virtual_machine.id)[0]
+
+        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self,
+                                                  TestVMMigrationWithStorage._sf_account_id_should_be_non_zero_int_err_msg)
+
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+        sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self)
+
+        cs_data_volume = Volume.create(
+            self.apiClient,
+            self.testdata[TestData.volume_1],
+            account=self.account.name,
+            domainid=self.domain.id,
+            zoneid=self.zone.id,
+            diskofferingid=disk_offering_id
+        )
+
+        self.cleanup.append(cs_data_volume)
+
+        cs_data_volume = virtual_machine.attach_volume(
+            self.apiClient,
+            cs_data_volume
+        )
+
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+        sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)
+
+        self._fail_migrate_and_verify(virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id,
+                                      sf_root_volume, sf_data_volume, self.xen_session_1, self.xen_session_2)
+
+    def _get_source_and_dest_hosts(self):
+        hosts = list_hosts(self.apiClient)
+
+        # Initialize to None so the assertions below fail cleanly (instead of
+        # raising a NameError) if either host cannot be found.
+        src_host = None
+        dest_host = None
+
+        for host in hosts:
+            if host.name == "XenServer-6.5-1":
+                src_host = host
+            elif host.name == "XenServer-6.5-3":
+                dest_host = host
+
+        self.assertIsNotNone(src_host, "Could not locate the source host")
+
+        self.assertIsNotNone(dest_host, "Could not locate the destination host")
+
+        return src_host, dest_host
+
+    def _migrate_and_verify(self, virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id, src_sf_root_volume, src_sf_data_volume,
+                            src_xen_session, dest_xen_session):
+        self._verifyFields(cs_root_volume, src_sf_root_volume)
+        self._verifyFields(cs_data_volume, src_sf_data_volume)
+
+        virtual_machine.migrate_vm_with_volume(self.apiClient, dest_host.id)
+
+        cs_root_volume = self._get_updated_cs_volume(cs_root_volume.id)
+        cs_data_volume = self._get_updated_cs_volume(cs_data_volume.id)
+
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+        dest_sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self)
+        dest_sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)
+
+        self._verifyFields(cs_root_volume, dest_sf_root_volume)
+        self._verifyFields(cs_data_volume, dest_sf_data_volume)
+
+        self._verify_no_basic_volume_details()
+
+        self._verify_different_volume_access_groups(src_sf_root_volume, dest_sf_root_volume)
+        self._verify_different_volume_access_groups(src_sf_data_volume, dest_sf_data_volume)
+
+        self._verify_same_account(src_sf_root_volume, dest_sf_root_volume)
+        self._verify_same_account(src_sf_data_volume, dest_sf_data_volume)
+
+        self._verifySfVolumeIds(src_sf_root_volume, dest_sf_root_volume)
+        self._verifySfVolumeIds(src_sf_data_volume, dest_sf_data_volume)
+
+        self._verify_xenserver_state(src_xen_session, src_sf_root_volume, dest_xen_session, dest_sf_root_volume)
+        self._verify_xenserver_state(src_xen_session, src_sf_data_volume, dest_xen_session, dest_sf_data_volume)
+
+        return dest_sf_root_volume, dest_sf_data_volume
+
+    def _migrate_and_verify_one_disk_only(self, virtual_machine, dest_host, cs_volume, sf_account_id, src_sf_volume, src_xen_session, dest_xen_session):
+        self._verifyFields(cs_volume, src_sf_volume)
+
+        virtual_machine.migrate_vm_with_volume(self.apiClient, dest_host.id)
+
+        cs_volume = self._get_updated_cs_volume(cs_volume.id)
+
+        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+        dest_sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_volume.name, self)
+
+        self._verifyFields(cs_volume, dest_sf_volume)
+
+        self._verify_no_basic_volume_details()
+
+        self._verify_different_volume_access_groups(src_sf_volume, dest_sf_volume)
+
+        self._verify_same_account(src_sf_volume, dest_sf_volume)
+
+        self._verifySfVolumeIds(src_sf_volume, dest_sf_volume)
+
+        self._verify_xenserver_state(src_xen_session, src_sf_volume, dest_xen_session, dest_sf_volume)
+
+        return dest_sf_volume
+
+    def _fail_migrate_and_verify(self, virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id, src_sf_root_volume, src_sf_data_volume,
+                                 src_xen_session, dest_xen_session):
+        self._verifyFields(cs_root_volume, src_sf_root_volume)
+        self._verifyFields(cs_data_volume, src_sf_data_volume)
+
+        class MigrationException(Exception):
+            def __init__(self, *args, **kwargs):
+                Exception.__init__(self, *args, **kwargs)
+
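+        # The migration is expected to fail. MigrationException distinguishes
+        # "the migration unexpectedly succeeded" from the anticipated API
+        # error, which is deliberately swallowed below.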
+        try:
+            virtual_machine.migrate_vm_with_volume(self.apiClient, dest_host.id)
+
+            raise MigrationException("The migration did not fail (as expected).")
+        except MigrationException:
+            raise
+        except Exception:
+            pass
+
+        self._verify_no_basic_volume_details()
+
+        cs_root_volume_refreshed = self._get_updated_cs_volume(cs_root_volume.id)
+        cs_data_volume_refreshed = self._get_updated_cs_volume(cs_data_volume.id)
+
+        self._verifyFields(cs_root_volume_refreshed, src_sf_root_volume)
+        self._verifyFields(cs_data_volume_refreshed, src_sf_data_volume)
+
+        sf_volumes = sf_util.get_not_active_sf_volumes(self.sf_client, sf_account_id)
+
+        dest_sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self)
+        dest_sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)
+
+        self._verify_xenserver_state(dest_xen_session, dest_sf_root_volume, src_xen_session, src_sf_root_volume)
+        self._verify_xenserver_state(dest_xen_session, dest_sf_data_volume, src_xen_session, src_sf_data_volume)
+
+    def _verify_different_volume_access_groups(self, src_sf_volume, dest_sf_volume):
+        src_vags = src_sf_volume['volumeAccessGroups']
+
+        sf_util.check_list(src_vags, 1, self, "'src_vags' should be a list with only one element in it.")
+
+        dest_vags = dest_sf_volume['volumeAccessGroups']
+
+        sf_util.check_list(dest_vags, 1, self, "'dest_vags' should be a list with only one element in it.")
+
+        self.assertNotEqual(src_vags[0], dest_vags[0], "The source and destination volumes should not be in the same volume access group.")
+
+    def _get_updated_cs_volume(self, cs_volume_id):
+        return list_volumes(self.apiClient, listall=True, id=cs_volume_id)[0]
+
+    def _verify_same_account(self, src_sf_volume, dest_sf_volume):
+        self.assertEqual(src_sf_volume['accountID'], dest_sf_volume['accountID'], "The source and destination volumes should be in the same SolidFire account.")
+
+    def _verifySfVolumeIds(self, src_sf_volume, dest_sf_volume):
+        self.assert_(src_sf_volume['volumeID'] < dest_sf_volume['volumeID'],
+                     "The destination SolidFire volume's ID should be greater than the ID of the source volume.")
+
+    # verify the name, folder, and iscsi_name
+    def _verifyFields(self, cs_volume, sf_volume):
+        self.assert_(cs_volume.name == sf_volume['name'], "The CloudStack volume name does not match the SolidFire volume name.")
+
+        cs_volume_folder = self._get_cs_volume_folder(cs_volume.id)
+
+        self.assert_(int(cs_volume_folder) == sf_volume['volumeID'], "The CloudStack folder name does not match the SolidFire volume ID.")
+
+        cs_volume_iscsi_name = self._get_cs_volume_iscsi_name(cs_volume.id)
+
+        self.assert_(cs_volume_iscsi_name == sf_util.format_iqn(sf_volume['iqn']), "The CloudStack volume iscsi_name does not match the SolidFire volume IQN.")
+
+    def _get_cs_volume_property(self, cs_volume_id, volume_property):
+        sql_query = "Select " + volume_property + " From volumes Where uuid = '" + cs_volume_id + "'"
+
+        # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
+        sql_result = self.dbConnection.execute(sql_query)
+
+        return sql_result[0][0]
+
+    def _get_cs_volume_folder(self, cs_volume_id):
+        return self._get_cs_volume_property(cs_volume_id, "folder")
+
+    def _get_cs_volume_iscsi_name(self, cs_volume_id):
+        return self._get_cs_volume_property(cs_volume_id, "iscsi_name")
+
+    def _verify_no_basic_volume_details(self):
+        sql_query = "Select id From volume_details Where name like 'basic_'"
+
+        # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
+        sql_result = self.dbConnection.execute(sql_query)
+
+        sf_util.check_list(sql_result, 0, self, "The cloud.volume_details table should not have any name fields that start with 'basic'.")
+
+    def _verify_xenserver_state(self, xen_session_1, sf_volume_1, xen_session_2, sf_volume_2):
+        sr_name = sf_util.format_iqn(sf_volume_1["iqn"])
+
+        sf_util.check_xen_sr(sr_name, xen_session_1, self, False)
+
+        sr_name = sf_util.format_iqn(sf_volume_2["iqn"])
+
+        sf_util.check_xen_sr(sr_name, xen_session_2, self)
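Like check_list, check_xen_sr above comes from sf_util.py (not shown in this
excerpt). A plausible sketch of the contract the calls rely on, built on
standard XenAPI calls: assert that an SR with the given name label exists on
the host the session points at, or, when the final argument is False, that it
does not:

    def check_xen_sr(xen_sr_name, xen_session, obj_assert, should_exist=True):
        # SR.get_by_name_label returns a list of SR references with that label.
        srs = xen_session.xenapi.SR.get_by_name_label(xen_sr_name)

        if should_exist:
            obj_assert.assertEqual(len(srs), 1, "SR '" + xen_sr_name + "' was not found.")
        else:
            obj_assert.assertEqual(len(srs), 0, "SR '" + xen_sr_name + "' should no longer exist.")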


[3/4] git commit: updated refs/heads/master to f21477a

Posted by ra...@apache.org.
Adding support for cross-cluster storage migration for managed storage when using XenServer
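From the new Marvin test's point of view (TestVMMigrationWithStorage above),
the feature comes down to one call: a VM whose volumes live on managed
SolidFire storage can now be moved, together with its storage, to a host in
another XenServer cluster. A sketch using names from that test:

    # src_host and dest_host are in different XenServer clusters.
    virtual_machine.migrate_vm_with_volume(self.apiClient, dest_host.id)

The SolidFire volumes keep their volume IDs and account; on the destination
they are surfaced through a new SR and a different volume access group, which
is what _migrate_and_verify asserts.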


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/b508fb86
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/b508fb86
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/b508fb86

Branch: refs/heads/master
Commit: b508fb8692eac1675a4597c9dfaef463304aecba
Parents: 1d9735c
Author: Mike Tutkowski <mi...@solidfire.com>
Authored: Sat Aug 20 17:58:30 2016 -0600
Committer: Mike Tutkowski <mi...@solidfire.com>
Committed: Mon Sep 12 07:39:13 2016 -0600

----------------------------------------------------------------------
 api/src/com/cloud/storage/StoragePool.java      |   2 +
 .../api/MigrateWithStorageReceiveCommand.java   |  11 +-
 .../agent/test/BackupSnapshotCommandTest.java   |   3 +
 .../api/agent/test/CheckNetworkAnswerTest.java  |   3 +
 .../api/agent/test/SnapshotCommandTest.java     |   3 +
 .../api/storage/PrimaryDataStoreDriver.java     |   7 +
 .../com/cloud/vm/VirtualMachineManagerImpl.java |  88 ++-
 .../storage/datastore/db/StoragePoolVO.java     |   1 +
 ...MigrateWithStorageReceiveCommandWrapper.java |  13 +-
 .../CitrixCreateStoragePoolCommandWrapper.java  |  31 +-
 .../CitrixDeleteStoragePoolCommandWrapper.java  |  30 +-
 .../motion/XenServerStorageMotionStrategy.java  | 222 +++++-
 .../xenbase/XenServer610WrapperTest.java        |   6 +-
 .../driver/SolidFirePrimaryDataStoreDriver.java | 190 ++++-
 .../com/cloud/server/ManagementServerImpl.java  |  23 +-
 .../plugins/solidfire/TestAddRemoveHosts.py     |  58 +-
 .../plugins/solidfire/TestSnapshots.py          | 580 +++++++++++----
 .../solidfire/TestVMMigrationWithStorage.py     | 697 +++++++++++++++++++
 .../plugins/solidfire/TestVMSnapshots.py        |  74 +-
 .../plugins/solidfire/TestVolumes.py            | 548 +++++----------
 .../plugins/solidfire/util/sf_util.py           | 217 ++++++
 21 files changed, 2083 insertions(+), 724 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/api/src/com/cloud/storage/StoragePool.java
----------------------------------------------------------------------
diff --git a/api/src/com/cloud/storage/StoragePool.java b/api/src/com/cloud/storage/StoragePool.java
index 8e03c33..3a2d3bd 100644
--- a/api/src/com/cloud/storage/StoragePool.java
+++ b/api/src/com/cloud/storage/StoragePool.java
@@ -104,4 +104,6 @@ public interface StoragePool extends Identity, InternalIdentity {
     boolean isInMaintenance();
 
     Hypervisor.HypervisorType getHypervisor();
+
+    boolean isManaged();
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/core/src/com/cloud/agent/api/MigrateWithStorageReceiveCommand.java
----------------------------------------------------------------------
diff --git a/core/src/com/cloud/agent/api/MigrateWithStorageReceiveCommand.java b/core/src/com/cloud/agent/api/MigrateWithStorageReceiveCommand.java
index 66aecdb..3d413fc 100644
--- a/core/src/com/cloud/agent/api/MigrateWithStorageReceiveCommand.java
+++ b/core/src/com/cloud/agent/api/MigrateWithStorageReceiveCommand.java
@@ -21,26 +21,25 @@ package com.cloud.agent.api;
 
 import java.util.List;
 
-import com.cloud.agent.api.to.StorageFilerTO;
 import com.cloud.agent.api.to.VirtualMachineTO;
 import com.cloud.agent.api.to.VolumeTO;
 import com.cloud.utils.Pair;
 
 public class MigrateWithStorageReceiveCommand extends Command {
     VirtualMachineTO vm;
-    List<Pair<VolumeTO, StorageFilerTO>> volumeToFiler;
+    List<Pair<VolumeTO, String>> volumeToStorageUuid;
 
-    public MigrateWithStorageReceiveCommand(VirtualMachineTO vm, List<Pair<VolumeTO, StorageFilerTO>> volumeToFiler) {
+    public MigrateWithStorageReceiveCommand(VirtualMachineTO vm, List<Pair<VolumeTO, String>> volumeToStorageUuid) {
         this.vm = vm;
-        this.volumeToFiler = volumeToFiler;
+        this.volumeToStorageUuid = volumeToStorageUuid;
     }
 
     public VirtualMachineTO getVirtualMachine() {
         return vm;
     }
 
-    public List<Pair<VolumeTO, StorageFilerTO>> getVolumeToFiler() {
-        return volumeToFiler;
+    public List<Pair<VolumeTO, String>> getVolumeToStorageUuid() {
+        return volumeToStorageUuid;
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/core/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java
----------------------------------------------------------------------
diff --git a/core/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java b/core/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java
index bdcda38..edc90aa 100644
--- a/core/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java
+++ b/core/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java
@@ -135,6 +135,9 @@ public class BackupSnapshotCommandTest {
         };
 
         @Override
+        public boolean isManaged() { return false; }
+
+        @Override
         public Long getPodId() {
             return 0L;
         }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/core/test/org/apache/cloudstack/api/agent/test/CheckNetworkAnswerTest.java
----------------------------------------------------------------------
diff --git a/core/test/org/apache/cloudstack/api/agent/test/CheckNetworkAnswerTest.java b/core/test/org/apache/cloudstack/api/agent/test/CheckNetworkAnswerTest.java
index d6f0bfc..4d49c99 100644
--- a/core/test/org/apache/cloudstack/api/agent/test/CheckNetworkAnswerTest.java
+++ b/core/test/org/apache/cloudstack/api/agent/test/CheckNetworkAnswerTest.java
@@ -174,6 +174,9 @@ public class CheckNetworkAnswerTest {
             };
 
             @Override
+            public boolean isManaged() { return false; }
+
+            @Override
             public Long getPodId() {
                 return 0L;
             }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/core/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java
----------------------------------------------------------------------
diff --git a/core/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java b/core/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java
index 629669a..576419a 100644
--- a/core/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java
+++ b/core/test/org/apache/cloudstack/api/agent/test/SnapshotCommandTest.java
@@ -136,6 +136,9 @@ public class SnapshotCommandTest {
         };
 
         @Override
+        public boolean isManaged() { return false; }
+
+        @Override
         public Long getPodId() {
             return 0L;
         }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java
----------------------------------------------------------------------
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java
index 6dcdf4f..8749589 100644
--- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java
@@ -25,6 +25,13 @@ import com.cloud.host.Host;
 import com.cloud.storage.StoragePool;
 
 public interface PrimaryDataStoreDriver extends DataStoreDriver {
+    String BASIC_CREATE = "basicCreate";
+    String BASIC_DELETE = "basicDelete";
+    String BASIC_DELETE_FAILURE = "basicDeleteFailure";
+    String BASIC_GRANT_ACCESS = "basicGrantAccess";
+    String BASIC_REVOKE_ACCESS = "basicRevokeAccess";
+    String BASIC_IQN = "basicIqn";
+
     ChapInfo getChapInfo(DataObject dataObject);
 
     boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore);
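The new BASIC_* constants name volume_details entries that appear to serve as
transient state during cross-cluster migration of managed storage; the Python
test above asserts that none of them are left behind once a migration
completes or fails. A sketch of that invariant, assuming direct DB access as
the test uses:

    # No transient 'basic*' rows should remain in cloud.volume_details.
    sql_result = db_connection.execute("Select id From volume_details Where name like 'basic%'")

    assert len(sql_result) == 0, "transient 'basic*' volume details were left behind"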

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
----------------------------------------------------------------------
diff --git a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
index 9523b92..a4c9889 100644
--- a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
+++ b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
@@ -2045,62 +2045,74 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
 
     private Map<Volume, StoragePool> getPoolListForVolumesForMigration(final VirtualMachineProfile profile, final Host host, final Map<Long, Long> volumeToPool) {
         final List<VolumeVO> allVolumes = _volsDao.findUsableVolumesForInstance(profile.getId());
-        final Map<Volume, StoragePool> volumeToPoolObjectMap = new HashMap<Volume, StoragePool> ();
+        final Map<Volume, StoragePool> volumeToPoolObjectMap = new HashMap<>();
+
         for (final VolumeVO volume : allVolumes) {
             final Long poolId = volumeToPool.get(Long.valueOf(volume.getId()));
-            final StoragePoolVO pool = _storagePoolDao.findById(poolId);
+            final StoragePoolVO destPool = _storagePoolDao.findById(poolId);
             final StoragePoolVO currentPool = _storagePoolDao.findById(volume.getPoolId());
             final DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId());
-            if (pool != null) {
+
+            if (destPool != null) {
                 // Check if pool is accessible from the destination host and disk offering with which the volume was
                 // created is compliant with the pool type.
-                if (_poolHostDao.findByPoolHost(pool.getId(), host.getId()) == null || pool.isLocal() != diskOffering.getUseLocalStorage()) {
+                if (_poolHostDao.findByPoolHost(destPool.getId(), host.getId()) == null || destPool.isLocal() != diskOffering.getUseLocalStorage()) {
                     // Cannot find a pool for the volume. Throw an exception.
-                    throw new CloudRuntimeException("Cannot migrate volume " + volume + " to storage pool " + pool + " while migrating vm to host " + host +
+                    throw new CloudRuntimeException("Cannot migrate volume " + volume + " to storage pool " + destPool + " while migrating vm to host " + host +
                             ". Either the pool is not accessible from the host or because of the offering with which the volume is created it cannot be placed on " +
                             "the given pool.");
-                } else if (pool.getId() == currentPool.getId()) {
-                    // If the pool to migrate too is the same as current pool, the volume doesn't need to be migrated.
+                } else if (destPool.getId() == currentPool.getId()) {
+                    // If the pool to migrate to is the same as current pool, the volume doesn't need to be migrated.
                 } else {
-                    volumeToPoolObjectMap.put(volume, pool);
+                    volumeToPoolObjectMap.put(volume, destPool);
                 }
             } else {
-                // Find a suitable pool for the volume. Call the storage pool allocator to find the list of pools.
-                final DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType());
-                final DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), null, null);
-                final ExcludeList avoid = new ExcludeList();
-                boolean currentPoolAvailable = false;
-
-                final List<StoragePool> poolList = new ArrayList<StoragePool>();
-                for (final StoragePoolAllocator allocator : _storagePoolAllocators) {
-                    final List<StoragePool> poolListFromAllocator = allocator.allocateToPool(diskProfile, profile, plan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
-                    if (poolListFromAllocator != null && !poolListFromAllocator.isEmpty()) {
-                        poolList.addAll(poolListFromAllocator);
-                    }
-                }
+                if (currentPool.isManaged()) {
+                    volumeToPoolObjectMap.put(volume, currentPool);
+                } else {
+                    // Find a suitable pool for the volume. Call the storage pool allocator to find the list of pools.
 
-                if (poolList != null && !poolList.isEmpty()) {
-                    // Volume needs to be migrated. Pick the first pool from the list. Add a mapping to migrate the
-                    // volume to a pool only if it is required; that is the current pool on which the volume resides
-                    // is not available on the destination host.
-                    final Iterator<StoragePool> iter = poolList.iterator();
-                    while (iter.hasNext()) {
-                        if (currentPool.getId() == iter.next().getId()) {
-                            currentPoolAvailable = true;
-                            break;
+                    final DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType());
+                    final DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), null, null);
+
+                    final List<StoragePool> poolList = new ArrayList<>();
+                    final ExcludeList avoid = new ExcludeList();
+
+                    for (final StoragePoolAllocator allocator : _storagePoolAllocators) {
+                        final List<StoragePool> poolListFromAllocator = allocator.allocateToPool(diskProfile, profile, plan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
+
+                        if (poolListFromAllocator != null && !poolListFromAllocator.isEmpty()) {
+                            poolList.addAll(poolListFromAllocator);
                         }
                     }
 
-                    if (!currentPoolAvailable) {
-                        volumeToPoolObjectMap.put(volume, _storagePoolDao.findByUuid(poolList.get(0).getUuid()));
-                    }
-                }
+                    boolean currentPoolAvailable = false;
 
+                    if (poolList != null && !poolList.isEmpty()) {
+                        // Volume needs to be migrated. Pick the first pool from the list. Add a mapping to migrate the
+                        // volume to a pool only if it is required; that is the current pool on which the volume resides
+                        // is not available on the destination host.
 
-                if (!currentPoolAvailable && !volumeToPoolObjectMap.containsKey(volume)) {
-                    // Cannot find a pool for the volume. Throw an exception.
-                    throw new CloudRuntimeException("Cannot find a storage pool which is available for volume " + volume + " while migrating virtual machine " +
-                            profile.getVirtualMachine() + " to host " + host);
+                        final Iterator<StoragePool> iter = poolList.iterator();
+
+                        while (iter.hasNext()) {
+                            if (currentPool.getId() == iter.next().getId()) {
+                                currentPoolAvailable = true;
+
+                                break;
+                            }
+                        }
+
+                        if (!currentPoolAvailable) {
+                            volumeToPoolObjectMap.put(volume, _storagePoolDao.findByUuid(poolList.get(0).getUuid()));
+                        }
+                    }
+
+                    if (!currentPoolAvailable && !volumeToPoolObjectMap.containsKey(volume)) {
+                        // Cannot find a pool for the volume. Throw an exception.
+                        throw new CloudRuntimeException("Cannot find a storage pool which is available for volume " + volume + " while migrating virtual machine " +
+                                profile.getVirtualMachine() + " to host " + host);
+                    }
                 }
             }
         }
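The behavioral core of this hunk: when the caller supplies no target pool and
the volume already lives on managed storage, the volume is now mapped to its
current pool instead of being run through the storage pool allocators. As a
simplified Python-style sketch of the rule (names shortened from the Java
above):

    if dest_pool is None and current_pool.is_managed:
        # Keep the managed volume where it is; the hypervisor-side SR is
        # recreated on the destination cluster during the migration itself.
        volume_to_pool[volume] = current_pool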

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/engine/schema/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java
index ad2ad41..24fcaa0 100644
--- a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java
+++ b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/StoragePoolVO.java
@@ -231,6 +231,7 @@ public class StoragePoolVO implements StoragePool {
         this.managed = managed;
     }
 
+    @Override
     public boolean isManaged() {
         return managed;
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageReceiveCommandWrapper.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageReceiveCommandWrapper.java b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageReceiveCommandWrapper.java
index 046a425..fdcb7b5 100644
--- a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageReceiveCommandWrapper.java
+++ b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xen610/XenServer610MigrateWithStorageReceiveCommandWrapper.java
@@ -31,7 +31,6 @@ import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.MigrateWithStorageReceiveAnswer;
 import com.cloud.agent.api.MigrateWithStorageReceiveCommand;
 import com.cloud.agent.api.to.NicTO;
-import com.cloud.agent.api.to.StorageFilerTO;
 import com.cloud.agent.api.to.VirtualMachineTO;
 import com.cloud.agent.api.to.VolumeTO;
 import com.cloud.hypervisor.xenserver.resource.XenServer610Resource;
@@ -56,7 +55,7 @@ public final class XenServer610MigrateWithStorageReceiveCommandWrapper extends C
     public Answer execute(final MigrateWithStorageReceiveCommand command, final XenServer610Resource xenServer610Resource) {
         final Connection connection = xenServer610Resource.getConnection();
         final VirtualMachineTO vmSpec = command.getVirtualMachine();
-        final List<Pair<VolumeTO, StorageFilerTO>> volumeToFiler = command.getVolumeToFiler();
+        final List<Pair<VolumeTO, String>> volumeToStorageUuid = command.getVolumeToStorageUuid();
 
         try {
             // In a cluster management server setup, the migrate with storage receive and send
@@ -69,10 +68,12 @@ public final class XenServer610MigrateWithStorageReceiveCommandWrapper extends C
             // storage send command execution.
             Gson gson = new Gson();
             // Get a map of all the SRs to which the vdis will be migrated.
-            final List<Pair<VolumeTO, Object>> volumeToSr = new ArrayList<Pair<VolumeTO, Object>>();
-            for (final Pair<VolumeTO, StorageFilerTO> entry : volumeToFiler) {
-                final StorageFilerTO storageFiler = entry.second();
-                final SR sr = xenServer610Resource.getStorageRepository(connection, storageFiler.getUuid());
+            final List<Pair<VolumeTO, Object>> volumeToSr = new ArrayList<>();
+
+            for (final Pair<VolumeTO, String> entry : volumeToStorageUuid) {
+                final String storageUuid = entry.second();
+                final SR sr = xenServer610Resource.getStorageRepository(connection, storageUuid);
+
                 volumeToSr.add(new Pair<VolumeTO, Object>(entry.first(), sr));
             }
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateStoragePoolCommandWrapper.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateStoragePoolCommandWrapper.java b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateStoragePoolCommandWrapper.java
index bed417f..7b2a599 100644
--- a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateStoragePoolCommandWrapper.java
+++ b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixCreateStoragePoolCommandWrapper.java
@@ -19,6 +19,8 @@
 
 package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase;
 
+import java.util.Map;
+
 import org.apache.log4j.Logger;
 
 import com.cloud.agent.api.Answer;
@@ -39,20 +41,35 @@ public final class CitrixCreateStoragePoolCommandWrapper extends CommandWrapper<
     public Answer execute(final CreateStoragePoolCommand command, final CitrixResourceBase citrixResourceBase) {
         final Connection conn = citrixResourceBase.getConnection();
         final StorageFilerTO pool = command.getPool();
+
         try {
-            if (pool.getType() == StoragePoolType.NetworkFilesystem) {
-                citrixResourceBase.getNfsSR(conn, Long.toString(pool.getId()), pool.getUuid(), pool.getHost(), pool.getPath(), pool.toString());
-            } else if (pool.getType() == StoragePoolType.IscsiLUN) {
-                citrixResourceBase.getIscsiSR(conn, pool.getUuid(), pool.getHost(), pool.getPath(), null, null, false);
-            } else if (pool.getType() == StoragePoolType.PreSetup) {
-            } else {
-                return new Answer(command, false, "The pool type: " + pool.getType().name() + " is not supported.");
+            if (command.getCreateDatastore()) {
+                Map<String, String> details = command.getDetails();
+
+                String srNameLabel = details.get(CreateStoragePoolCommand.DATASTORE_NAME);
+                String storageHost = details.get(CreateStoragePoolCommand.STORAGE_HOST);
+                String iqn = details.get(CreateStoragePoolCommand.IQN);
+
+                citrixResourceBase.getIscsiSR(conn, srNameLabel, storageHost, iqn, null, null, false);
             }
+            else {
+                if (pool.getType() == StoragePoolType.NetworkFilesystem) {
+                    citrixResourceBase.getNfsSR(conn, Long.toString(pool.getId()), pool.getUuid(), pool.getHost(), pool.getPath(), pool.toString());
+                } else if (pool.getType() == StoragePoolType.IscsiLUN) {
+                    citrixResourceBase.getIscsiSR(conn, pool.getUuid(), pool.getHost(), pool.getPath(), null, null, false);
+                } else if (pool.getType() == StoragePoolType.PreSetup) {
+                } else {
+                    return new Answer(command, false, "The pool type: " + pool.getType().name() + " is not supported.");
+                }
+            }
+
             return new Answer(command, true, "success");
         } catch (final Exception e) {
             final String msg = "Catch Exception " + e.getClass().getName() + ", create StoragePool failed due to " + e.toString() + " on host:"
                     + citrixResourceBase.getHost().getUuid() + " pool: " + pool.getHost() + pool.getPath();
+
             s_logger.warn(msg, e);
+
             return new Answer(command, false, msg);
         }
     }
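
For reference, a minimal sketch of how a caller drives the new managed-storage branch
(grounded in the constants shown in the diff above; the storage host value is a
hypothetical placeholder, and "iqn"/"storagePool" are assumed to be in scope):

    Map<String, String> details = new HashMap<>();

    details.put(CreateStoragePoolCommand.DATASTORE_NAME, iqn); // the SR name label
    details.put(CreateStoragePoolCommand.STORAGE_HOST, "10.0.0.5"); // hypothetical SAN address
    details.put(CreateStoragePoolCommand.IQN, iqn);

    CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, storagePool);

    cmd.setDetails(details);
    cmd.setCreateDatastore(true); // routes execution into the managed-storage branch above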

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteStoragePoolCommandWrapper.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteStoragePoolCommandWrapper.java b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteStoragePoolCommandWrapper.java
index a9ae680..c93dd90 100644
--- a/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteStoragePoolCommandWrapper.java
+++ b/plugins/hypervisors/xenserver/src/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/CitrixDeleteStoragePoolCommandWrapper.java
@@ -19,6 +19,8 @@
 
 package com.cloud.hypervisor.xenserver.resource.wrapper.xenbase;
 
+import java.util.Map;
+
 import org.apache.log4j.Logger;
 
 import com.cloud.agent.api.Answer;
@@ -32,22 +34,40 @@ import com.xensource.xenapi.SR;
 
 @ResourceWrapper(handles =  DeleteStoragePoolCommand.class)
 public final class CitrixDeleteStoragePoolCommandWrapper extends CommandWrapper<DeleteStoragePoolCommand, Answer, CitrixResourceBase> {
-
     private static final Logger s_logger = Logger.getLogger(CitrixDeleteStoragePoolCommandWrapper.class);
 
     @Override
     public Answer execute(final DeleteStoragePoolCommand command, final CitrixResourceBase citrixResourceBase) {
         final Connection conn = citrixResourceBase.getConnection();
         final StorageFilerTO poolTO = command.getPool();
+
         try {
-            final SR sr = citrixResourceBase.getStorageRepository(conn, poolTO.getUuid());
+            final SR sr;
+
+            // If getRemoveDatastore returns true, we are using managed storage and need to retrieve the SR name label
+            // from the details Map instead of from getUuid of the StorageFilerTO instance.
+            if (command.getRemoveDatastore()) {
+                Map<String, String> details = command.getDetails();
+
+                String srNameLabel = details.get(DeleteStoragePoolCommand.DATASTORE_NAME);
+
+                sr = citrixResourceBase.getStorageRepository(conn, srNameLabel);
+            }
+            else {
+                sr = citrixResourceBase.getStorageRepository(conn, poolTO.getUuid());
+            }
+
             citrixResourceBase.removeSR(conn, sr);
+
             final Answer answer = new Answer(command, true, "success");
+
             return answer;
         } catch (final Exception e) {
-            final String msg = "DeleteStoragePoolCommand XenAPIException:" + e.getMessage() + " host:" + citrixResourceBase.getHost().getUuid() + " pool: " + poolTO.getHost()
-                    + poolTO.getPath();
-            s_logger.warn(msg, e);
+            final String msg = "DeleteStoragePoolCommand XenAPIException:" + e.getMessage() + " host:" + citrixResourceBase.getHost().getUuid() +
+                    " pool: " + poolTO.getHost() + poolTO.getPath();
+
+            s_logger.error(msg, e);
+
             return new Answer(command, false, msg);
         }
     }
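
Correspondingly, a sketch of the managed-storage delete path (names grounded in the diff
above; "iqn" is assumed to be the IQN/SR name label of the volume being removed):

    Map<String, String> details = new HashMap<>();

    details.put(DeleteStoragePoolCommand.DATASTORE_NAME, iqn); // SR name label == volume IQN

    DeleteStoragePoolCommand cmd = new DeleteStoragePoolCommand();

    cmd.setDetails(details);
    cmd.setRemoveDatastore(true); // look the SR up by name label instead of by pool UUID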

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/plugins/hypervisors/xenserver/src/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/xenserver/src/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java b/plugins/hypervisors/xenserver/src/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java
index 7de96b0..2409b6e 100644
--- a/plugins/hypervisors/xenserver/src/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java
+++ b/plugins/hypervisors/xenserver/src/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java
@@ -19,6 +19,7 @@
 package org.apache.cloudstack.storage.motion;
 
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -28,6 +29,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
 import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
@@ -39,6 +41,8 @@ import org.springframework.stereotype.Component;
 
 import com.cloud.agent.AgentManager;
 import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.CreateStoragePoolCommand;
+import com.cloud.agent.api.DeleteStoragePoolCommand;
 import com.cloud.agent.api.MigrateWithStorageAnswer;
 import com.cloud.agent.api.MigrateWithStorageCommand;
 import com.cloud.agent.api.MigrateWithStorageCompleteAnswer;
@@ -56,9 +60,12 @@ import com.cloud.host.Host;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.storage.StoragePool;
 import com.cloud.storage.VolumeVO;
+import com.cloud.storage.VolumeDetailVO;
 import com.cloud.storage.dao.VolumeDao;
+import com.cloud.storage.dao.VolumeDetailsDao;
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.utils.Pair;
+import com.cloud.utils.StringUtils;
 import com.cloud.vm.VMInstanceVO;
 import com.cloud.vm.dao.VMInstanceDao;
 
@@ -74,6 +81,8 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy {
     @Inject
     PrimaryDataStoreDao storagePoolDao;
     @Inject
+    private VolumeDetailsDao volumeDetailsDao;
+    @Inject
     VMInstanceDao instanceDao;
 
     @Override
@@ -120,25 +129,175 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy {
         callback.complete(result);
     }
 
+    private String getBasicIqn(long volumeId) {
+        VolumeDetailVO volumeDetail = volumeDetailsDao.findDetail(volumeId, PrimaryDataStoreDriver.BASIC_IQN);
+
+        return volumeDetail.getValue();
+    }
+
+    /**
+     * Tell the underlying storage plug-in to create a new volume, put it in the VAG of the destination cluster, and
+     * send a command to the destination cluster to create an SR and to attach to the SR from all hosts in the cluster.
+     */
+    private String handleManagedVolumePreMigration(VolumeInfo volumeInfo, StoragePool storagePool, Host destHost) {
+        final PrimaryDataStoreDriver pdsd = (PrimaryDataStoreDriver)volumeInfo.getDataStore().getDriver();
+
+        VolumeDetailVO volumeDetailVo = new VolumeDetailVO(volumeInfo.getId(), PrimaryDataStoreDriver.BASIC_CREATE, Boolean.TRUE.toString(), false);
+
+        volumeDetailsDao.persist(volumeDetailVo);
+
+        pdsd.createAsync(volumeInfo.getDataStore(), volumeInfo, null);
+
+        volumeDetailVo = new VolumeDetailVO(volumeInfo.getId(), PrimaryDataStoreDriver.BASIC_GRANT_ACCESS, Boolean.TRUE.toString(), false);
+
+        volumeDetailsDao.persist(volumeDetailVo);
+
+        pdsd.grantAccess(volumeInfo, destHost, volumeInfo.getDataStore());
+
+        final Map<String, String> details = new HashMap<>();
+
+        final String iqn = getBasicIqn(volumeInfo.getId());
+
+        details.put(CreateStoragePoolCommand.DATASTORE_NAME, iqn);
+
+        details.put(CreateStoragePoolCommand.IQN, iqn);
+
+        details.put(CreateStoragePoolCommand.STORAGE_HOST, storagePool.getHostAddress());
+
+        details.put(CreateStoragePoolCommand.STORAGE_PORT, String.valueOf(storagePool.getPort()));
+
+        final CreateStoragePoolCommand cmd = new CreateStoragePoolCommand(true, storagePool);
+
+        cmd.setDetails(details);
+        cmd.setCreateDatastore(true);
+
+        final Answer answer = agentMgr.easySend(destHost.getId(), cmd);
+
+        if (answer == null || !answer.getResult()) {
+            String errMsg = "Error interacting with host (related to CreateStoragePoolCommand)" +
+                    (answer != null && StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : "");
+
+            s_logger.error(errMsg);
+
+            throw new CloudRuntimeException(errMsg);
+        }
+
+        return iqn;
+    }
+
+    private void handleManagedVolumePostMigration(VolumeInfo volumeInfo, Host srcHost, VolumeObjectTO volumeTO) {
+        final Map<String, String> details = new HashMap<>();
+
+        details.put(DeleteStoragePoolCommand.DATASTORE_NAME, volumeInfo.get_iScsiName());
+
+        final DeleteStoragePoolCommand cmd = new DeleteStoragePoolCommand();
+
+        cmd.setDetails(details);
+        cmd.setRemoveDatastore(true);
+
+        final Answer answer = agentMgr.easySend(srcHost.getId(), cmd);
+
+        if (answer == null || !answer.getResult()) {
+            String errMsg = "Error interacting with host (related to DeleteStoragePoolCommand)" +
+                    (answer != null && StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : "");
+
+            s_logger.error(errMsg);
+
+            throw new CloudRuntimeException(errMsg);
+        }
+
+        final PrimaryDataStoreDriver pdsd = (PrimaryDataStoreDriver)volumeInfo.getDataStore().getDriver();
+
+        pdsd.revokeAccess(volumeInfo, srcHost, volumeInfo.getDataStore());
+
+        VolumeDetailVO volumeDetailVo = new VolumeDetailVO(volumeInfo.getId(), PrimaryDataStoreDriver.BASIC_DELETE, Boolean.TRUE.toString(), false);
+
+        volumeDetailsDao.persist(volumeDetailVo);
+
+        pdsd.deleteAsync(volumeInfo.getDataStore(), volumeInfo, null);
+
+        VolumeVO volumeVO = volDao.findById(volumeInfo.getId());
+
+        volumeVO.setPath(volumeTO.getPath());
+
+        volDao.update(volumeVO.getId(), volumeVO);
+    }
+
+    private void handleManagedVolumesAfterFailedMigration(Map<VolumeInfo, DataStore> volumeToPool, Host destHost) {
+        for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
+            VolumeInfo volumeInfo = entry.getKey();
+            StoragePool storagePool = storagePoolDao.findById(volumeInfo.getPoolId());
+
+            if (storagePool.isManaged()) {
+                final Map<String, String> details = new HashMap<>();
+
+                details.put(DeleteStoragePoolCommand.DATASTORE_NAME, getBasicIqn(volumeInfo.getId()));
+
+                final DeleteStoragePoolCommand cmd = new DeleteStoragePoolCommand();
+
+                cmd.setDetails(details);
+                cmd.setRemoveDatastore(true);
+
+                final Answer answer = agentMgr.easySend(destHost.getId(), cmd);
+
+                if (answer == null || !answer.getResult()) {
+                    String errMsg = "Error interacting with host (related to handleManagedVolumesAfterFailedMigration)" +
+                            (answer != null && StringUtils.isNotBlank(answer.getDetails()) ? ": " + answer.getDetails() : "");
+
+                    s_logger.error(errMsg);
+
+                    // No need to throw an exception here: the calling code is responsible for doing so,
+                    // regardless of whether this method succeeds.
+                    return;
+                }
+
+                final PrimaryDataStoreDriver pdsd = (PrimaryDataStoreDriver)volumeInfo.getDataStore().getDriver();
+
+                VolumeDetailVO volumeDetailVo = new VolumeDetailVO(volumeInfo.getId(), PrimaryDataStoreDriver.BASIC_REVOKE_ACCESS, Boolean.TRUE.toString(), false);
+
+                volumeDetailsDao.persist(volumeDetailVo);
+
+                pdsd.revokeAccess(volumeInfo, destHost, volumeInfo.getDataStore());
+
+                volumeDetailVo = new VolumeDetailVO(volumeInfo.getId(), PrimaryDataStoreDriver.BASIC_DELETE_FAILURE, Boolean.TRUE.toString(), false);
+
+                volumeDetailsDao.persist(volumeDetailVo);
+
+                pdsd.deleteAsync(volumeInfo.getDataStore(), volumeInfo, null);
+            }
+        }
+    }
+
     private Answer migrateVmWithVolumesAcrossCluster(VMInstanceVO vm, VirtualMachineTO to, Host srcHost, Host destHost, Map<VolumeInfo, DataStore> volumeToPool)
             throws AgentUnavailableException {
+        // Initiate migration of a virtual machine with its volumes.
 
-        // Initiate migration of a virtual machine with it's volumes.
         try {
-            List<Pair<VolumeTO, StorageFilerTO>> volumeToFilerto = new ArrayList<Pair<VolumeTO, StorageFilerTO>>();
+            List<Pair<VolumeTO, String>> volumeToStorageUuid = new ArrayList<>();
+
             for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
-                VolumeInfo volume = entry.getKey();
-                VolumeTO volumeTo = new VolumeTO(volume, storagePoolDao.findById(volume.getPoolId()));
-                StorageFilerTO filerTo = new StorageFilerTO((StoragePool)entry.getValue());
-                volumeToFilerto.add(new Pair<VolumeTO, StorageFilerTO>(volumeTo, filerTo));
+                VolumeInfo volumeInfo = entry.getKey();
+                StoragePool storagePool = storagePoolDao.findById(volumeInfo.getPoolId());
+                VolumeTO volumeTo = new VolumeTO(volumeInfo, storagePool);
+
+                if (storagePool.isManaged()) {
+                    String iqn = handleManagedVolumePreMigration(volumeInfo, storagePool, destHost);
+
+                    volumeToStorageUuid.add(new Pair<>(volumeTo, iqn));
+                }
+                else {
+                    volumeToStorageUuid.add(new Pair<>(volumeTo, ((StoragePool)entry.getValue()).getPath()));
+                }
             }
 
             // Migration across cluster needs to be done in three phases.
             // 1. Send a migrate receive command to the destination host so that it is ready to receive a vm.
             // 2. Send a migrate send command to the source host. This actually migrates the vm to the destination.
             // 3. Complete the process. Update the volume details.
-            MigrateWithStorageReceiveCommand receiveCmd = new MigrateWithStorageReceiveCommand(to, volumeToFilerto);
+
+            MigrateWithStorageReceiveCommand receiveCmd = new MigrateWithStorageReceiveCommand(to, volumeToStorageUuid);
             MigrateWithStorageReceiveAnswer receiveAnswer = (MigrateWithStorageReceiveAnswer)agentMgr.send(destHost.getId(), receiveCmd);
+
             if (receiveAnswer == null) {
                 s_logger.error("Migration with storage of vm " + vm + " to host " + destHost + " failed.");
                 throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost);
@@ -150,16 +309,22 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy {
             MigrateWithStorageSendCommand sendCmd =
                     new MigrateWithStorageSendCommand(to, receiveAnswer.getVolumeToSr(), receiveAnswer.getNicToNetwork(), receiveAnswer.getToken());
             MigrateWithStorageSendAnswer sendAnswer = (MigrateWithStorageSendAnswer)agentMgr.send(srcHost.getId(), sendCmd);
+
             if (sendAnswer == null) {
+                handleManagedVolumesAfterFailedMigration(volumeToPool, destHost);
+
                 s_logger.error("Migration with storage of vm " + vm + " to host " + destHost + " failed.");
                 throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost);
             } else if (!sendAnswer.getResult()) {
+                handleManagedVolumesAfterFailedMigration(volumeToPool, destHost);
+
                 s_logger.error("Migration with storage of vm " + vm + " failed. Details: " + sendAnswer.getDetails());
                 throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost);
             }
 
             MigrateWithStorageCompleteCommand command = new MigrateWithStorageCompleteCommand(to);
             MigrateWithStorageCompleteAnswer answer = (MigrateWithStorageCompleteAnswer)agentMgr.send(destHost.getId(), command);
+
             if (answer == null) {
                 s_logger.error("Migration with storage of vm " + vm + " failed.");
                 throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost);
@@ -168,7 +333,7 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy {
                 throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost);
             } else {
                 // Update the volume details after migration.
-                updateVolumePathsAfterMigration(volumeToPool, answer.getVolumeTos());
+                updateVolumePathsAfterMigration(volumeToPool, answer.getVolumeTos(), srcHost);
             }
 
             return answer;
@@ -181,7 +346,7 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy {
     private Answer migrateVmWithVolumesWithinCluster(VMInstanceVO vm, VirtualMachineTO to, Host srcHost, Host destHost, Map<VolumeInfo, DataStore> volumeToPool)
             throws AgentUnavailableException {
 
-        // Initiate migration of a virtual machine with it's volumes.
+        // Initiate migration of a virtual machine with its volumes.
         try {
             List<Pair<VolumeTO, StorageFilerTO>> volumeToFilerto = new ArrayList<Pair<VolumeTO, StorageFilerTO>>();
             for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
@@ -201,7 +366,7 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy {
                 throw new CloudRuntimeException("Error while migrating the vm " + vm + " to host " + destHost + ". " + answer.getDetails());
             } else {
                 // Update the volume details after migration.
-                updateVolumePathsAfterMigration(volumeToPool, answer.getVolumeTos());
+                updateVolumePathsAfterMigration(volumeToPool, answer.getVolumeTos(), srcHost);
             }
 
             return answer;
@@ -211,28 +376,39 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy {
         }
     }
 
-    private void updateVolumePathsAfterMigration(Map<VolumeInfo, DataStore> volumeToPool, List<VolumeObjectTO> volumeTos) {
+    private void updateVolumePathsAfterMigration(Map<VolumeInfo, DataStore> volumeToPool, List<VolumeObjectTO> volumeTos, Host srcHost) {
         for (Map.Entry<VolumeInfo, DataStore> entry : volumeToPool.entrySet()) {
+            VolumeInfo volumeInfo = entry.getKey();
+            StoragePool storagePool = (StoragePool)entry.getValue();
+
             boolean updated = false;
-            VolumeInfo volume = entry.getKey();
-            StoragePool pool = (StoragePool)entry.getValue();
+
             for (VolumeObjectTO volumeTo : volumeTos) {
-                if (volume.getId() == volumeTo.getId()) {
-                    VolumeVO volumeVO = volDao.findById(volume.getId());
-                    Long oldPoolId = volumeVO.getPoolId();
-                    volumeVO.setPath(volumeTo.getPath());
-                    volumeVO.setFolder(pool.getPath());
-                    volumeVO.setPodId(pool.getPodId());
-                    volumeVO.setPoolId(pool.getId());
-                    volumeVO.setLastPoolId(oldPoolId);
-                    volDao.update(volume.getId(), volumeVO);
+                if (volumeInfo.getId() == volumeTo.getId()) {
+                    if (storagePool.isManaged()) {
+                        handleManagedVolumePostMigration(volumeInfo, srcHost, volumeTo);
+                    }
+                    else {
+                        VolumeVO volumeVO = volDao.findById(volumeInfo.getId());
+                        Long oldPoolId = volumeVO.getPoolId();
+
+                        volumeVO.setPath(volumeTo.getPath());
+                        volumeVO.setFolder(storagePool.getPath());
+                        volumeVO.setPodId(storagePool.getPodId());
+                        volumeVO.setPoolId(storagePool.getId());
+                        volumeVO.setLastPoolId(oldPoolId);
+
+                        volDao.update(volumeInfo.getId(), volumeVO);
+                    }
+
                     updated = true;
+
                     break;
                 }
             }
 
             if (!updated) {
-                s_logger.error("Volume path wasn't updated for volume " + volume + " after it was migrated.");
+                s_logger.error("The volume path wasn't updated for volume '" + volumeInfo + "' after it was migrated.");
             }
         }
     }
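
The command dispatches above all share one pattern: send the command, then fail on a
null or unsuccessful Answer. A small hypothetical helper (not part of this commit)
could centralize the null-safe handling of Answer.getDetails():

    // Hypothetical helper: fail fast on a null or unsuccessful Answer
    // without risking an NPE when reading its details.
    private void checkAnswer(Answer answer, String context) {
        if (answer == null || !answer.getResult()) {
            String details = answer != null && StringUtils.isNotBlank(answer.getDetails())
                    ? ": " + answer.getDetails() : "";
            String errMsg = "Error interacting with host (related to " + context + ")" + details;

            s_logger.error(errMsg);

            throw new CloudRuntimeException(errMsg);
        }
    }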

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/plugins/hypervisors/xenserver/test/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/XenServer610WrapperTest.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/xenserver/test/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/XenServer610WrapperTest.java b/plugins/hypervisors/xenserver/test/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/XenServer610WrapperTest.java
index f294af1..8fa68f5 100644
--- a/plugins/hypervisors/xenserver/test/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/XenServer610WrapperTest.java
+++ b/plugins/hypervisors/xenserver/test/com/cloud/hypervisor/xenserver/resource/wrapper/xenbase/XenServer610WrapperTest.java
@@ -204,9 +204,9 @@ public class XenServer610WrapperTest {
         final StorageFilerTO storage1 = Mockito.mock(StorageFilerTO.class);
         final StorageFilerTO storage2 = Mockito.mock(StorageFilerTO.class);
 
-        final List<Pair<VolumeTO, StorageFilerTO>> volumeToFiler = new ArrayList<Pair<VolumeTO, StorageFilerTO>>();
-        volumeToFiler.add(new Pair<VolumeTO, StorageFilerTO>(vol1, storage1));
-        volumeToFiler.add(new Pair<VolumeTO, StorageFilerTO>(vol2, storage2));
+        final List<Pair<VolumeTO, String>> volumeToFiler = new ArrayList<>();
+        volumeToFiler.add(new Pair<>(vol1, storage1.getPath()));
+        volumeToFiler.add(new Pair<>(vol2, storage2.getPath()));
 
         final NicTO nicTO1 = Mockito.mock(NicTO.class);
         final NicTO nicTO2 = Mockito.mock(NicTO.class);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java
----------------------------------------------------------------------
diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java
index af969e1..ccc1bdc 100644
--- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java
+++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java
@@ -94,6 +94,8 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
     private static final long MIN_IOPS_FOR_SNAPSHOT_VOLUME = 100L;
     private static final long MAX_IOPS_FOR_SNAPSHOT_VOLUME = 20000L;
 
+    private static final String BASIC_SF_ID = "basicSfId";
+
     @Inject private AccountDao accountDao;
     @Inject private AccountDetailsDao accountDetailsDao;
     @Inject private ClusterDao clusterDao;
@@ -153,7 +155,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
         Preconditions.checkArgument(host != null, "'host' should not be 'null'");
         Preconditions.checkArgument(dataStore != null, "'dataStore' should not be 'null'");
 
-        long sfVolumeId = getSolidFireVolumeId(dataObject);
+        long sfVolumeId = getSolidFireVolumeId(dataObject, true);
         long clusterId = host.getClusterId();
         long storagePoolId = dataStore.getId();
 
@@ -215,7 +217,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
             return;
         }
 
-        long sfVolumeId = getSolidFireVolumeId(dataObject);
+        long sfVolumeId = getSolidFireVolumeId(dataObject, false);
         long clusterId = host.getClusterId();
         long storagePoolId = dataStore.getId();
 
@@ -252,9 +254,31 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
         }
     }
 
-    private long getSolidFireVolumeId(DataObject dataObject) {
+    private long getSolidFireVolumeId(DataObject dataObject, boolean grantAccess) {
         if (dataObject.getType() == DataObjectType.VOLUME) {
-            return Long.parseLong(((VolumeInfo)dataObject).getFolder());
+            final VolumeInfo volumeInfo = (VolumeInfo)dataObject;
+            final long volumeId = volumeInfo.getId();
+
+            if (grantAccess && isBasicGrantAccess(volumeId)) {
+                volumeDetailsDao.removeDetail(volumeInfo.getId(), BASIC_GRANT_ACCESS);
+
+                final Long sfVolumeId = getBasicSfVolumeId(volumeId);
+
+                Preconditions.checkNotNull(sfVolumeId, "'sfVolumeId' should not be 'null' (basic grant access).");
+
+                return sfVolumeId;
+            }
+            else if (!grantAccess && isBasicRevokeAccess(volumeId)) {
+                volumeDetailsDao.removeDetail(volumeInfo.getId(), BASIC_REVOKE_ACCESS);
+
+                final Long sfVolumeId = getBasicSfVolumeId(volumeId);
+
+                Preconditions.checkNotNull(sfVolumeId, "'sfVolumeId' should not be 'null' (basic revoke access).");
+
+                return sfVolumeId;
+            }
+
+            return Long.parseLong(volumeInfo.getFolder());
         }
 
         if (dataObject.getType() == DataObjectType.SNAPSHOT) {
@@ -271,7 +295,7 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
             return getVolumeIdFrom_iScsiPath(((TemplateInfo)dataObject).getInstallPath());
         }
 
-        throw new CloudRuntimeException("Invalid DataObjectType (" + dataObject.getType() + ") passed to getSolidFireVolumeId(DataObject)");
+        throw new CloudRuntimeException("Invalid DataObjectType (" + dataObject.getType() + ") passed to getSolidFireVolumeId(DataObject, boolean)");
     }
 
     private long getVolumeIdFrom_iScsiPath(String iScsiPath) {
@@ -313,10 +337,11 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
 
     private SolidFireUtil.SolidFireVolume createSolidFireVolume(SolidFireUtil.SolidFireConnection sfConnection, DataObject dataObject, long sfAccountId) {
         long storagePoolId = dataObject.getDataStore().getId();
-        Long minIops = null;
-        Long maxIops = null;
-        Long volumeSize = dataObject.getSize();
-        String volumeName = null;
+
+        final Long minIops;
+        final Long maxIops;
+        final Long volumeSize;
+        final String volumeName;
 
         final Map<String, String> mapAttributes;
 
@@ -647,6 +672,58 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
         snapshotDetailsDao.remove(snapshotDetails.getId());
     }
 
+    private Long getBasicSfVolumeId(long volumeId) {
+        VolumeDetailVO volumeDetail = volumeDetailsDao.findDetail(volumeId, BASIC_SF_ID);
+
+        if (volumeDetail != null && volumeDetail.getValue() != null) {
+            return Long.valueOf(volumeDetail.getValue());
+        }
+
+        return null;
+    }
+
+    private String getBasicIqn(long volumeId) {
+        VolumeDetailVO volumeDetail = volumeDetailsDao.findDetail(volumeId, BASIC_IQN);
+
+        if (volumeDetail != null && volumeDetail.getValue() != null) {
+            return volumeDetail.getValue();
+        }
+
+        return null;
+    }
+
+    // If isBasicCreate returns true, the calling code simply wants us to create a SolidFire volume with the specified
+    // characteristics. We do not update the cloud.volumes table with this info.
+    private boolean isBasicCreate(long volumeId) {
+        return getBooleanValueFromVolumeDetails(volumeId, BASIC_CREATE);
+    }
+
+    private boolean isBasicDelete(long volumeId) {
+        return getBooleanValueFromVolumeDetails(volumeId, BASIC_DELETE);
+    }
+
+    private boolean isBasicDeleteFailure(long volumeId) {
+        return getBooleanValueFromVolumeDetails(volumeId, BASIC_DELETE_FAILURE);
+    }
+
+    private boolean isBasicGrantAccess(long volumeId) {
+        return getBooleanValueFromVolumeDetails(volumeId, BASIC_GRANT_ACCESS);
+    }
+
+    private boolean isBasicRevokeAccess(long volumeId) {
+        return getBooleanValueFromVolumeDetails(volumeId, BASIC_REVOKE_ACCESS);
+    }
+
+    private boolean getBooleanValueFromVolumeDetails(long volumeId, String name) {
+        VolumeDetailVO volumeDetail = volumeDetailsDao.findDetail(volumeId, name);
+
+        if (volumeDetail != null && volumeDetail.getValue() != null) {
+            return Boolean.parseBoolean(volumeDetail.getValue());
+        }
+
+        return false;
+    }
+
     private long getCsIdForCloning(long volumeId, String cloneOf) {
         VolumeDetailVO volumeDetail = volumeDetailsDao.findDetail(volumeId, cloneOf);
 
@@ -788,11 +865,13 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
             LOGGER.error(errMsg);
         }
 
-        CommandResult result = new CommandResult();
+        if (callback != null) {
+            CommandResult result = new CommandResult();
 
-        result.setResult(errMsg);
+            result.setResult(errMsg);
 
-        callback.complete(result);
+            callback.complete(result);
+        }
     }
 
     @Override
@@ -950,19 +1029,43 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
         snapshotDetailsDao.persist(snapshotDetail);
     }
 
+    private void addBasicCreateInfoToVolumeDetails(long volumeId, SolidFireUtil.SolidFireVolume sfVolume) {
+        VolumeDetailVO volumeDetailVo = new VolumeDetailVO(volumeId, BASIC_SF_ID, String.valueOf(sfVolume.getId()), false);
+
+        volumeDetailsDao.persist(volumeDetailVo);
+
+        volumeDetailVo = new VolumeDetailVO(volumeId, BASIC_IQN, sfVolume.getIqn(), false);
+
+        volumeDetailsDao.persist(volumeDetailVo);
+    }
+
     private String createVolume(VolumeInfo volumeInfo, long storagePoolId) {
-        verifySufficientBytesForStoragePool(volumeInfo, storagePoolId);
-        verifySufficientIopsForStoragePool(volumeInfo.getMinIops() != null ? volumeInfo.getMinIops() : getDefaultMinIops(storagePoolId), storagePoolId);
+        boolean isBasicCreate = isBasicCreate(volumeInfo.getId());
+
+        if (!isBasicCreate) {
+            verifySufficientBytesForStoragePool(volumeInfo, storagePoolId);
+            verifySufficientIopsForStoragePool(volumeInfo.getMinIops() != null ? volumeInfo.getMinIops() : getDefaultMinIops(storagePoolId), storagePoolId);
+        }
 
         SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao);
 
         long sfAccountId = getCreateSolidFireAccountId(sfConnection, volumeInfo.getAccountId(), storagePoolId);
 
+        SolidFireUtil.SolidFireVolume sfVolume;
+
+        if (isBasicCreate) {
+            sfVolume = createSolidFireVolume(sfConnection, volumeInfo, sfAccountId);
+
+            volumeDetailsDao.removeDetail(volumeInfo.getId(), BASIC_CREATE);
+
+            addBasicCreateInfoToVolumeDetails(volumeInfo.getId(), sfVolume);
+
+            return sfVolume.getIqn();
+        }
+
         long csSnapshotId = getCsIdForCloning(volumeInfo.getId(), "cloneOfSnapshot");
         long csTemplateId = getCsIdForCloning(volumeInfo.getId(), "cloneOfTemplate");
 
-        SolidFireUtil.SolidFireVolume sfVolume;
-
         if (csSnapshotId > 0) {
             // We are supposed to create a clone of the underlying volume or snapshot that supports the CloudStack snapshot.
             sfVolume = createClone(sfConnection, csSnapshotId, volumeInfo, sfAccountId, storagePoolId, DataObjectType.SNAPSHOT);
@@ -1083,23 +1186,66 @@ public class SolidFirePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
         return iqn;
     }
 
+    private void performBasicDelete(SolidFireUtil.SolidFireConnection sfConnection, long volumeId) {
+        Long sfVolumeId = getBasicSfVolumeId(volumeId);
+
+        Preconditions.checkNotNull(sfVolumeId, "'sfVolumeId' should not be 'null'.");
+
+        String iqn = getBasicIqn(volumeId);
+
+        Preconditions.checkNotNull(iqn, "'iqn' should not be 'null'.");
+
+        VolumeVO volumeVO = volumeDao.findById(volumeId);
+
+        SolidFireUtil.deleteSolidFireVolume(sfConnection, Long.parseLong(volumeVO.getFolder()));
+
+        volumeVO.setFolder(String.valueOf(sfVolumeId));
+        volumeVO.set_iScsiName(iqn);
+
+        volumeDao.update(volumeId, volumeVO);
+
+        volumeDetailsDao.removeDetail(volumeId, BASIC_SF_ID);
+        volumeDetailsDao.removeDetail(volumeId, BASIC_IQN);
+        volumeDetailsDao.removeDetail(volumeId, BASIC_DELETE);
+    }
+
+    private void performBasicDeleteFailure(SolidFireUtil.SolidFireConnection sfConnection, long volumeId) {
+        Long sfVolumeId = getBasicSfVolumeId(volumeId);
+
+        Preconditions.checkNotNull(sfVolumeId, "'sfVolumeId' should not be 'null'.");
+
+        SolidFireUtil.deleteSolidFireVolume(sfConnection, sfVolumeId);
+
+        volumeDetailsDao.removeDetail(volumeId, BASIC_SF_ID);
+        volumeDetailsDao.removeDetail(volumeId, BASIC_IQN);
+        volumeDetailsDao.removeDetail(volumeId, BASIC_DELETE_FAILURE);
+    }
+
     private void deleteVolume(VolumeInfo volumeInfo, long storagePoolId) {
         try {
             long volumeId = volumeInfo.getId();
 
             SolidFireUtil.SolidFireConnection sfConnection = SolidFireUtil.getSolidFireConnection(storagePoolId, storagePoolDetailsDao);
 
-            deleteSolidFireVolume(sfConnection, volumeInfo);
+            if (isBasicDelete(volumeId)) {
+                performBasicDelete(sfConnection, volumeId);
+            }
+            else if (isBasicDeleteFailure(volumeId)) {
+                performBasicDeleteFailure(sfConnection, volumeId);
+            }
+            else {
+                deleteSolidFireVolume(sfConnection, volumeInfo);
 
-            volumeDetailsDao.removeDetails(volumeId);
+                volumeDetailsDao.removeDetails(volumeId);
 
-            StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
+                StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
 
-            long usedBytes = getUsedBytes(storagePool, volumeId);
+                long usedBytes = getUsedBytes(storagePool, volumeId);
 
-            storagePool.setUsedBytes(usedBytes < 0 ? 0 : usedBytes);
+                storagePool.setUsedBytes(usedBytes < 0 ? 0 : usedBytes);
 
-            storagePoolDao.update(storagePoolId, storagePool);
+                storagePoolDao.update(storagePoolId, storagePool);
+            }
         }
         catch (Exception ex) {
             LOGGER.debug(SolidFireUtil.LOG_PREFIX + "Failed to delete SolidFire volume. CloudStack volume ID: " + volumeInfo.getId(), ex);
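
To illustrate the volume-detail flag protocol these branches consume (a sketch grounded
in the XenServerStorageMotionStrategy code earlier in this commit; no new API is
introduced): the caller persists a marker detail, then invokes the driver, which removes
the marker and records BASIC_SF_ID/BASIC_IQN for the later delete/cleanup steps.

    // Caller side (see handleManagedVolumePreMigration above): request a "basic" create.
    volumeDetailsDao.persist(new VolumeDetailVO(volumeInfo.getId(),
            PrimaryDataStoreDriver.BASIC_CREATE, Boolean.TRUE.toString(), false));

    // The driver detects BASIC_CREATE, creates only the SolidFire volume, and stores
    // BASIC_SF_ID and BASIC_IQN as volume details; the callback may be null here.
    pdsd.createAsync(volumeInfo.getDataStore(), volumeInfo, null);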

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/server/src/com/cloud/server/ManagementServerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java
index 60b44d7..82f8030 100644
--- a/server/src/com/cloud/server/ManagementServerImpl.java
+++ b/server/src/com/cloud/server/ManagementServerImpl.java
@@ -1205,12 +1205,15 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
                     srcHost.getHypervisorType(), srcHost.getHypervisorVersion());
             allHosts = allHostsPair.first();
             allHosts.remove(srcHost);
+
             for (final VolumeVO volume : volumes) {
-                final Long volClusterId = _poolDao.findById(volume.getPoolId()).getClusterId();
-                // only check for volume which are not in zone wide primary store, as only those may require storage motion
-                if (volClusterId != null) {
-                    for (final Iterator<HostVO> iterator = allHosts.iterator(); iterator.hasNext();) {
-                        final Host host = iterator.next();
+                final StoragePool storagePool = _poolDao.findById(volume.getPoolId());
+                final Long volClusterId = storagePool.getClusterId();
+
+                for (final Iterator<HostVO> iterator = allHosts.iterator(); iterator.hasNext();) {
+                    final Host host = iterator.next();
+
+                    if (volClusterId != null) {
                         if (!host.getClusterId().equals(volClusterId) || usesLocal) {
                             if (hasSuitablePoolsForVolume(volume, host, vmProfile)) {
                                 requiresStorageMotion.put(host, true);
@@ -1219,8 +1222,16 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
                             }
                         }
                     }
+                    else {
+                        if (storagePool.isManaged()) {
+                            if (!srcHost.getClusterId().equals(host.getClusterId())) {
+                                requiresStorageMotion.put(host, true);
+                            }
+                        }
+                    }
                 }
             }
+
             plan = new DataCenterDeployment(srcHost.getDataCenterId(), null, null, null, null, null);
         } else {
             final Long cluster = srcHost.getClusterId();
@@ -1249,7 +1260,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe
         }
 
         for (final HostAllocator allocator : hostAllocators) {
-            if  (canMigrateWithStorage) {
+            if (canMigrateWithStorage) {
                 suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, allHosts, HostAllocator.RETURN_UPTO_ALL, false);
             } else {
                 suitableHosts = allocator.allocateTo(vmProfile, plan, Host.Type.Routing, excludes, HostAllocator.RETURN_UPTO_ALL, false);
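
One subtlety in the cluster check above: getClusterId() returns a boxed Long, so == and
!= compare references rather than values, which is why equals() is used. A minimal
illustration of the pitfall:

    Long a = 1000L;
    Long b = 1000L;

    System.out.println(a == b);      // false - distinct boxed objects outside the cache
    System.out.println(a.equals(b)); // true  - value comparison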

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/test/integration/plugins/solidfire/TestAddRemoveHosts.py
----------------------------------------------------------------------
diff --git a/test/integration/plugins/solidfire/TestAddRemoveHosts.py b/test/integration/plugins/solidfire/TestAddRemoveHosts.py
index 518d022..a13c61a 100644
--- a/test/integration/plugins/solidfire/TestAddRemoveHosts.py
+++ b/test/integration/plugins/solidfire/TestAddRemoveHosts.py
@@ -21,6 +21,8 @@ import SignedAPICall
 import time
 import XenAPI
 
+from util import sf_util
+
 # All tests inherit from cloudstackTestCase
 from marvin.cloudstackTestCase import cloudstackTestCase
 
@@ -37,6 +39,15 @@ from marvin.lib.utils import cleanup_resources
 
 from solidfire import solidfire_element_api as sf_api
 
+# Prerequisites:
+#  Only one zone
+#  Only one pod
+#  Only one cluster (two hosts with another added/removed during the tests)
+#
+# Running the tests:
+#  Set a breakpoint on each test after the first one. When the breakpoint is hit, reset the third
+#   host to a snapshot state and re-start it. Once it's up and running, run the test code.
+
 
 class TestData:
     account = "account"
@@ -238,7 +249,7 @@ class TestAddRemoveHosts(cloudstackTestCase):
         try:
             cleanup_resources(cls.apiClient, cls._cleanup)
 
-            cls._purge_solidfire_volumes()
+            sf_util.purge_solidfire_volumes(cls.sf_client)
         except Exception as e:
             logging.debug("Exception in tearDownClass(cls): %s" % e)
 
@@ -286,7 +297,7 @@ class TestAddRemoveHosts(cloudstackTestCase):
 
         root_volume = self._get_root_volume(self.virtual_machine)
 
-        sf_iscsi_name = self._get_iqn(root_volume)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, root_volume, self)
 
         self._perform_add_remove_host(primary_storage.id, sf_iscsi_name)
 
@@ -342,7 +353,7 @@ class TestAddRemoveHosts(cloudstackTestCase):
 
         root_volume = self._get_root_volume(self.virtual_machine)
 
-        sf_iscsi_name = self._get_iqn(root_volume)
+        sf_iscsi_name = sf_util.get_iqn(self.cs_api, root_volume, self)
 
         primarystorage2 = self.testdata[TestData.primaryStorage2]
 
@@ -596,19 +607,6 @@ class TestAddRemoveHosts(cloudstackTestCase):
 
         self.assert_(False, "Unable to locate the ROOT volume of the VM with the following ID: " + str(vm.id))
 
-    def _get_iqn(self, volume):
-        # Get volume IQN
-        sf_iscsi_name_request = {'volumeid': volume.id}
-        # put this commented line back once PR 1403 is in
-        # sf_iscsi_name_result = self.cs_api.getVolumeiScsiName(sf_iscsi_name_request)
-        sf_iscsi_name_result = self.cs_api.getSolidFireVolumeIscsiName(sf_iscsi_name_request)
-        # sf_iscsi_name = sf_iscsi_name_result['apivolumeiscsiname']['volumeiScsiName']
-        sf_iscsi_name = sf_iscsi_name_result['apisolidfirevolumeiscsiname']['solidFireVolumeIscsiName']
-
-        self._check_iscsi_name(sf_iscsi_name)
-
-        return sf_iscsi_name
-
     def _get_iqn_2(self, primary_storage):
         sql_query = "Select path From storage_pool Where uuid = '" + str(primary_storage.id) + "'"
 
@@ -617,13 +615,6 @@ class TestAddRemoveHosts(cloudstackTestCase):
 
         return sql_result[0][0]
 
-    def _check_iscsi_name(self, sf_iscsi_name):
-        self.assertEqual(
-            sf_iscsi_name[0],
-            "/",
-            "The iSCSI name needs to start with a forward slash."
-        )
-
     def _get_host_iscsi_iqns(self):
         hosts = self.xen_session.xenapi.host.get_all()
 
@@ -687,24 +678,3 @@ class TestAddRemoveHosts(cloudstackTestCase):
         for host_iscsi_iqn in host_iscsi_iqns:
             # an error should occur if host_iscsi_iqn is not in sf_vag_initiators
             sf_vag_initiators.index(host_iscsi_iqn)
-
-    def _check_list(self, in_list, expected_size_of_list, err_msg):
-        self.assertEqual(
-            isinstance(in_list, list),
-            True,
-            "'in_list' is not a list."
-        )
-
-        self.assertEqual(
-            len(in_list),
-            expected_size_of_list,
-            err_msg
-        )
-
-    @classmethod
-    def _purge_solidfire_volumes(cls):
-        deleted_volumes = cls.sf_client.list_deleted_volumes()
-
-        for deleted_volume in deleted_volumes:
-            cls.sf_client.purge_deleted_volume(deleted_volume['volumeID'])
-


[4/4] git commit: updated refs/heads/master to f21477a

Posted by ra...@apache.org.
Merge pull request #1671 from mike-tutkowski/copy-vol-migration

Adding support for cross-cluster storage migration for managed storage when using XenServer

This PR adds support for cross-cluster storage migration of VMs that make use of managed storage with XenServer.

Managed storage means there is a 1:1 mapping between a virtual disk and a volume on a SAN (in the case of XenServer, an SR is placed on the SAN volume and a single virtual disk is placed in that SR).

Managed storage enables features such as storage QoS and SAN-side snapshots (roughly analogous to VMware VVols).

This PR focuses on enabling VMs that are using managed storage to be migrated across XenServer clusters.

I have successfully run the following tests on this branch:

TestVolumes.py
TestSnapshots.py
TestVMSnapshots.py
TestAddRemoveHosts.py
TestVMMigrationWithStorage.py (which is a new test that is being added with this PR)

* pr/1671:
  Adding support for cross-cluster storage migration for managed storage when using XenServer

Signed-off-by: Rajani Karuturi <ra...@accelerite.com>


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/f21477a1
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/f21477a1
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/f21477a1

Branch: refs/heads/master
Commit: f21477a1787703e27ca36999241f411fc52b4951
Parents: f31d2dd b508fb8
Author: Rajani Karuturi <ra...@accelerite.com>
Authored: Tue Sep 13 17:40:12 2016 +0530
Committer: Rajani Karuturi <ra...@accelerite.com>
Committed: Tue Sep 13 17:40:12 2016 +0530

----------------------------------------------------------------------
 api/src/com/cloud/storage/StoragePool.java      |   2 +
 .../api/MigrateWithStorageReceiveCommand.java   |  11 +-
 .../agent/test/BackupSnapshotCommandTest.java   |   3 +
 .../api/agent/test/CheckNetworkAnswerTest.java  |   3 +
 .../api/agent/test/SnapshotCommandTest.java     |   3 +
 .../api/storage/PrimaryDataStoreDriver.java     |   7 +
 .../com/cloud/vm/VirtualMachineManagerImpl.java |  88 ++-
 .../storage/datastore/db/StoragePoolVO.java     |   1 +
 ...MigrateWithStorageReceiveCommandWrapper.java |  13 +-
 .../CitrixCreateStoragePoolCommandWrapper.java  |  31 +-
 .../CitrixDeleteStoragePoolCommandWrapper.java  |  30 +-
 .../motion/XenServerStorageMotionStrategy.java  | 222 +++++-
 .../xenbase/XenServer610WrapperTest.java        |   6 +-
 .../driver/SolidFirePrimaryDataStoreDriver.java | 190 ++++-
 .../com/cloud/server/ManagementServerImpl.java  |  23 +-
 .../plugins/solidfire/TestAddRemoveHosts.py     |  58 +-
 .../plugins/solidfire/TestSnapshots.py          | 580 +++++++++++----
 .../solidfire/TestVMMigrationWithStorage.py     | 697 +++++++++++++++++++
 .../plugins/solidfire/TestVMSnapshots.py        |  74 +-
 .../plugins/solidfire/TestVolumes.py            | 548 +++++----------
 .../plugins/solidfire/util/sf_util.py           | 217 ++++++
 21 files changed, 2083 insertions(+), 724 deletions(-)
----------------------------------------------------------------------