You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@cloudstack.apache.org by bh...@apache.org on 2016/09/28 09:46:23 UTC
[32/50] [abbrv] git commit: updated refs/heads/4.9-bountycastle-daan
to b9ee34f
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/test/integration/plugins/solidfire/TestSnapshots.py
----------------------------------------------------------------------
diff --git a/test/integration/plugins/solidfire/TestSnapshots.py b/test/integration/plugins/solidfire/TestSnapshots.py
index 9c3d255..9ae10f3 100644
--- a/test/integration/plugins/solidfire/TestSnapshots.py
+++ b/test/integration/plugins/solidfire/TestSnapshots.py
@@ -21,6 +21,8 @@ import SignedAPICall
import time
import XenAPI
+from util import sf_util
+
# All tests inherit from cloudstackTestCase
from marvin.cloudstackTestCase import cloudstackTestCase
@@ -32,15 +34,17 @@ from nose.plugins.attrib import attr
from marvin.lib.base import Account, DiskOffering, ServiceOffering, Snapshot, StoragePool, Template, User, VirtualMachine, Volume
# common - commonly used methods for all tests are listed here
-from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_hosts, list_volumes
+from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_hosts, list_volumes, list_snapshots
# utils - utility classes for common cleanup, external library wrappers, etc.
-from marvin.lib.utils import cleanup_resources
+from marvin.lib.utils import cleanup_resources, wait_until
from solidfire import solidfire_element_api as sf_api
-# on April 10, 2016: Ran 3 tests in 7742.481s with three hosts
-# on May 2, 2016: Ran 3 tests in 7409.770s with two hosts
+# Prerequisites:
+# Only one zone
+# Only one pod
+# Only one cluster
class TestData():
@@ -334,7 +338,7 @@ class TestSnapshots(cloudstackTestCase):
cls.primary_storage.delete(cls.apiClient)
- cls._purge_solidfire_volumes()
+ sf_util.purge_solidfire_volumes(cls.sf_client)
except Exception as e:
logging.debug("Exception in tearDownClass(cls): %s" % e)
@@ -346,7 +350,7 @@ class TestSnapshots(cloudstackTestCase):
@attr(hypervisor='XenServer')
def test_01_create_volume_snapshot_using_sf_snapshot(self):
- self._set_supports_resign(True)
+ sf_util.set_supports_resign(True, self.dbConnection)
virtual_machine = VirtualMachine.create(
self.apiClient,
@@ -365,24 +369,24 @@ class TestSnapshots(cloudstackTestCase):
listall=True
)
- self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
vm_1_root_volume = list_volumes_response[0]
vm_1_root_volume_name = vm_1_root_volume.name
- sf_account_id = self._get_sf_account_id(self.account.id, self.primary_storage.id)
+ sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestSnapshots._sf_account_id_should_be_non_zero_int_err_msg)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
# Get snapshot information for volume from SolidFire cluster
sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID'])
- self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+ sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage)
@@ -405,27 +409,27 @@ class TestSnapshots(cloudstackTestCase):
virtual_machine.delete(self.apiClient, True)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
self._delete_and_test_snapshot(vol_snap_1)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
self._delete_and_test_snapshot(vol_snap_2)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 0, TestSnapshots._should_be_zero_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg)
virtual_machine = VirtualMachine.create(
self.apiClient,
@@ -444,22 +448,22 @@ class TestSnapshots(cloudstackTestCase):
listall=True
)
- self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
vm_1_root_volume = list_volumes_response[0]
vm_1_root_volume_name = vm_1_root_volume.name
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
# Get snapshot information for volume from SolidFire cluster
sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID'])
- self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+ sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
@@ -492,22 +496,22 @@ class TestSnapshots(cloudstackTestCase):
listall=True
)
- self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
vm_2_root_volume = list_volumes_response[0]
vm_2_root_volume_name = vm_2_root_volume.name
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg)
sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
# Get snapshot information for volume from SolidFire cluster
sf_snapshots_2 = self.sf_client.list_snapshots(volume_id=sf_volume_2['volumeID'])
- self._check_list(sf_snapshots_2, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+ sf_util.check_list(sf_snapshots_2, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
vol_snap_a = self._create_and_test_snapshot(vm_2_root_volume.id, sf_volume_2, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
@@ -518,15 +522,15 @@ class TestSnapshots(cloudstackTestCase):
volume_created_from_snapshot_name = volume_created_from_snapshot.name
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 3, TestSnapshots._should_be_three_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg)
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
- self._check_list(sf_volume_3['volumeAccessGroups'], 0, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
+ sf_util.check_list(sf_volume_3['volumeAccessGroups'], 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
volume_created_from_snapshot = virtual_machine.attach_volume(
self.apiClient,
@@ -538,9 +542,9 @@ class TestSnapshots(cloudstackTestCase):
virtual_machine.delete(self.apiClient, True)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 3, TestSnapshots._should_be_three_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg)
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
@@ -558,9 +562,9 @@ class TestSnapshots(cloudstackTestCase):
self._delete_and_test_snapshot(vol_snap_1)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg)
sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
@@ -568,9 +572,9 @@ class TestSnapshots(cloudstackTestCase):
virtual_machine_2.delete(self.apiClient, True)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
@@ -579,7 +583,7 @@ class TestSnapshots(cloudstackTestCase):
listall=True
)
- self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
data_volume = list_volumes_response[0]
@@ -588,13 +592,13 @@ class TestSnapshots(cloudstackTestCase):
data_volume.delete(self.apiClient)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 0, TestSnapshots._should_be_zero_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg)
@attr(hypervisor='XenServer')
def test_02_create_volume_snapshot_using_sf_volume(self):
- self._set_supports_resign(False)
+ sf_util.set_supports_resign(False, self.dbConnection)
virtual_machine = VirtualMachine.create(
self.apiClient,
@@ -613,24 +617,24 @@ class TestSnapshots(cloudstackTestCase):
listall=True
)
- self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
vm_1_root_volume = list_volumes_response[0]
vm_1_root_volume_name = vm_1_root_volume.name
- sf_account_id = self._get_sf_account_id(self.account.id, self.primary_storage.id)
+ sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestSnapshots._sf_account_id_should_be_non_zero_int_err_msg)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
# Get snapshot information for volume from SolidFire cluster
sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID'])
- self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+ sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage)
@@ -661,9 +665,9 @@ class TestSnapshots(cloudstackTestCase):
virtual_machine.delete(self.apiClient, True)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg)
self._delete_and_test_snapshot_2(vol_snap_1, sf_account_id, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
@@ -686,22 +690,22 @@ class TestSnapshots(cloudstackTestCase):
listall=True
)
- self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
vm_1_root_volume = list_volumes_response[0]
vm_1_root_volume_name = vm_1_root_volume.name
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
# Get snapshot information for volume from SolidFire cluster
sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID'])
- self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+ sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
sf_volume_id = sf_volume['volumeID']
sf_volume_size = sf_volume['totalSize']
@@ -740,22 +744,22 @@ class TestSnapshots(cloudstackTestCase):
listall=True
)
- self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
vm_2_root_volume = list_volumes_response[0]
vm_2_root_volume_name = vm_2_root_volume.name
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 5, TestSnapshots._should_be_five_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 5, self, TestSnapshots._should_be_five_volumes_in_list_err_msg)
sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
# Get snapshot information for volume from SolidFire cluster
sf_snapshots_2 = self.sf_client.list_snapshots(volume_id=sf_volume_2['volumeID'])
- self._check_list(sf_snapshots_2, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+ sf_util.check_list(sf_snapshots_2, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
sf_volume_id_2 = sf_volume_2['volumeID']
sf_volume_size_2 = sf_volume_2['totalSize']
@@ -770,15 +774,15 @@ class TestSnapshots(cloudstackTestCase):
volume_created_from_snapshot_name = volume_created_from_snapshot.name
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 7, TestSnapshots._should_be_seven_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 7, self, TestSnapshots._should_be_seven_volumes_in_list_err_msg)
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
- self._check_list(sf_volume_3['volumeAccessGroups'], 0, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
+ sf_util.check_list(sf_volume_3['volumeAccessGroups'], 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
volume_created_from_snapshot = virtual_machine.attach_volume(
self.apiClient,
@@ -790,9 +794,9 @@ class TestSnapshots(cloudstackTestCase):
virtual_machine.delete(self.apiClient, True)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 5, TestSnapshots._should_be_five_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 5, self, TestSnapshots._should_be_five_volumes_in_list_err_msg)
sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
@@ -811,16 +815,16 @@ class TestSnapshots(cloudstackTestCase):
virtual_machine_2.delete(self.apiClient, True)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
list_volumes_response = list_volumes(
self.apiClient,
listall=True
)
- self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
data_volume = list_volumes_response[0]
@@ -829,9 +833,9 @@ class TestSnapshots(cloudstackTestCase):
data_volume.delete(self.apiClient)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 0, TestSnapshots._should_be_zero_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg)
virtual_machine = VirtualMachine.create(
self.apiClient,
@@ -850,15 +854,15 @@ class TestSnapshots(cloudstackTestCase):
listall=True
)
- self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
vm_1_root_volume = list_volumes_response[0]
vm_1_root_volume_name = vm_1_root_volume.name
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
time.sleep(60)
@@ -873,9 +877,9 @@ class TestSnapshots(cloudstackTestCase):
sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg)
services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True}
@@ -884,13 +888,13 @@ class TestSnapshots(cloudstackTestCase):
volume_created_from_snapshot_name = volume_created_from_snapshot.name
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 3, TestSnapshots._should_be_three_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg)
sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
- self._check_list(sf_volume_2['volumeAccessGroups'], 0, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
+ sf_util.check_list(sf_volume_2['volumeAccessGroups'], 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
volume_created_from_snapshot = virtual_machine.attach_volume(
self.apiClient,
@@ -910,16 +914,16 @@ class TestSnapshots(cloudstackTestCase):
virtual_machine.delete(self.apiClient, True)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
list_volumes_response = list_volumes(
self.apiClient,
listall=True
)
- self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
vol_snap_a = self._create_and_test_snapshot_2(volume_created_from_snapshot.id, sf_volume_id_2, sf_volume_id + 4, primary_storage_db_id, sf_volume_size_2,
sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg)
@@ -931,9 +935,9 @@ class TestSnapshots(cloudstackTestCase):
data_volume.delete(self.apiClient)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
list_volumes_response = list_volumes(
self.apiClient,
@@ -950,7 +954,7 @@ class TestSnapshots(cloudstackTestCase):
@attr(hypervisor='XenServer')
def test_03_create_volume_snapshot_using_sf_volume_and_sf_snapshot(self):
- self._set_supports_resign(False)
+ sf_util.set_supports_resign(False, self.dbConnection)
virtual_machine = VirtualMachine.create(
self.apiClient,
@@ -969,24 +973,24 @@ class TestSnapshots(cloudstackTestCase):
listall=True
)
- self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
vm_1_root_volume = list_volumes_response[0]
vm_1_root_volume_name = vm_1_root_volume.name
- sf_account_id = self._get_sf_account_id(self.account.id, self.primary_storage.id)
+ sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestSnapshots._sf_account_id_should_be_non_zero_int_err_msg)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
# Get snapshot information for volume from SolidFire cluster
sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID'])
- self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+ sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage)
@@ -999,7 +1003,7 @@ class TestSnapshots(cloudstackTestCase):
vol_snap_2 = self._create_and_test_snapshot_2(vm_1_root_volume.id, sf_volume_id, sf_volume_id + 2, primary_storage_db_id, sf_volume_size,
sf_account_id, 3, TestSnapshots._should_be_three_volumes_in_list_err_msg)
- self._set_supports_resign(True)
+ sf_util.set_supports_resign(True, self.dbConnection)
vol_snap_a = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
@@ -1030,31 +1034,31 @@ class TestSnapshots(cloudstackTestCase):
listall=True
)
- self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
vm_2_root_volume = list_volumes_response[0]
vm_2_root_volume_name = vm_2_root_volume.name
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 4, TestSnapshots._should_be_four_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 4, self, TestSnapshots._should_be_four_volumes_in_list_err_msg)
sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
# Get snapshot information for volume from SolidFire cluster
sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_2['volumeID'])
- self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+ sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True}
volume_created_from_snapshot_1 = Volume.create_from_snapshot(self.apiClient, vol_snap_2.id, services, account=self.account.name, domainid=self.domain.id)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 5, TestSnapshots._should_be_five_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 5, self, TestSnapshots._should_be_five_volumes_in_list_err_msg)
volume_created_from_snapshot_1 = virtual_machine_2.attach_volume(
self.apiClient,
@@ -1086,31 +1090,31 @@ class TestSnapshots(cloudstackTestCase):
listall=True
)
- self._check_list(list_volumes_response, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
vm_3_root_volume = list_volumes_response[0]
vm_3_root_volume_name = vm_3_root_volume.name
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 6, TestSnapshots._should_be_six_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 6, self, TestSnapshots._should_be_six_volumes_in_list_err_msg)
sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, vm_3_root_volume_name)
# Get snapshot information for volume from SolidFire cluster
sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_3['volumeID'])
- self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+ sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
services = {"diskname": "Vol-A", "zoneid": self.testdata[TestData.zoneId], "size": 100, "ispublic": True}
volume_created_from_snapshot_a = Volume.create_from_snapshot(self.apiClient, vol_snap_b.id, services, account=self.account.name, domainid=self.domain.id)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 7, TestSnapshots._should_be_seven_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 7, self, TestSnapshots._should_be_seven_volumes_in_list_err_msg)
volume_created_from_snapshot_a = virtual_machine_3.attach_volume(
self.apiClient,
@@ -1120,85 +1124,320 @@ class TestSnapshots(cloudstackTestCase):
virtual_machine.delete(self.apiClient, True)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
# should still be 7 volumes because the SolidFire volume for the root disk of the VM just destroyed
# is still needed for the SolidFire snapshots
- self._check_list(sf_volumes, 7, TestSnapshots._should_be_seven_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 7, self, TestSnapshots._should_be_seven_volumes_in_list_err_msg)
virtual_machine_2.delete(self.apiClient, True)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 6, TestSnapshots._should_be_six_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 6, self, TestSnapshots._should_be_six_volumes_in_list_err_msg)
virtual_machine_3.delete(self.apiClient, True)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 5, TestSnapshots._should_be_five_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 5, self, TestSnapshots._should_be_five_volumes_in_list_err_msg)
data_volume = Volume(volume_created_from_snapshot_a.__dict__)
data_volume.delete(self.apiClient)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 4, TestSnapshots._should_be_four_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 4, self, TestSnapshots._should_be_four_volumes_in_list_err_msg)
data_volume = Volume(volume_created_from_snapshot_1.__dict__)
data_volume.delete(self.apiClient)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 3, TestSnapshots._should_be_three_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg)
self._delete_and_test_snapshot_2(vol_snap_1, sf_account_id, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg)
self._delete_and_test_snapshot(vol_snap_b)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
# should still be 2 volumes because the SolidFire volume for the root disk of the VM just destroyed
# is still needed for the SolidFire snapshots
- self._check_list(sf_volumes, 2, TestSnapshots._should_be_two_volumes_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg)
self._delete_and_test_snapshot(vol_snap_a)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, 1, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+ sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
self._delete_and_test_snapshot_2(vol_snap_2, sf_account_id, 0, TestSnapshots._should_be_zero_volumes_in_list_err_msg)
- def _set_supports_resign(self, supports_resign):
- supports_resign = str(supports_resign)
+ @attr(hypervisor='XenServer')
+ def test_04_create_volume_snapshot_using_sf_snapshot_and_archiving(self):
+ sf_util.set_supports_resign(True, self.dbConnection)
- sql_query = "Update host_details Set value = '" + supports_resign + "' Where name = 'supportsResign'"
+ virtual_machine = VirtualMachine.create(
+ self.apiClient,
+ self.testdata[TestData.virtualMachine],
+ accountid=self.account.name,
+ zoneid=self.zone.id,
+ serviceofferingid=self.compute_offering.id,
+ templateid=self.template.id,
+ domainid=self.domain.id,
+ startvm=True
+ )
- # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
- self.dbConnection.execute(sql_query)
+ list_volumes_response = list_volumes(
+ self.apiClient,
+ virtualmachineid=virtual_machine.id,
+ listall=True
+ )
- def _check_list(self, in_list, expected_size_of_list, err_msg):
- self.assertEqual(
- isinstance(in_list, list),
- True,
- "'in_list' is not a list."
+ sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+
+ vm_1_root_volume = list_volumes_response[0]
+ vm_1_root_volume_name = vm_1_root_volume.name
+
+ sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self, TestSnapshots._sf_account_id_should_be_non_zero_int_err_msg)
+
+ # Get volume information from SolidFire cluster
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+ sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+
+ sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
+
+ # Get snapshot information for volume from SolidFire cluster
+ sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID'])
+
+ sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+
+ primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage)
+
+ vol_snap_1_archive = self._create_and_test_archive_snapshot(vm_1_root_volume.id, sf_volume)
+
+ vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
+
+ vol_snap_3_archive = self._create_and_test_archive_snapshot(vm_1_root_volume.id, sf_volume)
+
+ vol_snap_4 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg)
+
+ self._delete_and_test_archive_snapshot(vol_snap_3_archive)
+
+ self._delete_and_test_snapshot(vol_snap_2)
+
+ self._delete_and_test_snapshot(vol_snap_4)
+
+ self._delete_and_test_archive_snapshot(vol_snap_1_archive)
+
+ vol_snap_1_archive = self._create_and_test_archive_snapshot(vm_1_root_volume.id, sf_volume)
+
+ vol_snap_2 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
+
+ virtual_machine.delete(self.apiClient, True)
+
+ # Get volume information from SolidFire cluster
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+ sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+
+ sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
+
+ self._delete_and_test_archive_snapshot(vol_snap_1_archive)
+
+ # Get volume information from SolidFire cluster
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+ sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+
+ sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
+
+ self._delete_and_test_snapshot(vol_snap_2)
+
+ # Get volume information from SolidFire cluster
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+ sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg)
+
+ virtual_machine = VirtualMachine.create(
+ self.apiClient,
+ self.testdata[TestData.virtualMachine],
+ accountid=self.account.name,
+ zoneid=self.zone.id,
+ serviceofferingid=self.compute_offering.id,
+ templateid=self.template.id,
+ domainid=self.domain.id,
+ startvm=True
)
- self.assertEqual(
- len(in_list),
- expected_size_of_list,
- err_msg
+ list_volumes_response = list_volumes(
+ self.apiClient,
+ virtualmachineid=virtual_machine.id,
+ listall=True
)
+ sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+
+ vm_1_root_volume = list_volumes_response[0]
+ vm_1_root_volume_name = vm_1_root_volume.name
+
+ # Get volume information from SolidFire cluster
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+ sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+
+ sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
+
+ # Get snapshot information for volume from SolidFire cluster
+ sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume['volumeID'])
+
+ sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+
+ vol_snap_1 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 1, TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)
+
+ vol_snap_2_archive = self._create_and_test_archive_snapshot(vm_1_root_volume.id, sf_volume)
+
+ vol_snap_3 = self._create_and_test_snapshot(vm_1_root_volume.id, sf_volume, primary_storage_db_id, 2, TestSnapshots._should_be_two_snapshots_in_list_err_msg)
+
+ vol_snap_4_archive = self._create_and_test_archive_snapshot(vm_1_root_volume.id, sf_volume)
+
+ services = {"displaytext": "Template-1", "name": "Template-1-name", "ostype": "CentOS 5.6 (64-bit)", "ispublic": "true"}
+
+ template = Template.create_from_snapshot(self.apiClient, vol_snap_2_archive, services)
+
+ self.cleanup.append(template)
+
+ virtual_machine_dict = {"name": "TestVM2", "displayname": "Test VM 2"}
+
+ virtual_machine_2 = VirtualMachine.create(
+ self.apiClient,
+ virtual_machine_dict,
+ accountid=self.account.name,
+ zoneid=self.zone.id,
+ serviceofferingid=self.compute_offering.id,
+ templateid=template.id,
+ domainid=self.domain.id,
+ startvm=True
+ )
+
+ list_volumes_response = list_volumes(
+ self.apiClient,
+ virtualmachineid=virtual_machine_2.id,
+ listall=True
+ )
+
+ sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+
+ vm_2_root_volume = list_volumes_response[0]
+ vm_2_root_volume_name = vm_2_root_volume.name
+
+ # Get volume information from SolidFire cluster
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+ sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg)
+
+ sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
+
+ # Get snapshot information for volume from SolidFire cluster
+ sf_snapshots_2 = self.sf_client.list_snapshots(volume_id=sf_volume_2['volumeID'])
+
+ sf_util.check_list(sf_snapshots_2, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+
+ vol_snap_a_archive = self._create_and_test_archive_snapshot(vm_2_root_volume.id, sf_volume_2)
+
+ services = {"diskname": "Vol-1", "zoneid": self.testdata[TestData.zoneId], "ispublic": True}
+
+ volume_created_from_snapshot = Volume.create_from_snapshot(self.apiClient, vol_snap_a_archive.id, services, account=self.account.name, domainid=self.domain.id)
+
+ volume_created_from_snapshot_name = volume_created_from_snapshot.name
+
+ # Get volume information from SolidFire cluster
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+ sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg)
+
+ sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
+ sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
+ sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
+
+ sf_util.check_list(sf_volume_3['volumeAccessGroups'], 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
+
+ volume_created_from_snapshot = virtual_machine.attach_volume(
+ self.apiClient,
+ volume_created_from_snapshot
+ )
+
+ self._delete_and_test_archive_snapshot(vol_snap_a_archive)
+
+ virtual_machine.delete(self.apiClient, True)
+
+ # Get volume information from SolidFire cluster
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+ sf_util.check_list(sf_volumes, 3, self, TestSnapshots._should_be_three_volumes_in_list_err_msg)
+
+ sf_volume = self._get_sf_volume_by_name(sf_volumes, vm_1_root_volume_name)
+ sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
+ sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
+
+ volume_created_from_snapshot = virtual_machine_2.attach_volume(
+ self.apiClient,
+ volume_created_from_snapshot
+ )
+
+ self._delete_and_test_archive_snapshot(vol_snap_4_archive)
+
+ self._delete_and_test_snapshot(vol_snap_1)
+
+ self._delete_and_test_archive_snapshot(vol_snap_2_archive)
+
+ self._delete_and_test_snapshot(vol_snap_3)
+
+ # Get volume information from SolidFire cluster
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+ sf_util.check_list(sf_volumes, 2, self, TestSnapshots._should_be_two_volumes_in_list_err_msg)
+
+ sf_volume_2 = self._get_sf_volume_by_name(sf_volumes, vm_2_root_volume_name)
+ sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
+
+ virtual_machine_2.delete(self.apiClient, True)
+
+ # Get volume information from SolidFire cluster
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+ sf_util.check_list(sf_volumes, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+
+ sf_volume_3 = self._get_sf_volume_by_name(sf_volumes, volume_created_from_snapshot_name)
+
+ list_volumes_response = list_volumes(
+ self.apiClient,
+ listall=True
+ )
+
+ sf_util.check_list(list_volumes_response, 1, self, TestSnapshots._should_only_be_one_volume_in_list_err_msg)
+
+ data_volume = list_volumes_response[0]
+
+ data_volume = Volume(data_volume.__dict__)
+
+ data_volume.delete(self.apiClient)
+
+ # Get volume information from SolidFire cluster
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+
+ sf_util.check_list(sf_volumes, 0, self, TestSnapshots._should_be_zero_volumes_in_list_err_msg)
+
def _check_list_not_empty(self, in_list):
self.assertEqual(
isinstance(in_list, list),
@@ -1214,7 +1453,7 @@ class TestSnapshots(cloudstackTestCase):
# used when SolidFire snapshots are being used for CloudStack volume snapshots
def _check_snapshot_details(self, sf_snapshot_details, cs_snapshot_id, sf_volume_id, sf_snapshot_id, storage_pool_id, sf_volume_size):
- self._check_list(sf_snapshot_details, 5, TestSnapshots._should_be_five_items_in_list_err_msg)
+ sf_util.check_list(sf_snapshot_details, 5, self, TestSnapshots._should_be_five_items_in_list_err_msg)
self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "takeSnapshot", "true")
self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "volumeId", sf_volume_id)
@@ -1224,7 +1463,7 @@ class TestSnapshots(cloudstackTestCase):
# used when SolidFire volumes are being used for CloudStack volume snapshots
def _check_snapshot_details_2(self, sf_snapshot_details, cs_snapshot_id, sf_volume_id, storage_pool_id, sf_volume_size):
- self._check_list(sf_snapshot_details, 5, TestSnapshots._should_be_five_items_in_list_err_msg)
+ sf_util.check_list(sf_snapshot_details, 5, self, TestSnapshots._should_be_five_items_in_list_err_msg)
self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "volumeId", sf_volume_id)
self._check_snapshot_detail(sf_snapshot_details, cs_snapshot_id, "sfStoragePoolId", storage_pool_id)
@@ -1334,19 +1573,6 @@ class TestSnapshots(cloudstackTestCase):
return sf_volume
- def _get_sf_account_id(self, cs_account_id, primary_storage_id):
- sf_account_id_request = {'accountid': cs_account_id, 'storageid': primary_storage_id}
- sf_account_id_result = self.cs_api.getSolidFireAccountId(sf_account_id_request)
- sf_account_id = sf_account_id_result['apisolidfireaccountid']['solidFireAccountId']
-
- self.assertEqual(
- isinstance(sf_account_id, int),
- True,
- TestSnapshots._sf_account_id_should_be_non_zero_int_err_msg
- )
-
- return sf_account_id
-
def _get_snapshot_detail(self, sf_snapshot_details_list, key):
for sf_snapshot_detail_dict in sf_snapshot_details_list:
if sf_snapshot_detail_dict["snapshotDetailsName"] == key:
@@ -1378,12 +1604,14 @@ class TestSnapshots(cloudstackTestCase):
volume_id=volume_id_for_snapshot
)
+ self._wait_for_snapshot_state(vol_snap.id, Snapshot.BACKED_UP)
+
sf_volume_id = sf_volume['volumeID']
# Get snapshot information for volume from SolidFire cluster
sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_id)
- self._check_list(sf_snapshots, expected_num_snapshots, snapshot_err_msg)
+ sf_util.check_list(sf_snapshots, expected_num_snapshots, self, snapshot_err_msg)
sf_snapshot = self._most_recent_sf_snapshot(sf_snapshots)
@@ -1397,6 +1625,32 @@ class TestSnapshots(cloudstackTestCase):
return vol_snap
+ # used when SolidFire snapshots are being used for CloudStack volume snapshots to create a backup on secondary storage
+ def _create_and_test_archive_snapshot(self, volume_id_for_snapshot, sf_volume):
+ sf_volume_id = sf_volume['volumeID']
+
+ # Get snapshot information for volume from SolidFire cluster
+ sf_snapshots_orig = self.sf_client.list_snapshots(sf_volume_id)
+
+ vol_snap = Snapshot.create(
+ self.apiClient,
+ volume_id=volume_id_for_snapshot,
+ locationtype=2
+ )
+
+ self._wait_for_snapshot_state(vol_snap.id, Snapshot.BACKED_UP)
+
+ # Get snapshot information for volume from SolidFire cluster
+ sf_snapshots = self.sf_client.list_snapshots(sf_volume_id)
+
+ sf_util.check_list(sf_snapshots, len(sf_snapshots_orig), self, "A new SolidFire snapshot was detected.")
+
+ vol_snap_db_id = self._get_cs_volume_snapshot_db_id(vol_snap)
+
+ self._check_snapshot_details_do_not_exist(vol_snap_db_id)
+
+ return vol_snap
+
# used when SolidFire volumes are being used for CloudStack volume snapshots
def _create_and_test_snapshot_2(self, volume_id_for_snapshot, sf_volume_id, sf_volume_id_for_volume_snapshot, primary_storage_db_id, sf_volume_size,
sf_account_id, expected_num_volumes, volume_err_msg):
@@ -1405,10 +1659,12 @@ class TestSnapshots(cloudstackTestCase):
volume_id=volume_id_for_snapshot
)
+ self._wait_for_snapshot_state(vol_snap.id, Snapshot.BACKED_UP)
+
# Get snapshot information for volume from SolidFire cluster
sf_snapshots = self.sf_client.list_snapshots(volume_id=sf_volume_id)
- self._check_list(sf_snapshots, 0, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
+ sf_util.check_list(sf_snapshots, 0, self, TestSnapshots._should_be_zero_snapshots_in_list_err_msg)
sf_snapshot_details_request = {'snapshotid': vol_snap.id}
sf_snapshot_details_response = self.cs_api.getVolumeSnapshotDetails(sf_snapshot_details_request)
@@ -1419,16 +1675,38 @@ class TestSnapshots(cloudstackTestCase):
self._check_snapshot_details_2(sf_snapshot_details, vol_snap_db_id, sf_volume_id_for_volume_snapshot, primary_storage_db_id, sf_volume_size)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
- self._check_list(sf_volumes, expected_num_volumes, volume_err_msg)
+ sf_util.check_list(sf_volumes, expected_num_volumes, self, volume_err_msg)
sf_volume_for_snapshot = self._get_sf_volume_by_id(sf_volumes, sf_volume_id_for_volume_snapshot)
- self._check_list(sf_volume_for_snapshot['volumeAccessGroups'], 0, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
+ sf_util.check_list(sf_volume_for_snapshot['volumeAccessGroups'], 0, self, TestSnapshots._should_be_zero_volume_access_groups_in_list_err_msg)
return vol_snap
+ def _wait_for_snapshot_state(self, vol_snap_id, snapshot_state):
+ retry_interval = 10
+ num_tries = 10
+
+ wait_result, return_val = wait_until(retry_interval, num_tries, TestSnapshots._check_snapshot_state, self.apiClient, vol_snap_id, snapshot_state)
+
+ if not wait_result:
+ raise Exception(return_val)
+
+ @staticmethod
+ def _check_snapshot_state(api_client, vol_snap_id, snapshot_state):
+ volume_snapshot = list_snapshots(
+ api_client,
+ id=vol_snap_id,
+ listall=True
+ )[0]
+
+ if str(volume_snapshot.state).lower() == snapshot_state.lower():
+ return True, ""
+
+ return False, "The snapshot is not in the '" + snapshot_state + "' state. State = " + str(volume_snapshot.state)
+
# used when SolidFire snapshots are being used for CloudStack volume snapshots
def _delete_and_test_snapshot(self, vol_snap):
vol_snap_id = vol_snap.id
@@ -1450,6 +1728,10 @@ class TestSnapshots(cloudstackTestCase):
self._check_snapshot_details_do_not_exist(vol_snap_db_id)
+ # used when SolidFire snapshots are being used for CloudStack volume snapshots to create a backup on secondary storage
+ def _delete_and_test_archive_snapshot(self, vol_snap):
+ vol_snap.delete(self.apiClient)
+
# used when SolidFire volumes are being used for CloudStack volume snapshots
def _delete_and_test_snapshot_2(self, vol_snap, sf_account_id, expected_num_volumes, volume_err_msg):
vol_snap_db_id = self._get_cs_volume_snapshot_db_id(vol_snap)
@@ -1459,14 +1741,6 @@ class TestSnapshots(cloudstackTestCase):
self._check_snapshot_details_do_not_exist(vol_snap_db_id)
# Get volume information from SolidFire cluster
- sf_volumes = self.sf_client.list_volumes_for_account(account_id=sf_account_id)
-
- self._check_list(sf_volumes, expected_num_volumes, volume_err_msg)
-
- @classmethod
- def _purge_solidfire_volumes(cls):
- deleted_volumes = cls.sf_client.list_deleted_volumes()
-
- for deleted_volume in deleted_volumes:
- cls.sf_client.purge_deleted_volume(deleted_volume['volumeID'])
+ sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)
+ sf_util.check_list(sf_volumes, expected_num_volumes, self, volume_err_msg)
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/b508fb86/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py
----------------------------------------------------------------------
diff --git a/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py b/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py
new file mode 100644
index 0000000..255df07
--- /dev/null
+++ b/test/integration/plugins/solidfire/TestVMMigrationWithStorage.py
@@ -0,0 +1,697 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import logging
+import random
+import SignedAPICall
+import XenAPI
+
+from util import sf_util
+
+# All tests inherit from cloudstackTestCase
+from marvin.cloudstackTestCase import cloudstackTestCase
+
+# base - contains all resources as entities and defines create, delete, list operations on them
+from marvin.lib.base import Account, DiskOffering, ServiceOffering, StoragePool, User, VirtualMachine, Volume
+
+# common - commonly used methods for all tests are listed here
+from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_hosts, list_volumes
+
+# utils - utility classes for common cleanup, external library wrappers, etc.
+from marvin.lib.utils import cleanup_resources
+
+from solidfire import solidfire_element_api as sf_api
+
+# Prerequisites:
+# Only one zone
+# Only one pod
+# Two clusters
+
+
+class TestData():
+ account = "account"
+ capacityBytes = "capacitybytes"
+ capacityIops = "capacityiops"
+ clusterId1 = "clusterId1"
+ clusterId2 = "clusterId2"
+ computeOffering1 = "computeoffering1"
+ computeOffering2 = "computeoffering2"
+ computeOffering3 = "computeoffering3"
+ diskName = "diskname"
+ diskOffering1 = "diskoffering1"
+ diskOffering2 = "diskoffering2"
+ domainId = "domainid"
+ hypervisor = "hypervisor"
+ login = "login"
+ mvip = "mvip"
+ name = "name"
+ password = "password"
+ podId = "podid"
+ port = "port"
+ primaryStorage = "primarystorage"
+ primaryStorage2 = "primarystorage2"
+ provider = "provider"
+ scope = "scope"
+ solidFire = "solidfire"
+ storageTag = "SolidFire_SAN_1"
+ storageTag2 = "SolidFire_Volume_1"
+ tags = "tags"
+ templateCacheName = "centos56-x86-64-xen"
+ templateName = "templatename"
+ testAccount = "testaccount"
+ url = "url"
+ user = "user"
+ username = "username"
+ virtualMachine = "virtualmachine"
+ virtualMachine2 = "virtualmachine2"
+ volume_1 = "volume_1"
+ xenServer = "xenserver"
+ zoneId = "zoneid"
+
+ def __init__(self):
+ self.testdata = {
+ TestData.solidFire: {
+ TestData.mvip: "192.168.139.112",
+ TestData.login: "admin",
+ TestData.password: "admin",
+ TestData.port: 443,
+ TestData.url: "https://192.168.139.112:443"
+ },
+ TestData.xenServer: {
+ TestData.username: "root",
+ TestData.password: "solidfire"
+ },
+ TestData.account: {
+ "email": "test@test.com",
+ "firstname": "John",
+ "lastname": "Doe",
+ "username": "test",
+ "password": "test"
+ },
+ TestData.testAccount: {
+ "email": "test2@test2.com",
+ "firstname": "Jane",
+ "lastname": "Doe",
+ "username": "test2",
+ "password": "test"
+ },
+ TestData.user: {
+ "email": "user@test.com",
+ "firstname": "Jane",
+ "lastname": "Doe",
+ "username": "testuser",
+ "password": "password"
+ },
+ TestData.primaryStorage: {
+ TestData.name: "SolidFire-%d" % random.randint(0, 100),
+ TestData.scope: "ZONE",
+ TestData.url: "MVIP=192.168.139.112;SVIP=10.10.8.112;" +
+ "clusterAdminUsername=admin;clusterAdminPassword=admin;" +
+ "clusterDefaultMinIops=10000;clusterDefaultMaxIops=15000;" +
+ "clusterDefaultBurstIopsPercentOfMaxIops=1.5;",
+ TestData.provider: "SolidFire",
+ TestData.tags: TestData.storageTag,
+ TestData.capacityIops: 4500000,
+ TestData.capacityBytes: 2251799813685248,
+ TestData.hypervisor: "Any",
+ TestData.zoneId: 1
+ },
+ TestData.primaryStorage2: {
+ TestData.name: "SolidFireShared-%d" % random.randint(0, 100),
+ TestData.scope: "CLUSTER",
+ TestData.url: "MVIP=192.168.139.112;SVIP=10.10.8.112;" +
+ "clusterAdminUsername=admin;clusterAdminPassword=admin;" +
+ "minIops=5000;maxIops=50000;burstIops=75000",
+ TestData.provider: "SolidFireShared",
+ TestData.tags: TestData.storageTag2,
+ TestData.capacityIops: 5000,
+ TestData.capacityBytes: 1099511627776,
+ TestData.hypervisor: "XenServer",
+ TestData.podId: 1,
+ TestData.zoneId: 1
+ },
+ TestData.virtualMachine: {
+ "name": "TestVM",
+ "displayname": "Test VM"
+ },
+ TestData.computeOffering1: {
+ "name": "SF_CO_1",
+ "displaytext": "SF_CO_1 (Min IOPS = 1,000; Max IOPS = 2,000)",
+ "cpunumber": 1,
+ "cpuspeed": 100,
+ "memory": 128,
+ "storagetype": "shared",
+ "customizediops": False,
+ "miniops": 1000,
+ "maxiops": 2000,
+ "hypervisorsnapshotreserve": 125,
+ TestData.tags: TestData.storageTag,
+ },
+ TestData.computeOffering2: {
+ "name": "SF_CO_2",
+ "displaytext": "SF_CO_2 (Min IOPS = 1,000; Max IOPS = 2,000)",
+ "cpunumber": 1,
+ "cpuspeed": 100,
+ "memory": 128,
+ "storagetype": "shared",
+ "customizediops": False,
+ "miniops": 1000,
+ "maxiops": 2000,
+ "hypervisorsnapshotreserve": 100,
+ TestData.tags: TestData.storageTag,
+ },
+ TestData.computeOffering3: {
+ "name": "SF_CO_3",
+ "displaytext": "SF_CO_3 Desc",
+ "cpunumber": 1,
+ "cpuspeed": 100,
+ "memory": 128,
+ "storagetype": "shared",
+ TestData.tags: TestData.storageTag2,
+ },
+ TestData.diskOffering1: {
+ "name": "SF_DO_1",
+ "displaytext": "SF_DO_1 (Min IOPS = 3,000; Max IOPS = 6,000)",
+ "disksize": 100,
+ "customizediops": False,
+ "miniops": 3000,
+ "maxiops": 6000,
+ "hypervisorsnapshotreserve": 125,
+ TestData.tags: TestData.storageTag,
+ "storagetype": "shared"
+ },
+ TestData.diskOffering2: {
+ "name": "SF_DO_2",
+ "displaytext": "SF_DO_2 (Min IOPS = 3,000; Max IOPS = 6,000)",
+ "disksize": 100,
+ "customizediops": False,
+ "miniops": 3000,
+ "maxiops": 6000,
+ "hypervisorsnapshotreserve": 100,
+ TestData.tags: TestData.storageTag,
+ "storagetype": "shared"
+ },
+ TestData.volume_1: {
+ TestData.diskName: "test-volume",
+ },
+ TestData.templateName: "CentOS 5.6(64-bit) no GUI (XenServer)",
+ TestData.zoneId: 1,
+ TestData.clusterId1: 1,
+ TestData.clusterId2: 2,
+ TestData.domainId: 1,
+ TestData.url: "192.168.129.50"
+ }
+
+
+class TestVMMigrationWithStorage(cloudstackTestCase):
+ _sf_account_id_should_be_non_zero_int_err_msg = "The SolidFire account ID should be a non-zero integer."
+
+ @classmethod
+ def setUpClass(cls):
+ # Set up API client
+ testclient = super(TestVMMigrationWithStorage, cls).getClsTestClient()
+ cls.apiClient = testclient.getApiClient()
+ cls.dbConnection = testclient.getDbConnection()
+
+ cls.testdata = TestData().testdata
+
+ xenserver = cls.testdata[TestData.xenServer]
+
+ # Set up xenAPI connection
+ host_ip = "https://" + \
+ list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId1], name="XenServer-6.5-1")[0].ipaddress
+
+ # Set up XenAPI connection
+ cls.xen_session_1 = XenAPI.Session(host_ip)
+
+ cls.xen_session_1.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])
+
+ # Set up xenAPI connection
+ host_ip = "https://" + \
+ list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId2], name="XenServer-6.5-3")[0].ipaddress
+
+ # Set up XenAPI connection
+ cls.xen_session_2 = XenAPI.Session(host_ip)
+
+ cls.xen_session_2.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])
+
+ # Set up SolidFire connection
+ cls.sf_client = sf_api.SolidFireAPI(endpoint_dict=cls.testdata[TestData.solidFire])
+
+ # Get Resources from Cloud Infrastructure
+ cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
+ cls.cluster_1 = list_clusters(cls.apiClient, id=cls.testdata[TestData.clusterId1])[0]
+ cls.cluster_2 = list_clusters(cls.apiClient, id=cls.testdata[TestData.clusterId2])[0]
+ cls.template = get_template(cls.apiClient, cls.zone.id, template_name=cls.testdata[TestData.templateName])
+ cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
+
+ # Create test account
+ cls.account = Account.create(
+ cls.apiClient,
+ cls.testdata["account"],
+ admin=1
+ )
+
+ # Set up connection to make customized API calls
+ cls.user = User.create(
+ cls.apiClient,
+ cls.testdata["user"],
+ account=cls.account.name,
+ domainid=cls.domain.id
+ )
+
+ url = cls.testdata[TestData.url]
+
+ api_url = "http://" + url + ":8080/client/api"
+ userkeys = User.registerUserKeys(cls.apiClient, cls.user.id)
+
+ cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey)
+
+ primarystorage = cls.testdata[TestData.primaryStorage]
+
+ cls.primary_storage = StoragePool.create(
+ cls.apiClient,
+ primarystorage
+ )
+
+ cls.compute_offering_1 = ServiceOffering.create(
+ cls.apiClient,
+ cls.testdata[TestData.computeOffering1]
+ )
+
+ cls.compute_offering_2 = ServiceOffering.create(
+ cls.apiClient,
+ cls.testdata[TestData.computeOffering2]
+ )
+
+ cls.compute_offering_3 = ServiceOffering.create(
+ cls.apiClient,
+ cls.testdata[TestData.computeOffering3]
+ )
+
+ cls.disk_offering_1 = DiskOffering.create(
+ cls.apiClient,
+ cls.testdata[TestData.diskOffering1]
+ )
+
+ cls.disk_offering_2 = DiskOffering.create(
+ cls.apiClient,
+ cls.testdata[TestData.diskOffering2]
+ )
+
+ # Resources that are to be destroyed
+ cls._cleanup = [
+ cls.compute_offering_1,
+ cls.compute_offering_2,
+ cls.compute_offering_3,
+ cls.disk_offering_1,
+ cls.disk_offering_2,
+ cls.user,
+ cls.account
+ ]
+
+ @classmethod
+ def tearDownClass(cls):
+ try:
+ cleanup_resources(cls.apiClient, cls._cleanup)
+
+ cls.primary_storage.delete(cls.apiClient)
+ except Exception as e:
+ logging.debug("Exception in tearDownClass(cls): %s" % e)
+
+ def setUp(self):
+ self.cleanup = []
+
+ def tearDown(self):
+ try:
+ cleanup_resources(self.apiClient, self.cleanup)
+
+ sf_util.purge_solidfire_volumes(self.sf_client)
+ except Exception as e:
+ logging.debug("Exception in tearDownClass(self): %s" % e)
+
    def test_01_storage_migrate_root_and_data_disks(self):
        # Migrate a VM (root disk plus an attached data disk) to the
        # destination host and back again, verifying CloudStack and SolidFire
        # state after each migration via _migrate_and_verify.
        src_host, dest_host = self._get_source_and_dest_hosts()

        virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering_1.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            hostid=src_host.id,
            startvm=True
        )

        self.cleanup.append(virtual_machine)

        # The VM's only volume at this point is its root disk.
        cs_root_volume = list_volumes(self.apiClient, listall=True, virtualmachineid=virtual_machine.id)[0]

        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self,
                                                  TestVMMigrationWithStorage._sf_account_id_should_be_non_zero_int_err_msg)

        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)

        sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self)

        cs_data_volume = Volume.create(
            self.apiClient,
            self.testdata[TestData.volume_1],
            account=self.account.name,
            domainid=self.domain.id,
            zoneid=self.zone.id,
            diskofferingid=self.disk_offering_1.id
        )

        self.cleanup.append(cs_data_volume)

        cs_data_volume = virtual_machine.attach_volume(
            self.apiClient,
            cs_data_volume
        )

        # Re-list active SolidFire volumes to pick up the newly attached disk.
        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)

        sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)

        # The helper returns the (new) SolidFire volumes backing the disks on
        # the destination; feed those into the reverse migration.
        sf_root_volume, sf_data_volume = self._migrate_and_verify(virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id,
                                                                  sf_root_volume, sf_data_volume, self.xen_session_1, self.xen_session_2)

        src_host, dest_host = dest_host, src_host

        self._migrate_and_verify(virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id, sf_root_volume, sf_data_volume,
                                 self.xen_session_2, self.xen_session_1)
+
    def test_02_storage_migrate_root_and_data_disks(self):
        # Create an additional storage pool in each cluster, then migrate a VM
        # with an attached data disk between clusters, verifying only the data
        # disk's state via _migrate_and_verify_one_disk_only.
        primarystorage2 = self.testdata[TestData.primaryStorage2]

        primary_storage_2 = StoragePool.create(
            self.apiClient,
            primarystorage2,
            clusterid=self.cluster_1.id
        )

        primary_storage_3 = StoragePool.create(
            self.apiClient,
            primarystorage2,
            clusterid=self.cluster_2.id
        )

        src_host, dest_host = self._get_source_and_dest_hosts()

        virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering_3.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            hostid=src_host.id,
            startvm=True
        )

        cs_data_volume = Volume.create(
            self.apiClient,
            self.testdata[TestData.volume_1],
            account=self.account.name,
            domainid=self.domain.id,
            zoneid=self.zone.id,
            diskofferingid=self.disk_offering_1.id
        )

        # Cleanup order matters: the VM and volume must be removed before the
        # storage pools they reside on.
        # NOTE(review): if VM/volume creation above throws, the pools created
        # earlier were never registered for cleanup and would leak -- TODO
        # confirm that is acceptable for this environment.
        self.cleanup = [
            virtual_machine,
            cs_data_volume,
            primary_storage_2,
            primary_storage_3
        ]

        cs_data_volume = virtual_machine.attach_volume(
            self.apiClient,
            cs_data_volume
        )

        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self,
                                                  TestVMMigrationWithStorage._sf_account_id_should_be_non_zero_int_err_msg)

        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)

        sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)

        # Migrate to the destination cluster, then back again; the helper
        # returns the new SolidFire volume backing the data disk.
        sf_data_volume = self._migrate_and_verify_one_disk_only(virtual_machine, dest_host, cs_data_volume, sf_account_id,
                                                                sf_data_volume, self.xen_session_1, self.xen_session_2)

        src_host, dest_host = dest_host, src_host

        self._migrate_and_verify_one_disk_only(virtual_machine, dest_host, cs_data_volume, sf_account_id, sf_data_volume,
                                               self.xen_session_2, self.xen_session_1)
+
+ # The hypervisor snapshot reserve isn't large enough for either the compute or disk offering.
+ def test_03_storage_migrate_root_and_data_disks_fail(self):
+ self._execute_migration_failure(self.compute_offering_2.id, self.disk_offering_2.id)
+
+ # The hypervisor snapshot reserve isn't large enough for the compute offering.
+ def test_04_storage_migrate_root_disk_fails(self):
+ self._execute_migration_failure(self.compute_offering_2.id, self.disk_offering_1.id)
+
+ # The hypervisor snapshot reserve isn't large enough for the disk offering.
+ def test_05_storage_migrate_data_disk_fails(self):
+ self._execute_migration_failure(self.compute_offering_1.id, self.disk_offering_2.id)
+
    def _execute_migration_failure(self, compute_offering_id, disk_offering_id):
        # Build a VM (root disk) plus an attached data disk using the given
        # offerings, then run a migration that is expected to fail and verify
        # nothing changed (via _fail_migrate_and_verify).
        src_host, dest_host = self._get_source_and_dest_hosts()

        virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=compute_offering_id,
            templateid=self.template.id,
            domainid=self.domain.id,
            hostid=src_host.id,
            startvm=True
        )

        self.cleanup.append(virtual_machine)

        # The VM's only volume at this point is its root disk.
        cs_root_volume = list_volumes(self.apiClient, listall=True, virtualmachineid=virtual_machine.id)[0]

        sf_account_id = sf_util.get_sf_account_id(self.cs_api, self.account.id, self.primary_storage.id, self,
                                                  TestVMMigrationWithStorage._sf_account_id_should_be_non_zero_int_err_msg)

        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)

        sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self)

        cs_data_volume = Volume.create(
            self.apiClient,
            self.testdata[TestData.volume_1],
            account=self.account.name,
            domainid=self.domain.id,
            zoneid=self.zone.id,
            diskofferingid=disk_offering_id
        )

        self.cleanup.append(cs_data_volume)

        cs_data_volume = virtual_machine.attach_volume(
            self.apiClient,
            cs_data_volume
        )

        # Re-list active SolidFire volumes to pick up the newly attached disk.
        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)

        sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)

        self._fail_migrate_and_verify(virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id,
                                      sf_root_volume, sf_data_volume, self.xen_session_1, self.xen_session_2)
+
+ def _get_source_and_dest_hosts(self):
+ hosts = list_hosts(self.apiClient)
+
+ for host in hosts:
+ if host.name == "XenServer-6.5-1":
+ src_host = host
+ elif host.name == "XenServer-6.5-3":
+ dest_host = host
+
+ self.assertIsNotNone(src_host, "Could not locate the source host")
+
+ self.assertIsNotNone(dest_host, "Could not locate the destination host")
+
+ return src_host, dest_host
+
    def _migrate_and_verify(self, virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id, src_sf_root_volume, src_sf_data_volume,
                            src_xen_session, dest_xen_session):
        # Migrate the VM (root + data disk) to dest_host and verify CloudStack,
        # SolidFire, and XenServer state on both sides of the migration.
        # Returns the destination SolidFire (root, data) volumes.
        self._verifyFields(cs_root_volume, src_sf_root_volume)
        self._verifyFields(cs_data_volume, src_sf_data_volume)

        virtual_machine.migrate_vm_with_volume(self.apiClient, dest_host.id)

        # Refresh the CloudStack volume records after the migration.
        cs_root_volume = self._get_updated_cs_volume(cs_root_volume.id)
        cs_data_volume = self._get_updated_cs_volume(cs_data_volume.id)

        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)

        dest_sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self)
        dest_sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)

        self._verifyFields(cs_root_volume, dest_sf_root_volume)
        self._verifyFields(cs_data_volume, dest_sf_data_volume)

        self._verify_no_basic_volume_details()

        # New SolidFire volumes back the disks on the destination: different
        # volume access group, same account, strictly greater volume IDs.
        self._verify_different_volume_access_groups(src_sf_root_volume, dest_sf_root_volume)
        self._verify_different_volume_access_groups(src_sf_data_volume, dest_sf_data_volume)

        self._verify_same_account(src_sf_root_volume, dest_sf_root_volume)
        self._verify_same_account(src_sf_data_volume, dest_sf_data_volume)

        self._verifySfVolumeIds(src_sf_root_volume, dest_sf_root_volume)
        self._verifySfVolumeIds(src_sf_data_volume, dest_sf_data_volume)

        # The source hosts' SRs should be gone; the destination SRs should exist.
        self._verify_xenserver_state(src_xen_session, src_sf_root_volume, dest_xen_session, dest_sf_root_volume)
        self._verify_xenserver_state(src_xen_session, src_sf_data_volume, dest_xen_session, dest_sf_data_volume)

        return dest_sf_root_volume, dest_sf_data_volume
+
    def _migrate_and_verify_one_disk_only(self, virtual_machine, dest_host, cs_volume, sf_account_id, src_sf_volume, src_xen_session, dest_xen_session):
        # Migrate the VM to dest_host and verify the state of a single volume.
        # Returns the destination SolidFire volume that backs cs_volume.
        self._verifyFields(cs_volume, src_sf_volume)

        virtual_machine.migrate_vm_with_volume(self.apiClient, dest_host.id)

        # Refresh the CloudStack volume record after the migration.
        cs_volume = self._get_updated_cs_volume(cs_volume.id)

        sf_volumes = sf_util.get_active_sf_volumes(self.sf_client, sf_account_id)

        dest_sf_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_volume.name, self)

        self._verifyFields(cs_volume, dest_sf_volume)

        self._verify_no_basic_volume_details()

        # A new SolidFire volume backs the disk after migration: different
        # volume access group, same account, strictly greater volume ID.
        self._verify_different_volume_access_groups(src_sf_volume, dest_sf_volume)

        self._verify_same_account(src_sf_volume, dest_sf_volume)

        self._verifySfVolumeIds(src_sf_volume, dest_sf_volume)

        # The source host's SR should be gone; the destination SR should exist.
        self._verify_xenserver_state(src_xen_session, src_sf_volume, dest_xen_session, dest_sf_volume)

        return dest_sf_volume
+
    def _fail_migrate_and_verify(self, virtual_machine, dest_host, cs_root_volume, cs_data_volume, sf_account_id, src_sf_root_volume, src_sf_data_volume,
                                 src_xen_session, dest_xen_session):
        # Attempt a migration that is expected to fail and verify the source
        # side is untouched while the destination side was rolled back.
        self._verifyFields(cs_root_volume, src_sf_root_volume)
        self._verifyFields(cs_data_volume, src_sf_data_volume)

        # Local exception type used to distinguish "migration unexpectedly
        # succeeded" from the expected failure raised by the API call.
        class MigrationException(Exception):
            def __init__(self, *args, **kwargs):
                Exception.__init__(self, *args, **kwargs)

        try:
            virtual_machine.migrate_vm_with_volume(self.apiClient, dest_host.id)

            raise MigrationException("The migration did not fail (as expected).")
        except MigrationException:
            raise
        except Exception:
            # The migration failed, as expected; swallow the API error.
            pass

        self._verify_no_basic_volume_details()

        cs_root_volume_refreshed = self._get_updated_cs_volume(cs_root_volume.id)
        cs_data_volume_refreshed = self._get_updated_cs_volume(cs_data_volume.id)

        # The CloudStack volumes should still match the source SolidFire volumes.
        self._verifyFields(cs_root_volume_refreshed, src_sf_root_volume)
        self._verifyFields(cs_data_volume_refreshed, src_sf_data_volume)

        # The destination-side SolidFire volumes must not be active.
        sf_volumes = sf_util.get_not_active_sf_volumes(self.sf_client, sf_account_id)

        dest_sf_root_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_root_volume.name, self)
        dest_sf_data_volume = sf_util.check_and_get_sf_volume(sf_volumes, cs_data_volume.name, self)

        # SRs: absent on the destination host, still present on the source host.
        self._verify_xenserver_state(dest_xen_session, dest_sf_root_volume, src_xen_session, src_sf_root_volume)
        self._verify_xenserver_state(dest_xen_session, dest_sf_data_volume, src_xen_session, src_sf_data_volume)
+
+ def _verify_different_volume_access_groups(self, src_sf_volume, dest_sf_volume):
+ src_vags = src_sf_volume['volumeAccessGroups']
+
+ sf_util.check_list(src_vags, 1, self, "'src_vags' should be a list with only one element in it.")
+
+ dest_vags = dest_sf_volume['volumeAccessGroups']
+
+ sf_util.check_list(dest_vags, 1, self, "'dest_vags' should be a list with only one element in it.")
+
+ self.assertNotEqual(src_vags[0], dest_vags[0], "The source and destination volumes should not be in the same volume access group.")
+
+ def _get_updated_cs_volume(self, cs_volume_id):
+ return list_volumes(self.apiClient, listall=True, id=cs_volume_id)[0]
+
+ def _verify_same_account(self, src_sf_volume, dest_sf_volume):
+ self.assertEqual(src_sf_volume['accountID'], dest_sf_volume['accountID'], "The source and destination volumes should be in the same SolidFire account.")
+
+ def _verifySfVolumeIds(self, src_sf_volume, dest_sf_volume):
+ self.assert_(src_sf_volume['volumeID'] < dest_sf_volume['volumeID'],
+ "The destination SolidFire root volume's ID should be greater than the id of the source one.")
+
+ # verify the name, folder, and iscsi_name
+ def _verifyFields(self, cs_volume, sf_volume):
+ self.assert_(cs_volume.name == sf_volume['name'], "The CloudStack volume name does not match the SolidFire volume name.")
+
+ cs_volume_folder = self._get_cs_volume_folder(cs_volume.id)
+
+ self.assert_(int(cs_volume_folder) == sf_volume['volumeID'], "The CloudStack folder name does not match the SolidFire volume ID.")
+
+ cs_volume_iscsi_name = self._get_cs_volume_iscsi_name(cs_volume.id)
+
+ self.assert_(cs_volume_iscsi_name == sf_util.format_iqn(sf_volume['iqn']), "The CloudStack volume iscsi_name does not match the SolidFire volume IQN.")
+
    def _get_cs_volume_property(self, cs_volume_id, volume_property):
        # Read a single column of the cloud.volumes row whose uuid matches
        # cs_volume_id, via a direct MySQL query.
        # NOTE(review): the query is built by string concatenation; the inputs
        # come from CloudStack itself, but a parameterized query would be
        # safer if the dbConnection API supports it -- TODO confirm.
        sql_query = "Select " + volume_property + " From volumes Where uuid = '" + cs_volume_id + "'"

        # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
        sql_result = self.dbConnection.execute(sql_query)

        # First column of the first row.
        return sql_result[0][0]
+
    def _get_cs_volume_folder(self, cs_volume_id):
        # Return the 'folder' column for the given CloudStack volume UUID.
        return self._get_cs_volume_property(cs_volume_id, "folder")
+
    def _get_cs_volume_iscsi_name(self, cs_volume_id):
        # Return the 'iscsi_name' column for the given CloudStack volume UUID.
        return self._get_cs_volume_property(cs_volume_id, "iscsi_name")
+
+ def _verify_no_basic_volume_details(self):
+ sql_query = "Select id From volume_details Where name like 'basic_'"
+
+ # make sure you can connect to MySQL: https://teamtreehouse.com/community/cant-connect-remotely-to-mysql-server-with-mysql-workbench
+ sql_result = self.dbConnection.execute(sql_query)
+
+ sf_util.check_list(sql_result, 0, self, "The cloud.volume_details table should not have any name fields that start with 'basic_'.")
+
+ def _verify_xenserver_state(self, xen_session_1, sf_volume_1, xen_session_2, sf_volume_2):
+ sr_name = sf_util.format_iqn(sf_volume_1["iqn"])
+
+ sf_util.check_xen_sr(sr_name, xen_session_1, self, False)
+
+ sr_name = sf_util.format_iqn(sf_volume_2["iqn"])
+
+ sf_util.check_xen_sr(sr_name, xen_session_2, self)