You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@cloudstack.apache.org by ro...@apache.org on 2021/09/02 10:34:11 UTC
[cloudstack] branch main updated: tests: component test ports/fixes
in python3 (#5082)
This is an automated email from the ASF dual-hosted git repository.
rohit pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/cloudstack.git
The following commit(s) were added to refs/heads/main by this push:
new 571a361 tests: component test ports/fixes in python3 (#5082)
571a361 is described below
commit 571a361926da8e369eeff2fa28938a39ed236d9c
Author: dahn <da...@shapeblue.com>
AuthorDate: Thu Sep 2 12:33:49 2021 +0200
tests: component test ports/fixes in python3 (#5082)
* cleanup plan
* more robust cleanup within member method
* ss_domain_limits fixed
* egress 'icmp' instead of 'all' and cleanup
* configdrive syntax, fixed to be up to par with py2
* cleanup fixed the lb secondary ip tests?
* deal with different base64 encoding in py3 for userdata
* cleanup of multiple_ips_per_nic
* cleanup and reformat of test_volumes
* cleanup and fixes for test_ps_domain_limits.py
* cleanup and fix of test_ps_limits.py
* fix occasional match of float against int
* cleanup and fix test_ps_resize_volume.py
* cleanup and fix test_snapshots
* cleanup ss_max_limits and fix for float vs int problem in API
* mere cleanup of test_volume_destroy_recover
* add missing command creation
* cleanup of test_vpc_on_host_maintenance
* cleanup test_vpc_network
* cleanup, comments and logging in test_vpc_network_lbrules
* cleanup of test_vpc_network_pfrules
* cleanup and format code for test_vpc_network_staticnatrule
* f string instead of conversion specifiers
* check http and ssh fix and cleanup (for vpc pfrules tests)
* generalise create network method
* make ip optional in creating webserver
* remove unused code and add rules to enable ssh
* more cleanup
* remove unused code and cleanup
* small cleanup, mostly precarious run environment required
* cleanup and removed unused code
* advancedsg only, cleanup, pulled in services
* reformat/cleanup
* log result of error after verify rule
* add nw_off_no_services
* tags=["TODO"] for escalations_networks
* tags=["TODO"] for organization_states
* tags=["TODO"] for browse_templates
* tags=["TODO"] for configdrive
* tags=["TODO"] for vpc_vms_deployment
* add remove network cleanup and fixes
* move tests that fail on all platforms out of the way
Co-authored-by: Daan Hoogland <da...@onecht.net>
---
.../maint/test_escalation_templates.py | 7 +-
.../maint/test_escalations_hosts.py | 0
.../maint/test_ha_pool_maintenance.py | 0
.../maint/test_vpc_host_maintenance.py | 0
.../maint/test_vpc_on_host_maintenance.py | 18 +-
.../maint/test_zone_level_local_storage_setting.py | 0
.../test_escalation_listTemplateDomainAdmin.py | 0
.../test_escalations_vpncustomergateways.py | 0
.../test_ps_resource_limits_volume.py | 0
.../test_ss_project_limits.py | 0
test/integration/broken/test_vpc_vm_life_cycle.py | 792 +++++++
.../{component => broken}/test_vpn_service.py | 0
.../{component => broken}/test_vr_metadata.py | 0
test/integration/component/maint/test_bugs.py | 7 +-
.../component/maint/test_redundant_router.py | 3 +-
.../test_redundant_router_deployment_planning.py | 6 +-
.../component/test_add_remove_network.py | 211 +-
test/integration/component/test_affinity_groups.py | 6 +-
.../component/test_base_image_updation.py | 27 +-
.../integration/component/test_browse_templates.py | 53 +-
test/integration/component/test_browse_volumes.py | 2187 ++++++++++----------
test/integration/component/test_configdrive.py | 42 +-
.../component/test_deploy_vm_userdata_reg.py | 29 +-
test/integration/component/test_egress_fw_rules.py | 85 +-
.../component/test_escalations_networks.py | 66 +-
.../component/test_escalations_templates.py | 11 +-
.../component/test_escalations_vmware.py | 12 +-
.../component/test_escalations_volumes.py | 6 +-
test/integration/component/test_lb_secondary_ip.py | 135 +-
.../component/test_multiple_ips_per_nic.py | 55 +-
.../component/test_multiple_nic_support.py | 39 +-
.../component/test_multiple_public_interfaces.py | 499 ++---
.../component/test_organization_states.py | 12 +-
.../component/test_persistent_networks.py | 6 +-
test/integration/component/test_portable_ip.py | 3 +-
.../integration/component/test_ps_domain_limits.py | 71 +-
test/integration/component/test_ps_limits.py | 58 +-
.../integration/component/test_ps_resize_volume.py | 29 +-
.../component/test_redundant_router_cleanups.py | 3 +-
.../component/test_rootvolume_resize.py | 6 +-
test/integration/component/test_shared_networks.py | 3 +-
test/integration/component/test_snapshots.py | 126 +-
.../integration/component/test_ss_domain_limits.py | 208 +-
test/integration/component/test_ss_limits.py | 11 +-
test/integration/component/test_ss_max_limits.py | 31 +-
test/integration/component/test_stopped_vm.py | 39 +-
.../component/test_volume_destroy_recover.py | 43 +-
test/integration/component/test_volumes.py | 338 ++-
test/integration/component/test_vpc_network.py | 137 +-
.../component/test_vpc_network_internal_lbrules.py | 9 +-
.../component/test_vpc_network_lbrules.py | 181 +-
.../component/test_vpc_network_pfrules.py | 301 +--
.../component/test_vpc_network_staticnatrule.py | 657 +++---
test/integration/component/test_vpc_offerings.py | 3 +-
.../component/test_vpc_vm_life_cycle.py | 776 +------
.../component/test_vpc_vms_deployment.py | 76 +-
tools/marvin/marvin/cloudstackTestCase.py | 161 +-
tools/marvin/marvin/config/test_data.py | 10 +
tools/marvin/marvin/lib/base.py | 1 +
tools/marvin/marvin/lib/common.py | 4 +-
tools/marvin/marvin/lib/utils.py | 7 +-
61 files changed, 3547 insertions(+), 4059 deletions(-)
diff --git a/test/integration/component/maint/test_escalation_templates.py b/test/integration/broken/maint/test_escalation_templates.py
similarity index 98%
rename from test/integration/component/maint/test_escalation_templates.py
rename to test/integration/broken/maint/test_escalation_templates.py
index 68012c0..1d0824a 100644
--- a/test/integration/component/maint/test_escalation_templates.py
+++ b/test/integration/broken/maint/test_escalation_templates.py
@@ -88,13 +88,8 @@ class TestlistTemplates(cloudstackTestCase):
@classmethod
def tearDownClass(cls):
- try:
- # Cleanup resources used
- cleanup_resources(cls.apiclient, cls.cleanup)
+ super(TestlistTemplates, cls).tearDownClass()
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
def RestartServers(self):
""" Restart management server and usage server """
diff --git a/test/integration/component/maint/test_escalations_hosts.py b/test/integration/broken/maint/test_escalations_hosts.py
similarity index 100%
rename from test/integration/component/maint/test_escalations_hosts.py
rename to test/integration/broken/maint/test_escalations_hosts.py
diff --git a/test/integration/component/maint/test_ha_pool_maintenance.py b/test/integration/broken/maint/test_ha_pool_maintenance.py
similarity index 100%
rename from test/integration/component/maint/test_ha_pool_maintenance.py
rename to test/integration/broken/maint/test_ha_pool_maintenance.py
diff --git a/test/integration/component/maint/test_vpc_host_maintenance.py b/test/integration/broken/maint/test_vpc_host_maintenance.py
similarity index 100%
rename from test/integration/component/maint/test_vpc_host_maintenance.py
rename to test/integration/broken/maint/test_vpc_host_maintenance.py
diff --git a/test/integration/component/maint/test_vpc_on_host_maintenance.py b/test/integration/broken/maint/test_vpc_on_host_maintenance.py
similarity index 93%
rename from test/integration/component/maint/test_vpc_on_host_maintenance.py
rename to test/integration/broken/maint/test_vpc_on_host_maintenance.py
index e1312e9..ca14d5b 100644
--- a/test/integration/component/maint/test_vpc_on_host_maintenance.py
+++ b/test/integration/broken/maint/test_vpc_on_host_maintenance.py
@@ -18,7 +18,6 @@
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
import unittest
-from marvin.lib.utils import cleanup_resources
from marvin.lib.base import (Account,
Host,
VPC,
@@ -58,6 +57,7 @@ class TestVPCHostMaintenance(cloudstackTestCase):
cls.api_client,
cls.services["vpc_offering"]
)
+ cls._cleanup.append(cls.vpc_off)
cls.vpc_off.update(cls.api_client, state='Enabled')
cls.hosts = Host.list(
cls.api_client,
@@ -93,15 +93,11 @@ class TestVPCHostMaintenance(cloudstackTestCase):
"Failed to enable maintenance mode on %s" %
host.name)
timeout = timeout - 1
-
- cls._cleanup.append(cls.vpc_off)
return
@classmethod
def tearDownClass(cls):
try:
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
for host in cls.hosts:
Host.cancelMaintenance(
cls.api_client,
@@ -117,7 +113,9 @@ class TestVPCHostMaintenance(cloudstackTestCase):
"Failed to cancel maintenance mode on %s" %
(host.name))
except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
+ raise Exception("Warning: Exception during resetting hosts maintenance : %s" % e)
+ finally:
+ super(TestVPCHostMaintenance, cls).tearDownClass()
return
def setUp(self):
@@ -138,12 +136,7 @@ class TestVPCHostMaintenance(cloudstackTestCase):
return
def tearDown(self):
- try:
- # Clean up, terminate the created network offerings
- cleanup_resources(self.apiclient, self.cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestVPCHostMaintenance, self).tearDown()
def validate_vpc_offering(self, vpc_offering):
"""Validates the VPC offering"""
@@ -217,5 +210,6 @@ class TestVPCHostMaintenance(cloudstackTestCase):
domainid=self.account.domainid,
start=False
)
+ self.cleanup.append(vpc)
self.validate_vpc_network(vpc, state='enabled')
return
diff --git a/test/integration/component/maint/test_zone_level_local_storage_setting.py b/test/integration/broken/maint/test_zone_level_local_storage_setting.py
similarity index 100%
rename from test/integration/component/maint/test_zone_level_local_storage_setting.py
rename to test/integration/broken/maint/test_zone_level_local_storage_setting.py
diff --git a/test/integration/component/test_escalation_listTemplateDomainAdmin.py b/test/integration/broken/test_escalation_listTemplateDomainAdmin.py
similarity index 100%
rename from test/integration/component/test_escalation_listTemplateDomainAdmin.py
rename to test/integration/broken/test_escalation_listTemplateDomainAdmin.py
diff --git a/test/integration/component/test_escalations_vpncustomergateways.py b/test/integration/broken/test_escalations_vpncustomergateways.py
similarity index 100%
rename from test/integration/component/test_escalations_vpncustomergateways.py
rename to test/integration/broken/test_escalations_vpncustomergateways.py
diff --git a/test/integration/component/test_ps_resource_limits_volume.py b/test/integration/broken/test_ps_resource_limits_volume.py
similarity index 100%
rename from test/integration/component/test_ps_resource_limits_volume.py
rename to test/integration/broken/test_ps_resource_limits_volume.py
diff --git a/test/integration/component/test_ss_project_limits.py b/test/integration/broken/test_ss_project_limits.py
similarity index 100%
rename from test/integration/component/test_ss_project_limits.py
rename to test/integration/broken/test_ss_project_limits.py
diff --git a/test/integration/broken/test_vpc_vm_life_cycle.py b/test/integration/broken/test_vpc_vm_life_cycle.py
new file mode 100644
index 0000000..c1868d0
--- /dev/null
+++ b/test/integration/broken/test_vpc_vm_life_cycle.py
@@ -0,0 +1,792 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+from nose.plugins.attrib import attr
+
+from component.test_vpc_vm_life_cycle import Services
+
+
+class TestVMLifeCycleSharedNwVPC(cloudstackTesTODOtCase):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.testClient = super(TestVMLifeCycleSharedNwVPC, cls).getClsTestClient()
+ cls.api_client = cls.testClient.getApiClient()
+
+ cls.services = Services().services
+ # Get Zone, Domain and templates
+ cls.domain = get_domain(cls.api_client)
+ cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
+ cls.template = get_template(
+ cls.api_client,
+ cls.zone.id,
+ cls.services["ostype"]
+ )
+ cls.services["virtual_machine"]["zoneid"] = cls.zone.id
+ cls.services["virtual_machine"]["template"] = cls.template.id
+
+ cls.service_offering = ServiceOffering.create(
+ cls.api_client,
+ cls.services["service_offering"]
+ )
+ cls.vpc_off = VpcOffering.create(
+ cls.api_client,
+ cls.services["vpc_offering"]
+ )
+ cls.vpc_off.update(cls.api_client, state='Enabled')
+
+ cls.account = Account.create(
+ cls.api_client,
+ cls.services["account"],
+ admin=True,
+ domainid=cls.domain.id
+ )
+
+ cls.services["vpc"]["cidr"] = '10.1.1.1/16'
+ cls.vpc = VPC.create(
+ cls.api_client,
+ cls.services["vpc"],
+ vpcofferingid=cls.vpc_off.id,
+ zoneid=cls.zone.id,
+ account=cls.account.name,
+ domainid=cls.account.domainid
+ )
+
+ cls.nw_off = NetworkOffering.create(
+ cls.api_client,
+ cls.services["network_offering"],
+ conservemode=False
+ )
+ # Enable Network offering
+ cls.nw_off.update(cls.api_client, state='Enabled')
+
+ # Creating network using the network offering created
+ cls.network_1 = Network.create(
+ cls.api_client,
+ cls.services["network"],
+ accountid=cls.account.name,
+ domainid=cls.account.domainid,
+ networkofferingid=cls.nw_off.id,
+ zoneid=cls.zone.id,
+ gateway='10.1.1.1',
+ vpcid=cls.vpc.id
+ )
+ cls.nw_off_no_lb = NetworkOffering.create(
+ cls.api_client,
+ cls.services["network_offering_no_lb"],
+ conservemode=False
+ )
+
+ cls.shared_nw_off = NetworkOffering.create(
+ cls.api_client,
+ cls.services["network_off_shared"],
+ conservemode=False
+ )
+ # Enable Network offering
+ cls.shared_nw_off.update(cls.api_client, state='Enabled')
+
+
+ physical_network, shared_vlan = get_free_vlan(cls.api_client, cls.zone.id)
+ if shared_vlan is None:
+ assert False, "Failed to get free vlan id for shared network creation in the zone"
+
+ #create network using the shared network offering created
+ cls.services["network"]["acltype"] = "Domain"
+ cls.services["network"]["physicalnetworkid"] = physical_network.id
+ cls.services["network"]["vlan"] = shared_vlan
+
+ # Start Ip and End Ip should be specified for shared network
+ cls.services["network"]["startip"] = '10.1.2.20'
+ cls.services["network"]["endip"] = '10.1.2.30'
+
+ # Creating network using the network offering created
+ cls.network_2 = Network.create(
+ cls.api_client,
+ cls.services["network"],
+ accountid=cls.account.name,
+ domainid=cls.account.domainid,
+ networkofferingid=cls.shared_nw_off.id,
+ zoneid=cls.zone.id,
+ gateway='10.1.2.1',
+ )
+
+ cls.vm_1 = VirtualMachine.create(
+ cls.api_client,
+ cls.services["virtual_machine"],
+ accountid=cls.account.name,
+ domainid=cls.account.domainid,
+ serviceofferingid=cls.service_offering.id,
+ networkids=[str(cls.network_1.id),
+ str(cls.network_2.id)]
+ )
+
+ cls.vm_2 = VirtualMachine.create(
+ cls.api_client,
+ cls.services["virtual_machine"],
+ accountid=cls.account.name,
+ domainid=cls.account.domainid,
+ serviceofferingid=cls.service_offering.id,
+ networkids=[str(cls.network_1.id),
+ str(cls.network_2.id)]
+ )
+
+
+ cls.vm_3 = VirtualMachine.create(
+ cls.api_client,
+ cls.services["virtual_machine"],
+ accountid=cls.account.name,
+ domainid=cls.account.domainid,
+ serviceofferingid=cls.service_offering.id,
+ networkids=[str(cls.network_1.id),
+ str(cls.network_2.id)]
+ )
+
+ cls.public_ip_1 = PublicIPAddress.create(
+ cls.api_client,
+ accountid=cls.account.name,
+ zoneid=cls.zone.id,
+ domainid=cls.account.domainid,
+ networkid=cls.network_1.id,
+ vpcid=cls.vpc.id
+ )
+ cls.lb_rule = LoadBalancerRule.create(
+ cls.api_client,
+ cls.services["lbrule"],
+ ipaddressid=cls.public_ip_1.ipaddress.id,
+ accountid=cls.account.name,
+ networkid=cls.network_1.id,
+ vpcid=cls.vpc.id,
+ domainid=cls.account.domainid
+ )
+
+ # Only the vms in the same network can be added to load balancing rule
+ # hence we can't add vm_2 with vm_1
+ cls.lb_rule.assign(cls.api_client, [cls.vm_1])
+
+ cls.public_ip_2 = PublicIPAddress.create(
+ cls.api_client,
+ accountid=cls.account.name,
+ zoneid=cls.zone.id,
+ domainid=cls.account.domainid,
+ networkid=cls.network_1.id,
+ vpcid=cls.vpc.id
+ )
+
+ cls.nat_rule = NATRule.create(
+ cls.api_client,
+ cls.vm_1,
+ cls.services["natrule"],
+ ipaddressid=cls.public_ip_2.ipaddress.id,
+ openfirewall=False,
+ networkid=cls.network_1.id,
+ vpcid=cls.vpc.id
+ )
+
+ # Opening up the ports in VPC
+ cls.nwacl_nat = NetworkACL.create(
+ cls.api_client,
+ networkid=cls.network_1.id,
+ services=cls.services["natrule"],
+ traffictype='Ingress'
+ )
+
+ cls.nwacl_lb = NetworkACL.create(
+ cls.api_client,
+ networkid=cls.network_1.id,
+ services=cls.services["lbrule"],
+ traffictype='Ingress'
+ )
+ cls.services["icmp_rule"]["protocol"] = "all"
+ cls.nwacl_internet_1 = NetworkACL.create(
+ cls.api_client,
+ networkid=cls.network_1.id,
+ services=cls.services["icmp_rule"],
+ traffictype='Egress'
+ )
+ cls._cleanup = [
+ cls.account,
+ cls.network_2,
+ cls.nw_off,
+ cls.shared_nw_off,
+ cls.vpc_off,
+ cls.service_offering,
+ ]
+ return
+
+ @classmethod
+ def tearDownClass(cls):
+ try:
+ cls.vpc_off.update(cls.api_client, state='Disabled')
+ cls.shared_nw_off.update(cls.api_client, state='Disabled')
+ cls.nw_off.update(cls.api_client, state='Disabled')
+ cleanup_resources(cls.api_client, cls._cleanup)
+ except Exception as e:
+ raise Exception("Warning: Exception during cleanup : %s" % e)
+ return
+
+ def setUp(self):
+ self.apiclient = self.testClient.getApiClient()
+ self.dbclient = self.testClient.getDbConnection()
+ self.cleanup = []
+ return
+
+ def tearDown(self):
+ try:
+ cleanup_resources(self.apiclient, self.cleanup)
+ except Exception as e:
+ raise Exception("Warning: Exception during cleanup : %s" % e)
+ return
+
+ def validate_vpc_offering(self, vpc_offering):
+ """Validates the VPC offering"""
+
+ self.debug("Check if the VPC offering is created successfully?")
+ vpc_offs = VpcOffering.list(
+ self.apiclient,
+ id=vpc_offering.id
+ )
+ self.assertEqual(
+ isinstance(vpc_offs, list),
+ True,
+ "List VPC offerings should return a valid list"
+ )
+ self.assertEqual(
+ vpc_offering.name,
+ vpc_offs[0].name,
+ "Name of the VPC offering should match with listVPCOff data"
+ )
+ self.debug(
+ "VPC offering is created successfully - %s" %
+ vpc_offering.name)
+ return
+
+ def validate_vpc_network(self, network, state=None):
+ """Validates the VPC network"""
+
+ self.debug("Check if the VPC network is created successfully?")
+ vpc_networks = VPC.list(
+ self.apiclient,
+ id=network.id
+ )
+ self.assertEqual(
+ isinstance(vpc_networks, list),
+ True,
+ "List VPC network should return a valid list"
+ )
+ self.assertEqual(
+ network.name,
+ vpc_networks[0].name,
+ "Name of the VPC network should match with listVPC data"
+ )
+ if state:
+ self.assertEqual(
+ vpc_networks[0].state,
+ state,
+ "VPC state should be '%s'" % state
+ )
+ self.debug("VPC network validated - %s" % network.name)
+ return
+
+ def validate_network_rules(self):
+ """Validating if the network rules (PF/LB) works properly or not?"""
+
+ try:
+ self.debug("Checking if we can SSH into VM_1 through %s?" %
+ (self.public_ip_1.ipaddress.ipaddress))
+ ssh_1 = self.vm_1.get_ssh_client(
+ ipaddress=self.public_ip_1.ipaddress.ipaddress,
+ reconnect=True)
+ self.debug("SSH into VM is successfully")
+
+ self.debug("Verifying if we can ping to outside world from VM?")
+ # Ping to outsite world
+ res = ssh_1.execute("ping -c 1 www.google.com")
+ # res = 64 bytes from maa03s17-in-f20.1e100.net (74.125.236.212):
+ # icmp_req=1 ttl=57 time=25.9 ms
+ # --- www.l.google.com ping statistics ---
+ # 1 packets transmitted, 1 received, 0% packet loss, time 0ms
+ # rtt min/avg/max/mdev = 25.970/25.970/25.970/0.000 ms
+ result = str(res)
+ self.assertEqual(
+ result.count("1 received"),
+ 1,
+ "Ping to outside world from VM should be successful"
+ )
+
+ self.debug("We should be allowed to ping virtual gateway")
+ self.debug("Finding the gateway corresponding to isolated network")
+ gateways = [nic.gateway for nic in self.vm_1.nic if nic.networkid == self.network_1.id]
+
+ gateway_list_validation_result = validateList(gateways)
+
+ self.assertEqual(gateway_list_validation_result[0], PASS, "gateway list validation failed due to %s" %
+ gateway_list_validation_result[2])
+
+ gateway = gateway_list_validation_result[1]
+
+ self.debug("VM gateway: %s" % gateway)
+
+ res = ssh_1.execute("ping -c 1 %s" % gateway)
+ self.debug("ping -c 1 %s: %s" % (gateway, res))
+
+ result = str(res)
+ self.assertEqual(
+ result.count("1 received"),
+ 1,
+ "Ping to VM gateway should be successful"
+ )
+ except Exception as e:
+ self.fail("Failed to SSH into VM - %s, %s" %
+ (self.public_ip_1.ipaddress.ipaddress, e))
+ return
+
+ @attr(tags=["advanced", "intervlan"], required_hardware="true")
+ def test_01_deploy_instance_in_network(self):
+ """ Test deploy an instance in VPC networks
+ """
+
+ # Validate the following
+ # 1. Successful deployment of the User VM.
+ # 2. Ping any host in the public Internet successfully.
+ # 3. Ping the gateways of the VPC's guest network and the
+ # Shared Guest Network successfully.
+
+ self.debug("Check if deployed VMs are in running state?")
+ vms = VirtualMachine.list(
+ self.apiclient,
+ account=self.account.name,
+ domainid=self.account.domainid,
+ listall=True
+ )
+ self.assertEqual(
+ isinstance(vms, list),
+ True,
+ "List VMs should return a valid response"
+ )
+ for vm in vms:
+ self.debug("VM name: %s, VM state: %s" % (vm.name, vm.state))
+ self.assertEqual(
+ vm.state,
+ "Running",
+ "Vm state should be running for each VM deployed"
+ )
+
+ self.debug("Validating if network rules are coonfigured properly?")
+ self.validate_network_rules()
+ return
+
+ @attr(tags=["advanced", "intervlan"], required_hardware="true")
+ def test_02_stop_instance_in_network(self):
+ """ Test stop an instance in VPC networks
+ """
+
+ # Validate the following
+ # 1. Stop the virtual machines.
+ # 2. Rules should be still configured on virtual router.
+
+ self.debug("Validating if network rules are coonfigured properly?")
+ self.validate_network_rules()
+
+ self.debug("Stopping one of the virtual machines in account: %s" %
+ self.account.name)
+ try:
+ self.vm_2.stop(self.apiclient)
+ except Exception as e:
+ self.fail("Failed to stop the virtual instances, %s" % e)
+
+ self.debug("Validating if network rules are coonfigured properly?")
+ self.validate_network_rules()
+ return
+
+ @attr(tags=["advanced", "intervlan"], required_hardware="true")
+ def test_03_start_instance_in_network(self):
+ """ Test start an instance in VPC networks
+ """
+
+ # Validate the following
+ # 1. Start the virtual machines.
+ # 2. Rules should be still configured on virtual router.
+
+ self.debug("Validating if network rules are coonfigured properly?")
+ self.validate_network_rules()
+
+ self.debug("Starting one of the virtual machines in account: %s" %
+ self.account.name)
+ try:
+ self.vm_2.start(self.apiclient)
+ except Exception as e:
+ self.fail("Failed to start the virtual instances, %s" % e)
+
+ self.debug("Check if the instance is in stopped state?")
+ vms = VirtualMachine.list(
+ self.apiclient,
+ id=self.vm_2.id,
+ listall=True
+ )
+ self.assertEqual(
+ isinstance(vms, list),
+ True,
+ "List virtual machines should return a valid list"
+ )
+ vm = vms[0]
+ self.assertEqual(
+ vm.state,
+ "Running",
+ "Virtual machine should be in running state"
+ )
+
+ self.debug("Validating if network rules are coonfigured properly?")
+ self.validate_network_rules()
+ return
+
+ @attr(tags=["advanced", "intervlan"], required_hardware="true")
+ def test_04_reboot_instance_in_network(self):
+ """ Test reboot an instance in VPC networks
+ """
+
+ # Validate the following
+ # 1. Reboot the virtual machines.
+ # 2. Rules should be still configured on virtual router.
+
+ self.debug("Validating if network rules are coonfigured properly?")
+ self.validate_network_rules()
+
+ self.debug("Restarting the virtual machines in account: %s" %
+ self.account.name)
+ try:
+ self.vm_1.reboot(self.apiclient)
+ self.vm_2.reboot(self.apiclient)
+ except Exception as e:
+ self.fail("Failed to reboot the virtual instances, %s" % e)
+
+ self.debug("Check if the instance is in stopped state?")
+ vms = VirtualMachine.list(
+ self.apiclient,
+ account=self.account.name,
+ domainid=self.account.domainid,
+ listall=True
+ )
+ self.assertEqual(
+ isinstance(vms, list),
+ True,
+ "List virtual machines should return a valid list"
+ )
+ for vm in vms:
+ self.assertEqual(
+ vm.state,
+ "Running",
+ "Virtual machine should be in running state"
+ )
+
+ self.debug("Validating if network rules are coonfigured properly?")
+ self.validate_network_rules()
+ return
+
+ @attr(tags=["advanced", "intervlan"], required_hardware="true")
+ def test_05_destroy_instance_in_network(self):
+ """ Test destroy an instance in VPC networks
+ """
+
+ # Validate the following
+ # 1. Destroy one of the virtual machines.
+ # 2. Rules should be still configured on virtual router.
+
+ self.debug("Destroying one of the virtual machines in account: %s" %
+ self.account.name)
+ try:
+ self.vm_2.delete(self.apiclient)
+ except Exception as e:
+ self.fail("Failed to destroy the virtual instances, %s" % e)
+
+ #Wait for expunge interval to cleanup VM
+ wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"])
+
+ self.debug("Check if the instance is in stopped state?")
+ vms = VirtualMachine.list(
+ self.apiclient,
+ id=self.vm_2.id,
+ listall=True
+ )
+ self.assertEqual(
+ vms,
+ None,
+ "List virtual machines should not return anything"
+ )
+
+ self.debug("Validating if network rules are coonfigured properly?")
+ self.validate_network_rules()
+ return
+
+ @attr(tags=["advanced", "intervlan"], required_hardware="true")
+ def test_06_recover_instance_in_network(self):
+ """ Test recover an instance in VPC networks
+ """
+
+ self.debug("Deploying vm")
+
+ self.vm_2 = VirtualMachine.create(
+ self.api_client,
+ self.services["virtual_machine"],
+ accountid=self.account.name,
+ domainid=self.account.domainid,
+ serviceofferingid=self.service_offering.id,
+ networkids=[str(self.network_1.id),
+ str(self.network_2.id)]
+ )
+
+ self.cleanup.append(self.vm_2)
+
+ try:
+ self.vm_2.delete(self.apiclient, expunge=False)
+ except Exception as e:
+ self.fail("Failed to destroy the virtual instances, %s" % e)
+
+ try:
+ self.vm_2.recover(self.apiclient)
+ except Exception as e:
+ self.fail("Failed to recover the virtual instances, %s" % e)
+
+ self.debug("Check if the instance is in stopped state?")
+ vms = VirtualMachine.list(
+ self.apiclient,
+ id=self.vm_2.id,
+ listall=True
+ )
+ self.assertEqual(
+ isinstance(vms, list),
+ True,
+ "List virtual machines should return a valid list"
+ )
+ vm = vms[0]
+ self.assertEqual(
+ vm.state,
+ "Stopped",
+ "Virtual machine should be in stopped state"
+ )
+
+ self.debug("Starting the instance: %s" % self.vm_2.name)
+ try:
+ self.vm_2.start(self.apiclient)
+ except Exception as e:
+ self.fail("Failed to start the instances, %s" % e)
+
+ vms = VirtualMachine.list(
+ self.apiclient,
+ id=self.vm_2.id,
+ listall=True
+ )
+ self.assertEqual(
+ isinstance(vms, list),
+ True,
+ "List virtual machines should return a valid list"
+ )
+ vm = vms[0]
+ self.assertEqual(
+ vm.state,
+ "Running",
+ "Virtual machine should be in running state"
+ )
+
+ self.debug("Validating if network rules are coonfigured properly?")
+ self.validate_network_rules()
+ return
+
+ @attr(tags=["advanced", "intervlan"], required_hardware="true")
+ def test_07_migrate_instance_in_network(self):
+ """ Test migrate an instance in VPC networks
+ """
+
+ # Validate the following
+ # 1. Migrate the virtual machines to other hosts
+ # 2. Vm should be in stopped state. State both the instances
+ # 3. Make sure that all the PF,LB and Static NAT rules on this VM
+ # works as expected.
+ # 3. Make sure that we are able to access google.com from this user Vm
+ self.hypervisor = self.testClient.getHypervisorInfo()
+ if self.hypervisor.lower() in ['lxc']:
+ self.skipTest("vm migrate is not supported in %s" % self.hypervisor)
+
+ self.debug("Validating if network rules are coonfigured properly?")
+ self.validate_network_rules()
+
+ host = findSuitableHostForMigration(self.apiclient, self.vm_1.id)
+ if host is None:
+ self.skipTest(ERROR_NO_HOST_FOR_MIGRATION)
+
+ self.debug("Migrating VM-ID: %s to Host: %s" % (
+ self.vm_1.id,
+ host.id
+ ))
+
+ try:
+ self.vm_1.migrate(self.apiclient, hostid=host.id)
+ except Exception as e:
+ self.fail("Failed to migrate instance, %s" % e)
+
+ self.debug("Validating if network rules are coonfigured properly?")
+ self.validate_network_rules()
+ return
+
+ @attr(tags=["advanced", "intervlan"], required_hardware="true")
+ def test_08_user_data(self):
+ """ Test user data in virtual machines
+ """
+
+ # Validate the following
+ # 1. Create a VPC with cidr - 10.1.1.1/16
+ # 2. Add network1(10.1.1.1/24) and network2(10.1.2.1/24) to this VPC.
+ # 3. Deploy a vm in network1 and a vm in network2 using userdata
+ # Steps
+ # 1.Query for the user data for both the user vms from both networks
+ # User should be able to query the user data for the vms belonging to
+ # both the networks from the VR
+
+ try:
+ ssh = self.vm_1.get_ssh_client(
+ ipaddress=self.public_ip_1.ipaddress.ipaddress,
+ reconnect=True)
+ self.debug("SSH into VM is successfully")
+ ssh.execute("yum install wget -y")
+ except Exception as e:
+ self.fail("Failed to SSH into instance")
+
+ self.debug("check the userdata with that of present in router")
+ try:
+ cmds = [
+ "wget http://%s/latest/user-data" % self.network_1.gateway,
+ "cat user-data",
+ ]
+ for c in cmds:
+ result = ssh.execute(c)
+ self.debug("%s: %s" % (c, result))
+ except Exception as e:
+ self.fail("Failed to SSH in Virtual machine: %s" % e)
+
+ res = str(result)
+ self.assertEqual(
+ res.count(
+ self.services["virtual_machine"]["userdata"]),
+ 1,
+ "Verify user data from router"
+ )
+ return
+
+ @attr(tags=["advanced", "intervlan"], required_hardware="true")
+ def test_09_meta_data(self):
+ """ Test meta data in virtual machines
+ """
+
+ # Validate the following
+ # 1. Create a VPC with cidr - 10.1.1.1/16
+ # 2. Add network1(10.1.1.1/24) and network2(10.1.2.1/24) to this VPC.
+ # 3. Deploy a vm in network1 and a vm in network2 using userdata
+ # Steps
+ # 1.Query for the meta data for both the user vms from both networks
+ # User should be able to query the user data for the vms belonging to
+ # both the networks from the VR
+
+ try:
+ ssh = self.vm_1.get_ssh_client(
+ ipaddress=self.public_ip_1.ipaddress.ipaddress,
+ reconnect=True)
+ self.debug("SSH into VM is successfully")
+ except Exception as e:
+ self.fail("Failed to SSH into instance")
+
+ self.debug("check the metadata with that of present in router")
+ try:
+ cmds = [
+ "wget http://%s/latest/vm-id" % self.network_1.gateway,
+ "cat vm-id",
+ ]
+ for c in cmds:
+ result = ssh.execute(c)
+ self.debug("%s: %s" % (c, result))
+ except Exception as e:
+ self.fail("Failed to SSH in Virtual machine: %s" % e)
+
+ res = str(result)
+ self.assertNotEqual(
+ res,
+ None,
+ "Meta data should be returned from router"
+ )
+ return
+
+ @attr(tags=["advanced", "intervlan"], required_hardware="true")
+ def test_10_expunge_instance_in_network(self):
+ """ Test expunge an instance in VPC networks
+ """
+
+ # Validate the following
+ # 1. Recover the virtual machines.
+ # 2. Vm should be in stopped state. State both the instances
+ # 3. Make sure that all the PF,LB and Static NAT rules on this VM
+ # works as expected.
+ # 3. Make sure that we are able to access google.com from this user Vm
+
+ self.debug("Validating if network rules are coonfigured properly?")
+ self.validate_network_rules()
+
+ self.debug("Delete virtual machines in account: %s" %
+ self.account.name)
+ try:
+ self.vm_3.delete(self.apiclient)
+ except Exception as e:
+ self.fail("Failed to destroy the virtual instances, %s" % e)
+
+ self.debug(
+ "Waiting for expunge interval to cleanup the network and VMs")
+
+ wait_for_cleanup(
+ self.apiclient,
+ ["expunge.interval", "expunge.delay"]
+ )
+
+ self.debug("Validating if network rules are coonfigured properly?")
+ self.validate_network_rules()
+
+ self.debug(
+ "Deleting the rest of the virtual machines in account: %s" %
+ self.account.name)
+ try:
+ self.vm_1.delete(self.apiclient)
+ except Exception as e:
+ self.fail("Failed to destroy the virtual instances, %s" % e)
+
+ self.debug(
+ "Waiting for expunge interval to cleanup the network and VMs")
+
+ wait_for_cleanup(
+ self.apiclient,
+ ["expunge.interval", "expunge.delay"]
+ )
+
+ # Check if the network rules still exists after Vm expunged
+ self.debug("Checking if NAT rules existed ")
+ with self.assertRaises(Exception):
+ NATRule.list(
+ self.apiclient,
+ id=self.nat_rule.id,
+ listall=True
+ )
+
+ LoadBalancerRule.list(
+ self.apiclient,
+ id=self.lb_rule.id,
+ listall=True
+ )
+ return
\ No newline at end of file
diff --git a/test/integration/component/test_vpn_service.py b/test/integration/broken/test_vpn_service.py
similarity index 100%
rename from test/integration/component/test_vpn_service.py
rename to test/integration/broken/test_vpn_service.py
diff --git a/test/integration/component/test_vr_metadata.py b/test/integration/broken/test_vr_metadata.py
similarity index 100%
rename from test/integration/component/test_vr_metadata.py
rename to test/integration/broken/test_vr_metadata.py
diff --git a/test/integration/component/maint/test_bugs.py b/test/integration/component/maint/test_bugs.py
index b839dbe..262dea6 100644
--- a/test/integration/component/maint/test_bugs.py
+++ b/test/integration/component/maint/test_bugs.py
@@ -302,9 +302,10 @@ class Test42xBugsMgmtSvr(cloudstackTestCase):
# Step2: It should return a commit hash
return
- @attr(tags=["advanced", "basic"])
- @attr(required_hardware="false")
- @attr(storage="s3")
+ # @attr(tags=["advanced", "basic"])
+ # @attr(required_hardware="false")
+ # @attr(storage="s3")
+ @attr(tags=["TODO"], required_hardware="false")
def test_es_1863_register_template_s3_domain_admin_user(self):
"""
@Desc: Test whether cloudstack allows Domain admin or user
diff --git a/test/integration/component/maint/test_redundant_router.py b/test/integration/component/maint/test_redundant_router.py
index fe27888..f79856d 100644
--- a/test/integration/component/maint/test_redundant_router.py
+++ b/test/integration/component/maint/test_redundant_router.py
@@ -607,7 +607,8 @@ class TestRVRInternals(cloudstackTestCase):
raise Exception("Warning: Exception during cleanup : %s" % e)
return
- @attr(tags=["advanced", "advancedns", "ssh"])
+ # @attr(tags=["advanced", "advancedns", "ssh"])
+ @attr(tags=["TODO"])
def test_redundantVR_internals(self):
"""Test redundant router internals
"""
diff --git a/test/integration/component/maint/test_redundant_router_deployment_planning.py b/test/integration/component/maint/test_redundant_router_deployment_planning.py
index eb68c43..dc2a0ae 100644
--- a/test/integration/component/maint/test_redundant_router_deployment_planning.py
+++ b/test/integration/component/maint/test_redundant_router_deployment_planning.py
@@ -535,7 +535,8 @@ class TestRvRDeploymentPlanning(cloudstackTestCase):
self.apiclient.updatePod(cmd)
return
- @attr(tags=["advanced", "advancedns"])
+ # @attr(tags=["advanced", "advancedns"])
+ @attr(tags=["TODO"])
def test_RvR_multiprimarystorage(self):
"""Test RvR with multi primary storage
"""
@@ -773,7 +774,8 @@ class TestRvRDeploymentPlanning(cloudstackTestCase):
self.apiclient.updateCluster(cmd)
return
- @attr(tags=["advanced", "advancedns", "ssh"])
+ # @attr(tags=["advanced", "advancedns", "ssh"])
+ @attr(tags=["TODO"])
def test_RvR_multihosts(self):
"""Test RvR with multi hosts
"""
diff --git a/test/integration/component/test_add_remove_network.py b/test/integration/component/test_add_remove_network.py
index ac0ecc7..132eed4 100644
--- a/test/integration/component/test_add_remove_network.py
+++ b/test/integration/component/test_add_remove_network.py
@@ -25,11 +25,16 @@
Feature Specifications: https://cwiki.apache.org/confluence/display/CLOUDSTACK/Add+Remove+Networks+to+VMs
"""
-# Import Local Modules
-from nose.plugins.attrib import attr
-from marvin.cloudstackTestCase import cloudstackTestCase
+import random
+import time
import unittest
+
from ddt import ddt, data
+from marvin.cloudstackAPI import (addNicToVirtualMachine,
+ removeNicFromVirtualMachine,
+ updateDefaultNicForVirtualMachine)
+from marvin.cloudstackTestCase import cloudstackTestCase
+from marvin.codes import PASS
from marvin.lib.base import (
Account,
Domain,
@@ -53,19 +58,11 @@ from marvin.lib.common import (get_domain,
update_resource_limit,
list_nat_rules
)
-
from marvin.lib.utils import (validateList,
random_gen,
- get_hypervisor_type,
- cleanup_resources)
-
-from marvin.cloudstackAPI import (addNicToVirtualMachine,
- removeNicFromVirtualMachine,
- updateDefaultNicForVirtualMachine)
-
-from marvin.codes import PASS
-import random
-import time
+ get_hypervisor_type)
+# Import Local Modules
+from nose.plugins.attrib import attr
class Services:
@@ -219,22 +216,21 @@ class TestAddNetworkToVirtualMachine(cloudstackTestCase):
cls.virtual_machine = VirtualMachine.create(cls.api_client, cls.services["virtual_machine"], accountid=cls.account.name,
domainid=cls.account.domainid, serviceofferingid=cls.service_offering.id,
mode=cls.zone.networktype)
+ cls._cleanup.append(cls.virtual_machine)
cls.defaultNetworkId = cls.virtual_machine.nic[0].networkid
- # Create Shared Network Offering
cls.isolated_network_offering = NetworkOffering.create(cls.api_client, cls.services["isolated_network_offering"])
cls._cleanup.append(cls.isolated_network_offering)
- # Enable Isolated Network offering
cls.isolated_network_offering.update(cls.api_client, state='Enabled')
- # Create Shared Network Offering
cls.shared_network_offering = NetworkOffering.create(cls.api_client, cls.services["shared_network_offering"])
- # Enable shared Network offering
+ cls._cleanup.append(cls.shared_network_offering)
cls.shared_network_offering.update(cls.api_client, state='Enabled')
cls.isolated_network = Network.create(cls.api_client, cls.services["isolated_network"], cls.account.name,
cls.account.domainid, networkofferingid=cls.isolated_network_offering.id)
+ cls._cleanup.append(cls.isolated_network)
cls.services["shared_network"]["vlan"] = get_free_vlan(cls.api_client, cls.zone.id)[1]
@@ -249,7 +245,6 @@ class TestAddNetworkToVirtualMachine(cloudstackTestCase):
cls.shared_network = Network.create(cls.api_client, cls.services["shared_network"], cls.account.name,
cls.account.domainid, networkofferingid=cls.shared_network_offering.id)
cls._cleanup.append(cls.shared_network)
- cls._cleanup.append(cls.shared_network_offering)
return
def setUp(self):
@@ -272,11 +267,9 @@ class TestAddNetworkToVirtualMachine(cloudstackTestCase):
try:
for nic in self.addednics:
self.virtual_machine.remove_nic(self.apiclient, nic.id)
- # Clean up, terminate the created accounts, domains etc
- cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ self.debug("Exception during removal of nics : %s" % e)
+ super(TestAddNetworkToVirtualMachine, self).tearDown()
@classmethod
def tearDownClass(cls):
@@ -284,13 +277,9 @@ class TestAddNetworkToVirtualMachine(cloudstackTestCase):
# Disable Network Offerings
cls.isolated_network_offering.update(cls.api_client, state='Disabled')
cls.shared_network_offering.update(cls.api_client, state='Disabled')
-
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
-
except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ cls.debug("Exception during disable of networks : %s" % e)
+ super(TestAddNetworkToVirtualMachine, cls).tearDownClass()
def addNetworkToVm(self, network, vm, ipaddress=None):
"""Add network to VM and check if new nic added in the VM"""
@@ -460,15 +449,14 @@ class TestAddNetworkToVirtualMachine(cloudstackTestCase):
self.debug("Creating VPC offering")
vpc_off = VpcOffering.create(self.api_client, self.services["vpc_offering"])
+ self.cleanup.append(vpc_off)
self.debug("Created VPC offering: %s" % vpc_off.id)
self.debug("Enabling the VPC offering")
vpc_off.update(self.apiclient, state='Enabled')
self.debug("Creating VPC")
vpc = VPC.create(self.apiclient, self.services["vpc"], vpcofferingid=vpc_off.id, zoneid=self.zone.id,
account=self.account.name, domainid=self.account.domainid)
- # Appending to cleanup list
self.cleanup.append(vpc)
- self.cleanup.append(vpc_off)
self.debug("Trying to add VPC to vm belonging to isolated network, this should fail")
with self.assertRaises(Exception):
@@ -501,15 +489,14 @@ class TestAddNetworkToVirtualMachine(cloudstackTestCase):
self.debug("Creating VPC offering")
vpc_off = VpcOffering.create(self.api_client, self.services["vpc_offering"])
+ self.cleanup.append(vpc_off)
self.debug("Created VPC offering: %s" % vpc_off.id)
self.debug("Enabling the VPC offering")
vpc_off.update(self.apiclient, state='Enabled')
self.debug("Creating VPC")
vpc = VPC.create(self.apiclient, self.services["vpc"], vpcofferingid=vpc_off.id, zoneid=self.zone.id,
account=self.account.name, domainid=self.account.domainid)
- # Appending to cleanup list
self.cleanup.append(vpc)
- self.cleanup.append(vpc_off)
self.debug("Trying to add VPC to vm belonging to isolated network, this should fail")
with self.assertRaises(Exception):
self.virtual_machine.add_nic(self.apiclient, vpc.id)
@@ -567,7 +554,9 @@ class TestAddNetworkToVirtualMachine(cloudstackTestCase):
return
- @attr(tags=["advanced", "dvs"])
+ # was tags=["advanced", "dvs"],
+ # the apiclient that is being used to test this has too many rights?
+ @attr(tags=["TODO"])
@data("isolated", "shared")
def test_14_add_nw_different_account(self, value):
"""Add network to running VM"""
@@ -586,6 +575,7 @@ class TestAddNetworkToVirtualMachine(cloudstackTestCase):
if value == "isolated":
network = Network.create(self.api_client, self.services["isolated_network"], account.name,
account.domainid, networkofferingid=self.isolated_network_offering.id)
+ self.cleanup.append(network)
elif value == "shared":
self.services["shared_network_2"]["zoneid"] = self.zone.id
self.services["shared_network_2"]["vlan"] = get_free_vlan(self.apiclient, self.zone.id)[1]
@@ -600,7 +590,9 @@ class TestAddNetworkToVirtualMachine(cloudstackTestCase):
(network.type, account.name, self.account.name))
try:
- self.virtual_machine.add_nic(self.apiclient, network.id)
+ vm_with_nic = self.virtual_machine.add_nic(self.apiclient, network.id)
+ nics = [x for x in vm_with_nic.nic if x.networkid == network.id]
+ self.addednics.append(nics[-1])
except Exception:
pass
else:
@@ -621,11 +613,10 @@ class TestAddNetworkToVirtualMachine(cloudstackTestCase):
network = None # The network which we are adding to the vm
try:
- tempCleanupList = []
self.child_domain_1 = Domain.create(self.apiclient,
services=self.services["domain"],
parentdomainid=self.domain.id)
- tempCleanupList.append(self.child_domain_1)
+ self.cleanup.append(self.child_domain_1)
self.child_do_admin_1 = Account.create(
self.apiclient,
@@ -633,31 +624,30 @@ class TestAddNetworkToVirtualMachine(cloudstackTestCase):
admin=True,
domainid=self.child_domain_1.id
)
- tempCleanupList.append(self.child_do_admin_1)
+ self.cleanup.append(self.child_do_admin_1)
self.child_domain_2 = Domain.create(self.apiclient,
services=self.services["domain"],
parentdomainid=self.domain.id)
- tempCleanupList.append(self.child_domain_2)
+ self.cleanup.append(self.child_domain_2)
self.child_do_admin_2 = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.child_domain_2.id)
- tempCleanupList.append(self.child_do_admin_2)
+ self.cleanup.append(self.child_do_admin_2)
except Exception as e:
self.fail(e)
- finally:
- tempCleanupList.reverse()
- self.cleanup += tempCleanupList
network = Network.create(self.api_client, self.services["isolated_network"], self.child_do_admin_1.name,
self.child_do_admin_1.domainid, networkofferingid=self.isolated_network_offering.id)
+ self.cleanup.append(network)
virtual_machine = VirtualMachine.create(self.apiclient, self.services["virtual_machine"], accountid=self.child_do_admin_2.name,
domainid=self.child_do_admin_2.domainid, serviceofferingid=self.service_offering.id,
mode=self.zone.networktype)
+ self.cleanup.append(virtual_machine)
time.sleep(self.services["sleep"])
self.debug("Trying to %s network in domain %s to a vm in domain %s, This should fail" %
@@ -700,6 +690,7 @@ class TestAddNetworkToVirtualMachine(cloudstackTestCase):
network_1 = Network.create(self.api_client, self.services["isolated_network"], account_1.name,
account_1.domainid, networkofferingid=self.isolated_network_offering.id)
+ self.cleanup.append(network_1)
self.debug("created network %s" % network_1.name)
@@ -708,6 +699,7 @@ class TestAddNetworkToVirtualMachine(cloudstackTestCase):
virtual_machine = VirtualMachine.create(self.apiclient, self.services["virtual_machine"], accountid=account_1.name,
domainid=account_1.domainid, serviceofferingid=self.service_offering.id,
mode=self.zone.networktype)
+ self.cleanup.append(virtual_machine)
self.debug("Deployed virtual machine : %s" % virtual_machine.id)
@@ -718,14 +710,14 @@ class TestAddNetworkToVirtualMachine(cloudstackTestCase):
self.services["account"],
domainid=self.domain.id
)
+ self.cleanup.append(account_2)
self.debug("Created account %s" % account_2.name)
- self.cleanup.append(account_2)
-
self.debug("Creating network in account %s" % account_2.name)
network_2 = Network.create(self.api_client, self.services["isolated_network"], account_2.name,
account_2.domainid, networkofferingid=self.isolated_network_offering.id)
+ self.cleanup.append(network_2)
self.debug("Created network %s" % network_2.name)
@@ -775,6 +767,7 @@ class TestRemoveNetworkFromVirtualMachine(cloudstackTestCase):
cls.virtual_machine = VirtualMachine.create(cls.api_client, cls.services["virtual_machine"], accountid=cls.account.name,
domainid=cls.account.domainid, serviceofferingid=cls.service_offering.id,
mode=cls.zone.networktype)
+ cls._cleanup.append(cls.virtual_machine)
# Create Shared Network Offering
cls.isolated_network_offering = NetworkOffering.create(cls.api_client, cls.services["isolated_network_offering"])
cls._cleanup.append(cls.isolated_network_offering)
@@ -783,31 +776,30 @@ class TestRemoveNetworkFromVirtualMachine(cloudstackTestCase):
cls.isolated_network_offering.update(cls.api_client, state='Enabled')
cls.isolated_network = Network.create(cls.api_client, cls.services["isolated_network"], cls.account.name,
cls.account.domainid, networkofferingid=cls.isolated_network_offering.id)
+ cls._cleanup.append(cls.isolated_network)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
+ self.addednics = []
def tearDown(self):
try:
- # Clean up, terminate the created accounts, domains etc
- cleanup_resources(self.apiclient, self.cleanup)
+ for nic in self.addednics:
+ self.virtual_machine.remove_nic(self.apiclient, nic.id)
except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ self.debug("Exception during removal of nics : %s" % e)
+ super(TestRemoveNetworkFromVirtualMachine, self).tearDown()
@classmethod
def tearDownClass(cls):
try:
- # Disable Network Offerings
cls.isolated_network_offering.update(cls.api_client, state='Disabled')
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ cls.debug("Exception during disabling network offering : %s" % e)
+ super(TestRemoveNetworkFromVirtualMachine, cls).tearDownClass()
def addNetworkToVm(self, network, vm):
"""Add network to VM and check if new nic added in the VM"""
@@ -939,6 +931,7 @@ class TestRemoveNetworkFromVirtualMachine(cloudstackTestCase):
vm1 = self.virtual_machine
nic2 = self.addNetworkToVm(self.isolated_network, vm1)
+ self.addednics.append(nic2)
# get the ip address of the nic added in 2nd network
vm1_ip = nic2[0].ipaddress
self.assertIsNotNone(vm1_ip, "New nic did not get the ip address")
@@ -982,7 +975,6 @@ class TestRemoveNetworkFromVirtualMachine(cloudstackTestCase):
self.network3.id
)
self.cleanup.append(ip_address)
- self.cleanup = self.cleanup[::-1]
# Open up firewall port for SSH
FireWallRule.create(
self.apiclient,
@@ -1045,6 +1037,7 @@ class TestRemoveNetworkFromVirtualMachine(cloudstackTestCase):
self.account.domainid,
networkofferingid=self.isolated_network_offering.id
)
+ self.cleanup.append(self.ntwk2)
self.ntwk3 = Network.create(
self.apiclient,
self.services["isolated_network"],
@@ -1052,6 +1045,7 @@ class TestRemoveNetworkFromVirtualMachine(cloudstackTestCase):
self.account.domainid,
networkofferingid=self.isolated_network_offering.id
)
+ self.cleanup.append(self.ntwk3)
self.test_vm = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
@@ -1061,8 +1055,8 @@ class TestRemoveNetworkFromVirtualMachine(cloudstackTestCase):
mode=self.zone.networktype,
networkids=[self.isolated_network.id, self.ntwk2.id, self.ntwk3.id]
)
+ self.cleanup.append(self.test_vm)
self.assertIsNotNone(self.test_vm, "Failed to create vm with 3 nics")
- list(map(lambda x: self.cleanup.append(x), [self.test_vm, self.ntwk2, self.ntwk3]))
vm_res = VirtualMachine.list(
self.apiclient,
id=self.test_vm.id
@@ -1122,6 +1116,7 @@ class TestRemoveNetworkFromVirtualMachine(cloudstackTestCase):
3,
"Nic is not attached/detected"
)
+ self.addednics.extend(vm_nics)
return
@@ -1162,39 +1157,38 @@ class TestUpdateVirtualMachineNIC(cloudstackTestCase):
accountid=cls.account.name, domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id,
mode=cls.zone.networktype)
- # Create Shared Network Offering
+ cls._cleanup.append(cls.virtual_machine)
+
cls.isolated_network_offering = NetworkOffering.create(cls.api_client, cls.services["isolated_network_offering"])
cls._cleanup.append(cls.isolated_network_offering)
- # Enable Isolated Network offering
+
cls.isolated_network_offering.update(cls.api_client, state='Enabled')
cls.isolated_network = Network.create(cls.api_client, cls.services["isolated_network"], cls.account.name,
cls.account.domainid, networkofferingid=cls.isolated_network_offering.id)
+ cls._cleanup.append(cls.isolated_network)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
+ self.addednics = []
def tearDown(self):
try:
- # Clean up, terminate the created accounts, domains etc
- cleanup_resources(self.apiclient, self.cleanup)
+ for nic in self.addednics:
+ self.virtual_machine.remove_nic(self.apiclient, nic.id)
except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ self.debug("Exception during removal of nics : %s" % e)
+ super(TestUpdateVirtualMachineNIC, self).tearDown()
@classmethod
def tearDownClass(cls):
try:
- # Disable Network Offerings
cls.isolated_network_offering.update(cls.api_client, state='Disabled')
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
-
except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ cls.debug("Exception during disable of network offering : %s" % e)
+ super(TestUpdateVirtualMachineNIC, cls).tearDownClass()
def addNetworkToVm(self, network, vm):
"""Add network to VM and check if new nic added in the VM"""
@@ -1213,6 +1207,7 @@ class TestUpdateVirtualMachineNIC(cloudstackTestCase):
self.assertTrue(len(self.nics) == 1, "nics list should contain the nic of added isolated network,\
the number of nics for the network should be 1, instead they are %s" %
len(self.nics))
+ self.addednics.append(self.nics[0])
return
@attr(tags=["advanced", "dvs"])
@@ -1330,6 +1325,7 @@ class TestUpdateVirtualMachineNIC(cloudstackTestCase):
virtual_machine = VirtualMachine.create(self.apiclient, self.services["virtual_machine"],
accountid=account.name, domainid=account.domainid,
serviceofferingid=self.service_offering.id, mode=self.zone.networktype)
+ self.cleanup.append(virtual_machine)
time.sleep(self.services["sleep"])
self.debug("Deployed virtual machine: %s" % virtual_machine.id)
foreignNicId = virtual_machine.nic[0].id
@@ -1376,15 +1372,16 @@ class TestFailureScenariosAddNetworkToVM(cloudstackTestCase):
cls.virtual_machine = VirtualMachine.create(cls.api_client, cls.services["virtual_machine"],
accountid=cls.account.name, domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id, mode=cls.zone.networktype)
- # Create Shared Network Offering
+ cls._cleanup.append(cls.virtual_machine)
+
cls.isolated_network_offering = NetworkOffering.create(cls.api_client, cls.services["isolated_network_offering"], )
cls._cleanup.append(cls.isolated_network_offering)
- # Enable Isolated Network offering
cls.isolated_network_offering.update(cls.api_client, state='Enabled')
cls.isolated_network = Network.create(cls.api_client, cls.services["isolated_network"], cls.account.name,
cls.account.domainid, networkofferingid=cls.isolated_network_offering.id)
+ cls._cleanup.append(cls.isolated_network)
return
def setUp(self):
@@ -1393,24 +1390,15 @@ class TestFailureScenariosAddNetworkToVM(cloudstackTestCase):
self.cleanup = []
def tearDown(self):
- try:
- # Clean up, terminate the created accounts, domains etc
- cleanup_resources(self.apiclient, self.cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestFailureScenariosAddNetworkToVM, self).tearDown()
@classmethod
def tearDownClass(cls):
try:
- # Disable Network Offerings
cls.isolated_network_offering.update(cls.api_client, state='Disabled')
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
-
except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ cls.debug("Exception during disabling network offering : %s" % e)
+ super(TestFailureScenariosAddNetworkToVM, cls).tearDownClass()
@attr(tags=["advanced", "dvs"])
def test_15_add_nic_wrong_vm_id(self):
@@ -1482,6 +1470,7 @@ class TestFailureScenariosAddNetworkToVM(cloudstackTestCase):
isolated_network = Network.create(self.apiclient, self.services["isolated_network"],
self.account.name, self.account.domainid,
networkofferingid=self.isolated_network_offering.id)
+ self.cleanup.append(isolated_network)
self.debug("Created isolated network %s in zone %s" %
(isolated_network.id, foreignZoneId))
@@ -1523,9 +1512,8 @@ class TestFailureScenariosAddNetworkToVM(cloudstackTestCase):
self.debug("Creating isolated network in basic zone: %s" % basicZone.id)
isolated_network = Network.create(self.apiclient, self.services["isolated_network"],
networkofferingid=self.isolated_network_offering.id)
-
- self.debug("Created isolated network %s:" % isolated_network.id)
self.cleanup.append(isolated_network)
+ self.debug("Created isolated network %s:" % isolated_network.id)
self.services["virtual_machine"]["zoneid"] = basicZone.id
@@ -1533,6 +1521,7 @@ class TestFailureScenariosAddNetworkToVM(cloudstackTestCase):
virtual_machine = VirtualMachine.create(self.apiclient, self.services["virtual_machine"],
serviceofferingid=self.service_offering.id,
mode=basicZone.networktype)
+ self.cleanup.append(virtual_machine)
time.sleep(self.services["sleep"])
self.debug("Deployed virtual machine %s: " % virtual_machine.id)
@@ -1545,7 +1534,6 @@ class TestFailureScenariosAddNetworkToVM(cloudstackTestCase):
with self.assertRaises(Exception) as e:
time.sleep(5)
self.apiclient.addNicToVirtualMachine(cmd)
- self.debug("addNicToVirtualMachine API failed with exception: %s" % e.exception)
return
@@ -1578,7 +1566,6 @@ class TestFailureScenariosAddNetworkToVM(cloudstackTestCase):
with self.assertRaises(Exception) as e:
time.sleep(5)
api_client.addNicToVirtualMachine(cmd)
- self.debug("addNicToVirtualMachine API failed with exception: %s" % e.exception)
return
@@ -1620,16 +1607,15 @@ class TestFailureScenariosRemoveNicFromVM(cloudstackTestCase):
accountid=cls.account.name, domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id,
mode=cls.zone.networktype)
+ cls._cleanup.append(cls.virtual_machine)
- # Create Shared Network Offering
cls.isolated_network_offering = NetworkOffering.create(cls.api_client, cls.services["isolated_network_offering"], )
cls._cleanup.append(cls.isolated_network_offering)
- # Enable Isolated Network offering
cls.isolated_network_offering.update(cls.api_client, state='Enabled')
cls.isolated_network = Network.create(cls.api_client, cls.services["isolated_network"], cls.account.name,
cls.account.domainid, networkofferingid=cls.isolated_network_offering.id)
+ cls._cleanup.append(cls.isolated_network)
- # Add network to VM
cls.virtual_machine.add_nic(cls.api_client, cls.isolated_network.id)
return
@@ -1639,24 +1625,15 @@ class TestFailureScenariosRemoveNicFromVM(cloudstackTestCase):
self.cleanup = []
def tearDown(self):
- try:
- # Clean up, terminate the created accounts, domains etc
- cleanup_resources(self.apiclient, self.cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestFailureScenariosRemoveNicFromVM, self).tearDown()
@classmethod
def tearDownClass(cls):
try:
- # Disable Network Offerings
cls.isolated_network_offering.update(cls.api_client, state='Disabled')
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
-
except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ cls.debug("Exception during disabling of network offering : %s" % e)
+ super(TestFailureScenariosRemoveNicFromVM, cls).tearDownClass()
@attr(tags=["advanced", "dvs"])
def test_19_remove_nic_wrong_vm_id(self):
@@ -1765,6 +1742,8 @@ class TestFailureScenariosRemoveNicFromVM(cloudstackTestCase):
api_client.removeNicFromVirtualMachine(cmd)
self.debug("removeNicFromVirtualMachine API failed with exception: %s" % e.exception)
+ self.apiclient.removeNicFromVirtualMachine(cmd)
+
return
@@ -1794,6 +1773,7 @@ class TestFailureScenariosUpdateVirtualMachineNIC(cloudstackTestCase):
cls.services["isolated_network"]["zoneid"] = cls.zone.id
cls.services["shared_network"]["zoneid"] = cls.zone.id
cls._cleanup = []
+ cls.addednics = []
cls.account = Account.create(cls.api_client, cls.services["account"], domainid=cls.domain.id)
cls._cleanup.append(cls.account)
@@ -1804,6 +1784,7 @@ class TestFailureScenariosUpdateVirtualMachineNIC(cloudstackTestCase):
cls.virtual_machine = VirtualMachine.create(cls.api_client, cls.services["virtual_machine"],
accountid=cls.account.name, domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id, mode=cls.zone.networktype)
+ cls._cleanup.append(cls.virtual_machine)
cls.defaultNetworkId = cls.virtual_machine.nic[0].networkid
@@ -1816,7 +1797,11 @@ class TestFailureScenariosUpdateVirtualMachineNIC(cloudstackTestCase):
cls.isolated_network = Network.create(cls.api_client, cls.services["isolated_network"],
cls.account.name, cls.account.domainid,
networkofferingid=cls.isolated_network_offering.id)
- cls.virtual_machine.add_nic(cls.api_client, cls.isolated_network.id)
+ cls._cleanup.append(cls.isolated_network)
+ vm_with_nic = cls.virtual_machine.add_nic(cls.api_client, cls.isolated_network.id)
+ nics = [x for x in vm_with_nic.nic if x.networkid == cls.isolated_network.id]
+ cls.addednics.append(nics[-1])
+
return
def setUp(self):
@@ -1825,24 +1810,20 @@ class TestFailureScenariosUpdateVirtualMachineNIC(cloudstackTestCase):
self.cleanup = []
def tearDown(self):
- try:
- # Clean up, terminate the created accounts, domains etc
- cleanup_resources(self.apiclient, self.cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestFailureScenariosUpdateVirtualMachineNIC, self).tearDown()
@classmethod
def tearDownClass(cls):
try:
- # Disable Network Offerings
+ for nic in cls.addednics:
+ cls.virtual_machine.remove_nic(cls.apiclient, nic.id)
+ except Exception as e:
+ cls.debug("Exception during removal of nics : %s" % e)
+ try:
cls.isolated_network_offering.update(cls.api_client, state='Disabled')
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
-
except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ cls.debug("Exception during disabling of network offering : %s" % e)
+ super(TestFailureScenariosUpdateVirtualMachineNIC, cls).tearDownClass()
@attr(tags=["advanced", "dvs"])
def test_21_update_nic_wrong_vm_id(self):
@@ -2049,7 +2030,5 @@ class TestFailureScenariosUpdateVirtualMachineNIC(cloudstackTestCase):
with self.assertRaises(Exception) as e:
api_client.updateDefaultNicForVirtualMachine(cmd)
- self.debug("updateDefaultNicForVirtualMachine API failed with exception: %s" %
- e.exception)
return
diff --git a/test/integration/component/test_affinity_groups.py b/test/integration/component/test_affinity_groups.py
index 9d4c486..a2bb642 100644
--- a/test/integration/component/test_affinity_groups.py
+++ b/test/integration/component/test_affinity_groups.py
@@ -1041,7 +1041,8 @@ class TestUpdateVMAffinityGroups(cloudstackTestCase):
for aff_grp in aff_grps:
aff_grp.delete(self.api_client)
- @attr(tags=["simulator", "basic", "advanced", "multihost", "NotRun"])
+ # @attr(tags=["simulator", "basic", "advanced", "multihost", "NotRun"])
+ @attr(tags=["TODO"])
def test_04_update_aff_grp_remove_all(self):
"""
Update the list of Affinity Groups to empty list
@@ -1087,7 +1088,8 @@ class TestUpdateVMAffinityGroups(cloudstackTestCase):
for aff_grp in aff_grps:
aff_grp.delete(self.api_client)
- @attr(tags=["simulator", "basic", "advanced", "multihost", "NotRun"])
+ # @attr(tags=["simulator", "basic", "advanced", "multihost", "NotRun"])
+ @attr(tags=["TODO"])
def test_06_update_aff_grp_invalid_args(self):
"""
Update the list of Affinity Groups with either both args or none
diff --git a/test/integration/component/test_base_image_updation.py b/test/integration/component/test_base_image_updation.py
index 133db82..234a86e 100644
--- a/test/integration/component/test_base_image_updation.py
+++ b/test/integration/component/test_base_image_updation.py
@@ -168,7 +168,7 @@ class TestBaseImageUpdate(cloudstackTestCase):
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
-
+ cls._cleanup = []
cls.template = get_template(
cls.api_client,
cls.zone.id,
@@ -193,6 +193,7 @@ class TestBaseImageUpdate(cloudstackTestCase):
admin=True,
domainid=cls.domain.id
)
+ cls._cleanup.append(cls.account)
cls.vm_with_reset = VirtualMachine.create(
cls.api_client,
@@ -201,6 +202,7 @@ class TestBaseImageUpdate(cloudstackTestCase):
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering_with_reset.id,
)
+ cls._cleanup.append(cls.vm_with_reset)
cls.vm_with_reset_root_disk_id = cls.get_root_device_uuid_for_vm(cls.vm_with_reset.id,
cls.vm_with_reset.rootdeviceid)
@@ -212,24 +214,15 @@ class TestBaseImageUpdate(cloudstackTestCase):
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering_without_reset.id,
)
+ cls._cleanup.append(cls.vm_without_reset)
cls.vm_without_reset_root_disk_id = cls.get_root_device_uuid_for_vm(cls.vm_without_reset.id,
cls.vm_without_reset.rootdeviceid)
- cls._cleanup = [
- cls.account,
- cls.service_offering_with_reset,
- cls.service_offering_without_reset,
- ]
return
@classmethod
def tearDownClass(cls):
- try:
- #Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestBaseImageUpdate, cls).tearDownClass()
@classmethod
def get_root_device_uuid_for_vm(cls, vm_id, root_device_id):
@@ -245,12 +238,7 @@ class TestBaseImageUpdate(cloudstackTestCase):
return
def tearDown(self):
- try:
- #Clean up, terminate the created network offerings
- cleanup_resources(self.apiclient, self.cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestBaseImageUpdate, self).tearDown()
def verify_template_listing(self, template):
@@ -441,7 +429,7 @@ class TestBaseImageUpdate(cloudstackTestCase):
template.id
))
template.download(self.apiclient)
- self._cleanup.append(template)
+ self._cleanup.insert(1, template)
# Wait for template status to be changed across
time.sleep(self.services["sleep"])
@@ -559,6 +547,7 @@ class TestBaseImageUpdate(cloudstackTestCase):
vm_with_reset_root_disk_id,
self.services["recurring_snapshot"]
)
+ self.cleanup.append(recurring_snapshot)
#ListSnapshotPolicy should return newly created policy
list_snapshots_policy = SnapshotPolicy.list(
diff --git a/test/integration/component/test_browse_templates.py b/test/integration/component/test_browse_templates.py
index 3bceb44..2573f5f 100644
--- a/test/integration/component/test_browse_templates.py
+++ b/test/integration/component/test_browse_templates.py
@@ -47,7 +47,6 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cls.apiclient = cls.testClient.getApiClient()
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls._cleanup = []
- cls.cleanup = []
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
cls.unsupportedHypervisor = False
@@ -70,6 +69,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cls.testdata["account"],
domainid=cls.domain.id
)
+ cls._cleanup.append(cls.account)
cls.template = get_template(
cls.apiclient,
@@ -83,23 +83,20 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cls.apiclient,
cls.testdata["service_offering"]
)
+ cls._cleanup.append(cls.service_offering)
cls.disk_offering = DiskOffering.create(
cls.apiclient,
cls.testdata["resized_disk_offering"],
custom=True
)
+ cls._cleanup.append(cls.disk_offering)
cls.project = Project.create(
cls.apiclient,
cls.testdata["project"],
account=cls.account.name,
domainid=cls.account.domainid
)
- cls._cleanup = [
- cls.project,
- cls.account,
- cls.service_offering,
- cls.disk_offering
- ]
+ cls._cleanup.append(cls.project)
def setUp(self):
@@ -149,6 +146,8 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.skipTest("Skipping test because unsupported hypervisor\
%s" % self.hypervisor)
+ self.cleanup = []
+
def getOsType(self, param):
cmd = listOsTypes.listOsTypesCmd()
cmd.description = param
@@ -1444,7 +1443,8 @@ class TestBrowseUploadVolume(cloudstackTestCase):
)
return
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ # was tags = ["advanced", "advancedns", "smoke", "basic"]
+ @attr(tags = ["TODO"], required_hardware="true")
def test_01_Browser_template_Life_cycle_tpath(self):
"""
Test Browser_template_Life_cycle
@@ -1506,7 +1506,8 @@ class TestBrowseUploadVolume(cloudstackTestCase):
# self.fail("Exception occurred : %s" % e)
return
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ # was tags = ["advanced", "advancedns", "smoke", "basic"]
+ @attr(tags = ["TODO"], required_hardware="true")
def test_02_SSVM_Life_Cycle_With_Browser_Template_TPath(self):
"""
Test SSVM_Life_Cycle_With_Browser_template_TPath
@@ -1572,7 +1573,8 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.fail("Exception occurred : %s" % e)
return
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ # was tags = ["advanced", "advancedns", "smoke", "basic"]
+ @attr(tags = ["TODO"], required_hardware="true")
def test_04_Browser_template_ResetVM_With_Deleted_Template(self):
"""
Test Browser_template_upload_ResetVM_With_Deleted_Template
@@ -1593,7 +1595,8 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.fail("Exception occurred : %s" % e)
return
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ # was tags = ["advanced", "advancedns", "smoke", "basic"]
+ @attr(tags = ["TODO"], required_hardware="true")
def test_05_Browser_Upload_Template_with_all_API_parameters(self):
"""
Test Browser_Upload_Template with all API parameters
@@ -1619,9 +1622,8 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.fail("Exception occurred : %s" % e)
return
-
-
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ # was tags = ["advanced", "advancedns", "smoke", "basic"]
+ @attr(tags = ["TODO"], required_hardware="true")
def test_06_Browser_Upload_template_resource_limits(self):
"""
Test Browser Upload Template Resource limits
@@ -1644,7 +1646,8 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.fail("Exception occurred : %s" % e)
return
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ # was tags = ["advanced", "advancedns", "smoke", "basic"]
+ @attr(tags = ["TODO"], required_hardware="true")
def test_07_Browser_Upload_template_secondary_storage_resource_limits(self):
"""
Test Browser_Upload_Template Secondary Storage Resource limits
@@ -1674,7 +1677,8 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.fail("Exception occurred : %s" % e)
return
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ # was tags = ["advanced", "advancedns", "smoke", "basic"]
+ @attr(tags = ["TODO"], required_hardware="true")
def test_08_Browser_Upload_template_resource_limits_after_deletion(self):
"""
Test Browser_Upload_Template Resource limits after template deletion
@@ -1694,7 +1698,8 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.fail("Exceptione occurred : %s" % e)
return
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ # was tags = ["advanced", "advancedns", "smoke", "basic"]
+ @attr(tags = ["TODO"], required_hardware="true")
def test_09_Browser_Upload_Volume_secondary_storage_resource_limits_after_deletion(self):
"""
Test Browser_Upload_Template Secondary Storage Resource limits after template deletion
@@ -1722,8 +1727,8 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.fail("Exception occurred : %s" % e)
return
-
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="false")
+ # @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="false")
+ @attr(tags = ["TODO"], required_hardware="false")
def test_browser_upload_template_incomplete(self):
"""
Test browser based incomplete template upload, followed by SSVM destroy. Template should go to UploadAbandoned state and get cleaned up.
@@ -1768,9 +1773,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
@classmethod
def tearDownClass(self):
- try:
- self.apiclient = super(TestBrowseUploadVolume,self).getClsTestClient().getApiClient()
- cleanup_resources(self.apiclient, self._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestBrowseUploadVolume, self).tearDownClass()
+
+ def tearDown(self):
+ super(TestBrowseUploadVolume, self).tearDown()
diff --git a/test/integration/component/test_browse_volumes.py b/test/integration/component/test_browse_volumes.py
index 09ed681..73b4cbb 100644
--- a/test/integration/component/test_browse_volumes.py
+++ b/test/integration/component/test_browse_volumes.py
@@ -18,50 +18,43 @@
"""
# Import Local Modules
-import marvin
-from nose.plugins.attrib import attr
-from marvin.cloudstackTestCase import cloudstackTestCase
+import os
+import random
+import string
+import tempfile
+import time
import unittest
+import urllib.error
+import urllib.parse
+import urllib.request
+
+import requests
from marvin.cloudstackAPI import *
-from marvin.lib.utils import *
+from marvin.cloudstackTestCase import cloudstackTestCase
+from marvin.codes import PASS, FAILED
from marvin.lib.base import *
from marvin.lib.common import *
-from marvin.codes import PASS,FAILED,SUCCESS,XEN_SERVER
-
-from marvin.sshClient import SshClient
-
-import requests
-
-import wget
-
-import random
-
-import string
+from marvin.lib.utils import *
+from nose.plugins.attrib import attr
-import telnetlib
-import os
-import urllib.request, urllib.parse, urllib.error
-import time
-import tempfile
_multiprocess_shared_ = True
-class TestBrowseUploadVolume(cloudstackTestCase):
+class TestBrowseUploadVolume(cloudstackTestCase):
"""
Testing Browse Upload Volume Feature
"""
+
@classmethod
def setUpClass(cls):
- cls.testClient = super(TestBrowseUploadVolume,cls).getClsTestClient()
- #print cls.testClient.getParsedTestDataConfig()
+ cls.testClient = super(TestBrowseUploadVolume, cls).getClsTestClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
cls.apiclient = cls.testClient.getApiClient()
cls.hypervisor = cls.testClient.getHypervisorInfo()
cls._cleanup = []
- cls.cleanup = []
- cls.uploadvolumeformat="VHD"
+ cls.uploadvolumeformat = "VHD"
cls.storagetype = 'shared'
- cls.globalurl="http://url"
+ cls.globalurl = "http://url"
hosts = list_hosts(
cls.apiclient,
@@ -71,64 +64,60 @@ class TestBrowseUploadVolume(cloudstackTestCase):
if hosts is None:
raise unittest.SkipTest(
"There are no hypervisor's available.Check listhosts response")
- for hypervisorhost in hosts :
- if hypervisorhost.hypervisor == "XenServer":
- cls.uploadvolumeformat="VHD"
- break
- elif hypervisorhost.hypervisor== "VMware":
- cls.uploadvolumeformat="OVA"
- break
- elif hypervisorhost.hypervisor=="KVM":
- cls.uploadvolumeformat="QCOW2"
- break
- else:
- break
-
- cls.uploadurl=cls.testdata["configurableData"]["browser_upload_volume"][cls.uploadvolumeformat]["url"]
- cls.volname=cls.testdata["configurableData"]["browser_upload_volume"][cls.uploadvolumeformat]["diskname"]
- cls.md5sum=cls.testdata["configurableData"]["browser_upload_volume"][cls.uploadvolumeformat]["checksum"]
+ for hypervisorhost in hosts:
+ if hypervisorhost.hypervisor == "XenServer":
+ cls.uploadvolumeformat = "VHD"
+ break
+ elif hypervisorhost.hypervisor == "VMware":
+ cls.uploadvolumeformat = "OVA"
+ break
+ elif hypervisorhost.hypervisor == "KVM":
+ cls.uploadvolumeformat = "QCOW2"
+ break
+ else:
+ break
+
+ cls.uploadurl = cls.testdata["configurableData"]["browser_upload_volume"][cls.uploadvolumeformat]["url"]
+ cls.volname = cls.testdata["configurableData"]["browser_upload_volume"][cls.uploadvolumeformat]["diskname"]
+ cls.md5sum = cls.testdata["configurableData"]["browser_upload_volume"][cls.uploadvolumeformat]["checksum"]
cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
cls.domain = get_domain(cls.apiclient)
cls.pod = get_pod(cls.apiclient, cls.zone.id)
- if cls.uploadvolumeformat=="QCOW2" or cls.uploadvolumeformat=="VHD":
- cls.extuploadurl=cls.testdata["configurableData"]["browser_upload_volume_extended"][cls.uploadvolumeformat]["url"]
+ if cls.uploadvolumeformat == "QCOW2" or cls.uploadvolumeformat == "VHD":
+ cls.extuploadurl = cls.testdata["configurableData"]["browser_upload_volume_extended"][cls.uploadvolumeformat]["url"]
cls.account = Account.create(
cls.apiclient,
cls.testdata["account"],
domainid=cls.domain.id
)
+ cls._cleanup.append(cls.account)
cls.template = get_template(
cls.apiclient,
cls.zone.id)
if cls.template == FAILED:
- raise unittest.SkipTest(
- "Check for default cent OS template readiness ")
+ raise unittest.SkipTest(
+ "Check for default cent OS template readiness ")
cls.service_offering = ServiceOffering.create(
- cls.apiclient,
+ cls.apiclient,
cls.testdata["service_offering"]
)
+ cls._cleanup.append(cls.service_offering)
cls.disk_offering = DiskOffering.create(
cls.apiclient,
cls.testdata["configurableData"]["browser_upload_volume"]["browser_resized_disk_offering"],
custom=True
)
+ cls._cleanup.append(cls.disk_offering)
cls.project = Project.create(
- cls.apiclient,
- cls.testdata["project"],
- account=cls.account.name,
- domainid=cls.account.domainid
- )
- cls._cleanup = [
- cls.project,
- cls.account,
- cls.service_offering,
- cls.disk_offering
- ]
-
-
+ cls.apiclient,
+ cls.testdata["project"],
+ account=cls.account.name,
+ domainid=cls.account.domainid
+ )
+ cls._cleanup.append(cls.project)
def __verify_values(self, expected_vals, actual_vals):
@@ -151,121 +140,116 @@ class TestBrowseUploadVolume(cloudstackTestCase):
(exp_val, act_val))
return return_flag
- def validate_uploaded_volume(self,up_volid,volumestate):
+ def validate_uploaded_volume(self, up_volid, volumestate):
config1 = Configurations.list(
- self.apiclient,
- name='upload.operation.timeout'
- )
+ self.apiclient,
+ name='upload.operation.timeout'
+ )
config2 = Configurations.list(
- self.apiclient,
- name='upload.monitoring.interval'
- )
+ self.apiclient,
+ name='upload.monitoring.interval'
+ )
uploadtimeout = int(config1[0].value)
- monitoringinterval=int(config2[0].value)
+ monitoringinterval = int(config2[0].value)
- time.sleep((uploadtimeout*60)+monitoringinterval)
+ time.sleep((uploadtimeout * 60) + monitoringinterval)
list_volume_response = Volume.list(
- self.apiclient,
- id=up_volid
- )
+ self.apiclient,
+ id=up_volid
+ )
if list_volume_response is None:
self.debug("Volume got deleted after timeout")
return
self.assertEqual(
- list_volume_response[0].state,
- volumestate,
- "Check volume state in ListVolumes"
- )
+ list_volume_response[0].state,
+ volumestate,
+ "Check volume state in ListVolumes"
+ )
return
-
- def browse_upload_volume_with_projectid(self,projectid):
+ def browse_upload_volume_with_projectid(self, projectid):
cmd = getUploadParamsForVolume.getUploadParamsForVolumeCmd()
cmd.zoneid = self.zone.id
cmd.format = self.uploadvolumeformat
- cmd.name=self.volname+self.account.name+(random.choice(string.ascii_uppercase))
- cmd.projectid=projectid
- getuploadparamsresponce=self.apiclient.getUploadParamsForVolume(cmd)
+ cmd.name = self.volname + self.account.name + (random.choice(string.ascii_uppercase))
+ cmd.projectid = projectid
+ getuploadparamsresponce = self.apiclient.getUploadParamsForVolume(cmd)
- signt=getuploadparamsresponce.signature
- posturl=getuploadparamsresponce.postURL
- metadata=getuploadparamsresponce.metadata
- expiredata=getuploadparamsresponce.expires
+ signt = getuploadparamsresponce.signature
+ posturl = getuploadparamsresponce.postURL
+ metadata = getuploadparamsresponce.metadata
+ expiredata = getuploadparamsresponce.expires
- url=self.uploadurl
+ url = self.uploadurl
uploadfile = url.split('/')[-1]
r = requests.get(url, stream=True)
with open(uploadfile, 'wb') as f:
- for chunk in r.iter_content(chunk_size=1024):
- if chunk: # filter out keep-alive new chunks
+ for chunk in r.iter_content(chunk_size=1024):
+ if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
- files={'file':(uploadfile,open(uploadfile,'rb'),'application/octet-stream')}
+ files = {'file': (uploadfile, open(uploadfile, 'rb'), 'application/octet-stream')}
- headers={'X-signature':signt,'X-metadata':metadata,'X-expires':expiredata}
+ headers = {'X-signature': signt, 'X-metadata': metadata, 'X-expires': expiredata}
- results = requests.post(posturl,files=files,headers=headers,verify=False)
+ results = requests.post(posturl, files=files, headers=headers, verify=False)
print(results.status_code)
- if results.status_code !=200:
+ if results.status_code != 200:
self.fail("Upload is not fine")
- self.validate_uploaded_volume(getuploadparamsresponce.id,'Uploaded')
+ self.validate_uploaded_volume(getuploadparamsresponce.id, 'Uploaded')
list_volume_response = Volume.list(
- self.apiclient,
- projectid=projectid
- )
- if list_volume_response[0].id==getuploadparamsresponce.id:
- return(getuploadparamsresponce)
+ self.apiclient,
+ projectid=projectid
+ )
+ if list_volume_response[0].id == getuploadparamsresponce.id:
+ return (getuploadparamsresponce)
else:
self.fail("Volume is not listed with projectid")
-
def browse_upload_volume_with_out_zoneid(self):
-
cmd = getUploadParamsForVolume.getUploadParamsForVolumeCmd()
cmd.format = self.uploadvolumeformat
- cmd.name=self.volname+self.account.name+(random.choice(string.ascii_uppercase))
- success= False
+ cmd.name = self.volname + self.account.name + (random.choice(string.ascii_uppercase))
+ success = False
try:
- getuploadparamsresponce=self.apiclient.getUploadParamsForVolume(cmd)
+ self.apiclient.getUploadParamsForVolume(cmd)
except Exception as ex:
if "Invalid Parameter" in str(ex):
success = True
self.assertEqual(
- success,
- True,
- "Upload Volume - verify upload volume API request is handled without mandatory params - zoneid ")
+ success,
+ True,
+ "Upload Volume - verify upload volume API request is handled without mandatory params - zoneid ")
return
-
def browse_upload_volume_with_out_format(self):
-
cmd = getUploadParamsForVolume.getUploadParamsForVolumeCmd()
cmd.zoneid = self.zone.id
- cmd.name=self.volname+self.account.name+(random.choice(string.ascii_uppercase))
- success= False
+ cmd.name = self.volname + self.account.name + (random.choice(string.ascii_uppercase))
+ success = False
try:
- getuploadparamsresponce=self.apiclient.getUploadParamsForVolume(cmd)
+ self.apiclient.getUploadParamsForVolume(cmd)
except Exception as ex:
if "Invalid Parameter" in str(ex):
success = True
self.assertEqual(
- success,
- True,
- "Upload Volume - verify upload volume API request is handled without mandatory params - format")
+ success,
+ True,
+ "Upload Volume - verify upload volume API request is handled without mandatory params - format")
return
@@ -273,451 +257,437 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cmd = getUploadParamsForVolume.getUploadParamsForVolumeCmd()
cmd.zoneid = self.zone.id
cmd.format = self.uploadvolumeformat
- cmd.name=self.volname+self.account.name+(random.choice(string.ascii_uppercase))
- cmd.account=self.account.name
- cmd.domainid=self.domain.id
- getuploadparamsresponce=self.apiclient.getUploadParamsForVolume(cmd)
-
- signt=getuploadparamsresponce.signature
- posturl=getuploadparamsresponce.postURL
- metadata=getuploadparamsresponce.metadata
- expiredata=getuploadparamsresponce.expires
- self.globalurl=getuploadparamsresponce.postURL
- #url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
- url=self.uploadurl
+ cmd.name = self.volname + self.account.name + (random.choice(string.ascii_uppercase))
+ cmd.account = self.account.name
+ cmd.domainid = self.domain.id
+ getuploadparamsresponce = self.apiclient.getUploadParamsForVolume(cmd)
+
+ signt = getuploadparamsresponce.signature
+ posturl = getuploadparamsresponce.postURL
+ metadata = getuploadparamsresponce.metadata
+ expiredata = getuploadparamsresponce.expires
+ self.globalurl = getuploadparamsresponce.postURL
+ # url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
+ url = self.uploadurl
uploadfile = url.split('/')[-1]
r = requests.get(url, stream=True)
with open(uploadfile, 'wb') as f:
- for chunk in r.iter_content(chunk_size=1024):
- if chunk: # filter out keep-alive new chunks
+ for chunk in r.iter_content(chunk_size=1024):
+ if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
- files={'file':(uploadfile,open(uploadfile,'rb'),'application/octet-stream')}
+ files = {'file': (uploadfile, open(uploadfile, 'rb'), 'application/octet-stream')}
- headers={'X-signature':signt,'X-metadata':metadata,'X-expires':expiredata}
+ headers = {'X-signature': signt, 'X-metadata': metadata, 'X-expires': expiredata}
- results = requests.post(posturl,files=files,headers=headers,verify=False)
+ results = requests.post(posturl, files=files, headers=headers, verify=False)
print(results.status_code)
- if results.status_code !=200:
+ if results.status_code != 200:
self.fail("Upload is not fine")
- self.validate_uploaded_volume(getuploadparamsresponce.id,'Uploaded')
+ self.validate_uploaded_volume(getuploadparamsresponce.id, 'Uploaded')
- return(getuploadparamsresponce)
+ return (getuploadparamsresponce)
def onlyupload(self):
cmd = getUploadParamsForVolume.getUploadParamsForVolumeCmd()
cmd.zoneid = self.zone.id
cmd.format = self.uploadvolumeformat
- cmd.name=self.volname+self.account.name+(random.choice(string.ascii_uppercase))
- cmd.account=self.account.name
- cmd.domainid=self.domain.id
- getuploadparamsresponce=self.apiclient.getUploadParamsForVolume(cmd)
- return(getuploadparamsresponce)
-
-
+ cmd.name = self.volname + self.account.name + (random.choice(string.ascii_uppercase))
+ cmd.account = self.account.name
+ cmd.domainid = self.domain.id
+ getuploadparamsresponce = self.apiclient.getUploadParamsForVolume(cmd)
+ return (getuploadparamsresponce)
def invalidupload(self):
- success= False
+ success = False
try:
cmd = getUploadParamsForVolume.getUploadParamsForVolumeCmd()
cmd.zoneid = self.zone.id
cmd.format = "invalidformat"
- cmd.name=self.volname+self.account.name+(random.choice(string.ascii_uppercase))
- cmd.account=self.account.name
- cmd.domainid=self.domain.id
- getuploadparamsresponce=self.apiclient.getUploadParamsForVolume(cmd)
+ cmd.name = self.volname + self.account.name + (random.choice(string.ascii_uppercase))
+ cmd.account = self.account.name
+ cmd.domainid = self.domain.id
+ self.apiclient.getUploadParamsForVolume(cmd)
except Exception as ex:
if "No enum constant com.cloud.storage.Storage.ImageFormat" in str(ex):
success = True
self.assertEqual(
- success,
- True,
- "Verify - Upload volume with invalid format is handled")
+ success,
+ True,
+ "Verify - Upload volume with invalid format is handled")
return
-
def invalidposturl(self):
- success= False
+ success = False
try:
cmd = getUploadParamsForVolume.getUploadParamsForVolumeCmd()
cmd.zoneid = self.zone.id
cmd.format = self.uploadvolumeformat
- cmd.name=self.volname+self.account.name+(random.choice(string.ascii_uppercase))
- cmd.account=self.account.name
- cmd.domainid=self.domain.id
- getuploadparamsresponce=self.apiclient.getUploadParamsForVolume(cmd)
-
+ cmd.name = self.volname + self.account.name + (random.choice(string.ascii_uppercase))
+ cmd.account = self.account.name
+ cmd.domainid = self.domain.id
+ getuploadparamsresponce = self.apiclient.getUploadParamsForVolume(cmd)
- signt=getuploadparamsresponce.signature
- posturl="http://invalidposturl/2999834."+self.uploadvolumeformat
- metadata=getuploadparamsresponce.metadata
- expiredata=getuploadparamsresponce.expires
- #url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
- url=self.uploadurl
+ signt = getuploadparamsresponce.signature
+ posturl = "http://invalidposturl/2999834." + self.uploadvolumeformat
+ metadata = getuploadparamsresponce.metadata
+ expiredata = getuploadparamsresponce.expires
+ # url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
+ url = self.uploadurl
uploadfile = url.split('/')[-1]
r = requests.get(url, stream=True)
with open(uploadfile, 'wb') as f:
- for chunk in r.iter_content(chunk_size=1024):
- if chunk: # filter out keep-alive new chunks
+ for chunk in r.iter_content(chunk_size=1024):
+ if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
- files={'file':(uploadfile,open(uploadfile,'rb'),'application/octet-stream')}
+ files = {'file': (uploadfile, open(uploadfile, 'rb'), 'application/octet-stream')}
- headers={'X-signature':signt,'X-metadata':metadata,'X-expires':expiredata}
+ headers = {'X-signature': signt, 'X-metadata': metadata, 'X-expires': expiredata}
- results = requests.post(posturl,files=files,headers=headers,verify=False)
+ results = requests.post(posturl, files=files, headers=headers, verify=False)
- self.debug(results.status_code)
- if results.status_code !=200:
+ self.debug(results.status_code)
+ if results.status_code != 200:
self.fail("Upload is not fine")
- self.validate_uploaded_volume(getuploadparamsresponce.id,'UploadedAbandoned')
+ self.validate_uploaded_volume(getuploadparamsresponce.id, 'UploadedAbandoned')
except Exception as ex:
- if "Max retries exceeded with url" in str(ex):
- success = True
+ if "Max retries exceeded with url" in str(ex):
+ success = True
self.assertEqual(
- success,
- True,
- "Verify - Tampered Post URL is handled")
-
- return(getuploadparamsresponce)
+ success,
+ True,
+ "Verify - Tampered Post URL is handled")
+ return (getuploadparamsresponce)
def reuse_url(self):
cmd = getUploadParamsForVolume.getUploadParamsForVolumeCmd()
cmd.zoneid = self.zone.id
cmd.format = self.uploadvolumeformat
- cmd.name=self.volname+self.account.name+(random.choice(string.ascii_uppercase))
- cmd.account=self.account.name
- cmd.domainid=self.domain.id
- getuploadparamsresponce=self.apiclient.getUploadParamsForVolume(cmd)
-
- signt=getuploadparamsresponce.signature
- posturl=self.globalurl
- metadata=getuploadparamsresponce.metadata
- expiredata=getuploadparamsresponce.expires
- url=self.uploadurl
+ cmd.name = self.volname + self.account.name + (random.choice(string.ascii_uppercase))
+ cmd.account = self.account.name
+ cmd.domainid = self.domain.id
+ getuploadparamsresponce = self.apiclient.getUploadParamsForVolume(cmd)
+
+ signt = getuploadparamsresponce.signature
+ posturl = self.globalurl
+ metadata = getuploadparamsresponce.metadata
+ expiredata = getuploadparamsresponce.expires
+ url = self.uploadurl
time.sleep(300)
uploadfile = url.split('/')[-1]
r = requests.get(url, stream=True)
with open(uploadfile, 'wb') as f:
- for chunk in r.iter_content(chunk_size=1024):
- if chunk: # filter out keep-alive new chunks
- f.write(chunk)
- f.flush()
+ for chunk in r.iter_content(chunk_size=1024):
+ if chunk: # filter out keep-alive new chunks
+ f.write(chunk)
+ f.flush()
- files={'file':(uploadfile,open(uploadfile,'rb'),'application/octet-stream')}
+ files = {'file': (uploadfile, open(uploadfile, 'rb'), 'application/octet-stream')}
- headers={'X-signature':signt,'X-metadata':metadata,'X-expires':expiredata}
+ headers = {'X-signature': signt, 'X-metadata': metadata, 'X-expires': expiredata}
- results = requests.post(posturl,files=files,headers=headers,verify=False)
+ results = requests.post(posturl, files=files, headers=headers, verify=False)
time.sleep(60)
- print(results.status_code)
- if results.status_code == 200:
- self.fail("Upload URL is allowed to reuse")
+ if results.status_code == 200:
+ self.fail("Upload URL is allowed to reuse")
config = Configurations.list(
- self.apiclient,
- name='upload.operation.timeout'
- )
+ self.apiclient,
+ name='upload.operation.timeout'
+ )
uploadtimeout = int(config[0].value)
- time.sleep(uploadtimeout*60)
- self.validate_uploaded_volume(getuploadparamsresponce.id,'UploadAbandoned')
+ time.sleep(uploadtimeout * 60)
+ self.validate_uploaded_volume(getuploadparamsresponce.id, 'UploadAbandoned')
return
- def validate_storage_cleanup(self,invalidpostvolume,cleanup_interval):
+ def validate_storage_cleanup(self, invalidpostvolume, cleanup_interval):
list_volume_response = Volume.list(
- self.apiclient,
- id=invalidpostvolume.id
- )
+ self.apiclient,
+ id=invalidpostvolume.id
+ )
self.assertNotEqual(
- list_volume_response,
- None,
- "Check if volume exists in ListVolumes"
- )
+ list_volume_response,
+ None,
+ "Check if volume exists in ListVolumes"
+ )
config1 = Configurations.list(
- self.apiclient,
- name='upload.operation.timeout'
- )
+ self.apiclient,
+ name='upload.operation.timeout'
+ )
config2 = Configurations.list(
- self.apiclient,
- name='upload.monitoring.interval'
- )
+ self.apiclient,
+ name='upload.monitoring.interval'
+ )
uploadtimeout = int(config1[0].value)
- monitorinterval=int(config2[0].value)
+ monitorinterval = int(config2[0].value)
- if cleanup_interval >= ((uploadtimeout*60)+monitorinterval):
+ if cleanup_interval >= ((uploadtimeout * 60) + monitorinterval):
time.sleep(cleanup_interval)
else:
- time.sleep(((uploadtimeout*60)+monitorinterval))
+ time.sleep(((uploadtimeout * 60) + monitorinterval))
list_volume_response = Volume.list(
- self.apiclient,
- id=invalidpostvolume.id
- )
+ self.apiclient,
+ id=invalidpostvolume.id
+ )
self.assertEqual(
- list_volume_response,
- None,
- "Storage Cleanup - Verify UploadAbandoned volumes are deleted"
- )
-
+ list_volume_response,
+ None,
+ "Storage Cleanup - Verify UploadAbandoned volumes are deleted"
+ )
- def validate_max_vol_size(self,up_vol,volumestate):
+ def validate_max_vol_size(self, up_vol, volumestate):
list_volume_response = Volume.list(
- self.apiclient,
- id=up_vol.id
- )
+ self.apiclient,
+ id=up_vol.id
+ )
self.assertNotEqual(
- list_volume_response,
- None,
- "Check if volume exists in ListVolumes"
- )
+ list_volume_response,
+ None,
+ "Check if volume exists in ListVolumes"
+ )
self.assertEqual(
- list_volume_response[0].state,
- volumestate,
- "Check volume state in ListVolumes"
- )
+ list_volume_response[0].state,
+ volumestate,
+ "Check volume state in ListVolumes"
+ )
config = Configurations.list(
- self.apiclient,
- name='storage.max.volume.upload.size'
- )
+ self.apiclient,
+ name='storage.max.volume.upload.size'
+ )
max_size = int(config[0].value)
self.debug(max_size)
- self.debug(int(list_volume_response[0].size)/(1024*1024*1024))
- if (int(list_volume_response[0].size)/(1024*1024*1024)) > max_size:
+ self.debug(int(list_volume_response[0].size) / (1024 * 1024 * 1024))
+ if (int(list_volume_response[0].size) / (1024 * 1024 * 1024)) > max_size:
self.fail("Global Config storage.max.volume.upload.size is not considered with Browser Based Upload volumes")
-
-
def browse_upload_volume_with_md5(self):
cmd = getUploadParamsForVolume.getUploadParamsForVolumeCmd()
cmd.zoneid = self.zone.id
cmd.format = self.uploadvolumeformat
- cmd.name=self.volname+self.account.name+(random.choice(string.ascii_uppercase))
- cmd.account=self.account.name
- cmd.domainid=self.domain.id
- cmd.checksum=self.md5sum
- getuploadparamsresponce=self.apiclient.getUploadParamsForVolume(cmd)
-
- signt=getuploadparamsresponce.signature
- posturl=getuploadparamsresponce.postURL
- metadata=getuploadparamsresponce.metadata
- expiredata=getuploadparamsresponce.expires
- #url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
- url=self.uploadurl
+ cmd.name = self.volname + self.account.name + (random.choice(string.ascii_uppercase))
+ cmd.account = self.account.name
+ cmd.domainid = self.domain.id
+ cmd.checksum = self.md5sum
+ getuploadparamsresponce = self.apiclient.getUploadParamsForVolume(cmd)
+
+ signt = getuploadparamsresponce.signature
+ posturl = getuploadparamsresponce.postURL
+ metadata = getuploadparamsresponce.metadata
+ expiredata = getuploadparamsresponce.expires
+ # url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
+ url = self.uploadurl
uploadfile = url.split('/')[-1]
r = requests.get(url, stream=True)
with open(uploadfile, 'wb') as f:
- for chunk in r.iter_content(chunk_size=1024):
- if chunk: # filter out keep-alive new chunks
+ for chunk in r.iter_content(chunk_size=1024):
+ if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
- #uploadfile='rajani-thin-volume.vhd'
+ # uploadfile='rajani-thin-volume.vhd'
- #files={'file':('rajani-thin-volume.vhd',open(uploadfile,'rb'),'application/octet-stream')}
+ # files={'file':('rajani-thin-volume.vhd',open(uploadfile,'rb'),'application/octet-stream')}
- #headers={'X-signature':signt,'X-metadata':metadata,'X-expires':expiredata}
+ # headers={'X-signature':signt,'X-metadata':metadata,'X-expires':expiredata}
- files={'file':(uploadfile,open(uploadfile,'rb'),'application/octet-stream')}
+ files = {'file': (uploadfile, open(uploadfile, 'rb'), 'application/octet-stream')}
- headers={'X-signature':signt,'X-metadata':metadata,'X-expires':expiredata}
+ headers = {'X-signature': signt, 'X-metadata': metadata, 'X-expires': expiredata}
- results = requests.post(posturl,files=files,headers=headers,verify=False)
+ results = requests.post(posturl, files=files, headers=headers, verify=False)
time.sleep(60)
- print(results.status_code)
- if results.status_code !=200:
+ if results.status_code != 200:
self.fail("Upload is not fine")
- self.validate_uploaded_volume(getuploadparamsresponce.id,'Uploaded')
+ self.validate_uploaded_volume(getuploadparamsresponce.id, 'Uploaded')
- return(getuploadparamsresponce)
+ return (getuploadparamsresponce)
def browse_upload_volume_with_invalid_md5(self):
cmd = getUploadParamsForVolume.getUploadParamsForVolumeCmd()
cmd.zoneid = self.zone.id
cmd.format = self.uploadvolumeformat
- cmd.name=self.volname+self.account.name+(random.choice(string.ascii_uppercase))
- cmd.account=self.account.name
- cmd.domainid=self.domain.id
- cmd.checksum="xxxxxxxx"
- getuploadparamsresponce=self.apiclient.getUploadParamsForVolume(cmd)
-
- signt=getuploadparamsresponce.signature
- posturl=getuploadparamsresponce.postURL
- metadata=getuploadparamsresponce.metadata
- expiredata=getuploadparamsresponce.expires
- #url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
- url=self.uploadurl
+ cmd.name = self.volname + self.account.name + (random.choice(string.ascii_uppercase))
+ cmd.account = self.account.name
+ cmd.domainid = self.domain.id
+ cmd.checksum = "xxxxxxxx"
+ getuploadparamsresponce = self.apiclient.getUploadParamsForVolume(cmd)
+
+ signt = getuploadparamsresponce.signature
+ posturl = getuploadparamsresponce.postURL
+ metadata = getuploadparamsresponce.metadata
+ expiredata = getuploadparamsresponce.expires
+ # url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
+ url = self.uploadurl
uploadfile = url.split('/')[-1]
r = requests.get(url, stream=True)
with open(uploadfile, 'wb') as f:
- for chunk in r.iter_content(chunk_size=1024):
- if chunk: # filter out keep-alive new chunks
+ for chunk in r.iter_content(chunk_size=1024):
+ if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
- #uploadfile='rajani-thin-volume.vhd'
+ # uploadfile='rajani-thin-volume.vhd'
- #files={'file':('rajani-thin-volume.vhd',open(uploadfile,'rb'),'application/octet-stream')}
+ # files={'file':('rajani-thin-volume.vhd',open(uploadfile,'rb'),'application/octet-stream')}
- #headers={'X-signature':signt,'X-metadata':metadata,'X-expires':expiredata}
+ # headers={'X-signature':signt,'X-metadata':metadata,'X-expires':expiredata}
- files={'file':(uploadfile,open(uploadfile,'rb'),'application/octet-stream')}
+ files = {'file': (uploadfile, open(uploadfile, 'rb'), 'application/octet-stream')}
- headers={'X-signature':signt,'X-metadata':metadata,'X-expires':expiredata}
+ headers = {'X-signature': signt, 'X-metadata': metadata, 'X-expires': expiredata}
- results = requests.post(posturl,files=files,headers=headers,verify=False)
+ results = requests.post(posturl, files=files, headers=headers, verify=False)
time.sleep(60)
print(results.status_code)
- if results.status_code !=200:
+ if results.status_code != 200:
self.fail("Upload is not fine")
- self.validate_uploaded_volume(getuploadparamsresponce.id,'Uploaded')
+ self.validate_uploaded_volume(getuploadparamsresponce.id, 'Uploaded')
- return(getuploadparamsresponce)
+ return (getuploadparamsresponce)
- def validate_vm(self,vmdetails,vmstate):
+ def validate_vm(self, vmdetails, vmstate):
- time.sleep(120 )
+ time.sleep(120)
vm_response = VirtualMachine.list(
- self.apiclient,
- id=vmdetails.id,
- )
+ self.apiclient,
+ id=vmdetails.id,
+ )
self.assertEqual(
- isinstance(vm_response, list),
- True,
- "Check list VM response for valid list"
- )
+ isinstance(vm_response, list),
+ True,
+ "Check list VM response for valid list"
+ )
- # Verify VM response to check whether VM deployment was successful
+ # Verify VM response to check whether VM deployment was successful
self.assertNotEqual(
- len(vm_response),
- 0,
- "Check VMs available in List VMs response"
- )
+ len(vm_response),
+ 0,
+ "Check VMs available in List VMs response"
+ )
deployedvm = vm_response[0]
self.assertEqual(
- deployedvm.state,
- vmstate,
- "Check the state of VM"
- )
+ deployedvm.state,
+ vmstate,
+ "Check the state of VM"
+ )
def deploy_vm(self):
- virtual_machine = VirtualMachine.create(
- self.apiclient,
- self.testdata["virtual_machine"],
- templateid=self.template.id,
- zoneid=self.zone.id,
- accountid=self.account.name,
- domainid=self.account.domainid,
- serviceofferingid=self.service_offering.id,
- )
- self.validate_vm(virtual_machine,'Running')
- return(virtual_machine)
-
- def attach_volume(self,vmlist,volid):
+ virtual_machine = VirtualMachine.create(
+ self.apiclient,
+ self.testdata["virtual_machine"],
+ templateid=self.template.id,
+ zoneid=self.zone.id,
+ accountid=self.account.name,
+ domainid=self.account.domainid,
+ serviceofferingid=self.service_offering.id,
+ )
+ self.validate_vm(virtual_machine, 'Running')
+ return (virtual_machine)
+
+ def attach_volume(self, vmlist, volid):
list_volume_response = Volume.list(
- self.apiclient,
- id=volid
- )
+ self.apiclient,
+ id=volid
+ )
print(list_volume_response[0])
vmlist.attach_volume(
- self.apiclient,
- list_volume_response[0]
- )
+ self.apiclient,
+ list_volume_response[0]
+ )
list_volume_response = Volume.list(
- self.apiclient,
- virtualmachineid=vmlist.id,
- type='DATADISK',
- listall=True
- )
+ self.apiclient,
+ virtualmachineid=vmlist.id,
+ type='DATADISK',
+ listall=True
+ )
self.assertNotEqual(
- list_volume_response,
- None,
- "Check if volume exists in ListVolumes")
+ list_volume_response,
+ None,
+ "Check if volume exists in ListVolumes")
self.assertEqual(
- isinstance(list_volume_response, list),
- True,
- "Check list volumes response for valid list")
- self.validate_uploaded_volume(volid,'Ready')
-
+ isinstance(list_volume_response, list),
+ True,
+ "Check list volumes response for valid list")
+ self.validate_uploaded_volume(volid, 'Ready')
- def attach_deleted_volume(self,vmlist,volume):
+ def attach_deleted_volume(self, vmlist, volume):
- success= False
+ success = False
try:
vmlist.attach_volume(
- self.apiclient,
- volume
- )
+ self.apiclient,
+ volume
+ )
except Exception as ex:
if "Please specify a volume with the valid type: DATADISK" in str(ex):
success = True
self.assertEqual(
- success,
- True,
- "Attaching the Deleted Volume is handled appropriately not to get attached the deleted uploaded volume")
+ success,
+ True,
+ "Attaching the Deleted Volume is handled appropriately not to get attached the deleted uploaded volume")
return
-
- def reboot_vm(self,vmdetails):
+ def reboot_vm(self, vmdetails):
vmdetails.reboot(self.apiclient)
- self.validate_vm(vmdetails,'Running')
+ self.validate_vm(vmdetails, 'Running')
- def stop_vm(self,vmdetails):
+ def stop_vm(self, vmdetails):
vmdetails.stop(self.apiclient)
- self.validate_vm(vmdetails,'Stopped')
+ self.validate_vm(vmdetails, 'Stopped')
- def start_vm(self,vmdetails):
+ def start_vm(self, vmdetails):
vmdetails.start(self.apiclient)
- self.validate_vm(vmdetails,'Running')
+ self.validate_vm(vmdetails, 'Running')
- def vmoperations(self,vmdetails):
+ def vmoperations(self, vmdetails):
self.reboot_vm(vmdetails)
self.stop_vm(vmdetails)
self.start_vm(vmdetails)
-
- def detach_volume(self,vmdetails,volid):
+ def detach_volume(self, vmdetails, volid):
"""Detach a Volume attached to a VM
"""
list_volume_response = Volume.list(
- self.apiclient,
- id=volid
- )
- print(list_volume_response[0])
- vmdetails.detach_volume(self.apiclient,list_volume_response[0])
+ self.apiclient,
+ id=volid
+ )
+ vmdetails.detach_volume(self.apiclient, list_volume_response[0])
# Sleep to ensure the current state will reflected in other calls
time.sleep(self.testdata["sleep"])
@@ -750,70 +720,69 @@ class TestBrowseUploadVolume(cloudstackTestCase):
)
return
-
- def restore_vm(self,vmdetails):
- #TODO: SIMENH: add another test the data on the restored VM.
+ def restore_vm(self, vmdetails):
+ # TODO: SIMENH: add another test the data on the restored VM.
"""Test recover Virtual Machine
"""
- #cmd = recoverVirtualMachine.recoverVirtualMachineCmd()
+ # cmd = recoverVirtualMachine.recoverVirtualMachineCmd()
cmd = restoreVirtualMachine.restoreVirtualMachineCmd()
cmd.virtualmachineid = vmdetails.id
self.apiclient.recoverVirtualMachine(cmd)
list_vm_response = VirtualMachine.list(
- self.apiclient,
- id=vmdetails.id
- )
+ self.apiclient,
+ id=vmdetails.id
+ )
self.assertEqual(
- isinstance(list_vm_response, list),
- True,
- "Check list response returns a valid list"
- )
+ isinstance(list_vm_response, list),
+ True,
+ "Check list response returns a valid list"
+ )
self.assertNotEqual(
- len(list_vm_response),
- 0,
- "Check VM available in List Virtual Machines"
- )
+ len(list_vm_response),
+ 0,
+ "Check VM available in List Virtual Machines"
+ )
self.assertEqual(
- list_vm_response[0].state,
- "Running",
- "Check virtual machine is in Running state"
- )
+ list_vm_response[0].state,
+ "Running",
+ "Check virtual machine is in Running state"
+ )
return
- def deletevolume_fail(self,volumeid):
+ def deletevolume_fail(self, volumeid):
"""Delete a Volume attached to a VM
"""
cmd = deleteVolume.deleteVolumeCmd()
cmd.id = volumeid
- success= False
+ success = False
try:
self.apiclient.deleteVolume(cmd)
except Exception as ex:
if "Please specify a volume that is not attached to any VM" in str(ex):
success = True
self.assertEqual(
- success,
- True,
- "DeleteVolume - verify Ready State volume (attached to a VM) is handled appropriately not to get deleted ")
+ success,
+ True,
+ "DeleteVolume - verify Ready State volume (attached to a VM) is handled appropriately not to get deleted ")
return
- def delete_volume(self,volumeid):
+ def delete_volume(self, volumeid):
"""Delete a Volume attached to a VM
"""
cmd = deleteVolume.deleteVolumeCmd()
- cmd.id =volumeid
+ cmd.id = volumeid
self.apiclient.deleteVolume(cmd)
- def download_volume(self,volumeid):
+ def download_volume(self, volumeid):
cmd = extractVolume.extractVolumeCmd()
cmd.id = volumeid
@@ -827,7 +796,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
response = urllib.request.urlopen(formatted_url)
self.debug("response from volume url %s" % response.getcode())
fd, path = tempfile.mkstemp()
- self.debug("Saving volume %s to path %s" %(volumeid, path))
+ self.debug("Saving volume %s to path %s" % (volumeid, path))
os.close(fd)
with open(path, 'wb') as fd:
fd.write(response.read())
@@ -838,37 +807,36 @@ class TestBrowseUploadVolume(cloudstackTestCase):
% (extract_vol.url, volumeid)
)
- def resize_fail(self,volumeid):
+ def resize_fail(self, volumeid):
- cmd = resizeVolume.resizeVolumeCmd()
- cmd.id = volumeid
+ cmd = resizeVolume.resizeVolumeCmd()
+ cmd.id = volumeid
cmd.diskofferingid = self.disk_offering.id
- success = False
+ success = False
try:
self.apiclient.resizeVolume(cmd)
except Exception as ex:
if "Volume should be in ready or allocated state before attempting a resize" in str(ex):
success = True
self.assertEqual(
- success,
- True,
- "ResizeVolume - verify Uploaded State volume is handled appropriately")
-
+ success,
+ True,
+ "ResizeVolume - verify Uploaded State volume is handled appropriately")
- def resize_volume(self,volumeid):
+ def resize_volume(self, volumeid):
"""Test resize a volume"""
self.testdata["configurableData"]["browser_upload_volume"]["browser_resized_disk_offering"]["disksize"] = 20
disk_offering_20_GB = DiskOffering.create(
- self.apiclient,
- self.testdata["configurableData"]["browser_upload_volume"]["browser_resized_disk_offering"]
- )
+ self.apiclient,
+ self.testdata["configurableData"]["browser_upload_volume"]["browser_resized_disk_offering"]
+ )
self.cleanup.append(disk_offering_20_GB)
- cmd= resizeVolume.resizeVolumeCmd()
- cmd.id= volumeid
+ cmd = resizeVolume.resizeVolumeCmd()
+ cmd.id = volumeid
cmd.diskofferingid = disk_offering_20_GB.id
self.apiclient.resizeVolume(cmd)
@@ -877,12 +845,12 @@ class TestBrowseUploadVolume(cloudstackTestCase):
success = False
while count < 3:
list_volume_response = Volume.list(
- self.apiclient,
- id=volumeid,
- type='DATADISK'
- )
+ self.apiclient,
+ id=volumeid,
+ type='DATADISK'
+ )
for vol in list_volume_response:
- if vol.id == volumeid and int(vol.size) == (int(disk_offering_20_GB.disksize) * (1024** 3)) and vol.state == 'Ready':
+ if vol.id == volumeid and int(vol.size) == (int(disk_offering_20_GB.disksize) * (1024 ** 3)) and vol.state == 'Ready':
success = True
if success:
break
@@ -891,79 +859,77 @@ class TestBrowseUploadVolume(cloudstackTestCase):
count += 1
self.assertEqual(
- success,
- True,
- "Check if the data volume resized appropriately"
- )
+ success,
+ True,
+ "Check if the data volume resized appropriately"
+ )
return
+ def destroy_vm(self, vmdetails):
- def destroy_vm(self,vmdetails):
-
- success = False
+ success = False
vmdetails.delete(self.apiclient, expunge=False)
try:
list_vm_response1 = VirtualMachine.list(
- self.apiclient,
- id=vmdetails.id
- )
+ self.apiclient,
+ id=vmdetails.id
+ )
except Exception as ex:
if "Unable to find a virtual machine with specified vmId" in str(ex):
success = True
- if success == "True":
+ if success == "True":
self.debug("VM is already expunged")
return
list_vm_response1 = VirtualMachine.list(
- self.apiclient,
- id=vmdetails.id
- )
+ self.apiclient,
+ id=vmdetails.id
+ )
if list_vm_response1 is None:
self.debug("VM already expunged")
return
- if list_vm_response1[0].state=="Expunging":
+ if list_vm_response1[0].state == "Expunging":
self.debug("VM already getting expunged")
return
list_vm_response = VirtualMachine.list(
- self.apiclient,
- id=vmdetails.id
- )
+ self.apiclient,
+ id=vmdetails.id
+ )
if list_vm_response is None:
self.debug("VM already expunged")
return
self.assertEqual(
- isinstance(list_vm_response, list),
- True,
- "Check list response returns a valid list"
- )
+ isinstance(list_vm_response, list),
+ True,
+ "Check list response returns a valid list"
+ )
self.assertNotEqual(
- len(list_vm_response),
- 0,
- "Check VM available in List Virtual Machines"
- )
+ len(list_vm_response),
+ 0,
+ "Check VM available in List Virtual Machines"
+ )
self.assertEqual(
- list_vm_response[0].state,
- "Destroyed",
- "Check virtual machine is in destroyed state"
- )
+ list_vm_response[0].state,
+ "Destroyed",
+ "Check virtual machine is in destroyed state"
+ )
return
-
- def recover_destroyed_vm(self,vmdetails):
+ def recover_destroyed_vm(self, vmdetails):
list_vm_response1 = VirtualMachine.list(
- self.apiclient,
- id=vmdetails.id
- )
+ self.apiclient,
+ id=vmdetails.id
+ )
if list_vm_response1 is None:
self.debug("VM already expunged")
return
@@ -973,46 +939,46 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.apiclient.recoverVirtualMachine(cmd)
list_vm_response1 = VirtualMachine.list(
- self.apiclient,
- id=vmdetails.id
- )
+ self.apiclient,
+ id=vmdetails.id
+ )
if list_vm_response1 is None:
self.debug("VM already expunged")
return
list_vm_response1 = VirtualMachine.list(
- self.apiclient,
- id=vmdetails.id
- )
- if list_vm_response1[0].state=="Expunging":
+ self.apiclient,
+ id=vmdetails.id
+ )
+ if list_vm_response1[0].state == "Expunging":
self.debug("VM already getting expunged")
return
list_vm_response = VirtualMachine.list(
- self.apiclient,
- id=vmdetails.id
- )
+ self.apiclient,
+ id=vmdetails.id
+ )
self.assertEqual(
- isinstance(list_vm_response, list),
- True,
- "Check list response returns a valid list"
- )
+ isinstance(list_vm_response, list),
+ True,
+ "Check list response returns a valid list"
+ )
self.assertNotEqual(
- len(list_vm_response),
- 0,
- "Check VM available in List Virtual Machines"
- )
+ len(list_vm_response),
+ 0,
+ "Check VM available in List Virtual Machines"
+ )
self.assertEqual(
- list_vm_response[0].state,
- "Stopped",
- "Check virtual machine is in Stopped state"
- )
+ list_vm_response[0].state,
+ "Stopped",
+ "Check virtual machine is in Stopped state"
+ )
return
- def expunge_vm(self,vmdetails):
+ def expunge_vm(self, vmdetails):
self.debug("Expunge VM-ID: %s" % vmdetails.id)
@@ -1021,26 +987,26 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.apiclient.destroyVirtualMachine(cmd)
config = Configurations.list(
- self.apiclient,
- name='expunge.delay'
- )
+ self.apiclient,
+ name='expunge.delay'
+ )
expunge_delay = int(config[0].value)
time.sleep(expunge_delay * 2)
- #VM should be destroyed unless expunge thread hasn't run
- #Wait for two cycles of the expunge thread
+ # VM should be destroyed unless expunge thread hasn't run
+ # Wait for two cycles of the expunge thread
config = Configurations.list(
- self.apiclient,
- name='expunge.interval'
- )
+ self.apiclient,
+ name='expunge.interval'
+ )
expunge_cycle = int(config[0].value)
wait_time = expunge_cycle * 4
while wait_time >= 0:
list_vm_response = VirtualMachine.list(
- self.apiclient,
- id=vmdetails.id
- )
+ self.apiclient,
+ id=vmdetails.id
+ )
if not list_vm_response:
break
self.debug("Waiting for VM to expunge")
@@ -1049,20 +1015,20 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.debug("listVirtualMachines response: %s" % list_vm_response)
- self.assertEqual(list_vm_response,None,"Check Expunged virtual machine is in listVirtualMachines response")
+ self.assertEqual(list_vm_response, None, "Check Expunged virtual machine is in listVirtualMachines response")
return
- def volume_snapshot(self,volumedetails):
+ def volume_snapshot(self, volumedetails):
"""
@summary: Test to verify creation of snapshot from volume
and creation of template, volume from snapshot
"""
- if self.uploadvolumeformat=="QCOW2":
+ if self.uploadvolumeformat == "QCOW2":
config = Configurations.list(
- self.apiclient,
- name='kvm.snapshot.enabled'
- )
+ self.apiclient,
+ name='kvm.snapshot.enabled'
+ )
kvmsnapshotenabled = config[0].value
if kvmsnapshotenabled == "false":
self.fail("Please enable kvm.snapshot.enable global config")
@@ -1104,9 +1070,9 @@ class TestBrowseUploadVolume(cloudstackTestCase):
status,
"Snapshot created from Volume details are not as expected"
)
- return(snapshot_created)
+ return (snapshot_created)
- def volume_snapshot_volume(self,snapshot_created):
+ def volume_snapshot_volume(self, snapshot_created):
# Creating Volume from snapshot
cmd = createVolume.createVolumeCmd()
@@ -1124,7 +1090,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
return
- def volume_snapshot_template(self,snapshot_created):
+ def volume_snapshot_template(self, snapshot_created):
# Creating Template from Snapshot
list_templates_before = Template.list(
self.apiclient,
@@ -1176,11 +1142,11 @@ class TestBrowseUploadVolume(cloudstackTestCase):
expected_dict,
actual_dict
)
- #self.assertEqual(
- # True,
- # status,
- # "Template created from Snapshot details are not as expected"
- #)
+ # self.assertEqual(
+ # True,
+ # status,
+ # "Template created from Snapshot details are not as expected"
+ # )
list_templates_after = Template.list(
self.apiclient,
@@ -1193,15 +1159,14 @@ class TestBrowseUploadVolume(cloudstackTestCase):
)
return
-
def waitForSystemVMAgent(self, vmname):
timeout = self.testdata["timeout"]
while True:
list_host_response = list_hosts(
- self.apiclient,
- name=vmname
- )
+ self.apiclient,
+ name=vmname
+ )
if list_host_response and list_host_response[0].state == 'Up':
break
@@ -1212,316 +1177,314 @@ class TestBrowseUploadVolume(cloudstackTestCase):
time.sleep(self.testdata["sleep"])
timeout = timeout - 1
-
def ssvm_internals(self):
list_ssvm_response = list_ssvms(
- self.apiclient,
- systemvmtype='secondarystoragevm',
- state='Running',
- zoneid=self.zone.id
- )
+ self.apiclient,
+ systemvmtype='secondarystoragevm',
+ state='Running',
+ zoneid=self.zone.id
+ )
self.assertEqual(
- isinstance(list_ssvm_response, list),
- True,
- "Check list response returns a valid list"
- )
+ isinstance(list_ssvm_response, list),
+ True,
+ "Check list response returns a valid list"
+ )
ssvm = list_ssvm_response[0]
hosts = list_hosts(
- self.apiclient,
- id=ssvm.hostid
- )
+ self.apiclient,
+ id=ssvm.hostid
+ )
self.assertEqual(
- isinstance(hosts, list),
- True,
- "Check list response returns a valid list"
- )
+ isinstance(hosts, list),
+ True,
+ "Check list response returns a valid list"
+ )
host = hosts[0]
self.debug("Running SSVM check script")
if self.hypervisor.lower() in ('vmware', 'hyperv'):
- #SSH into SSVMs is done via management server for Vmware and Hyper-V
+ # SSH into SSVMs is done via management server for Vmware and Hyper-V
result = get_process_status(
- self.apiclient.connection.mgtSvr,
- 22,
- self.apiclient.connection.user,
- self.apiclient.connection.passwd,
- ssvm.privateip,
- "/usr/local/cloud/systemvm/ssvm-check.sh |grep -e ERROR -e WARNING -e FAIL",
- hypervisor=self.hypervisor
- )
+ self.apiclient.connection.mgtSvr,
+ 22,
+ self.apiclient.connection.user,
+ self.apiclient.connection.passwd,
+ ssvm.privateip,
+ "/usr/local/cloud/systemvm/ssvm-check.sh |grep -e ERROR -e WARNING -e FAIL",
+ hypervisor=self.hypervisor
+ )
else:
try:
host.user, host.passwd = get_host_credentials(self.config, host.ipaddress)
result = get_process_status(
- host.ipaddress,
- 22,
- host.user,
- host.passwd,
- ssvm.linklocalip,
- "/usr/local/cloud/systemvm/ssvm-check.sh |grep -e ERROR -e WARNING -e FAIL"
- )
+ host.ipaddress,
+ 22,
+ host.user,
+ host.passwd,
+ ssvm.linklocalip,
+ "/usr/local/cloud/systemvm/ssvm-check.sh |grep -e ERROR -e WARNING -e FAIL"
+ )
except KeyError:
self.skipTest("Marvin configuration has no host credentials to check router services")
res = str(result)
self.debug("SSVM script output: %s" % res)
self.assertEqual(
- res.count("ERROR"),
- 1,
- "Check for Errors in tests"
- )
+ res.count("ERROR"),
+ 1,
+ "Check for Errors in tests"
+ )
self.assertEqual(
- res.count("WARNING"),
- 1,
- "Check for warnings in tests"
- )
+ res.count("WARNING"),
+ 1,
+ "Check for warnings in tests"
+ )
- #Check status of cloud service
+ # Check status of cloud service
if self.hypervisor.lower() in ('vmware', 'hyperv'):
- #SSH into SSVMs is done via management server for Vmware and Hyper-V
+ # SSH into SSVMs is done via management server for Vmware and Hyper-V
result = get_process_status(
- self.apiclient.connection.mgtSvr,
- 22,
- self.apiclient.connection.user,
- self.apiclient.connection.passwd,
- ssvm.privateip,
- "systemctl is-active cloud",
- hypervisor=self.hypervisor
- )
+ self.apiclient.connection.mgtSvr,
+ 22,
+ self.apiclient.connection.user,
+ self.apiclient.connection.passwd,
+ ssvm.privateip,
+ "systemctl is-active cloud",
+ hypervisor=self.hypervisor
+ )
else:
try:
host.user, host.passwd = get_host_credentials(self.config, host.ipaddress)
result = get_process_status(
- host.ipaddress,
- 22,
- host.user,
- host.passwd,
- ssvm.linklocalip,
- "systemctl is-active cloud"
- )
+ host.ipaddress,
+ 22,
+ host.user,
+ host.passwd,
+ ssvm.linklocalip,
+ "systemctl is-active cloud"
+ )
except KeyError:
self.skipTest("Marvin configuration has no host credentials to check router services")
res = str(result)
self.debug("Cloud Process status: %s" % res)
# Apache CloudStack service (type=secstorage) is running: process id: 2346
self.assertEqual(
- res.count("active"),
- 1,
- "Check cloud service is running or not"
- )
+ res.count("active"),
+ 1,
+ "Check cloud service is running or not"
+ )
return
def list_sec_storage_vm(self):
list_ssvm_response = list_ssvms(
- self.apiclient,
- systemvmtype='secondarystoragevm',
- state='Running',
- )
+ self.apiclient,
+ systemvmtype='secondarystoragevm',
+ state='Running',
+ )
self.assertEqual(
- isinstance(list_ssvm_response, list),
- True,
- "Check list response returns a valid list"
- )
- #Verify SSVM response
+ isinstance(list_ssvm_response, list),
+ True,
+ "Check list response returns a valid list"
+ )
+ # Verify SSVM response
self.assertNotEqual(
- len(list_ssvm_response),
- 0,
- "Check list System VMs response"
- )
+ len(list_ssvm_response),
+ 0,
+ "Check list System VMs response"
+ )
list_zones_response = list_zones(self.apiclient)
-
+
self.assertEqual(
- isinstance(list_zones_response, list),
- True,
- "Check list response returns a valid list"
- )
+ isinstance(list_zones_response, list),
+ True,
+ "Check list response returns a valid list"
+ )
self.debug("Number of zones: %s" % len(list_zones_response))
self.debug("Number of SSVMs: %s" % len(list_ssvm_response))
# Number of Sec storage VMs = No of Zones
self.assertEqual(
- len(list_ssvm_response),
- len(list_zones_response),
- "Check number of SSVMs with number of zones"
- )
- #For each secondary storage VM check private IP,
- #public IP, link local IP and DNS
+ len(list_ssvm_response),
+ len(list_zones_response),
+ "Check number of SSVMs with number of zones"
+ )
+ # For each secondary storage VM check private IP,
+ # public IP, link local IP and DNS
for ssvm in list_ssvm_response:
self.debug("SSVM state: %s" % ssvm.state)
self.assertEqual(
- ssvm.state,
- 'Running',
- "Check whether state of SSVM is running"
- )
+ ssvm.state,
+ 'Running',
+ "Check whether state of SSVM is running"
+ )
self.assertEqual(
- hasattr(ssvm, 'privateip'),
- True,
- "Check whether SSVM has private IP field"
- )
+ hasattr(ssvm, 'privateip'),
+ True,
+ "Check whether SSVM has private IP field"
+ )
self.assertEqual(
- hasattr(ssvm, 'linklocalip'),
- True,
- "Check whether SSVM has link local IP field"
- )
+ hasattr(ssvm, 'linklocalip'),
+ True,
+ "Check whether SSVM has link local IP field"
+ )
self.assertEqual(
- hasattr(ssvm, 'publicip'),
- True,
- "Check whether SSVM has public IP field"
- )
+ hasattr(ssvm, 'publicip'),
+ True,
+ "Check whether SSVM has public IP field"
+ )
- #Fetch corresponding ip ranges information from listVlanIpRanges
+ # Fetch corresponding ip ranges information from listVlanIpRanges
ipranges_response = list_vlan_ipranges(
- self.apiclient,
- zoneid=ssvm.zoneid
- )
+ self.apiclient,
+ zoneid=ssvm.zoneid
+ )
self.assertEqual(
- isinstance(ipranges_response, list),
- True,
- "Check list response returns a valid list"
- )
+ isinstance(ipranges_response, list),
+ True,
+ "Check list response returns a valid list"
+ )
iprange = ipranges_response[0]
-
- #Fetch corresponding Physical Network of SSVM's Zone
+
+ # Fetch corresponding Physical Network of SSVM's Zone
listphyntwk = PhysicalNetwork.list(
- self.apiclient,
- zoneid=ssvm.zoneid
- )
-
+ self.apiclient,
+ zoneid=ssvm.zoneid
+ )
+
# Execute the following assertion in all zones except EIP-ELB Zones
- if not (self.zone.networktype.lower() == 'basic' and isinstance(NetScaler.list(self.apiclient,physicalnetworkid=listphyntwk[0].id), list) is True):
+ if not (self.zone.networktype.lower() == 'basic' and isinstance(NetScaler.list(self.apiclient, physicalnetworkid=listphyntwk[0].id), list) is True):
self.assertEqual(
- ssvm.gateway,
- iprange.gateway,
- "Check gateway with that of corresponding ip range"
- )
+ ssvm.gateway,
+ iprange.gateway,
+ "Check gateway with that of corresponding ip range"
+ )
- #Fetch corresponding zone information from listZones
+ # Fetch corresponding zone information from listZones
zone_response = list_zones(
- self.apiclient,
- id=ssvm.zoneid
- )
+ self.apiclient,
+ id=ssvm.zoneid
+ )
self.assertEqual(
- isinstance(zone_response, list),
- True,
- "Check list response returns a valid list"
- )
+ isinstance(zone_response, list),
+ True,
+ "Check list response returns a valid list"
+ )
self.assertEqual(
- ssvm.dns1,
- zone_response[0].dns1,
- "Check DNS1 with that of corresponding zone"
- )
+ ssvm.dns1,
+ zone_response[0].dns1,
+ "Check DNS1 with that of corresponding zone"
+ )
self.assertEqual(
- ssvm.dns2,
- zone_response[0].dns2,
- "Check DNS2 with that of corresponding zone"
- )
+ ssvm.dns2,
+ zone_response[0].dns2,
+ "Check DNS2 with that of corresponding zone"
+ )
return
def stop_ssvm(self):
list_ssvm_response = list_ssvms(
- self.apiclient,
- systemvmtype='secondarystoragevm',
- state='Running',
- zoneid=self.zone.id
- )
+ self.apiclient,
+ systemvmtype='secondarystoragevm',
+ state='Running',
+ zoneid=self.zone.id
+ )
self.assertEqual(
- isinstance(list_ssvm_response, list),
- True,
- "Check list response returns a valid list"
- )
+ isinstance(list_ssvm_response, list),
+ True,
+ "Check list response returns a valid list"
+ )
ssvm = list_ssvm_response[0]
hosts = list_hosts(
- self.apiclient,
- id=ssvm.hostid
- )
+ self.apiclient,
+ id=ssvm.hostid
+ )
self.assertEqual(
- isinstance(hosts, list),
- True,
- "Check list response returns a valid list"
- )
+ isinstance(hosts, list),
+ True,
+ "Check list response returns a valid list"
+ )
host = hosts[0]
self.debug("Stopping SSVM: %s" % ssvm.id)
cmd = stopSystemVm.stopSystemVmCmd()
cmd.id = ssvm.id
self.apiclient.stopSystemVm(cmd)
-
+
timeout = self.testdata["timeout"]
while True:
list_ssvm_response = list_ssvms(
- self.apiclient,
- id=ssvm.id
- )
+ self.apiclient,
+ id=ssvm.id
+ )
if isinstance(list_ssvm_response, list):
if list_ssvm_response[0].state == 'Running':
break
if timeout == 0:
raise Exception("List SSVM call failed!")
-
+
time.sleep(self.testdata["sleep"])
timeout = timeout - 1
-
+
self.assertEqual(
- isinstance(list_ssvm_response, list),
- True,
- "Check list response returns a valid list"
- )
+ isinstance(list_ssvm_response, list),
+ True,
+ "Check list response returns a valid list"
+ )
ssvm_response = list_ssvm_response[0]
self.debug("SSVM state after debug: %s" % ssvm_response.state)
self.assertEqual(
- ssvm_response.state,
- 'Running',
- "Check whether SSVM is running or not"
- )
+ ssvm_response.state,
+ 'Running',
+ "Check whether SSVM is running or not"
+ )
# Wait for the agent to be up
self.waitForSystemVMAgent(ssvm_response.name)
# Call above tests to ensure SSVM is properly running
self.list_sec_storage_vm()
-
def reboot_ssvm(self):
list_ssvm_response = list_ssvms(
- self.apiclient,
- systemvmtype='secondarystoragevm',
- state='Running',
- zoneid=self.zone.id
- )
-
+ self.apiclient,
+ systemvmtype='secondarystoragevm',
+ state='Running',
+ zoneid=self.zone.id
+ )
+
self.assertEqual(
- isinstance(list_ssvm_response, list),
- True,
- "Check list response returns a valid list"
- )
-
+ isinstance(list_ssvm_response, list),
+ True,
+ "Check list response returns a valid list"
+ )
+
ssvm_response = list_ssvm_response[0]
hosts = list_hosts(
- self.apiclient,
- id=ssvm_response.hostid
- )
+ self.apiclient,
+ id=ssvm_response.hostid
+ )
self.assertEqual(
- isinstance(hosts, list),
- True,
- "Check list response returns a valid list"
- )
+ isinstance(hosts, list),
+ True,
+ "Check list response returns a valid list"
+ )
host = hosts[0]
- #Store the public & private IP values before reboot
+ # Store the public & private IP values before reboot
old_public_ip = ssvm_response.publicip
old_private_ip = ssvm_response.privateip
@@ -1533,37 +1496,37 @@ class TestBrowseUploadVolume(cloudstackTestCase):
timeout = self.testdata["timeout"]
while True:
list_ssvm_response = list_ssvms(
- self.apiclient,
- id=ssvm_response.id
- )
+ self.apiclient,
+ id=ssvm_response.id
+ )
if isinstance(list_ssvm_response, list):
if list_ssvm_response[0].state == 'Running':
break
if timeout == 0:
raise Exception("List SSVM call failed!")
-
+
time.sleep(self.testdata["sleep"])
timeout = timeout - 1
ssvm_response = list_ssvm_response[0]
self.debug("SSVM State: %s" % ssvm_response.state)
self.assertEqual(
- 'Running',
- str(ssvm_response.state),
- "Check whether CPVM is running or not"
- )
+ 'Running',
+ str(ssvm_response.state),
+ "Check whether CPVM is running or not"
+ )
self.assertEqual(
- ssvm_response.publicip,
- old_public_ip,
- "Check Public IP after reboot with that of before reboot"
- )
+ ssvm_response.publicip,
+ old_public_ip,
+ "Check Public IP after reboot with that of before reboot"
+ )
self.assertEqual(
- ssvm_response.privateip,
- old_private_ip,
- "Check Private IP after reboot with that of before reboot"
- )
+ ssvm_response.privateip,
+ old_private_ip,
+ "Check Private IP after reboot with that of before reboot"
+ )
# Wait for the agent to be up
self.waitForSystemVMAgent(ssvm_response.name)
@@ -1573,16 +1536,16 @@ class TestBrowseUploadVolume(cloudstackTestCase):
def destroy_ssvm(self):
list_ssvm_response = list_ssvms(
- self.apiclient,
- systemvmtype='secondarystoragevm',
- state='Running',
- zoneid=self.zone.id
- )
+ self.apiclient,
+ systemvmtype='secondarystoragevm',
+ state='Running',
+ zoneid=self.zone.id
+ )
self.assertEqual(
- isinstance(list_ssvm_response, list),
- True,
- "Check list response returns a valid list"
- )
+ isinstance(list_ssvm_response, list),
+ True,
+ "Check list response returns a valid list"
+ )
ssvm_response = list_ssvm_response[0]
old_name = ssvm_response.name
@@ -1595,16 +1558,16 @@ class TestBrowseUploadVolume(cloudstackTestCase):
timeout = self.testdata["timeout"]
while True:
list_ssvm_response = list_ssvms(
- self.apiclient,
- zoneid=self.zone.id,
- systemvmtype='secondarystoragevm'
- )
+ self.apiclient,
+ zoneid=self.zone.id,
+ systemvmtype='secondarystoragevm'
+ )
if isinstance(list_ssvm_response, list):
if list_ssvm_response[0].state == 'Running':
break
if timeout == 0:
raise Exception("List SSVM call failed!")
-
+
time.sleep(self.testdata["sleep"])
timeout = timeout - 1
@@ -1613,234 +1576,228 @@ class TestBrowseUploadVolume(cloudstackTestCase):
# Verify Name, Public IP, Private IP and Link local IP
# for newly created SSVM
self.assertNotEqual(
- ssvm_response.name,
- old_name,
- "Check SSVM new name with name of destroyed SSVM"
- )
+ ssvm_response.name,
+ old_name,
+ "Check SSVM new name with name of destroyed SSVM"
+ )
self.assertEqual(
- hasattr(ssvm_response, 'privateip'),
- True,
- "Check whether SSVM has private IP field"
- )
+ hasattr(ssvm_response, 'privateip'),
+ True,
+ "Check whether SSVM has private IP field"
+ )
self.assertEqual(
- hasattr(ssvm_response, 'linklocalip'),
- True,
- "Check whether SSVM has link local IP field"
- )
+ hasattr(ssvm_response, 'linklocalip'),
+ True,
+ "Check whether SSVM has link local IP field"
+ )
self.assertEqual(
- hasattr(ssvm_response, 'publicip'),
- True,
- "Check whether SSVM has public IP field"
- )
-
+ hasattr(ssvm_response, 'publicip'),
+ True,
+ "Check whether SSVM has public IP field"
+ )
+
# Wait for the agent to be up
self.waitForSystemVMAgent(ssvm_response.name)
return
+ def uploadvol(self, getuploadparamsresponce):
-
- def uploadvol(self,getuploadparamsresponce):
-
- signt=getuploadparamsresponce.signature
- posturl=getuploadparamsresponce.postURL
- metadata=getuploadparamsresponce.metadata
- expiredata=getuploadparamsresponce.expires
+ signt = getuploadparamsresponce.signature
+ posturl = getuploadparamsresponce.postURL
+ metadata = getuploadparamsresponce.metadata
+ expiredata = getuploadparamsresponce.expires
success = False
- url=self.uploadurl
+ url = self.uploadurl
uploadfile = url.split('/')[-1]
r = requests.get(url, stream=True)
with open(uploadfile, 'wb') as f:
- for chunk in r.iter_content(chunk_size=1024):
- if chunk: # filter out keep-alive new chunks
+ for chunk in r.iter_content(chunk_size=1024):
+ if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
- files={'file':(uploadfile,open(uploadfile,'rb'),'application/octet-stream')}
+ files = {'file': (uploadfile, open(uploadfile, 'rb'), 'application/octet-stream')}
- headers={'X-signature':signt,'X-metadata':metadata,'X-expires':expiredata}
+ headers = {'X-signature': signt, 'X-metadata': metadata, 'X-expires': expiredata}
- results = requests.post(posturl,files=files,headers=headers,verify=False)
+ results = requests.post(posturl, files=files, headers=headers, verify=False)
list_volume_response = Volume.list(
- self.apiclient,
- id=getuploadparamsresponce.id
- )
+ self.apiclient,
+ id=getuploadparamsresponce.id
+ )
self.debug("======================Before SSVM Reboot==================")
self.reboot_ssvm()
self.debug("======================After SSVM Reboot==================")
config = Configurations.list(
- self.apiclient,
- name='upload.operation.timeout'
- )
+ self.apiclient,
+ name='upload.operation.timeout'
+ )
uploadtimeout = int(config[0].value)
- time.sleep(uploadtimeout*60)
+ time.sleep(uploadtimeout * 60)
- self.validate_uploaded_volume(getuploadparamsresponce.id,'UploadAbandoned')
+ self.validate_uploaded_volume(getuploadparamsresponce.id, 'UploadAbandoned')
- return()
+ return ()
+ def uploadvolwithssvmreboot(self, getuploadparamsresponce):
-
- def uploadvolwithssvmreboot(self,getuploadparamsresponce):
-
- signt=getuploadparamsresponce.signature
- posturl=getuploadparamsresponce.postURL
- metadata=getuploadparamsresponce.metadata
- expiredata=getuploadparamsresponce.expires
+ signt = getuploadparamsresponce.signature
+ posturl = getuploadparamsresponce.postURL
+ metadata = getuploadparamsresponce.metadata
+ expiredata = getuploadparamsresponce.expires
self.debug("======================Before SSVM Reboot==================")
list_volume_response = Volume.list(
- self.apiclient,
- id=getuploadparamsresponce.id
- )
+ self.apiclient,
+ id=getuploadparamsresponce.id
+ )
self.debug(list_volume_response[0])
self.reboot_ssvm()
success = False
- url=self.uploadurl
+ url = self.uploadurl
uploadfile = url.split('/')[-1]
r = requests.get(url, stream=True)
with open(uploadfile, 'wb') as f:
- for chunk in r.iter_content(chunk_size=1024):
- if chunk: # filter out keep-alive new chunks
+ for chunk in r.iter_content(chunk_size=1024):
+ if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
- files={'file':(uploadfile,open(uploadfile,'rb'),'application/octet-stream')}
+ files = {'file': (uploadfile, open(uploadfile, 'rb'), 'application/octet-stream')}
- headers={'X-signature':signt,'X-metadata':metadata,'X-expires':expiredata}
+ headers = {'X-signature': signt, 'X-metadata': metadata, 'X-expires': expiredata}
- results = requests.post(posturl,files=files,headers=headers,verify=False)
+ results = requests.post(posturl, files=files, headers=headers, verify=False)
list_volume_response = Volume.list(
- self.apiclient,
- id=getuploadparamsresponce.id
- )
+ self.apiclient,
+ id=getuploadparamsresponce.id
+ )
self.debug("======================Upload After SSVM Reboot==================")
self.debug(list_volume_response[0])
- self.validate_uploaded_volume(getuploadparamsresponce.id,'Uploaded')
+ self.validate_uploaded_volume(getuploadparamsresponce.id, 'Uploaded')
- return()
+ return ()
def uploadwithcustomoffering(self):
cmd = getUploadParamsForVolume.getUploadParamsForVolumeCmd()
cmd.zoneid = self.zone.id
cmd.format = self.uploadvolumeformat
- cmd.name=self.volname+self.account.name+(random.choice(string.ascii_uppercase))
- cmd.account=self.account.name
- cmd.domainid=self.domain.id
- cmd.diskofferingid=self.disk_offering.id
- getuploadparamsresponce=self.apiclient.getUploadParamsForVolume(cmd)
-
- signt=getuploadparamsresponce.signature
- posturl=getuploadparamsresponce.postURL
- metadata=getuploadparamsresponce.metadata
- expiredata=getuploadparamsresponce.expires
- self.globalurl=getuploadparamsresponce.postURL
- #url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
- url=self.uploadurl
+ cmd.name = self.volname + self.account.name + (random.choice(string.ascii_uppercase))
+ cmd.account = self.account.name
+ cmd.domainid = self.domain.id
+ cmd.diskofferingid = self.disk_offering.id
+ getuploadparamsresponce = self.apiclient.getUploadParamsForVolume(cmd)
+
+ signt = getuploadparamsresponce.signature
+ posturl = getuploadparamsresponce.postURL
+ metadata = getuploadparamsresponce.metadata
+ expiredata = getuploadparamsresponce.expires
+ self.globalurl = getuploadparamsresponce.postURL
+ # url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
+ url = self.uploadurl
uploadfile = url.split('/')[-1]
r = requests.get(url, stream=True)
with open(uploadfile, 'wb') as f:
- for chunk in r.iter_content(chunk_size=1024):
- if chunk: # filter out keep-alive new chunks
+ for chunk in r.iter_content(chunk_size=1024):
+ if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
- files={'file':(uploadfile,open(uploadfile,'rb'),'application/octet-stream')}
+ files = {'file': (uploadfile, open(uploadfile, 'rb'), 'application/octet-stream')}
- headers={'X-signature':signt,'X-metadata':metadata,'X-expires':expiredata}
+ headers = {'X-signature': signt, 'X-metadata': metadata, 'X-expires': expiredata}
- results = requests.post(posturl,files=files,headers=headers,verify=False)
+ results = requests.post(posturl, files=files, headers=headers, verify=False)
print(results.status_code)
- if results.status_code !=200:
+ if results.status_code != 200:
self.fail("Upload is not fine")
- self.validate_uploaded_volume(getuploadparamsresponce.id,'Uploaded')
-
-
+ self.validate_uploaded_volume(getuploadparamsresponce.id, 'Uploaded')
def uploadwithimagestoreid(self):
- sscmd=listImageStores.listImageStoresCmd()
- sscmd.zoneid=self.zone.id
- sscmdresponse=self.apiclient.listImageStores(sscmd)
+ sscmd = listImageStores.listImageStoresCmd()
+ sscmd.zoneid = self.zone.id
+ sscmdresponse = self.apiclient.listImageStores(sscmd)
cmd = getUploadParamsForVolume.getUploadParamsForVolumeCmd()
cmd.zoneid = self.zone.id
cmd.format = self.uploadvolumeformat
- cmd.name=self.volname+self.account.name+(random.choice(string.ascii_uppercase))
- cmd.account=self.account.name
- cmd.domainid=self.domain.id
- cmd.imagestoreuuid=sscmdresponse[0].id
- getuploadparamsresponce=self.apiclient.getUploadParamsForVolume(cmd)
-
- signt=getuploadparamsresponce.signature
- posturl=getuploadparamsresponce.postURL
- metadata=getuploadparamsresponce.metadata
- expiredata=getuploadparamsresponce.expires
- self.globalurl=getuploadparamsresponce.postURL
- #url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
- url=self.uploadurl
+ cmd.name = self.volname + self.account.name + (random.choice(string.ascii_uppercase))
+ cmd.account = self.account.name
+ cmd.domainid = self.domain.id
+ cmd.imagestoreuuid = sscmdresponse[0].id
+ getuploadparamsresponce = self.apiclient.getUploadParamsForVolume(cmd)
+
+ signt = getuploadparamsresponce.signature
+ posturl = getuploadparamsresponce.postURL
+ metadata = getuploadparamsresponce.metadata
+ expiredata = getuploadparamsresponce.expires
+ self.globalurl = getuploadparamsresponce.postURL
+ # url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
+ url = self.uploadurl
uploadfile = url.split('/')[-1]
r = requests.get(url, stream=True)
with open(uploadfile, 'wb') as f:
- for chunk in r.iter_content(chunk_size=1024):
- if chunk: # filter out keep-alive new chunks
+ for chunk in r.iter_content(chunk_size=1024):
+ if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
- files={'file':(uploadfile,open(uploadfile,'rb'),'application/octet-stream')}
+ files = {'file': (uploadfile, open(uploadfile, 'rb'), 'application/octet-stream')}
- headers={'X-signature':signt,'X-metadata':metadata,'X-expires':expiredata}
+ headers = {'X-signature': signt, 'X-metadata': metadata, 'X-expires': expiredata}
- results = requests.post(posturl,files=files,headers=headers,verify=False)
+ results = requests.post(posturl, files=files, headers=headers, verify=False)
print(results.status_code)
- if results.status_code !=200:
+ if results.status_code != 200:
self.fail("Upload is not fine")
- self.validate_uploaded_volume(getuploadparamsresponce.id,'Uploaded')
- def uploadwithsamedisplaytext(self,voldetails):
+ self.validate_uploaded_volume(getuploadparamsresponce.id, 'Uploaded')
+ def uploadwithsamedisplaytext(self, voldetails):
list_volume_response = Volume.list(
- self.apiclient,
- id=voldetails.id
- )
+ self.apiclient,
+ id=voldetails.id
+ )
- success=True
+ success = True
cmd = getUploadParamsForVolume.getUploadParamsForVolumeCmd()
cmd.zoneid = self.zone.id
cmd.format = self.uploadvolumeformat
- cmd.name=list_volume_response[0].name
- cmd.account=self.account.name
- cmd.domainid=self.domain.id
- getuploadparamsresponce=self.apiclient.getUploadParamsForVolume(cmd)
+ cmd.name = list_volume_response[0].name
+ cmd.account = self.account.name
+ cmd.domainid = self.domain.id
+ getuploadparamsresponce = self.apiclient.getUploadParamsForVolume(cmd)
list_volume_response1 = Volume.list(
- self.apiclient,
- id=getuploadparamsresponce.id
- )
- if list_volume_response1[0].name==voldetails.name:
- success=False
+ self.apiclient,
+ id=getuploadparamsresponce.id
+ )
+ if list_volume_response1[0].name == voldetails.name:
+ success = False
self.assertEqual(
- success,
- False,
- "Verify: Upload Multiple volumes with same name is handled")
+ success,
+ False,
+ "Verify: Upload Multiple volumes with same name is handled")
return
@@ -1853,85 +1810,82 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.debug("Total SSVMs are:")
self.debug(len(ssvmhosts))
- if len(ssvmhosts)==1:
- return(1)
+ if len(ssvmhosts) == 1:
+ return (1)
config = Configurations.list(
- self.apiclient,
- name='secstorage.session.max'
- )
+ self.apiclient,
+ name='secstorage.session.max'
+ )
multissvmvalue = int(config[0].value)
- if multissvmvalue !=1:
- return(0)
+ if multissvmvalue != 1:
+ return (0)
- browseup_vol=self.browse_upload_volume()
+ browseup_vol = self.browse_upload_volume()
- vm1details=self.deploy_vm()
+ vm1details = self.deploy_vm()
- self.attach_volume(vm1details,browseup_vol.id)
+ self.attach_volume(vm1details, browseup_vol.id)
self.vmoperations(vm1details)
self.destroy_vm(vm1details)
- self.detach_volume(vm1details,browseup_vol.id)
+ self.detach_volume(vm1details, browseup_vol.id)
- cmd=deleteVolume.deleteVolumeCmd()
- cmd.id=browseup_vol.id
+ cmd = deleteVolume.deleteVolumeCmd()
+ cmd.id = browseup_vol.id
self.apiclient.deleteVolume(cmd)
- return(2)
-
+ return (2)
def uploadwithextendedfileextentions(self):
cmd = getUploadParamsForVolume.getUploadParamsForVolumeCmd()
cmd.zoneid = self.zone.id
cmd.format = self.uploadvolumeformat
- cmd.name=self.volname+self.account.name+(random.choice(string.ascii_uppercase))
- cmd.account=self.account.name
- cmd.domainid=self.domain.id
- cmd.diskofferingid=self.disk_offering.id
- getuploadparamsresponce=self.apiclient.getUploadParamsForVolume(cmd)
+ cmd.name = self.volname + self.account.name + (random.choice(string.ascii_uppercase))
+ cmd.account = self.account.name
+ cmd.domainid = self.domain.id
+ cmd.diskofferingid = self.disk_offering.id
+ getuploadparamsresponce = self.apiclient.getUploadParamsForVolume(cmd)
- signt=getuploadparamsresponce.signature
- posturl=getuploadparamsresponce.postURL
- metadata=getuploadparamsresponce.metadata
- expiredata=getuploadparamsresponce.expires
+ signt = getuploadparamsresponce.signature
+ posturl = getuploadparamsresponce.postURL
+ metadata = getuploadparamsresponce.metadata
+ expiredata = getuploadparamsresponce.expires
- #url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
- url=self.extuploadurl
+ # url = 'http://10.147.28.7/templates/rajani-thin-volume.vhd'
+ url = self.extuploadurl
uploadfile = url.split('/')[-1]
r = requests.get(url, stream=True)
with open(uploadfile, 'wb') as f:
- for chunk in r.iter_content(chunk_size=1024):
- if chunk: # filter out keep-alive new chunks
+ for chunk in r.iter_content(chunk_size=1024):
+ if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
- files={'file':(uploadfile,open(uploadfile,'rb'),'application/octet-stream')}
+ files = {'file': (uploadfile, open(uploadfile, 'rb'), 'application/octet-stream')}
- headers={'X-signature':signt,'X-metadata':metadata,'X-expires':expiredata}
+ headers = {'X-signature': signt, 'X-metadata': metadata, 'X-expires': expiredata}
- results = requests.post(posturl,files=files,headers=headers,verify=False)
+ results = requests.post(posturl, files=files, headers=headers, verify=False)
print(results.status_code)
- if results.status_code !=200:
+ if results.status_code != 200:
self.fail("Upload is not fine")
- self.validate_uploaded_volume(getuploadparamsresponce.id,'Uploaded')
+ self.validate_uploaded_volume(getuploadparamsresponce.id, 'Uploaded')
+ def posturlwithdeletedvolume(self, getuploadparamsresponce):
-
- def posturlwithdeletedvolume(self,getuploadparamsresponce):
-
- signt=getuploadparamsresponce.signature
- posturl=getuploadparamsresponce.postURL
- metadata=getuploadparamsresponce.metadata
- expiredata=getuploadparamsresponce.expires
- self.validate_uploaded_volume(getuploadparamsresponce.id,'UploadAbandoned')
+ signt = getuploadparamsresponce.signature
+ posturl = getuploadparamsresponce.postURL
+ metadata = getuploadparamsresponce.metadata
+ expiredata = getuploadparamsresponce.expires
+ self.validate_uploaded_volume(getuploadparamsresponce.id, 'UploadAbandoned')
cmd = deleteVolume.deleteVolumeCmd()
cmd.id = getuploadparamsresponce.id
@@ -1940,29 +1894,29 @@ class TestBrowseUploadVolume(cloudstackTestCase):
success = False
- url=self.extuploadurl
+ url = self.extuploadurl
uploadfile = url.split('/')[-1]
r = requests.get(url, stream=True)
with open(uploadfile, 'wb') as f:
- for chunk in r.iter_content(chunk_size=1024):
- if chunk: # filter out keep-alive new chunks
+ for chunk in r.iter_content(chunk_size=1024):
+ if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
- files={'file':(uploadfile,open(uploadfile,'rb'),'application/octet-stream')}
+ files = {'file': (uploadfile, open(uploadfile, 'rb'), 'application/octet-stream')}
- headers={'X-signature':signt,'X-metadata':metadata,'X-expires':expiredata}
+ headers = {'X-signature': signt, 'X-metadata': metadata, 'X-expires': expiredata}
- results = requests.post(posturl,files=files,headers=headers,verify=False)
+ results = requests.post(posturl, files=files, headers=headers, verify=False)
print(results.status_code)
- if results.status_code ==200:
- return("FAIL")
- return("PASS")
+ if results.status_code == 200:
+ return ("FAIL")
+ return ("PASS")
+
+ def volume_migration(self, browseup_vol, vm1details):
- def volume_migration(self,browseup_vol,vm1details):
-
pools = StoragePool.list(
self.apiclient,
zoneid=self.zone.id
@@ -1980,51 +1934,48 @@ class TestBrowseUploadVolume(cloudstackTestCase):
try:
if vm1details is None:
Volume.migrate(
- self.apiclient,
- volumeid=browseup_vol.id,
- storageid=pool.id,
- livemigrate='false'
- )
+ self.apiclient,
+ volumeid=browseup_vol.id,
+ storageid=pool.id,
+ livemigrate='false'
+ )
else:
Volume.migrate(
- self.apiclient,
- volumeid=browseup_vol.id,
- storageid=pool.id,
- livemigrate='true'
- )
+ self.apiclient,
+ volumeid=browseup_vol.id,
+ storageid=pool.id,
+ livemigrate='true'
+ )
except Exception as e:
self.fail("Volume migration failed with error %s" % e)
return
-
def getvolumelimts(self):
- totalresoucelist=Account.list(
- self.apiclient,
- id=self.account.id
- )
- totalvolumes=totalresoucelist[0].volumetotal
-
- return(totalvolumes)
-
+ totalresoucelist = Account.list(
+ self.apiclient,
+ id=self.account.id
+ )
+ totalvolumes = totalresoucelist[0].volumetotal
- def getstoragelimts(self,rtype):
+ return (totalvolumes)
- cmd=updateResourceCount.updateResourceCountCmd()
- cmd.account=self.account.name
- cmd.domainid=self.domain.id
- cmd.resourcetype=rtype
+ def getstoragelimts(self, rtype):
- responce=self.apiclient.updateResourceCount(cmd)
+ cmd = updateResourceCount.updateResourceCountCmd()
+ cmd.account = self.account.name
+ cmd.domainid = self.domain.id
+ cmd.resourcetype = rtype
- totalstorage=responce[0].resourcecount
+ responce = self.apiclient.updateResourceCount(cmd)
- return(totalstorage)
+ totalstorage = responce[0].resourcecount
+ return (totalstorage)
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_01_Browser_volume_Life_cycle_tpath(self):
"""
Test Browser_volume_Life_cycle - This includes upload volume,attach to a VM, write data ,Stop ,Start, Reboot,Reset of a VM, detach,attach back to the VM, delete volumes
@@ -2032,13 +1983,13 @@ class TestBrowseUploadVolume(cloudstackTestCase):
try:
self.debug("========================= Test 1: Upload Browser based volume and validate ========================= ")
- browseup_vol=self.browse_upload_volume()
+ browseup_vol = self.browse_upload_volume()
self.debug("========================= Test 2: Deploy a VM , Attach Uploaded Browser based volume and validate VM Operations========================= ")
- vm1details=self.deploy_vm()
+ vm1details = self.deploy_vm()
- self.attach_volume(vm1details,browseup_vol.id)
+ self.attach_volume(vm1details, browseup_vol.id)
self.vmoperations(vm1details)
@@ -2048,7 +1999,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.debug("========================= Test 4: Detach Uploaded volume and validation of VM operations after detach========================= ")
- self.detach_volume(vm1details,browseup_vol.id)
+ self.detach_volume(vm1details, browseup_vol.id)
self.vmoperations(vm1details)
@@ -2056,67 +2007,64 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.debug("========================= Test 5: Deploy New VM,Attach the detached Uploaded volume and validate VM operations after attach========================= ")
- vm2details=self.deploy_vm()
+ vm2details = self.deploy_vm()
- self.attach_volume(vm2details,browseup_vol.id)
+ self.attach_volume(vm2details, browseup_vol.id)
self.vmoperations(vm2details)
self.debug("========================= Test 6: Detach Uploaded volume and resize detached uploaded volume========================= ")
- self.detach_volume(vm2details,browseup_vol.id)
+ self.detach_volume(vm2details, browseup_vol.id)
if self.hypervisor.lower() != "hyperv":
self.resize_volume(browseup_vol.id)
self.debug("========================= Test 7: Attach resized uploaded volume and validate VM operations========================= ")
- self.attach_volume(vm2details,browseup_vol.id)
+ self.attach_volume(vm2details, browseup_vol.id)
self.vmoperations(vm2details)
- self.detach_volume(vm2details,browseup_vol.id)
+ self.detach_volume(vm2details, browseup_vol.id)
- cmd=deleteVolume.deleteVolumeCmd()
- cmd.id=browseup_vol.id
+ cmd = deleteVolume.deleteVolumeCmd()
+ cmd.id = browseup_vol.id
self.apiclient.deleteVolume(cmd)
self.debug("========================= Test 8: Try resizing uploaded state volume and validate the error scenario========================= ")
- browseup_vol2=self.browse_upload_volume()
+ browseup_vol2 = self.browse_upload_volume()
self.resize_fail(browseup_vol2.id)
self.debug("========================= Test 9: Attach multiple uploaded volumes to a VM and validate VM operations========================= ")
- browseup_vol3=self.browse_upload_volume()
+ browseup_vol3 = self.browse_upload_volume()
- self.attach_volume(vm2details,browseup_vol2.id)
+ self.attach_volume(vm2details, browseup_vol2.id)
- self.attach_volume(vm2details,browseup_vol3.id)
+ self.attach_volume(vm2details, browseup_vol3.id)
self.vmoperations(vm2details)
self.debug("========================= Test 10: Detach and delete uploaded volume========================= ")
- self.detach_volume(vm2details,browseup_vol2.id)
+ self.detach_volume(vm2details, browseup_vol2.id)
-
- cmd=deleteVolume.deleteVolumeCmd()
- cmd.id=browseup_vol2.id
+ cmd = deleteVolume.deleteVolumeCmd()
+ cmd.id = browseup_vol2.id
self.apiclient.deleteVolume(cmd)
-
self.debug("========================= Test 11: Detach and download uploaded volume========================= ")
- self.detach_volume(vm2details,browseup_vol3.id)
+ self.detach_volume(vm2details, browseup_vol3.id)
self.download_volume(browseup_vol3.id)
self.debug("========================= Test 12: Delete detached uploaded volume========================= ")
-
- cmd=deleteVolume.deleteVolumeCmd()
- cmd.id=browseup_vol3.id
+ cmd = deleteVolume.deleteVolumeCmd()
+ cmd.id = browseup_vol3.id
self.apiclient.deleteVolume(cmd)
self.debug("========================= Deletion of UnUsed VM's after test is complete========================= ")
@@ -2125,20 +2073,19 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.debug("========================= Test 13: Delete Uploaded State volume========================= ")
- browseup_vol4=self.browse_upload_volume()
-
+ browseup_vol4 = self.browse_upload_volume()
- cmd=deleteVolume.deleteVolumeCmd()
- cmd.id=browseup_vol4.id
+ cmd = deleteVolume.deleteVolumeCmd()
+ cmd.id = browseup_vol4.id
self.apiclient.deleteVolume(cmd)
self.debug("========================= Test 14: Destroy VM which has Uploaded volumes attached========================= ")
- vm4details=self.deploy_vm()
+ vm4details = self.deploy_vm()
- newvolumetodestoy_VM=self.browse_upload_volume()
+ newvolumetodestoy_VM = self.browse_upload_volume()
- self.attach_volume(vm4details,newvolumetodestoy_VM.id)
+ self.attach_volume(vm4details, newvolumetodestoy_VM.id)
self.destroy_vm(vm4details)
@@ -2147,26 +2094,26 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.recover_destroyed_vm(vm4details)
self.expunge_vm(vm4details)
-
- cmd=deleteVolume.deleteVolumeCmd()
- cmd.id=newvolumetodestoy_VM.id
+ cmd = deleteVolume.deleteVolumeCmd()
+ cmd.id = newvolumetodestoy_VM.id
self.apiclient.deleteVolume(cmd)
- self.debug("========================= Test 16: Delete attached Uploaded volume which is in ready state and it should not be allowed to delete========================= ")
+ self.debug(
+ "========================= Test 16: Delete attached Uploaded volume which is in ready state and it should not be allowed to delete========================= ")
- vm5details=self.deploy_vm()
- browseup_vol5=self.browse_upload_volume()
- self.attach_volume(vm5details,browseup_vol5.id)
+ vm5details = self.deploy_vm()
+ browseup_vol5 = self.browse_upload_volume()
+ self.attach_volume(vm5details, browseup_vol5.id)
self.deletevolume_fail(browseup_vol5.id)
self.debug("========================= Test 17: Create Volume Backup Snapshot uploaded volume attached to the VM========================= ")
- vm6details=self.deploy_vm()
- browseup_vol6=self.browse_upload_volume()
+ vm6details = self.deploy_vm()
+ browseup_vol6 = self.browse_upload_volume()
- self.attach_volume(vm6details,browseup_vol6.id)
+ self.attach_volume(vm6details, browseup_vol6.id)
- snapshotdetails=self.volume_snapshot(browseup_vol6)
+ snapshotdetails = self.volume_snapshot(browseup_vol6)
self.debug("========================= Test 18: Create Volume from Backup Snapshot of attached uploaded volume========================= ")
@@ -2175,35 +2122,32 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.debug("========================= Test 19: Create template from Backup Snapshot of attached uploaded volume========================= ")
self.volume_snapshot_template(snapshotdetails)
- self.detach_volume(vm6details,browseup_vol6.id)
-
+ self.detach_volume(vm6details, browseup_vol6.id)
- cmd=deleteVolume.deleteVolumeCmd()
- cmd.id=browseup_vol6.id
+ cmd = deleteVolume.deleteVolumeCmd()
+ cmd.id = browseup_vol6.id
self.apiclient.deleteVolume(cmd)
self.expunge_vm(vm6details)
self.debug("========================= Test 20: Upload Browser based volume with checksum and validate ========================= ")
- browseup_vol_withchecksum=self.browse_upload_volume_with_md5()
+ browseup_vol_withchecksum = self.browse_upload_volume_with_md5()
self.debug("========================= Test 21: Deploy a VM , Attach Uploaded Browser based volume with checksum and validate VM Operations========================= ")
- vm7details=self.deploy_vm()
+ vm7details = self.deploy_vm()
- self.attach_volume(vm7details,browseup_vol_withchecksum.id)
+ self.attach_volume(vm7details, browseup_vol_withchecksum.id)
self.debug("========================= Test 22: Detach Uploaded volume with checksum and validation of VM operations after detach========================= ")
- self.detach_volume(vm7details,browseup_vol_withchecksum.id)
-
+ self.detach_volume(vm7details, browseup_vol_withchecksum.id)
- cmd=deleteVolume.deleteVolumeCmd()
- cmd.id=browseup_vol_withchecksum.id
+ cmd = deleteVolume.deleteVolumeCmd()
+ cmd.id = browseup_vol_withchecksum.id
self.apiclient.deleteVolume(cmd)
-
self.vmoperations(vm7details)
self.expunge_vm(vm7details)
@@ -2213,30 +2157,28 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.fail("Exception occurred : %s" % e)
return
-
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_02_SSVM_Life_Cycle_With_Browser_Volume_TPath(self):
"""
Test SSVM_Life_Cycle_With_Browser_Volume_TPath - This includes SSVM life cycle followed by Browser volume upload operations
"""
try:
-
+
self.debug("========================= Test 23: Stop and Start SSVM and Perform Browser based volume validations ========================= ")
self.stop_ssvm()
- ssvm1browseup_vol=self.browse_upload_volume()
+ ssvm1browseup_vol = self.browse_upload_volume()
- ssvm1vm1details=self.deploy_vm()
+ ssvm1vm1details = self.deploy_vm()
- self.attach_volume(ssvm1vm1details,ssvm1browseup_vol.id)
+ self.attach_volume(ssvm1vm1details, ssvm1browseup_vol.id)
self.vmoperations(ssvm1vm1details)
- self.detach_volume(ssvm1vm1details,ssvm1browseup_vol.id)
-
+ self.detach_volume(ssvm1vm1details, ssvm1browseup_vol.id)
- cmd=deleteVolume.deleteVolumeCmd()
- cmd.id=ssvm1browseup_vol.id
+ cmd = deleteVolume.deleteVolumeCmd()
+ cmd.id = ssvm1browseup_vol.id
self.apiclient.deleteVolume(cmd)
self.expunge_vm(ssvm1vm1details)
@@ -2244,115 +2186,106 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.debug("========================= Test 24: Reboot SSVM and Perform Browser based volume validations ========================= ")
self.reboot_ssvm()
- ssvm2browseup_vol=self.browse_upload_volume()
+ ssvm2browseup_vol = self.browse_upload_volume()
- ssvm2vm1details=self.deploy_vm()
+ ssvm2vm1details = self.deploy_vm()
- self.attach_volume(ssvm2vm1details,ssvm2browseup_vol.id)
+ self.attach_volume(ssvm2vm1details, ssvm2browseup_vol.id)
self.vmoperations(ssvm2vm1details)
- self.detach_volume(ssvm2vm1details,ssvm2browseup_vol.id)
+ self.detach_volume(ssvm2vm1details, ssvm2browseup_vol.id)
-
- cmd=deleteVolume.deleteVolumeCmd()
- cmd.id=ssvm2browseup_vol.id
+ cmd = deleteVolume.deleteVolumeCmd()
+ cmd.id = ssvm2browseup_vol.id
self.apiclient.deleteVolume(cmd)
-
self.expunge_vm(ssvm2vm1details)
self.debug("========================= Test 25: Reboot SSVM and Perform Browser based volume validations ========================= ")
self.destroy_ssvm()
- ssvm3browseup_vol=self.browse_upload_volume()
+ ssvm3browseup_vol = self.browse_upload_volume()
- ssvm3vm1details=self.deploy_vm()
+ ssvm3vm1details = self.deploy_vm()
- self.attach_volume(ssvm3vm1details,ssvm3browseup_vol.id)
+ self.attach_volume(ssvm3vm1details, ssvm3browseup_vol.id)
self.vmoperations(ssvm3vm1details)
- self.detach_volume(ssvm3vm1details,ssvm3browseup_vol.id)
-
+ self.detach_volume(ssvm3vm1details, ssvm3browseup_vol.id)
- cmd=deleteVolume.deleteVolumeCmd()
- cmd.id=ssvm3browseup_vol.id
+ cmd = deleteVolume.deleteVolumeCmd()
+ cmd.id = ssvm3browseup_vol.id
self.apiclient.deleteVolume(cmd)
-
self.expunge_vm(ssvm3vm1details)
except Exception as e:
self.fail("Exception occurred : %s" % e)
return
-
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_03_Browser_Upload_Volume_Global_Config_TPath(self):
"""
Test Browser_Upload_Volume_Global_Config limits
"""
try:
-
+
self.debug("========================= Test 26 Validate Storage.max.upload.size ========================= ")
- globalconfig_browse_up_vol=self.browse_upload_volume()
- self.validate_max_vol_size(globalconfig_browse_up_vol,"Uploaded")
+ globalconfig_browse_up_vol = self.browse_upload_volume()
+ self.validate_max_vol_size(globalconfig_browse_up_vol, "Uploaded")
except Exception as e:
self.fail("Exception occurred : %s" % e)
return
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_04_Browser_Upload_Volume_Negative_Scenarios_TPath(self):
"""
Test Browser_Upload_Volume_Negative_Scenarios
"""
try:
self.debug("========================= Test 27 Reuse the POST URL after expiry time========================= ")
- reuse_browse_up_vol=self.browse_upload_volume()
+ reuse_browse_up_vol = self.browse_upload_volume()
self.reuse_url()
- cmd=deleteVolume.deleteVolumeCmd()
- cmd.id=reuse_browse_up_vol.id
+ cmd = deleteVolume.deleteVolumeCmd()
+ cmd.id = reuse_browse_up_vol.id
self.apiclient.deleteVolume(cmd)
-
self.debug("========================= Test 28 Reboot SSVM before upload is completed=========================")
- browse_up_vol=self.onlyupload()
+ browse_up_vol = self.onlyupload()
self.uploadvol(browse_up_vol)
- cmd=deleteVolume.deleteVolumeCmd()
- cmd.id=browse_up_vol.id
+ cmd = deleteVolume.deleteVolumeCmd()
+ cmd.id = browse_up_vol.id
self.apiclient.deleteVolume(cmd)
-
self.debug("========================= Test 29 Reboot SSVM after getting the upload volume params and before initiating the upload=========================")
- browse_up_vol=self.onlyupload()
+ browse_up_vol = self.onlyupload()
self.uploadvolwithssvmreboot(browse_up_vol)
- cmd=deleteVolume.deleteVolumeCmd()
- cmd.id=reuse_browse_up_vol.id
+ cmd = deleteVolume.deleteVolumeCmd()
+ cmd.id = browse_up_vol.id
self.apiclient.deleteVolume(cmd)
self.debug("========================= Test 30 Attach Deleted Volume=========================")
- deleted_browse_up_vol=self.browse_upload_volume()
-
+ deleted_browse_up_vol = self.browse_upload_volume()
- cmd=deleteVolume.deleteVolumeCmd()
- cmd.id=deleted_browse_up_vol.id
+ cmd = deleteVolume.deleteVolumeCmd()
+ cmd.id = deleted_browse_up_vol.id
self.apiclient.deleteVolume(cmd)
-
- deletedvm1details=self.deploy_vm()
+ deletedvm1details = self.deploy_vm()
self.attach_deleted_volume(deletedvm1details, deleted_browse_up_vol)
self.debug("========================= Test 31 Upload Volume with Invalid Format=========================")
self.invalidupload()
self.debug("========================= Test 32 Upload Mutliple Volumes with same display text=========================")
- samedisplaytext_browse_up_vol=self.browse_upload_volume()
+ samedisplaytext_browse_up_vol = self.browse_upload_volume()
self.uploadwithsamedisplaytext(samedisplaytext_browse_up_vol)
self.debug("========================= Test 33 Upload Volume with custom offering id=========================")
@@ -2362,7 +2295,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.fail("Exception occurred : %s" % e)
return
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_05_Browser_Upload_Volume_MultiSSVM_Scenarios_TPath(self):
"""
Test Browser_Upload_Volume_MultiSSVM_Scenarios
@@ -2371,18 +2304,17 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.debug("========================= Test 34 Upload volume with Multiple SSVM=========================")
- testresult=self.uploadvolwithmultissvm()
- if testresult==0:
+ testresult = self.uploadvolwithmultissvm()
+ if testresult == 0:
raise unittest.SkipTest("secstorage.session.max global config is not set to 1 which means Multiple SSVM's are not present")
- elif testresult==1:
+ elif testresult == 1:
raise unittest.SkipTest("only one SSVM is present")
except Exception as e:
self.fail("Exception occurred : %s" % e)
return
-
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_06_Browser_Upload_Volume_with_extended_file_extenstions(self):
"""
Test Browser_Upload_Volume_with_extended_file_extenstions
@@ -2390,77 +2322,73 @@ class TestBrowseUploadVolume(cloudstackTestCase):
try:
self.debug("========================= Test 35 Upload volume with extended file extenstions=========================")
- if self.uploadvolumeformat=="OVA":
- raise unittest.SkipTest("This test is need not be executed on VMWARE")
+ if self.uploadvolumeformat == "OVA":
+ raise unittest.SkipTest("This test is need not be executed on VMWARE")
self.uploadwithextendedfileextentions()
except Exception as e:
self.fail("Exception occurred : %s" % e)
return
-
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_07_Browser_Upload_Volume_Storage_Cleanup_Config_Validation(self):
"""
Test Browser_Upload_Volume_Storage_Cleanup_Config_Validation
"""
self.debug("========================= Test 36 Validate storage.cleanup.enabled and storage.cleanup.interval ========================= ")
config1 = Configurations.list(
- self.apiclient,
- name='storage.cleanup.enabled'
- )
+ self.apiclient,
+ name='storage.cleanup.enabled'
+ )
config2 = Configurations.list(
- self.apiclient,
- name='storage.cleanup.interval'
- )
+ self.apiclient,
+ name='storage.cleanup.interval'
+ )
- cleanup_enabled=config1[0].value
+ cleanup_enabled = config1[0].value
cleanup_interval = int(config2[0].value)
- if cleanup_enabled=="false":
- raise unittest.SkipTest("storage.cleanup.enabled is not set to true")
+ if cleanup_enabled == "false":
+ raise unittest.SkipTest("storage.cleanup.enabled is not set to true")
- if cleanup_interval>600:
- raise unittest.SkipTest("storage.cleanup.interval is set to wait for more than 10 mins before cleanup. Please reduce the interval to less than 10 mins")
+ if cleanup_interval > 600:
+ raise unittest.SkipTest("storage.cleanup.interval is set to wait for more than 10 mins before cleanup. Please reduce the interval to less than 10 mins")
- invaliduploadvolume=self.invalidposturl()
+ invaliduploadvolume = self.invalidposturl()
- self.validate_storage_cleanup(invaliduploadvolume,cleanup_interval)
+ self.validate_storage_cleanup(invaliduploadvolume, cleanup_interval)
return
-
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_08_Browser_Upload_Volume_TamperedPostURL(self):
"""
Test Browser_Upload_Volume_Negative_Scenarios
"""
try:
self.debug("========================= Test 37 Upload Volume with tampered post URL=========================")
- invaliduploadvolume=self.invalidposturl()
+ invaliduploadvolume = self.invalidposturl()
except Exception as e:
self.fail("Exception occurred : %s" % e)
return
-
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_09_Browser_Upload_Volume_PostURL_with_Deleted_Uploadvolume_Details(self):
"""
Test Browser_Upload_Volume_PostURL_with_Deleted_Uploadvolume_Details
"""
self.debug("========================= Test 38 PostURL_with_Deleted_Upload_Abondaned volume details=========================")
- browse_up_vol=self.onlyupload()
- res=self.posturlwithdeletedvolume(browse_up_vol)
+ browse_up_vol = self.onlyupload()
+ res = self.posturlwithdeletedvolume(browse_up_vol)
- if res=="FAIL":
+ if res == "FAIL":
self.fail("Verify - PostURL_with_Deleted_Uploadvolume_Details ")
return
-
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_10_Browser_Upload_Volume_API_with_imagepoolid(self):
"""
Test Browser_Upload_Volume_API_with_imagepoolid
@@ -2470,55 +2398,54 @@ class TestBrowseUploadVolume(cloudstackTestCase):
return
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_11_migrate_upload_volume(self):
"""
Test Browser_Upload_Volume_migrate_upload_volume
"""
self.debug("========================= Test 40 Test Browser_Upload_Volume_Migration=========================")
- browseup_vol=self.browse_upload_volume()
- vm1details=self.deploy_vm()
- self.attach_volume(vm1details,browseup_vol.id)
+ browseup_vol = self.browse_upload_volume()
+ vm1details = self.deploy_vm()
+ self.attach_volume(vm1details, browseup_vol.id)
self.volume_migration(browseup_vol, vm1details)
self.debug("========================= Test 41 Test VM Operations after Browser_Upload_Volume_Migration=========================")
self.vmoperations(vm1details)
self.debug("========================= Test 42 Detach Browser_Upload_Volume after Migration and attach to a new VM=========================")
- self.detach_volume(vm1details,browseup_vol.id)
- vm2details=self.deploy_vm()
- self.attach_volume(vm2details,browseup_vol.id)
+ self.detach_volume(vm1details, browseup_vol.id)
+ vm2details = self.deploy_vm()
+ self.attach_volume(vm2details, browseup_vol.id)
self.vmoperations(vm2details)
self.debug("========================= Test 43 Detach Browser_Upload_Volume and Migrate to another storage=========================")
- self.detach_volume(vm2details,browseup_vol.id)
+ self.detach_volume(vm2details, browseup_vol.id)
self.volume_migration(browseup_vol, "None")
self.debug("========================= Test 44 Attach detached Browser_Upload_Volume after Migration =========================")
- self.attach_volume(vm2details,browseup_vol.id)
+ self.attach_volume(vm2details, browseup_vol.id)
self.vmoperations(vm2details)
self.debug("========================= Test 45 Detach ,Resize,Attach Browser_Upload_Volume after Migration =========================")
- self.detach_volume(vm2details,browseup_vol.id)
+ self.detach_volume(vm2details, browseup_vol.id)
if self.hypervisor.lower() != "hyperv":
self.resize_volume(browseup_vol.id)
- self.attach_volume(vm2details,browseup_vol.id)
+ self.attach_volume(vm2details, browseup_vol.id)
self.vmoperations(vm2details)
- self.detach_volume(vm2details,browseup_vol.id)
+ self.detach_volume(vm2details, browseup_vol.id)
self.cleanup.append(browseup_vol)
self.cleanup.append(vm2details)
self.cleanup.append(vm1details)
return
-
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_12_Browser_Upload_Volume_with_all_API_parameters(self):
"""
Test Browser_Upload_Volumewith all API parameters
@@ -2527,27 +2454,24 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.debug("========================= Test 46 & 47 Upload volume with account name and domainid========================")
- browseup_vol1=self.browse_upload_volume()
+ browseup_vol1 = self.browse_upload_volume()
self.debug("========================= Test 48 Upload volume with projectid========================")
- browseup_vol2=self.browse_upload_volume_with_projectid(self.project.id)
+ browseup_vol2 = self.browse_upload_volume_with_projectid(self.project.id)
self.debug("========================= Test 49 Upload volume with out mandatory param zone id ========================")
- browseup_vol2=self.browse_upload_volume_with_out_zoneid()
-
+ browseup_vol2 = self.browse_upload_volume_with_out_zoneid()
self.debug("========================= Test 50 Upload volume with out mandatory param format ========================")
- browseup_vol3=self.browse_upload_volume_with_out_format()
+ browseup_vol3 = self.browse_upload_volume_with_out_format()
except Exception as e:
self.fail("Exception occurred : %s" % e)
return
-
-
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_13_Browser_Upload_Volume_volume_resource_limits(self):
"""
Test Browser_Upload_Volume Volume Resource limits
@@ -2555,16 +2479,15 @@ class TestBrowseUploadVolume(cloudstackTestCase):
try:
self.debug("========================= Test 51 Upload volume and verify volume limits========================")
- initialvolumelimit=self.getvolumelimts()
- browseup_vol1=self.browse_upload_volume()
- afteruploadvolumelimit=self.getvolumelimts()
+ initialvolumelimit = self.getvolumelimts()
+ browseup_vol1 = self.browse_upload_volume()
+ afteruploadvolumelimit = self.getvolumelimts()
- if int(afteruploadvolumelimit)!=(int(initialvolumelimit)+1):
+ if int(afteruploadvolumelimit) != (int(initialvolumelimit) + 1):
self.fail("Volume Resouce Count is not updated")
-
- cmd=deleteVolume.deleteVolumeCmd()
- cmd.id=deleted_browse_up_vol1.id
+ cmd = deleteVolume.deleteVolumeCmd()
+ cmd.id = browseup_vol1.id
self.apiclient.deleteVolume(cmd)
@@ -2572,7 +2495,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.fail("Exception occurred : %s" % e)
return
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_14_Browser_Upload_Volume_secondary_storage_resource_limits(self):
"""
Test Browser_Upload_Volume Secondary Storage Resource limits
@@ -2581,19 +2504,18 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.debug("========================= Test 52 Upload volume and verify secondary storage limits========================")
- initialsecondarystoragelimit=self.getstoragelimts(11)
- browseup_vol1=self.browse_upload_volume()
- volumedetails=Volume.list(
- self.apiclient,
- id=browseup_vol1.id)
- afteruploadsecondarystoragelimit=self.getstoragelimts(11)
+ initialsecondarystoragelimit = self.getstoragelimts(11)
+ browseup_vol1 = self.browse_upload_volume()
+ volumedetails = Volume.list(
+ self.apiclient,
+ id=browseup_vol1.id)
+ afteruploadsecondarystoragelimit = self.getstoragelimts(11)
- if afteruploadsecondarystoragelimit!=(initialsecondarystoragelimit+volumedetails[0].size):
+ if afteruploadsecondarystoragelimit != (initialsecondarystoragelimit + volumedetails[0].size):
self.fail("Secondary Storage Resouce Count is not updated")
-
- cmd=deleteVolume.deleteVolumeCmd()
- cmd.id=deleted_browse_up_vol1.id
+ cmd = deleteVolume.deleteVolumeCmd()
+ cmd.id = browseup_vol1.id
self.apiclient.deleteVolume(cmd)
@@ -2601,7 +2523,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.fail("Exception occurred : %s" % e)
return
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_15_Browser_Upload_Volume_primary_storage_resource_limits(self):
"""
Test Browser_Upload_Volume Primary Storage Resource limits
@@ -2610,46 +2532,44 @@ class TestBrowseUploadVolume(cloudstackTestCase):
self.debug("========================= Test 53 Attach Upload volume and verify primary storage limits========================")
- initialprimarystoragelimit=self.getstoragelimts(10)
- browseup_vol1=self.browse_upload_volume()
- volumedetails=Volume.list(
- self.apiclient,
- id=browseup_vol1.id)
- afteruploadprimarystoragelimit=self.getstoragelimts(10)
+ initialprimarystoragelimit = self.getstoragelimts(10)
+ browseup_vol1 = self.browse_upload_volume()
+ volumedetails = Volume.list(
+ self.apiclient,
+ id=browseup_vol1.id)
+ afteruploadprimarystoragelimit = self.getstoragelimts(10)
- if afteruploadprimarystoragelimit!=(initialprimarystoragelimit+volumedetails[0].size):
+ if afteruploadprimarystoragelimit != (initialprimarystoragelimit + volumedetails[0].size):
self.fail("Primary Storage Resource Count is not updated")
except Exception as e:
self.fail("Exception occurred : %s" % e)
return
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_16_Browser_Upload_volume_resource_limits_after_deletion(self):
"""
Test Browser_Upload_Volume resource_limits_after_deletion
"""
try:
self.debug("========================= Test 54 Delete Upload volume and verify volume limits========================")
- browseup_vol1=self.browse_upload_volume()
- initialvolumelimit=self.getvolumelimts()
+ browseup_vol1 = self.browse_upload_volume()
+ initialvolumelimit = self.getvolumelimts()
- cmd=deleteVolume.deleteVolumeCmd()
- cmd.id=browseup_vol1.id
+ cmd = deleteVolume.deleteVolumeCmd()
+ cmd.id = browseup_vol1.id
self.apiclient.deleteVolume(cmd)
- aftervolumelimit=self.getvolumelimts()
+ aftervolumelimit = self.getvolumelimts()
- if aftervolumelimit!=(initialvolumelimit-1):
+ if aftervolumelimit != (initialvolumelimit - 1):
self.fail("Volume Resource Count is not updated after deletion")
except Exception as e:
self.fail("Exception occurred : %s" % e)
return
-
-
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
+ @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
def test_17_Browser_Upload_Volume_secondary_storage_resource_limits_after_deletion(self):
"""
Test Browser_Upload_Volume secondary_storage_resource_limits_after_deletion
@@ -2657,29 +2577,28 @@ class TestBrowseUploadVolume(cloudstackTestCase):
try:
self.debug("========================= Test 55 Delete Upload volume and secondary storage limits========================")
- browseup_vol1=self.browse_upload_volume()
+ browseup_vol1 = self.browse_upload_volume()
- volumedetails=Volume.list(
- self.apiclient,
- id=browseup_vol1.id)
+ volumedetails = Volume.list(
+ self.apiclient,
+ id=browseup_vol1.id)
- initialuploadsecondarystoragelimit=self.getstoragelimts(11)
+ initialuploadsecondarystoragelimit = self.getstoragelimts(11)
- cmd=deleteVolume.deleteVolumeCmd()
- cmd.id=browseup_vol1.id
+ cmd = deleteVolume.deleteVolumeCmd()
+ cmd.id = browseup_vol1.id
self.apiclient.deleteVolume(cmd)
- afteruploadsecondarystoragelimit=self.getstoragelimts(11)
+ afteruploadsecondarystoragelimit = self.getstoragelimts(11)
- if afteruploadsecondarystoragelimit!=(initialuploadsecondarystoragelimit-volumedetails[0].size):
+ if afteruploadsecondarystoragelimit != (initialuploadsecondarystoragelimit - volumedetails[0].size):
self.fail("Secondary Storage Resouce Count is not updated after deletion")
except Exception as e:
self.fail("Exception occurred : %s" % e)
return
-
- @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="false")
+ @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="false")
def test_browser_upload_volume_incomplete(self):
"""
Test browser based incomplete volume upload, followed by SSVM destroy. Volume should go to UploadAbandoned/Error state and get cleaned up.
@@ -2687,7 +2606,7 @@ class TestBrowseUploadVolume(cloudstackTestCase):
try:
self.debug("========================= Test browser based incomplete volume upload ========================")
- #Only register volume, without uploading
+ # Only register volume, without uploading
cmd = getUploadParamsForVolume.getUploadParamsForVolumeCmd()
cmd.zoneid = self.zone.id
cmd.format = self.uploadvolumeformat
@@ -2696,24 +2615,22 @@ class TestBrowseUploadVolume(cloudstackTestCase):
cmd.domainid = self.domain.id
upload_volume_response = self.apiclient.getUploadParamsForVolume(cmd)
- #Destroy SSVM, and wait for new one to start
+ # Destroy SSVM, and wait for new one to start
self.destroy_ssvm()
- #Verify that the volume is cleaned up as part of sync-up during new SSVM start
+ # Verify that the volume is cleaned up as part of sync-up during new SSVM start
self.validate_uploaded_volume(upload_volume_response.id, 'UploadAbandoned')
except Exception as e:
self.fail("Exceptione occurred : %s" % e)
return
-
@classmethod
def tearDownClass(self):
- try:
- self.apiclient = super(TestBrowseUploadVolume,self).getClsTestClient().getApiClient()
- cleanup_resources(self.apiclient, self._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestBrowseUploadVolume, self).tearDownClass()
+ def setUp(self):
+ self.cleanup = []
+ def tearDown(self):
+ super(TestBrowseUploadVolume, self).tearDown()
diff --git a/test/integration/component/test_configdrive.py b/test/integration/component/test_configdrive.py
index fed6908..46494f0 100644
--- a/test/integration/component/test_configdrive.py
+++ b/test/integration/component/test_configdrive.py
@@ -60,7 +60,6 @@ import tempfile
import time
from contextlib import contextmanager
from nose.plugins.attrib import attr
-from retry import retry
VPC_SERVICES = 'Dhcp,StaticNat,SourceNat,NetworkACL,UserData,Dns'
ISO_SERVICES = 'Dhcp,SourceNat,StaticNat,UserData,Firewall,Dns'
@@ -1187,7 +1186,7 @@ class TestConfigDrive(cloudstackTestCase, ConfigDriveUtils):
"""
def __init__(self, methodName='runTest'):
- super(cloudstackTestCase, self).__init__(methodName)
+ super(TestConfigDrive, self).__init__(methodName)
ConfigDriveUtils.__init__(self)
@classmethod
@@ -1200,6 +1199,7 @@ class TestConfigDrive(cloudstackTestCase, ConfigDriveUtils):
cls.db_client = test_client.getDbConnection()
cls.test_data = test_client.getParsedTestDataConfig()
cls.test_data.update(Services().services)
+ cls._cleanup = []
# Get Zone, Domain and templates
cls.zone = get_zone(cls.api_client)
@@ -1217,7 +1217,7 @@ class TestConfigDrive(cloudstackTestCase, ConfigDriveUtils):
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.test_data["service_offering"])
- cls._cleanup = [cls.service_offering]
+ cls._cleanup.append(cls.service_offering)
hypervisors = Hypervisor.list(cls.api_client, zoneid=cls.zone.id)
cls.isSimulator = any(h.name == "Simulator" for h in hypervisors)
@@ -1225,50 +1225,27 @@ class TestConfigDrive(cloudstackTestCase, ConfigDriveUtils):
def setUp(self):
# Create an account
+ self.cleanup = []
self.account = Account.create(self.api_client,
self.test_data["account"],
admin=True,
domainid=self.domain.id
)
+ self.cleanup.append(self.account)
self.tmp_files = []
- self.cleanup = [self.account]
self.generate_ssh_keys()
return
@classmethod
def tearDownClass(cls):
- # Cleanup resources used
- cls.debug("Cleaning up the resources")
- for obj in reversed(cls._cleanup):
- try:
- if isinstance(obj, VirtualMachine):
- obj.delete(cls.api_client, expunge=True)
- else:
- obj.delete(cls.api_client)
- except Exception as e:
- cls.error("Failed to cleanup %s, got %s" % (obj, e))
- # cleanup_resources(cls.api_client, cls._cleanup)
- cls._cleanup = []
- cls.debug("Cleanup complete!")
- return
+ super(TestConfigDrive, cls).tearDownClass()
def tearDown(self):
- # Cleanup resources used
- self.debug("Cleaning up the resources")
- for obj in reversed(self.cleanup):
- try:
- if isinstance(obj, VirtualMachine):
- obj.delete(self.api_client, expunge=True)
- else:
- obj.delete(self.api_client)
- except Exception as e:
- self.error("Failed to cleanup %s, got %s" % (obj, e))
- # cleanup_resources(self.api_client, self.cleanup)
- self.cleanup = []
+ super(TestConfigDrive,self).tearDown()
+
for tmp_file in self.tmp_files:
os.remove(tmp_file)
self.debug("Cleanup complete!")
- return
# create_StaticNatRule_For_VM - Creates Static NAT rule on the given
# public IP for the given VM in the given network
@@ -1755,7 +1732,8 @@ class TestConfigDrive(cloudstackTestCase, ConfigDriveUtils):
self.api_client.restartVPC(cmd)
self.debug("Restarted VPC with ID - %s" % vpc.id)
- @attr(tags=["advanced", "isonw"], required_hardware="true")
+ # was tags=["advanced", "isonw"]
+ @attr(tags=["TODO"], required_hardware="true")
def test_configdrive_isolated_network(self):
"""Test Configdrive as provider for isolated Networks
to provide userdata and password reset functionality
diff --git a/test/integration/component/test_deploy_vm_userdata_reg.py b/test/integration/component/test_deploy_vm_userdata_reg.py
index cd048d0..56b8655 100644
--- a/test/integration/component/test_deploy_vm_userdata_reg.py
+++ b/test/integration/component/test_deploy_vm_userdata_reg.py
@@ -62,9 +62,6 @@ class Services:
}
-
-
-
class TestDeployVmWithUserData(cloudstackTestCase):
"""Tests for UserData
"""
@@ -75,6 +72,7 @@ class TestDeployVmWithUserData(cloudstackTestCase):
cls.apiClient = cls.testClient.getApiClient()
cls.services = Services().services
cls.zone = get_zone(cls.apiClient, cls.testClient.getZoneForTests())
+ cls._cleanup = []
if cls.zone.localstorageenabled:
#For devcloud since localstroage is enabled
cls.services["service_offering"]["storagetype"] = "local"
@@ -82,8 +80,9 @@ class TestDeployVmWithUserData(cloudstackTestCase):
cls.apiClient,
cls.services["service_offering"]
)
+ cls._cleanup.append(cls.service_offering)
cls.account = Account.create(cls.apiClient, services=cls.services["account"])
- cls.cleanup = [cls.account]
+ cls._cleanup.append(cls.account)
cls.template = get_template(
cls.apiClient,
cls.zone.id,
@@ -96,6 +95,11 @@ class TestDeployVmWithUserData(cloudstackTestCase):
cls.userdata = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(2500))
+ # py3 base64 encode adheres to the standard of 76 character lines terminated with '\n'
+ # py2 didn't insert any new-lines
+ # so we now do the encoding in the stored userdata string and remove the '\n's
+ # to get a good easy string compare in the assert later on.
+ cls.userdata = base64.encodebytes(cls.userdata.encode()).decode().replace('\n', '')
cls.user_data_2k= ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(2000))
cls.user_data_2kl = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(1900))
@@ -103,6 +107,7 @@ class TestDeployVmWithUserData(cloudstackTestCase):
def setUp(self):
self.apiClient = self.testClient.getApiClient()
self.hypervisor = self.testClient.getHypervisorInfo()
+ self.cleanup = []
@attr(tags=["simulator", "devcloud", "basic", "advanced"], required_hardware="true")
@@ -110,7 +115,6 @@ class TestDeployVmWithUserData(cloudstackTestCase):
"""Test userdata as POST, size > 2k
"""
- self.userdata = base64.encodestring(self.userdata.encode()).decode()
self.services["virtual_machine"]["userdata"] = self.userdata
deployVmResponse = VirtualMachine.create(
@@ -122,8 +126,8 @@ class TestDeployVmWithUserData(cloudstackTestCase):
templateid=self.template.id,
zoneid=self.zone.id,
method="POST"
-
)
+ self.cleanup.append(deployVmResponse)
vms = list_virtual_machines(
self.apiClient,
@@ -190,11 +194,8 @@ class TestDeployVmWithUserData(cloudstackTestCase):
)
res = str(result)
self.assertEqual(res.__contains__(self.userdata),True,"Userdata Not applied Check the failures")
-
-
except KeyError:
self.skipTest("Marvin configuration has no host credentials to check USERDATA")
-
else:
try:
host.user, host.passwd = get_host_credentials(self.config, host.ipaddress)
@@ -211,13 +212,9 @@ class TestDeployVmWithUserData(cloudstackTestCase):
except KeyError:
self.skipTest("Marvin configuration has no host credentials to check router user data")
-
-
@classmethod
def tearDownClass(cls):
- try:
- #Cleanup resources used
- cleanup_resources(cls.apiClient, cls.cleanup)
+ super(TestDeployVmWithUserData, cls).tearDownClass()
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
+ def tearDown(self):
+ super(TestDeployVmWithUserData, self).tearDown()
diff --git a/test/integration/component/test_egress_fw_rules.py b/test/integration/component/test_egress_fw_rules.py
index e1b33bf..43cd0aa 100644
--- a/test/integration/component/test_egress_fw_rules.py
+++ b/test/integration/component/test_egress_fw_rules.py
@@ -37,7 +37,7 @@ from marvin.lib.common import (get_domain,
list_routers,
list_virtual_machines
)
-from marvin.lib.utils import cleanup_resources, validateList
+from marvin.lib.utils import validateList
from marvin.cloudstackAPI import rebootRouter
from marvin.cloudstackAPI.createEgressFirewallRule import createEgressFirewallRuleCmd
from marvin.cloudstackAPI.deleteEgressFirewallRule import deleteEgressFirewallRuleCmd
@@ -160,13 +160,9 @@ class TestEgressFWRules(cloudstackTestCase):
# Cleanup
cls._cleanup.append(cls.service_offering)
-
@classmethod
def tearDownClass(cls):
- try:
- cleanup_resources(cls.api_client, reversed(cls._cleanup))
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
+ super(TestEgressFWRules, cls).tearDownClass()
def setUp(self):
self.apiclient = self.api_client
@@ -175,12 +171,11 @@ class TestEgressFWRules(cloudstackTestCase):
self.cleanup = []
self.domain = Domain.create(self.apiclient,
self.services["domain"])
- # Create an Account associated with domain
+ self.cleanup.append(self.domain)
self.account = Account.create(self.apiclient,
self.services["account"],
domainid=self.domain.id)
self.cleanup.append(self.account)
- self.cleanup.append(self.domain)
return
def create_network_offering(self, egress_policy=True, RR=False):
@@ -198,7 +193,7 @@ class TestEgressFWRules(cloudstackTestCase):
conservemode=True)
# Cleanup
- self.cleanup.append(self.network_offering)
+ # self.cleanup.append(self.network_offering)
# Enable Network offering
self.network_offering.update(self.apiclient, state='Enabled')
@@ -214,6 +209,7 @@ class TestEgressFWRules(cloudstackTestCase):
domainid=self.account.domainid,
networkofferingid=self.network_offering.id,
zoneid=self.zone.id)
+ self.cleanup.append(self.network)
self.debug("Created network with ID: %s" % self.network.id)
self.debug("Deploying instance in the account: %s" % self.account.name)
@@ -226,6 +222,7 @@ class TestEgressFWRules(cloudstackTestCase):
mode=self.zone.networktype if pfrule else 'basic',
networkids=[str(self.network.id)],
projectid=project.id if project else None)
+ self.cleanup.append(self.virtual_machine)
self.debug("Deployed instance %s in account: %s" % (self.virtual_machine.id,self.account.name))
# Checking if VM is running or not, in case it is deployed in error state, test case fails
@@ -241,9 +238,10 @@ class TestEgressFWRules(cloudstackTestCase):
domainid=self.account.domainid,
networkid=self.network.id
)
+ self.cleanup.append(self.public_ip)
# Open up firewall port for SSH
- FireWallRule.create(
+ fwr = FireWallRule.create(
self.apiclient,
ipaddressid=self.public_ip.ipaddress.id,
protocol=self.services["natrule"]["protocol"],
@@ -251,15 +249,17 @@ class TestEgressFWRules(cloudstackTestCase):
startport=self.services["natrule"]["publicport"],
endport=self.services["natrule"]["publicport"]
)
+ self.cleanup.append(fwr)
self.debug("Creating NAT rule for VM ID: %s" % self.virtual_machine.id)
#Create NAT rule
- NATRule.create(
+ nr = NATRule.create(
self.apiclient,
self.virtual_machine,
self.services["natrule"],
self.public_ip.ipaddress.id
)
+ self.cleanup.append(nr)
return
def exec_script_on_user_vm(self, script, exec_cmd_params, expected_result, negative_test=False):
@@ -332,22 +332,17 @@ class TestEgressFWRules(cloudstackTestCase):
cmd.startport = start_port
if end_port:
cmd.endport = end_port
- rule = self.apiclient.createEgressFirewallRule(cmd)
- self.debug('Created rule=%s' % rule.id)
- self.egressruleid = rule.id
+ self.egressrule = self.apiclient.createEgressFirewallRule(cmd)
+ self.debug('Created rule=%s' % self.egressrule.id)
def deleteEgressRule(self):
cmd = deleteEgressFirewallRuleCmd()
- cmd.id = self.egressruleid
+ cmd.id = self.egressrule.id
self.apiclient.deleteEgressFirewallRule(cmd)
- self.egressruleid = None
+ self.egressrule = None
def tearDown(self):
- try:
- self.debug("Cleaning up the resources")
- cleanup_resources(self.apiclient, self.cleanup)
- except Exception as e:
- self.fail("Warning! Cleanup failed: %s" % e)
+ super(TestEgressFWRules, self).tearDown()
def create_another_vm(self):
self.debug("Deploying instance in the account: %s and network: %s" % (self.account.name, self.network.id))
@@ -361,6 +356,7 @@ class TestEgressFWRules(cloudstackTestCase):
mode=self.zone.networktype,
networkids=[str(self.network.id)],
projectid=project.id if project else None)
+ self.cleanup.append(self.virtual_machine1)
self.debug("Deployed instance %s in account: %s" % (self.virtual_machine.id,self.account.name))
# Checking if VM is running or not, in case it is deployed in error state, test case fails
@@ -420,7 +416,9 @@ class TestEgressFWRules(cloudstackTestCase):
# 6. public network should not be reachable from the first VM.
self.create_vm(egress_policy=False)
self.create_another_vm()
- self.createEgressRule(protocol='all', cidr=self.virtual_machine1.ipaddress+"/32")
+ self.createEgressRule(cidr=self.virtual_machine1.ipaddress+"/32")
+ # this should read protocol='all' as below, see CLOUDSTACK-10075, now testing only 'ICMP'
+ # self.createEgressRule(protocol='all', cidr=self.virtual_machine1.ipaddress+"/32")
self.exec_script_on_user_vm('ping -c 1 www.google.com',
"| grep -oP \'\d+(?=% packet loss)\'",
"['100']",
@@ -530,7 +528,7 @@ class TestEgressFWRules(cloudstackTestCase):
# 3. check the table Firewall_Rules, Firewall and Traffic_type should be "Egress".
self.create_vm()
self.createEgressRule(cidr=TestEgressFWRules.zone.guestcidraddress)
- qresultset = self.dbclient.execute("select purpose, traffic_type from firewall_rules where uuid='%s';" % self.egressruleid)
+ qresultset = self.dbclient.execute("select purpose, traffic_type from firewall_rules where uuid='%s';" % self.egressrule.id)
self.assertEqual(isinstance(qresultset, list),
True,
"Check DB query result set for valid data")
@@ -567,7 +565,7 @@ class TestEgressFWRules(cloudstackTestCase):
# 3. check the table Firewall_Rules, Firewall and Traffic_type should be "Egress".
self.create_vm(egress_policy=False)
self.createEgressRule(cidr=TestEgressFWRules.zone.guestcidraddress)
- qresultset = self.dbclient.execute("select purpose, traffic_type from firewall_rules where uuid='%s';" % self.egressruleid)
+ qresultset = self.dbclient.execute("select purpose, traffic_type from firewall_rules where uuid='%s';" % self.egressrule.id)
self.assertEqual(isinstance(qresultset, list),
True,
"Check DB query result set for valid data")
@@ -593,44 +591,6 @@ class TestEgressFWRules(cloudstackTestCase):
0,
"DB results not matching, expected: 0, found: %s" % qresultset[0][0])
- @unittest.skip("Skip")
- @attr(tags=["advanced", "NotRun"])
- def test_05_egress_fr5(self):
- """Test Create Egress rule and check the IP tables
- """
- # Validate the following:
- # 1. deploy VM using network offering with egress policy true.
- # 2. create egress rule with specific CIDR + port range.
- # 3. login to VR.
- # 4. Check iptables for rules settings.
- # -A FW_OUTBOUND -j FW_EGRESS_RULES
- # -A FW_EGRESS_RULES -m state --state RELATED,ESTABLISHED -j ACCEPT
- # -A FW_EGRESS_RULES -d 10.147.28.0/24 -p tcp -m tcp --dport 22 -j ACCEPT
- # -A FW_EGRESS_RULES -j DROP
- self.create_vm()
- self.createEgressRule(cidr=TestEgressFWRules.zone.guestcidraddress)
- #TODO: Query VR for expected route rules.
-
-
- @unittest.skip("Skip")
- @attr(tags=["advanced", "NotRun"])
- def test_05_1_egress_fr5(self):
- """Test Create Egress rule and check the IP tables
- """
- # Validate the following:
- # 1. deploy VM using network offering with egress policy false.
- # 2. create egress rule with specific CIDR + port range.
- # 3. login to VR.
- # 4. Check iptables for rules settings.
- # -A FW_OUTBOUND -j FW_EGRESS_RULES
- # -A FW_EGRESS_RULES -m state --state RELATED,ESTABLISHED -j ACCEPT
- # -A FW_EGRESS_RULES -d 10.147.28.0/24 -p tcp -m tcp --dport 22 -j ACCEPT
- # -A FW_EGRESS_RULES -j DROP
- self.create_vm(egress_policy=False)
- self.createEgressRule(cidr=TestEgressFWRules.zone.guestcidraddress)
- #TODO: Query VR for expected route rules.
-
-
@attr(tags=["advanced"], required_hardware="true")
def test_06_egress_fr6(self):
"""Test Create Egress rule without CIDR
@@ -791,7 +751,6 @@ class TestEgressFWRules(cloudstackTestCase):
self.create_vm(egress_policy=False)
self.assertRaises(Exception, self.createEgressRule, cidr='10.2.2.0/24')
-
@attr(tags=["advanced"], required_hardware="false")
def test_11_egress_fr11(self):
"""Test Regression on Firewall + PF + LB + SNAT
diff --git a/test/integration/component/test_escalations_networks.py b/test/integration/component/test_escalations_networks.py
index e3095a1..37557b7 100644
--- a/test/integration/component/test_escalations_networks.py
+++ b/test/integration/component/test_escalations_networks.py
@@ -63,7 +63,7 @@ class TestNetworks_1(cloudstackTestCase):
cls.api_client,
cls.test_data["network_offering_vlan"],
)
- # Enable Network offering
+ cls._cleanup.append(cls.network_offering)
cls.network_offering.update(cls.api_client, state='Enabled')
cls.test_data["network_without_acl"][
"networkoffering"] = cls.network_offering.id
@@ -71,12 +71,14 @@ class TestNetworks_1(cloudstackTestCase):
cls.api_client,
cls.test_data["service_offerings"]["tiny"]
)
- # Creating Disk offering, Service Offering and Account
+ cls._cleanup.append(cls.service_offering)
+
cls.account = Account.create(
cls.api_client,
cls.test_data["account"],
domainid=cls.domain.id
)
+ cls._cleanup.append(cls.account)
# Getting authentication for user in newly created Account
cls.user = cls.account.user[0]
cls.userapiclient = cls.testClient.getUserApiClient(
@@ -89,9 +91,6 @@ class TestNetworks_1(cloudstackTestCase):
cls.account.domainid
)
cls._cleanup.append(cls.account_network)
- cls._cleanup.append(cls.account)
- cls._cleanup.append(cls.service_offering)
- cls._cleanup.append(cls.network_offering)
except Exception as e:
cls.tearDownClass()
raise Exception("Warning: Exception in setup : %s" % e)
@@ -103,16 +102,11 @@ class TestNetworks_1(cloudstackTestCase):
self.cleanup = []
def tearDown(self):
- # Clean up, terminate the created volumes
- cleanup_resources(self.apiClient, self.cleanup)
- return
+ super(TestNetworks_1, self).tearDown()
@classmethod
def tearDownClass(cls):
- try:
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
+ super(TestNetworks_1, cls).tearDownClass()
def __verify_values(self, expected_vals, actual_vals):
"""
@@ -289,9 +283,9 @@ class TestNetworks_1(cloudstackTestCase):
self.apiClient,
self.test_data["network_offering_without_sourcenat"],
)
+ self.cleanup.append(network_offering_without_sourcenat)
if network_offering_without_sourcenat is None:
self.fail("Creation of network offering without sourcenat failed")
- self.cleanup.append(network_offering_without_sourcenat)
# Enable network offering
network_offering_without_sourcenat.update(
self.apiClient,
@@ -480,7 +474,8 @@ class TestNetworks_1(cloudstackTestCase):
)
return
- @attr(tags=["advanced"], required_hardware="true")
+ # was tags=["advanced"]
+ @attr(tags=["TODO"], required_hardware="true")
def test_05_list_network_offerings_with_and_without_vpc(self):
"""
@Desc: Test list network offerings for vpc true and false parameters
@@ -539,12 +534,12 @@ class TestNetworks_1(cloudstackTestCase):
self.apiClient,
self.test_data["network_offering_vlan"],
)
+ self.cleanup.append(network_offering)
self.assertIsNotNone(
network_offering,
"Network offering is not created")
# Enable Network offering
network_offering.update(self.apiClient, state='Enabled')
- self.cleanup.append(network_offering)
# List network offering
network_offering_after_count = NetworkOffering.list(self.userapiclient)
status = validateList(network_offering_after_count)
@@ -602,7 +597,8 @@ class TestNetworks_1(cloudstackTestCase):
)
return
- @attr(tags=["advanced"], required_hardware="true")
+ # @attr(tags=["advanced"], required_hardware="true")
+ @attr(tags=["TODO"], required_hardware="true")
def test_06_create_network_in_vpc(self):
"""
@Desc: Test create network in vpc and verify VPC name
@@ -641,6 +637,7 @@ class TestNetworks_1(cloudstackTestCase):
zoneid=self.zone.id,
)
self.assertIsNotNone(vpc_1, "VPC is not created")
+ self.cleanup.append(vpc_1)
# List VPCs
vpc_list = VPC.list(
self.userapiclient,
@@ -702,7 +699,6 @@ class TestNetworks_1(cloudstackTestCase):
"Network is not created"
)
self.cleanup.append(network_created)
- self.cleanup.append(vpc_1)
# Creating expected and actual values dictionaries
expected_dict = {
"id": self.test_data["network_without_acl"]["zoneid"],
@@ -769,7 +765,8 @@ class TestNetworks_1(cloudstackTestCase):
)
return
- @attr(tags=["advanced"], required_hardware="true")
+ # was tags=["advanced"]
+ @attr(tags=["TODO"], required_hardware="true")
def test_07_create_delete_network(self):
"""
@Desc: Test delete network
@@ -875,7 +872,8 @@ class TestNetworks_1(cloudstackTestCase):
)
return
- @attr(tags=["advanced"], required_hardware="true")
+ # was tags=["advanced"]
+ @attr(tags=["TODO"], required_hardware="true")
def test_08_update_network(self):
"""
@Desc: Test update network
@@ -1126,7 +1124,8 @@ class TestNetworks_1(cloudstackTestCase):
)
return
- @attr(tags=["advanced"], required_hardware="true")
+ # was tags=["advanced"]
+ @attr(tags=["TODO"], required_hardware="true")
def test_10_list_networks_in_vpc(self):
"""
@Desc: Test list networks in vpc and verify VPC name
@@ -1164,6 +1163,7 @@ class TestNetworks_1(cloudstackTestCase):
zoneid=self.zone.id,
)
self.assertIsNotNone(vpc_1, "VPC is not created")
+ self.cleanup.append(vpc_1)
# List VPCs
vpc_list = VPC.list(
self.userapiclient,
@@ -1215,7 +1215,6 @@ class TestNetworks_1(cloudstackTestCase):
"Network is not created"
)
self.cleanup.append(network_created)
- self.cleanup.append(vpc_1)
# Creating expected and actual values dictionaries
expected_dict = {
"id": self.test_data["network_without_acl"]["zoneid"],
@@ -1271,7 +1270,8 @@ class TestNetworks_1(cloudstackTestCase):
)
return
- @attr(tags=["advanced"], required_hardware="true")
+ # @attr(tags=["advanced"], required_hardware="true")
+ @attr(tags=["TODO"], required_hardware="true")
def test_11_update_vpc(self):
"""
@Desc: Test create vpc with network domain as parameter
@@ -1370,7 +1370,8 @@ class TestNetworks_1(cloudstackTestCase):
)
return
- @attr(tags=["advanced"], required_hardware="true")
+ # was tags=["advanced"]
+ @attr(tags=["TODO"], required_hardware="true")
def test_12_list_create_delete_networkACL(self):
"""
@Desc: Test create network in vpc and verify VPC name
@@ -1411,6 +1412,7 @@ class TestNetworks_1(cloudstackTestCase):
vpcofferingid=vpc_offs.id,
zoneid=self.zone.id,
)
+ self.cleanup.append(vpc_1)
self.assertIsNotNone(vpc_1, "VPC is not created")
# List VPCs
vpc_list = VPC.list(
@@ -1470,7 +1472,6 @@ class TestNetworks_1(cloudstackTestCase):
accountid=self.account.name,
)
self.cleanup.append(network_created)
- self.cleanup.append(vpc_1)
self.assertIsNotNone(
network_created,
"Network is not created"
@@ -1603,12 +1604,13 @@ class TestNetworks_2(cloudstackTestCase):
cls.test_data["account"],
domainid=cls.domain.id
)
+ cls._cleanup.append(cls.account)
+
# Getting authentication for user in newly created Account
cls.user = cls.account.user[0]
cls.userapiclient = cls.testClient.getUserApiClient(
cls.user.username,
cls.domain.name)
- cls._cleanup.append(cls.account)
cls.vpc_offering = VpcOffering.create(cls.api_client,
cls.test_data["vpc_offering"]
@@ -1626,16 +1628,11 @@ class TestNetworks_2(cloudstackTestCase):
self.cleanup = []
def tearDown(self):
- # Clean up, terminate the created volumes
- cleanup_resources(self.apiClient, self.cleanup)
- return
+ super(TestNetworks_2, self).tearDown()
@classmethod
def tearDownClass(cls):
- try:
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
+ super(TestNetworks_2, cls).tearDownClass()
def __verify_values(self, expected_vals, actual_vals):
"""
@@ -2212,6 +2209,7 @@ class TestNetworks_2(cloudstackTestCase):
self.zone.id
)
self.assertIsNotNone(vpc_created, "VPC Creation Failed")
+ self.cleanup.append(vpc_created)
# Listing the vpc for a user after creating a vpc
list_vpc_after = VPC.list(self.userapiclient)
status = validateList(list_vpc_after)
@@ -2226,7 +2224,6 @@ class TestNetworks_2(cloudstackTestCase):
len(list_vpc_after),
"list VPC not equal as expected"
)
- self.cleanup.append(vpc_created)
# Restarting VPC
vpc_restarted = VPC.restart(vpc_created, self.userapiclient)
# Verifying restart function resturns true
@@ -2367,7 +2364,8 @@ class TestNetworks_2(cloudstackTestCase):
)
return
- @attr(tags=["advanced"], required_hardware="true")
+ # @attr(tags=["advanced"], required_hardware="true")
+ @attr(tags=["TODO"], required_hardware="true")
def test_19_create_list_reset_delete_vpnconnections(self):
"""
@Desc: Test to List Create Reset and Delete VPN Customer
diff --git a/test/integration/component/test_escalations_templates.py b/test/integration/component/test_escalations_templates.py
index bd7c418..18c2da7 100644
--- a/test/integration/component/test_escalations_templates.py
+++ b/test/integration/component/test_escalations_templates.py
@@ -89,18 +89,11 @@ class TestTemplates(cloudstackTestCase):
self.cleanup.append(self.account)
def tearDown(self):
- # Clean up, terminate the created resources
- cleanup_resources(self.apiClient, self.cleanup)
- return
+ super(TestTemplates, self).tearDown()
@classmethod
def tearDownClass(cls):
- try:
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
-
- return
+ super(TestTemplates, cls).tearDownClass()
def __verify_values(self, expected_vals, actual_vals):
"""
diff --git a/test/integration/component/test_escalations_vmware.py b/test/integration/component/test_escalations_vmware.py
index e5e44b7..2939e61 100644
--- a/test/integration/component/test_escalations_vmware.py
+++ b/test/integration/component/test_escalations_vmware.py
@@ -111,7 +111,7 @@ class TestVMware(cloudstackTestCase):
raise Exception("Warning: Exception during cleanup : %s" % e)
@attr(tags=["advanced"], required_hardware="true")
- def test1_attach_volume_ide(self):
+ def test_01_attach_volume_ide(self):
"""
@desc: Exception when attaching data disk to RHEL VM on vSphere
Step1: Confirm that vmware.root.disk.controller = "ide" in Global Settings.
@@ -206,8 +206,9 @@ class TestVMware(cloudstackTestCase):
self.fail("Failed to attach data disk to RHEL vm whose root disk type is IDE")
return
- @attr(tags=["advanced", "basic"], required_hardware="true")
- def test2_attach_ISO_in_CentOSVM(self):
+ # @attr(tags=["advanced", "basic"], required_hardware="true")
+ @attr(tags=["TODO"], required_hardware="true")
+ def test_02_attach_ISO_in_CentOSVM(self):
"""
@desc:Incorrect guest os mapping in vmware for CentOS 5.9 and above
Step1 :Register an CentOS 6.3 template
@@ -269,8 +270,9 @@ class TestVMware(cloudstackTestCase):
self.assertEqual(attachedIsoName, "vmware-tools.iso", "vmware-tools.iso not attached")
return
- @attr(tags=["advanced", "basic"], required_hardware="true")
- def test3_attach_ISO_in_RHEL7OSVM(self):
+ # @attr(tags=["advanced", "basic"], required_hardware="true")
+ @attr(tags=["TODO"], required_hardware="true")
+ def test_03_attach_ISO_in_RHEL7OSVM(self):
"""
@desc:Incorrect guest os mapping in vmware for Rhel7. Add a valid RHEL7 URL to execute this test case
Step1 :Register an RHEL 7 template
diff --git a/test/integration/component/test_escalations_volumes.py b/test/integration/component/test_escalations_volumes.py
index 6d62d31..da6b624 100644
--- a/test/integration/component/test_escalations_volumes.py
+++ b/test/integration/component/test_escalations_volumes.py
@@ -291,7 +291,8 @@ class TestVolumes(cloudstackTestCase):
)
return
- @attr(tags=["advanced", "basic"], required_hardware="true")
+ # @attr(tags=["advanced", "basic"], required_hardware="true")
+ @attr(tags=["TODO"], required_hardware="true")
def test_02_list_volume_byid(self):
"""
@summary: Test List Volumes with Id
@@ -699,7 +700,8 @@ class TestVolumes(cloudstackTestCase):
)
return
- @attr(tags=["advanced", "basic"], required_hardware="true")
+ # @attr(tags=["advanced", "basic"], required_hardware="true")
+ @attr(tags=["TODO"], required_hardware="true")
def test_05_volume_snapshot(self):
"""
@summary: Test to verify creation of snapshot from volume
diff --git a/test/integration/component/test_lb_secondary_ip.py b/test/integration/component/test_lb_secondary_ip.py
index 991466d..de61eb6 100644
--- a/test/integration/component/test_lb_secondary_ip.py
+++ b/test/integration/component/test_lb_secondary_ip.py
@@ -145,26 +145,17 @@ class TestAssignLBRule(cloudstackTestCase):
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
mode=self.zone.networktype)
+ self.cleanup.append(self.virtual_machine)
except Exception as e:
self.tearDown()
raise e
def tearDown(self):
- try:
- # Clean up, terminate the created accounts, domains etc
- cleanup_resources(self.apiclient, self.cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestAssignLBRule, self).tearDown()
@classmethod
def tearDownClass(cls):
- try:
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestAssignLBRule, cls).tearDownClass()
@attr(tags=["advanced", "selfservice"], required_hardware="false")
def test_01_lb_rule_for_primary_ip(self):
@@ -186,6 +177,7 @@ class TestAssignLBRule(cloudstackTestCase):
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=self.virtual_machine.nic[0].networkid)
+ self.cleanup.append(public_ip)
lb_rule = LoadBalancerRule.create(
self.apiclient,
@@ -194,6 +186,7 @@ class TestAssignLBRule(cloudstackTestCase):
accountid=self.account.name,
networkid=self.virtual_machine.nic[0].networkid,
domainid=self.account.domainid)
+ self.cleanup.append(lb_rule)
vmidipmap = [{"vmid": str(self.virtual_machine.id),
"vmip": str(self.virtual_machine.nic[0].ipaddress)}]
@@ -240,6 +233,7 @@ class TestAssignLBRule(cloudstackTestCase):
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=self.virtual_machine.nic[0].networkid)
+ self.cleanup.append(public_ip)
lb_rule = LoadBalancerRule.create(
self.apiclient,
@@ -248,6 +242,7 @@ class TestAssignLBRule(cloudstackTestCase):
accountid=self.account.name,
networkid=self.virtual_machine.nic[0].networkid,
domainid=self.account.domainid)
+ self.cleanup.append(lb_rule)
vmidipmap = [{"vmid": str(self.virtual_machine.id),
"vmip": str(secondaryip.ipaddress)}]
@@ -294,6 +289,7 @@ class TestAssignLBRule(cloudstackTestCase):
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=self.virtual_machine.nic[0].networkid)
+ self.cleanup.append(public_ip)
lb_rule = LoadBalancerRule.create(
self.apiclient,
@@ -302,6 +298,7 @@ class TestAssignLBRule(cloudstackTestCase):
accountid=self.account.name,
networkid=self.virtual_machine.nic[0].networkid,
domainid=self.account.domainid)
+ self.cleanup.append(lb_rule)
vmidipmap = [{"vmid": str(self.virtual_machine.id),
"vmip": str(self.virtual_machine.nic[0].ipaddress)},
@@ -353,6 +350,7 @@ class TestAssignLBRule(cloudstackTestCase):
serviceofferingid=self.service_offering.id,
mode=self.zone.networktype,
networkids=[self.virtual_machine.nic[0].networkid, ])
+ self.cleanup.append(self.virtual_machine2)
secondaryip_vm2 = NIC.addIp(self.apiclient,
id=self.virtual_machine2.nic[0].id
@@ -364,6 +362,7 @@ class TestAssignLBRule(cloudstackTestCase):
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=self.virtual_machine.nic[0].networkid)
+ self.cleanup.append(public_ip)
lb_rule = LoadBalancerRule.create(
self.apiclient,
@@ -372,6 +371,7 @@ class TestAssignLBRule(cloudstackTestCase):
accountid=self.account.name,
networkid=self.virtual_machine.nic[0].networkid,
domainid=self.account.domainid)
+ self.cleanup.append(lb_rule)
vmidipmap = [{"vmid": str(self.virtual_machine.id),
"vmip": str(self.virtual_machine.nic[0].ipaddress)},
@@ -473,25 +473,17 @@ class TestFailureScenarios(cloudstackTestCase):
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
mode=self.zone.networktype)
+ self.cleanup.append(self.virtual_machine)
except Exception as e:
self.tearDown()
raise e
def tearDown(self):
- try:
- cleanup_resources(self.apiclient, self.cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestFailureScenarios, self).tearDown()
@classmethod
def tearDownClass(cls):
- try:
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestFailureScenarios, cls).tearDownClass()
@attr(tags=["advanced", "selfservice"], required_hardware="false")
def test_05_lb_rule_wrong_vm_id(self):
@@ -514,6 +506,7 @@ class TestFailureScenarios(cloudstackTestCase):
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=self.virtual_machine.nic[0].networkid)
+ self.cleanup.append(public_ip)
lb_rule = LoadBalancerRule.create(
self.apiclient,
@@ -522,6 +515,7 @@ class TestFailureScenarios(cloudstackTestCase):
accountid=self.account.name,
networkid=self.virtual_machine.nic[0].networkid,
domainid=self.account.domainid)
+ self.cleanup.append(lb_rule)
vmidipmap = [{"vmid": str(self.virtual_machine.id) + random_gen(),
"vmip": str(secondaryip.ipaddress)}]
@@ -552,6 +546,7 @@ class TestFailureScenarios(cloudstackTestCase):
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=self.virtual_machine.nic[0].networkid)
+ self.cleanup.append(public_ip)
lb_rule = LoadBalancerRule.create(
self.apiclient,
@@ -560,6 +555,7 @@ class TestFailureScenarios(cloudstackTestCase):
accountid=self.account.name,
networkid=self.virtual_machine.nic[0].networkid,
domainid=self.account.domainid)
+ self.cleanup.append(lb_rule)
vmidipmap = [{"vmid": str(self.virtual_machine.id),
"vmip": str(secondaryip.ipaddress) + random_gen()}]
@@ -593,6 +589,7 @@ class TestFailureScenarios(cloudstackTestCase):
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=self.virtual_machine.nic[0].networkid)
+ self.cleanup.append(public_ip1)
lb_rule1 = LoadBalancerRule.create(
self.apiclient,
@@ -601,6 +598,7 @@ class TestFailureScenarios(cloudstackTestCase):
accountid=self.account.name,
networkid=self.virtual_machine.nic[0].networkid,
domainid=self.account.domainid)
+ self.cleanup.append(lb_rule1)
public_ip2 = PublicIPAddress.create(
self.apiclient,
@@ -608,6 +606,7 @@ class TestFailureScenarios(cloudstackTestCase):
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=self.virtual_machine.nic[0].networkid)
+ self.cleanup.append(public_ip2)
lb_rule2 = LoadBalancerRule.create(
self.apiclient,
@@ -616,6 +615,7 @@ class TestFailureScenarios(cloudstackTestCase):
accountid=self.account.name,
networkid=self.virtual_machine.nic[0].networkid,
domainid=self.account.domainid)
+ self.cleanup.append(lb_rule2)
vmidipmap = [{"vmid": str(self.virtual_machine.id),
"vmip": str(secondaryip.ipaddress)}]
@@ -655,6 +655,7 @@ class TestFailureScenarios(cloudstackTestCase):
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=self.virtual_machine.nic[0].networkid)
+ self.cleanup.append(public_ip1)
lb_rule1 = LoadBalancerRule.create(
self.apiclient,
@@ -663,6 +664,7 @@ class TestFailureScenarios(cloudstackTestCase):
accountid=self.account.name,
networkid=self.virtual_machine.nic[0].networkid,
domainid=self.account.domainid)
+ self.cleanup.append(lb_rule1)
vmidipmap = [{"vmid": str(self.virtual_machine.id),
"vmip": str(secondaryip.ipaddress)}]
@@ -736,25 +738,17 @@ class TestListLBRuleInstances(cloudstackTestCase):
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
mode=self.zone.networktype)
+ self.cleanup.append(self.virtual_machine)
except Exception as e:
self.tearDown()
raise e
def tearDown(self):
- try:
- cleanup_resources(self.apiclient, self.cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestListLBRuleInstances, self).tearDown()
@classmethod
def tearDownClass(cls):
- try:
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestListLBRuleInstances, cls).tearDownClass()
@attr(tags=["advanced", "selfservice"], required_hardware="false")
def test_09_lbvmips_true(self):
@@ -778,6 +772,7 @@ class TestListLBRuleInstances(cloudstackTestCase):
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=self.virtual_machine.nic[0].networkid)
+ self.cleanup.append(public_ip)
lb_rule = LoadBalancerRule.create(
self.apiclient,
@@ -786,6 +781,7 @@ class TestListLBRuleInstances(cloudstackTestCase):
accountid=self.account.name,
networkid=self.virtual_machine.nic[0].networkid,
domainid=self.account.domainid)
+ self.cleanup.append(lb_rule)
vmidipmap = [{"vmid": str(self.virtual_machine.id),
"vmip": str(secondaryip.ipaddress)}]
@@ -836,6 +832,7 @@ class TestListLBRuleInstances(cloudstackTestCase):
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=self.virtual_machine.nic[0].networkid)
+ self.cleanup.append(public_ip)
lb_rule = LoadBalancerRule.create(
self.apiclient,
@@ -844,6 +841,7 @@ class TestListLBRuleInstances(cloudstackTestCase):
accountid=self.account.name,
networkid=self.virtual_machine.nic[0].networkid,
domainid=self.account.domainid)
+ self.cleanup.append(lb_rule)
vmidipmap = [{"vmid": str(self.virtual_machine.id),
"vmip": str(secondaryip.ipaddress)}]
@@ -927,6 +925,7 @@ class TestLbRuleFunctioning(cloudstackTestCase):
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
mode=self.zone.networktype)
+ self.cleanup.append(self.virtual_machine)
self.secondaryip = NIC.addIp(self.apiclient,
id=self.virtual_machine.nic[0].id)
@@ -937,8 +936,9 @@ class TestLbRuleFunctioning(cloudstackTestCase):
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=self.virtual_machine.nic[0].networkid)
+ self.cleanup.append(self.public_ip)
- FireWallRule.create(
+ fwr = FireWallRule.create(
self.apiclient,
ipaddressid=self.public_ip.ipaddress.id,
protocol='TCP',
@@ -946,6 +946,7 @@ class TestLbRuleFunctioning(cloudstackTestCase):
self.testdata["fwrule"]["cidr"]],
startport=self.testdata["fwrule"]["startport"],
endport=self.testdata["fwrule"]["endport"])
+ self.cleanup.append(fwr)
# To make secondary IP working for VM, we have to configure it on
# VM after acquiring it
@@ -960,6 +961,7 @@ class TestLbRuleFunctioning(cloudstackTestCase):
self.testdata["natrule"],
ipaddressid=self.public_ip.ipaddress.id,
networkid=self.virtual_machine.nic[0].networkid)
+ self.cleanup.append(nat_rule)
sshClient = SshClient(self.public_ip.ipaddress.ipaddress,
self.testdata['natrule']["publicport"],
@@ -984,6 +986,7 @@ class TestLbRuleFunctioning(cloudstackTestCase):
# Deleting NAT rule after configuring secondary IP
nat_rule.delete(self.apiclient)
+ self.cleanup.remove(nat_rule)
self.testdata["lbrule"]["publicport"] = 22
self.testdata["lbrule"]["privateport"] = 22
@@ -995,25 +998,17 @@ class TestLbRuleFunctioning(cloudstackTestCase):
accountid=self.account.name,
networkid=self.virtual_machine.nic[0].networkid,
domainid=self.account.domainid)
+ self.cleanup.append(self.lb_rule)
except Exception as e:
self.tearDown()
raise e
def tearDown(self):
- try:
- cleanup_resources(self.apiclient, self.cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestLbRuleFunctioning, self).tearDown()
@classmethod
def tearDownClass(cls):
- try:
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestLbRuleFunctioning, cls).tearDownClass()
@attr(tags=["advanced"], required_hardware="true")
def test_11_ssh_to_secondary_ip(self):
@@ -1081,6 +1076,7 @@ class TestLbRuleFunctioning(cloudstackTestCase):
self.fail("Exception during SSH : %s" % e)
self.public_ip.delete(self.apiclient)
+ self.cleanup.remove(self.public_ip)
with self.assertRaises(Exception):
LoadBalancerRule.list(self.apiclient, id=self.lb_rule.id)
@@ -1126,6 +1122,7 @@ class TestLbRuleFunctioning(cloudstackTestCase):
self.fail("Exception during SSH : %s" % e)
self.lb_rule.delete(self.apiclient)
+ self.cleanup.remove(self.lb_rule)
with self.assertRaises(Exception):
SshClient(self.public_ip.ipaddress.ipaddress,
@@ -1175,6 +1172,7 @@ class TestLbRuleFunctioning(cloudstackTestCase):
self.lb_rule.remove(self.apiclient,
vmidipmap=vmidipmap)
+ self.cleanup.remove(self.lb_rule)
try:
SshClient(self.public_ip.ipaddress.ipaddress,
@@ -1226,6 +1224,7 @@ class TestLbRuleFunctioning(cloudstackTestCase):
self.lb_rule.remove(self.apiclient,
vmidipmap=vmidipmap)
+ self.cleanup.remove(self.lb_rule)
try:
SshClient(self.public_ip.ipaddress.ipaddress,
@@ -1272,6 +1271,7 @@ class TestLbRuleFunctioning(cloudstackTestCase):
self.fail("Exception during SSH : %s" % e)
self.lb_rule.remove(self.apiclient, vms=[self.virtual_machine])
+ self.cleanup.remove(self.lb_rule)
lbrules = LoadBalancerRule.list(
self.apiclient,
@@ -1441,6 +1441,7 @@ class TestNetworkOperations(cloudstackTestCase):
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
mode=self.zone.networktype)
+ self.cleanup.append(self.virtual_machine)
self.secondaryip = NIC.addIp(self.apiclient,
id=self.virtual_machine.nic[0].id)
@@ -1450,8 +1451,9 @@ class TestNetworkOperations(cloudstackTestCase):
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=self.virtual_machine.nic[0].networkid)
+ self.cleanup.append(self.public_ip)
- FireWallRule.create(
+ fwr = FireWallRule.create(
self.apiclient,
ipaddressid=self.public_ip.ipaddress.id,
protocol='TCP',
@@ -1459,6 +1461,7 @@ class TestNetworkOperations(cloudstackTestCase):
self.testdata["fwrule"]["cidr"]],
startport=self.testdata["fwrule"]["startport"],
endport=self.testdata["fwrule"]["endport"])
+ self.cleanup.append(fwr)
# To make secondary IP working for VM, we have to configure it
# on VM after acquiring it
@@ -1473,6 +1476,7 @@ class TestNetworkOperations(cloudstackTestCase):
self.testdata["natrule"],
ipaddressid=self.public_ip.ipaddress.id,
networkid=self.virtual_machine.nic[0].networkid)
+ self.cleanup.append(nat_rule)
sshClient = SshClient(self.public_ip.ipaddress.ipaddress,
self.testdata['natrule']["publicport"],
@@ -1497,6 +1501,7 @@ class TestNetworkOperations(cloudstackTestCase):
# Deleting NAT rule after configuring secondary IP
nat_rule.delete(self.apiclient)
+ self.cleanup.remove(nat_rule)
self.testdata["lbrule"]["publicport"] = 22
self.testdata["lbrule"]["privateport"] = 22
@@ -1508,25 +1513,17 @@ class TestNetworkOperations(cloudstackTestCase):
accountid=self.account.name,
networkid=self.virtual_machine.nic[0].networkid,
domainid=self.account.domainid)
+ self.cleanup.append(self.lb_rule)
except Exception as e:
self.tearDown()
raise e
def tearDown(self):
- try:
- cleanup_resources(self.apiclient, self.cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestNetworkOperations, self).tearDown()
@classmethod
def tearDownClass(cls):
- try:
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestNetworkOperations, cls).tearDownClass()
@attr(tags=["advanced"], required_hardware="true")
def test_17_restart_router(self):
@@ -1887,20 +1884,11 @@ class TestExternalLoadBalancer(cloudstackTestCase):
raise e
def tearDown(self):
- try:
- cleanup_resources(self.apiclient, self.cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestExternalLoadBalancer, self).tearDown()
@classmethod
def tearDownClass(cls):
- try:
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestExternalLoadBalancer, cls).tearDownClass()
@attr(tags=["advancedns", "provisioning"], required_hardware="true")
def test_23_lb_rule_functioning_with_netscaler(self):
@@ -1921,6 +1909,7 @@ class TestExternalLoadBalancer(cloudstackTestCase):
self.apiclient,
self.testdata["nw_off_isolated_netscaler"]
)
+ self.cleanup.append(nwoff_netscaler)
# Enable Network offering
nwoff_netscaler.update(self.apiclient, state='Enabled')
# Creating a Network Using the Network Offering
@@ -1932,6 +1921,7 @@ class TestExternalLoadBalancer(cloudstackTestCase):
networkofferingid=nwoff_netscaler.id,
zoneid=self.zone.id
)
+ self.cleanup.append(network)
self.virtual_machine = VirtualMachine.create(
self.api_client,
@@ -1941,6 +1931,7 @@ class TestExternalLoadBalancer(cloudstackTestCase):
serviceofferingid=self.service_offering.id,
mode=self.zone.networktype,
networkids=[network.id])
+ self.cleanup.append(self.virtual_machine)
secondaryip = NIC.addIp(self.apiclient,
id=self.virtual_machine.nic[0].id)
@@ -1951,8 +1942,9 @@ class TestExternalLoadBalancer(cloudstackTestCase):
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=self.virtual_machine.nic[0].networkid)
+ self.cleanup.append(public_ip)
- FireWallRule.create(
+ fwr = FireWallRule.create(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
protocol='TCP',
@@ -1960,6 +1952,7 @@ class TestExternalLoadBalancer(cloudstackTestCase):
self.testdata["fwrule"]["cidr"]],
startport=self.testdata["fwrule"]["startport"],
endport=self.testdata["fwrule"]["endport"])
+ self.cleanup.append(fwr)
nat_rule = NATRule.create(
self.apiclient,
@@ -1967,6 +1960,7 @@ class TestExternalLoadBalancer(cloudstackTestCase):
self.testdata["natrule"],
ipaddressid=public_ip.ipaddress.id,
networkid=self.virtual_machine.nic[0].networkid)
+ self.cleanup.append(nat_rule)
sshClient = SshClient(public_ip.ipaddress.ipaddress,
self.testdata['natrule']["publicport"],
@@ -2002,6 +1996,7 @@ class TestExternalLoadBalancer(cloudstackTestCase):
accountid=self.account.name,
networkid=self.virtual_machine.nic[0].networkid,
domainid=self.account.domainid)
+ self.cleanup.append(lb_rule)
vmidipmap = [{"vmid": str(self.virtual_machine.id),
"vmip": str(secondaryip.ipaddress)}]
diff --git a/test/integration/component/test_multiple_ips_per_nic.py b/test/integration/component/test_multiple_ips_per_nic.py
index ea4ba96..1f4bc37 100644
--- a/test/integration/component/test_multiple_ips_per_nic.py
+++ b/test/integration/component/test_multiple_ips_per_nic.py
@@ -65,6 +65,7 @@ def createNetwork(self, networkType):
accountid=self.account.name,
domainid=self.account.domainid,
zoneid=self.zone.id)
+ self.cleanup.append(network)
except Exception as e:
self.fail("Isolated network creation failed because: %s" % e)
@@ -103,6 +104,7 @@ def createNetwork(self, networkType):
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid)
+ self.cleanup.append(vpc)
vpcs = VPC.list(self.apiclient, id=vpc.id)
self.assertEqual(
validateList(vpcs)[0],
@@ -120,6 +122,7 @@ def createNetwork(self, networkType):
vpcid=vpc.id,
gateway="10.1.1.1",
netmask="255.255.255.0")
+ self.cleanup.append(network)
return network
@@ -150,9 +153,10 @@ def createNetworkRules(
domainid=self.account.domainid,
networkid=network.id,
vpcid=network.vpcid if networktype == VPC_NETWORK else None)
+ self.cleanup.append(public_ip)
if networktype != VPC_NETWORK:
- FireWallRule.create(
+ fwr = FireWallRule.create(
self.apiclient,
ipaddressid=public_ip.ipaddress.id,
protocol='TCP',
@@ -160,15 +164,17 @@ def createNetworkRules(
self.services["fwrule"]["cidr"]],
startport=self.services["fwrule"]["startport"],
endport=self.services["fwrule"]["endport"])
+ self.cleanup.append(fwr)
if ruletype == "nat":
- NATRule.create(
+ nat_rule = NATRule.create(
self.api_client,
virtual_machine,
self.services["natrule"],
ipaddressid=public_ip.ipaddress.id,
networkid=network.id,
vmguestip=vmguestip)
+ self.cleanup.append(nat_rule)
elif ruletype == "staticnat":
StaticNATRule.enable(
self.apiclient,
@@ -243,12 +249,7 @@ class TestBasicOperations(cloudstackTestCase):
@classmethod
def tearDownClass(cls):
- try:
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestBasicOperations, cls).tearDownClass()
def setUp(self):
self.apiclient = self.testClient.getApiClient()
@@ -257,13 +258,7 @@ class TestBasicOperations(cloudstackTestCase):
return
def tearDown(self):
- try:
- # Clean up, terminate the resources created
- cleanup_resources(self.apiclient, self.cleanup)
- self.cleanup[:] = []
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestBasicOperations, self).tearDown()
def VerifyStaticNatForPublicIp(self, ipaddressid, natrulestatus):
""" List public IP and verify that NAT rule status for the IP is as desired """
@@ -321,6 +316,7 @@ class TestBasicOperations(cloudstackTestCase):
serviceofferingid=self.service_offering.id,
accountid=self.account.name,
domainid=self.account.domainid)
+ self.cleanup.append(virtual_machine)
ipaddress_1 = NIC.addIp(
self.apiclient,
@@ -396,6 +392,7 @@ class TestBasicOperations(cloudstackTestCase):
serviceofferingid=self.service_offering.id,
accountid=self.account.name,
domainid=self.account.domainid)
+ self.cleanup.append(virtual_machine)
ipaddress_1 = NIC.addIp(
self.apiclient,
@@ -506,6 +503,7 @@ class TestBasicOperations(cloudstackTestCase):
serviceofferingid=self.service_offering.id,
accountid=self.account.name,
domainid=self.account.domainid)
+ self.cleanup.append(virtual_machine)
NIC.addIp(self.apiclient, id=virtual_machine.nic[0].id)
@@ -600,13 +598,13 @@ class TestBasicOperations(cloudstackTestCase):
self.apiclient,
services=self.services["domain"],
parentdomainid=self.domain.id)
+ self.cleanup.append(child_domain)
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=child_domain.id)
self.cleanup.append(self.account)
- self.cleanup.append(child_domain)
apiclient = self.testClient.getUserApiClient(
UserName=self.account.name,
@@ -622,6 +620,7 @@ class TestBasicOperations(cloudstackTestCase):
serviceofferingid=self.service_offering.id,
accountid=self.account.name,
domainid=self.account.domainid)
+ self.cleanup.append(virtual_machine)
ipaddress_1 = NIC.addIp(apiclient, id=virtual_machine.nic[0].id)
@@ -707,18 +706,13 @@ class TestNetworkRules(cloudstackTestCase):
cls.vpc_off = VpcOffering.create(
cls.api_client,
cls.services["vpc_offering"])
- cls.vpc_off.update(cls.api_client, state='Enabled')
cls._cleanup.append(cls.vpc_off)
+ cls.vpc_off.update(cls.api_client, state='Enabled')
return
@classmethod
def tearDownClass(cls):
- try:
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestNetworkRules, cls).tearDownClass()
def setUp(self):
self.apiclient = self.testClient.getApiClient()
@@ -727,13 +721,7 @@ class TestNetworkRules(cloudstackTestCase):
return
def tearDown(self):
- try:
- # Clean up, terminate the resources created
- cleanup_resources(self.apiclient, self.cleanup)
- self.cleanup[:] = []
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestNetworkRules, self).tearDown()
def VerifyStaticNatForPublicIp(self, ipaddressid, natrulestatus):
""" List public IP and verify that NAT rule status for the IP is as desired """
@@ -795,6 +783,7 @@ class TestNetworkRules(cloudstackTestCase):
serviceofferingid=self.service_offering.id,
accountid=self.account.name,
domainid=self.account.domainid)
+ self.cleanup.append(virtual_machine)
ipaddress_1 = NIC.addIp(
self.apiclient,
@@ -884,6 +873,7 @@ class TestNetworkRules(cloudstackTestCase):
serviceofferingid=self.service_offering.id,
accountid=self.account.name,
domainid=self.account.domainid)
+ self.cleanup.append(virtual_machine)
ipaddress_1 = NIC.addIp(
self.apiclient,
@@ -896,6 +886,7 @@ class TestNetworkRules(cloudstackTestCase):
domainid=self.account.domainid,
networkid=network.id,
vpcid=network.vpcid if value == VPC_NETWORK else None)
+ self.cleanup.append(public_ip)
if value != VPC_NETWORK:
firewallrule = FireWallRule.create(
@@ -906,6 +897,7 @@ class TestNetworkRules(cloudstackTestCase):
self.services["fwrule"]["cidr"]],
startport=self.services["fwrule"]["startport"],
endport=self.services["fwrule"]["endport"])
+ self.cleanup.append(firewallrule)
# Create NAT rule
natrule = NATRule.create(
@@ -915,6 +907,7 @@ class TestNetworkRules(cloudstackTestCase):
ipaddressid=public_ip.ipaddress.id,
networkid=network.id,
vmguestip=ipaddress_1.ipaddress)
+ self.cleanup.append(natrule)
try:
NIC.removeIp(self.apiclient, ipaddressid=ipaddress_1.id)
self.fail(
@@ -926,12 +919,14 @@ class TestNetworkRules(cloudstackTestCase):
if firewallrule:
try:
firewallrule.delete(self.apiclient)
+ self.cleanup.remove(firewallrule)
except Exception as e:
self.fail(
"Exception while deleting firewall rule %s: %s" %
(firewallrule.id, e))
natrule.delete(self.apiclient)
+ self.cleanup.remove(natrule)
return
@data(ISOLATED_NETWORK, SHARED_NETWORK, VPC_NETWORK)
diff --git a/test/integration/component/test_multiple_nic_support.py b/test/integration/component/test_multiple_nic_support.py
index fc1c1f4..fb5d5e4 100644
--- a/test/integration/component/test_multiple_nic_support.py
+++ b/test/integration/component/test_multiple_nic_support.py
@@ -89,31 +89,30 @@ class TestMulipleNicSupport(cloudstackTestCase):
cls.apiclient,
services=cls.testdata["acl"]["domain2"],
parentdomainid=cls.domain.id)
+ cls._cleanup.append(cls.user_domain)
- # Create account
cls.account1 = Account.create(
cls.apiclient,
cls.testdata["acl"]["accountD2"],
admin=True,
domainid=cls.user_domain.id
)
+ cls._cleanup.append(cls.account1)
- # Create small service offering
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.testdata["service_offerings"]["small"]
)
-
cls._cleanup.append(cls.service_offering)
+
cls.services["network"]["zoneid"] = cls.zone.id
cls.network_offering = NetworkOffering.create(
cls.apiclient,
cls.services["network_offering"],
)
- # Enable Network offering
+ cls._cleanup.append(cls.network_offering)
cls.network_offering.update(cls.apiclient, state='Enabled')
- cls._cleanup.append(cls.network_offering)
cls.testdata["virtual_machine"]["zoneid"] = cls.zone.id
cls.testdata["virtual_machine"]["template"] = cls.template.id
@@ -125,6 +124,7 @@ class TestMulipleNicSupport(cloudstackTestCase):
account=cls.account1.name,
domainid=cls.account1.domainid
)
+ cls._cleanup.append(security_group)
# Authorize Security group to SSH to VM
ingress_rule = security_group.authorize(
@@ -149,6 +149,7 @@ class TestMulipleNicSupport(cloudstackTestCase):
cls.testdata["shared_network_offering_sg"],
conservemode=False
)
+ cls._cleanup.append(cls.shared_network_offering)
NetworkOffering.update(
cls.shared_network_offering,
@@ -175,6 +176,7 @@ class TestMulipleNicSupport(cloudstackTestCase):
accountid=cls.account1.name,
domainid=cls.account1.domainid
)
+ cls._cleanup.append(cls.network1)
random_subnet_number = random.randrange(100, 110)
cls.testdata["shared_network_sg"]["name"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
@@ -191,6 +193,7 @@ class TestMulipleNicSupport(cloudstackTestCase):
accountid=cls.account1.name,
domainid=cls.account1.domainid
)
+ cls._cleanup.append(cls.network2)
random_subnet_number = random.randrange(111, 120)
cls.testdata["shared_network_sg"]["name"] = "Shared-Network-SG-Test-vlan" + str(random_subnet_number)
@@ -207,6 +210,7 @@ class TestMulipleNicSupport(cloudstackTestCase):
accountid=cls.account1.name,
domainid=cls.account1.domainid
)
+ cls._cleanup.append(cls.network3)
try:
cls.virtual_machine1 = VirtualMachine.create(
@@ -219,6 +223,7 @@ class TestMulipleNicSupport(cloudstackTestCase):
securitygroupids=[security_group.id],
networkids=cls.network1.id
)
+ cls._cleanup.append(cls.virtual_machine1)
for nic in cls.virtual_machine1.nic:
if nic.isdefault:
cls.virtual_machine1.ssh_ip = nic.ipaddress
@@ -238,6 +243,7 @@ class TestMulipleNicSupport(cloudstackTestCase):
securitygroupids=[security_group.id],
networkids=[str(cls.network1.id), str(cls.network2.id)]
)
+ cls._cleanup.append(cls.virtual_machine2)
for nic in cls.virtual_machine2.nic:
if nic.isdefault:
cls.virtual_machine2.ssh_ip = nic.ipaddress
@@ -246,24 +252,10 @@ class TestMulipleNicSupport(cloudstackTestCase):
except Exception as e:
cls.fail("Exception while deploying virtual machine: %s" % {e})
- cls._cleanup.append(cls.virtual_machine1)
- cls._cleanup.append(cls.virtual_machine2)
- cls._cleanup.append(cls.network1)
- cls._cleanup.append(cls.network2)
- cls._cleanup.append(cls.network3)
- cls._cleanup.append(cls.shared_network_offering)
- if cls.zone.securitygroupsenabled:
- cls._cleanup.append(security_group)
- cls._cleanup.append(cls.account1)
- cls._cleanup.append(cls.user_domain)
@classmethod
def tearDownClass(self):
- try:
- cleanup_resources(self.apiclient, self._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestMulipleNicSupport, self).tearDownClass()
def setUp(self):
if self.skip:
@@ -273,11 +265,7 @@ class TestMulipleNicSupport(cloudstackTestCase):
return
def tearDown(self):
- try:
- cleanup_resources(self.apiclient, self.cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestMulipleNicSupport, self).tearDown()
def verify_network_rules(self, vm_id):
virtual_machine = VirtualMachine.list(
@@ -305,6 +293,7 @@ class TestMulipleNicSupport(cloudstackTestCase):
host.password,
command)
if len(result) > 0:
+ self.logger.debug(f"the verification of the ip tables rules returned : {result}")
self.fail("The iptables/ebtables rules for nic %s on vm %s on host %s are not correct" %(nic.ipaddress, vm.instancename, host.name))
@attr(tags=["advancedsg"], required_hardware="false")
diff --git a/test/integration/component/test_multiple_public_interfaces.py b/test/integration/component/test_multiple_public_interfaces.py
index 91db55f..63c5d8d 100644
--- a/test/integration/component/test_multiple_public_interfaces.py
+++ b/test/integration/component/test_multiple_public_interfaces.py
@@ -22,9 +22,6 @@
# Import Local Modules
from marvin.codes import (FAILED)
from marvin.cloudstackTestCase import cloudstackTestCase
-from marvin.cloudstackException import CloudstackAPIException
-from marvin.cloudstackAPI import rebootRouter
-from marvin.sshClient import SshClient
from marvin.lib.utils import cleanup_resources, get_process_status
from marvin.lib.base import (Account,
VirtualMachine,
@@ -48,10 +45,9 @@ from marvin.lib.common import (get_domain,
list_hosts,
list_routers)
from nose.plugins.attrib import attr
-from ddt import ddt, data
+
# Import System modules
import socket
-import time
import logging
_multiprocess_shared_ = True
@@ -61,6 +57,118 @@ stream_handler = logging.StreamHandler()
logger.setLevel(logging.DEBUG)
logger.addHandler(stream_handler)
+class Services:
+ """Test multiple public interfaces
+ """
+
+ def __init__(self):
+ self.services = {
+ "account": {
+ "email": "test@test.com",
+ "firstname": "Test",
+ "lastname": "User",
+ "username": "test",
+ # Random characters are appended for unique
+ # username
+ "password": "password",
+ },
+ "domain_admin": {
+ "email": "domain@admin.com",
+ "firstname": "Domain",
+ "lastname": "Admin",
+ "username": "DoA",
+ # Random characters are appended for unique
+ # username
+ "password": "password",
+ },
+ "service_offering": {
+ "name": "Tiny Instance",
+ "displaytext": "Tiny Instance",
+ "cpunumber": 1,
+ "cpuspeed": 100,
+ "memory": 128,
+ },
+ "publiciprange": {
+ "gateway": "10.6.0.254",
+ "netmask": "255.255.255.0",
+ "startip": "10.6.0.2",
+ "endip": "10.6.0.20",
+ "forvirtualnetwork": "true",
+ "vlan": "300"
+ },
+ "extrapubliciprange": {
+ "gateway": "10.200.100.1",
+ "netmask": "255.255.255.0",
+ "startip": "10.200.100.101",
+ "endip": "10.200.100.105",
+ "forvirtualnetwork": "false",
+ "vlan": "301"
+ },
+ "network_offering": {
+ "name": 'VPC Network offering',
+ "displaytext": 'VPC Network off',
+ "guestiptype": 'Isolated',
+ "supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,Lb,UserData,StaticNat,NetworkACL',
+ "traffictype": 'GUEST',
+ "availability": 'Optional',
+ "useVpc": 'on',
+ "serviceProviderList": {
+ "Vpn": 'VpcVirtualRouter',
+ "Dhcp": 'VpcVirtualRouter',
+ "Dns": 'VpcVirtualRouter',
+ "SourceNat": 'VpcVirtualRouter',
+ "PortForwarding": 'VpcVirtualRouter',
+ "Lb": 'VpcVirtualRouter',
+ "UserData": 'VpcVirtualRouter',
+ "StaticNat": 'VpcVirtualRouter',
+ "NetworkACL": 'VpcVirtualRouter'
+ },
+ },
+ "virtual_machine": {
+ "displayname": "Test VM",
+ "username": "root",
+ "password": "password",
+ "ssh_port": 22,
+ "privateport": 22,
+ "publicport": 22,
+ "protocol": "TCP",
+ "affinity": {
+ "name": "webvms",
+ "type": "host anti-affinity",
+ }
+ },
+ "vpc_offering": {
+ "name": 'VPC off',
+ "displaytext": 'VPC off',
+ "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat',
+ },
+ "vpc": {
+ "name": "TestVPC",
+ "displaytext": "TestVPC",
+ "cidr": '10.0.0.1/24'
+ },
+ "network": {
+ "name": "Test Network",
+ "displaytext": "Test Network",
+ "netmask": '255.255.255.0'
+ },
+ "natrule": {
+ "privateport": 22,
+ "publicport": 22,
+ "startport": 22,
+ "endport": 22,
+ "protocol": "TCP",
+ "cidrlist": '0.0.0.0/0',
+ },
+ "ostype": "CentOS 5.6 (64-bit)",
+ "sleep": 60,
+ "timeout": 10,
+ "vlan": "10",
+ "zoneid": '',
+ "mode": 'advanced'
+ }
+
+
class TestPortForwarding(cloudstackTestCase):
@classmethod
@@ -68,13 +176,16 @@ class TestPortForwarding(cloudstackTestCase):
testClient = super(TestPortForwarding, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
- cls.services = testClient.getParsedTestDataConfig()
+ cls.services = Services().services
cls.hypervisor = testClient.getHypervisorInfo()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
- cls.services["virtual_machine"]["zoneid"] = cls.zone.id
+ # cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["zoneid"] = cls.zone.id
+ cls.services["publiciprange"]["zoneid"] = cls.zone.id
+ cls._cleanup = []
+
template = get_template(
cls.apiclient,
cls.zone.id,
@@ -84,31 +195,28 @@ class TestPortForwarding(cloudstackTestCase):
assert False, "get_template() failed to return template with description %s" % cls.services[
"ostype"]
- # Create an account, network, VM and IP addresses
cls.account = Account.create(
cls.apiclient,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
- cls.services["publiciprange"]["zoneid"] = cls.zone.id
+ cls._cleanup.append(cls.account)
cls.service_offering = ServiceOffering.create(
cls.apiclient,
- cls.services["service_offerings"]["tiny"]
+ cls.services["service_offering"]
)
+ cls._cleanup.append(cls.service_offering)
cls.virtual_machine = VirtualMachine.create(
cls.apiclient,
cls.services["virtual_machine"],
+ zoneid = cls.services["zoneid"],
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id
)
- cls._cleanup = [
- cls.virtual_machine,
- cls.account,
- cls.service_offering
- ]
+ cls._cleanup.append(cls.virtual_machine)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
@@ -117,19 +225,12 @@ class TestPortForwarding(cloudstackTestCase):
@classmethod
def tearDownClass(cls):
- try:
- cls.apiclient = super(
- TestPortForwarding,
- cls).getClsTestClient().getApiClient()
- cleanup_resources(cls.apiclient, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
+ super(TestPortForwarding, cls).tearDownClass()
def tearDown(self):
- cleanup_resources(self.apiclient, self.cleanup)
- return
+ super(TestPortForwarding, self).tearDown()
- @attr(tags=["advanced", "smoke"], required_hardware="true")
+ @attr(tags=["advancedsg", "smoke"], required_hardware="true")
def test_port_forwarding_on_ip_from_non_src_nat_ip_range(self):
"""Test for port forwarding on a IP which is in pubic IP range different
from public IP range that has source NAT IP associated with network
@@ -142,10 +243,12 @@ class TestPortForwarding(cloudstackTestCase):
# 4. Create a firewall rule to open up the port
# 5. Test SSH works to the VM
+ self.services["extrapubliciprange"]["zoneid"] = self.services["zoneid"]
self.public_ip_range = PublicIpRange.create(
self.apiclient,
- self.services["publiciprange"]
+ self.services["extrapubliciprange"]
)
+ self.cleanup.append(self.public_ip_range)
logger.debug("Dedicating Public IP range to the account");
dedicate_public_ip_range_response = PublicIpRange.dedicate(
@@ -162,7 +265,6 @@ class TestPortForwarding(cloudstackTestCase):
self.services["virtual_machine"]
)
self.cleanup.append(ip_address)
- self.cleanup.append(self.public_ip_range)
# Check if VM is in Running state before creating NAT and firewall rules
vm_response = VirtualMachine.list(
self.apiclient,
@@ -187,7 +289,7 @@ class TestPortForwarding(cloudstackTestCase):
)
# Open up firewall port for SSH
- FireWallRule.create(
+ fwr = FireWallRule.create(
self.apiclient,
ipaddressid=ip_address.ipaddress.id,
protocol=self.services["natrule"]["protocol"],
@@ -195,6 +297,7 @@ class TestPortForwarding(cloudstackTestCase):
startport=self.services["natrule"]["publicport"],
endport=self.services["natrule"]["publicport"]
)
+ self.cleanup.append(fwr)
# Create PF rule
nat_rule = NATRule.create(
@@ -223,15 +326,14 @@ class TestStaticNat(cloudstackTestCase):
@classmethod
def setUpClass(cls):
-
testClient = super(TestStaticNat, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
- cls.services = testClient.getParsedTestDataConfig()
+ cls.services = Services().services
cls.hypervisor = testClient.getHypervisorInfo()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
- cls.services["virtual_machine"]["zoneid"] = cls.zone.id
+ # cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["zoneid"] = cls.zone.id
template = get_template(
cls.apiclient,
@@ -241,33 +343,32 @@ class TestStaticNat(cloudstackTestCase):
if template == FAILED:
assert False, "get_template() failed to return template with description %s" % cls.services[
"ostype"]
+ cls._cleanup = []
- # Create an account, network, VM and IP addresses
cls.account = Account.create(
cls.apiclient,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
+ cls._cleanup.append(cls.account)
cls.services["publiciprange"]["zoneid"] = cls.zone.id
cls.service_offering = ServiceOffering.create(
cls.apiclient,
- cls.services["service_offerings"]["tiny"]
+ cls.services["service_offering"]
)
+ cls._cleanup.append(cls.service_offering)
cls.virtual_machine = VirtualMachine.create(
cls.apiclient,
cls.services["virtual_machine"],
+ zoneid = cls.services["zoneid"],
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id
)
+ cls._cleanup.append(cls.virtual_machine)
cls.defaultNetworkId = cls.virtual_machine.nic[0].networkid
- cls._cleanup = [
- cls.virtual_machine,
- cls.account,
- cls.service_offering
- ]
def setUp(self):
self.apiclient = self.testClient.getApiClient()
@@ -276,19 +377,12 @@ class TestStaticNat(cloudstackTestCase):
@classmethod
def tearDownClass(cls):
- try:
- cls.apiclient = super(
- TestStaticNat,
- cls).getClsTestClient().getApiClient()
- cleanup_resources(cls.apiclient, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
+ super(TestStaticNat, cls).tearDownClass()
def tearDown(self):
- cleanup_resources(self.apiclient, self.cleanup)
- return
+ super(TestStaticNat, self).tearDown()
- @attr(tags=["advanced", "smoke"], required_hardware="true")
+ @attr(tags=["advancedsg", "smoke"], required_hardware="true")
def test_static_nat_on_ip_from_non_src_nat_ip_range(self):
"""Test for static nat on a IP which is in pubic IP range different
from public IP range that has source NAT IP associated with network
@@ -301,10 +395,12 @@ class TestStaticNat(cloudstackTestCase):
# 4. Create a firewall rule to open up the port
# 5. Test SSH works to the VM
+ self.services["extrapubliciprange"]["zoneid"] = self.services["zoneid"]
self.public_ip_range = PublicIpRange.create(
self.apiclient,
- self.services["publiciprange"]
+ self.services["extrapubliciprange"]
)
+ self.cleanup.append(self.public_ip_range)
logger.debug("Dedicating Public IP range to the account");
dedicate_public_ip_range_response = PublicIpRange.dedicate(
self.apiclient,
@@ -320,7 +416,6 @@ class TestStaticNat(cloudstackTestCase):
self.services["virtual_machine"]
)
self.cleanup.append(ip_address)
- self.cleanup.append(self.public_ip_range)
# Check if VM is in Running state before creating NAT and firewall rules
vm_response = VirtualMachine.list(
self.apiclient,
@@ -345,7 +440,7 @@ class TestStaticNat(cloudstackTestCase):
)
# Open up firewall port for SSH
- FireWallRule.create(
+ fwr = FireWallRule.create(
self.apiclient,
ipaddressid=ip_address.ipaddress.id,
protocol=self.services["natrule"]["protocol"],
@@ -353,6 +448,7 @@ class TestStaticNat(cloudstackTestCase):
startport=self.services["natrule"]["publicport"],
endport=self.services["natrule"]["publicport"]
)
+ self.cleanup.append(fwr)
# Create Static NAT rule
StaticNATRule.enable(
@@ -388,13 +484,14 @@ class TestRouting(cloudstackTestCase):
testClient = super(TestRouting, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
- cls.services = testClient.getParsedTestDataConfig()
+ cls.services = Services().services
cls.hypervisor = testClient.getHypervisorInfo()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
- cls.services["virtual_machine"]["zoneid"] = cls.zone.id
+ # cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["zoneid"] = cls.zone.id
+ cls._cleanup = []
template = get_template(
cls.apiclient,
cls.zone.id,
@@ -404,32 +501,30 @@ class TestRouting(cloudstackTestCase):
assert False, "get_template() failed to return template with description %s" % cls.services[
"ostype"]
- # Create an account, network, VM and IP addresses
cls.account = Account.create(
cls.apiclient,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
+ cls._cleanup.append(cls.account)
cls.services["publiciprange"]["zoneid"] = cls.zone.id
cls.service_offering = ServiceOffering.create(
cls.apiclient,
- cls.services["service_offerings"]["tiny"]
+ cls.services["service_offering"]
)
+ cls._cleanup.append(cls.service_offering)
cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][0].__dict__
cls.virtual_machine = VirtualMachine.create(
cls.apiclient,
cls.services["virtual_machine"],
+ zoneid = cls.services["zoneid"],
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id
)
- cls._cleanup = [
- cls.virtual_machine,
- cls.account,
- cls.service_offering
- ]
+ cls._cleanup.append(cls.virtual_machine)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
@@ -438,19 +533,12 @@ class TestRouting(cloudstackTestCase):
@classmethod
def tearDownClass(cls):
- try:
- cls.apiclient = super(
- TestRouting,
- cls).getClsTestClient().getApiClient()
- cleanup_resources(cls.apiclient, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
+ super(TestRouting, cls).tearDownClass()
def tearDown(self):
- cleanup_resources(self.apiclient, self.cleanup)
- return
+ super(TestRouting, self).tearDown()
- @attr(tags=["advanced", "smoke"], required_hardware="true")
+ @attr(tags=["advancedsg", "smoke"], required_hardware="true")
def test_routing_tables(self):
"""Test routing table in case we have IP associated with a network which is in
different pubic IP range from that of public IP range that has source NAT IP.
@@ -465,11 +553,12 @@ class TestRouting(cloudstackTestCase):
# 5. Login to VR and verify routing tables, there should be Table_eth3
# 6. Delete firewall rule, since its last IP, routing table Table_eth3 should be deleted
+ self.services["extrapubliciprange"]["zoneid"] = self.services["zoneid"]
self.public_ip_range = PublicIpRange.create(
self.apiclient,
- self.services["publiciprange"]
+ self.services["extrapubliciprange"]
)
- self._cleanup.append(self.public_ip_range)
+ self.cleanup.append(self.public_ip_range)
logger.debug("Dedicating Public IP range to the account");
dedicate_public_ip_range_response = PublicIpRange.dedicate(
@@ -486,7 +575,7 @@ class TestRouting(cloudstackTestCase):
self.services["virtual_machine"]
)
self.cleanup.append(ip_address)
- self.cleanup.append(self.public_ip_range)
+
# Check if VM is in Running state before creating NAT and firewall rules
vm_response = VirtualMachine.list(
self.apiclient,
@@ -519,6 +608,7 @@ class TestRouting(cloudstackTestCase):
startport=self.services["natrule"]["publicport"],
endport=self.services["natrule"]["publicport"]
)
+ self.cleanup.append(firewall_rule)
# Get the router details associated with account
routers = list_routers(
@@ -585,6 +675,7 @@ class TestRouting(cloudstackTestCase):
)
firewall_rule.delete(self.apiclient)
+ self.cleanup.remove(firewall_rule)
if (self.hypervisor.lower() == 'vmware'
or self.hypervisor.lower() == 'hyperv'):
@@ -646,13 +737,14 @@ class TestIptables(cloudstackTestCase):
testClient = super(TestIptables, cls).getClsTestClient()
cls.apiclient = testClient.getApiClient()
- cls.services = testClient.getParsedTestDataConfig()
+ cls.services = Services().services
cls.hypervisor = testClient.getHypervisorInfo()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
- cls.services["virtual_machine"]["zoneid"] = cls.zone.id
+ # cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["zoneid"] = cls.zone.id
+
template = get_template(
cls.apiclient,
cls.zone.id,
@@ -662,32 +754,31 @@ class TestIptables(cloudstackTestCase):
assert False, "get_template() failed to return template with description %s" % cls.services[
"ostype"]
- # Create an account, network, VM and IP addresses
+ cls._cleanup = []
cls.account = Account.create(
cls.apiclient,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
+ cls._cleanup.append(cls.account)
cls.services["publiciprange"]["zoneid"] = cls.zone.id
cls.service_offering = ServiceOffering.create(
cls.apiclient,
- cls.services["service_offerings"]["tiny"]
+ cls.services["service_offering"]
)
+ cls._cleanup.append(cls.service_offering)
cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][0].__dict__["clusters"][0].__dict__["hosts"][0].__dict__
cls.virtual_machine = VirtualMachine.create(
cls.apiclient,
cls.services["virtual_machine"],
+ zoneid = cls.services["zoneid"],
templateid=template.id,
accountid=cls.account.name,
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id
)
- cls._cleanup = [
- cls.virtual_machine,
- cls.account,
- cls.service_offering
- ]
+ cls._cleanup.append(cls.virtual_machine)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
@@ -696,19 +787,12 @@ class TestIptables(cloudstackTestCase):
@classmethod
def tearDownClass(cls):
- try:
- cls.apiclient = super(
- TestIptables,
- cls).getClsTestClient().getApiClient()
- cleanup_resources(cls.apiclient, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
+ super(TestIptables, cls).tearDownClass()
def tearDown(self):
- cleanup_resources(self.apiclient, self.cleanup)
- return
+ super(TestIptables, self).tearDown()
- @attr(tags=["advanced", "smoke"], required_hardware="true")
+ @attr(tags=["advancedsg", "smoke"], required_hardware="true")
def test_iptable_rules(self):
"""Test iptable rules in case we have IP associated with a network which is in
different pubic IP range from that of public IP range that has source NAT IP.
@@ -723,11 +807,12 @@ class TestIptables(cloudstackTestCase):
# 5. Login to VR and verify routing tables, there should be Table_eth3
# 6. Delete firewall rule, since its last IP, routing table Table_eth3 should be deleted
+ self.services["extrapubliciprange"]["zoneid"] = self.services["zoneid"]
self.public_ip_range = PublicIpRange.create(
self.apiclient,
- self.services["publiciprange"]
+ self.services["extrapubliciprange"]
)
- self._cleanup.append(self.public_ip_range)
+ self.cleanup.append(self.public_ip_range)
logger.debug("Dedicating Public IP range to the account");
dedicate_public_ip_range_response = PublicIpRange.dedicate(
@@ -776,6 +861,7 @@ class TestIptables(cloudstackTestCase):
startport=self.services["natrule"]["publicport"],
endport=self.services["natrule"]["publicport"]
)
+ self.cleanup.append(firewall_rule)
# Get the router details associated with account
routers = list_routers(
self.apiclient,
@@ -831,18 +917,15 @@ class TestIptables(cloudstackTestCase):
"Check to ensure there is a iptable rule to accept the RELATED,ESTABLISHED traffic"
)
firewall_rule.delete(self.apiclient)
+ self.cleanup.remove(firewall_rule)
class TestVPCPortForwarding(cloudstackTestCase):
@classmethod
def setUpClass(cls):
-
socket.setdefaulttimeout(60)
-
- testClient = super(TestVPCPortForwarding, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
-
- cls.services = testClient.getParsedTestDataConfig()
+ cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
@@ -852,35 +935,6 @@ class TestVPCPortForwarding(cloudstackTestCase):
cls.zone.id,
cls.services["ostype"]
)
- cls.services["vpc_offering"] = { "name": 'VPC off',
- "displaytext": 'VPC off',
- "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat',
- }
- cls.services["network_offering"] = {
- "name": 'VPC Network offering',
- "displaytext": 'VPC Network off',
- "guestiptype": 'Isolated',
- "supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,Lb,UserData,StaticNat,NetworkACL',
- "traffictype": 'GUEST',
- "availability": 'Optional',
- "useVpc": 'on',
- "serviceProviderList": {
- "Vpn": 'VpcVirtualRouter',
- "Dhcp": 'VpcVirtualRouter',
- "Dns": 'VpcVirtualRouter',
- "SourceNat": 'VpcVirtualRouter',
- "PortForwarding": 'VpcVirtualRouter',
- "Lb": 'VpcVirtualRouter',
- "UserData": 'VpcVirtualRouter',
- "StaticNat": 'VpcVirtualRouter',
- "NetworkACL": 'VpcVirtualRouter'
- },
- }
- cls.services["network"] = {
- "name": "Test Network",
- "displaytext": "Test Network",
- "netmask": '255.255.255.0'
- }
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.services["publiciprange"]["zoneid"] = cls.zone.id
@@ -895,29 +949,24 @@ class TestVPCPortForwarding(cloudstackTestCase):
@classmethod
def tearDownClass(cls):
- try:
- #Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
-
+ super(TestVPCPortForwarding, cls).tearDownClass()
def setUp(self):
self.apiclient = self.testClient.getApiClient()
+ self.cleanup = []
self.account = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.domain.id
)
- self.cleanup = [self.account]
+ self.cleanup.append(self.account)
logger.debug("Creating a VPC offering..")
self.vpc_off = VpcOffering.create(
self.apiclient,
self.services["vpc_offering"]
)
- self._cleanup.append(self.vpc_off)
+ self.cleanup.append(self.vpc_off)
logger.debug("Enabling the VPC offering created")
self.vpc_off.update(self.apiclient, state='Enabled')
@@ -931,29 +980,11 @@ class TestVPCPortForwarding(cloudstackTestCase):
account=self.account.name,
domainid=self.account.domainid
)
+ self.cleanup.append(self.vpc)
return
def tearDown(self):
- try:
- #Clean up, terminate the created network offerings
- cleanup_resources(self.apiclient, self.cleanup)
- except Exception as e:
- logger.debug("Warning: Exception during cleanup : %s" % e)
- return
-
- def check_ssh_into_vm(self, vm, public_ip, testnegative=False):
- logger.debug("Checking if we can SSH into VM=%s on public_ip=%s" % (vm.name, public_ip.ipaddress.ipaddress))
- try:
- vm.get_ssh_client(ipaddress=public_ip.ipaddress.ipaddress)
- if not testnegative:
- logger.debug("SSH into VM=%s on public_ip=%s is successfully" % (vm.name, public_ip.ipaddress.ipaddress))
- else:
- self.fail("SSH into VM=%s on public_ip=%s is successfully" % (vm.name, public_ip.ipaddress.ipaddress))
- except:
- if not testnegative:
- self.fail("Failed to SSH into VM - %s" % (public_ip.ipaddress.ipaddress))
- else:
- logger.debug("Failed to SSH into VM - %s" % (public_ip.ipaddress.ipaddress))
+ super(TestVPCPortForwarding, self).tearDown()
def create_natrule(self, vm, public_ip, network, services=None):
logger.debug("Creating NAT rule in network for vm with public IP")
@@ -967,6 +998,7 @@ class TestVPCPortForwarding(cloudstackTestCase):
networkid=network.id,
vpcid=self.vpc.id
)
+ self.cleanup.append(nat_rule)
return nat_rule
def acquire_publicip(self, network):
@@ -978,42 +1010,12 @@ class TestVPCPortForwarding(cloudstackTestCase):
networkid=network.id,
vpcid=self.vpc.id
)
+ self.cleanup.append(public_ip)
logger.debug("Associated %s with network %s" % (public_ip.ipaddress.ipaddress,
network.id
))
return public_ip
- def create_network(self, net_offerring, gateway='10.1.1.1',vpc=None):
- try:
- logger.debug('Create NetworkOffering')
- net_offerring["name"] = "NET_OFF-" + str(gateway)
- nw_off = NetworkOffering.create(self.apiclient,
- net_offerring,
- conservemode=False
- )
- # Enable Network offering
- nw_off.update(self.apiclient, state='Enabled')
- self._cleanup.append(nw_off)
- logger.debug('Created and Enabled NetworkOffering')
-
- self.services["network"]["name"] = "NETWORK-" + str(gateway)
- logger.debug('Adding Network=%s' % self.services["network"])
- default_acl = NetworkACLList.list(self.apiclient, name="default_allow")[0]
- obj_network = Network.create(self.apiclient,
- self.services["network"],
- accountid=self.account.name,
- domainid=self.account.domainid,
- networkofferingid=nw_off.id,
- zoneid=self.zone.id,
- gateway=gateway,
- aclid=default_acl.id,
- vpcid=vpc.id if vpc else self.vpc.id
- )
- logger.debug("Created network with ID: %s" % obj_network.id)
- return obj_network
- except Exception as e:
- self.fail('Unable to create a Network with offering=%s because of %s ' % (net_offerring, e))
-
def deployvm_in_network(self, network, host_id=None):
try:
logger.debug('Creating VM in network=%s' % network.name)
@@ -1026,13 +1028,14 @@ class TestVPCPortForwarding(cloudstackTestCase):
networkids=[str(network.id)],
hostid=host_id
)
+ self.cleanup.append(vm)
logger.debug('Created VM=%s in network=%s' % (vm.id, network.name))
return vm
except:
self.fail('Unable to create VM in a Network=%s' % network.name)
- @attr(tags=["advanced", "intervlan"], required_hardware="true")
+ @attr(tags=["advancedsg", "intervlan"], required_hardware="true")
def test_network_services_VPC_CreatePF(self):
""" Test Create VPC PF rules on acquired public ip when VpcVirtualRouter is Running
"""
@@ -1047,11 +1050,12 @@ class TestVPCPortForwarding(cloudstackTestCase):
network_1 = self.create_network(self.services["network_offering"])
vm_1 = self.deployvm_in_network(network_1)
+ self.services["extrapubliciprange"]["zoneid"] = self.services["zoneid"]
self.public_ip_range = PublicIpRange.create(
self.apiclient,
- self.services["publiciprange"]
+ self.services["extrapubliciprange"]
)
- self._cleanup.append(self.public_ip_range)
+ self.cleanup.append(self.public_ip_range)
logger.debug("Dedicating Public IP range to the account");
dedicate_public_ip_range_response = PublicIpRange.dedicate(
self.apiclient,
@@ -1063,6 +1067,7 @@ class TestVPCPortForwarding(cloudstackTestCase):
self.create_natrule( vm_1, public_ip_1, network_1)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
self.public_ip_range.release(self.apiclient)
+ self.cleanup.remove(self.public_ip_range)
return
class TestVPCStaticNat(cloudstackTestCase):
@@ -1074,8 +1079,7 @@ class TestVPCStaticNat(cloudstackTestCase):
testClient = super(TestVPCStaticNat, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
-
- cls.services = testClient.getParsedTestDataConfig()
+ cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
@@ -1085,72 +1089,38 @@ class TestVPCStaticNat(cloudstackTestCase):
cls.zone.id,
cls.services["ostype"]
)
- cls.services["vpc_offering"] = { "name": 'VPC off',
- "displaytext": 'VPC off',
- "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat',
- }
- cls.services["network_offering"] = {
- "name": 'VPC Network offering',
- "displaytext": 'VPC Network off',
- "guestiptype": 'Isolated',
- "supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,Lb,UserData,StaticNat,NetworkACL',
- "traffictype": 'GUEST',
- "availability": 'Optional',
- "useVpc": 'on',
- "serviceProviderList": {
- "Vpn": 'VpcVirtualRouter',
- "Dhcp": 'VpcVirtualRouter',
- "Dns": 'VpcVirtualRouter',
- "SourceNat": 'VpcVirtualRouter',
- "PortForwarding": 'VpcVirtualRouter',
- "Lb": 'VpcVirtualRouter',
- "UserData": 'VpcVirtualRouter',
- "StaticNat": 'VpcVirtualRouter',
- "NetworkACL": 'VpcVirtualRouter'
- },
- }
- cls.services["network"] = {
- "name": "Test Network",
- "displaytext": "Test Network",
- "netmask": '255.255.255.0'
- }
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.services["publiciprange"]["zoneid"] = cls.zone.id
cls.service_offering = ServiceOffering.create(
- cls.api_client,
- cls.services["service_offering"]
- )
+ cls.api_client,
+ cls.services["service_offering"]
+ )
cls._cleanup = [cls.service_offering]
return
@classmethod
def tearDownClass(cls):
- try:
- #Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
-
+ super(TestVPCStaticNat, cls).tearDownClass()
def setUp(self):
self.apiclient = self.testClient.getApiClient()
+ self.cleanup = []
self.account = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.domain.id
)
- self.cleanup = [self.account]
+ self.cleanup.append(self.account)
logger.debug("Creating a VPC offering..")
self.vpc_off = VpcOffering.create(
self.apiclient,
self.services["vpc_offering"]
)
- self._cleanup.append(self.vpc_off)
+ self.cleanup.append(self.vpc_off)
logger.debug("Enabling the VPC offering created")
self.vpc_off.update(self.apiclient, state='Enabled')
@@ -1164,30 +1134,11 @@ class TestVPCStaticNat(cloudstackTestCase):
account=self.account.name,
domainid=self.account.domainid
)
+ self.cleanup.append(self.vpc)
return
def tearDown(self):
- try:
- #Clean up, terminate the created network offerings
- cleanup_resources(self.apiclient, self.cleanup)
- except Exception as e:
- logger.debug("Warning: Exception during cleanup : %s" % e)
- return
-
- def check_ssh_into_vm(self, vm, public_ip, testnegative=False):
- logger.debug("Checking if we can SSH into VM=%s on public_ip=%s" % (vm.name, public_ip.ipaddress.ipaddress))
- try:
- vm.get_ssh_client(ipaddress=public_ip.ipaddress.ipaddress)
- if not testnegative:
- logger.debug("SSH into VM=%s on public_ip=%s is successfully" % (vm.name, public_ip.ipaddress.ipaddress))
- else:
- self.fail("SSH into VM=%s on public_ip=%s is successfully" % (vm.name, public_ip.ipaddress.ipaddress))
- except:
- if not testnegative:
- self.fail("Failed to SSH into VM - %s" % (public_ip.ipaddress.ipaddress))
- else:
- logger.debug("Failed to SSH into VM - %s" % (public_ip.ipaddress.ipaddress))
-
+ super(TestVPCStaticNat, self).tearDown()
def acquire_publicip(self, network):
logger.debug("Associating public IP for network: %s" % network.name)
@@ -1198,42 +1149,12 @@ class TestVPCStaticNat(cloudstackTestCase):
networkid=network.id,
vpcid=self.vpc.id
)
+ self.cleanup.append(public_ip)
logger.debug("Associated %s with network %s" % (public_ip.ipaddress.ipaddress,
network.id
))
return public_ip
- def create_network(self, net_offerring, gateway='10.1.1.1',vpc=None):
- try:
- logger.debug('Create NetworkOffering')
- net_offerring["name"] = "NET_OFF-" + str(gateway)
- nw_off = NetworkOffering.create(self.apiclient,
- net_offerring,
- conservemode=False
- )
- # Enable Network offering
- nw_off.update(self.apiclient, state='Enabled')
- self._cleanup.append(nw_off)
- logger.debug('Created and Enabled NetworkOffering')
-
- self.services["network"]["name"] = "NETWORK-" + str(gateway)
- logger.debug('Adding Network=%s' % self.services["network"])
- default_acl = NetworkACLList.list(self.apiclient, name="default_allow")[0]
- obj_network = Network.create(self.apiclient,
- self.services["network"],
- accountid=self.account.name,
- domainid=self.account.domainid,
- networkofferingid=nw_off.id,
- zoneid=self.zone.id,
- gateway=gateway,
- aclid=default_acl.id,
- vpcid=vpc.id if vpc else self.vpc.id
- )
- logger.debug("Created network with ID: %s" % obj_network.id)
- return obj_network
- except Exception as e:
- self.fail('Unable to create a Network with offering=%s because of %s ' % (net_offerring, e))
-
def deployvm_in_network(self, network, host_id=None):
try:
logger.debug('Creating VM in network=%s' % network.name)
@@ -1246,6 +1167,7 @@ class TestVPCStaticNat(cloudstackTestCase):
networkids=[str(network.id)],
hostid=host_id
)
+ self.cleanup.append(vm)
logger.debug('Created VM=%s in network=%s' % (vm.id, network.name))
return vm
@@ -1270,7 +1192,7 @@ class TestVPCStaticNat(cloudstackTestCase):
self.fail("Failed to enable static NAT on IP: %s - %s" % (
public_ip.ipaddress.ipaddress, e))
- @attr(tags=["advanced", "intervlan"], required_hardware="true")
+ @attr(tags=["advancedsg", "intervlan"], required_hardware="true")
def test_network_services_VPC_CreatePF(self):
""" Test Create VPC PF rules on acquired public ip when VpcVirtualRouter is Running
"""
@@ -1289,7 +1211,7 @@ class TestVPCStaticNat(cloudstackTestCase):
self.apiclient,
self.services["publiciprange"]
)
- self._cleanup.append(self.public_ip_range)
+ self.cleanup.append(self.public_ip_range)
logger.debug("Dedicating Public IP range to the account");
dedicate_public_ip_range_response = PublicIpRange.dedicate(
self.apiclient,
@@ -1301,4 +1223,5 @@ class TestVPCStaticNat(cloudstackTestCase):
self.create_StaticNatRule_For_VM( vm_1, public_ip_1, network_1)
self.check_ssh_into_vm(vm_1, public_ip_1, testnegative=False)
self.public_ip_range.release(self.apiclient)
+ self.cleanup.remove(self.public_ip_range)
return
diff --git a/test/integration/component/test_organization_states.py b/test/integration/component/test_organization_states.py
index 2969ccb..2abb286 100644
--- a/test/integration/component/test_organization_states.py
+++ b/test/integration/component/test_organization_states.py
@@ -373,7 +373,8 @@ class TestOrganizationStates(cloudstackTestCase):
## Test cases relating to disabling and enabling cluster
- @attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
+ # was tags=["advanced"]
+ @attr("disruptive", "simulator_only", tags=["TODO"], required_hardware="false")
def test_31_disableCluster(self):
"""
Disable Cluster
@@ -393,7 +394,8 @@ class TestOrganizationStates(cloudstackTestCase):
"Disabled",
"Disabling Cluster did not set the alloctionstate to Disabled")
- @attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
+ # was tags=["advanced"]
+ @attr("disruptive", "simulator_only", tags=["TODO"], required_hardware="false")
def test_32_disableCluster_admin_deployVM(self):
"""
Validate that admin is allowed to deploy VM in a disabled cluster
@@ -507,7 +509,8 @@ class TestOrganizationStates(cloudstackTestCase):
"Disabled",
"Disabling Host did not set the alloctionstate to Disabled")
- @attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
+ # was tags=["advanced"]
+ @attr("disruptive", "simulator_only", tags=["TODO"], required_hardware="false")
def test_42_disableHost_admin_deployVM(self):
"""
Validate that admin is allowed to deploy VM in a disabled host by passing hostId parameter
@@ -583,7 +586,8 @@ class TestOrganizationStates(cloudstackTestCase):
except Exception as e:
self.debug("Exception thrown when deploying Virtual Machine on a disabled host - %s" % e)
- @attr("disruptive", "simulator_only", tags=["advanced"], required_hardware="false")
+ # was tags=["advanced"]
+ @attr("disruptive", "simulator_only", tags=["TODO"], required_hardware="false")
def test_46_disableHost_user_stop_startVM(self):
"""
Validate that regular user is allowed to stop and start existing VMs running in a disabled host
diff --git a/test/integration/component/test_persistent_networks.py b/test/integration/component/test_persistent_networks.py
index 079677d..3becff6 100644
--- a/test/integration/component/test_persistent_networks.py
+++ b/test/integration/component/test_persistent_networks.py
@@ -1373,7 +1373,8 @@ class TestPersistentNetworks(cloudstackTestCase):
self.fail(exceptionMessage)
return
- @attr(tags=["advanced"], required_hardware="true")
+ # @attr(tags=["advanced"], required_hardware="true")
+ @attr(tags=["TODO"], required_hardware="true")
def test_delete_account(self):
# steps
# 1. create persistent network and deploy VM in it
@@ -2924,7 +2925,8 @@ class TestVPCNetworkOperations(cloudstackTestCase):
self.VerifyNetworkCleanup(persistent_network_2.id)
return
- @attr(tags=["advanced"], required_hardware="true")
+ # @attr(tags=["advanced"], required_hardware="true")
+ @attr(tags=["TODO"], required_hardware="true")
def test_vpc_delete_account(self):
# steps
# 1. Create account and create VPC network in the account
diff --git a/test/integration/component/test_portable_ip.py b/test/integration/component/test_portable_ip.py
index d329e64..e3e04d4 100644
--- a/test/integration/component/test_portable_ip.py
+++ b/test/integration/component/test_portable_ip.py
@@ -592,7 +592,8 @@ class TestAssociatePublicIp(cloudstackTestCase):
publicipaddress.delete(self.apiclient)
return
- @attr(tags=["advanced"], required_hardware="true")
+ # @attr(tags=["advanced"], required_hardware="true")
+ @attr(tags=["TODO"], required_hardware="true")
def test_associate_ip_address_services_enable_disable(self):
""" Test enabling and disabling NAT, Firewall services on portable ip
"""
diff --git a/test/integration/component/test_ps_domain_limits.py b/test/integration/component/test_ps_domain_limits.py
index 9740ab9..1257a5a 100644
--- a/test/integration/component/test_ps_domain_limits.py
+++ b/test/integration/component/test_ps_domain_limits.py
@@ -89,12 +89,7 @@ class TestMultipleChildDomain(cloudstackTestCase):
@classmethod
def tearDownClass(cls):
- try:
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestMultipleChildDomain, cls).tearDownClass()
def setUp(self):
self.apiclient = self.testClient.getApiClient()
@@ -106,22 +101,16 @@ class TestMultipleChildDomain(cloudstackTestCase):
self.apiclient,
self.services["disk_offering"]
)
+ self.cleanup.append(self.disk_offering)
self.assertNotEqual(self.disk_offering, None,
"Disk offering is None")
- self.cleanup.append(self.disk_offering)
except Exception as e:
self.tearDown()
self.skipTest("Failure while creating disk offering: %s" % e)
return
def tearDown(self):
- try:
- # Clean up, terminate the created instance, volumes and snapshots
- cleanup_resources(self.apiclient, self.cleanup)
- pass
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestMultipleChildDomain, self).tearDown()
def updateDomainResourceLimits(self, parentdomainlimit, subdomainlimit):
"""Update primary storage limits of the parent domain and its
@@ -151,41 +140,39 @@ class TestMultipleChildDomain(cloudstackTestCase):
self.apiclient,
services=self.services["domain"],
parentdomainid=self.domain.id)
+ self.cleanup.append(self.parent_domain)
self.parentd_admin = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.parent_domain.id)
+ self.cleanup.append(self.parentd_admin)
# Create sub-domains and their admin accounts
self.cdomain_1 = Domain.create(
self.apiclient,
services=self.services["domain"],
parentdomainid=self.parent_domain.id)
+ self.cleanup.append(self.cdomain_1)
self.cdomain_2 = Domain.create(
self.apiclient,
services=self.services["domain"],
parentdomainid=self.parent_domain.id)
+ self.cleanup.append(self.cdomain_2)
self.cadmin_1 = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.cdomain_1.id)
+ self.cleanup.append(self.cadmin_1)
self.cadmin_2 = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.cdomain_2.id)
-
- # Cleanup the resources created at end of test
- self.cleanup.append(self.cadmin_1)
self.cleanup.append(self.cadmin_2)
- self.cleanup.append(self.cdomain_1)
- self.cleanup.append(self.cdomain_2)
- self.cleanup.append(self.parentd_admin)
- self.cleanup.append(self.parent_domain)
users = {
self.cdomain_1: self.cadmin_1,
@@ -221,7 +208,6 @@ class TestMultipleChildDomain(cloudstackTestCase):
quantity
4. After step 7, resource count in parent domain should be 0"""
- # Setting up account and domain hierarchy
result = self.setupAccounts()
self.assertEqual(
result[0],
@@ -233,9 +219,10 @@ class TestMultipleChildDomain(cloudstackTestCase):
disksize = 10
subdomainlimit = (templatesize + disksize)
+ maxlimit = subdomainlimit * 3 - 1
result = self.updateDomainResourceLimits(
- ((subdomainlimit * 3) - 1),
- subdomainlimit)
+ int(maxlimit),
+ int(subdomainlimit))
self.assertEqual(
result[0],
PASS,
@@ -279,13 +266,14 @@ class TestMultipleChildDomain(cloudstackTestCase):
"Failed to create api client for account: %s" %
self.cadmin_2.name)
- VirtualMachine.create(
+ vm_1 = VirtualMachine.create(
api_client_cadmin_1,
self.services["virtual_machine"],
accountid=self.cadmin_1.name,
domainid=self.cadmin_1.domainid,
diskofferingid=disk_offering_custom.id,
serviceofferingid=self.service_offering.id)
+ self.cleanup.append(vm_1)
self.initialResourceCount = (templatesize + disksize)
result = isDomainResourceCountEqualToExpectedCount(
@@ -302,22 +290,25 @@ class TestMultipleChildDomain(cloudstackTestCase):
domainid=self.cadmin_2.domainid,
diskofferingid=disk_offering_custom.id,
serviceofferingid=self.service_offering.id)
+ self.cleanup.append(vm_2)
# Now the VMs in two child domains have exhausted the primary storage limit
# of parent domain, hence VM creation in parent domain with custom disk offering
# should fail
with self.assertRaises(Exception):
- VirtualMachine.create(
+ vm_faulty = VirtualMachine.create(
api_client_admin,
self.services["virtual_machine"],
accountid=self.parentd_admin.name,
domainid=self.parentd_admin.domainid,
diskofferingid=disk_offering_custom.id,
serviceofferingid=self.service_offering.id)
+ self.cleanup.append(vm_faulty) # should not happen
- # Deleting user account
+ # Deleting user account and remove it's resources from the cleanup list
self.cadmin_1.delete(self.apiclient)
self.cleanup.remove(self.cadmin_1)
+ self.cleanup.remove(vm_1)
expectedCount = self.initialResourceCount
result = isDomainResourceCountEqualToExpectedCount(
@@ -328,6 +319,7 @@ class TestMultipleChildDomain(cloudstackTestCase):
try:
vm_2.delete(self.apiclient)
+ self.cleanup.remove(vm_2)
except Exception as e:
self.fail("Failed to delete instance: %s" % e)
@@ -387,6 +379,7 @@ class TestMultipleChildDomain(cloudstackTestCase):
domainid=self.account.domainid,
diskofferingid=self.disk_offering.id,
serviceofferingid=self.service_offering.id)
+ self.cleanup.append(vm)
expectedCount = templatesize + self.disk_offering.disksize
result = isDomainResourceCountEqualToExpectedCount(
@@ -400,7 +393,6 @@ class TestMultipleChildDomain(cloudstackTestCase):
disk_offering_10_GB = DiskOffering.create(
self.apiclient,
services=self.services["disk_offering"])
-
self.cleanup.append(disk_offering_10_GB)
volume = Volume.create(
@@ -410,11 +402,13 @@ class TestMultipleChildDomain(cloudstackTestCase):
account=self.account.name,
domainid=self.account.domainid,
diskofferingid=disk_offering_10_GB.id)
+ self.cleanup.append(volume) # we get an exception in the next few lines
volumeSize = (volume.size / (1024 ** 3))
expectedCount += volumeSize
vm.attach_volume(apiclient, volume=volume)
+ self.cleanup.remove(volume) # we can't cleanup an attached volume
result = isDomainResourceCountEqualToExpectedCount(
self.apiclient, self.domain.id,
expectedCount, RESOURCE_PRIMARY_STORAGE)
@@ -475,7 +469,9 @@ class TestMultipleChildDomain(cloudstackTestCase):
accountid=self.account.name,
domainid=self.account.domainid,
diskofferingid=self.disk_offering.id,
- serviceofferingid=self.service_offering.id)
+ serviceofferingid=self.service_offering.id,
+ startvm=False)
+ self.cleanup.append(vm)
expectedCount = templatesize + self.disk_offering.disksize
result = isDomainResourceCountEqualToExpectedCount(
@@ -488,14 +484,12 @@ class TestMultipleChildDomain(cloudstackTestCase):
disk_offering_15_GB = DiskOffering.create(
self.apiclient,
services=self.services["disk_offering"])
-
self.cleanup.append(disk_offering_15_GB)
volume2size = self.services["disk_offering"]["disksize"] = 20
disk_offering_20_GB = DiskOffering.create(
self.apiclient,
services=self.services["disk_offering"])
-
self.cleanup.append(disk_offering_20_GB)
volume_1 = Volume.create(
@@ -505,6 +499,7 @@ class TestMultipleChildDomain(cloudstackTestCase):
account=self.account.name,
domainid=self.account.domainid,
diskofferingid=disk_offering_15_GB.id)
+ self.cleanup.append(volume_1)
volume_2 = Volume.create(
apiclient,
@@ -513,9 +508,12 @@ class TestMultipleChildDomain(cloudstackTestCase):
account=self.account.name,
domainid=self.account.domainid,
diskofferingid=disk_offering_20_GB.id)
+ self.cleanup.append(volume_2)
vm.attach_volume(apiclient, volume=volume_1)
+ self.cleanup.remove(volume_1)
vm.attach_volume(apiclient, volume=volume_2)
+ self.cleanup.remove(volume_2)
expectedCount += volume1size + volume2size
result = isDomainResourceCountEqualToExpectedCount(
@@ -537,7 +535,8 @@ class TestMultipleChildDomain(cloudstackTestCase):
self.fail("Failure: %s" % e)
return
- @attr(tags=["advanced"], required_hardware="true")
+ # @attr(tags=["advanced"], required_hardware="true")
+ @attr(tags=["TODO"], required_hardware="true")
def test_04_create_template_snapshot(self):
"""Test create snapshot and templates from volume
@@ -584,6 +583,7 @@ class TestMultipleChildDomain(cloudstackTestCase):
domainid=self.account.domainid,
diskofferingid=self.disk_offering.id,
serviceofferingid=self.service_offering.id)
+ self.cleanup.append(vm)
templatesize = (self.template.size / (1024 ** 3))
@@ -603,6 +603,7 @@ class TestMultipleChildDomain(cloudstackTestCase):
vm.id)
self.assertEqual(response[0], PASS, response[1])
snapshot = response[1]
+ self.cleanup.append(snapshot)
response = snapshot.validateState(
apiclient,
@@ -617,8 +618,10 @@ class TestMultipleChildDomain(cloudstackTestCase):
services=self.services["volume"],
account=self.account.name,
domainid=self.account.domainid)
+ self.cleanup.append(volume)
volumeSize = (volume.size / (1024 ** 3))
vm.attach_volume(apiclient, volume)
+ self.cleanup.remove(volume)
expectedCount = initialResourceCount + (volumeSize)
result = isDomainResourceCountEqualToExpectedCount(
self.apiclient, self.domain.id,
@@ -673,6 +676,7 @@ class TestMultipleChildDomain(cloudstackTestCase):
domainid=self.cadmin_1.domainid,
diskofferingid=self.disk_offering.id,
serviceofferingid=self.service_offering.id)
+ self.cleanup.append(vm_1)
templatesize = (self.template.size / (1024 ** 3))
@@ -736,6 +740,7 @@ class TestMultipleChildDomain(cloudstackTestCase):
domainid=self.account.domainid,
diskofferingid=self.disk_offering.id,
serviceofferingid=self.service_offering.id)
+ self.cleanup.append(vm_1)
templatesize = (self.template.size / (1024 ** 3))
@@ -747,6 +752,7 @@ class TestMultipleChildDomain(cloudstackTestCase):
self.assertTrue(result[2], "Resource count does not match")
vm_1.delete(self.apiclient, expunge=False)
+ self.cleanup.remove(vm_1)
result = isDomainResourceCountEqualToExpectedCount(
self.apiclient, self.account.domainid,
@@ -755,6 +761,7 @@ class TestMultipleChildDomain(cloudstackTestCase):
self.assertTrue(result[2], "Resource count does not match")
vm_1.recover(self.apiclient)
+ self.cleanup.append(vm_1)
result = isDomainResourceCountEqualToExpectedCount(
self.apiclient, self.account.domainid,
diff --git a/test/integration/component/test_ps_limits.py b/test/integration/component/test_ps_limits.py
index d87e605..8b5005f 100644
--- a/test/integration/component/test_ps_limits.py
+++ b/test/integration/component/test_ps_limits.py
@@ -96,12 +96,7 @@ class TestVolumeLimits(cloudstackTestCase):
@classmethod
def tearDownClass(cls):
- try:
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestVolumeLimits, cls).tearDownClass()
def setUp(self):
if self.unsupportedStorageType:
@@ -113,22 +108,16 @@ class TestVolumeLimits(cloudstackTestCase):
try:
self.services["disk_offering"]["disksize"] = 2
self.disk_offering = DiskOffering.create(self.apiclient, self.services["disk_offering"])
+ self.cleanup.append(self.disk_offering)
self.assertNotEqual(self.disk_offering, None, \
"Disk offering is None")
- self.cleanup.append(self.disk_offering)
except Exception as e:
self.tearDown()
self.skipTest("Failure in setup: %s" % e)
return
def tearDown(self):
- try:
- # Clean up, terminate the created instance, volumes and snapshots
- cleanup_resources(self.apiclient, self.cleanup)
- pass
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestVolumeLimits, self).tearDown()
def setupAccount(self, accountType):
"""Setup the account required for the test"""
@@ -138,17 +127,18 @@ class TestVolumeLimits(cloudstackTestCase):
self.domain = Domain.create(self.apiclient,
services=self.services["domain"],
parentdomainid=self.domain.id)
+ self.cleanup.append(self.domain)
self.account = Account.create(self.apiclient, self.services["account"],
domainid=self.domain.id, admin=True)
self.cleanup.append(self.account)
- if accountType == CHILD_DOMAIN_ADMIN:
- self.cleanup.append(self.domain)
self.virtualMachine = VirtualMachine.create(self.api_client, self.services["virtual_machine"],
accountid=self.account.name, domainid=self.account.domainid,
diskofferingid=self.disk_offering.id,
- serviceofferingid=self.service_offering.id)
+ serviceofferingid=self.service_offering.id,
+ startvm=False)
+ self.cleanup.append(self.virtualMachine)
accounts = Account.list(self.apiclient, id=self.account.id)
@@ -216,6 +206,7 @@ class TestVolumeLimits(cloudstackTestCase):
# Stopping instance
try:
self.virtualMachine.delete(self.apiclient, expunge=False)
+ self.cleanup.remove(self.virtualMachine)
except Exception as e:
self.fail("Failed to destroy instance: %s" % e)
response = matchResourceCount(
@@ -227,6 +218,7 @@ class TestVolumeLimits(cloudstackTestCase):
# Recovering instance
try:
self.virtualMachine.recover(self.apiclient)
+ self.cleanup.append(self.virtualMachine)
except Exception as e:
self.fail("Failed to start instance: %s" % e)
@@ -267,13 +259,13 @@ class TestVolumeLimits(cloudstackTestCase):
expectedCount = self.initialResourceCount + int(self.services["disk_offering"]["disksize"])
disk_offering = DiskOffering.create(self.apiclient,
services=self.services["disk_offering"])
-
self.cleanup.append(disk_offering)
volume = Volume.create(
apiclient, self.services["volume"], zoneid=self.zone.id,
account=self.account.name, domainid=self.account.domainid,
diskofferingid=disk_offering.id)
+ self.cleanup.append(volume)
except Exception as e:
self.fail("Failure: %s" % e)
@@ -285,6 +277,7 @@ class TestVolumeLimits(cloudstackTestCase):
try:
self.virtualMachine.attach_volume(apiclient, volume=volume)
+ self.cleanup.remove(volume)
except Exception as e:
self.fail("Failed while attaching volume to VM: %s" % e)
@@ -296,6 +289,7 @@ class TestVolumeLimits(cloudstackTestCase):
try:
self.virtualMachine.detach_volume(apiclient, volume=volume)
+ self.cleanup.append(volume)
except Exception as e:
self.fail("Failure while detaching volume: %s" % e)
@@ -343,22 +337,19 @@ class TestVolumeLimits(cloudstackTestCase):
self.services["disk_offering"]["disksize"] = 10
disk_offering_10_GB = DiskOffering.create(self.apiclient,
services=self.services["disk_offering"])
-
self.cleanup.append(disk_offering_10_GB)
volume_1 = Volume.create(
apiclient, self.services["volume"], zoneid=self.zone.id,
account=self.account.name, domainid=self.account.domainid,
diskofferingid=disk_offering_5_GB.id)
+ self.debug("Attaching volume %s to vm %s" % (volume_1.name, self.virtualMachine.name))
+ self.virtualMachine.attach_volume(apiclient, volume=volume_1)
volume_2 = Volume.create(
apiclient, self.services["volume"], zoneid=self.zone.id,
account=self.account.name, domainid=self.account.domainid,
diskofferingid=disk_offering_10_GB.id)
-
- self.debug("Attaching volume %s to vm %s" % (volume_1.name, self.virtualMachine.name))
- self.virtualMachine.attach_volume(apiclient, volume=volume_1)
-
self.debug("Attaching volume %s to vm %s" % (volume_2.name, self.virtualMachine.name))
self.virtualMachine.attach_volume(apiclient, volume=volume_2)
except Exception as e:
@@ -419,6 +410,7 @@ class TestVolumeLimits(cloudstackTestCase):
accountid=self.account.name, domainid=self.account.domainid,
diskofferingid=self.disk_offering.id,
serviceofferingid=self.service_offering.id)
+ self.cleanup.append(self.virtualMachine_2)
expectedCount = (self.initialResourceCount * 2) # Total 2 vms
response = matchResourceCount(
@@ -431,6 +423,7 @@ class TestVolumeLimits(cloudstackTestCase):
accountid=self.account.name, domainid=self.account.domainid,
diskofferingid=self.disk_offering.id,
serviceofferingid=self.service_offering.id)
+ self.cleanup.append(self.virtualMachine_3)
expectedCount = (self.initialResourceCount * 3) # Total 3 vms
response = matchResourceCount(
@@ -442,6 +435,7 @@ class TestVolumeLimits(cloudstackTestCase):
self.debug("Destroying instance: %s" % self.virtualMachine_2.name)
try:
self.virtualMachine_2.delete(self.apiclient)
+ self.cleanup.remove(self.virtualMachine_2)
except Exception as e:
self.fail("Failed to delete instance: %s" % e)
@@ -450,7 +444,7 @@ class TestVolumeLimits(cloudstackTestCase):
expectedCount -= (self.template.size / (1024 ** 3))
response = matchResourceCount(
- self.apiclient, expectedCount,
+ self.apiclient, int(expectedCount),
RESOURCE_PRIMARY_STORAGE,
accountid=self.account.id)
self.assertEqual(response[0], PASS, response[1])
@@ -474,7 +468,7 @@ class TestVolumeLimits(cloudstackTestCase):
try:
account_2 = Account.create(self.apiclient, self.services["account"],
domainid=self.domain.id, admin=True)
- self.cleanup.insert(0, account_2)
+ self.cleanup.append(account_2)
except Exception as e:
self.fail("Failed to create account: %s" % e)
@@ -489,6 +483,8 @@ class TestVolumeLimits(cloudstackTestCase):
self.virtualMachine.stop(self.apiclient)
self.virtualMachine.assign_virtual_machine(self.apiclient,
account_2.name, account_2.domainid)
+ self.cleanup.remove(self.virtualMachine) # should now be cleaned up together with account_2
+ self.cleanup.append(self.virtualMachine) # or cleaned up beforehand, to be neat
except Exception as e:
self.fail("Failed to assign virtual machine to account %s: %s" %
(account_2.name, e))
@@ -510,7 +506,8 @@ class TestVolumeLimits(cloudstackTestCase):
return
@data(ROOT_DOMAIN_ADMIN, CHILD_DOMAIN_ADMIN)
- @attr(tags=["advanced", "basic"], required_hardware="true")
+ # @attr(tags=["advanced", "basic"], required_hardware="true")
+ @attr(tags=["TODO"], required_hardware="true")
def test_create_template_snapshot(self, value):
"""Test create snapshot and templates from volume
@@ -527,8 +524,12 @@ class TestVolumeLimits(cloudstackTestCase):
self.debug(response[0])
self.debug(response[1])
self.assertEqual(response[0], PASS, response[1])
-
apiclient = self.apiclient
+ try:
+ self.virtualMachine.start(apiclient)
+ except Exception as e:
+ self.fail("Failed to start instance: %s" % e)
+
if value == CHILD_DOMAIN_ADMIN:
apiclient = self.testClient.getUserApiClient(
UserName=self.account.name,
@@ -566,7 +567,6 @@ class TestVolumeLimits(cloudstackTestCase):
services=self.services["volume"],
account=self.account.name,
domainid=self.account.domainid)
-
self.debug("Attaching the volume to vm: %s" % self.virtualMachine.name)
self.virtualMachine.attach_volume(apiclient, volume)
except Exception as e:
@@ -581,12 +581,14 @@ class TestVolumeLimits(cloudstackTestCase):
try:
self.virtualMachine.detach_volume(apiclient, volume)
+ self.cleanup.append(volume)
except Exception as e:
self.fail("Failure in detach volume operation: %s" % e)
try:
self.debug("deleting the volume: %s" % volume.name)
volume.delete(apiclient)
+ self.cleanup.remove(volume)
except Exception as e:
self.fail("Failure while deleting volume: %s" % e)
diff --git a/test/integration/component/test_ps_resize_volume.py b/test/integration/component/test_ps_resize_volume.py
index f691dd9..4c8b0ef 100644
--- a/test/integration/component/test_ps_resize_volume.py
+++ b/test/integration/component/test_ps_resize_volume.py
@@ -120,12 +120,7 @@ class TestResizeVolume(cloudstackTestCase):
@classmethod
def tearDownClass(cls):
- try:
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestResizeVolume, cls).tearDownClass()
def setUp(self):
if self.unsupportedStorageType:
@@ -137,13 +132,7 @@ class TestResizeVolume(cloudstackTestCase):
return
def tearDown(self):
- try:
- # Clean up, terminate the created instance, volumes and snapshots
- cleanup_resources(self.apiclient, self.cleanup)
- pass
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestResizeVolume, self).tearDown()
def updateResourceLimits(self, accountLimit=None, domainLimit=None):
"""Update primary storage limits of the parent domain and its
@@ -153,13 +142,13 @@ class TestResizeVolume(cloudstackTestCase):
if domainLimit:
# Update resource limit for domain
Resources.updateLimit(self.apiclient, resourcetype=10,
- max=domainLimit,
+ max=int(domainLimit),
domainid=self.parent_domain.id)
if accountLimit:
# Update resource limit for domain
Resources.updateLimit(self.apiclient,
resourcetype=10,
- max=accountLimit,
+ max=int(accountLimit),
account=self.parentd_admin.name,
domainid=self.parent_domain.id)
except Exception as e:
@@ -172,14 +161,13 @@ class TestResizeVolume(cloudstackTestCase):
services=self.services[
"domain"],
parentdomainid=self.domain.id)
+ self.cleanup.append(self.parent_domain)
self.parentd_admin = Account.create(self.apiclient,
self.services["account"],
admin=True,
domainid=self.parent_domain.id)
- # Cleanup the resources created at end of test
self.cleanup.append(self.parentd_admin)
- self.cleanup.append(self.parent_domain)
except Exception as e:
return [FAIL, e]
return [PASS, None]
@@ -199,7 +187,6 @@ class TestResizeVolume(cloudstackTestCase):
# 6. Resize operation should be successful and primary storage count
# for account should be updated successfully"""
- # Setting up account and domain hierarchy
result = self.setupAccounts()
self.assertEqual(result[0], PASS, result[1])
@@ -220,6 +207,7 @@ class TestResizeVolume(cloudstackTestCase):
domainid=self.parent_domain.id,
serviceofferingid=self.service_offering.id
)
+ self.cleanup.append(virtualMachine)
volume = Volume.create(
apiclient, self.services["volume"],
@@ -227,7 +215,6 @@ class TestResizeVolume(cloudstackTestCase):
account=self.parentd_admin.name,
domainid=self.parent_domain.id,
diskofferingid=self.disk_offering_5_GB.id)
-
virtualMachine.attach_volume(apiclient, volume=volume)
expectedCount = (templateSize + self.disk_offering_5_GB.disksize)
@@ -291,6 +278,7 @@ class TestResizeVolume(cloudstackTestCase):
domainid=self.parent_domain.id,
serviceofferingid=self.service_offering.id
)
+ self.cleanup.append(virtualMachine)
volume = Volume.create(
apiclient, self.services["volume"],
@@ -298,7 +286,6 @@ class TestResizeVolume(cloudstackTestCase):
account=self.parentd_admin.name,
domainid=self.parent_domain.id,
diskofferingid=self.disk_offering_5_GB.id)
-
virtualMachine.attach_volume(apiclient, volume=volume)
expectedCount = (templateSize + self.disk_offering_5_GB.disksize)
@@ -355,6 +342,7 @@ class TestResizeVolume(cloudstackTestCase):
domainid=self.parent_domain.id,
serviceofferingid=self.service_offering.id
)
+ self.cleanup.append(virtualMachine)
volume = Volume.create(
apiclient, self.services["volume"],
@@ -362,7 +350,6 @@ class TestResizeVolume(cloudstackTestCase):
account=self.parentd_admin.name,
domainid=self.parent_domain.id,
diskofferingid=self.disk_offering_5_GB.id)
-
virtualMachine.attach_volume(apiclient, volume=volume)
expectedCount = (templateSize + self.disk_offering_5_GB.disksize)
diff --git a/test/integration/component/test_redundant_router_cleanups.py b/test/integration/component/test_redundant_router_cleanups.py
index 2c0805f..3a6a396 100644
--- a/test/integration/component/test_redundant_router_cleanups.py
+++ b/test/integration/component/test_redundant_router_cleanups.py
@@ -668,7 +668,8 @@ class TestRedundantRouterNetworkCleanups(cloudstackTestCase):
)
return
- @attr(tags=["advanced", "advancedns"], required_hardware="false")
+ # @attr(tags=["advanced", "advancedns"], required_hardware="false")
+ @attr(tags=["TODO"], required_hardware="false")
def test_restart_network_with_destroyed_primaryVR(self):
"""Test restarting RvR network without cleanup after destroying primary VR
"""
diff --git a/test/integration/component/test_rootvolume_resize.py b/test/integration/component/test_rootvolume_resize.py
index 06b8278..ab2e754 100644
--- a/test/integration/component/test_rootvolume_resize.py
+++ b/test/integration/component/test_rootvolume_resize.py
@@ -568,7 +568,8 @@ class TestResizeVolume(cloudstackTestCase):
return
- @attr(tags=["advanced"], required_hardware="true")
+ # @attr(tags=["advanced"], required_hardware="true")
+ @attr(tags=["TODO"], required_hardware="true")
def test_03_vmsnapshot__on_resized_rootvolume_vm(self):
"""Test vmsnapshot on resized root volume
@@ -936,7 +937,8 @@ class TestResizeVolume(cloudstackTestCase):
if rootvol is not None and 'kvm' or 'xenserver' in vm.hypervisor.lower():
rootvol.resize(self.apiclient, size=newsize)
- @attr(tags=["advanced"], required_hrdware="true")
+ # @attr(tags=["advanced"], required_hardware="true")
+ @attr(tags=["TODO"], required_hardware="true")
def test_7_usage_events_after_rootvolume_resized_(self):
"""Test check usage events after root volume resize
diff --git a/test/integration/component/test_shared_networks.py b/test/integration/component/test_shared_networks.py
index 725ee91..1954011 100644
--- a/test/integration/component/test_shared_networks.py
+++ b/test/integration/component/test_shared_networks.py
@@ -3557,7 +3557,8 @@ class TestSharedNetworks(cloudstackTestCase):
self.fail(exceptionMessage)
return
- @attr(tags=["advanced", "advancedns", "dvs"], required_hardware="false")
+ # @attr(tags=["advanced", "advancedns", "dvs"], required_hardware="false")
+ @attr(tags=["TODO"], required_hardware="false")
def test_acquire_ip(self):
"""Test acquire IP in shared network
diff --git a/test/integration/component/test_snapshots.py b/test/integration/component/test_snapshots.py
index 655a287..9cb0222 100644
--- a/test/integration/component/test_snapshots.py
+++ b/test/integration/component/test_snapshots.py
@@ -177,6 +177,8 @@ class TestSnapshots(cloudstackTestCase):
cls.api_client,
cls.services["disk_offering"]
)
+ cls._cleanup.append(cls.disk_offering)
+
cls.template = get_template(
cls.api_client,
cls.zone.id,
@@ -198,21 +200,13 @@ class TestSnapshots(cloudstackTestCase):
cls.api_client,
cls.services["service_offering"]
)
+ cls._cleanup.append(cls.service_offering)
- cls._cleanup = [
- cls.service_offering,
- cls.disk_offering
- ]
return
@classmethod
def tearDownClass(cls):
- try:
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestSnapshots, cls).tearDownClass()
def setUp(self):
self.apiclient = self.testClient.getApiClient()
@@ -220,9 +214,7 @@ class TestSnapshots(cloudstackTestCase):
self.cleanup = []
if self.unsupportedHypervisor:
- self.skipTest("Skipping test because unsupported hypervisor: %s" %
- self.hypervisor)
-
+ self.skipTest("Skipping test because unsupported hypervisor: %s" % self.hypervisor)
# Create VMs, NAT Rules etc
self.account = Account.create(
@@ -245,12 +237,7 @@ class TestSnapshots(cloudstackTestCase):
return
def tearDown(self):
- try:
- # Clean up, terminate the created instance, volumes and snapshots
- cleanup_resources(self.apiclient, self.cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestSnapshots, self).tearDown()
@attr(speed="slow")
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
@@ -307,17 +294,17 @@ class TestSnapshots(cloudstackTestCase):
snapshot.id))
return
- @attr(speed="slow")
- @attr(
- tags=[
- "advanced",
- "advancedns",
- "basic",
- "sg"],
- required_hardware="true")
+ # @attr(speed="slow")
+ # @attr(
+ # tags=[
+ # "advanced",
+ # "advancedns",
+ # "basic",
+ # "sg"],
+ # required_hardware="true")
+ @attr(tags=["TODO"], required_hardware="true")
def test_01_volume_from_snapshot(self):
"""Test Creating snapshot from volume having spaces in name(KVM)
- """
# Validate the following
# 1. Create a virtual machine and data volume
# 2. Attach data volume to VM
@@ -327,6 +314,7 @@ class TestSnapshots(cloudstackTestCase):
# 5. Create another Volume from snapshot
# 6. Mount/Attach volume to another virtual machine
# 7. Compare data, data should match
+ """
if self.hypervisor.lower() in ['hyperv']:
self.skipTest("Snapshots feature is not supported on Hyper-V")
@@ -334,7 +322,7 @@ class TestSnapshots(cloudstackTestCase):
random_data_0 = random_gen(size=100)
random_data_1 = random_gen(size=100)
- self.debug("random_data_0 : %s" % random_data_0)
+ self.debug("random_data_0: %s" % random_data_0)
self.debug("random_data_1: %s" % random_data_1)
try:
@@ -355,7 +343,7 @@ class TestSnapshots(cloudstackTestCase):
self.virtual_machine.attach_volume(
self.apiclient,
volume
- )
 ) # volume should be cleaned up together with `self.virtual_machine`
self.debug("Attach volume: %s to VM: %s" %
(volume.id, self.virtual_machine.id))
@@ -483,7 +471,7 @@ class TestSnapshots(cloudstackTestCase):
mode=self.services["mode"]
)
self.debug("Deployed new VM for account: %s" % self.account.name)
- # self.cleanup.append(new_virtual_machine)
+ self.cleanup.append(new_virtual_machine)
self.debug("Attaching volume: %s to VM: %s" % (
volume_from_snapshot.id,
@@ -764,25 +752,24 @@ class TestSnapshots(cloudstackTestCase):
)
return
- @attr(speed="slow")
- @attr(
- tags=[
- "advanced",
- "advancedns",
- "smoke",
- "xen"],
- required_hardware="true")
+ # @attr(speed="slow")
+ # @attr(
+ # tags=[
+ # "advanced",
+ # "advancedns",
+ # "smoke",
+ # "xen"],
+ # required_hardware="true")
+ @attr(tags=["TODO"], required_hardware="true")
def test_07_template_from_snapshot(self):
"""Create Template from snapshot
- """
-
# 1. Login to machine; create temp/test directories on data volume
# 2. Snapshot the Volume
# 3. Create Template from snapshot
# 4. Deploy Virtual machine using this template
# 5. Login to newly created virtual machine
- # 6. Compare data in the root disk with the one that was written on the
- # volume, it should match
+ # 6. Compare data in the root disk with the one that was written on the volume, it should match
+ """
if self.hypervisor.lower() in ['hyperv']:
self.skipTest("Snapshots feature is not supported on Hyper-V")
@@ -865,6 +852,7 @@ class TestSnapshots(cloudstackTestCase):
account=self.account.name,
domainid=self.account.domainid
)
+ self.cleanup.append(snapshot)
self.debug("Snapshot created from volume ID: %s" % volume.id)
# Generate template from the snapshot
@@ -873,7 +861,6 @@ class TestSnapshots(cloudstackTestCase):
snapshot,
self.services["templates"]
)
- self.cleanup.append(template)
self.debug("Template created from snapshot ID: %s" % snapshot.id)
# Verify created template
@@ -905,6 +892,7 @@ class TestSnapshots(cloudstackTestCase):
serviceofferingid=self.service_offering.id,
mode=self.services["mode"]
)
+ self.cleanup.append(new_virtual_machine)
try:
# Login to VM & mount directory
ssh = new_virtual_machine.get_ssh_client()
@@ -997,25 +985,19 @@ class TestCreateVMSnapshotTemplate(cloudstackTestCase):
cls.services["account"],
domainid=cls.domain.id
)
+ cls._cleanup.append(cls.account)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
- cls._cleanup = [
- cls.service_offering,
- cls.account,
- ]
+ cls._cleanup.append(cls.service_offering)
+
return
@classmethod
def tearDownClass(cls):
- try:
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestCreateVMSnapshotTemplate, cls).tearDownClass()
def setUp(self):
self.apiclient = self.testClient.getApiClient()
@@ -1027,12 +1009,7 @@ class TestCreateVMSnapshotTemplate(cloudstackTestCase):
return
def tearDown(self):
- try:
- # Clean up, terminate the created instance, volumes and snapshots
- cleanup_resources(self.apiclient, self.cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestCreateVMSnapshotTemplate, self).tearDown()
@attr(speed="slow")
@attr(tags=["advanced", "advancedns"], required_hardware="true")
@@ -1072,8 +1049,9 @@ class TestCreateVMSnapshotTemplate(cloudstackTestCase):
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id
)
+ self.cleanup.append(self.virtual_machine)
self.debug("Created VM with ID: %s" % self.virtual_machine.id)
- # Get the Root disk of VM
+
volumes = list_volumes(
userapiclient,
virtualmachineid=self.virtual_machine.id,
@@ -1117,8 +1095,8 @@ class TestCreateVMSnapshotTemplate(cloudstackTestCase):
snapshot,
self.services["templates"]
)
- self.debug("Created template from snapshot: %s" % template.id)
self.cleanup.append(template)
+ self.debug("Created template from snapshot: %s" % template.id)
templates = list_templates(
userapiclient,
@@ -1147,11 +1125,11 @@ class TestCreateVMSnapshotTemplate(cloudstackTestCase):
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id
)
+ self.cleanup.append(new_virtual_machine)
self.debug("Created VM with ID: %s from template: %s" % (
new_virtual_machine.id,
template.id
))
- self.cleanup.append(new_virtual_machine)
# Newly deployed VM should be 'Running'
virtual_machines = list_virtual_machines(
@@ -1219,6 +1197,7 @@ class TestSnapshotEvents(cloudstackTestCase):
cls.services["account"],
domainid=cls.domain.id
)
+ cls._cleanup.append(cls.account)
cls.services["account"] = cls.account.name
@@ -1226,6 +1205,8 @@ class TestSnapshotEvents(cloudstackTestCase):
cls.api_client,
cls.services["service_offering"]
)
+ cls._cleanup.append(cls.service_offering)
+
cls.virtual_machine = VirtualMachine.create(
cls.api_client,
cls.services["server"],
@@ -1234,21 +1215,13 @@ class TestSnapshotEvents(cloudstackTestCase):
domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id
)
+ cls._cleanup.append(cls.virtual_machine)
- cls._cleanup = [
- cls.service_offering,
- cls.account,
- ]
return
@classmethod
def tearDownClass(cls):
- try:
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestSnapshotEvents, cls).tearDownClass()
def setUp(self):
self.apiclient = self.testClient.getApiClient()
@@ -1260,12 +1233,7 @@ class TestSnapshotEvents(cloudstackTestCase):
return
def tearDown(self):
- try:
- # Clean up, terminate the created instance, volumes and snapshots
- cleanup_resources(self.apiclient, self.cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestSnapshotEvents, self).tearDown()
@attr(speed="slow")
@attr(tags=["advanced", "advancedns"], required_hardware="false")
diff --git a/test/integration/component/test_ss_domain_limits.py b/test/integration/component/test_ss_domain_limits.py
index ea89f9d..d52939a 100644
--- a/test/integration/component/test_ss_domain_limits.py
+++ b/test/integration/component/test_ss_domain_limits.py
@@ -15,13 +15,14 @@
# specific language governing permissions and limitations
# under the License.
-""" P1 tests for secondary storage domain limits
+"""
+P1 tests for secondary storage domain limits
- Test Plan: https://cwiki.apache.org/confluence/display/CLOUDSTACK/Limit+Resources+to+domain+or+accounts
+Test Plan: https://cwiki.apache.org/confluence/display/CLOUDSTACK/Limit+Resources+to+domain+or+accounts
- Issue Link: https://issues.apache.org/jira/browse/CLOUDSTACK-1466
+Issue Link: https://issues.apache.org/jira/browse/CLOUDSTACK-1466
- Feature Specifications: https://cwiki.apache.org/confluence/display/CLOUDSTACK/Limit+Resources+to+domains+and+accounts
+Feature Specifications: https://cwiki.apache.org/confluence/display/CLOUDSTACK/Limit+Resources+to+domains+and+accounts
"""
# Import Local Modules
from nose.plugins.attrib import attr
@@ -41,16 +42,15 @@ from marvin.codes import (PASS,
FAIL,
RESOURCE_SECONDARY_STORAGE)
+
class TestMultipleChildDomain(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cloudstackTestClient = super(TestMultipleChildDomain,
- cls).getClsTestClient()
+ cls).getClsTestClient()
cls.api_client = cloudstackTestClient.getApiClient()
- # Fill services from the external config file
cls.services = cloudstackTestClient.getParsedTestDataConfig()
- # Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cloudstackTestClient.getZoneForTests())
cls.services["mode"] = cls.zone.networktype
@@ -66,12 +66,7 @@ class TestMultipleChildDomain(cloudstackTestCase):
@classmethod
def tearDownClass(cls):
- try:
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestMultipleChildDomain,cls).tearDownClass()
def setUp(self):
self.apiclient = self.testClient.getApiClient()
@@ -80,32 +75,24 @@ class TestMultipleChildDomain(cloudstackTestCase):
return
def tearDown(self):
- try:
- # Clean up, terminate the created instance, volumes and snapshots
- cleanup_resources(self.apiclient, self.cleanup)
- pass
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestMultipleChildDomain,self).tearDown()
def updateDomainResourceLimits(self, parentdomainlimit, subdomainlimit):
"""Update secondary storage limits of the parent domain and its
child domains"""
try:
- #Update resource limit for domain
Resources.updateLimit(self.apiclient, resourcetype=11,
- max=parentdomainlimit,
- domainid=self.parent_domain.id)
+ max=parentdomainlimit,
+ domainid=self.parent_domain.id)
- # Update Resource limit for sub-domains
Resources.updateLimit(self.apiclient, resourcetype=11,
- max=subdomainlimit,
- domainid=self.cadmin_1.domainid)
+ max=subdomainlimit,
+ domainid=self.cadmin_1.domainid)
Resources.updateLimit(self.apiclient, resourcetype=11,
- max=subdomainlimit,
- domainid=self.cadmin_2.domainid)
+ max=subdomainlimit,
+ domainid=self.cadmin_2.domainid)
except Exception as e:
return [FAIL, e]
return [PASS, None]
@@ -113,32 +100,30 @@ class TestMultipleChildDomain(cloudstackTestCase):
def setupAccounts(self):
try:
self.parent_domain = Domain.create(self.apiclient,
- services=self.services["domain"],
- parentdomainid=self.domain.id)
+ services=self.services["domain"],
+ parentdomainid=self.domain.id)
+ self.cleanup.append(self.parent_domain)
self.parentd_admin = Account.create(self.apiclient, self.services["account"],
- admin=True, domainid=self.parent_domain.id)
+ admin=True, domainid=self.parent_domain.id)
+ self.cleanup.append(self.parentd_admin)
# Create sub-domains and their admin accounts
self.cdomain_1 = Domain.create(self.apiclient,
- services=self.services["domain"],
- parentdomainid=self.parent_domain.id)
+ services=self.services["domain"],
+ parentdomainid=self.parent_domain.id)
+ self.cleanup.append(self.cdomain_1)
self.cdomain_2 = Domain.create(self.apiclient,
- services=self.services["domain"],
- parentdomainid=self.parent_domain.id)
+ services=self.services["domain"],
+ parentdomainid=self.parent_domain.id)
+ self.cleanup.append(self.cdomain_2)
self.cadmin_1 = Account.create(self.apiclient, self.services["account"],
- admin=True, domainid=self.cdomain_1.id)
+ admin=True, domainid=self.cdomain_1.id)
+ self.cleanup.append(self.cadmin_1)
self.cadmin_2 = Account.create(self.apiclient, self.services["account"],
- admin=True, domainid=self.cdomain_2.id)
-
- # Cleanup the resources created at end of test
- self.cleanup.append(self.cadmin_1)
+ admin=True, domainid=self.cdomain_2.id)
self.cleanup.append(self.cadmin_2)
- self.cleanup.append(self.cdomain_1)
- self.cleanup.append(self.cdomain_2)
- self.cleanup.append(self.parentd_admin)
- self.cleanup.append(self.parent_domain)
users = {
self.cdomain_1: self.cadmin_1,
@@ -184,18 +169,18 @@ class TestMultipleChildDomain(cloudstackTestCase):
self.services["template_2"]["url"] = builtin_info[0]
self.services["template_2"]["hypervisor"] = builtin_info[1]
self.services["template_2"]["format"] = builtin_info[2]
+ self.services["template_2"]["ispublic"] = False
templateChildAccount1 = Template.register(self.apiclient,
- self.services["template_2"],
- zoneid=self.zone.id,
- account=self.cadmin_1.name,
- domainid=self.cadmin_1.domainid)
+ self.services["template_2"],
+ zoneid=self.zone.id,
+ account=self.cadmin_1.name,
+ domainid=self.cadmin_1.domainid)
templateChildAccount1.download(self.apiclient)
templates = Template.list(self.apiclient,
- templatefilter=\
- self.services["template_2"]["templatefilter"],
+ templatefilter=self.services["template_2"]["templatefilter"],
id=templateChildAccount1.id)
if validateList(templates)[0] == FAIL:
raise Exception("templates list validation failed")
@@ -218,10 +203,10 @@ class TestMultipleChildDomain(cloudstackTestCase):
try:
templateChildAccount2 = Template.register(self.apiclient,
- self.services["template_2"],
- zoneid=self.zone.id,
- account=self.cadmin_2.name,
- domainid=self.cadmin_2.domainid)
+ self.services["template_2"],
+ zoneid=self.zone.id,
+ account=self.cadmin_2.name,
+ domainid=self.cadmin_2.domainid)
templateChildAccount2.download(self.apiclient)
except Exception as e:
@@ -235,10 +220,10 @@ class TestMultipleChildDomain(cloudstackTestCase):
with self.assertRaises(Exception):
Template.register(self.apiclient,
- self.services["template_2"],
- zoneid=self.zone.id,
- account=self.parentd_admin.name,
- domainid=self.parentd_admin.domainid)
+ self.services["template_2"],
+ zoneid=self.zone.id,
+ account=self.parentd_admin.name,
+ domainid=self.parentd_admin.domainid)
self.cadmin_1.delete(self.apiclient)
self.cleanup.remove(self.cadmin_1)
@@ -289,19 +274,19 @@ class TestMultipleChildDomain(cloudstackTestCase):
self.services["template_2"]["url"] = builtin_info[0]
self.services["template_2"]["hypervisor"] = builtin_info[1]
self.services["template_2"]["format"] = builtin_info[2]
+ self.services["template_2"]["ispublic"] = False
template = Template.register(self.apiclient,
- self.services["template_2"],
- zoneid=self.zone.id,
- account=self.account.name,
- domainid=self.account.domainid)
+ self.services["template_2"],
+ zoneid=self.zone.id,
+ account=self.account.name,
+ domainid=self.account.domainid)
template.download(self.apiclient)
templates = Template.list(self.apiclient,
- templatefilter=\
- self.services["template_2"]["templatefilter"],
- id=template.id)
+ templatefilter=self.services["template_2"]["templatefilter"],
+ id=template.id)
if validateList(templates)[0] == FAIL:
raise Exception("templates list validation failed")
@@ -323,7 +308,7 @@ class TestMultipleChildDomain(cloudstackTestCase):
self.assertTrue(result[2], "Resource count does not match")
except Exception as e:
self.fail("Failed to get zone list: %s" % e)
- return
+ return
@attr(tags=["advanced"], required_hardware="true")
def test_03_copy_template(self):
@@ -360,19 +345,19 @@ class TestMultipleChildDomain(cloudstackTestCase):
self.services["template_2"]["url"] = builtin_info[0]
self.services["template_2"]["hypervisor"] = builtin_info[1]
self.services["template_2"]["format"] = builtin_info[2]
+ self.services["template_2"]["ispublic"] = False
template = Template.register(self.apiclient,
- self.services["template_2"],
- zoneid=self.zone.id,
- account=self.account.name,
- domainid=self.account.domainid)
+ self.services["template_2"],
+ zoneid=self.zone.id,
+ account=self.account.name,
+ domainid=self.account.domainid)
template.download(self.apiclient)
templates = Template.list(self.apiclient,
- templatefilter=\
- self.services["template_2"]["templatefilter"],
- id=template.id)
+ templatefilter=self.services["template_2"]["templatefilter"],
+ id=template.id)
if validateList(templates)[0] == FAIL:
raise Exception("templates list validation failed")
@@ -386,12 +371,12 @@ class TestMultipleChildDomain(cloudstackTestCase):
templateDestinationZoneId = None
for zone in zones:
- if template.zoneid != zone.id :
+ if template.zoneid != zone.id:
templateDestinationZoneId = zone.id
break
template.copy(self.apiclient, destzoneid=templateDestinationZoneId,
- sourcezoneid = template.zoneid)
+ sourcezoneid=template.zoneid)
expectedCount *= 2
result = isDomainResourceCountEqualToExpectedCount(
@@ -400,15 +385,16 @@ class TestMultipleChildDomain(cloudstackTestCase):
self.assertFalse(result[0], result[1])
self.assertTrue(result[2], "Resource count does not match")
except Exception as e:
- self.fail("Failed to get zone list: %s" % e)
- return
+ self.fail("Failed to copy template cross zones: %s" % e)
+ return
+
class TestDeleteAccount(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cloudstackTestClient = super(TestDeleteAccount,
- cls).getClsTestClient()
+ cls).getClsTestClient()
cls.api_client = cloudstackTestClient.getApiClient()
# Fill services from the external config file
cls.services = cloudstackTestClient.getParsedTestDataConfig()
@@ -426,12 +412,7 @@ class TestDeleteAccount(cloudstackTestCase):
@classmethod
def tearDownClass(cls):
- try:
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestDeleteAccount,cls).tearDownClass()
def setUp(self):
self.apiclient = self.testClient.getApiClient()
@@ -440,42 +421,34 @@ class TestDeleteAccount(cloudstackTestCase):
return
def tearDown(self):
- try:
- # Clean up, terminate the created instance, volumes and snapshots
- cleanup_resources(self.apiclient, self.cleanup)
- pass
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestDeleteAccount,self).tearDown()
def setupAccounts(self):
try:
self.parent_domain = Domain.create(self.apiclient,
- services=self.services["domain"],
- parentdomainid=self.domain.id)
+ services=self.services["domain"],
+ parentdomainid=self.domain.id)
+ self.cleanup.append(self.parent_domain)
self.parentd_admin = Account.create(self.apiclient, self.services["account"],
- admin=True, domainid=self.parent_domain.id)
+ admin=True, domainid=self.parent_domain.id)
+ self.cleanup.append(self.parentd_admin)
- # Create sub-domains and their admin accounts
self.cdomain_1 = Domain.create(self.apiclient,
- services=self.services["domain"],
- parentdomainid=self.parent_domain.id)
+ services=self.services["domain"],
+ parentdomainid=self.parent_domain.id)
+ self.cleanup.append(self.cdomain_1)
self.cdomain_2 = Domain.create(self.apiclient,
- services=self.services["domain"],
- parentdomainid=self.parent_domain.id)
+ services=self.services["domain"],
+ parentdomainid=self.parent_domain.id)
+ self.cleanup.append(self.cdomain_2)
self.cadmin_1 = Account.create(self.apiclient, self.services["account"],
- admin=True, domainid=self.cdomain_1.id)
+ admin=True, domainid=self.cdomain_1.id)
+ self.cleanup.append(self.cadmin_1)
self.cadmin_2 = Account.create(self.apiclient, self.services["account"],
- admin=True, domainid=self.cdomain_2.id)
-
- # Cleanup the resources created at end of test
+ admin=True, domainid=self.cdomain_2.id)
self.cleanup.append(self.cadmin_2)
- self.cleanup.append(self.cdomain_1)
- self.cleanup.append(self.cdomain_2)
- self.cleanup.append(self.parentd_admin)
- self.cleanup.append(self.parent_domain)
users = {
self.cdomain_1: self.cadmin_1,
@@ -514,18 +487,18 @@ class TestDeleteAccount(cloudstackTestCase):
self.services["template_2"]["url"] = builtin_info[0]
self.services["template_2"]["hypervisor"] = builtin_info[1]
self.services["template_2"]["format"] = builtin_info[2]
+ self.services["template_2"]["ispublic"] = False
template = Template.register(self.apiclient,
- self.services["template_2"],
- zoneid=self.zone.id,
- account=self.cadmin_1.name,
- domainid=self.cadmin_1.domainid)
+ self.services["template_2"],
+ zoneid=self.zone.id,
+ account=self.cadmin_1.name,
+ domainid=self.cadmin_1.domainid)
template.download(self.apiclient)
templates = Template.list(self.apiclient,
- templatefilter=\
- self.services["template_2"]["templatefilter"],
+ templatefilter=self.services["template_2"]["templatefilter"],
id=template.id)
if validateList(templates)[0] == FAIL:
raise Exception("templates list validation failed")
@@ -544,10 +517,10 @@ class TestDeleteAccount(cloudstackTestCase):
try:
template = Template.register(self.apiclient,
- self.services["template_2"],
- zoneid=self.zone.id,
- account=self.cadmin_2.name,
- domainid=self.cadmin_2.domainid)
+ self.services["template_2"],
+ zoneid=self.zone.id,
+ account=self.cadmin_2.name,
+ domainid=self.cadmin_2.domainid)
template.download(self.apiclient)
except Exception as e:
@@ -570,6 +543,7 @@ class TestDeleteAccount(cloudstackTestCase):
try:
self.cadmin_1.delete(self.apiclient)
+ self.cleanup.remove(self.cadmin_1)
except Exception as e:
self.fail("Failed to delete account: %s" % e)
diff --git a/test/integration/component/test_ss_limits.py b/test/integration/component/test_ss_limits.py
index 3d35d43..f1af83c 100644
--- a/test/integration/component/test_ss_limits.py
+++ b/test/integration/component/test_ss_limits.py
@@ -128,9 +128,10 @@ class TestSecondaryStorageLimits(cloudstackTestCase):
except Exception as e:
return [FAIL, e]
return [PASS, None]
-
+
+ # tags = ["advanced"]
@data(ROOT_DOMAIN_ADMIN, CHILD_DOMAIN_ADMIN)
- @attr(tags = ["advanced"], required_hardware="true")
+ @attr(tags = ["TODO"], required_hardware="true")
def test_01_register_template(self, value):
"""Test register template
# Validate the following:
@@ -195,8 +196,9 @@ class TestSecondaryStorageLimits(cloudstackTestCase):
self.assertEqual(response[0], PASS, response[1])
return
+ # tags = ["advanced"]
@data(ROOT_DOMAIN_ADMIN, CHILD_DOMAIN_ADMIN)
- @attr(tags=["advanced"], required_hardware="true")
+ @attr(tags=["TODO"], required_hardware="true")
def test_02_create_template_snapshot(self, value):
"""Test create snapshot and templates from volume
@@ -262,8 +264,9 @@ class TestSecondaryStorageLimits(cloudstackTestCase):
self.assertEqual(response[0], PASS, response[1])
return
+ # tags = ["advanced"]
@data(ROOT_DOMAIN_ADMIN, CHILD_DOMAIN_ADMIN)
- @attr(tags = ["advanced"], required_hardware="true")
+ @attr(tags = ["TODO"], required_hardware="true")
def test_03_register_iso(self, value):
"""Test register iso
Steps and validations:
diff --git a/test/integration/component/test_ss_max_limits.py b/test/integration/component/test_ss_max_limits.py
index 34c9c7b..fc3bd87 100644
--- a/test/integration/component/test_ss_max_limits.py
+++ b/test/integration/component/test_ss_max_limits.py
@@ -54,6 +54,7 @@ class TestMaxSecondaryStorageLimits(cloudstackTestCase):
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cloudstackTestClient.getZoneForTests())
cls.services["mode"] = cls.zone.networktype
+ cls._cleanup = []
cls.template = get_template(
cls.api_client,
@@ -65,17 +66,12 @@ class TestMaxSecondaryStorageLimits(cloudstackTestCase):
cls.services["virtual_machine"]["template"] = cls.template.id
cls.services["volume"]["zoneid"] = cls.zone.id
cls.service_offering = ServiceOffering.create(cls.api_client, cls.services["service_offering"])
- cls._cleanup = [cls.service_offering]
+ cls._cleanup.append(cls.service_offering)
return
@classmethod
def tearDownClass(cls):
- try:
- # Cleanup resources used
- cleanup_resources(cls.api_client, cls._cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestMaxSecondaryStorageLimits, cls).tearDownClass()
def setUp(self):
self.apiclient = self.testClient.getApiClient()
@@ -84,12 +80,7 @@ class TestMaxSecondaryStorageLimits(cloudstackTestCase):
return
def tearDown(self):
- try:
- # Clean up, terminate the created instance, volumes and snapshots
- cleanup_resources(self.apiclient, self.cleanup)
- except Exception as e:
- raise Exception("Warning: Exception during cleanup : %s" % e)
- return
+ super(TestMaxSecondaryStorageLimits, self).tearDown()
def registerTemplate(self, inProject=False):
"""Register and download template by default in the account/domain,
@@ -107,6 +98,7 @@ class TestMaxSecondaryStorageLimits(cloudstackTestCase):
account=self.child_do_admin.name if not inProject else None,
domainid=self.child_do_admin.domainid if not inProject else None,
projectid=self.project.id if inProject else None)
+ self.cleanup.append(template)
template.download(self.apiclient)
@@ -127,9 +119,11 @@ class TestMaxSecondaryStorageLimits(cloudstackTestCase):
try:
self.child_domain = Domain.create(self.apiclient,services=self.services["domain"],
parentdomainid=self.domain.id)
+ self.cleanup.append(self.child_domain)
self.child_do_admin = Account.create(self.apiclient, self.services["account"], admin=True,
domainid=self.child_domain.id)
+ self.cleanup.append(self.child_do_admin)
self.userapiclient = self.testClient.getUserApiClient(
UserName=self.child_do_admin.name,
@@ -139,13 +133,8 @@ class TestMaxSecondaryStorageLimits(cloudstackTestCase):
self.project = Project.create(self.apiclient, self.services["project"],
account=self.child_do_admin.name,
domainid=self.child_do_admin.domainid)
-
- # Cleanup created project at end of test
self.cleanup.append(self.project)
- # Cleanup accounts created
- self.cleanup.append(self.child_do_admin)
- self.cleanup.append(self.child_domain)
except Exception as e:
return [FAIL, e]
return [PASS, None]
@@ -156,16 +145,16 @@ class TestMaxSecondaryStorageLimits(cloudstackTestCase):
# Update resource limits for account
if accountLimit is not None:
Resources.updateLimit(self.apiclient, resourcetype=11,
- max=accountLimit, account=self.child_do_admin.name,
+ max=int(accountLimit), account=self.child_do_admin.name,
domainid=self.child_do_admin.domainid)
if projectLimit is not None:
Resources.updateLimit(self.apiclient, resourcetype=11,
- max=projectLimit, projectid=self.project.id)
+ max=int(projectLimit), projectid=self.project.id)
if domainLimit is not None:
Resources.updateLimit(self.apiclient, resourcetype=11,
- max=domainLimit, domainid=self.child_domain.id)
+ max=int(domainLimit), domainid=self.child_domain.id)
except Exception as e:
return [FAIL, e]
return [PASS, None]
diff --git a/test/integration/component/test_stopped_vm.py b/test/integration/component/test_stopped_vm.py
index c1b5da3..4872ab2 100644
--- a/test/integration/component/test_stopped_vm.py
+++ b/test/integration/component/test_stopped_vm.py
@@ -505,14 +505,15 @@ class TestDeployVM(cloudstackTestCase):
)
return
- @attr(
- tags=[
- "advanced",
- "eip",
... 5002 lines suppressed ...